diff --git "a/521.jsonl" "b/521.jsonl" new file mode 100644--- /dev/null +++ "b/521.jsonl" @@ -0,0 +1,716 @@ +{"seq_id":"251495083","text":"import numpy as np\nimport time\nfrom scipy.stats import norm\nfrom Data_generation_by_grid_search_py36.function import get_mean_sd_var, get_mean_sd_var_skew_skewBL\n\nclass Outcome:\n def set_ds_ss(self, dsc_parameters, sample):\n ds =np.linspace(sample.begin, sample.end, dsc_parameters.ndraw)\n # print(f\"1/length={ dsc_parameters.ndraw/(sample.end-sample.begin) }\")\n self.ds= ds.clip(0) # this replaces the negative numbers with zero.\n self.ss = norm.pdf(self.ds, sample.mean_demand, sample.sd_demand)\n # self.ss_corrected = self.ss / self.ss.sum()\n self.ss_corrected = self.ss / sample.correction\n # print(f\"self.ss.sum():{self.ss.sum()}, sample.correction:{sample.correction}\")\n self.ds_mean, self.ds_sd, self.ds_var = get_mean_sd_var(self.ds, self.ss_corrected)\n\n\n def set_NN(self, producer,retailer):\n self.NN = (producer.number + retailer.number)/producer.RA\n\n\n def set_ps(self,producer):\n self.ps = producer.get_mc(self.ds)\n self.ps_mean, self.ps_sd, self.ps_var, self.ps_skew, self.ps_skew_BL = get_mean_sd_var_skew_skewBL(self.ps, self.ss_corrected)\n\n\n def get_premium(self, producer, retailer):\n return (-1 *\n (\n (producer.number /\n (self.NN * producer.c * np.power(producer.a, producer.x))) *\n ( producer.c * retailer.retail_rate * producer.cov_pxs_ps - producer.cov_px1s_ps)\n )\n )\n\n\n def set_fps(self, producer, retailer):\n self.forward_price = self.ps_mean + self.get_premium(producer, retailer)\n\n\n def save_outcomes(self, producer, retailer, sample, dsc_parameters, negative_omitted):\n list=[\n producer.number, retailer.number, sample.mean_demand, sample.sd_demand, self.ds_mean, self.ds_sd,int(negative_omitted) ,\n retailer.retail_rate,\n producer.c, producer.a, producer.RA, dsc_parameters.ndraw, self.forward_price, self.ps_mean, self.ds_var,self.ps_skew, self.ps_skew_BL,\n self.ps_var, 0,0,producer.profits_mean,producer.profits_sd,retailer.profits_mean,producer.profits_sd,\n 0,0,0,retailer.opt_forw_position, producer.opt_forw_position,producer.opt_forw_position_comp, time.time(),producer.cov_pxs_ps,producer.cov_px1s_ps, int(dsc_parameters.a_fixed_boolean)]\n self.outcome_list = list","sub_path":"Data_generation_by_grid_search_py36/outcomes.py","file_name":"outcomes.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"295056554","text":"import cv2\nimport numpy as np\nfrom wide_resnet import WideResNet\nimport face_recognition\n\ndef gender_predict(face_location,img):\n depth = 16\n k = 8\n img_size = 64\n margin = 0.4\n\n input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_h, img_w, _ = np.shape(input_img)\n\n weight_file = './pretrained_models/weights.28-3.73.hdf5'\n model = WideResNet(img_size, depth=depth, k=k)()\n model.load_weights(weight_file)\n\n #x1, y1, x2, y2, w, h = face_location.left(), face_location.top(), face_location.right() + 1, face_location.bottom() + 1, face_location.width(), face_location.height()\n #left, top, right, bottom = face_location.left(), face_location.top(), face_location.right() + 1, face_location.bottom()\n\n top, right, bottom, left = face_location\n w = right - left\n h = bottom - top\n xw1 = max(int(left - margin * w), 0)\n yw1 = max(int(top - margin * h), 0)\n xw2 = min(int(right + margin * w), img_w - 1)\n yw2 = min(int(bottom + margin * h), img_h - 1)\n\n #取下待检测人脸\n face = 
cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))\n face = np.expand_dims(face, 0)\n\n #传入代检测人脸\n results = model.predict(face)\n #print(results[0][0][0])\n\n if results[0][0][0] < 0.5:\n label = 'male'\n else:\n label = 'female'\n\n return label\n'''\nimg_people = cv2.imread('./dataset/0014_color.jpg')\nimg_people_location = face_recognition.face_locations(img_people)\n\nlabel = gender_predict(img_people_location[0],img_people)\nprint(label)\n'''","sub_path":"gender.py","file_name":"gender.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"137076293","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 15 13:30:00 2019\n\n@author: 687spin\n\"\"\"\nimport visa\nimport time\nimport numpy as np\n \ncal = 1.897\n\noff = 0.0023\nclass Keithley263():\n \n \n def __init__(self, addr):\n self.port = addr\n rm = visa.ResourceManager()\n self.ps = rm.open_resource(self.port, timeout=3000)\n #time.sleep(3)\n #self.enable()\n # time.sleep(0.5)\n #self.ps.write('R0X')\n \n \n def getVoltage(self):\n temp = self.ps.query('G1X')\n# print(temp)\n return float(temp)\n \n def setVoltage(self,value):\n #value = value - (value % 0.00005)\n self.ps.write('V{}X'.format(value))\n \n def enable(self):\n self.ps.write('REN')\n \n def getCurrent(self):\n #2 amps/volt\n temp = (self.getVoltage() - off)*cal\n# print(temp)\n# head,val = temp.split('DCV')\n# print(val)\n return temp\n \n def rampCurrent(self, targetCurrent, rampRate=0.05,verbose = False, waitTime = 0.25):\n #2 amps/volt\n if (rampRate > 0.05):\n rampRate = 0.05 #amps/second\n #startCurrent=round(self.getVoltage()*cal, 6)\n startCurrent = self.getCurrent()\n #print(startCurrent)\n# if (abs(startCurrent-targetCurrent) > rampRate):\n currentSteps = np.append(np.arange(startCurrent,targetCurrent,rampRate),targetCurrent)\n if (targetCurrent>>>>>> Issue {}\n'''\n FEEDBACK_SEPARATOR='''\n>>>>>> Feedback {}\n'''\n REVIEW_SEPARATOR='''\n------------------\n'''\n WEB_LINK='''\nIf you would like to view the upstream patch on the web, follow this link:\n{}\n'''\n\n SWAG = ['Frrrresh', 'Crisper Than Cabbage', 'Awesome', 'Ahhhmazing',\n 'Cool As A Cucumber', 'Most Excellent', 'Eximious', 'Prestantious',\n 'Supernacular', 'Bodacious', 'Blue Chip', 'Blue Ribbon', 'Cracking',\n 'Dandy', 'Dynamite', 'Fab', 'Fabulous', 'Fantabulous',\n 'Scrumtrulescent', 'First Class', 'First Rate', 'First String',\n 'Five Star', 'Gangbusters', 'Grand', 'Groovy', 'HYPE', 'Jim-Dandy',\n 'Snazzy', 'Marvelous', 'Nifty', 'Par Excellence', 'Peachy Keen',\n 'PHAT', 'Prime', 'Prizewinning', 'Quality', 'Radical', 'Righteous',\n 'Sensational', 'Slick', 'Splendid', 'Lovely', 'Stellar', 'Sterling',\n 'Superb', 'Superior', 'Superlative', 'Supernal', 'Swell', 'Terrific',\n 'Tip-Top', 'Top Notch', 'Top Shelf', 'Unsurpassed', 'Wonderful']\n\nclass UpstreamReviewStrings(ReviewStrings):\n HASH_EXAMPLE='''\n (cherry picked from commit )\n'''\n INVALID_HASH_FOOTER='''\nPlease double check your commit hash is valid in the upstream tree and the hash\nis formatted properly in your commit message (see below):\n'''\n CLEAN_BACKPORT_FOOTER='''\nConsider changing your subject prefix to UPSTREAM to better reflect the\ncontents of this patch.\n'''\n\nclass FromgitReviewStrings(ReviewStrings):\n HASH_EXAMPLE='''\n (cherry picked from commit \n )\n'''\n INVALID_HASH_FOOTER='''\nPlease double check your commit hash is valid in the upstream tree, and please\nfully specify the remote tree and branch 
for FROMGIT changes (see below):\n'''\n CLEAN_BACKPORT_FOOTER='''\nConsider changing your subject prefix to FROMGIT to better reflect the\ncontents of this patch.\n'''\n\nclass FromlistReviewStrings(ReviewStrings):\n CLEAN_BACKPORT_FOOTER='''\nConsider changing your subject prefix to FROMLIST to better reflect the\ncontents of this patch.\n'''\n\n\nclass ReviewType(enum.Enum):\n FIXES_REF = 'fixes_ref'\n MISSING_FIELDS = 'missing_fields'\n MISSING_HASH = 'missing_hash'\n MISSING_AM = 'missing_am'\n INVALID_HASH = 'invalid_hash'\n INCORRECT_PREFIX = 'incorrect_prefix'\n ALTERED_UPSTREAM = 'altered_upstream'\n BACKPORT = 'backport'\n SUCCESS = 'success'\n CLEAR_VOTES = 'clear_votes'\n\n def __str__(self):\n return self.value\n def __repr__(self):\n return str(self)\n\n\nclass ReviewResult(object):\n def __init__(self, change, strings, dry_run=False):\n self.change = change\n self.strings = strings\n self.vote = 0\n self.notify = False\n self.dry_run = dry_run\n self.issues = {}\n self.feedback = {}\n self.web_link = None\n\n def add_review(self, review_type, msg, vote=0, notify=False, dry_run=False):\n # Take the lowest negative, or the highest positive\n if vote < 0 or self.vote < 0:\n self.vote = min(self.vote, vote)\n elif vote > 0 or self.vote > 0:\n self.vote = max(self.vote, vote)\n else:\n self.vote = vote\n\n if vote < 0:\n self.issues[review_type] = msg\n else:\n self.feedback[review_type] = msg\n\n self.notify = self.notify or notify\n self.dry_run = self.dry_run or dry_run\n\n def add_web_link(self, link):\n self.web_link = link\n\n def generate_issues(self):\n num_issues = len(self.issues)\n if not num_issues:\n return ''\n\n if num_issues > 1:\n msg = self.strings.FOUND_ISSUES_HEADER_MULTIPLE\n else:\n msg = self.strings.FOUND_ISSUES_HEADER_SINGLE\n\n for j,i in enumerate(self.issues.values()):\n if num_issues > 1:\n msg += self.strings.ISSUE_SEPARATOR.format(j + 1)\n msg += i\n return msg\n\n def generate_feedback(self):\n num_feedback = len(self.feedback)\n if not num_feedback:\n return ''\n\n if len(self.issues):\n msg = self.strings.FEEDBACK_AFTER_ISSUES\n elif self.vote > 0:\n msg = self.strings.POSITIVE_VOTE.format(random.choice(self.strings.SWAG))\n else:\n msg = ''\n\n for j,f in enumerate(self.feedback.values()):\n if num_feedback > 1:\n msg += self.strings.FEEDBACK_SEPARATOR.format(j + 1)\n msg += f\n return msg\n\n def generate_review_message(self):\n msg = self.strings.HEADER\n msg += self.generate_issues()\n if len(self.issues) and len(self.feedback):\n msg += self.strings.REVIEW_SEPARATOR\n msg += self.generate_feedback()\n msg += self.strings.REVIEW_SEPARATOR\n if self.web_link:\n msg += self.strings.WEB_LINK.format(self.web_link)\n msg += self.strings.FOOTER\n return msg\n\nclass ChangeReviewer(object):\n GERRIT_REMOTE = 'cros'\n def __init__(self, reviewer, change, dry_run):\n self.reviewer = reviewer\n self.is_backport = 'BACKPORT' in change.subject\n self.is_fixup = 'FIXUP' in change.subject\n self.is_revert = change.subject.startswith('Revert ')\n self.change = change\n self.dry_run = dry_run\n self.gerrit_patch = None\n self.upstream_patch = None\n self.review_result = None\n self.strings = None\n self.diff = None\n\n @staticmethod\n def can_review_change(change):\n raise NotImplementedError()\n\n def format_diff(self):\n msg = ''\n for l in self.diff:\n msg += ' {}\\n'.format(l)\n return msg\n\n def add_successful_review(self):\n msg = self.strings.SUCCESS.format(random.choice(self.strings.SWAG))\n self.review_result.add_review(ReviewType.SUCCESS, msg, 
vote=1)\n\n def add_clean_backport_review(self):\n msg = self.strings.CLEAN_BACKPORT_HEADER\n msg += self.strings.CLEAN_BACKPORT_FOOTER\n self.review_result.add_review(ReviewType.INCORRECT_PREFIX, msg, vote=-1,\n notify=True)\n\n def add_missing_fields_review(self, fields):\n missing = []\n if not fields['bug']:\n missing.append('BUG=')\n if not fields['test']:\n missing.append('TEST=')\n if not fields['sob']:\n cur_rev = self.change.current_revision\n missing.append('Signed-off-by: {} <{}>'.format(cur_rev.uploader_name,\n cur_rev.uploader_email))\n\n msg = self.strings.MISSING_FIELDS.format(', '.join(missing))\n self.review_result.add_review(ReviewType.MISSING_FIELDS, msg, vote=-1,\n notify=True)\n\n def get_gerrit_patch(self):\n for i in range(0, 4):\n try:\n self.gerrit_patch = self.reviewer.get_commit_from_remote(\n self.GERRIT_REMOTE, self.change.current_revision.ref)\n return True\n except:\n continue\n raise ValueError('ERROR: Could not get gerrit patch {}\\n'.format(\n self.change))\n\n def get_upstream_patch(self):\n raise NotImplementedError()\n\n def get_upstream_web_link(self):\n return None\n\n def get_patches(self):\n self.get_gerrit_patch()\n self.get_upstream_patch()\n\n def validate_commit_message(self):\n cur_rev = self.change.current_revision\n fields={'sob':False, 'bug':False, 'test':False}\n sob_name_re = re.compile('Signed-off-by:\\s+{}'.format(\n cur_rev.uploader_name))\n sob_email_re = re.compile('Signed-off-by:.*?<{}>'.format(\n cur_rev.uploader_email))\n for l in cur_rev.commit_message.splitlines():\n if l.startswith('BUG='):\n fields['bug'] = True\n elif l.startswith('TEST='):\n fields['test'] = True\n elif sob_name_re.match(l):\n fields['sob'] = True\n elif sob_email_re.match(l):\n fields['sob'] = True\n\n if not fields['bug'] or not fields['test'] or not fields['sob']:\n self.add_missing_fields_review(fields)\n\n def diff_patches(self, context=0):\n self.diff = self.reviewer.compare_diffs(self.upstream_patch,\n self.gerrit_patch, context=context)\n\n def compare_patches_clean(self):\n raise NotImplementedError()\n\n def compare_patches_backport(self):\n raise NotImplementedError()\n\n def compare_patches(self):\n if self.is_backport:\n # If a BACKPORT appears to be clean, increase the context to be sure\n # before suggesting switching to UPSTREAM prefix\n if len(self.diff) == 0:\n self.diff_patches(context=3)\n\n self.compare_patches_backport()\n else:\n self.compare_patches_clean()\n\n def review_patch(self):\n # Don't review these patches (yet)\n if self.is_fixup or self.is_revert:\n return None\n\n self.get_patches()\n self.validate_commit_message()\n if self.gerrit_patch and self.upstream_patch:\n self.diff_patches()\n self.compare_patches()\n\n if self.upstream_patch:\n self.get_upstream_web_link()\n\n if not self.review_result.issues and not self.review_result.feedback:\n return None\n return self.review_result\n\nclass GitChangeReviewer(ChangeReviewer):\n DEFAULT_REMOTE='git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git'\n def __init__(self, reviewer, change, dry_run):\n super().__init__(reviewer, change, dry_run)\n self.upstream_sha = None\n\n @staticmethod\n def can_review_change(change):\n raise NotImplementedError()\n\n def get_cgit_web_link_path(self):\n return '/commit/?head={}&id={}'.format(self.upstream_sha['branch'],\n self.upstream_sha['sha'])\n\n def get_upstream_web_link(self):\n remote = self.upstream_sha['remote']\n parsed = urllib.parse.urlparse(remote)\n l = 'https://'\n\n if parsed.netloc == 'git.kernel.org':\n l += 
parsed.netloc\n l += parsed.path\n l += self.get_cgit_web_link_path()\n elif 'github.com' in parsed.netloc:\n l += parsed.netloc\n l += parsed.path\n l += '/commit/{}'.format(self.upstream_sha['sha'])\n elif 'anongit' in parsed.netloc:\n l += parsed.netloc.replace('anongit', 'cgit')\n l += parsed.path\n l += self.get_cgit_web_link_path()\n elif 'git.infradead.org' in parsed.netloc:\n l = 'http://' # whomp whomp\n l += parsed.netloc\n l += parsed.path\n l += '/commit/{}'.format(self.upstream_sha['sha'])\n elif 'linuxtv.org' in parsed.netloc:\n l += 'git.linuxtv.org'\n l += parsed.path\n l += self.get_cgit_web_link_path()\n else:\n sys.stderr.write(\n 'ERROR: Could not parse web link for {}\\n'.format(remote))\n return\n\n r = requests.get(l)\n if r.status_code == 200:\n self.review_result.add_web_link(l)\n else:\n sys.stderr.write('ERROR: Got {} status for {}\\n'.format(r.status_code, l))\n return\n\n\n def add_missing_hash_review(self):\n msg = self.strings.MISSING_HASH_HEADER\n msg += self.strings.HASH_EXAMPLE\n msg += self.strings.MISSING_HASH_FOOTER\n self.review_result.add_review(ReviewType.MISSING_HASH, msg, vote=-1,\n notify=True)\n\n def add_invalid_hash_review(self, hashes):\n msg = self.strings.INVALID_HASH_HEADER\n for h in hashes:\n remote_str = h['remote']\n if h['branch']:\n remote_str += ' branch {}'.format(h['branch'])\n msg += self.strings.INVALID_HASH_LINE.format(h['sha'], remote_str)\n msg += self.strings.INVALID_HASH_FOOTER\n msg += self.strings.HASH_EXAMPLE\n self.review_result.add_review(ReviewType.INVALID_HASH, msg, vote=-1,\n notify=True)\n\n def add_fixes_ref_review(self, fixes_ref):\n msg = self.strings.FOUND_FIXES_REF_HEADER\n for l in fixes_ref.splitlines():\n msg += self.strings.FIXES_REF_LINE.format(l)\n msg += self.strings.FIXES_REF_FOOTER\n self.review_result.add_review(ReviewType.FIXES_REF, msg, notify=True)\n\n def add_altered_upstream_review(self):\n msg = self.strings.DIFFERS_HEADER\n msg += self.strings.ALTERED_UPSTREAM\n msg += self.format_diff()\n self.review_result.add_review(ReviewType.ALTERED_UPSTREAM,\n msg, vote=-1, notify=True)\n\n def add_backport_diff_review(self):\n msg = self.strings.DIFFERS_HEADER\n msg += self.strings.BACKPORT_DIFF\n msg += self.format_diff()\n self.review_result.add_review(ReviewType.BACKPORT, msg)\n\n def get_upstream_patch(self):\n upstream_shas = self.reviewer.get_cherry_pick_shas_from_patch(\n self.gerrit_patch)\n if not upstream_shas:\n self.add_missing_hash_review()\n return\n\n upstream_sha = None\n for s in reversed(upstream_shas):\n if not s['remote']:\n s['remote'] = self.DEFAULT_REMOTE\n if not s['branch']:\n s['branch'] = 'master'\n s['remote_name'] = self.reviewer.generate_remote_name(s['remote'])\n\n self.reviewer.fetch_remote(s['remote_name'], s['remote'], s['branch'])\n\n if not self.reviewer.is_sha_in_branch(s['sha'], s['remote_name'],\n s['branch']):\n continue\n\n self.upstream_patch = self.reviewer.get_commit_from_sha(s['sha'])\n self.upstream_sha = s\n\n if not self.upstream_patch:\n self.add_invalid_hash_review(upstream_shas)\n return\n\n def get_patches(self):\n super().get_patches()\n\n if self.upstream_patch and self.upstream_sha:\n fixes_ref = self.reviewer.find_fixes_reference(\n self.upstream_sha['sha'],\n self.upstream_sha['remote_name'],\n self.upstream_sha['branch'])\n if fixes_ref:\n self.add_fixes_ref_review(fixes_ref)\n\n def compare_patches_backport(self):\n if len(self.diff) == 0:\n self.add_clean_backport_review()\n else:\n self.add_backport_diff_review()\n\n def 
compare_patches_clean(self):\n if len(self.diff):\n self.add_altered_upstream_review()\n else:\n self.add_successful_review()\n\n\nclass UpstreamChangeReviewer(GitChangeReviewer):\n def __init__(self, reviewer, change, dry_run):\n super().__init__(reviewer, change, dry_run)\n self.strings = UpstreamReviewStrings()\n self.review_result = ReviewResult(self.change, self.strings, self.dry_run)\n\n @staticmethod\n def can_review_change(change):\n # labeled UPSTREAM or labeled BACKPORT\n return ('UPSTREAM' in change.subject or\n ('BACKPORT' in change.subject and\n 'FROMGIT' not in change.subject and\n 'FROMLIST' not in change.subject))\n\n\nclass FromgitChangeReviewer(GitChangeReviewer):\n def __init__(self, reviewer, change, dry_run):\n super().__init__(reviewer, change, dry_run)\n self.strings = FromgitReviewStrings()\n self.review_result = ReviewResult(self.change, self.strings, self.dry_run)\n\n @staticmethod\n def can_review_change(change):\n return 'FROMGIT' in change.subject\n\n\nclass FromlistChangeReviewer(ChangeReviewer):\n def __init__(self, reviewer, change, dry_run):\n super().__init__(reviewer, change, dry_run)\n self.strings = FromlistReviewStrings()\n self.review_result = ReviewResult(self.change, self.strings, self.dry_run)\n self.review_backports = False\n\n @staticmethod\n def can_review_change(change):\n return 'FROMLIST' in change.subject\n\n def add_missing_am_review(self, change):\n self.review_result.add_review(ReviewType.MISSING_AM,\n self.strings.MISSING_AM, vote=-1, notify=True)\n\n def add_altered_fromlist_review(self):\n msg = self.strings.ALTERED_FROMLIST\n msg += self.format_diff()\n self.review_result.add_review(ReviewType.ALTERED_UPSTREAM, msg)\n\n def add_fromlist_backport_review(self):\n msg = self.strings.BACKPORT_FROMLIST\n msg += self.format_diff()\n self.review_result.add_review(ReviewType.BACKPORT, msg)\n\n def add_clear_votes_review(self):\n msg = self.strings.CLEAR_VOTES\n self.review_result.add_review(ReviewType.CLEAR_VOTES, msg)\n\n def get_upstream_patch(self):\n patchwork_url = self.reviewer.get_am_from_from_patch(self.gerrit_patch)\n if not patchwork_url:\n self.add_missing_am_review(self.change)\n return\n\n for u in reversed(patchwork_url):\n try:\n self.upstream_patch = self.reviewer.get_commit_from_patchwork(u)\n break\n except:\n continue\n\n if not self.upstream_patch:\n sys.stderr.write(\n 'ERROR: patch missing from patchwork, or patchwork host '\n 'not whitelisted for {} ({})\\n'.format(self.change,\n patchwork_url))\n return\n\n def compare_patches_clean(self):\n if len(self.diff) == 0:\n self.add_successful_review()\n elif self.review_backports:\n self.add_altered_fromlist_review()\n else:\n self.add_clear_votes_review()\n\n def compare_patches_backport(self):\n if len(self.diff) == 0:\n self.add_clean_backport_review()\n elif self.review_backports:\n self.add_fromlist_backport_review()\n else:\n self.add_clear_votes_review()\n\n\nclass Troll(object):\n def __init__(self, url, args):\n self.url = url\n self.args = args\n self.gerrit = Gerrit(url)\n self.tag = 'autogenerated:review-o-matic'\n self.blacklist = {}\n self.stats = { str(ReviewType.SUCCESS): 0, str(ReviewType.BACKPORT): 0,\n str(ReviewType.ALTERED_UPSTREAM): 0,\n str(ReviewType.MISSING_FIELDS): 0,\n str(ReviewType.MISSING_HASH): 0,\n str(ReviewType.INVALID_HASH): 0,\n str(ReviewType.MISSING_AM): 0,\n str(ReviewType.INCORRECT_PREFIX): 0,\n str(ReviewType.FIXES_REF): 0 }\n\n def inc_stat(self, review_type):\n if self.args.dry_run:\n return\n key = str(review_type)\n if not 
self.stats.get(key):\n self.stats[key] = 1\n else:\n self.stats[key] += 1\n\n def do_review(self, change, review):\n print('Review for change: {}'.format(change.url()))\n print(' Issues: {}, Feedback: {}, Vote:{}, Notify:{}'.format(\n review.issues.keys(), review.feedback.keys(), review.vote,\n review.notify))\n\n if review.dry_run:\n print(review.generate_review_message())\n print('------')\n return\n\n for i in review.issues:\n self.inc_stat(i)\n for f in review.feedback:\n self.inc_stat(f)\n self.gerrit.review(change, self.tag, review.generate_review_message(),\n review.notify, vote_code_review=review.vote)\n\n def get_changes(self, prefix):\n message = '{}:'.format(prefix)\n after = datetime.date.today() - datetime.timedelta(days=5)\n changes = self.gerrit.query_changes(status='open', message=message,\n after=after, project='chromiumos/third_party/kernel')\n return changes\n\n def add_change_to_blacklist(self, change):\n self.blacklist[change.number] = change.current_revision.number\n\n def is_change_in_blacklist(self, change):\n return self.blacklist.get(change.number) == change.current_revision.number\n\n def process_changes(self, changes):\n rev = Reviewer(git_dir=self.args.git_dir, verbose=self.args.verbose,\n chatty=self.args.chatty)\n ret = 0\n for c in changes:\n if self.args.verbose:\n print('Processing change {}'.format(c.url()))\n\n # Blacklist if we've already reviewed this revision\n for m in c.messages:\n if m.tag == self.tag and m.revision_num == c.current_revision.number:\n self.add_change_to_blacklist(c)\n\n # Find a reviewer and blacklist if not found\n reviewer = None\n if FromlistChangeReviewer.can_review_change(c):\n reviewer = FromlistChangeReviewer(rev, c, self.args.dry_run)\n elif FromgitChangeReviewer.can_review_change(c):\n reviewer = FromgitChangeReviewer(rev, c, self.args.dry_run)\n elif UpstreamChangeReviewer.can_review_change(c):\n reviewer = UpstreamChangeReviewer(rev, c, self.args.dry_run)\n if not reviewer:\n self.add_change_to_blacklist(c)\n continue\n\n force_review = self.args.force_cl or self.args.force_all\n if not force_review and self.is_change_in_blacklist(c):\n continue\n\n result = reviewer.review_patch()\n if result:\n self.do_review(c, result)\n ret += 1\n\n self.add_change_to_blacklist(c)\n\n return ret\n\n def update_stats(self):\n if not self.args.dry_run and self.args.stats_file:\n with open(self.args.stats_file, 'wt') as f:\n json.dump(self.stats, f)\n print('--')\n summary = ' Summary: '\n total = 0\n for k,v in self.stats.items():\n summary += '{}={} '.format(k,v)\n total += v\n summary += 'total={}'.format(total)\n print(summary)\n print('')\n\n def run(self):\n if self.args.force_cl:\n c = self.gerrit.get_change(self.args.force_cl)\n print('Force reviewing change {}'.format(c))\n self.process_changes([c])\n return\n\n if self.args.stats_file:\n try:\n with open(self.args.stats_file, 'rt') as f:\n self.stats = json.load(f)\n except FileNotFoundError:\n self.update_stats()\n\n prefixes = ['UPSTREAM', 'BACKPORT', 'FROMGIT', 'FROMLIST']\n while True:\n try:\n did_review = 0\n for p in prefixes:\n changes = self.get_changes(p)\n if self.args.verbose:\n print('{} changes for prefix {}'.format(len(changes), p))\n did_review += self.process_changes(changes)\n if did_review > 0:\n self.update_stats()\n if not self.args.daemon:\n break\n if self.args.verbose:\n print('Finished! 
Going to sleep until next run')\n\n except (requests.exceptions.HTTPError, OSError) as e:\n sys.stderr.write('Error getting changes: ({})\\n'.format(str(e)))\n time.sleep(60)\n\n time.sleep(120)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Troll gerrit reviews')\n parser.add_argument('--git-dir', default=None, help='Path to git directory')\n parser.add_argument('--verbose', help='print commits', action='store_true')\n parser.add_argument('--chatty', help='print diffs', action='store_true')\n parser.add_argument('--daemon', action='store_true',\n help='Run in daemon mode, for continuous trolling')\n parser.add_argument('--dry-run', action='store_true', default=False,\n help='skip the review step')\n parser.add_argument('--force-cl', default=None, help='Force review a CL')\n parser.add_argument('--force-all', action='store_true', default=False,\n help='Force review all (implies dry-run)')\n parser.add_argument('--stats-file', default=None, help='Path to stats file')\n args = parser.parse_args()\n\n if args.force_all:\n args.dry_run = True\n\n troll = Troll('https://chromium-review.googlesource.com', args)\n troll.run()\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"troll-o-matic.py","file_name":"troll-o-matic.py","file_ext":"py","file_size_in_byte":26976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"14288794","text":"import os\n\nimport bleach\n\nfrom flask import abort, current_app, render_template, request\nfrom flask.views import View\nfrom frontmatter import loads as load_frontmatter_from_markdown\nfrom jinja2.exceptions import TemplateNotFound\nfrom mistune import Markdown\n\n\nclass TemplateFinder(View):\n \"\"\"\n A TemplateView that guesses the template name based on the\n url path\n \"\"\"\n\n def __init__(self):\n self.markdown_parser = Markdown(\n parse_block_html=True, parse_inline_html=True\n )\n\n def dispatch_request(self, *args, **kwargs):\n \"\"\"\n This is called when TemplateFinder is run as a view\n It tries to find the template for the request path\n and then passes that template name to TemplateView to render\n \"\"\"\n path = request.path.lstrip(\"/\")\n matching_template = self._get_template(path)\n\n if not matching_template:\n abort(404, f\"Can't find page for: {path}\")\n\n if matching_template[-2:] == \"md\":\n with open(\n f\"{current_app.template_folder}/{matching_template}\"\n ) as f:\n file_content = f.read()\n parsed_file = load_frontmatter_from_markdown(file_content)\n wrapper_template = parsed_file.metadata.get(\"wrapper_template\")\n\n if not wrapper_template or not os.path.isfile(\n current_app.template_folder + \"/\" + wrapper_template\n ):\n abort(404, f\"Can't find page for: {path}\")\n\n context = parsed_file.metadata.get(\"context\", {})\n return self._render_markdown(\n parsed_file.content, wrapper_template, context\n )\n\n return render_template(matching_template, **self._get_context())\n\n def _get_context(self):\n context = {}\n clean_path = request.path.strip(\"/\")\n for index, path in enumerate(clean_path.split(\"/\")):\n context[\"level_\" + str(index + 1)] = path\n return context\n\n def _get_template(self, url_path):\n \"\"\"\n Given a basic path, find an HTML or Markdown file\n \"\"\"\n\n # Try to match HTML or Markdown files\n if self._template_exists(url_path + \".html\"):\n return url_path + \".html\"\n elif self._template_exists(os.path.join(url_path, \"index.html\")):\n return os.path.join(url_path, \"index.html\")\n elif 
self._template_exists(url_path + \".md\"):\n return url_path + \".md\"\n elif self._template_exists(os.path.join(url_path, \"index.md\")):\n return os.path.join(url_path, \"index.md\")\n\n return None\n\n def _template_exists(self, path):\n \"\"\"\n Check if a template exists\n without raising an exception\n \"\"\"\n loader = current_app.jinja_loader\n try:\n loader.get_source({}, template=path)\n except TemplateNotFound:\n return False\n\n return True\n\n def _render_markdown(self, markdown, wrapper_file, context={}):\n \"\"\"\n :param markdown: Markdown to be rendered\n :param wrapper_file: The wrapper for the Markdown content\n :param context: Optional preexisting context\n \"\"\"\n\n clean_markdown = bleach.clean(markdown)\n rendered_markdown = self.markdown_parser(clean_markdown)\n\n context = {\"content\": rendered_markdown}\n\n return render_template(wrapper_file, **context)\n","sub_path":"templatefinder/templatefinder.py","file_name":"templatefinder.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"325171544","text":"\n\nfrom fonctions import *\nimport pickle\nfrom init import me\n\n\n\ndef game_analyse(file0):\n\n file0 = open(str(file0), \"r\")\n\n previous_partie = pickle.load( open(\"previous_partie_b\",\"rb\") )\n name_player_met = previous_partie.name_player_met\n player_met = previous_partie.player_met\n\n partie = Partie(name_player_met, player_met)\n\n buff, tour = 0,0\n\n for line in file0:\n line = line.split()\n\n buff = test_line(line)\n\n if buff != -1: tour = buff\n\n #---------------------nvx coup----------------------\n if tour == 0: \n if buff == 0: \n coup = Coup()\n init_table(line, coup, partie) # ok\n\n #---------------------preflop----------------------\n elif tour == 2 and line != []: \n analyse_pf(line, coup, tour)\n\n #---------------------flop-------------------------\n elif tour == 3 and line != []: \n analyse_f(line, coup, tour)\n\n #---------------------turn-------------------------\n elif tour == 4 and line != []: \n analyse_t(line, coup, tour)\n\n #---------------------river------------------------\n elif tour == 5 and line != []: \n analyse_r(line, coup, tour)\n\n #---------------------SUMMARY----------------------\n \"\"\"\n elif tour == 7 and line != []:\n get_hand_faced(coup, me)\n \"\"\"\n pickle.dump(partie, open(\"previous_partie_b\",\"wb\"))\n\n file0.close()\n\n\n\ngame_analyse(\"history_hand/20180923_WANTED(248506286)_real_holdem_no-limit.txt\")\n","sub_path":"py_files/Tracker_code.py","file_name":"Tracker_code.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"351753471","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\n\r\nroot = Tk()\r\nroot.iconbitmap(\"TicTacToe.ico\")\r\nroot.title(\"Tic Tac Toe by ULAN\")\r\nroot.resizable(False, False)\r\n\r\n\r\n# True >>> \"X\" and False >>>> \"O\"\r\nclick = True \r\ntotal = 0\r\n\r\n# For CheckWin() We'll need it\r\nbtn1 = StringVar()\r\nbtn2 = StringVar()\r\nbtn3 = StringVar()\r\nbtn4 = StringVar()\r\nbtn5 = StringVar()\r\nbtn6 = StringVar()\r\nbtn7 = StringVar()\r\nbtn8 = StringVar()\r\nbtn9 = StringVar()\r\n\r\nO = PhotoImage(file=\"O.png\")\r\nX = PhotoImage(file=\"X.png\")\r\n\r\n\r\ndef play():\r\n\tbutton1 = Button(root, padx=75, pady=65, bd=3, relief=\"ridge\", bg=\"#d7d9d7\", textvariable=btn1, command=lambda: Click(1,0,0))\r\n\tbutton2 = Button(root, padx=75, pady=65, bd=3, 
relief=\"ridge\",bg=\"#d7d9d7\", textvariable=btn2, command=lambda: Click(2,0,1))\r\n\tbutton3 = Button(root, padx=75, pady=65, bd=3, relief=\"ridge\",bg=\"#d7d9d7\", textvariable=btn3, command=lambda: Click(3,0,2))\r\n\tbutton4 = Button(root, padx=75, pady=65, bd=3, relief=\"ridge\",bg=\"#caccca\", textvariable=btn4, command=lambda: Click(4,1,0))\r\n\tbutton5 = Button(root, padx=75, pady=65, bd=3, relief=\"ridge\",bg=\"#caccca\", textvariable=btn5, command=lambda: Click(5,1,1))\r\n\tbutton6 = Button(root, padx=75, pady=65, bd=3, relief=\"ridge\",bg=\"#caccca\", textvariable=btn6, command=lambda: Click(6,1,2))\r\n\tbutton7 = Button(root, padx=75, pady=65, bd=3, relief=\"ridge\",bg=\"#a1a6a2\", textvariable=btn7, command=lambda: Click(7,2,0))\r\n\tbutton8 = Button(root, padx=75, pady=65, bd=3, relief=\"ridge\",bg=\"#a1a6a2\", textvariable=btn8, command=lambda: Click(8,2,1))\r\n\tbutton9 = Button(root, padx=75, pady=65, bd=3, relief=\"ridge\",bg=\"#a1a6a2\", textvariable=btn9, command=lambda: Click(9,2,2))\r\n\r\n\tbutton1.grid(row=0, column=0)\r\n\tbutton2.grid(row=0, column=1)\r\n\tbutton3.grid(row=0, column=2)\r\n\tbutton4.grid(row=1, column=0)\r\n\tbutton5.grid(row=1, column=1)\r\n\tbutton6.grid(row=1, column=2)\t\r\n\tbutton7.grid(row=2, column=0)\r\n\tbutton8.grid(row=2, column=1)\r\n\tbutton9.grid(row=2, column=2)\r\n\r\n\r\ndef Click(num,row,column):\r\n\tglobal click,total\r\n\tif click == True:\r\n\t\tlabel_image = Label(root, image=X)\r\n\t\tlabel_image.grid(row=row, column=column)\r\n\t\tif num == 1:\r\n\t\t\tbtn1.set(\"X\")\r\n\t\tif num == 2:\r\n\t\t\tbtn2.set(\"X\")\r\n\t\tif num == 3:\r\n\t\t\tbtn3.set(\"X\")\r\n\t\tif num == 4:\r\n\t\t\tbtn4.set(\"X\")\r\n\t\tif num == 5:\r\n\t\t\tbtn5.set(\"X\")\r\n\t\tif num == 6:\r\n\t\t\tbtn6.set(\"X\")\r\n\t\tif num == 7:\r\n\t\t\tbtn7.set(\"X\")\r\n\t\tif num == 8:\r\n\t\t\tbtn8.set(\"X\")\r\n\t\tif num == 9:\r\n\t\t\tbtn9.set(\"X\")\r\n\t\ttotal+=1\r\n\t\tclick=False\r\n\t\tCheckWin()\r\n\telse:\r\n\t\tlabel_image = Label(root, image=O)\r\n\t\tlabel_image.grid(row=row, column=column)\r\n\t\tif num == 1:\r\n\t\t\tbtn1.set(\"O\")\r\n\t\tif num == 2:\r\n\t\t\tbtn2.set(\"O\")\r\n\t\tif num == 3:\r\n\t\t\tbtn3.set(\"O\")\r\n\t\tif num == 4:\r\n\t\t\tbtn4.set(\"O\")\r\n\t\tif num == 5:\r\n\t\t\tbtn5.set(\"O\")\r\n\t\tif num == 6:\r\n\t\t\tbtn6.set(\"O\")\r\n\t\tif num == 7:\r\n\t\t\tbtn7.set(\"O\")\r\n\t\tif num == 8:\r\n\t\t\tbtn8.set(\"O\")\r\n\t\tif num == 9:\r\n\t\t\tbtn9.set(\"O\")\r\n\t\ttotal+=1\r\n\t\tclick=True\r\n\t\tCheckWin()\r\n\r\n# ---------------------------------------------------- CheckWin() ---------------------------------------\r\n\r\ndef CheckWin():\r\n\tglobal click,total\r\n\tif ((btn1.get() == \"X\" and btn2.get() == \"X\" and btn3.get() == \"X\") or\r\n\t\t(btn4.get() == \"X\" and btn5.get() == \"X\" and btn6.get()== \"X\") or\r\n\t\t(btn7.get() == \"X\" and btn8.get() == \"X\" and btn9.get() == \"X\") or\r\n\t\t(btn1.get() == \"X\" and btn4.get() == \"X\" and btn7.get() == \"X\") or\r\n\t\t(btn2.get() == \"X\" and btn5.get() == \"X\" and btn8.get() == \"X\") or\r\n\t\t(btn3.get() == \"X\" and btn6.get() == \"X\" and btn9.get() == \"X\") or\r\n\t\t(btn1.get() == \"X\" and btn5.get() == \"X\" and btn9.get() == \"X\") or\r\n\t\t(btn3.get() == \"X\" and btn5.get() == \"X\" and btn7.get() == \"X\")):\r\n\t\tresponse = messagebox.askquestion(\"Tic Tac Toe\", \"Player X Wins!\\nDo you want to play again?\")\r\n\t\tif response == \"no\":\r\n\t\t\tquit()\r\n\t\tclick = True\r\n\t\ttotal = 
0\r\n\t\tclear()\r\n\t\tplay()\r\n\r\n\telif ((btn1.get() == \"O\" and btn2.get() == \"O\" and btn3.get() == \"O\") or\r\n\t\t(btn4.get() == \"O\" and btn5.get() == \"O\" and btn6.get() == \"O\") or\r\n\t\t(btn7.get() == \"O\" and btn8.get() == \"O\" and btn9.get() == \"O\") or\r\n\t\t(btn1.get() == \"O\" and btn4.get() == \"O\" and btn7.get() == \"O\") or\r\n\t\t(btn2.get() == \"O\" and btn5.get() == \"O\" and btn8.get() == \"O\") or\r\n\t\t(btn3.get() == \"O\" and btn6.get() == \"O\" and btn9.get() == \"O\") or\r\n\t\t(btn1.get() == \"O\" and btn5.get() == \"O\" and btn9.get() == \"O\") or\r\n\t\t(btn3.get() == \"O\" and btn5.get() == \"O\" and btn7.get() == \"O\")):\r\n\t\tresponse = messagebox.askquestion(\"Tic Tac Toe\", \"Player O Wins!\\nDo you want to play again?\")\r\n\t\tif response == \"no\":\r\n\t\t\tquit()\r\n\t\tclick = True\r\n\t\ttotal = 0\r\n\t\tclear()\r\n\t\tplay()\r\n\r\n\telif total == 9:\r\n\t\tresponse = messagebox.askquestion(\"Tic Tac Toe\", \"Tie Game!\\nDo you want to play again?\")\r\n\t\tif response == \"no\":\r\n\t\t\tquit()\r\n\t\tclick = True\r\n\t\ttotal = 0\r\n\t\tclear()\r\n\t\tplay()\r\n\r\ndef clear():\r\n\tbtn1.set(\"\")\r\n\tbtn2.set(\"\")\r\n\tbtn3.set(\"\")\r\n\tbtn4.set(\"\")\r\n\tbtn5.set(\"\")\r\n\tbtn6.set(\"\")\r\n\tbtn7.set(\"\")\r\n\tbtn8.set(\"\")\r\n\tbtn9.set(\"\")\r\n\r\nplay()\r\n\r\nmainloop()","sub_path":"Start.py","file_name":"Start.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"149184474","text":"from scheduler.exceptions import KubeHTTPException\nfrom scheduler.resources import Resource\n\nMANIFEAT_CLASSES = {}\n\n\nclass BaseManifest(object):\n\n def manifest(self, api_version, ingress, ingress_class, namespace, **kwargs):\n path = \"/*\" if ingress_class in (\"gce\", \"alb\") else \"/\"\n hosts, tls = kwargs.pop(\"hosts\", None), kwargs.pop(\"tls\", None)\n version = kwargs.pop(\"version\", None)\n data = {\n \"kind\": \"Ingress\",\n \"apiVersion\": api_version,\n \"metadata\": {\n \"name\": ingress,\n \"annotations\": {\n \"kubernetes.io/tls-acme\": \"true\",\n }\n },\n \"spec\": {}\n }\n if hosts:\n data[\"spec\"][\"rules\"] = [{\n \"host\": host,\n \"http\": {\n \"paths\": [\n {\n \"path\": path,\n \"backend\": {\n \"serviceName\": ingress,\n \"servicePort\": 80\n }\n }\n ]\n }\n } for host in hosts]\n if ingress_class:\n data[\"metadata\"][\"annotations\"].update({\n \"kubernetes.io/ingress.class\": ingress_class\n })\n if tls:\n data[\"spec\"][\"tls\"] = tls\n if version:\n data[\"metadata\"][\"resourceVersion\"] = version\n return data\n\n\nclass NginxManifest(BaseManifest):\n\n def manifest(self, api_version, ingress, ingress_class, namespace, **kwargs):\n data = BaseManifest.manifest(\n self, api_version, ingress, ingress_class, namespace, **kwargs)\n if \"whitelist\" in 
kwargs:\n whitelist = \", \".join(kwargs.pop(\"whitelist\"))\n data[\"metadata\"][\"annotations\"].update({\n \"ingress.kubernetes.io/whitelist-x-forwarded-for\": \"true\",\n \"traefik.ingress.kubernetes.io/whitelist-source-range\": whitelist\n })\n if \"ssl_redirect\" in kwargs:\n ssl_redirect = kwargs.pop(\"ssl_redirect\")\n data[\"metadata\"][\"annotations\"].update({\n \"ingress.kubernetes.io/ssl-redirect\": ssl_redirect\n })\n return data\n\n\nMANIFEAT_CLASSES[\"traefik\"] = TraefikManifest\n\n\nclass Ingress(Resource):\n\n api_version = 'networking.k8s.io/v1beta1'\n api_prefix = 'apis'\n short_name = 'ingress'\n\n @staticmethod\n def manifest(api_version, ingress, ingress_class, namespace, **kwargs):\n return MANIFEAT_CLASSES.get(ingress_class, BaseManifest)().manifest(\n api_version, ingress, ingress_class, namespace, **kwargs\n )\n\n def get(self, namespace, ingress=None, **kwargs):\n \"\"\"\n Fetch a single Ingress or a list of Ingresses\n \"\"\"\n if ingress is not None:\n url = self.api(\"/namespaces/{}/ingresses/{}\", namespace, ingress)\n message = 'get Ingress ' + ingress\n else:\n url = self.api(\"/namespaces/{}/ingresses\", namespace)\n message = 'get Ingresses'\n\n response = self.http_get(url, params=self.query_params(**kwargs))\n if self.unhealthy(response.status_code):\n raise KubeHTTPException(response, message)\n\n return response\n\n def create(self, ingress, ingress_class, namespace, **kwargs):\n url = self.api(\"/namespaces/{}/ingresses\", namespace)\n data = self.manifest(self.api_version, ingress, ingress_class, namespace, **kwargs)\n response = self.http_post(url, json=data)\n\n if not response.status_code == 201:\n raise KubeHTTPException(response, \"create Ingress {}\".format(namespace))\n\n return response\n\n def put(self, ingress, ingress_class, namespace, version, **kwargs):\n url = self.api(\"/namespaces/{}/ingresses/{}\", namespace, ingress)\n kwargs[\"version\"] = version\n data = self.manifest(self.api_version, ingress, ingress_class, namespace, **kwargs)\n response = self.http_put(url, json=data)\n\n if self.unhealthy(response.status_code):\n raise KubeHTTPException(response, \"put Ingress {}\".format(namespace))\n\n return response\n\n def patch(self, ingress, namespace, data):\n url = self.api(\"/namespaces/{}/ingresses/{}\", namespace, ingress)\n response = self.http_put(url, json=data)\n\n if self.unhealthy(response.status_code):\n raise KubeHTTPException(response, \"patch Ingress {}\".format(namespace))\n\n return response\n\n def delete(self, namespace, ingress):\n url = self.api(\"/namespaces/{}/ingresses/{}\", namespace, ingress)\n response = self.http_delete(url)\n if self.unhealthy(response.status_code):\n raise KubeHTTPException(response, 'delete Ingress \"{}\"', namespace)\n\n return response\n","sub_path":"rootfs/scheduler/resources/ingress.py","file_name":"ingress.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"454422567","text":"#!/usr/bin/env python\n\"\"\"Flask server controller.\n\nFlask is light-weight and modular so this is actually all we need to set up a simple HTML page.\n\"\"\"\n\nimport os\nimport subprocess\nfrom subprocess import Popen, PIPE\nfrom urllib.parse import unquote\nimport flask\nfrom flask import jsonify, request\nfrom robot.comms.connection import Connection\nimport time\nimport datetime\nimport robot.util.utils as utils\nfrom robot.util.utils import run_shell\nfrom shlex import split\n\nimport 
robot.basestation.stream_capture as stream_capture\nfrom robot.basestation.stream_capture import start_recording_feed, stop_recording_feed, \\\nis_recording_stream, stream_capture, add_rotation\nimport robot.basestation.ros_utils as ros_utils\nfrom robot.basestation.ros_utils import fetch_ros_master_uri, fetch_ros_master_ip\n\napp = flask.Flask(__name__)\n\n@app.route(\"/arm\")\ndef index():\n \"\"\"The arm panel.\"\"\"\n return flask.render_template(\"pages/Arm.html\", roverIP=fetch_ros_master_ip())\n\n@app.route(\"/camerapopup\")\ndef camerapopup():\n \"\"\"Camera Pop-up.\"\"\"\n return flask.render_template(\"pages/CameraPopUp.html\", roverIP=fetch_ros_master_ip())\n\n@app.route(\"/\")\n@app.route(\"/rover\")\ndef rover():\n \"\"\"Rover control panel.\"\"\"\n return flask.render_template(\"pages/Rover.html\", roverIP=fetch_ros_master_ip())\n\n\n@app.route(\"/science\")\ndef science():\n \"\"\"Science page.\"\"\"\n return flask.render_template(\"pages/Science.html\", roverIP=fetch_ros_master_ip())\n\n\n@app.route(\"/pds\")\ndef pds():\n \"\"\"PDS page.\"\"\"\n return flask.render_template(\"pages/PDS.html\", roverIP=fetch_ros_master_ip())\n\n\n@app.route(\"/stream\")\ndef stream():\n \"\"\"Streams page.\"\"\"\n return flask.render_template(\"pages/Streams.html\", roverIP=fetch_ros_master_ip())\n\n\n@app.route(\"/navigation\")\ndef navigation():\n \"\"\"Navigation page.\"\"\"\n return flask.render_template(\"pages/Navigation.html\", roverIP=fetch_ros_master_ip())\n\n\n# routes for science page\n@app.route('/science/numSections')\ndef numSections():\n return '4'\n\n\n@app.route('/science/initialSection')\ndef initialSection():\n return '0'\n\n\n@app.route(\"/ping_rover\")\ndef ping_rover():\n \"\"\"Pings ROS_MASTER_URI and return response object with resulting outputs.\n\n Pings rover first directly with Unix ping command,\n then using ros ping_acknowledgment service.\n\n Returns JSON object with the following fields:\n success -- whether requests was successful\n ping_msg -- output of Unix ping command\n ros_msg -- output of the ROS ping_acknowledgment service\n \"\"\"\n ping_output, error = run_shell(\"ping -c 1 \" + fetch_ros_master_ip())\n ping_output = ping_output.decode()\n\n print(\"Output: \" + ping_output)\n\n if \"Destination Net Unreachable\" in ping_output:\n error_msg = \"Basestation has no connection to network, aborting ROS ping.\"\n return jsonify(success=False, ping_msg=ping_output, ros_msg=error_msg)\n\n if \"Destination Host Unreachable\" in ping_output:\n error_msg = \"Rover has no connection to network, aborting ROS ping.\"\n return jsonify(success=False, ping_msg=ping_output, ros_msg=error_msg)\n\n if error:\n print(\"Error: \" + error.decode())\n\n ros_output, error = run_shell(\n \"rosrun ping_acknowledgment ping_response_client.py\")\n ros_output = ros_output.decode()\n\n print(\"Pinging rover\")\n print(\"Output: \" + ros_output)\n\n if error:\n print(\"Error: \" + error.decode())\n\n return jsonify(success=True, ping_msg=ping_output, ros_msg=ros_output)\n\n\n# only to be used when hacky implementation is fixed\n# see odroid_rx package for details\n@app.route(\"/odroid_rx\", methods=[\"POST\"])\ndef odroid_rx():\n script_dir = os.path.dirname(os.path.realpath(__file__))\n log_file = script_dir + \"/../rospackages/src/odroid_rx/scripts/odroid_rx.txt\"\n print(\"odroid_rx\")\n\n # query the topic exactly once\n output, error = run_shell(\"cat\", log_file)\n output = str(output, \"utf-8\")\n\n print(\"output: \" + output)\n\n return jsonify(success=True, 
odroid_rx=output)\n\n# Rover controls\n@app.route(\"/rover_drive\", methods=[\"POST\"])\ndef rover_drive():\n print(\"rover_drive\")\n\n cmd = str(request.get_data('cmd'), \"utf-8\")\n print(\"cmd: \" + cmd)\n # remove fluff, only command remains\n if cmd:\n cmd = cmd.split(\"=\")[1]\n # decode URI\n cmd = unquote(cmd)\n\n if local:\n rover_ip = \"127.0.0.1\"\n base_ip = rover_ip\n rover_port = 5020\n base_port = 5025\n else:\n rover_ip = \"172.16.1.30\"\n base_ip = \"172.16.1.20\"\n rover_port = 5030\n base_port = rover_port\n print(\"cmd: \" + cmd)\n sender = Connection(\"rover_drive_sender\", rover_ip, rover_port)\n\n error = str(None)\n\n try:\n sender.send(cmd)\n except OSError:\n error = \"Network is unreachable\"\n print(error)\n\n receiver = Connection(\"rover_drive_receiver\", base_ip, base_port)\n feedback = str(None)\n error = str(None)\n\n try:\n feedback = receiver.receive(timeout=2)\n except OSError:\n error = \"Network error\"\n print(error)\n\n print(\"feedback:\", feedback)\n\n if not feedback:\n feedback = \"Timeout limit exceeded, no data received\"\n\n return jsonify(success=True, cmd=cmd, feedback=feedback, error=error)\n\n\n@app.route(\"/capture_image/\", methods=[\"POST\", \"GET\"])\ndef capture_image():\n stream_url= request.args['stream_url']\n rotation = int(request.args['camera_rotation'])\n success, message = stream_capture(stream_url, rotation)\n return jsonify(success=success, msg=message)\n\n\n@app.route(\"/initiate_feed_recording/\", methods=[\"POST\", \"GET\"])\ndef initiate_feed_recording():\n stream_url = request.args['stream_url']\n if is_recording_stream(stream_url):\n return jsonify(success=False, msg=\"Stream is already recording\")\n else:\n success, message = start_recording_feed(stream_url)\n return jsonify(success=success, msg=message)\n\n\n@app.route(\"/stop_feed_recording/\", methods=[\"POST\", \"GET\"])\ndef stop_feed_recording():\n stream_url = request.args['stream_url']\n rotation = int(request.args['camera_rotation'])\n if is_recording_stream(stream_url):\n success, message = add_rotation(stream_url, rotation)\n\n if not success:\n print('add_rotation method failed:', message)\n\n success, message = stop_recording_feed(stream_url)\n\n if not success:\n print('stop_recording_feed method failed:', message)\n return jsonify(success=success, msg=message)\n else:\n return jsonify(success=False, msg=\"Attempted to stop stream that was not recording\")\n\n\n@app.route(\"/is_recording/\", methods=[\"POST\", \"GET\"])\ndef is_recording():\n stream_url = request.args['stream_url']\n return jsonify(is_recording=is_recording_stream(stream_url))\n\n\nif __name__ == \"__main__\":\n\n # feature toggles\n # the following two are used for UDP based communication with the Connection class\n global local\n local = False\n\n app.run(debug=True, host='0.0.0.0')\n # add param `host= '0.0.0.0'` if you want to run on your machine's IP address\n","sub_path":"robot/basestation/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"523755055","text":"import logging\nimport queue\nimport threading\nimport time\nimport crcmod\nimport serial\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass UartConnectionThread(threading.Thread):\n \"\"\"\n Class responsible for handling UART connection (sending and receiving data).\n \"\"\"\n\n def __init__(self, _serial, in_queue, out_queue):\n \"\"\"\n Initializes UART connection thread class.\n\n :param _serial: 
serial.Serial, class allowing for serial port access.\n :param in_queue: queue.Queue, queue into which received bytes will be put.\n :param out_queue: queue.Queue, queue with bytes that will be sent.\n \"\"\"\n super().__init__()\n self.daemon = True\n\n self._serial = _serial\n self._serial_busy = threading.Event()\n\n self._in_queue = in_queue\n self._out_queue = out_queue\n\n def set_baudrate(self, baudrate):\n \"\"\"\n Sets the serial port baudrate.\n\n :param baudrate: int, UART baudrate\n \"\"\"\n settings = self._serial.get_settings()\n settings[\"baudrate\"] = baudrate\n\n self._serial.apply_settings(settings)\n\n def stop(self):\n \"\"\"\n Stops the UART connection thread.\n \"\"\"\n try:\n self._serial_busy.clear()\n self.join()\n except RuntimeError as e:\n LOGGER.info(\"Tried to stop UartConnectionThread before it was started. An error occurred: %s\", e)\n\n def run(self):\n \"\"\"\n The run method that sends and receives data over the UART.\n \"\"\"\n self._serial_busy.set()\n\n self._serial.reset_input_buffer()\n self._serial.reset_output_buffer()\n\n data_to_be_sent = bytearray()\n\n while self._serial_busy.is_set():\n if not data_to_be_sent and not self._out_queue.empty():\n data_to_be_sent = self._out_queue.get_nowait()\n\n if data_to_be_sent:\n sent_bytes_n = self._serial.write(data_to_be_sent)\n data_to_be_sent = data_to_be_sent[sent_bytes_n:]\n\n self._in_queue.put(self._serial.read_all())\n\n time.sleep(0.001) # Needed on Linux. Otherwise CPU utilization 100% is observed.\n\n\ndef create_uart_connection_thread(in_queue, out_queue, port, baudrate=56700, timeout_s=0, write_timeout_s=0):\n \"\"\"\n Function creates a UartConnectionThread object.\n\n :param in_queue: queue.Queue, queue into which received bytes will be put.\n :param out_queue: queue.Queue, queue with bytes that will be sent.\n :param port: str, UART com port name\n :param baudrate: int, UART baudrate\n :param timeout_s: int, timeout for receiving data from UART\n :param write_timeout_s: int, timeout for sending data over UART\n :return: UartConnectionThread object\n \"\"\"\n _serial = serial.Serial(\n port,\n baudrate,\n timeout=timeout_s,\n write_timeout=write_timeout_s\n )\n\n return UartConnectionThread(_serial, in_queue, out_queue)\n\n\nclass UartAdapterObserver:\n \"\"\"\n Abstract class responsible for receiving notifications from UartAdapter.\n Inherit from this class so that your class can be registered.\n \"\"\"\n\n def new_frame_notification(self, frame):\n \"\"\"\n Called when a new frame is received. Should be overridden in a derived class.\n\n :param frame: bytes, received frame, consisting of len, cmd and payload (without preamble and crc)\n :return: None\n \"\"\"\n pass\n\n\nclass UartAdapter(threading.Thread):\n \"\"\"\n Class responsible for communicating with firmware over uart protocol in a new thread. To start the thread,\n call the start() method on an instance of UartAdapter.\n Provides queue-based methods for sending data to and reading data from the serial buffer.\n This class expects that the registered object is of MeshNodeUart type, which will have its received_frames\n queue populated with every parsed frame.\n \"\"\"\n\n UART_PREAMBLE = bytes.fromhex(\"AA55\")\n\n def __init__(self, port, baud_rate=56700, timeout_s=0, write_timeout_s=0):\n \"\"\"\n Initializes UartAdapter class.\n\n :param port: COM port to be used\n :param baud_rate: UART baudrate\n :param timeout_s: UART connection timeout\n :param write_timeout_s: UART connection write timeout\n \"\"\"\n super().__init__()\n self.daemon = True\n\n self._in_queue = queue.Queue()\n self._out_queue = queue.Queue()\n\n self._uart_conn = create_uart_connection_thread(\n self._in_queue,\n self._out_queue,\n port=port,\n baudrate=baud_rate,\n timeout_s=timeout_s,\n write_timeout_s=write_timeout_s\n )\n self._uart_conn.start()\n\n self.observers = []\n\n self._processing_frames = threading.Event()\n\n LOGGER.info(\"UART adapter initialized\")\n\n def change_baudrate(self, baud_rate):\n \"\"\"\n Sets the serial port baudrate. This method is thread safe.\n\n :param baud_rate: int, new baudrate value\n \"\"\"\n self._uart_conn.set_baudrate(baud_rate)\n\n def register_observer(self, observer_obj):\n \"\"\"\n Register objects for notification (updating queue). Registered object has to be of UartAdapterObserver class.\n\n :param observer_obj: instance of object\n \"\"\"\n assert isinstance(observer_obj,\n UartAdapterObserver), \"[UART ADAPTER] Invalid class instance. Expected 'UartAdapterObserver'\"\n if observer_obj not in self.observers:\n self.observers.append(observer_obj)\n\n def unregister_observer(self, observer_obj):\n \"\"\"\n Unregister objects for notification. Unregistered object has to be of UartAdapterObserver class.\n\n :param observer_obj: instance of object\n \"\"\"\n assert isinstance(observer_obj,\n UartAdapterObserver), \"[UART ADAPTER] Invalid class instance. Expected 'UartAdapterObserver'\"\n self.observers.remove(observer_obj)\n\n def _insert_parsed_frame(self, frame):\n \"\"\"\n Notifies all registered observers with the parsed frame.\n\n :param frame: bytes, parsed uart frame\n \"\"\"\n for observer in self.observers:\n observer.new_frame_notification(frame)\n\n def write_uart_frame(self, uart_frame, send_raw=False):\n \"\"\"\n Writes uart_frame directly to the serial buffer.\n\n :param uart_frame: bytes with raw data (without preamble and crc)\n :param send_raw: bool, if set to True will send raw bytes else will append preamble and crc\n \"\"\"\n assert type(uart_frame) in [bytes], \"Given data type for creating uart frame: '{}' is invalid. \" \\\n \"Expected bytes.\".format(type(uart_frame))\n\n if send_raw:\n frame_to_send = uart_frame\n else:\n crc = UartAdapter.calculate_crc_bytes(uart_frame)\n frame_to_send = UartAdapter.UART_PREAMBLE + uart_frame + crc\n self._out_queue.put(frame_to_send)\n\n def stop(self):\n \"\"\"\n Stops execution of the thread.\n \"\"\"\n self._uart_conn.stop()\n\n try:\n self._processing_frames.clear()\n self.join()\n except RuntimeError as e:\n LOGGER.info(\"Tried to stop UartAdapter thread before it was started. An error occurred: %s\", e)\n\n def run(self):\n \"\"\"\n Main UartAdapter loop. 
Responsible for receiving data from UART\n\n :return: None\n \"\"\"\n LOGGER.info(\"uart adapter run\")\n self._processing_frames.set()\n\n not_processed_buffer_data = bytearray()\n\n while self._processing_frames.is_set():\n try:\n not_processed_buffer_data += self._in_queue.get(timeout=0.1)\n\n frames, not_processed_buffer_data = UartAdapter.extract_frames(not_processed_buffer_data)\n for frame in frames:\n self._insert_parsed_frame(frame)\n\n except queue.Empty:\n pass\n\n @staticmethod\n def extract_frames(raw_uart_data):\n \"\"\"\n Extract single-message-frames (without preamble and crc) from raw uart data.\n\n :param raw_uart_data: bytes, bytearray Raw uart data\n :return: tuple: list of extracted uart frames, bytes remaining data\n \"\"\"\n uart_frames = list()\n raw_uart_data = UartAdapter.eat_bytes_until_preamble(raw_uart_data)\n\n if len(raw_uart_data) < 6:\n return uart_frames, raw_uart_data\n\n data_len = int.from_bytes(raw_uart_data[2:3], byteorder='little', signed=False)\n if len(raw_uart_data) < 6 + data_len:\n return uart_frames, raw_uart_data\n\n uart_frame = raw_uart_data[:4 + data_len + 2]\n remaining_data = raw_uart_data[6 + data_len:]\n expected_crc = UartAdapter.calculate_crc(uart_frame[2:4 + data_len])\n actual_crc = int.from_bytes(uart_frame[len(uart_frame) - 2:], byteorder='little',\n signed=False)\n\n uart_frame_no_preamble_and_crc = uart_frame[2:len(uart_frame) - 2]\n\n if expected_crc == actual_crc:\n uart_frames.append(uart_frame_no_preamble_and_crc)\n if len(remaining_data) > 0:\n another_frames, remaining_data = UartAdapter.extract_frames(remaining_data)\n uart_frames.extend(another_frames)\n return uart_frames, remaining_data\n else:\n return uart_frames, remaining_data\n\n @staticmethod\n def calculate_crc(bytes_data):\n \"\"\"\n Calculates checksum for given series of bytes.\n Parameters for checksum:\n - polynomial = 0x8005\n - init value = 0xFFFF\n\n :param bytes_data: bytes type data for calculating checksum\n :return: int, checksum\n \"\"\"\n _checksum_function = crcmod.mkCrcFun(0x18005, rev=False, initCrc=0xFFFF, xorOut=0x0000)\n assert type(bytes_data) == bytes or type(\n bytes_data) == bytearray, \"Given invalid data type '{}', expected 'bytes'\".format(type(bytes_data))\n checksum = _checksum_function(bytes_data)\n return checksum\n\n @staticmethod\n def calculate_crc_bytes(bytes_data):\n \"\"\"\n Calculates checksum for given series of bytes\n\n :param bytes_data: bytes type data for calculating checksum\n :return: bytes, checksum\n \"\"\"\n return UartAdapter.calculate_crc(bytes_data).to_bytes(2, byteorder='little', signed=False)\n\n @staticmethod\n def eat_bytes_until_preamble(buffer_data):\n \"\"\"\n Removes bytes from received data until preamble indicating a new frame is found.\n\n :param buffer_data: bytearray with raw data obtained from the serial port buffer\n :return: bytearray with raw data, with preamble on first bytes (or all raw data, if there was no preamble in\n the buffer).\n \"\"\"\n orphaned_bytes = bytearray()\n while len(buffer_data) > 0 and buffer_data[:2] != bytearray(UartAdapter.UART_PREAMBLE):\n orphaned_bytes.append(buffer_data.pop(0))\n if len(orphaned_bytes) > 0:\n if len(buffer_data) == 0:\n return orphaned_bytes\n print(\"Removed orphaned bytes: {}\".format(orphaned_bytes))\n return 
buffer_data\n","sub_path":"silvair_uart_common_libs/uart_common_classes.py","file_name":"uart_common_classes.py","file_ext":"py","file_size_in_byte":11373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"55310196","text":"import pygame\n\npygame.init()\n\nscreen = pygame.display.set_mode([400,300])\npygame.display.set_caption(\"\")\n \ndone = False\nclock = pygame.time.Clock()\n\nwhile not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n print(\"mouseclick(\", pos, \")\")\n elif event.type == pygame.KEYDOWN:\n key = event.key\n print(\"key(\", key, \")\")\n\n screen.fill( (0, 0, 0) )\n clock.tick(60)\n pygame.display.flip()\n \npygame.quit()\n","sub_path":"pygame-grid4.py","file_name":"pygame-grid4.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"168850008","text":"def jump_to_end(arr):\n max_step = arr[0]\n last_index = len(arr) - 1\n for i in range(len(arr)):\n if max_step >= last_index:\n return True\n else:\n max_step = i + arr[i]\n return False\n\n\n# a = [3, 2, 0, 0, 1, 2, 1]\n# a = [2, 2, 1, 0, 4]\na = [3, 2, 1, 0, 4]\nprint(jump_to_end(a))\n","sub_path":"datastructs/arrays/jump_to_end.py","file_name":"jump_to_end.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"490576294","text":"import scrapy\nfrom scrapy.http.request import Request\n\nclass HHScrapy(scrapy.Spider):\n name = \"parsehh\"\n start_urls = [\"https://hh.ru/search/vacancy?text=python&area=113&salary=¤cy_code=RUR&experience=doesNotMatter&order_by=publication_time&search_period=30&items_on_page=100&no_magic=true\"]\n \n def parse(self, response):\n for dataset in response.css(\"div.search-item-name\"):\n vacancy = dataset.css(\"a::attr(href)\").extract_first()\n yield Request(vacancy, callback=self.parse_vacancy)\n\n \"\"\" get the total number of pages \"\"\"\n for record_page in response.css(\"a::attr(data-page)\").extract()[4:5]:\n record_page = int(record_page)\n\n \"\"\" crawl all pages \"\"\"\n for i in range(1, record_page):\n urls = \"{}&page={}\".format(self.start_urls[0], str(i))\n yield Request(urls, callback=self.parse)\n\n \"\"\" get vacancy information \"\"\"\n def parse_vacancy(self, response):\n title = response.xpath(\"//title/text()\").extract_first() \n name = response.xpath(\"//meta[@itemprop='name']/@content\").extract_first() \n url = response.xpath(\"//meta[@itemprop='url']/@content\").extract_first() \n datePosted = response.xpath(\"//meta[@itemprop='datePosted']/@content\").extract_first() \n minValue = response.xpath(\"//meta[@itemprop='minValue']/@content\").extract_first() or ''\n currency = response.xpath(\"//meta[@itemprop='currency']/@content\").extract_first() or ''\n addressLocality = response.xpath(\"//meta[@itemprop='addressLocality']/@content\").extract_first() \n employment = response.xpath(\"//span[@itemprop='workHours']//text()\").extract_first() \n value = \"{} {}\".format(minValue,currency)\n # description = response.xpath(\"//div[@class='g-user-content']//text()\").extract()\n\n yield { \n 'date': datePosted,\n 'name': name,\n 'title': title,\n 'url': url,\n 'employment': employment,\n 'address': addressLocality,\n 'minValue': value,\n 
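# --- The jump_to_end() above overwrites max_step with i + arr[i] even when
# --- index i is unreachable (e.g. [0, 3, 0, 0] wrongly returns True). A
# --- hedged corrected greedy sketch:
def can_jump_to_end(arr):
    reach = 0
    for i, step in enumerate(arr):
        if i > reach:                 # index i can never be reached
            return False
        reach = max(reach, i + step)  # furthest index reachable so far
    return reach >= len(arr) - 1

assert can_jump_to_end([2, 2, 1, 0, 4]) is False
assert can_jump_to_end([0, 3, 0, 0]) is False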
}","sub_path":"parsehh/spiders/hhspider.py","file_name":"hhspider.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"55747441","text":"import numpy as np\nfrom .utils import (\n zero_pad_array, fill_invalid,\n apply_log10, calc_perc\n)\n\n\ndef transfrom_raw_to_array(downloaded_data):\n \"\"\"\n Transforms the given dictionary of DataFrames\n to a preprocessed numpy array ready for training\n\n Arguments:\n downloaded_data {dict {pd.Dataframe}}\n\n Returns:\n np.array\n \"\"\"\n dataset = []\n\n max_len = max([len(i) for i in downloaded_data.values()])\n\n # each item is a pd.Dataframe\n for df in downloaded_data.values():\n\n close = list(calc_perc(df['close']))\n volume = list(calc_perc(df['volume']))\n apply_log10(df['reddit'])\n reddit = list(df['reddit'])\n apply_log10(df['news'])\n news = list(df['news'])\n\n # the combined array will have shape (4, max_len)\n # so it needs to be transposed with .T\n combined = np.array([close, volume, reddit, news]).T\n fill_invalid(combined)\n dataset.append(zero_pad_array(combined, max_len))\n\n # shape == (len(downloaded_data), max_len, 4)\n dataset = np.array(dataset)\n return dataset\n\n\ndef connect_data(component):\n \"\"\"\n Sticks all the data together into one big dataframe\n\n Arguments:\n component {download_manager.Component}\n\n Returns:\n pd.Dataframe\n \"\"\"\n # stock_downloaded is already a pd.Dataframe\n stock_db = component.stock_downloaded.copy()\n stock_db['reddit'] = 0\n stock_db['news'] = 0\n\n idx = stock_db.index\n\n # This part takes the most time but there isn't much that can be\n # done to speed it up\n for subreddit in component.reddit_downloaded.keys():\n for query in component.reddit_downloaded[subreddit].keys():\n for day, num in component.reddit_downloaded[subreddit][query].items():\n if day in idx:\n stock_db.loc[day, 'reddit'] += num\n\n for day, num in component.news_downloaded[component.code].items():\n if day in idx:\n stock_db.loc[day, 'news'] += num\n\n return stock_db\n\n\ndef split_to_sequences(data, seq_size=300):\n \"\"\"\n Splits the given data into sequences of given size\n Each concurrent sequence differs from the previous one\n by having one more point at the end and one less at the start\n\n Arguments:\n data {np.array} -- rank 3 array\n seq_size {int} -- (default: {300})\n\n Returns:\n np.array -- shape (num_sequences, seq_size, data.shape[2])\n \"\"\"\n split = []\n for idx in range(data.shape[1] - seq_size + 1):\n split.append(data[:, idx:idx+seq_size, :])\n return np.concatenate(split)\n","sub_path":"preprocess_utils/dataset_creation.py","file_name":"dataset_creation.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"434308291","text":"# 2016년에 가장 관심을 많이 받았던 비감독(Unsupervised) 학습 방법인\n# Generative Adversarial Network(GAN)을 구현해봅니다.\n# https://arxiv.org/abs/1406.2661\n\n#########\n# 기본적인import,데이터가져오기\n######\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"./mnist/data/\", one_hot=True)\n\n#########\n# 옵션 설정\n######\ntotal_epoch = 100 # 반복 수행 횟수\nbatch_size = 100 # 한번에 만드는 이미지 갯수\nlearning_rate = 0.0002 # 학습 시 변수를 조정하는 정도\n# 신경망 레이어 구성 옵션\nn_hidden = 256 # 신경망(두뇌)의 복잡도\nn_input = 28 * 28 # 입력이미지의 크기\nn_noise = 128 # 생성기의 입력값으로 사용할 노이즈의 크기\n\n#########\n# 신경망 모델 
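# --- Hedged shape check for split_to_sequences() above: a (2, 5, 4) input
# --- with seq_size=3 gives 5-3+1 = 3 windows per item, stacked on axis 0.
import numpy as np

demo = np.zeros((2, 5, 4))
assert split_to_sequences(demo, seq_size=3).shape == (2 * 3, 3, 4)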
구성\n######\n################################모델 생성 시작################################################\n################################공장이라고보면=>생산기계를 만드는 과정################################################\n# 입력 이미지 X\nX = tf.placeholder(tf.float32, [None, n_input])\n# 노이즈 Z를 입력값으로 사용합니다.\nZ = tf.placeholder(tf.float32, [None, n_noise])\n\n# 생성기 신경망에 사용하는 변수들입니다.\nG_W1 = tf.Variable(tf.random_normal([n_noise, n_hidden], stddev=0.01))\nG_b1 = tf.Variable(tf.zeros([n_hidden]))\nG_W2 = tf.Variable(tf.random_normal([n_hidden, n_input], stddev=0.01))\nG_b2 = tf.Variable(tf.zeros([n_input]))\n\n# 판별기 신경망에 사용하는 변수들입니다.\nD_W1 = tf.Variable(tf.random_normal([n_input, n_hidden], stddev=0.01))\nD_b1 = tf.Variable(tf.zeros([n_hidden]))\n# 판별기의 최종 결과값은 얼마나 진짜와 가깝냐를 판단하는 단 하나의 값\n# 판별기에서 열심히 계산한 결과를 단 하나의 값으로 만들기 위해 아래의 연산을 사용\nD_W2 = tf.Variable(tf.random_normal([n_hidden, 1], stddev=0.01))\nD_b2 = tf.Variable(tf.zeros([1]))\n\n\n# 생성기(G) 신경망을 구성합니다.\ndef generator(noise_z):\n hidden = tf.nn.relu(\n tf.matmul(noise_z, G_W1) + G_b1)\n output = tf.nn.sigmoid(\n tf.matmul(hidden, G_W2) + G_b2)\n\n return output\n\n\n# 판별기(D) 신경망을 구성합니다.\ndef discriminator(inputs):\n hidden = tf.nn.relu(\n tf.matmul(inputs, D_W1) + D_b1)\n output = tf.nn.sigmoid(\n tf.matmul(hidden, D_W2) + D_b2)\n\n return output\n\n\n# 랜덤한 노이즈(Z)를 만듭니다.\ndef get_noise(batch_size, n_noise):\n return np.random.normal(size=(batch_size, n_noise))\n\n################################모델 생성 완료################################################\n\n\n################################학습 시나리오 작성 시작################################################\n################################공장이라고보면=>생산기계를 배치하는 과정################################################\n# 1. (생성자)노이즈를 이용해 랜덤한 이미지를 생성합니다.\nG = generator(Z)\n\n################################공장이라고보면=>작업하는 룰을 결정하는 과정################################################\n# 2-1. (판별자)노이즈를 이용해 생성한 이미지가 진짜 이미지인지 판별한 값을 구합니다.\nD_gene = discriminator(G) # 가짜이미지 판별 => 0일수록 좋음\n# 2-2. (판별자)진짜 이미지를 이용해 판별한 값을 구합니다.\nD_real = discriminator(X) # 진짜이미지 판별 => 1일수록 좋음\n# 2-3. 
(판별자) D_gene(가짜이미지 판별)은 최대한작게, D_real(진짜이미지 판별)은 최대한크게 만들도록 내부변수를 조정\nloss_D = tf.reduce_mean(tf.log(D_real) + tf.log(1 - D_gene))\n\n\n# (생성자) D_gene(가짜이미지 판별)을 최대한크게하도록 내부변수를 조정\nloss_G = tf.reduce_mean(tf.log(D_gene))\n\n# loss_D 를 구할 때는 판별기 신경망에 사용되는 변수만 사용하고,\n# loss_G 를 구할 때는 생성기 신경망에 사용되는 변수만 사용하여 최적화를 합니다.\nD_var_list = [D_W1, D_b1, D_W2, D_b2]\nG_var_list = [G_W1, G_b1, G_W2, G_b2]\n\n########## <공장이라고생각하면=>이전 결과를 토대로 작업방식을 최적화하는 방법을 정함> ##########\n# GAN 논문의 수식에 따르면 loss 를 극대화 해야하지만, minimize 하는 최적화 함수를 사용하기 때문에\n# 최적화 하려는 loss_D 와 loss_G 에 음수 부호를 붙여줍니다.\ntrain_D = tf.train.AdamOptimizer(learning_rate).minimize(-loss_D,\n var_list=D_var_list)\ntrain_G = tf.train.AdamOptimizer(learning_rate).minimize(-loss_G,\n var_list=G_var_list)\n\n################################학습 시나리오 작성 완료################################################\n\n#########\n# 신경망 모델 학습\n######\n\n################################학습 시나리오 실행 시작################################################\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\ntotal_batch = int(mnist.train.num_examples/batch_size)\nloss_val_D, loss_val_G = 0, 0\n\n########## <공장이라고생각하면=>전기와 원재료를 넣어서 공장을 가동함> ##########\nfor epoch in range(total_epoch):\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n noise = get_noise(batch_size, n_noise)\n\n # 판별기와 생성기 신경망을 각각 학습시킵니다.\n _, loss_val_D = sess.run([train_D, loss_D],\n feed_dict={X: batch_xs, Z: noise})\n _, loss_val_G = sess.run([train_G, loss_G],\n feed_dict={Z: noise})\n\n print('Epoch:', '%04d' % epoch,\n 'D loss: {:.4}'.format(loss_val_D),\n 'G loss: {:.4}'.format(loss_val_G))\n\n #########\n # 학습이 되어가는 모습을 보기 위해 주기적으로 이미지를 생성하여 저장\n ######\n if epoch == 0 or (epoch + 1) % 10 == 0:\n sample_size = 10\n noise = get_noise(sample_size, n_noise)\n samples = sess.run(G, feed_dict={Z: noise})\n\n fig, ax = plt.subplots(1, sample_size, figsize=(sample_size, 1))\n\n for i in range(sample_size):\n ax[i].set_axis_off()\n ax[i].imshow(np.reshape(samples[i], (28, 28)))\n\n plt.savefig('samples/{}.png'.format(str(epoch).zfill(3)), bbox_inches='tight')\n plt.close(fig)\n\nprint('최적화 완료!')\n################################학습 시나리오 실행 완료################################################\n","sub_path":"09 - GAN/01 - GAN.py","file_name":"01 - GAN.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"319635969","text":"from tensorflow.keras.applications import MobileNetV2\r\nfrom tensorflow.keras.layers import Activation, Dense\r\nfrom tensorflow.keras.models import Model\r\n\r\nfrom boiling_learning.management import ElementCreator\r\nfrom boiling_learning.model.model import ProblemType, make_creator_method\r\n\r\n\r\ndef build(\r\n input_shape,\r\n hidden_layers_policy,\r\n output_layer_policy,\r\n problem=ProblemType.REGRESSION,\r\n num_classes=None,\r\n):\r\n mobile_net = MobileNetV2(\r\n input_shape=input_shape,\r\n include_top=False,\r\n weights='imagenet',\r\n pooling='avg',\r\n )\r\n x = Dense(256, activation='relu', dtype=hidden_layers_policy)(\r\n mobile_net.output\r\n )\r\n\r\n if ProblemType.get_type(problem) is ProblemType.CLASSIFICATION:\r\n x = Dense(num_classes, dtype=hidden_layers_policy)(x)\r\n predictions = Activation('softmax', dtype=output_layer_policy)(x)\r\n elif ProblemType.get_type(problem) is ProblemType.REGRESSION:\r\n x = Dense(1, dtype=hidden_layers_policy)(x)\r\n predictions = Activation('linear', dtype=output_layer_policy)(x)\r\n else:\r\n 
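# --- Hedged note on the losses above: tf.log(0) yields -inf once the
# --- discriminator saturates; a small epsilon (not in the original) is a
# --- common guard:
eps = 1e-8
loss_D_safe = tf.reduce_mean(tf.log(D_real + eps) + tf.log(1. - D_gene + eps))
loss_G_safe = tf.reduce_mean(tf.log(D_gene + eps))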
raise ValueError(f'unknown problem type: \\\"{problem}\\\"')\r\n\r\n return Model(inputs=mobile_net.input, outputs=predictions)\r\n\r\n\r\ncreator = ElementCreator(\r\n method=make_creator_method(builder=build),\r\n name='BLMobileNet',\r\n default_params=dict(\r\n verbose=2,\r\n checkpoint={'restore': False},\r\n num_classes=None,\r\n problem=ProblemType.REGRESSION,\r\n fetch=['model', 'history'],\r\n ),\r\n expand_params=True,\r\n)\r\n","sub_path":"boiling_learning/model/_definitions/BLMobileNet.py","file_name":"BLMobileNet.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"389566717","text":"\"\"\"\ndynamicsynmk2.py を加筆・修正したもの\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Synapse_exp:\n def __init__(self, T=600, dt=0.05, tau_rise_AMPA=0.8, tau_rise_NMDA=145, tau_inact_AMPA=5, tau_inact_NMDA=55,\n tau_rec=200, U_SE_AMPA=0.1, U_SE_NMDA=0.03):\n # simulation time\n self.T = T\n # time step\n self.dt = dt\n # all time\n self.t = np.arange(0, self.T, self.dt)\n\n # firing time\n self.t_ap = -1000\n\n # time constant\n # self.tau_rise = tau_rise\n self.tau_rise_AMPA = tau_rise_AMPA\n self.tau_rise_NMDA = tau_rise_NMDA\n self.tau_inact_AMPA = tau_inact_AMPA\n self.tau_inact_NMDA = tau_inact_NMDA\n self.tau_rec = tau_rec\n\n # self.U_SE = self.U_SE_AMPA + self.U_SE_NMDA\n self.U_SE_AMPA = U_SE_AMPA\n self.U_SE_NMDA = U_SE_NMDA\n\n # Recovered\n self.R = 1 * np.ones(int(self.T / self.dt))\n self.dR = 0\n # Effective\n self.E = 0 * np.ones(int(self.T / self.dt))\n # self.dE = 0\n self.E_AMPA = 0 * np.ones(int(self.T / self.dt))\n self.dE_AMPA = 0\n self.E_NMDA = 0 * np.ones(int(self.T / self.dt))\n self.dE_NMDA = 0\n # Inactive\n self.I = 0 * np.ones(int(self.T / self.dt)) # I = 1 - R - E\n\n # for calculate time-constant\n self.E_max_time = 0\n self.E_max = 0\n self.EAMPA_max_time = 0\n self.EAMPA_max = 0\n self.ENMDA_max_time = 0\n self.ENMDA_max = 0\n\n # calculated time constant\n # self.Erise = 0\n self.EAMPA_rise = 0\n self.ENMDA_rise = 0\n self.counter_rise = 0\n self.sum_Erise = 0\n # self.Efall = 0\n self.EAMPA_fall = 0\n self.ENMDA_fall = 0\n self.counter_fall = 0\n self.sum_Efall = 0\n\n def exp_decay(self, x, tau_rise):\n if -(x / tau_rise) > 100:\n return 0\n else:\n return np.exp(-x / tau_rise)\n\n def calculation(self):\n for i in range(0, len(self.t)-1):\n if self.t[i] == 50:\n self.t_ap = self.t[i]\n # pass\n \"\"\"\n elif 600 < self.t[i] < 1000 and self.t[i] % 50 == 0:\n self.t_ap = self.t[i]\n\n elif 2000 < self.t[i] < 2400 and self.t[i] % 5 == 0:\n self.t_ap = self.t[i]\n\n elif 3400 < self.t[i] < 4000 and self.t[i] % 2 == 0:\n self.t_ap = self.t[i]\n \"\"\"\n \"\"\"\n # calculate R, E, I\n self.dR = self.dt * ((self.I[i] / self.tau_rec) - self.U_SE * self.R[i] * self.exp_decay(self.t[i] - self.t_ap))\n self.dE = self.dt * ((- self.E[i] / self.tau_inact) + self.U_SE * self.R[i] * self.exp_decay(self.t[i] - self.t_ap))\n\n self.R[i + 1] = self.R[i] + self.dR\n self.E[i + 1] = self.E[i] + self.dE\n self.I[i + 1] = 1 - self.R[i + 1] - self.E[i + 1]\n \"\"\"\n\n # calculate R, E_AMPA, E_NMDA, I\n self.dR = self.dt * ((self.I[i] / self.tau_rec) - self.R[i] * (\n self.U_SE_AMPA * self.exp_decay(self.t[i] - self.t_ap, self.tau_rise_AMPA)\n + self.U_SE_NMDA * self.exp_decay(self.t[i] - self.t_ap, self.tau_rise_NMDA)))\n self.dE_AMPA = self.dt * ((- self.E_AMPA[i] / self.tau_inact_AMPA) + self.U_SE_AMPA * self.R[i] * self.exp_decay(self.t[i] - self.t_ap, 
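# --- Hedged example of calling build() above; the input shape and the
# --- 'float32' policy strings are illustrative assumptions, not values
# --- mandated by this module.
model = build(
    input_shape=(224, 224, 3),
    hidden_layers_policy='float32',
    output_layer_policy='float32',
    problem=ProblemType.REGRESSION,
)
model.summary()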
self.tau_rise_AMPA))\n self.dE_NMDA = self.dt * ((- self.E_NMDA[i] / self.tau_inact_NMDA) + self.U_SE_NMDA * self.R[i] * self.exp_decay(self.t[i] - self.t_ap, self.tau_rise_NMDA))\n\n self.R[i + 1] = self.R[i] + self.dR\n self.E_AMPA[i + 1] = self.E_AMPA[i] + self.dE_AMPA\n self.E_NMDA[i + 1] = self.E_NMDA[i] + self.dE_NMDA\n self.I[i + 1] = 1 - self.R[i + 1] - self.E_AMPA[i + 1] - self.E_NMDA[i + 1]\n\n for i in range(0, len(self.t)-1):\n if self.EAMPA_max <= self.E_AMPA[i]:\n self.EAMPA_max = self.E_AMPA[i]\n self.EAMPA_max_time = self.t[i]\n\n if self.ENMDA_max <= self.E_NMDA[i]:\n self.ENMDA_max = self.E_NMDA[i]\n self.ENMDA_max_time = self.t[i]\n\n for i in range(0, int(self.EAMPA_max_time / self.dt)):\n if 0.60 < self.E_AMPA[i] / self.EAMPA_max < 0.66:\n self.sum_Erise += self.t[i] - self.t_ap\n self.counter_rise += 1\n\n if self.counter_rise == 0:\n self.EAMPA_rise = 0\n else:\n self.EAMPA_rise = round(self.sum_Erise / self.counter_rise, 5)\n\n self.sum_Erise = 0\n self.counter_rise = 0\n\n for i in range(0, int(self.ENMDA_max_time / self.dt)):\n if 0.60 < self.E_NMDA[i] / self.ENMDA_max < 0.66:\n self.sum_Erise += self.t[i] - self.t_ap\n self.counter_rise += 1\n\n if self.counter_rise == 0:\n self.ENMDA_rise = 0\n else:\n self.ENMDA_rise = round(self.sum_Erise / self.counter_rise, 5)\n\n for i in range(int(self.EAMPA_max_time / self.dt), len(self.t) - 1):\n if 1 - 0.625 > self.E_AMPA[i] / self.EAMPA_max > 1 - 0.635:\n self.sum_Efall += self.t[i] - self.EAMPA_max_time\n self.counter_fall += 1\n\n if self.counter_fall == 0:\n self.EAMPA_fall = 0\n else:\n self.EAMPA_fall = round(self.sum_Efall / self.counter_fall, 5)\n\n self.sum_Efall = 0\n self.counter_fall = 0\n\n for i in range(int(self.ENMDA_max_time / self.dt), len(self.t) - 1):\n if 1 - 0.625 > self.E_NMDA[i] / self.ENMDA_max > 1 - 0.635:\n self.sum_Efall += self.t[i] - self.ENMDA_max_time\n self.counter_fall += 1\n\n if self.counter_fall == 0:\n self.ENMDA_fall = 0\n else:\n self.ENMDA_fall = round(self.sum_Efall / self.counter_fall, 5)\n\n for i in range(0, len(self.t)-1):\n self.E[i] = self.E_AMPA[i] + self.E_NMDA[i]\n\n\ndef main():\n synapse = Synapse_exp()\n synapse.calculation()\n\n print('EAMPA_max : ' + str(synapse.EAMPA_max))\n print('EAMPA_rise : ' + str(synapse.EAMPA_rise))\n print('EAMPA_fall : ' + str(synapse.EAMPA_fall))\n print()\n print('ENMDA_max : ' + str(synapse.ENMDA_max))\n print('ENMDA_rise : ' + str(synapse.ENMDA_rise))\n print('ENMDA_fall : ' + str(synapse.ENMDA_fall))\n\n ax1 = plt.subplot2grid((3, 1), (0, 0))\n ax1.plot(synapse.t, synapse.E_AMPA)\n ax1.set_title('E_AMPA')\n ax2 = plt.subplot2grid((3, 1), (1, 0))\n ax2.plot(synapse.t, synapse.E_NMDA)\n ax2.set_title('E_NMDA')\n ax3 = plt.subplot2grid((3, 1), (2, 0))\n ax3.plot(synapse.t, synapse.R)\n ax3.set_title('R')\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dynamic_synapse/dynamic_synapse_exp/synapse_class.py","file_name":"synapse_class.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"139405192","text":"from db2 import User, Group, init_db, Session\n\nfrom logger import Logger\nfrom logging import getLogger\nfrom sqlalchemy import select\n\n\nLogger()\nlogger = getLogger(\"src.\" + __name__)\n\n\ndef user_add(name):\n logger.info(\"user_add\")\n logger.info(f\"user_add: {Session}\")\n\n user = User(name=name, email=f\"{name}@example.com\")\n Session.add(user)\n\n logger.info(user in Session.dirty) # 
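# --- Hedged vectorized variant of exp_decay() above: the scalar guard maps
# --- would-be overflows (x far below 0) to 0; np.where plus a clipped
# --- exponent reproduces that behaviour over whole arrays.
import numpy as np

def exp_decay_vec(x, tau_rise):
    x = np.asarray(x, dtype=float)
    safe = np.exp(-np.clip(x, -100.0 * tau_rise, None) / tau_rise)
    return np.where(-x / tau_rise > 100.0, 0.0, safe)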
False\n logger.info(user in Session) # True\n logger.info(user in Session.dirty) # False\n\n\ndef group_add(name):\n logger.info(\"group_add\")\n logger.info(f\"group_add: {Session}\")\n\n group = Group(group_name=\"splatoon\", user_name=name)\n Session.add(group)\n\n\ndef select_user(name=None):\n logger.info(\"select_user\")\n logger.info(f\"select_user: {Session}\")\n\n if name is not None:\n res = Session.execute(select(User).where(User.name == name)).first()\n else:\n res = Session.execute(select(User)).all()\n return res\n\n\ntry:\n init_db()\n\n user_add(\"ika\")\n user_add(\"tako\")\n group_add(\"ika\")\n\n logger.info(f\"main: {Session}\")\n\n # flushしてからでないとinsertの前にselectしてしまう。\n # selectはflushも兼ねてるんじゃなかったのか?\n Session.flush()\n res = select_user(\"ika\")\n logger.info(res) # (,)\n logger.info(res.User) # \n\n # updateの場合はselectした時点でflushされる。\n res[0].email = \"inkring@example.com\"\n res = select_user()\n\n for r in res:\n logger.info(r.User)\n\nexcept Exception as e:\n logger.exception(f\"{e.__class__.__name__}: {e}\")\n Session.rollback()\nfinally:\n # Rollback\n Session.remove()\n","sub_path":"sqlalchemy/src/sample5.py","file_name":"sample5.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"290460521","text":"#!/usr/bin/env python\n\nimport yaml\nimport sys\nimport os\nimport re\nimport itertools\nimport subprocess\nimport pipes\n\nCHANG_TMP_PATH = os.environ[\"CHANG_TMP_PATH\"]\nCHANG_APP_ID = os.environ[\"CHANG_APP_ID\"]\nCHANG_APP_NAME = os.environ[\"CHANG_APP_NAME\"]\nCHANG_NETWORK = os.environ[\"CHANG_NETWORK\"]\nCHANG_SET = os.environ[\"CHANG_SET\"]\nCHANG_SYNC_ENABLED = (os.environ[\"CHANG_SYNC_ENABLED\"] == \"true\")\n\n\nif len(sys.argv) != 2:\n chang_error(\"USAGE...\")\n\nclass VolumesMapper:\n def __init__(self):\n self.i = itertools.count()\n self.map = {}\n\n def __getitem__(self, volume):\n if not self.map.get(volume, False):\n self.map[volume] = str(next(self.i))\n return self.map[volume]\n\ndef chang_service_network_alias(name):\n return subprocess.check_output([\"/bin/bash\", \"%sc\" % CHANG_SET, 'chang_service_network_alias \"{0}\"'.format(name)])\n\ndef chang_external_volume_name(name):\n return subprocess.check_output([\"/bin/bash\", \"%sc\" % CHANG_SET, \"chang_external_volume_name '%s'\" % name])\n\nVOLUMES_MAPPER = VolumesMapper()\n\ndef chang_error(message):\n os.system(\"bash -c 'chang_error {0}'\".format(message))\n sys.exit(1)\n\nchang_compose_file = sys.argv[1]\nchang_compose = yaml.load(file(chang_compose_file, 'r'), Loader=FullLoader)\n\nos.system(\"mkdir -p {0}\".format(CHANG_TMP_PATH))\n\nenv_file = open(\"{0}/environment\".format(CHANG_TMP_PATH), \"w\")\nfor line in chang_compose[\"environment\"]:\n env_file.write(\"export {0}\\n\".format(line))\n\nenvironment = chang_compose.get(\"environment\", [])\nservices = {}\n\nkeys = [\"command\", \"user\", \"build\", \"depends_on\", \"working_dir\", \"tty\", \"image\", \"environment\", \"extends\"]\nfor name in chang_compose[\"services\"]:\n desc = chang_compose[\"services\"][name]\n service = {}\n for key in keys:\n if desc.get(key, False):\n service[key] = desc[key]\n for volume in desc.get(\"volumes\", []):\n match = re.search(\"^([^:]+):(.*)$\", volume)\n volume_name = match.group(1)\n volume_mount = match.group(2)\n if not service.get(\"volumes\", False):\n service[\"volumes\"] = []\n if CHANG_SYNC_ENABLED or volume_name != \"chang\":\n volume_key = VOLUMES_MAPPER[volume_name]\n else:\n volume_key 
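# --- Hedged restatement of the flush-before-select point made above (User,
# --- Session and select are the names already imported in this module; the
# --- row values are illustrative):
pending = User(name="maika", email="maika@example.com")
Session.add(pending)   # pending only; a raw SELECT won't see it yet
Session.flush()        # emits the INSERT inside the open transaction
row = Session.execute(select(User).where(User.name == "maika")).first()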
= \"./\"\n service[\"volumes\"].append(volume_key + \":\" + volume_mount)\n env = service.get(\"environment\", [])\n service[\"environment\"] = environment + env\n service[\"networks\"] = { \"chang\": { \"aliases\": [chang_service_network_alias(name)] }}\n services[name] = service\n\nwatch_file = open(\"{0}/watch\".format(CHANG_TMP_PATH), \"w\")\nfor watch in chang_compose.get(\"watch\", []):\n watch_file.write(\"export {0}\\n\".format(line))\n\nvolumes = {}\nfor volume in VOLUMES_MAPPER.map:\n volumes[VOLUMES_MAPPER[volume]] = {\n \"external\": { \"name\": chang_external_volume_name(volume) }\n }\n\ncompose_yaml = {\n \"version\": \"2\",\n \"services\": services,\n \"volumes\": volumes,\n \"networks\": {\n \"chang\": { \"external\": { \"name\": CHANG_NETWORK } }\n }\n}\n\ncompose_file = open(\"{0}/compose_file\".format(CHANG_TMP_PATH), \"w\")\nyaml.dump(compose_yaml, compose_file, default_flow_style=False)\n\nvolumes_file = open(\"{0}/volumes\".format(CHANG_TMP_PATH), \"w\")\nfor volume in VOLUMES_MAPPER.map:\n if CHANG_SYNC_ENABLED or volume_name != \"chang\":\n volumes_file.write(chang_external_volume_name(volume) + \"\\n\")\n\nproxy_file = open(\"{0}/proxy\".format(CHANG_TMP_PATH), \"w\")\nif chang_compose.get(\"server\", {}).get(\"root\", False):\n service, port = chang_compose[\"server\"][\"root\"].split(\":\")\n proxy_file.write(\"$CHANG_NETWORK $CHANG_APP_NAME $CHANG_REV_PROXY_PORT %s %s\\n\" % (service, port))\nif chang_compose.get(\"server\", {}).get(\"subdomains\", False):\n for subdomain in chang_compose[\"server\"][\"subdomains\"]:\n service, port = chang_compose[\"server\"][\"subdomains\"][subdomain].split(\":\")\n proxy_file.write(\"$CHANG_NETWORK %s.$CHANG_APP_NAME $CHANG_REV_PROXY_PORT %s %s\\n\" % (subdomain, service, port))\n","sub_path":"chang_compose.py","file_name":"chang_compose.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"617708037","text":"from os import path\nfrom tempfile import TemporaryDirectory, NamedTemporaryFile\n\nimport hail as hl\nfrom .utils import benchmark, resource\n\n\n@benchmark\ndef shuffle_key_rows_by_mt():\n mt = hl.read_matrix_table(resource('profile.mt'))\n mt = mt.annotate_rows(reversed_position_locus=hl.struct(\n contig=mt.locus.contig,\n position=-mt.locus.position))\n mt = mt.key_rows_by(mt.reversed_position_locus)\n mt._force_count_rows()\n\n\n@benchmark\ndef shuffle_order_by_10m_int():\n t = hl.utils.range_table(10_000_000, n_partitions=100)\n t = t.order_by(-t.idx)\n t._force_count()\n\n\n@benchmark\ndef shuffle_key_rows_by_4096_byte_rows():\n mt = hl.utils.range_matrix_table(100_000, (1 << 12) // 4)\n mt = mt.annotate_entries(entry=mt.row_idx * mt.col_idx)\n mt = mt.key_rows_by(backward_rows_idx=-mt.row_idx)\n mt._force_count_rows()\n\n\n@benchmark\ndef shuffle_key_rows_by_65k_byte_rows():\n mt = hl.utils.range_matrix_table(10_000, (1 << 16) // 4)\n mt = mt.annotate_entries(entry=mt.row_idx * mt.col_idx)\n mt = mt.key_rows_by(backward_rows_idx=-mt.row_idx)\n mt._force_count_rows()\n","sub_path":"benchmark/python/benchmark_hail/run/shuffle_benchmarks.py","file_name":"shuffle_benchmarks.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"579737312","text":"#!/usr/bin/env python2.7\n# \n\nimport os, sys, json, numpy, astropy, scipy\nimport astropy.io.ascii as asciitable\nfrom scipy import interpolate, optimize\nfrom pprint import 
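# --- Hedged sketch of the VolumesMapper defined above: it hands out stable,
# --- incrementing string indices per distinct volume name ("gems" is just an
# --- illustrative name):
vm = VolumesMapper()
assert vm["chang"] == "0"
assert vm["gems"] == "1"
assert vm["chang"] == "0"   # repeated lookups reuse the cached index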
pprint\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))+os.sep+'Softwares'+os.sep+'lib_python_dzliu'+os.sep+'crabtable')\nfrom CrabTable import *\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))+os.sep+'Softwares'+os.sep+'lib_python_dzliu'+os.sep+'crabplot')\nfrom CrabPlot import *\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))+os.sep+'Softwares'+os.sep+'lib_python_dzliu'+os.sep+'crabcurvefit')\nfrom CrabCurveFit import *\n\n\n# Print usage\n#if len(sys.argv) <= 1:\n# print('Usage: almacosmos_fit_simu_corr_ecorr_via_2D_interpolation.py simu_data_correction_table.txt')\n# print('# Note: simu_data_correction_table.txt is the output file of \"almacosmos_calc_simu_stats.py\"!')\n# sys.exit()\n\n\n# Read catalog\ninput_simu_data_table = 'datatable_param_grid_cell_statistics.txt' # sys.argv[1] # 'datatable_param_grid_cell_statistics.txt'\ndata_table = CrabTable(input_simu_data_table)\nx1_obs = data_table.getColumn('cell_par1_median')\nx2_obs = data_table.getColumn('cell_par2_median')\n#ecorr = data_table.getColumn('cell_noi_scatter')\necorr_noi = data_table.getColumn('cell_noi_scatter')\necorr_noi_L68 = data_table.getColumn('cell_noi_scatter_L68')\necorr_noi_H68 = data_table.getColumn('cell_noi_scatter_H68')\necorr_min = numpy.nanmin(numpy.column_stack((ecorr_noi,ecorr_noi_L68,ecorr_noi_H68)), axis=1)\nasciitable.write(numpy.column_stack((x1_obs,x2_obs,ecorr_min,ecorr_noi,ecorr_noi_L68,ecorr_noi_H68)), sys.stdout, \n names=['x1_obs','x2_obs','ecorr_min','ecorr_noi','ecorr_noi_L68','ecorr_noi_H68'], \n Writer=asciitable.FixedWidthTwoLine, delimiter='|', delimiter_pad=' ', position_char='-', bookend=True)\ny_obs = ecorr_noi # ecorr_min ##\n\n\n# Mask NaN\nnan_filter = (~numpy.isnan(y_obs))\nx1_obs = x1_obs[nan_filter]\nx2_obs = x2_obs[nan_filter]\ny_obs = y_obs[nan_filter]\n\n\n# Make x1 x2 grid\n#x1_grid = numpy.arange(numpy.log10(2.0), numpy.log10(1000.0)+0.05, 0.01); x1_interval = 0.01\n#x2_grid = numpy.arange(1.0, 4.0+0.5, 0.5); x2_interval = 0.5\nx1_grid = numpy.power(10,numpy.arange(numpy.log10(2.5),numpy.log10(5e3),0.05))\nx2_grid = numpy.array([1.00, 1.25, 1.50, 2.00, 2.50, 3.00, 3.50, 4.00, 4.50, 5.00])\nx1_interval = x1_grid[1:len(x1_grid)] - x1_grid[0:len(x1_grid)-1]\nx2_interval = x2_grid[1:len(x2_grid)] - x2_grid[0:len(x2_grid)-1]\n\n\n# Make x1 x2 mesh\nx1_mesh, x2_mesh = numpy.meshgrid(numpy.log10(x1_grid), x2_grid)\nx_mesh = numpy.column_stack((x1_mesh.flatten(),x2_mesh.flatten()))\n\n\n# Make 2D interpolation\nx_arr = numpy.column_stack((numpy.log10(x1_obs),x2_obs))\ny_arr = y_obs\narray_extrapolated = interpolate.griddata(x_arr, y_arr, x_mesh, method='nearest')\narray_interpolated = interpolate.griddata(x_arr, y_arr, x_mesh, method='cubic')\narray_mask = numpy.isnan(array_interpolated)\narray_combined = array_interpolated\narray_combined[array_mask] = array_extrapolated[array_mask]\n\n\n# Save base_interp\nbase_interp = {}\nbase_interp['x'] = x_arr.tolist() # note that here x1 is in log. \nbase_interp['y'] = y_arr.tolist()\ntable_content = json.dumps(base_interp, indent=4)\nwith open('base_interp_array_for_ecorr.json', 'w') as fp:\n fp.write(table_content)\nprint('Output to \"base_interp_array_for_ecorr.json\"!')\n\n\n# Save interp_table\ninterp_table = {}\ninterp_table['x'] = numpy.column_stack((numpy.power(10,x1_mesh.flatten()),x2_mesh.flatten())).tolist() # note that here x1 is in log. 
\ninterp_table['y'] = array_interpolated.tolist()\ntable_content = json.dumps(interp_table, indent=4)\nwith open('interp_table_ecorr.json', 'w') as fp:\n fp.write(table_content)\nprint('Output to \"interp_table_ecorr.json\"!')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Pipeline/a3cosmos-MC-simulation-statistics-analysis-tools/almacosmos_fit_simu_corr_ecorr_via_2D_interpolation.py","file_name":"almacosmos_fit_simu_corr_ecorr_via_2D_interpolation.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"28915335","text":"from socket import *\nimport sys\nimport time\nclass Ping():\n def __init__(self):\n self.socket = socket(AF_INET,SOCK_DGRAM)\n def Start(self):\n self.__times = 10\n port = sys.argv[1]\n for i in range(self.__times):\n self.socket.sendto('reqest'.encode(),(\"localhost\",int(port)))\n start = time.time()\n rep = self.socket.recv(2048)\n end = time.time()\n print(end-start)\nif __name__==\"__main__\":\n Ping = Ping()\n Ping.Start()\n\n","sub_path":"UDPPing/UDPpingClient.py","file_name":"UDPpingClient.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"115243257","text":"import pygame\nimport imageio\nfrom vae_example_conv import *\nimport random\n\n\nNMB_HIDDEN = 16\nDISTANCE = NMB_HIDDEN*50 + 80\nnet = VAE()\ncheckpoint = torch.load('checkpoint.pth.tar')\nnet.load_state_dict(checkpoint['state_dict'])\nnet.eval()\n\ndef loadImg(location='test.png'):\n return torch.Tensor(imageio.imread(location)[:,:,:3]).permute(2,0,1).unsqueeze(0) / 255.\n\ndef toImg(emb):\n return net.decode(emb)[0].permute(1,2,0).detach()\n\ndef loadData(x):\n img = torch.Tensor(imageio.imread(x))\n dset = torch.zeros(35*35, 32,32,3)\n tel = 0\n for y in range(0,35*32,32):\n for x in range(0,35*32,32):\n dset[tel] = img[y:y+32, x:x+32]\n tel += 1\n return dset.permute((0,3,1,2))[:-52]/255.\n\nimagesTrain = loadData('dataset.png')\n \nclass Bar:\n def __init__(self, x, y):\n self.x, self.y = x, y\n self.moving = False\n def update(self, dp):\n if self.moving:\n self.y = pygame.mouse.get_pos()[1]\n pygame.draw.rect(dp, (0,0,0), [self.x-20, self.y-8, 40, 16])\n pygame.draw.rect(dp, (100,100,100), [self.x-16, self.y-5, 32, 10])\n pos = -self.getRel()\n if pos < -3.5: self.y = (3.5*20)+200\n if pos > 3.5: self.y = 200-(3.5*20)\n draw(dp, '%.2f' % (-self.getRel()),self.x-25, 350 , (0,255,0), (0,0,255))\n def checkPressed(self, mousePosition):\n if self.x-20 < mousePosition[0] < self.x+20 and self.y-6 < mousePosition[1] < self.y+6:\n self.moving = True\n def unclick(self):\n self.moving = False\n def getRel(self):\n return ((self.y -200) / 20.)\n\nclass RandomButton():\n def __init__(self):\n self.isPressed = False\n def update(self, dp):\n draw(dp, 'Click for new Random Image', DISTANCE-20, 30, (0,255,0), (0,0,255))\n def checkPressed(self, mp):\n return DISTANCE-15 < mp[0] < DISTANCE+300 and 20 < mp[1] < 80\n\ndef draw( dp, text, x,y, c1= (255,20,20), c2 = (255,255,255)):\n text = font.render(text, True, c1, c2) \n rect = text.get_rect() \n rect.topleft = (x, y)\n dp.blit(text, rect) \n\ndef load():\n img = (imagesTrain[random.randint(0, len(imagesTrain)-1)]*255).permute(1,2,0).numpy()\n imageio.imsave('test.png', img.reshape((32,32,3)))\n img = pygame.transform.scale(pygame.image.load('test.png'), (100,100))\n testImage(net, 0)\n reconstruction = 
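# --- Hedged standalone demo of the nearest+cubic fill pattern above: cubic
# --- griddata returns NaN outside the convex hull, nearest fills those holes.
import numpy as np
from scipy import interpolate

pts = np.random.rand(30, 2)
vals = pts[:, 0] + pts[:, 1]
gx, gy = np.meshgrid(np.linspace(-0.2, 1.2, 10), np.linspace(-0.2, 1.2, 10))
grid = np.column_stack((gx.ravel(), gy.ravel()))
cubic = interpolate.griddata(pts, vals, grid, method='cubic')
nearest = interpolate.griddata(pts, vals, grid, method='nearest')
combined = np.where(np.isnan(cubic), nearest, cubic)
assert not np.isnan(combined).any()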
pygame.transform.scale(pygame.image.load('test_reconstruction_0.png'), (100,100))\n torchImg = loadImg()\n starty = [float(i)for i in net.encode(torchImg)[0].view(-1)]\n bars = [Bar(50*(i+1), (20*starty[i])+200) for i in range(len(starty))]\n return img, reconstruction,bars\n\ndef testImage(net, i=0):\n img = loadImg()\n with torch.no_grad():\n prediction = net.decode(net.encode(img)[0])[0]\n prediction = (prediction.permute(1,2,0).numpy()*255).astype('uint8')\n imageio.imsave('test_reconstruction_%i.png' % i, prediction)\n\nif __name__ == \"__main__\":\n pygame.init()\n font = pygame.font.Font('freesansbold.ttf', 20)\n gameDisplay = pygame.display.set_mode((DISTANCE+300, 400))\n pygame.display.set_caption('VAE example')\n clock = pygame.time.Clock()\n \n img, reconstruction,bars = load()\n randomButton = RandomButton()\n reload = False\n \n while 1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONUP:\n [bar.unclick() for bar in bars]\n if randomButton.checkPressed(pygame.mouse.get_pos()):\n img, reconstruction,bars = load()\n reload = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n mp = pygame.mouse.get_pos()\n [bar.checkPressed(mp)for bar in bars]\n reload = True\n if reload:\n decon = torch.Tensor([[(i.y-200)/20. for i in bars]])\n prediction = net.decode(decon).detach()[0].permute(1,2,0)\n prediction = (prediction.numpy()*255).astype('uint8')\n imageio.imsave('test_reconstruction_0.png', prediction)\n reconstruction = pygame.transform.scale(pygame.image.load('test_reconstruction_0.png'), (100,100))\n gameDisplay.fill((255,255,255))\n [bar.update(gameDisplay) for bar in bars]\n randomButton.update(gameDisplay)\n gameDisplay.blit(img, (DISTANCE,90))\n gameDisplay.blit(reconstruction, (DISTANCE,220))\n draw(gameDisplay, '<-- Input', DISTANCE+110, 130)\n draw(gameDisplay, '<-- Reconstruction', DISTANCE+110, 260)\n pygame.display.update()\n clock.tick(30)\n \n","sub_path":"20200602_homework_week8/scripts_and_files_for_homework/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"421603529","text":"def proccess_ebook(filename):\n\n file = open(filename, 'r', encoding='utf-8')\n\n content = file.read()\n\n lines = content.split(\"\\n\")\n\n words_map = {}\n\n for line in lines:\n if len(line) > 0:\n words = []\n for word in line.split(' '):\n if word.isnumeric() or word.islower():\n words.append(word)\n\n for word in words:\n if word in words_map.keys():\n words_map[word] +=1\n else:\n words_map[word]= 1\n\n value = ''\n count = 0\n\n for word in words_map.keys():\n if words_map[word] > count:\n count = words_map[word]\n value = word\n\n result = {k: v for k, v in sorted(words_map.items(), key=lambda item: item[1])}\n\n return list(result.items())\nif __name__ == '__main__':\n print(proccess_ebook('pg66474.txt'))","sub_path":"python/lesson1/proc_ebook.py","file_name":"proc_ebook.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"17026865","text":"import sys\nnewfile = \"\\\\hline\\n\"\nlen_ = 0\nfor line in sys.stdin:\n split = line.split()\n if len_ == 0:\n len_ = len(split)\n else:\n if len_ != len(split):\n raise Exception(\"Invalid line lengths.\")\n newfile += \" \"\n for item in split:\n newfile += item + \" & \"\n newfile = newfile[:-2]\n newfile += \"\\\\\\\\ \\\\hline\\n\"\nl = \"|l\" * 
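# --- proccess_ebook() above sorts counts in ascending order (so the last
# --- item, not the first, is the most frequent) and its value/count scan is
# --- redundant; a hedged equivalent using collections.Counter:
from collections import Counter

def count_words(lines):
    words = (w for line in lines for w in line.split()
             if w.isnumeric() or w.islower())
    return Counter(words).most_common()   # most frequent first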
len_ + \"|\"\nnewfile = \"\\\\begin{table}[H]\\n\\\\begin{tabular}{\" + l + \"}\\n\" + newfile\nnewfile += \"\\\\end{tabular}\\n\\\\end{table}\"\nprint(newfile.strip())\n","sub_path":"tablegenerator.py","file_name":"tablegenerator.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"348494151","text":"import time\nimport os\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss\nfrom torch.autograd import Variable\nfrom apex import amp\nfrom losses import simple_dice_loss3D, WeightedCrossEntropyLoss\nfrom utils import compare_output, metrics, draw_images, plot_epochs\n\n# https://github.com/mcarilli/mixed_precision_references/blob/master/Pytorch_Devcon_2019/devcon_2019_mcarilli_final.pdf\n\ndef target_to_one_hot(target):\n temp = torch.reshape(target, (-1,)).long()\n target = torch.zeros([torch.numel(temp), 2])\n target[torch.arange(torch.numel(temp)),temp] = 1\n return target\n\ndef train(model, train_loader, valid_loader, model_name, n_epochs = 100, loss_function = 'dice', AMP=True, gpu=True, test_data = None, models_folder = \"./\"):\n optimizer = optim.Adam(model.parameters(), weight_decay=0.00001)\n if AMP:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n \n if loss_function == 'wce':\n criterion = WeightedCrossEntropyLoss()\n elif loss_function == 'ce':\n criterion = CrossEntropyLoss()\n else: # DICE\n criterion = nn.Softmax(dim=1)\n\n exists_best_model = False\n filename = models_folder + model_name + '.pth'\n\n if os.path.isfile(filename):\n checkpoint = torch.load(filename)\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n train_losses = checkpoint['train_losses']\n valid_losses = checkpoint['valid_losses']\n current_epoch = checkpoint['epochs']\n best_model_state_dict = checkpoint['best_model_state_dict']\n best_optimizer_state_dict = checkpoint['optimizer_state_dict']\n valid_loss_min = checkpoint['valid_loss_min']\n if AMP:\n amp.load_state_dict(checkpoint['amp_state_dict'])\n exists_best_model = True\n del checkpoint\n torch.cuda.empty_cache()\n n_epochs = max(n_epochs - current_epoch, 0)\n print(\"---\")\n print(\"Modelo entrenado {} epochs, se entrenará {} epochs más para llegar a {}\".format(current_epoch, n_epochs, n_epochs + current_epoch))\n else:\n train_losses = []\n valid_losses = []\n current_epoch = 0\n valid_loss_min = np.Inf\n best_model_state_dict = model.state_dict()\n best_optimizer_state_dict = optimizer.state_dict()\n\n start_training = time.time()\n for epoch in range(current_epoch+1, current_epoch + n_epochs + 1): \n start_epoch = time.time()\n # keep track of training and validation loss\n train_loss = 0.0\n valid_loss = 0.0\n ###################\n # train the model #\n ###################\n model.train()\n for data, target, correct_cell_count, resized_cell_count in train_loader:\n if 1 not in target:\n continue\n target = target.squeeze(0)\n # move tensors to GPU if CUDA is available\n if gpu:\n data = Variable(data).cuda()\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n datasize = data.size(0)\n del data\n if loss_function in {'wce', 'ce'}:\n if gpu:\n target = Variable(target).cuda().long()\n loss = criterion(output, target)\n else: # 'dice'\n target = 
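# --- Hedged sanity check for target_to_one_hot() above (two-class one-hot
# --- built by integer indexing):
import torch

demo_target = torch.tensor([[0., 1.], [1., 0.]])
one_hot = target_to_one_hot(demo_target)
assert one_hot.shape == (4, 2)
assert torch.equal(one_hot.argmax(dim=1), torch.tensor([0, 1, 1, 0]))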
target_to_one_hot(target).float()\n if gpu:\n target = Variable(target).cuda()\n # calculate the batch loss\n output = output.permute(0,2,3,4,1).contiguous().view(-1,2).float()\n loss = simple_dice_loss3D(criterion(output), target)\n # backward pass: compute gradient of the loss with respect to model parameters\n if AMP:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n # update training loss\n train_loss += loss.item() * datasize\n del target\n del output\n\n ###################### \n # validate the model #\n ######################\n model.eval()\n for data, target, correct_cell_count, resized_cell_count in valid_loader:\n if 1 not in target:\n continue\n target = target.squeeze(0)\n # move tensors to GPU if CUDA is available\n if gpu:\n data = Variable(data).cuda()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n datasize = data.size(0)\n del data\n if loss_function in {'wce', 'ce'}:\n if gpu:\n target = Variable(target).cuda().long()\n loss = criterion(output, target)\n else:\n target = target_to_one_hot(target).float()\n if gpu:\n target = Variable(target).cuda()\n # calculate the batch loss\n output = output.permute(0,2,3,4,1).contiguous().view(-1,2).float()\n loss = simple_dice_loss3D(criterion(output), target)\n del target\n del output\n # update average validation loss\n valid_loss += loss.item() * datasize\n # calculate average losses\n train_loss = train_loss/len(train_loader.sampler)\n valid_loss = valid_loss/len(valid_loader.sampler)\n train_losses.append(train_loss)\n valid_losses.append(valid_loss)\n # print training/validation statistics\n print('Epoch: {} Tiempo:{:.0f}s \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(\n epoch, time.time()-start_epoch, train_loss, valid_loss))\n # save model if validation loss has decreased\n if valid_loss <= valid_loss_min:\n best_model_state_dict = model.state_dict()\n best_optimizer_state_dict = optimizer.state_dict()\n print('Validation loss decreased. Train loss: {:.6f} Validation Loss: ({:.6f} --> {:.6f}). 
Saving model ...'.format(\n train_loss,\n valid_loss_min,\n valid_loss,\n ))\n valid_loss_min = valid_loss\n exists_best_model = True\n if exists_best_model:\n file_obj = {\n 'epochs': epoch,\n 'best_model_state_dict': best_model_state_dict,\n 'best_optimizer_state_dict' : best_optimizer_state_dict,\n 'model_state_dict' : model.state_dict(),\n 'optimizer_state_dict' : optimizer.state_dict(),\n 'train_losses': train_losses,\n 'valid_losses': valid_losses,\n 'valid_loss_min': valid_loss_min,\n }\n if AMP:\n file_obj['amp_state_dict'] = amp.state_dict()\n torch.save(file_obj, filename)\n\n print(\"-----\")\n\n plot_epochs(train_losses, valid_losses, model_name)\n if test_data:\n metrics(model, test_data, save=True, model_name=model_name)\n\n print(\"Entrenamiento terminado en {:.2f}m\".format((time.time() - start_training)/60))","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"248805597","text":"from __future__ import print_function\n\nimport json\nimport boto3\n\nprint('Loading function')\n\ndynamodb = boto3.resource('dynamodb')\ns3 = boto3.resource('s3')\n\ndef lambda_handler(event, context):\n bucketName = \"tshimoda\"\n tableName = \"CARE-TrainingLog-stg\"\n keys = [\"user_id\", \"training_id\", \"absolute_angles\", \"angles\", \"average_angle\", \"average_duration\", \"count\", \"dominant_hand\", \"durations\", \"ended_at\", \"initial_angle\", \"ord\", \"started_at\", \"target_angle\", \"rank\"]\n wait_time = event['wait_time']\n limit = event['limit']\n count = event['count']\n lek = event.get('lek')\n \n try:\n table = dynamodb.Table(tableName)\n \n if lek: \n hoge = table.scan(Limit=limit,ExclusiveStartKey=lek)\n else:\n hoge = table.scan(Limit=limit)\n \n if hoge['Items']:\n newkey = tableName + '/' + tableName + '-' + str(limit) + '_' + str(count) + '.csv'\n count = count + 1\n obj = s3.Object(bucketName, newkey)\n \n \n items=hoge['Items']\n #keys = [key for key in items[0].keys()]\n string=\"\"\n \n for key in keys:\n string += key\n if key!=keys[-1]:\n string += '\\t'\n string += '\\n'\n \n for item in items:\n for key in keys:\n if item.get(key):\n if isinstance(item[key], list):\n for i in range (len(item[key])):\n string += str(item[key][i])\n if i != len(item[key])-1:\n string += ','\n else:\n string += str(' '.join(str(item[key]).split(\"\\t\")))\n if key!=keys[-1]:\n string += \"\\t\"\n if item!=items[-1]:\n string += '\\n'\n \n obj.put(Body = string.encode())\n \n if hoge.get('LastEvaluatedKey'):\n lek = hoge['LastEvaluatedKey']\n jobStatus = 'RUNNABLE'\n else:\n lek = {}\n jobStatus = 'SUCCEEDED'\n \n else:\n lek = {}\n jobStatus = 'SUCCEEDED'\n \n return {\n 'wait_time': wait_time,\n 'status': jobStatus,\n 'tableName': tableName,\n 'limit': limit,\n 'count': count,\n 'lek': lek\n }\n except Exception as e:\n print(e)\n message = 'Error'\n print(message)\n raise Exception(message)\n","sub_path":"scan_TrainingLog.py","file_name":"scan_TrainingLog.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"46945709","text":"import threading\n\nimport user, userRepository, userOrganization, repositoryTopic, userPR\nfrom scrap import repositoryContributors\nimport pandas as pd\n\n\n# 多线程来加快爬取速度\nclass myThread(threading.Thread):\n def __init__(self, name, index_range):\n threading.Thread.__init__(self)\n self.name = name\n self.index_range = 
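# --- Hedged restore sketch matching the checkpoint layout saved above (the
# --- path is illustrative; amp.state_dict()/amp.load_state_dict() are the
# --- apex calls already used inside train()):
ckpt = torch.load('saved/model_name.pth')
model.load_state_dict(ckpt['best_model_state_dict'])
optimizer.load_state_dict(ckpt['best_optimizer_state_dict'])
if 'amp_state_dict' in ckpt:
    amp.load_state_dict(ckpt['amp_state_dict'])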
index_range\n\n def run(self):\n print(\"Starting \" + self.name)\n repos = pd.read_csv('../userRepository.csv')\n login_list = repos['login'].values\n name_list = repos['name'].values\n for i in range(self.index_range[0], self.index_range[1] + 1):\n topics = repositoryTopic.RepositoryTopic(name_list[i], login_list[i])\n flag = topics.fetch()\n if flag:\n topics.toDataFrame()\n topics.saveCSV(\"repositoryTopics.csv\", 'a+')\n else:\n print(\"该仓库不存在topic\")\n print(\"Exiting \" + self.name)\n\n\nif __name__ == '__main__':\n thread_list = []\n range_list = [(0, 10000), (10001, 20000), (20001, 30000), (30001, 40000), (40001, 50000), (50001, 60000),\n (60001, 70000), (70001, 81182)]\n\n # 创建新线程\n for i in range(1, 9):\n thread = myThread(\"Thread-\" + str(i), range_list[i - 1])\n thread.start()\n thread_list.append(thread)\n\n # 等待线程完成\n for thread in thread_list:\n thread.join()\n","sub_path":"scrap/scrap_repo_topic.py","file_name":"scrap_repo_topic.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"338589799","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport numpy as np\n\nclass my_CNN_Model:\n def __init__(self, model_name=\"Resnet50\"):\n self.model_name = model_name\n filename = 'bottleneck_features/Dog'+model_name+\"Data.npz\"\n try:\n self.bottleneck_features = np.load(filename)\n except:\n print(\"Can't find bottleneck features for \"+model_name)\n self.train = self.bottleneck_features['train']\n self.valid = self.bottleneck_features['valid']\n self.test = self.bottleneck_features['test']\n self.save_weights = 'saved_models/weights.best.'+model_name+'.hdf5'\n \n def create_model(self):\n self.model = Sequential()\n self.model.add(GlobalAveragePooling2D(input_shape=train_Resnet50.shape[1:]))\n self.model.add(Dense(133, activation='softmax'))\n self.model.summary()\n \n def compile_model(self):\n self.model.compile(loss=\"categorical_crossentropy\", optimizer=\"rmsprop\", metrics=[\"accuracy\"])\n \n def train_model(self):\n checkpointer = ModelCheckpoint(filepath=self.save_weights,verbose=1, save_best_only=True)\n self.model.fit(self.train, train_targets, validation_data=(self.valid, valid_targets), epochs=20,\n batch_size=20, callbacks=[checkpointer], verbose=1)\n \n self.model.load_weights(self.save_weights)\n \n def test_model(self):\n predictions = [np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in self.test]\n test_accuracy = 100*np.sum(np.array(predictions)==np.argmax(test_targets, axis=1))/len(predictions)\n print('Test accuracy: %.4f%%' % test_accuracy)\n \n def predict_breed(self, img_path):\n # extract bottleneck features\n bottleneck_feature = extract_Resnet50(path_to_tensor(img_path))\n # obtain predicted vector\n predicted_vector = Resnet50_model.predict(bottleneck_feature)\n # return dog breed that is predicted by the model\n winners =dict(zip(dog_names, predicted_vector[0]))\n top5 = list(sorted(dog_names,key=lambda x: -winners[x]))[:5]\n \n return [(breed, winners[breed]) for breed in top5]\n \n def final_predictor(self,img_path):\n human = face_detector(img_path)\n dog = dog_detector(img_path)\n \n if not human and not dog:\n return \"I don't see a human or a dog in that photo! 
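# --- Hedged helper for the hard-coded range_list above: split [0, total)
# --- into n contiguous inclusive (start, end) chunks like those handed to
# --- myThread:
def make_ranges(total, n):
    step = -(-total // n)   # ceiling division
    return [(i, min(i + step - 1, total - 1)) for i in range(0, total, step)]

# make_ranges(81183, 8) == [(0, 10147), (10148, 20295), ..., (71036, 81182)]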
Try a different one.\"\n breeds = self.predict_breed(img_path)\n \n if human:\n salutation = \"Hello, human!\"\n else:\n salutation = \"What a cute doge!\"\n \n print(salutation)\n plt.imshow(img_path)\n print(\"You look most like a...\")\n for entry in breeds:\n print(\"{0:.2%} {1}\".format(breeds[1], breeds[0]))\n \n ","sub_path":"cnn_model_gen.py","file_name":"cnn_model_gen.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"529574353","text":"\"\"\"\nAuthor: Michel Peltriaux\nOrganization: Spatial data infrastructure Rhineland-Palatinate, Germany\nContact: michel.peltriaux@vermkv.rlp.de\nCreated on: 12.07.19\n\n\nThis file holds all messages that are used system-wide\n\"\"\"\nfrom django.utils.translation import gettext_lazy as _\n\n# GROUP ACTIVITIES #\n# These messages HAVE to be untranslated, since they are written into the db\n# and will be translated during the template rendering process automatically\nPUBLISHING_REQUEST_CREATED = _(\"Publish request created\")\nSERVICE_REGISTERED = _(\"Service registered\")\nSERVICE_REMOVED = _(\"Service removed\")\nSERVICE_UPDATED = _(\"Service updated\")\nRESOURCE_EDITED = _(\"%(title)s edited\")\nSERVICE_ACTIVATED = _(\"Resource %(title)s activated\")\nSERVICE_ACTIVATED_TEMPLATE = _(\"Service {} activated\")\nSERVICE_DEACTIVATED = _(\"Resource %(title)s deactivated\")\nSERVICE_DEACTIVATED_TEMPLATE = _(\"Service {} deactivated\")\nSERVICE_MD_RESTORED = _(\"Service metadata restored\")\nGROUP_EDITED = _(\"Group edited\")\nDATASET_MD_EDITED = _(\"Dataset metadata edited\")\n\n####################\n\nPARAMETER_ERROR = _(\"The parameter '{}' is invalid.\")\n\nFORM_INPUT_INVALID = _(\"The input was not valid.\")\n\nUSERNAME_OR_PW_INVALID = _(\"Username or password incorrect\")\nREGISTRATION_FAILED_MISSING_DATA = _(\"Registration failed due to missing form data.\")\nACCOUNT_UPDATE_SUCCESS = _(\"Account updated successfully!\")\nACCOUNT_NOT_ACTIVATED = _(\"Your account is currently not activated\")\nLOGOUT_FORCED = _(\"You have been logged out.\")\n\nACTIVATION_LINK_INVALID = _(\"Your activation link was invalid. Please contact an administrator.\")\nACTIVATION_LINK_EXPIRED = _(\"Your account was not activated in time. Please register again.\")\nACTIVATION_LINK_SENT = _(\"An activation link for your account was sent. Please check your e-mails!\")\n\nUNKNOWN_EMAIL = _(\"This e-mail is not known\")\nLOGOUT_SUCCESS = _(\"Successfully logged out!\")\nEMAIL_INVALID = _(\"The e-mail address was not valid\")\n\nPASSWORD_CHANGE_SUCCESS = _(\"Password successfully changed!\")\nPASSWORD_CHANGE_OLD_PASSWORD_WRONG = _(\"Old password was wrong!\")\nPASSWORD_CHANGE_NO_MATCH = _(\"Passwords didn't match!\")\nPASSWORD_SENT = _(\"A new password has been sent. Please check your e-mails!\")\n\nSESSION_TIMEOUT = _(\"Session timeout. You have been logged out.\")\nCONNECTION_TIMEOUT = _(\"Timeout while loading '{}'!\")\n\nNO_PERMISSION = _(\"You do not have permissions for this!\")\nRESOURCE_IS_OWNED_BY_ANOTHER_GROUP = _(\"Resource is owned by another group. Access denied.\")\nREQUESTING_USER_IS_NOT_MEMBER_OF_THE_GROUP = _(\"Requesting user is not member of the group. Access denied.\")\nREQUESTING_USER_IS_NOT_MEMBER_OF_THE_ORGANIZATION = _(\"Requesting user is not member of the organization. 
Access denied.\")\n\n\nMETADATA_RESTORING_SUCCESS = _(\"Metadata restored to original\")\nMETADATA_EDITING_SUCCESS = _(\"Metadata editing successful\")\nMETADATA_ADDED_SUCCESS = _(\"Metadata added successful\")\nMETADATA_IS_ORIGINAL = _(\"Metadata is original. Reset aborted.\")\nMETADATA_PROXY_NOT_POSSIBLE_DUE_TO_SECURED = _(\"You have to turn off the secured access before you can turn off the proxy.\")\n\nRESOURCE_NOT_FOUND = _(\"The requested resource could not be found.\")\nRESOURCE_NOT_FOUND_OR_NOT_OWNER = _(\"The requested resource does not exist or you are not the owner.\")\n\nREQUEST_ACTIVATION_TIMEOVER = _(\"The request was not activated in time. Request was deleted.\")\n\nPUBLISH_REQUEST_SENT = _(\"Publish request from %(group)s has been sent to the organization %(organization)s\")\nPUBLISH_REQUEST_ACCEPTED = _(\"Publish request has been accepted.\")\nPUBLISH_REQUEST_DENIED = _(\"Publish request has been denied.\")\nPUBLISH_REQUEST_ABORTED_ALREADY_PUBLISHER = _(\"Your group already is a publisher for this organization!\")\nPUBLISH_REQUEST_ABORTED_OWN_ORG = _(\"You cannot be a publisher to your group's own organization! You publish by default like this.\")\nPUBLISH_REQUEST_ABORTED_IS_PENDING = _(\"Your group already has sent a request. Please be patient!\")\n\nPUBLISH_PERMISSION_REMOVED = _(\"Publishing permission of {} for {} removed.\")\nPUBLISH_PERMISSION_REMOVING_DENIED = _(\"Publish permission removing denied. You are not a member of the organization nor a member of the publishing group!\")\n\nGROUP_CAN_NOT_BE_OWN_PARENT = _(\"A group can not be parent to itself!\")\nGROUP_IS_OTHERS_PROPERTY = _(\"This group is owned by another user. Action denied.\")\n\nGROUP_SUCCESSFULLY_DELETED = _(\"Group %(name)s was deleted successfully.\")\nGROUP_SUCCESSFULLY_EDITED = _(\"Group %(name)s was edited successfully.\")\nGROUP_SUCCESSFULLY_CREATED = _(\"Group %(name)s was created successfully.\")\nORGANIZATION_CAN_NOT_BE_OWN_PARENT = _(\"An organization can not be parent to itself!\")\nORGANIZATION_IS_OTHERS_PROPERTY = _(\"This organization is owned by another user. Action denied.\")\nORGANIZATION_SUCCESSFULLY_EDITED = _(\"Organization %(organization_name)s was edited successfully.\")\nORGANIZATION_SUCCESSFULLY_CREATED = _(\"Organization %(organization_name)s was created successfully.\")\nORGANIZATION_SUCCESSFULLY_DELETED = _(\"Organization %(organization_name)s was deleted successfully.\")\n\nSERVICE_PENDING_TASK_ABORTED = _(\"{} for resource '{}' was canceled.\")\nSERVICE_SUCCESSFULLY_DELETED = _(\"Resource %(name)s was successfully deleted.\")\nSERVICE_REGISTRATION_ABORTED = _(\"The service registration for '{}' was canceled\")\nSERVICE_UPDATE_WRONG_TYPE = _(\"You tried to update a service to another service type. This is not possible!\")\nSERVICE_UPDATE_ABORTED_NO_DIFF = _(\"The provided capabilities document is not different from the currently registered. Update canceled!\")\nSERVICE_GENERIC_ERROR = _(\"The service could not be registered. Please check your metadata and contact an administrator.\")\nSERVICE_LAYER_NOT_FOUND = _(\"The requested layer could not be found.\")\nSERVICE_NOT_FOUND = _(\"The requested service could not be found.\")\nSERVICE_DISABLED = _(\"423 - The requested resource is currently disabled.\")\nSERVICE_CAPABILITIES_UNAVAILABLE =_(\"The requested capabilities are currently unavailable. 
Add 'fallback=true' to your query if you want a cached document.\")\nSERVICE_NO_ROOT_LAYER = _(\"No root layer could be found for this service!\")\n\nSECURITY_PROXY_ERROR_MULTIPLE_SECURED_OPERATIONS = _(\"There are multiple secured operations for one metadata. Please contact an administator.\")\nSECURITY_PROXY_NOT_ALLOWED = _(\"You have no permission to access this resource.\")\nSECURITY_PROXY_DEACTIVATING_NOT_ALLOWED = _(\"The resource is authenticated externally. Proxy can not be deactivated.\")\nSECURITY_PROXY_MUST_BE_ENABLED_FOR_LOGGING = _(\"Proxy must be activated to be logged!\")\nSECURITY_PROXY_MUST_BE_ENABLED_FOR_SECURED_ACCESS = _(\"Proxy must be enabled if service shall stay secured!\")\nSECURITY_PROXY_ERROR_OPERATION_NOT_SUPPORTED = _(\"The requested operation is not supported by this resource.\")\nSECURITY_PROXY_ERROR_BROKEN_URI = _(\"The requested uri seems to be broken. Please inform an administrator.\")\nSECURITY_PROXY_WARNING_ONLY_FOR_ROOT = _(\"This setting is only available for the top level element.\")\nSECURITY_PROXY_ERROR_MISSING_REQUEST_TYPE = _(\"No 'request' parameter provided.\")\nSECURITY_PROXY_ERROR_MISSING_EXT_AUTH_KEY = _(\"Login credentials for external authentication could not be decrypted. The key is missing. Please inform an administrator.\")\nSECURITY_PROXY_ERROR_WRONG_EXT_AUTH_KEY = _(\"Login credentials for external authentication could not be decrypted. The key is wrong. Please inform an administrator.\")\n\nLOGGING_INVALID_OUTPUTFORMAT = _(\"No logable outputformat given. Logable formats are {}. \\nAlternatively remove the OUTPUTFORMAT parameter from your request.\")\n\nOPERATION_HANDLER_MULTIPLE_QUERIES_NOT_ALLOWED = _(\"Multiple feature queries in a single request detected. Please use one query per request.\")\n\nMULTIPLE_SERVICE_METADATA_FOUND = _(\"There are several service metadata documents for this service. 
Please contact an administrator.\")\n\nEDITOR_INVALID_ISO_LINK = _(\"'{}' was invalid.\")\nEDITOR_ACCESS_RESTRICTED = _(\"Access for '{}' changed successfully.\")\n\nTD_POINT_HAS_NOT_ENOUGH_VALUES = _(\"2D-Points must hold two values for x and y.\") # TD_ = 2D_, has to be renamed due to pep8\n\nEMAIL_IS_UNKNOWN = _(\"Inserted email address is unknown.\")\n\nSUBSCRIPTION_EDITING_SUCCESSFULL = _(\"Subscription edited successfully.\")\nSUBSCRIPTION_EDITING_UNSUCCESSFULL = _(\"Subscription could not be edited.\")\nSUBSCRIPTION_REMOVED_TEMPLATE = _(\"Subscription for '{}' was removed.\")\nSUBSCRIPTION_SUCCESSFULLY_CREATED = _(\"Subscription sucessfully created.\")\nSUBSCRIPTION_ALREADY_EXISTS_TEMPLATE = _(\"'{}' already subscribed.\")\nSUBSCRIPTION_SUCCESSFULLY_DELETED = _(\"Subscription successfully deleted.\")\n\nMAP_CONTEXT_SUCCESSFULLY_EDITED = _(\"Map Context %(title)s was edited successfully.\")\nMAP_CONTEXT_SUCCESSFULLY_CREATED = _(\"Map Context %(title)s was created successfully.\")\nMAP_CONTEXT_SUCCESSFULLY_DELETED = _(\"Map Context %(title)s was deleted successfully.\")\n\nGROUP_INVITATION_CREATED = _(\"%(user)s has been invited to %(group)s\")\n\nMONITORING_RUN_SCHEDULED = _(\"Monitoring run for %(metadatas)s scheduled.\")\n\nHARVEST_RUN_SCHEDULED = _(\"Harvest run for %(metadata)s scheduled.\")\n\n","sub_path":"mrmap/MrMap/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":9432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"234922987","text":"import turtle\nsquish = turtle.Turtle()\n\ndef drSquare():\n for i in range(4):\n squish.forward(100)\n squish.left(90)\n \n # squish.forward(-10)\n squish.left(20)\n\ndef main():\n wn = turtle.Screen()\n wn.bgcolor(\"lightgreen\")\n squish.width(3)\n squish.color(\"blue\")\n\n squish.penup()\n squish.left(120)\n squish.pendown()\n for i in range(20):\n drSquare()\n\n wn.exitonclick()\n\nif __name__ == \"__main__\":\n main()","sub_path":"misc/ip/section_4/exercises/square_star.py","file_name":"square_star.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"163888225","text":"import os\nimport sys\nimport unittest\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom app.util import get_show_tables_command_string\n\nfrom app.db import (get_database_connection,\n create_database,\n get_properties_by_address_or_description_fragment,\n get_selected_properties,\n set_property_selected_true,\n set_property_selected_false)\n\n\nIN_MEMORY_DB_NAME = \":memory:\"\n\n\nclass TestDB(unittest.TestCase):\n def test_create_database(self):\n connection = get_database_connection(IN_MEMORY_DB_NAME)\n\n with connection:\n create_database(connection)\n\n cursor = connection.cursor()\n\n cursor.execute(get_show_tables_command_string())\n\n response = cursor.fetchall()\n\n if len(response) > 0:\n self.assertEqual(response[0][0], \"properties\", \"The correct database table was not created.\")\n else:\n self.fail(\"There appears to be no database tables. This is unexpected.\")\n\n def test_get_properties_by_address_or_description_fragment(self):\n connection = get_database_connection(IN_MEMORY_DB_NAME)\n\n with connection:\n create_database(connection)\n\n properties = get_properties_by_address_or_description_fragment(connection, \"six\")\n\n self.assertGreater(len(properties), 0, \"Zero properties were returned. 
This is unexpected.\")\n\n def test_get_selected_properties_and_set_property_selected_true(self):\n connection = get_database_connection(IN_MEMORY_DB_NAME)\n\n with connection:\n create_database(connection)\n\n set_property_selected_true(connection, 0)\n\n selected_properties = get_selected_properties(connection)\n\n self.assertEqual(len(selected_properties), 1, \"There was not exactly one selected property. This is unexpected.\")\n self.assertEqual(selected_properties[0][\"index\"], 0, \"The selected property's index was not zero. This is unexpected.\")\n\n def test_set_property_selected_false(self):\n connection = get_database_connection(IN_MEMORY_DB_NAME)\n\n with connection:\n create_database(connection)\n\n set_property_selected_true(connection, 0)\n\n selected_properties = get_selected_properties(connection)\n\n self.assertEqual(len(selected_properties), 1, \"There was not exactly one selected property. This is unexpected.\")\n self.assertEqual(selected_properties[0][\"index\"], 0, \"The selected property's index was not zero. This is unexpected.\")\n\n set_property_selected_false(connection, 0)\n\n selected_properties_2 = get_selected_properties(connection)\n\n self.assertEqual(len(selected_properties_2), 0, \"There was not exactly zero selected properties. This is unexpected.\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"627303084","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.template import RequestContext\nfrom .models import Tags,Tests\nfrom .models import Questions\nfrom .models import Document\nfrom .forms import DocumentForm\nfrom superadmin.models import AddInstitute\nfrom django.shortcuts import render_to_response\n\n\n\n# Create your views here.\n\ndef index(request):\n\treturn render(request,\"index.html\",{})\n\n\ndef tags(request):\n\tuser = request.session[\"user\"]\n\treturn render(request,\"add_tag.html\",{'user':user})\n\n\t\"\"\"\n\trequestdict = request.POST\n\tsource = requestdict[\"source\"]\n\tlang = requestdict[\"lang\"]\n\ttestcases = requestdict[\"testcases\"]\n\toutput=requestdict.get(\"output\")\n\ttimeout=1\n\t\n\turl = \"api.hackerrank.com/checker/submission.json\"\n\tapi_key = \"hackerrank|161256-622|faa76a548e2dce2ef37df6a68d4dc0c75bd760f3\"\n\tr = requests.post(\"http://api.hackerrank.com/checker/submission.json\", data = {\n\t \"source\" : source,\n\t \"lang\" : lang,\n\t \"testcases\" : testcases,\n\t \"api_key\" : api_key\n\t})\n\tresult = r.json()\n\ttry:\n\t\tout=int(result['result']['stdout'][0])\n\n\t\n\n\t\toutput=int(output)\n\t\tif(output==out):\n\t\t\treturn HttpResponse(\"Success
Output: \"+result['result']['stdout'][0]+ \"
\" +\"Compile Time: \"+str(result['result']['time'][0])\t)\n\t\telse:\n\t\t\treturn HttpResponse(\"Wrong Answer. Try Again!\"+\"
\"+\"Your Output:\"+(result['result']['stdout'][0]))\n\t\t#return HttpResponse(out)\t\n\texcept TypeError:\t\n\t\treturn HttpResponse(result['result']['compilemessage'])\t\n\t\t#return HttpResponse(\"Compile error:
\"+result['result']['compilemessage'])\n\texcept KeyError:\n\t\treturn HttpResponse(\"Key error
Output format not recognized\")\t\n\texcept:\n\t\treturn HttpResponse(\"Compile error
Unexcpected output (output type not allowed)\")\t\n\n\t\"\"\"\n\ndef inserttag(request):\n\trequestdict = request.POST\n\tname=requestdict[\"tag\"]\n\tp = Tags(tag_name=name)\n\tp.save()\n\treturn render(request,\"add_tag.html\",{})\t\n\ndef addquestion(request):\t\n\ttags=Tags.objects.all()\n\tuser = request.session[\"user\"]\n\treturn TemplateResponse(request,'addquestions.html',{'tags':tags,'user':user})\n\ndef viewquestions(request):\t\n\ttags=Questions.objects.all()\n\treturn TemplateResponse(request,'viewquestions.html',{'tags':tags})\n\n\ndef updatequestion(request):\n\tif request.method == 'POST':\n\t\ttagname = str(request.POST['tagnames'])\n\t\tfilename = str(request.FILES['docfile'])\n\t\tquestionname =str( request.POST['question'])\n\t\tp = Questions(tag_name=tagname,file_name=filename,question_name=questionname)\n\t\tp.save()\n\t\tform = DocumentForm(request.POST, request.FILES)\n\t\tnewdoc = Document(docfile = request.FILES['docfile'])\n\t\tnewdoc.save()\n\tdocuments = Document.objects.all()\n\t#return HttpResponse(tagname+' '+filename+' '+questionname)\n\treturn render(request,\"addquestions.html\",{'success':'yes'})\n\ndef scheduleTest(request):\n\tnames=AddInstitute.objects.all()\n\tuser = request.session[\"user\"]\n\tquestion_names = Questions.objects.all()\n\treturn TemplateResponse(request,'schedule_test.html',{'names':names,'user':user,'questionNames':question_names}) \n\n\ndef push_test(request):\n\tif request.method == 'POST':\n\t\tinstitutename = str(request.POST['institute_name'])\n\t\ttestname =str( request.POST['test_name'])\n\t\tquestionname = str( request.POST['question_name'])\n\t\tuser = request.session[\"user\"]\n\t\tp=Tests(institute_name=institutename,test_name=testname,question_name=questionname)\n\t\tp.save()\n\n\t\treturn render(request,\"schedule_test.html\",{'user':user})\n","sub_path":"techportal/coder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"646080259","text":"import numpy\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.spatial import distance\r\nimport timeit\r\n\r\ndataset=numpy.loadtxt(r\"C:\\Users\\Utente\\Desktop\\clusters1.txt\", delimiter=\",\",skiprows=1)\r\n\r\n\r\n\r\n\r\n\r\n#for l in range(0,1):\r\n \r\ndef K_means(dataset):\r\n \r\n \r\n randIndex = numpy.random.choice(dataset.shape[0], 15 , replace = False)\r\n \r\n t_centroids=centroids=numpy.array(dataset[randIndex])\r\n \r\n distances=numpy.empty(5000,float)\r\n \r\n \r\n for j in range (0,100): #100 iterations max\r\n \r\n \r\n for k,pts in enumerate(dataset):\r\n \r\n distanze=numpy.array([distance.euclidean(pts,centroids[ce]) for ce in range(0,15)])\r\n \r\n val=numpy.where(distanze==numpy.min(distanze))[0]\r\n \r\n distances[k]=val\r\n \r\n \r\n for i in range(0, 15):\r\n\r\n t_centroids[i] = numpy.average(dataset[distances==i],axis=0) \r\n \r\n if numpy.array_equal(t_centroids, centroids):\r\n \r\n break\r\n \r\n centroids=t_centroids\r\n \r\n \r\n \r\n\r\n # plt.scatter(dataset[:,0] ,dataset[:,1], c=(distances+(distances*2)),s=2)\r\n \r\n # plt.scatter(centroids[:,0],centroids[:,1],c='r') \r\n \r\n # plt.show()\r\n \r\n \r\n\r\nprint(\"k_means\", timeit.timeit(lambda: K_means(dataset), number=1))\r\n \r\n \r\n ","sub_path":"lab4_1.py","file_name":"lab4_1.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"73558630","text":"#!/usr/bin/env 
python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport tensorflow as tf\nimport keras_tuner as kt\nimport numpy as np\ndf_train_labels_original = pd.read_csv('train_labels.csv',low_memory=False, dtype= {\n 'damage_grade':'uint8'\n}).set_index('building_id').apply(lambda x: x-1)\ndf_train_values_original = pd.read_csv('train_values.csv',low_memory=False, dtype= {\n 'geo_level_1_id':'category', \n 'geo_level_2_id':'int64',\n 'geo_level_3_id':'int64', \n 'count_floors_pre_eq':'uint8',\n 'age':'uint16',\n 'area_percentage':'uint16', \n 'height_percentage':'uint16', \n 'land_surface_condition':'category', \n 'foundation_type':'category',\n 'roof_type':'category',\n 'ground_floor_type':'category',\n 'other_floor_type':'category',\n 'position':'category',\n 'plan_configuration':'category', \n 'has_superstructure_adobe_mud':'uint8',\n 'has_superstructure_mud_mortar_stone':'uint8',\n 'has_superstructure_stone_flag':'uint8',\n 'has_superstructure_cement_mortar_stone':'uint8', \n 'has_superstructure_mud_mortar_brick':'uint8', \n 'has_superstructure_cement_mortar_brick':'uint8', \n 'has_superstructure_timber':'uint8', \n 'has_superstructure_bamboo':'uint8',\n 'has_superstructure_rc_non_engineered':'uint8',\n 'has_superstructure_rc_engineered':'uint8',\n 'has_superstructure_other':'uint8', \n 'legal_ownership_status':'category',\n 'count_families':'uint16', \n 'has_secondary_use':'uint8', \n 'has_secondary_use_agriculture':'uint8', \n 'has_secondary_use_hotel':'uint8',\n 'has_secondary_use_rental':'uint8',\n 'has_secondary_use_institution':'uint8',\n 'has_secondary_use_school':'uint8', \n 'has_secondary_use_industry':'uint8', \n 'has_secondary_use_health_post':'uint8', \n 'has_secondary_use_gov_office':'uint8', \n 'has_secondary_use_use_police':'uint8', \n 'has_secondary_use_other':'uint8',\n}).set_index('building_id').drop(columns=['geo_level_3_id'])\n\n\npd.options.display.float_format = '{:20,.2f}'.format\n\n\n# In[2]:\n\n\ndf = df_train_values_original.join(df_train_labels_original,how=\"inner\")\ndf\n\n\n# In[3]:\n\n\ndef mean_encode(dataframe, column_name):\n new_column_names = {\n 0: column_name+'_0',\n 1: column_name+'_1',\n 2: column_name+'_2',\n }\n cross = pd.crosstab(dataframe[column_name], dataframe['damage_grade']).rename(columns=new_column_names)\n prob = cross.divide(cross.apply('sum',axis=1),axis=0).reset_index()\n return dataframe.reset_index().merge(prob,on=column_name).set_index('building_id').drop(columns=[column_name])\n\n\n# In[4]:\n\n\ndef one_hot_encode_data(dataframe, column_name):\n dummies = pd.get_dummies(dataframe[column_name])\n rename_columns = {}\n for column in dummies.columns.values:\n rename_columns[column] = column_name + '_' + column\n return dataframe.drop(columns=[column_name]).join(dummies.rename(columns=rename_columns))\n\n\n# In[5]:\n\n\n#df = mean_encode(df, 'geo_level_2_id')\ndf = one_hot_encode_data(df,'land_surface_condition')\ndf = one_hot_encode_data(df,'foundation_type')\ndf = one_hot_encode_data(df,'roof_type')\ndf = one_hot_encode_data(df,'ground_floor_type')\ndf = one_hot_encode_data(df,'other_floor_type')\ndf = one_hot_encode_data(df,'position')\ndf = one_hot_encode_data(df,'plan_configuration')\ndf = one_hot_encode_data(df,'legal_ownership_status')\ndf = one_hot_encode_data(df,'geo_level_1_id')\ndf\n\n\n# In[6]:\n\n\ntrain_df, target = (df.drop(columns=['damage_grade'])[:int(len(df)/2)], df['damage_grade'][:int(len(df)/2)])\ntest_df, test_target = (df.drop(columns=['damage_grade'])[int(len(df)/2):], 
df['damage_grade'][int(len(df)/2):])\n\n\n# In[12]:\n\n\nfrom sklearn.model_selection import StratifiedKFold\nfolds = StratifiedKFold(n_splits=5, shuffle=True).split(train_df, target)\n\nfor training_index, validation_index in folds:\n x_train = train_df.iloc[training_index]\n x_validation = train_df.iloc[validation_index]\n # 'columns' is a list of columns to encode\n means = x_validation['geo_level_2_id'].map(target.groupby('geo_level_2_id').mean())\n x_validation['geo_level_2_id' + \"_mean_target\"] = means\n # train_new is a dataframe copy we made of the training data\n train_new.iloc[value_index] = x_validation\n\nglobal_mean = training[\"target\"].mean()\n\n# replace nans with the global mean\ntrain_new.fillna(global_mean, inplace=True)\n\n\n# In[25]:\n\n\ndataset = tf.data.Dataset.from_tensor_slices((train_df.values, target.values))\nfor feat, targ in dataset.take(5):\n print ('Features: {}, Target: {}'.format(feat, targ))\n\n\n# In[26]:\ndef compile_model(hp):\n layers = []\n l_amount = hp.Int('l_amount', min_value=3, max_value=9, step=1)\n l_size = hp.Int('l_size', min_value=200, max_value=1000, step=100)\n for x in range(l_amount):\n layers.append(tf.keras.layers.Dense(l_size, activation='relu'))\n layers.append(tf.keras.layers.Dense(units=3, activation='softmax'))\n model = tf.keras.Sequential(layers)\n loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)\n opt = tf.keras.optimizers.Adam(clipnorm=1.0)\n model.compile(optimizer=opt,\n loss=loss_fn,\n metrics=['accuracy'])\n return model\n\ndef compile_model(hp):\n layers = []\n l_amount = hp.Int('l_amount', min_value=4, max_value=7, step=1)\n l_size = hp.Int('l_size', min_value=297, max_value=693, step=99)\n for x in range(l_amount):\n layers.append(tf.keras.layers.Dense(l_size, activation='relu'))\n layers.append(tf.keras.layers.Dense(units=3, activation='softmax'))\n model = tf.keras.Sequential(layers)\n loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)\n opt = tf.keras.optimizers.Adam(clipnorm=1.0)\n model.compile(optimizer=opt,\n loss=loss_fn,\n metrics=['accuracy'])\n return model\n\n\n# In[10]:\n\n\ntuner = kt.BayesianOptimization(compile_model,\n objective='val_accuracy',\n max_trials=20,\n )\n\n\n# In[12]:\n\n\nstop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)\n\ntuner.search(\n train_df.to_numpy(), \n target.to_numpy(), \n epochs=10, \n validation_data=(test_df, target_df),\n batch_size=128,\n callbacks=[stop_early],\n)\n\n# Get the optimal hyperparameters\nbest_hps=tuner.get_best_hyperparameters(num_trials=1)[0]\n\n\n# In[27]:\n\n\nbest_hps.get('l_size')\n\n\n# In[28]:\n\n\nbest_hps.get('l_amount')\n\n\n# In[29]:\n\n\nbest_model = compile_model(best_hps)\nbest_model.fit(dataset.batch(128), epochs=180)\n\n\n# In[16]:\n\n\nbest_model.save('modelos/NNModelBY')\n\n\n# In[17]:\n\n\nbest_model.evaluate(train_df.to_numpy(), target.to_numpy())[1]\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Neural Network Baysean Optimization.py","file_name":"Neural Network Baysean Optimization.py","file_ext":"py","file_size_in_byte":6695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"207242209","text":"from django.conf.urls import url\nfrom . 
import views\nfrom FreshBLD import settings\nfrom django.conf.urls.static import static\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token\n\n\nurlpatterns = [\n url(r'^kitchen$', views.KitchenView.as_view(), name='List of Kitchen'),\n url(r'^kitchen/(?P[0-9]+)$', views.ItemView.as_view(), name='Manu List'),\n url(r'^Cart$', views.MobileCartView.as_view(), name='get cart'),\n url(r'^CartCreate$', views.MobileCartCreate.as_view(), name='get cart'),\n url(r'^CleanCart$', views.CleanCart.as_view(), name='Clean Cart'),\n url(r'^api-token-auth/', obtain_jwt_token),\n url(r'^api-token-refresh/', refresh_jwt_token),\n url(r'^Checkout', views.Checkout.as_view(), name='check out'),\n url(r'^Buying', views.BuyingView.as_view(), name='Buying List View'),\n url(r'^Selling', views.BuyingView.as_view(), name='Buying List View'),\n url(r'^MyKitchen', views.MyKitchenView.as_view(), name='MyKitchen View'),\n url(r'^MyMenu', views.MyMenuView.as_view(), name='MyMenu View'),\n url(r'^CreateUser$', views.CreateUserView.as_view(), name='Create User View'),\n]\n","sub_path":"MobileAPI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"230823944","text":"from pso import *\n\nrandom.seed(2)\n\n# Given below are the sample test cases for all functions of pso. Uncomment whichever functions you want to test out.\n\n# T1\n# Sample test case for cost_function\nX = [2, 1]\ncost = cost_function(X)\nassert cost == 5.0\n\n# T2\n# Sample test case for initialise\nrandom.seed(2)\ninitial_position, initial_velocity, best_position, best_cost = initialise(3)\nassert initial_position == [9.120685437784989, -8.868972645463826, 6.709977562588993]\n\n# T3\n# Sample test case for assess\nposition = [-1, 2, -3]\nbest_position = [2, 3, 4]\nbest_cost = -1\nbest_cost = assess(position, best_position, best_cost, lambda x: sum(x))\nassert best_position == [-1, 2, -3]\n\n# T4\n# Sample test case for velocity_update\nrandom.seed(2)\nw = 0.2\nc1 = 1\nc2 = 2\nvelocity = [0.5, 0.5, 0.5]\nposition = [1, 2, 3]\nbest_position = [3, 4, 5]\nbest_group_position = [2, 3, 4]\nvelocity_update(w, c1, c2, velocity, position, best_position, best_group_position)\nassert velocity == [3.907723517897198, 0.3828467257714606, 3.242937734395946]\n\n# T5\n# Sample test case for position_update\nvelocity = [0.5, 0.5, 0.5]\nposition = [1, 2, 3]\nlimits = [-10, 10]\nposition_update(position, velocity, limits)\nassert position == [1.5, 2.5, 3.5]\n\n# T6\n# Sample test case for optimise\nrandom.seed(2)\nvector_length = 6\nlimits = [-10, 10]\nw = 0.2\nc1 = 1\nc2 = 2\nswarm_size = 15\nmax_iterations = 50\nbest_group_position, best_group_cost = optimise(vector_length, swarm_size, w, c1, c2, limits, max_iterations)\nassert best_group_position == [-0.011339304431086255, -10.0, -0.00224581601324191, -9.036576634485082, -0.014853603978905627, 10.0]\n\n","sub_path":"assignments/Assignment_2/pso_sample_tests.py","file_name":"pso_sample_tests.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"169613561","text":"\r\nwith open('automato2.txt', 'r') as f:\r\n lines = [line.rstrip() for line in f]\r\nprint(f\"inicial: {lines[0]}\")\r\nprint(f\"finais: {lines[1]}\")\r\nprint(f\"transicoes: {lines[2]}\")\r\n\r\n# manipulando as transicoes e adicionando na lista\r\ntransicoes = lines[2].split(',')\r\nt = []\r\nfor x in transicoes:\r\n 
itemString = x.replace('(', '')\r\n itemString = itemString.replace(')', '')\r\n item = itemString.split(\"|\")\r\n t.append(item)\r\n\r\n# pegando os simbolos utilizados apartir das transicoes\r\nsimbolos = []\r\nfor x in t:\r\n if x[1] not in simbolos:\r\n simbolos.append(x[1])\r\naceitacao = list(map(int, lines[1].split(','))) # transformando estados de aceitação em inteiros\r\n\r\n# pegando as entradas para testes\r\nwith open('automato2_entrada.txt', 'r') as f:\r\n entradas = [line.rstrip() for line in f]\r\nc = 0\r\ncadeias = []\r\nfor entrada in entradas:\r\n c+=1\r\n cad = list(entrada)\r\n cadeias.append(cad)\r\nf.close()\r\n\r\ndef percorreCadeia(cadeiaAtual, estadoAtual):\r\n # analisa a cadeia vazia\r\n if(cadeiaAtual == ['$']):\r\n if(estadoAtual in aceitacao):\r\n return True\r\n return False\r\n\r\n # verifica se o estado final da cadeia é um de aceitação\r\n if(cadeiaAtual == []):\r\n if(estadoAtual in aceitacao):\r\n return True\r\n return False\r\n\r\n # compara o primeiro símbolo da cadeia com todas as transições possíveis\r\n simboloAtual = cadeiaAtual[0]\r\n for i in range(len(t)):\r\n transicaoAtual = t[i]\r\n estadoInicialT = int(transicaoAtual[0])\r\n simboloT = transicaoAtual[1]\r\n if((estadoInicialT == estadoAtual) and (simboloT == simboloAtual)):\r\n # muda o estado atual\r\n estado = int(transicaoAtual[2])\r\n # passa para o próximo símbolo da cadeia\r\n cadeiaNova = cadeiaAtual[1:]\r\n # recursividade (com próximo símbolo e novo estado atual)\r\n if(percorreCadeia(cadeiaNova,estado)):\r\n return True\r\n return False\r\n\r\nprint(\"\")\r\nwith open('automato3_resultado.txt', 'w') as f:\r\n # aceitação ou rejeição de cada cadeia de entrada\r\n for j in range(int(c)):\r\n cadeiaAtual = cadeias[j]\r\n # percorre cada cadeia (o estado 0 é sempre o estado inicial)\r\n if(percorreCadeia(cadeiaAtual,0)):\r\n print(\"aceita\")\r\n f.writelines(\"aceita\\n\")\r\n else:\r\n print(\"rejeita\")\r\n f.writelines(\"rejeita\\n\")\r\n\r\nprint(\"\\nResultado registrado em 'automato2_resultado.txt'\")\r\ninput()\r\n","sub_path":"Automato2/automato2.py","file_name":"automato2.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"595975365","text":"rule_list = [\"A N B\", \"B NE C\", \"C N A\"] # test input -> invalid\n\n\ndef process_rules(input_rules: list) -> str:\n\n working_list = [rule.split(\" \") for rule in input_rules] # split each rule: \"B NE C\" -> [B, NE, C]\n north_dict = {}\n south_dict = {}\n east_dict = {}\n west_dict = {}\n\n for rule_part in working_list:\n direction = rule_part[1]\n left_node = rule_part[0]\n right_node = rule_part[2]\n\n if direction[0] == \"N\":\n\n validator = dfs_before_insert(north_dict, left_node, right_node)\n\n if validator is True:\n north_dict[left_node] = right_node\n south_dict[right_node] = left_node\n\n else:\n\n return \"Invalid Rule: %(r)s is already north of %(l)s\" % {'r': right_node, 'l': left_node}\n\n if direction[0] == \"S\":\n\n validator = dfs_before_insert(south_dict, left_node, right_node)\n\n if validator is True:\n\n north_dict[left_node] = right_node\n south_dict[right_node] = left_node\n\n else:\n\n return \"Invalid Rule: %(r)s is already south of %(l)s\" % {'r': right_node, 'l': left_node}\n\n if len(direction) > 1:\n\n if direction[1] == \"E\":\n\n validator = dfs_before_insert(east_dict, left_node, right_node)\n\n if validator is True:\n east_dict[left_node] = right_node\n west_dict[right_node] = left_node\n\n 
else:\n\n return \"Invalid Rule: %(r)s is already east of %(l)s\" % {'r': right_node, 'l': left_node}\n\n if direction[1] == \"W\":\n\n validator = dfs_before_insert(west_dict, left_node, right_node)\n\n if validator is True:\n\n west_dict[left_node] = right_node\n east_dict[right_node] = left_node\n\n else:\n\n return \"Invalid Rule: %(r)s is already west of %(l)s\" % {'r': right_node, 'l': left_node}\n\n return \"All rules are valid\"\n\n\ndef dfs_before_insert(dir_dict: dict, ln: str, rn: str) -> bool:\n\n val = rn\n\n while val in dir_dict.keys():\n\n val = dir_dict[val]\n\n if val is ln:\n\n return False\n\n return True\n","sub_path":"cr/prob87_cardinal_directions.py","file_name":"prob87_cardinal_directions.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"113417312","text":"# -*- coding: utf-8 -*- \nimport json, re, os\nimport sys\n\nfrom math import sin, cos, sqrt, atan2, radians\n\n############## Utility functions for finding distances via Google Geocode API ##############\n# get_lat_long: Fetch the latitude and longitude of a place name string using the Google geocode API\n#\n# Input: name string, e.g. \"Ann Arbor, MI\"\n# Output: a floating-point tuple containing the latitude and longitude, or [None, None] if not found\n#\ndef get_lat_long(place):\n place = re.sub('\\s','+', place, flags=re.UNICODE)\n url = 'https://maps.googleapis.com/maps/api/geocode/json?address=' + place\n content = urllib2.urlopen(url).read()\n\n obj = json.loads(content)\n results = obj['results']\n\n lat = long = None\n if len(results) > 0:\n loc = results[0]['geometry']['location']\n lat = float(loc['lat'])\n long = float(loc['lng'])\n\n return [lat, long]\n\n# Great circle distance between two points using the haversine formula\n#\n# pass the first point's latitude and longitude (in degrees) as a 2-tuple\n# pass the second point's latitude and longitude (in degrees) as a 2-tuple\n# returns -1 if either input point is invalid, i.e. 
negative degree values\ndef great_circle_distance(pt1, pt2):\n R = 6371.0 # mean radius of the Earth in km\n\n if (pt1[0] < 0 or pt2[0] < 0):\n return -1\n\n lat1_r = radians(pt1[0])\n lon1_r = radians(pt1[1])\n lat2_r = radians(pt2[0])\n lon2_r = radians(pt2[1])\n\n dlon = lon2_r - lon1_r\n dlat = lat2_r - lat1_r\n\n a = (sin(dlat/2))**2 + cos(lat1_r) * cos(lat2_r) * (sin(dlon/2))**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n distance = R * c\n\n return distance\n","sub_path":"si601-hw-4/get_fb_distance_functions.py","file_name":"get_fb_distance_functions.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"2620803","text":"from fbprophet import Prophet\nfrom fbprophet.plot import plot_plotly, plot_components_plotly\nfrom sklearn.metrics import mean_absolute_error\nimport pandas as pd\n\ndataset = pd.read_csv('acoes.csv')\n# dataset.set_index(keys=['Date'], inplace=True)\n\ndataset = dataset[['Date', 'BOVA']].rename(columns={'Date': 'ds', 'BOVA': 'y'})\n\n# Modelo\nmodelo = Prophet()\nmodelo.fit(dataset)\n\nfuturo = modelo.make_future_dataframe(periods=90)\nprevisoes = modelo.predict(futuro)\n\n# Gráfico das previsões\nmodelo.plot(previsoes, xlabel='Data', ylabel='Preço');\n\nmodelo.plot_components(previsoes);\n\nplot_plotly(modelo, previsoes)\nplot_components_plotly(modelo, previsoes)\n\n# Avaliação do modelo\npred = modelo.make_future_dataframe(periods=0)\nprevisoes = modelo.predict(pred)\n\nprevisoes = previsoes['yhat'].tail(365)\nmean_absolute_error(teste, previsoes)","sub_path":"Codigos/FB_Prophet.py","file_name":"FB_Prophet.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"62839525","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nTHEME = 'themes/plumage'\n# THEME = 'themes/blue-penguin'\n\nAUTHOR = 'by-the-water'\nSITENAME = 'By the water'\nSITESUBTITLE = 'Geoscience, Oil & Gas, AI'\nSITEURL = 'https://by-the-water.github.io'\n\nGOOGLE_ANALYTICS = u'UA-99319293-1'\n\nPATH = 'content'\n\nTIMEZONE = 'America/Chicago'\n\nDEFAULT_LANG = 'en'\n\nAUTHOR_URL = 'pages/about.html'\n#AUTHORS_SAVE_AS = ''\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\n# LINKS = (('Pelican', 'http://getpelican.com/'),\n # ('Python.org', 'http://python.org/'),\n # ('Jinja2', 'http://jinja.pocoo.org/'),\n # ('You can modify those links in your config file', '#'),)\n\n# Social widget\n# SOCIAL = (('You can add links in your config file', '#'),\n # ('Another social link', '#'),)\n\t\t \nDEFAULT_PAGINATION = 8\n\n# FAVICON = 'url-to-favicon'\n \nDISPLAY_PAGES_ON_MENU = True\n#SUMMARY_MAX_LENGTH = None\n\n# DISQUS_SITENAME = 'https-by-the-water-github-io'\nDISQUS_SITENAME = \"https-by-the-water-github-io\"\n\n# all the following settings are *optional*\n\n# all defaults to True.\nDISPLAY_HEADER = True\nDISPLAY_FOOTER = True\nDISPLAY_HOME = True\nDISPLAY_MENU = True\n\n# provided as examples, they make ‘clean’ urls. 
used by MENU_INTERNAL_PAGES.\n# TAGS_URL = 'tags'\n# TAGS_SAVE_AS = 'tags/index.html'\n# AUTHORS_URL = 'authors'\n# AUTHORS_SAVE_AS = 'authors/index.html'\n# CATEGORIES_URL = 'categories'\n# CATEGORIES_SAVE_AS = 'categories/index.html'\nARCHIVES_URL = 'archives'\nARCHIVES_SAVE_AS = 'archives/index.html'\n\n# use those if you want pelican standard pages to appear in your menu\nMENU_INTERNAL_PAGES = (\n# ('Tags', TAGS_URL, TAGS_SAVE_AS),\n# ('Authors', AUTHORS_URL, AUTHORS_SAVE_AS),\n# ('Categories', CATEGORIES_URL, CATEGORIES_SAVE_AS),\n ('Archives', ARCHIVES_URL, ARCHIVES_SAVE_AS),\n)\n# additional menu items\nMENUITEMS = (\n# ('GitHub', 'https://github.com/'),\n# ('Linux Kernel', 'https://www.kernel.org/'),\n\t ('Email Me','mailto:rossxsy@gmail.com'),\n)\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\n# URLs\nARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}.html'\nARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}.html'\n\n# Plugins\nPLUGIN_PATHS = [u'plugins']\nPLUGINS = [u'sitemap']\n# Configuration for the \"sitemap\" plugin \nSITEMAP = { \n\t'format': 'xml', \n\t'priorities': { \n\t\t'articles': 1, \n\t\t'indexes': 0.5, \n\t\t'pages': 0.5, \n\t\t}, \n\t'changefreqs': { \n\t\t'articles': 'always', \n\t\t'indexes': 'daily', \n\t\t'pages': 'monthly' \n\t\t} \n\t}","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"377859784","text":"\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(bucket=dict(required=True), dest=dict(default=None, type='path'), encrypt=dict(default=True, type='bool'), encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'), expiry=dict(default=600, type='int', aliases=['expiration']), headers=dict(type='dict'), marker=dict(default=''), max_keys=dict(default=1000, type='int'), metadata=dict(type='dict'), mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), object=dict(), permission=dict(type='list', default=['private']), version=dict(default=None), overwrite=dict(aliases=['force'], default='always'), prefix=dict(default=''), retries=dict(aliases=['retry'], type='int', default=0), s3_url=dict(aliases=['S3_URL']), dualstack=dict(default='no', type='bool'), rgw=dict(default='no', type='bool'), src=dict(), ignore_nonexistent_bucket=dict(default=False, type='bool'), encryption_kms_key_id=dict()))\n module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=[['mode', 'put', ['src', 'object']], ['mode', 'get', ['dest', 'object']], ['mode', 'getstr', ['object']], ['mode', 'geturl', ['object']]])\n if (module._name == 's3'):\n module.deprecate(\"The 's3' module is being renamed 'aws_s3'\", version=2.7)\n bucket = module.params.get('bucket')\n encrypt = module.params.get('encrypt')\n expiry = module.params.get('expiry')\n dest = module.params.get('dest', '')\n headers = module.params.get('headers')\n marker = module.params.get('marker')\n max_keys = module.params.get('max_keys')\n metadata = module.params.get('metadata')\n mode = module.params.get('mode')\n obj = module.params.get('object')\n version = module.params.get('version')\n overwrite = module.params.get('overwrite')\n prefix = module.params.get('prefix')\n retries = module.params.get('retries')\n s3_url = module.params.get('s3_url')\n dualstack = 
module.params.get('dualstack')\n rgw = module.params.get('rgw')\n src = module.params.get('src')\n ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')\n object_canned_acl = ['private', 'public-read', 'public-read-write', 'aws-exec-read', 'authenticated-read', 'bucket-owner-read', 'bucket-owner-full-control']\n bucket_canned_acl = ['private', 'public-read', 'public-read-write', 'authenticated-read']\n if (overwrite not in ['always', 'never', 'different']):\n if module.boolean(overwrite):\n overwrite = 'always'\n else:\n overwrite = 'never'\n (region, ec2_url, aws_connect_kwargs) = get_aws_connection_info(module, boto3=True)\n if (region in ('us-east-1', '', None)):\n location = 'us-east-1'\n else:\n location = region\n if module.params.get('object'):\n obj = module.params['object']\n if obj.startswith('/'):\n obj = obj[1:]\n if (obj and (mode == 'delete')):\n module.fail_json(msg='Parameter obj cannot be used with mode=delete')\n if ((not s3_url) and ('S3_URL' in os.environ)):\n s3_url = os.environ['S3_URL']\n if (dualstack and ('amazonaws.com' not in s3_url)):\n module.fail_json(msg='dualstack only applies to AWS S3')\n if (dualstack and (not module.botocore_at_least('1.4.45'))):\n module.fail_json(msg='dualstack requires botocore >= 1.4.45')\n if (rgw and (not s3_url)):\n module.fail_json(msg='rgw flavour requires s3_url')\n if s3_url:\n for key in ['validate_certs', 'security_token', 'profile_name']:\n aws_connect_kwargs.pop(key, None)\n s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)\n validate = (not ignore_nonexistent_bucket)\n bucket_acl = [acl for acl in module.params.get('permission') if (acl in bucket_canned_acl)]\n object_acl = [acl for acl in module.params.get('permission') if (acl in object_canned_acl)]\n error_acl = [acl for acl in module.params.get('permission') if ((acl not in bucket_canned_acl) and (acl not in object_canned_acl))]\n if error_acl:\n module.fail_json(msg=('Unknown permission specified: %s' % error_acl))\n bucketrtn = bucket_check(module, s3, bucket, validate=validate)\n if (validate and (mode not in ('create', 'put', 'delete')) and (not bucketrtn)):\n module.fail_json(msg='Source bucket cannot be found.')\n if (mode == 'get'):\n keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)\n if (keyrtn is False):\n if version:\n module.fail_json(msg=('Key %s with version id %s does not exist.' % (obj, version)))\n else:\n module.fail_json(msg=('Key %s does not exist.' % obj))\n if path_check(dest):\n if keysum_compare(module, dest, s3, bucket, obj, version=version):\n sum_matches = True\n if (overwrite == 'always'):\n try:\n download_s3file(module, s3, bucket, obj, dest, retries, version=version)\n except Sigv4Required:\n s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)\n download_s3file(module, s3, bucket, obj, dest, retries, version=version)\n else:\n module.exit_json(msg='Local and remote object are identical, ignoring. Use overwrite=always parameter to force.', changed=False)\n else:\n sum_matches = False\n if (overwrite in ('always', 'different')):\n try:\n download_s3file(module, s3, bucket, obj, dest, retries, version=version)\n except Sigv4Required:\n s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)\n download_s3file(module, s3, bucket, obj, dest, retries, version=version)\n else:\n module.exit_json(msg='WARNING: Checksums do not match. 
Use overwrite parameter to force download.')\n else:\n try:\n download_s3file(module, s3, bucket, obj, dest, retries, version=version)\n except Sigv4Required:\n s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)\n download_s3file(module, s3, bucket, obj, dest, retries, version=version)\n if (mode == 'put'):\n if (not path_check(src)):\n module.fail_json(msg='Local object for PUT does not exist')\n if bucketrtn:\n keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)\n if (bucketrtn and keyrtn):\n if keysum_compare(module, src, s3, bucket, obj):\n sum_matches = True\n if (overwrite == 'always'):\n module.params['permission'] = object_acl\n upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)\n else:\n get_download_url(module, s3, bucket, obj, expiry, changed=False)\n else:\n sum_matches = False\n if (overwrite in ('always', 'different')):\n module.params['permission'] = object_acl\n upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)\n else:\n module.exit_json(msg='WARNING: Checksums do not match. Use overwrite parameter to force upload.')\n if (not bucketrtn):\n module.params['permission'] = bucket_acl\n create_bucket(module, s3, bucket, location)\n module.params['permission'] = object_acl\n upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)\n if (bucketrtn and (not keyrtn)):\n module.params['permission'] = object_acl\n upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)\n if (mode == 'delobj'):\n if (obj is None):\n module.fail_json(msg='object parameter is required')\n if bucket:\n deletertn = delete_key(module, s3, bucket, obj)\n if (deletertn is True):\n module.exit_json(msg=('Object deleted from bucket %s.' % bucket), changed=True)\n else:\n module.fail_json(msg='Bucket parameter is required.')\n if (mode == 'delete'):\n if bucket:\n deletertn = delete_bucket(module, s3, bucket)\n if (deletertn is True):\n module.exit_json(msg=('Bucket %s and all keys have been deleted.' % bucket), changed=True)\n else:\n module.fail_json(msg='Bucket parameter is required.')\n if (mode == 'list'):\n exists = bucket_check(module, s3, bucket)\n if (not exists):\n module.fail_json(msg=('Target bucket (%s) cannot be found' % bucket))\n list_keys(module, s3, bucket, prefix, marker, max_keys)\n if (mode == 'create'):\n if (bucket and (not obj)):\n if bucketrtn:\n module.exit_json(msg='Bucket already exists.', changed=False)\n else:\n module.params['permission'] = bucket_acl\n module.exit_json(msg='Bucket created successfully', changed=create_bucket(module, s3, bucket, location))\n if (bucket and obj):\n if obj.endswith('/'):\n dirobj = obj\n else:\n dirobj = (obj + '/')\n if bucketrtn:\n if key_check(module, s3, bucket, dirobj):\n module.exit_json(msg=('Bucket %s and key %s already exists.' 
% (bucket, obj)), changed=False)\n else:\n module.params['permission'] = object_acl\n create_dirkey(module, s3, bucket, dirobj, encrypt)\n else:\n module.params['permission'] = bucket_acl\n created = create_bucket(module, s3, bucket, location)\n module.params['permission'] = object_acl\n create_dirkey(module, s3, bucket, dirobj, encrypt)\n if (mode == 'geturl'):\n if ((not bucket) and (not obj)):\n module.fail_json(msg='Bucket and Object parameters must be set')\n keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)\n if keyrtn:\n get_download_url(module, s3, bucket, obj, expiry)\n else:\n module.fail_json(msg=('Key %s does not exist.' % obj))\n if (mode == 'getstr'):\n if (bucket and obj):\n keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)\n if keyrtn:\n try:\n download_s3str(module, s3, bucket, obj, version=version)\n except Sigv4Required:\n s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)\n download_s3str(module, s3, bucket, obj, version=version)\n elif (version is not None):\n module.fail_json(msg=('Key %s with version id %s does not exist.' % (obj, version)))\n else:\n module.fail_json(msg=('Key %s does not exist.' % obj))\n module.exit_json(failed=False)\n","sub_path":"Data Set/bug-fixing-1/4b3e5998bf862211a8e080e6c5e5cef22f87ca09-
-fix.py","file_name":"4b3e5998bf862211a8e080e6c5e5cef22f87ca09-
-fix.py","file_ext":"py","file_size_in_byte":11275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"340471171","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0003_recipe_publish_date'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='recipe',\n name='publish_date',\n field=models.DateTimeField(default=datetime.datetime.utcnow),\n ),\n ]\n","sub_path":"cookin/app/migrations/0004_auto_20151115_0654.py","file_name":"0004_auto_20151115_0654.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"409446548","text":"#!/usr/bin/env python3\n\nimport re\nimport sys\nimport string\n\ndef read_chunk(file_in):\n \"\"\"Yields a list of blank line delimited 'records' from file_in.\n Records are lists of strings with leading and trailing whitespace\n removed.\n \"\"\"\n chunk = []\n while True:\n line = file_in.readline()\n if line == '':\n break\n elif line == '\\n':\n yield chunk\n chunk = []\n else:\n chunk.append(line.strip())\n\n\n# regexps is a list of (pattern, keyname, format_str, conversion) \nregexps = [\n (r'^(Motion)Notify event', 'event', str.lower),\n (r'^(KeyPress) event', 'event', str.lower),\n (r'time (\\d+),', 'time', int), \n (r'(\\(\\d+,\\d+\\)), root:', 'location', eval),\n (r'keysym 0x[\\dabcdef]+, ([^)]+)', 'keysym', str.lower)\n]\n \ndef extract_fields(line, regexps):\n \"\"\"Returns a dictionary containing keys and optional values from line.\n If there is no value corresponding to a key, it is returned as None.\n \"\"\"\n fields = {}\n for pattern, key, convert in regexps:\n match = re.search(pattern, line)\n if match is None:\n continue\n value = convert(match.group(1))\n fields[key] = value\n\n\n return fields\n \n\ndef xev_actions(file_in):\n \"\"\"Yields a series of motion or keypress events from file_in.\"\"\"\n for chunk in read_chunk(file_in):\n line = ' '.join(chunk)\n fields = extract_fields(line, regexps)\n if 'event' in fields:\n yield fields\n\n\nif __name__ == '__main__':\n for d in xev_actions(sys.stdin):\n try:\n print(d)\n except BrokenPipeError:\n sys.exit(1)\n","sub_path":"xev_parse.py","file_name":"xev_parse.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"319003148","text":"from openpyxl import Workbook #modulo per manipolare file excel\nfrom openpyxl.chart import ( #moduli per creare grafici su excel\n ScatterChart,\n Reference,\n Series,\n)\nimport os\nimport serial\nimport re\nimport datetime\naltro='S'\nnow=datetime.datetime.now()\nre1='(start)'\t# Word 1\nre2='(\\\\s+)'\t# White Space 1\nre3='(time)'\t# Word 2\n\nend = re.compile(re1+re2+re3,re.IGNORECASE|re.DOTALL)\nprint('~=~=Spettrofotometro=~=~')\nprint('')\ntry:\n porta = serial.Serial('COM4') #apri porta seriale\nexcept:\n print('Spettrofotometro non trovato!')\n time.sleep(5)\nwb = Workbook() #crea un file excel\nwb.remove(wb.active) #rimuove il foglio di default\nwhile altro=='S':\n nome = input('Nome del campione: ') #inserire il nome del campione da analizzare\n ws = wb.create_sheet(nome)\n ws.cell(row=1,column=1,value=\"Lunghezza\")\n ws.cell(row=1,column=2,value=\"Assorbanza\")\n print('\\nAttendo i dati...')\n linea=[]\n scan=False\n riga=2\n while True:\n byte = porta.read() 
#legge 1 byte\n #print(byte)\n if byte==b\"\\n\":\n stringa = ''.join(linea) #converte la lista in stringa\n #print('rip')\n if end.search(stringa):\n scan=False #trovato la stringa con \"start time...\" quindi la scansione è finita\n print('Scansione terminata!')\n break\n if scan==True:\n stringa = stringa.replace(\" \",\"\")\n a,b=stringa.split(\":\")\n #print(a+'---'+b)\n ws.cell(row=riga,column=1,value=int(a))\n ws.cell(row=riga,column=2,value=float(b))\n riga=riga+1\n if stringa=='WL AU':\n scan=True #la scansione è iniziata!\n print('Inizio scansione!')\n linea=[]\n elif byte!=b\"\\r\":\n linea.append(str(byte, 'utf-8'))\n assorbanze=[]\n for i in range(2,riga):\n assorbanze.append(ws[\"B\"+str(i)].value)\n lunghezze=[]\n for i in range(2,riga):\n lunghezze.append(ws[\"A\"+str(i)].value)\n grafico = ScatterChart()\n grafico.title = \"Curva assorbanza di \"+nome\n grafico.style = 5\n grafico.y_axis.title = 'Assorbanza'\n grafico.x_axis.title = 'Lunghezza d\\'onda'\n grafico.legend=None\n grafico.x_axis.scaling.min=min(lunghezze)\n grafico.x_axis.scaling.max=max(lunghezze)\n grafico.y_axis.scaling.min=min(assorbanze)\n grafico.y_axis.scaling.max=max(assorbanze)\n valorix=Reference(ws, min_col=1,min_row=2,max_row=riga-1)\n valoriy=Reference(ws, min_col=2,min_row=2,max_row=riga-1)\n serie=Series(valoriy,valorix,title_from_data=True)\n grafico.series.append(serie)\n ws.add_chart(grafico) \n altro = 'C'\n porta.flushInput()\n while altro!='S' and altro!='N':\n altro = input('Scansionare un altro campione? (S/N)')\n altro = altro.replace(\"s\",\"S\")\n altro = altro.replace(\"n\",\"N\")\nnomefile = input('\\nNome del file: ')\nprint('SALVATAGGIO... IL FILE SARÀ SALVATO SUL DESKTOP!')\nwb.save(os.environ['UserProfile']+'\\\\Desktop\\\\'+nomefile+'.xlsx')\nprint('SALVATO')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"554397371","text":"def frange(x, y, jump):\n while x < y:\n yield x\n x += jump\n\n\ndef sgn(x: float) -> float:\n if x < 0:\n return -1\n if x > 0:\n return 1\n return 0\n\n\ndef linspace(start, stop, num=50, endpoint=True):\n num = int(num)\n start = start * 1.\n stop = stop * 1.\n\n if num == 1:\n yield stop\n return\n if endpoint:\n step = (stop - start) / (num - 1)\n else:\n step = (stop - start) / num\n\n for i in range(num):\n yield start + step * i\n","sub_path":"scad/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"650438313","text":"class Solution:\n def longestPalindrome(self, s: str) -> int:\n from collections import Counter\n char_count = dict(Counter(s))\n single = False\n length = 0\n\n for key, value in char_count.items():\n length += value if value % 2 == 0 else value - 1\n\n if value % 2 != 0 and not single:\n single = True\n\n return length if not single else length + 1\n\n","sub_path":"LeetCode/longestPalindrome.py","file_name":"longestPalindrome.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"310563147","text":"#!/usr/local/bin/python\n\n'''\nCOS424 project: Recognising facial expressions\nPo-Ta Chen, Sagar Setru, Hugh Wilson, Zidong Zhang\nModule: Feature extraction using Non Negative Matrix Factorisation\n INPUT\n An nxm training data matrix: n: pixel index; m: image index.\n An nxt 
testing data matrix\n RETURN\n An rxm encoded training data matrix: r: low dimensional component index;\n An rxt encoded testing data matrix\n'''\n\nimport pandas as pd\nimport numpy as np\nimport math\nimport sys\nimport os\nimport time\nfrom scipy import ndimage\nfrom scipy import misc\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.decomposition import NMF\nimport sklearn.datasets as data\n\ndef prepareDataInput( inArray ):\n '''Take in a 3D n x m x p array and return\n a 2D (nxm) x p array'''\n nSize = np.shape( inArray )[0]\n mSize = np.shape( inArray )[1]\n pSize = np.shape( inArray )[2]\n outArray = np.zeros( (nSize*mSize, pSize) )\n for i in range(nSize):\n for j in range(mSize):\n outArray[(j+i*mSize),:] = inArray[i,j,:]\n return outArray\n\ndef produceEncoding( trainX, nComponents ):\n '''Produces an NMF encoding from the training\n data matrix'''\n model = NMF( n_components=nComponents, solver='cd', \\\n tol=1e-4, max_iter=200, alpha=0.0 )\n model.fit( trainX )\n return model\n\ndef prepareDataOutput( inArray, out0, out1 ):\n '''Take in a 2D (nxm) x p array and return\n a 3D n x m x p array. n: out0, m: out1'''\n nSize = out0\n mSize = out1\n pSize = np.shape( inArray )[1]\n outArray = np.zeros( (nSize,mSize,pSize) )\n for i in range(nSize):\n for j in range(mSize):\n outArray[i,j,:] = inArray[(j+i*mSize),:]\n return outArray\n\ndef reduceDim( trainData, testData, nComponents ):\n '''Takes in an image for each subject and returns\n a low dimensional representation for each subject '''\n outDim0 = np.shape( trainData )[0]\n trainOutDim1 = np.shape( trainData )[1]\n testOutDim1 = np.shape( testData )[1]\n trainX = prepareDataInput( trainData )\n testX = prepareDataInput( testData )\n model = produceEncoding( trainX, nComponents )\n lowDimTrainData = model.transform( trainX )\n lowDimTestData = model.transform( testX )\n lowDimTrainOutput = prepareDataOutput( lowDimTrainData, \\\n outDim0, trainOutDim1 )\n lowDimTestOutput = prepareDataOutput( lowDimTestData, \\\n outDim0, testOutDim1 )\n return lowDimTrainOutput, lowDimTestOutput, \\\n model.reconstruction_err_, model.components_\n\ndef main():\n nComponents = 50\n # Import a dataset for testing\n Faces = data.fetch_olivetti_faces()\n Images = Faces.images\n trainData = Images[:100,:,:]\n testData = Images[100:,:,:]\n # Produce a low dimensional representation\n lowDimTrainData, lowDimTestData = reduceDim( trainData, testData, \\\n nComponents )\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"FEMs/NMF/nmf.py","file_name":"nmf.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"401686279","text":"import os\nimport cv2\nimport time\nimport pickle\n\n\n\ndef save_object(obj, filename):\n\t\"\"\"Save object to a pickle file\"\"\"\n\twith open(filename, 'wb') as output: # Overwrites any existing file.\n\t\tpickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_object(filename):\n\t\"\"\"Load object from a pickle file\"\"\"\n\twith open(filename, 'rb') as input:\n\t\treturn pickle.load(input)\n\n\ndef get_video_writer(filename, width, height):\n\t\"\"\"Return a video writer set to write to filename\"\"\"\n\t# Default resolutions of the frame are obtained.\n\tframe_width = int(width)\n\tframe_height = int(height)\n\treturn cv2.VideoWriter(filename,cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))\n\n\ndef record(camera, filename, length):\n\t\"\"\"Record a video from 
camera\"\"\"\n\turl = camera.url.rsplit(\"/\",1)[0] + \"/video\"\n\n\tprint(\"Recording video on \",camera.name)\n\tprint(\"Reading from url\",url)\n\tprint(\"Saving video to\", filename)\n\n\tcap = cv2.VideoCapture(url)\n\n\twidth = cap.get(3)\n\theight = cap.get(4)\n\tout = get_video_writer('output.avi', width, height)\n\n\t# Check if camera opened successfully\n\tif (cap.isOpened() == False):\n\t\traise ValueError(\"Unable to read camera feed\")\n\n\tstart = time.time()\n\tduration = time.time() - start\n\n\twhile duration < length:\n\t\tret, frame = cap.read()\n\t\tif ret == True:\n\t\t\t# Write the frame into the file 'output.avi'\n\t\t\tout.write(frame)\n\t\t\t# Display the resulting frame\n\t\t\tcv2.imshow('frame',frame)\n\t\t\t# Press Q on keyboard to stop recording\n\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\tbreak\n\t\t# Break the loop\n\t\telse:\n\t\t\tbreak\n\t\t# Update the running duration\n\t\tduration = time.time() - start\n\n\t# When everything done, release the video capture and video write objects\n\tcap.release()\n\tout.release()\n\n\t# Closes all the frames\n\tcv2.destroyAllWindows()\n\n\treturn filename","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"652912427","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ___\n# \n# \n# ___\n#
Content Copyright by Pierian Data
\n\n# # Errors and Exceptions Homework\n\n# ### Problem 1\n# Handle the exception thrown by the code below by using try and except blocks.\n\n# In[1]:\n\n\nfor i in ['a','b','c']:\n try:\n print(i**2)\n except:\n print(\"Not an integer\")\n\n\n# ### Problem 2\n# Handle the exception thrown by the code below by using try and except blocks. Then use a finally block to print 'All Done.'\n\n# In[2]:\n\n\nx = 5\ny = 0\ntry:\n z = x/y\nexcept:\n print(\"Zero divided by error\")\nfinally:\n print(\"All done\")\n\n\n# ### Problem 3\n# Write a function that asks for an integer and prints the square of it. Use a while loop with a try, except, else block to account for incorrect inputs.\n\n# In[3]:\n\n\ndef ask():\n while True:\n try:\n n=int(input(\"Input an integer:\"))\n except:\n print(\"An error occurred! Please try again!\")\n else:\n print(\"Thank you, your number squared is:\",n**2)\n\n\n# In[ ]:\n\n\nask()\n\n\n# # Great Job!\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"02-Errors and Exceptions Homework (1).py","file_name":"02-Errors and Exceptions Homework (1).py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"299816075","text":"__author__ = 'Nadia Aly'\nfrom array_2d import Array2D\nfrom arrayadt import Array\nfrom _arrayIterator import _ArrayIterator\n\n\nclass PPMImage():\n#\n def __init__(self, w, h):\n\n self.width = w\n self.height = h\n self.flag_array = Array2D(self.height,self.width)\n\n def __setitem__(self, key, value):\n\n self.flag_array[key] = value\n\n def writeToFile(self,output):\n\n # write header - looks most like the ppm guidelines\n header = bytes(\"P6\\n%d %d 255\\n\" % (self.width, self.height),'ascii')\n write_flag = open(output,mode='wb')\n write_flag.write(header)\n for i in range(self.height):\n for j in range(self.width):\n r, g, b = self.flag_array[i,j]\n r.to_bytes(1,'big')\n g.to_bytes(1,'big')\n b.to_bytes(1,'big')\n k = Array(3)\n k[0] = r\n k[1] = g\n k[2] = b\n working = bytearray(k)\n write_flag.write(working)\n\n\n\n","sub_path":"WriteCountryFlags/ppmimageadt.py","file_name":"ppmimageadt.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"426049058","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# 2010-2013 Nico Schottelius (nico-cdist at schottelius.org)\n#\n# This file is part of cdist.\n#\n# cdist is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cdist is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with cdist. 
If not, see .\n#\n#\n\nimport logging\nimport sys\n\n\n# Define additional cdist logging levels.\nlogging.OFF = logging.CRITICAL + 10 # disable logging\nlogging.addLevelName(logging.OFF, 'OFF')\n\nlogging.VERBOSE = logging.INFO - 5\nlogging.addLevelName(logging.VERBOSE, 'VERBOSE')\n\n\ndef _verbose(msg, *args, **kwargs):\n logging.log(logging.VERBOSE, msg, *args, **kwargs)\n\n\nlogging.verbose = _verbose\n\nlogging.TRACE = logging.DEBUG - 5\nlogging.addLevelName(logging.TRACE, 'TRACE')\n\n\ndef _trace(msg, *args, **kwargs):\n logging.log(logging.TRACE, msg, *args, **kwargs)\n\n\nlogging.trace = _trace\n\n\nclass DefaultLog(logging.Logger):\n\n FORMAT = '%(levelname)s: %(message)s'\n\n class StdoutFilter(logging.Filter):\n def filter(self, rec):\n return rec.levelno != logging.ERROR\n\n class StderrFilter(logging.Filter):\n def filter(self, rec):\n return rec.levelno == logging.ERROR\n\n def __init__(self, name):\n super().__init__(name)\n\n formatter = logging.Formatter(self.FORMAT)\n\n self.addFilter(self)\n\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.addFilter(self.StdoutFilter())\n stdout_handler.setLevel(logging.TRACE)\n stdout_handler.setFormatter(formatter)\n\n stderr_handler = logging.StreamHandler(sys.stderr)\n stderr_handler.addFilter(self.StderrFilter())\n stderr_handler.setLevel(logging.ERROR)\n stderr_handler.setFormatter(formatter)\n\n self.addHandler(stdout_handler)\n self.addHandler(stderr_handler)\n\n def filter(self, record):\n \"\"\"Prefix messages with logger name\"\"\"\n\n record.msg = self.name + \": \" + str(record.msg)\n\n return True\n\n def verbose(self, msg, *args, **kwargs):\n self.log(logging.VERBOSE, msg, *args, **kwargs)\n\n def trace(self, msg, *args, **kwargs):\n self.log(logging.TRACE, msg, *args, **kwargs)\n\n\nclass ParallelLog(DefaultLog):\n FORMAT = '%(levelname)s: [%(process)d]: %(message)s'\n\n\ndef setupDefaultLogging():\n del logging.getLogger().handlers[:]\n logging.setLoggerClass(DefaultLog)\n\n\ndef setupParallelLogging():\n del logging.getLogger().handlers[:]\n logging.setLoggerClass(ParallelLog)\n\n\nsetupDefaultLogging()\n","sub_path":"cdist/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"438266054","text":"HOMBRE = 'hombre'\nMUJER = 'mujer'\n\nlista_grupo_mujeres = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',)\nlista_grupo_hombres = ('o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', )\n\nnombre = input(\"Nombre: \")\nsexo = input(\"Sexo: \")\n\nprimer_letra = nombre[0].lower()\nsexo = sexo.lower()\n\nif (\n sexo == MUJER and primer_letra in lista_grupo_mujeres\n) or (\n sexo == HOMBRE and primer_letra in lista_grupo_hombres\n):\n print(\"GRUPO A\")\nelse:\n print(\"GRUPO B\")","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"163156688","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport final_cnn_load as fcl\n\ndef findGEI(preProcessedData):\n \n GEIs = []\n testDf = pd.DataFrame()\n testDf = pd.DataFrame.from_dict(preProcessedData, dtype = 'uint32')\n \n for d in preProcessedData:\n tempArr = np.asarray(d.get(\"cycleImgs\"))\n GEIs.append(np.mean(tempArr, axis = 0))\n\n GEIs = np.asarray(GEIs) \n subIds = testDf['usr_id'].to_numpy(dtype = 
'int32')\n \n return GEIs, subIds\n\n\n# In[2]:\n\n\nfrom sklearn.model_selection import train_test_split\n\ndef getFeatures(preProcessedData):\n \n combinedGEIs, subIds = findGEI(preProcessedData)\n \n X_train, X_test, y_train, y_test = train_test_split(combinedGEIs, subIds, test_size=0.30, random_state=42)\n \n return X_train, X_test, y_train, y_test\n\n\n# In[3]:\n\n\ndef callModel(preProcessedData):\n if preProcessedData[0].get(\"usr_id\") == 0:\n GEI, _ = findGEI(preProcessedData)\n return fcl.predict_model(GEI)\n #return 10000009\n else:\n X_train, X_test, y_train, y_test = getFeatures(preProcessedData)\n return 10000009\n# probVectorORClass = CNN(X_train, X_test, y_train, y_test) #final list of probabilities/final classification\n \n# return probVectorORClass\n \n\n","sub_path":"FeatrueExtractionforNewUsers.py","file_name":"FeatrueExtractionforNewUsers.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"413127292","text":"\"\"\"Resolwe custom serializer fields.\"\"\"\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils.encoding import smart_text\n\nfrom rest_framework.relations import RelatedField\n\nfrom resolwe.flow.models import DescriptorSchema\nfrom resolwe.permissions.shortcuts import get_objects_for_user\nfrom resolwe.permissions.utils import get_full_perm\n\nfrom .descriptor import DescriptorSchemaSerializer\n\n\nclass ResolweSlugRelatedField(RelatedField):\n \"\"\"\n Resolwe specific implementation of SlugRelatedField.\n\n A read-write field that represents the target of the relationship\n by a unique combination of 'slug' and 'version' attributes.\n\n This is a modification of rest_framework.relations.SlugRelatedField.\n Since slug is not unique, (but combination of slug and version is),\n we filter objects by slug, by permissions and return object with\n highest version.\n\n \"\"\"\n\n default_error_messages = {\n 'does_not_exist': ('Invalid {model_name} {slug_name} \"{value}\" - object does not exist.'),\n 'invalid': ('Invalid value.'),\n }\n\n def __init__(self, slug_field='slug', **kwargs):\n \"\"\"Initialize attributes.\"\"\"\n self.slug_field = slug_field\n super().__init__(**kwargs)\n\n def to_internal_value(self, data):\n \"\"\"Convert to internal value.\"\"\"\n user = getattr(self.context.get('request'), 'user')\n queryset = self.get_queryset()\n permission = get_full_perm('view', queryset.model)\n try:\n return get_objects_for_user(\n user,\n permission,\n queryset.filter(**{self.slug_field: data}),\n ).latest()\n except ObjectDoesNotExist:\n self.fail(\n 'does_not_exist',\n slug_name=self.slug_field,\n value=smart_text(data),\n model_name=queryset.model._meta.model_name, # pylint: disable=protected-access\n )\n except (TypeError, ValueError):\n self.fail('invalid')\n\n def to_representation(self, obj):\n \"\"\"Convert to representation.\"\"\"\n return obj.pk\n\n\nclass NestedDescriptorSchemaSerializer(ResolweSlugRelatedField):\n \"\"\"DescriptorSchema specific implementation of ResolweSlugRelatedField.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize attributes.\"\"\"\n kwargs['queryset'] = DescriptorSchema.objects.all()\n super().__init__(slug_field='slug', **kwargs)\n\n def to_representation(self, obj):\n \"\"\"Convert to representation.\"\"\"\n return DescriptorSchemaSerializer(obj, 
required=self.required).data\n","sub_path":"resolwe/flow/serializers/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"169258328","text":"import pandas as pd\nimport sys\nfrom suds.client import Client as sudsclient\nimport ssl\nimport os\nif \"PYFLASKI\" in os.environ:\n from pyflaski.routines import fuzzy_search\nelse:\n from flaski.routines import fuzzy_search\n\ndavid_categories = [\n 'GOTERM_BP_FAT', 'GOTERM_CC_FAT', 'GOTERM_MF_FAT', 'KEGG_PATHWAY',\n 'BIOCARTA', 'PFAM', 'PROSITE' ]\n\ndavid_fields = [\n 'categoryName', 'termName', 'listHits', 'percent',\n 'ease', 'geneIds', 'listTotals', 'popHits', 'popTotals',\n 'foldEnrichment', 'bonferroni', 'benjamini', 'afdr']\n# include:\n# 'fisher'\n# 'termName' to 'term' and 'term_name'\n\nDEBUG_GENES=\"ENSMUSG00000092622,ENSMUSG00000004415,ENSMUSG00000017144,ENSMUSG00000028972,ENSMUSG00000031026,ENSMUSG00000006360,ENSMUSG00000039106,ENSMUSG00000038932,\\\nENSMUSG00000040629,ENSMUSG00000044254,ENSMUSG00000060675,ENSMUSG00000037465,ENSMUSG00000033998,ENSMUSG00000030785,ENSMUSG00000042808,ENSMUSG00000034612,ENSMUSG00000032883,\\\nENSMUSG00000037820,ENSMUSG00000052955,ENSMUSG00000005892,ENSMUSG00000086228,ENSMUSG00000035504,ENSMUSG00000074063,ENSMUSG00000085682,ENSMUSG00000048376,ENSMUSG00000018865,\\\nENSMUSG00000025104,ENSMUSG00000022763,ENSMUSG00000030800,ENSMUSG00000021226,ENSMUSG00000038188,ENSMUSG00000038507,ENSMUSG00000014776,ENSMUSG00000029151,ENSMUSG00000030549,\\\nENSMUSG00000063430,ENSMUSG00000021194,ENSMUSG00000028836,ENSMUSG00000003849,ENSMUSG00000017493,ENSMUSG00000001506,ENSMUSG00000059991,ENSMUSG00000058454,ENSMUSG00000024962,\\\nENSMUSG00000020042,ENSMUSG00000037035,ENSMUSG00000058301,ENSMUSG00000058741,ENSMUSG00000039814,ENSMUSG00000026807,ENSMUSG00000046607,ENSMUSG00000004341,ENSMUSG00000038291,\\\nENSMUSG00000070000,ENSMUSG00000029718,ENSMUSG00000026114,ENSMUSG00000032946,ENSMUSG00000022505,ENSMUSG00000034450,ENSMUSG00000067261,ENSMUSG00000022432,ENSMUSG00000022048,\\\nENSMUSG00000032494,ENSMUSG00000026418,ENSMUSG00000051455,ENSMUSG00000018411,ENSMUSG00000009596,ENSMUSG00000022469,ENSMUSG00000087283,ENSMUSG00000073779,ENSMUSG00000031379,\\\nENSMUSG00000034573,ENSMUSG00000008090,ENSMUSG00000046500,ENSMUSG00000013418,ENSMUSG00000028760,ENSMUSG00000003848,ENSMUSG00000040428,ENSMUSG00000004891,ENSMUSG00000030350,\\\nENSMUSG00000003037,ENSMUSG00000055553,ENSMUSG00000034112,ENSMUSG00000025196,ENSMUSG00000034324,ENSMUSG00000026775,ENSMUSG00000056537,ENSMUSG00000029168,ENSMUSG00000031410,\\\nENSMUSG00000034880,ENSMUSG00000034731,ENSMUSG00000031584,ENSMUSG00000084807,ENSMUSG00000031861,ENSMUSG00000022265,ENSMUSG00000031438,ENSMUSG00000033658,ENSMUSG00000059456,\\\nENSMUSG00000042249,ENSMUSG00000024331,ENSMUSG00000034807,ENSMUSG00000030747,ENSMUSG00000031660,ENSMUSG00000023800,ENSMUSG00000070880,ENSMUSG00000023045,ENSMUSG00000052724,\\\nENSMUSG00000061815,ENSMUSG00000032068,ENSMUSG00000030310,ENSMUSG00000013766,ENSMUSG00000063903,ENSMUSG00000023951,ENSMUSG00000030137,ENSMUSG00000015994,ENSMUSG00000040624,\\\nENSMUSG00000048644,ENSMUSG00000038840,ENSMUSG00000032015,ENSMUSG00000028949,ENSMUSG00000037971,ENSMUSG00000048371,ENSMUSG00000047264,ENSMUSG00000015243,ENSMUSG00000039865,\\\nENSMUSG00000031683,ENSMUSG00000032643,ENSMUSG00000074593,ENSMUSG00000032540,ENSMUSG00000040280,ENSMUSG00000024036,ENSMUSG00000074365,ENSMUSG00000021266,ENSMUSG00000104968,\\\nENSMUSG00000006205,ENSMUSG00000043419,ENSMUSG00000032020,E
NSMUSG00000039395,ENSMUSG00000062939,ENSMUSG00000031985,ENSMUSG00000034486,ENSMUSG00000034863,ENSMUSG00000047502,\\\nENSMUSG00000050737,ENSMUSG00000024012,ENSMUSG00000008892,ENSMUSG00000015652,ENSMUSG00000022178,ENSMUSG00000048373,ENSMUSG00000022292,ENSMUSG00000019312,ENSMUSG00000039831,\\\nENSMUSG00000026458,ENSMUSG00000020122,ENSMUSG00000031924,ENSMUSG00000004565,ENSMUSG00000037669,ENSMUSG00000005267,ENSMUSG00000002949,ENSMUSG00000048988,ENSMUSG00000053856,\\\nENSMUSG00000090363,ENSMUSG00000009670,ENSMUSG00000056515,ENSMUSG00000036442,ENSMUSG00000031751,ENSMUSG00000030263,ENSMUSG00000022040,ENSMUSG00000031749,ENSMUSG00000038742,\\\nENSMUSG00000070780,ENSMUSG00000070708,ENSMUSG00000003808,ENSMUSG00000037997,ENSMUSG00000026773,ENSMUSG00000022099,ENSMUSG00000081593,ENSMUSG00000045467,ENSMUSG00000031509,\\\nENSMUSG00000031672,ENSMUSG00000030413,ENSMUSG00000042757,ENSMUSG00000031508,ENSMUSG00000022180,ENSMUSG00000037355,ENSMUSG00000035561,ENSMUSG00000106647,ENSMUSG00000063049,\\\nENSMUSG00000028785,ENSMUSG00000031453,ENSMUSG00000111147,ENSMUSG00000003283,ENSMUSG00000063488,ENSMUSG00000046774,ENSMUSG00000036054,ENSMUSG00000024042,ENSMUSG00000039157,\\\nENSMUSG00000038060,ENSMUSG00000030283,ENSMUSG00000038521,ENSMUSG00000038393,ENSMUSG00000030772,ENSMUSG00000030428,ENSMUSG00000041180,ENSMUSG00000031729,ENSMUSG00000054850,\\\nENSMUSG00000025931,ENSMUSG00000039384,ENSMUSG00000022479,ENSMUSG00000029287,ENSMUSG00000025743,ENSMUSG00000042386,ENSMUSG00000096210,ENSMUSG00000050288,ENSMUSG00000019261,\\\nENSMUSG00000040537,ENSMUSG00000026185,ENSMUSG00000029761,ENSMUSG00000027071,ENSMUSG00000005705,ENSMUSG00000008450,ENSMUSG00000018604,ENSMUSG00000060038,ENSMUSG00000006585,\\\nENSMUSG00000086236,ENSMUSG00000054408,ENSMUSG00000029122,ENSMUSG00000025742,ENSMUSG00000004319,ENSMUSG00000052675,ENSMUSG00000031948,ENSMUSG00000081044,ENSMUSG00000039830,\\\nENSMUSG00000030411,ENSMUSG00000045010,ENSMUSG00000039616,ENSMUSG00000011837,ENSMUSG00000022211,ENSMUSG00000001472,ENSMUSG00000000738,ENSMUSG00000042659,ENSMUSG00000071076,\\\nENSMUSG00000031838,ENSMUSG00000020256,ENSMUSG00000028017,ENSMUSG00000063659,ENSMUSG00000046718,ENSMUSG00000032715,ENSMUSG00000023495,ENSMUSG00000099370,ENSMUSG00000031486,\\\nENSMUSG00000038292,ENSMUSG00000031760,ENSMUSG00000007950,ENSMUSG00000039617,ENSMUSG00000057672,ENSMUSG00000031622,ENSMUSG00000025432,ENSMUSG00000055835,ENSMUSG00000031665,\\\nENSMUSG00000008206,ENSMUSG00000063018,ENSMUSG00000091568,ENSMUSG00000033931,ENSMUSG00000021701,ENSMUSG00000022016,ENSMUSG00000023995,ENSMUSG00000030630,ENSMUSG00000032796,\\\nENSMUSG00000029603,ENSMUSG00000048126,ENSMUSG00000053604,ENSMUSG00000097757,ENSMUSG00000087084,ENSMUSG00000018796,ENSMUSG00000037103,ENSMUSG00000017652,ENSMUSG00000020184,\\\nENSMUSG00000050914,ENSMUSG00000031765,ENSMUSG00000068758,ENSMUSG00000061126,ENSMUSG00000004952,ENSMUSG00000031731,ENSMUSG00000022754,ENSMUSG00000030523,ENSMUSG00000002668\"\ndef debug_david(user,DEBUG_GENES=DEBUG_GENES, ids=None):\n ssl._create_default_https_context = ssl._create_unverified_context\n url = 'https://david.ncifcrf.gov/webservice/services/DAVIDWebService?wsdl'\n client = sudsclient(url)\n client.wsdl.services[0].setlocation('https://david.ncifcrf.gov/webservice/services/DAVIDWebService.DAVIDWebServiceHttpSoap11Endpoint/')\n client_auth = client.service.authenticate(user)\n if not ids:\n ids=DEBUG_GENES\n database=\"ENSEMBL_GENE_ID\"\n name=\"target\"\n categories=\"GOTERM_BP_FAT,GOTERM_CC_FAT,GOTERM_MF_FAT,PFAM,KEGG_PATHWAY,OMIM_DISEASE\"\n p=0.1\n n=2\n size = 
client.service.addList(ids, database, name, 0) #| inputListIds,idType,listName,listType)\n client_categories = client.service.setCategories(categories)\n client_report = client.service.getChartReport(p, n)\n size_report = len(client_report)\n report=\"Success: \"+str(size)+\"; \"+str(size_report)\n return report\n\n\ndef run_david(pa, path_to_ensembl_maps=\"/flaski/data/david\"):\n\n\n #database, categories, user, ids, ids_bg = None, name = '', name_bg = '', verbose = False, p = 0.1, n = 2):\n # Modified from https://david.ncifcrf.gov/content.jsp?file=WS.html\n # by courtesy of HuangYi @ 20110424\n\n \"\"\"Queries the DAVID database for an enrichment analysis\n Check https://david.ncifcrf.gov/content.jsp?file=DAVID_API.html for database == \"type\" tag and categories == \"annot\" tag.\n\n Args:\n pa (dict): A dictionary of the style { \"argument\":\"value\"} as outputted by `figure_defaults`.\n\n Returns:\n None if no ids match the queried database, or a Pandas DataFrame with results.\n\n \"\"\"\n\n database=pa[\"database_value\"]\n categories_=[ s for s in list( pa.keys() ) ]\n categories_=[ s for s in categories_ if \"categories_\" in s ]\n categories_=[ s for s in categories_ if \"_value\" in s ]\n categories=[]\n for k in categories_:\n categories=categories+pa[k]\n categories=\",\".join(categories)\n user=pa[\"user\"]\n ids=pa[\"ids\"].split(\"\\n\")\n ids=[ s.rstrip(\"\\r\").strip(\" \") for s in ids if s != \" \"]\n ids=[ s for s in ids if s != \" \"]\n ids=[ s for s in ids if len(s) > 0 ]\n ids=[ s.split(\"\\t\") for s in ids ]\n idsdf=pd.DataFrame(ids)\n idsdf[0]=idsdf[0].apply( lambda x: str(x).split(\";\")[0] )\n\n names_dbs=[\"name_hsa_ensembl\", \"name_mus_ensembl\", \"name_cel_ensembl\",\"name_dros_ensembl\" ]\n if database in names_dbs:\n file_dic={\"name_hsa_ensembl\":\"Homo_sapiens.GRCh38.92.tsv\", \"name_mus_ensembl\":\"Mus_musculus.GRCm38.92.tsv\", \"name_cel_ensembl\":\"Caenorhabditis_elegans.WBcel235.92.tsv\",\"name_dros_ensembl\":\"Drosophila_melanogaster.BDGP6.28.92.tsv\"}\n id_name=pd.read_csv(path_to_ensembl_maps+\"/\"+file_dic[database],sep=\"\\t\")\n db_names=id_name[\"gene_name\"].tolist()\n query_names=idsdf[0].tolist()\n query_names=\",\".join(query_names)\n found_values, emsg=fuzzy_search(query_names,db_names)\n if emsg:\n return None, None, emsg\n newcol=idsdf.columns.tolist()[-1]+1\n id_name[\"gene_name\"]=id_name[\"gene_name\"].apply(lambda x: str(x).lower() )\n id_name.index=id_name[\"gene_name\"].tolist()\n id_name=id_name.to_dict()[\"gene_id\"]\n idsdf[newcol]=idsdf[0]\n idsdf[0]=idsdf[0].apply(lambda x: id_name[ str(x).lower() ])\n\n\n # insert mapping of ensembl gene name to gene id here\n\n annotations=idsdf.columns.tolist()\n ids=idsdf[0].tolist()\n ids_map={}\n if len(annotations) > 1:\n idsdf[0]=idsdf[0].apply(lambda x: x.upper() )\n idsdf.index=idsdf[0].tolist()\n idsdf=idsdf.drop([0],axis=1)\n ids_map=idsdf.to_dict()\n \n if \" \".join( pa[\"ids_bg\"].split(\" \")[:12] ) != \"Leave empty if you want to use all annotated genes for your\":\n ids_bg=pa[\"ids_bg\"].split(\"\\n\")\n ids_bg=[ s.rstrip(\"\\r\").strip(\" \") for s in ids_bg ]\n ids_bg=[ s for s in ids_bg if s != \" \"]\n ids_bg=[ s for s in ids_bg if len(s) > 0 ]\n if len(ids_bg) == 0:\n ids_bg = None\n else:\n if database in names_dbs:\n file_dic={\"name_hsa_ensembl\":\"Homo_sapiens.GRCh38.92.tsv\", \"name_mus_ensembl\":\"Mus_musculus.GRCm38.92.tsv\", \"name_cel_ensembl\":\"Caenorhabditis_elegans.WBcel235.92.tsv\",\"name_dros_ensembl\":\"Drosophila_melanogaster.BDGP6.92.tsv\"}\n 
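# --- illustrative aside (not part of the original david.py record; a minimal sketch under stated assumptions, not the author's code) ---
# The statements below repeat, for the user-supplied background list, the same gene-name -> Ensembl-id
# translation already applied to the target list: fuzzy_search() first validates the names against the
# Ensembl table (returning an error message on failure), then the table is re-indexed by lower-cased
# gene_name so each background name can be looked up directly. Assuming a DataFrame with the same
# gene_name/gene_id columns as the shipped .tsv maps, the core lookup is equivalent to:
#
#     lookup = dict(zip(id_name["gene_name"].str.lower(), id_name["gene_id"]))
#     ids_bg = [lookup[name.lower()] for name in ids_bg]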
id_name=pd.read_csv(path_to_ensembl_maps+\"/\"+file_dic[database],sep=\"\\t\")\n                id_name_=id_name.copy()\n                db_names=id_name[\"gene_name\"].tolist()\n                query_names=\",\".join(ids_bg)\n                found_values, emsg=fuzzy_search(query_names,db_names)\n                if emsg:\n                    return None, None, emsg\n                id_name[\"gene_name\"]=id_name[\"gene_name\"].apply(lambda x: str(x).lower() )\n                id_name.index=id_name[\"gene_name\"].tolist()\n                id_name=id_name.to_dict()[\"gene_id\"]\n                ids_bg=[ id_name[ str(x).lower() ] for x in ids_bg ]\n                id_name_=id_name_[ id_name_[\"gene_id\"].isin(ids_bg) ]\n                id_name_[\"gene_id\"]=id_name_[\"gene_id\"].apply(lambda x: str(x).upper() )\n                id_name_.index=id_name_[\"gene_id\"].tolist()\n                id_name_=id_name_.to_dict()[\"gene_name\"]\n            else:\n                id_name_=None\n\n            # bg_gene_names= keep on here\n\n    else:\n        ids_bg=None\n    name=pa[\"name\"]\n    if ids_bg is not None:\n        name_bg=pa[\"name_bg\"]\n    else:\n        name_bg=\"\"\n\n    p=pa[\"p\"]\n    n=pa[\"n\"] \n    #, categories, user, ids, ids_bg = None, name = '', name_bg = '', verbose = False, p = 0.1, n = 2\n\n    verbose=False\n    ids = ','.join([str(i) for i in ids])\n    use_bg = 0\n\n    if database in names_dbs:\n        database=\"ENSEMBL_GENE_ID\"\n\n\n    # print(\"Testing\")\n    # test=debug_david(pa[\"user\"],ids=ids)\n    # print(test)\n\n    if ids_bg:\n        ids_bg = ','.join([str(i) for i in ids_bg])\n\n\n    ssl._create_default_https_context = ssl._create_unverified_context\n    url = 'https://david.ncifcrf.gov/webservice/services/DAVIDWebService?wsdl'\n    try:\n        client = sudsclient(url)\n    except:\n        return None, None, \"Could not connect to DAVID. Server might be down.\"\n\n    client.wsdl.services[0].setlocation('https://david.ncifcrf.gov/webservice/services/DAVIDWebService.DAVIDWebServiceHttpSoap11Endpoint/')\n    try:\n        client_auth = client.service.authenticate(user)\n    except:\n        return None, None, \"Could not connect to DAVID. Server might be down.\"\n    \n    if str(client_auth) == \"Failed. For user registration, go to http://david.abcc.ncifcrf.gov/webservice/register.htm\" :\n        return None, None, str(client_auth)\n    if verbose:\n        print('User Authentication:', client_auth)\n        sys.stdout.flush()\n\n    # if ids_bg :\n    #     size = client.service.addList(ids_bg, database, name, 0)\n    #     if float(size) > float(0):\n    #         client_report=client.service.getListReport()\n    #         bg_mapped=[]\n    #         for r in client_report:\n    #             d = dict(r)\n    #             bg_mapped.append(d[\"values\"][0])\n    #         bg_not_mapped=[ s for s in ids_bg.split(\",\") if s not in bg_mapped ]\n\n    size = client.service.addList(ids, database, name, 0) #| inputListIds,idType,listName,listType)\n    report_stats=[['Mapping rate of ids: ', str(size)]]\n    if verbose:\n        print('Mapping rate of ids: ', str(size))\n        sys.stdout.flush()\n    if float(size) <= float(0):\n        msg='Mapping rate of ids: %s.' 
%str(size)\n return None, None, msg\n\n # client_report=client.service.getListReport()\n # mapped=[]\n # for r in client_report:\n # d = dict(r)\n # mapped.append(d[\"values\"][0])\n # not_mapped=[ s for s in ids.split(\",\") if s not in mapped ]\n\n #print(\"Finished retrieving list report.\")\n #sys.stdout.flush()\n\n if ids_bg:\n #print(\"User given BG.\")\n #sys.stdout.flush()\n size_bg = client.service.addList(ids_bg, database, name_bg, 1)\n report_stats.append(['Mapping rate of background ids: ', str(size_bg)])\n if verbose:\n print('Mapping rate of background ids: ', str(size_bg))\n sys.stdout.flush()\n if float(size_bg) <= float(0):\n msg='Mapping rate of background ids: %s' %str(size_bg)\n return None, None, msg\n\n client_categories = client.service.setCategories(categories)\n report_stats.append(['Categories used: ', client_categories])\n if verbose:\n print('Categories used: ', client_categories)\n sys.stdout.flush()\n client_report = client.service.getChartReport(p, n)\n size_report = len(client_report)\n report_stats.append(['Records reported: ', str(size_report)])\n if verbose:\n print('Records reported: ', str(size_report))\n sys.stdout.flush()\n\n def get_map(x,ids_map):\n genes=x.split(\", \")\n genes=[ str(ids_map[gene.upper()]) for gene in genes ]\n genes=\", \".join(genes)\n return genes\n\n if size_report > 0:\n df = []\n for r in client_report:\n d = dict(r)\n line = []\n for f in david_fields:\n line.append(str(d[f]).encode('ascii','ignore'))\n df.append(line)\n df = pd.DataFrame(df)\n df.columns=david_fields\n for col in david_fields:\n df[col] = df[col].apply(lambda x: x.decode())\n\n df.columns=[\"Category\",\"Term\",\"Count\",\"%\",\"PValue\",\"Genes\",\"List Total\",\"Pop Hits\",\"Pop Total\",\"Fold Enrichment\",\"Bonferroni\",\"Benjamini\",\"FDR\"]\n \n # insert ensembl gene name to gene id here \n \n if len(list(ids_map.keys())) > 0:\n for annotation in list(ids_map.keys()):\n genes_to_annotation=ids_map[annotation]\n df[\"annotation_%s\" %str(annotation)]=df[\"Genes\"].apply(lambda x:get_map(x,ids_map=genes_to_annotation) )\n \n else:\n df=pd.DataFrame(columns=[\"Category\",\"Term\",\"Count\",\"%\",\"PValue\",\"Genes\",\"List Total\",\"Pop Hits\",\"Pop Total\",\"Fold Enrichment\",\"Bonferroni\",\"Benjamini\",\"FDR\"])\n\n # mapped=pd.DataFrame({ \"target_mapped\":mapped })\n # not_mapped=pd.DataFrame({ \"target_not_mapped\": not_mapped })\n\n # insert ensembl gene name to gene id here \n\n # if len(list(ids_map.keys())) > 0:\n\n # for annotation in list(ids_map.keys()):\n # genes_to_annotation=ids_map[annotation]\n # mapped[\"target_mapped_annotation_%s\" %str(annotation)]=mapped[\"target_mapped\"].apply(lambda x:get_map(x,ids_map=genes_to_annotation) )\n # not_mapped[\"target_not_mapped_annotation_%s\" %str(annotation)]=not_mapped[\"target_not_mapped\"].apply(lambda x:get_map(x,ids_map=genes_to_annotation) )\n\n # mapped=pd.concat([mapped,not_mapped],axis=1)\n\n # if ids_bg:\n # bg_mapped=pd.DataFrame({ \"bg_mapped\":bg_mapped })\n # bg_not_mapped=pd.DataFrame({ \"bg_not_mapped\": bg_not_mapped })\n # if id_name_:\n # bg_mapped[\"bg_mapped_name\"]=bg_mapped[\"bg_mapped\"].apply(lambda x: id_name_[x] )\n # bg_not_mapped[\"bg_not_mapped_name\"]=bg_not_mapped[\"bg_not_mapped\"].apply(lambda x: id_name_[x] )\n\n # # insert ensembl gene name to gene id here \n\n # # if len(list(ids_map.keys())) > 0:\n # # for annotation in list(ids_map.keys()):\n # # genes_to_annotation=ids_map[annotation]\n # # bg_mapped[\"bg_mapped_annotation_%s\" 
%str(annotation)]=bg_mapped[\"bg_mapped\"].apply(lambda x:get_map(x,ids_map=genes_to_annotation) )\n # # bg_not_mapped[\"bg_not_mapped_annotation_%s\" %str(annotation)]=bg_not_mapped[\"bg_not_mapped\"].apply(lambda x:get_map(x,ids_map=genes_to_annotation) )\n \n # mapped=pd.concat([mapped,bg_mapped],axis=1)\n # mapped=pd.concat([mapped,bg_not_mapped],axis=1)\n\n report_stats=pd.DataFrame(report_stats,columns=[\"Field\",\"Value\"])\n\n return df, report_stats, None\n\ndef figure_defaults():\n \"\"\"Generates default DAVID query arguments.\n\n :param database: A string for the database to query, e.g. 'WORMBASE_GENE_ID'\n :param categories: A comma separated string with databases\n :param user: A user ID registered at DAVID for querying\n :param ids: A list with identifiers\n :param name: A string with the name for the query set\n :param ids_bg: A list with the background identifiers to enrich against,\n 'None' for whole set\n :param name_bg: A string with the name for the background set\n :param p: Maximum p value for enrichment of a term\n :param n: Minimum number of genes within a term\n\n Returns:\n dict: A dictionary of the style { \"argument\":\"value\"}\n \"\"\"\n\n # 'GENE_SYMBOL',\n plot_arguments={\n \"database\":['AFFYMETRIX_3PRIME_IVT_ID', 'AFFYMETRIX_EXON_GENE_ID',\n 'AFFYMETRIX_SNP_ID', 'AGILENT_CHIP_ID',\n 'AGILENT_ID', 'AGILENT_OLIGO_ID',\n 'ENSEMBL_GENE_ID',\"name_hsa_ensembl\", \"name_mus_ensembl\", \"name_cel_ensembl\",\"name_dros_ensembl\", 'ENSEMBL_TRANSCRIPT_ID',\n 'ENTREZ_GENE_ID', 'FLYBASE_GENE_ID',\n 'FLYBASE_TRANSCRIPT_ID','GENBANK_ACCESSION',\n 'GENPEPT_ACCESSION', 'GENOMIC_GI_ACCESSION',\n 'PROTEIN_GI_ACCESSION', 'ILLUMINA_ID',\n 'IPI_ID', 'MGI_ID', 'PFAM_ID',\n 'PIR_ACCESSION','PIR_ID','PIR_NREF_ID', 'REFSEQ_GENOMIC',\n 'REFSEQ_MRNA','REFSEQ_PROTEIN','REFSEQ_RNA','RGD_ID',\n 'SGD_ID','TAIR_ID','UCSC_GENE_ID','UNIGENE',\n 'UNIPROT_ACCESSION','UNIPROT_ID','UNIREF100_ID','WORMBASE_GENE_ID',\n 'WORMPEP_ID','ZFIN_ID'],\\\n \"database_value\":'ENSEMBL_GENE_ID',\\\n \"categories_gene_ontology\":['GOTERM_BP_1', 'GOTERM_BP_2', 'GOTERM_BP_3', 'GOTERM_BP_4',\n 'GOTERM_BP_5', 'GOTERM_BP_ALL', 'GOTERM_BP_FAT', 'GOTERM_CC_1',\n 'GOTERM_CC_2', 'GOTERM_CC_3', 'GOTERM_CC_4', 'GOTERM_CC_5',\n 'GOTERM_CC_ALL', 'GOTERM_CC_FAT', 'GOTERM_MF_1', 'GOTERM_MF_2',\n 'GOTERM_MF_3', 'GOTERM_MF_4', 'GOTERM_MF_5', 'GOTERM_MF_ALL',\n 'GOTERM_MF_FAT'],\\\n \"categories_gene_ontology_value\": ['GOTERM_BP_FAT','GOTERM_CC_FAT','GOTERM_MF_FAT'],\\\n \"categories_gene_domains\":['BLOCKS_ID', 'COG', 'INTERPRO', 'PDB_ID',\n 'PFAM', 'PIR_ALN','PIR_HOMOLOGY_DOMAIN', 'PIR_SUPERFAMILY',\n 'PRINTS', 'PRODOM', 'PROSITE', 'SCOP_ID',\n 'SMART', 'TIGRFAMS'],\\\n \"categories_gene_domains_value\":[\"PFAM\"],\\\n \"categories_pathways\":['BBID', 'BIOCARTA', 'EC_NUMBER', 'KEGG_COMPOUND', 'KEGG_PATHWAY','KEGG_REACTION'],\\\n \"categories_pathways_value\":['KEGG_PATHWAY'],\\\n \"categories_general_annotations\":['ALIAS_GENE_SYMBOL', 'CHROMOSOME', 'CYTOBAND', 'GENE', 'GENE_SYMBOL', \n 'HOMOLOGOUS_GENE', 'LL_SUMMARY', 'OMIM_ID', 'PIR_SUMMARY', 'PROTEIN_MW',\n 'REFSEQ_PRODUCT', 'SEQUENCE_LENGTH'],\\\n \"categories_general_annotations_value\":[],\\\n \"categories_functional_categories\":['CGAP_EST_QUARTILE', 'CGAP_EST_RANK', 'COG_ONTOLOGY', \n 'PIR_SEQ_FEATURE', 'SP_COMMENT_TYPE', 'SP_PIR_KEYWORDS'],\\\n \"categories_functional_categories_value\":[],\\\n \"categories_protein_protein_interactions\":['BIND', 'DIP', 'HIV_INTERACTION_CATEGORY', \n 'HIV_INTERACTION', 'MINT', 'NCICB_CAPATHWAY'],\\\n 
\"categories_protein_protein_interactions_value\":[],\\\n \"categories_literature\":['GENERIF_SUMMARY','HIV_INTERACTION_PUBMED_ID','PUBMED_ID'],\\\n \"categories_literature_value\":[],\\\n \"categories_disease\":['GENETIC_ASSOCIATION_DB_DISEASE', 'OMIM_DISEASE'],\\\n \"categories_disease_value\":['OMIM_DISEASE'],\\\n \"user\":\"\",\\\n \"ids\":\"Enter target genes here...\",\\\n \"ids_bg\":\"Leave empty if you want to use all annotated genes for your organism\",\\\n \"name\":\"target list\",\\\n \"name_bg\":\"background list\",\\\n \"p\":\"0.1\",\\\n \"n\":\"2\",\\\n \"download_format\":[\"tsv\",\"xlsx\"],\\\n \"download_format_value\":\"xlsx\",\\\n \"download_name\":\"DAVID\",\\\n \"session_downloadn\":\"MySession.DAVID\",\\\n \"inputsessionfile\":\"Select file..\",\\\n \"session_argumentsn\":\"MyArguments.DAVID\",\\\n \"inputargumentsfile\":\"Select file..\"}\n\n return plot_arguments","sub_path":"flaski/apps/main/david.py","file_name":"david.py","file_ext":"py","file_size_in_byte":22058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"634126693","text":"# -*- coding:utf-8 -*-\n\n'''\n爬取糗事百科主页及分页的主要内容\n'''\n\nimport json\nimport re\nfrom multiprocessing import Pool\n\nimport chardet\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests import RequestException\n\nurl = 'http://www.qiushibaike.com'\nheaders = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n}\n\n\ndef get_page_text(url):\n global content\n try:\n content = requests.get(url, headers=headers)\n content.encoding = 'utf-8'\n if content.status_code == 200:\n # print(content.text)\n return content.text\n else:\n return None\n except RequestException as e:\n print('请求错误:' + str(e))\n return None\n\n\ndef get_item_name(names):\n for name in names:\n name = re.findall(r\"\\S+\", name.text)\n return name[0].strip()\n\n\ndef get_item_detail(details):\n for detail in details:\n detail = re.findall(r\"\\S+\", detail.text)\n return \"\".join(detail)\n\n\ndef get_vote_num(vote_nums):\n for num in vote_nums:\n num = re.findall(r\"\\S+\", num.text)\n return num[0]\n\n\ndef parse_item_page(result):\n for item in result:\n if item.select('.thumb'):\n continue\n else:\n names = item.select('.author')\n name = get_item_name(names)\n # print(name)\n details = item.select('.content')\n detail = get_item_detail(details)\n # print(content)\n vote_nums = item.select('.stats-vote')\n vote_num = get_vote_num(vote_nums)\n # print(vote_num)\n yield {\n 'name': name,\n 'content': detail,\n 'vote_num': vote_num\n }\n\n\ndef write_to_file(detail):\n try:\n with open('result.txt', 'a')as f:\n f.write(json.dumps(detail, ensure_ascii=False) + '\\n\\n')\n f.close()\n file = open('result.txt', 'rb')\n data = file.read()\n print(chardet.detect(data))\n except Exception as e:\n print('写入失败:' + str(e))\n\n\ndef get_page_url(page_num):\n if page_num > 1:\n return (url + '/8hr/page/{}/').format(str(page_num))\n else:\n return url\n\n\ndef main(page_num):\n page_url = get_page_url(page_num)\n print(page_url)\n response = get_page_text(page_url)\n soup = BeautifulSoup(response, \"html.parser\")\n result = soup.find_all(id=re.compile('qiushi_tag'))\n for item in parse_item_page(result):\n # print(type(item))\n 
write_to_file(item)\n\n\nif __name__ == '__main__':\n groups = [x for x in range(1, 14)]\n pool = Pool()\n # try:\n pool.map(main, groups)\n # except TypeError:\n pool.close()\n pool.join()\n","sub_path":"test/bs4demo.py","file_name":"bs4demo.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"610776292","text":"# pylint: disable=relative-beyond-top-level,import-outside-toplevel\nimport unittest\nfrom typing import Union, List, Set\n\nfrom optimade.server.config import CONFIG\nfrom optimade.server import mappers\nfrom optimade.server.entry_collections import CI_FORCE_MONGO\n\nfrom .utils import SetClient\n\nMONGOMOCK_OLD = False\nMONGOMOCK_MSG = \"\"\nif not CI_FORCE_MONGO and not CONFIG.use_real_mongo:\n import mongomock\n\n MONGOMOCK_OLD = tuple(\n int(val) for val in mongomock.__version__.split(\".\")[0:3]\n ) <= (3, 19, 0)\n MONGOMOCK_MSG = f\"mongomock version {mongomock.__version__}<=3.19.0 is too old for this test, skipping...\"\n\n\nclass IncludeTests(SetClient, unittest.TestCase):\n \"\"\"Make sure `include` is handled correctly\n\n NOTE: Currently _only_ structures have relationships (references).\n \"\"\"\n\n server = \"regular\"\n\n def _check_response(\n self,\n request: str,\n expected_included_types: Union[List, Set],\n expected_included_resources: Union[List, Set],\n expected_relationship_types: Union[List, Set] = None,\n ):\n try:\n response = self.client.get(request)\n self.assertEqual(\n response.status_code, 200, msg=f\"Request failed: {response.json()}\"\n )\n\n response = response.json()\n response_data = (\n response[\"data\"]\n if isinstance(response[\"data\"], list)\n else [response[\"data\"]]\n )\n\n included_resource_types = list({_[\"type\"] for _ in response[\"included\"]})\n self.assertEqual(\n sorted(expected_included_types),\n sorted(included_resource_types),\n msg=f\"Expected relationship types: {expected_included_types}. \"\n f\"Does not match relationship types in response's included field: {included_resource_types}\",\n )\n\n if expected_relationship_types is None:\n expected_relationship_types = expected_included_types\n relationship_types = set()\n for entry in response_data:\n relationship_types.update(set(entry.get(\"relationships\", {}).keys()))\n self.assertEqual(\n sorted(expected_relationship_types),\n sorted(relationship_types),\n msg=f\"Expected relationship types: {expected_relationship_types}. 
\"\n f\"Does not match relationship types found in response data: {relationship_types}\",\n )\n\n included_resources = [_[\"id\"] for _ in response[\"included\"]]\n self.assertEqual(\n len(included_resources),\n len(expected_included_resources),\n msg=response[\"included\"],\n )\n self.assertEqual(\n sorted(set(included_resources)), sorted(expected_included_resources)\n )\n\n except Exception as exc:\n print(\"Request attempted:\")\n print(f\"{self.client.base_url}{request}\")\n raise exc\n\n def _check_error_response(\n self,\n request: str,\n expected_status: int = None,\n expected_title: str = None,\n expected_detail: str = None,\n ):\n expected_status = 400 if expected_status is None else expected_status\n expected_title = \"Bad Request\" if expected_title is None else expected_title\n super()._check_error_response(\n request, expected_status, expected_title, expected_detail\n )\n\n def test_default_value(self):\n \"\"\"Default value for `include` is 'references'\n\n Test also that passing `include=` equals passing the default value\n \"\"\"\n request = \"/structures\"\n expected_types = [\"references\"]\n expected_reference_ids = [\"dijkstra1968\", \"maddox1988\", \"dummy/2019\"]\n self._check_response(request, expected_types, expected_reference_ids)\n\n def test_empty_value(self):\n \"\"\"An empty value should resolve in no relationships being returned under `included`\"\"\"\n request = \"/structures?include=\"\n expected_types = []\n expected_reference_ids = []\n expected_data_relationship_types = [\"references\"]\n self._check_response(\n request,\n expected_types,\n expected_reference_ids,\n expected_data_relationship_types,\n )\n\n def test_default_value_single_entry(self):\n \"\"\"For single entry. Default value for `include` is 'references'\"\"\"\n request = \"/structures/mpf_1\"\n expected_types = [\"references\"]\n expected_reference_ids = [\"dijkstra1968\"]\n self._check_response(request, expected_types, expected_reference_ids)\n\n def test_empty_value_single_entry(self):\n \"\"\"For single entry. An empty value should resolve in no relationships being returned under `included`\"\"\"\n request = \"/structures/mpf_1?include=\"\n expected_types = []\n expected_reference_ids = []\n expected_data_relationship_types = [\"references\"]\n self._check_response(\n request,\n expected_types,\n expected_reference_ids,\n expected_data_relationship_types,\n )\n\n def test_wrong_relationship_type(self):\n \"\"\"A wrong type should result in a `400 Bad Request` response\"\"\"\n from optimade.server.routers import ENTRY_COLLECTIONS\n\n for wrong_type in (\"test\", '\"\"', \"''\"):\n request = f\"/structures?include={wrong_type}\"\n error_detail = (\n f\"'{wrong_type}' cannot be identified as a valid relationship type. 
\"\n f\"Known relationship types: {sorted(ENTRY_COLLECTIONS.keys())}\"\n )\n self._check_error_response(request, expected_detail=error_detail)\n\n\nclass ResponseFieldTests(SetClient, unittest.TestCase):\n \"\"\"Make sure response_fields is handled correctly\"\"\"\n\n server = \"regular\"\n\n get_mapper = {\n \"links\": mappers.LinksMapper,\n \"references\": mappers.ReferenceMapper,\n \"structures\": mappers.StructureMapper,\n }\n\n def required_fields_test_helper(\n self, endpoint: str, known_unused_fields: set, expected_fields: set\n ):\n \"\"\"Utility function for creating required fields tests\"\"\"\n expected_fields |= (\n self.get_mapper[endpoint].get_required_fields() - known_unused_fields\n )\n expected_fields.add(\"attributes\")\n request = f\"/{endpoint}?response_fields={','.join(expected_fields)}\"\n\n # Check response\n try:\n response = self.client.get(request)\n self.assertEqual(\n response.status_code, 200, msg=f\"Request failed: {response.json()}\"\n )\n\n response = response.json()\n response_fields = set()\n for entry in response[\"data\"]:\n response_fields.update(set(entry.keys()))\n response_fields.update(set(entry[\"attributes\"].keys()))\n self.assertEqual(sorted(expected_fields), sorted(response_fields))\n except Exception as exc:\n print(\"Request attempted:\")\n print(f\"{self.client.base_url}{request}\")\n raise exc\n\n def test_required_fields_links(self):\n \"\"\"Certain fields are REQUIRED, no matter the value of `response_fields`\"\"\"\n endpoint = \"links\"\n illegal_top_level_field = \"relationships\"\n non_used_top_level_fields = {\"links\"}\n non_used_top_level_fields.add(illegal_top_level_field)\n expected_fields = {\"homepage\", \"base_url\", \"link_type\"}\n self.required_fields_test_helper(\n endpoint, non_used_top_level_fields, expected_fields\n )\n\n def test_required_fields_references(self):\n \"\"\"Certain fields are REQUIRED, no matter the value of `response_fields`\"\"\"\n endpoint = \"references\"\n non_used_top_level_fields = {\"links\", \"relationships\"}\n expected_fields = {\"year\", \"journal\"}\n self.required_fields_test_helper(\n endpoint, non_used_top_level_fields, expected_fields\n )\n\n def test_required_fields_structures(self):\n \"\"\"Certain fields are REQUIRED, no matter the value of `response_fields`\"\"\"\n endpoint = \"structures\"\n non_used_top_level_fields = {\"links\"}\n expected_fields = {\"elements\", \"nelements\"}\n self.required_fields_test_helper(\n endpoint, non_used_top_level_fields, expected_fields\n )\n\n\nclass FilterTests(SetClient, unittest.TestCase):\n\n server = \"regular\"\n\n def test_custom_field(self):\n request = '/structures?filter=_exmpl_chemsys=\"Ac\"'\n expected_ids = [\"mpf_1\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_id(self):\n request = \"/structures?filter=id=mpf_2\"\n expected_ids = [\"mpf_2\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_geq(self):\n request = \"/structures?filter=nelements>=9\"\n expected_ids = [\"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_gt(self):\n request = \"/structures?filter=nelements>8\"\n expected_ids = [\"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_rhs_comparison(self):\n request = \"/structures?filter=89\"\n expected_ids = []\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_list_has(self):\n request = '/structures?filter=elements HAS \"Ti\"'\n expected_ids = [\"mpf_3803\", 
\"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_page_limit(self):\n request = '/structures?filter=elements HAS \"Ac\"&page_limit=2'\n expected_ids = [\"mpf_1\", \"mpf_2\"]\n expected_return = 6\n self._check_response(request, expected_ids, expected_return)\n\n request = '/structures?page_limit=2&filter=elements HAS \"Ac\"'\n expected_ids = [\"mpf_1\", \"mpf_2\"]\n expected_return = 6\n self._check_response(request, expected_ids, expected_return)\n\n def test_page_limit_max(self):\n request = f\"/structures?page_limit={CONFIG.page_limit_max + 1}\"\n self._check_error_response(\n request,\n expected_status=403,\n expected_title=\"HTTPException\",\n expected_detail=f\"Max allowed page_limit is {CONFIG.page_limit_max}, you requested {CONFIG.page_limit_max + 1}\",\n )\n\n def test_value_list_operator(self):\n request = \"/structures?filter=dimension_types HAS < 1\"\n self._check_error_response(\n request,\n expected_status=501,\n expected_title=\"NotImplementedError\",\n expected_detail=\"set_op_rhs not implemented for use with OPERATOR. Given: [Token(HAS, 'HAS'), Token(OPERATOR, '<'), 1]\",\n )\n\n def test_has_any_operator(self):\n request = \"/structures?filter=dimension_types HAS ANY > 1\"\n self._check_error_response(\n request,\n expected_status=501,\n expected_title=\"NotImplementedError\",\n expected_detail=\"OPERATOR > inside value_list [Token(OPERATOR, '>'), 1] not implemented.\",\n )\n\n def test_list_has_all(self):\n request = '/structures?filter=elements HAS ALL \"Ba\",\"F\",\"H\",\"Mn\",\"O\",\"Re\",\"Si\"'\n expected_ids = [\"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=elements HAS ALL \"Re\",\"Ti\"'\n expected_ids = [\"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_list_has_any(self):\n request = '/structures?filter=elements HAS ANY \"Re\",\"Ti\"'\n expected_ids = [\"mpf_3819\", \"mpf_3803\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_list_length_basic(self):\n request = \"/structures?filter=elements LENGTH = 9\"\n expected_ids = [\"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=elements LENGTH 9\"\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_list_length(self):\n request = \"/structures?filter=elements LENGTH >= 9\"\n expected_ids = [\"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=structure_features LENGTH > 0\"\n expected_ids = []\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=structure_features LENGTH > 0\"\n expected_ids = []\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=cartesian_site_positions LENGTH > 43\"\n expected_ids = [\"mpf_551\", \"mpf_3803\", \"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=species_at_sites LENGTH > 43\"\n expected_ids = [\"mpf_551\", \"mpf_3803\", \"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=nsites LENGTH > 43\"\n expected_ids = []\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=structure_features LENGTH != 0\"\n error_detail = \"Operator != not implemented for LENGTH filter.\"\n self._check_error_response(\n 
request,\n expected_status=501,\n expected_title=\"NotImplementedError\",\n expected_detail=error_detail,\n )\n\n @unittest.skipIf(MONGOMOCK_OLD, MONGOMOCK_MSG)\n def test_list_has_only(self):\n \"\"\" Test HAS ONLY query on elements.\n\n This test fails with mongomock<=3.19.0 when $size is 1, but works with a real mongo.\n\n TODO: this text and skip condition should be removed once mongomock>3.19.0 has been released, which should\n contain the bugfix for this: https://github.com/mongomock/mongomock/pull/597.\n\n \"\"\"\n\n request = '/structures?filter=elements HAS ONLY \"Ac\", \"Mg\"'\n expected_ids = [\"mpf_23\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=elements HAS ONLY \"Ac\"'\n expected_ids = [\"mpf_1\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_list_correlated(self):\n request = '/structures?filter=elements:elements_ratios HAS \"Ag\":\"0.2\"'\n self._check_error_response(\n request, expected_status=501, expected_title=\"NotImplementedError\"\n )\n # expected_ids = [\"mpf_259\"]\n # self._check_response(request, expected_ids, len(expected_ids))\n\n def test_is_known(self):\n request = \"/structures?filter=nsites IS KNOWN AND nsites>=44\"\n expected_ids = [\"mpf_551\", \"mpf_3803\", \"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=lattice_vectors IS KNOWN AND nsites>=44\"\n expected_ids = [\"mpf_551\", \"mpf_3803\", \"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_aliased_is_known(self):\n request = \"/structures?filter=id IS KNOWN AND nsites>=44\"\n expected_ids = [\"mpf_551\", \"mpf_3803\", \"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures?filter=chemical_formula_reduced IS KNOWN AND nsites>=44\"\n expected_ids = [\"mpf_551\", \"mpf_3803\", \"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = (\n \"/structures?filter=chemical_formula_descriptive IS KNOWN AND nsites>=44\"\n )\n expected_ids = [\"mpf_551\", \"mpf_3803\", \"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_aliased_fields(self):\n request = '/structures?filter=chemical_formula_anonymous=\"A\"'\n expected_ids = [\"mpf_1\", \"mpf_200\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=chemical_formula_anonymous CONTAINS \"A2BC\"'\n expected_ids = [\"mpf_2\", \"mpf_3\", \"mpf_110\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_string_contains(self):\n request = '/structures?filter=chemical_formula_descriptive CONTAINS \"c2Ag\"'\n expected_ids = [\"mpf_3\", \"mpf_2\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_string_start(self):\n request = (\n '/structures?filter=chemical_formula_descriptive STARTS WITH \"Ag2CSNCl\"'\n )\n expected_ids = [\"mpf_259\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_string_end(self):\n request = '/structures?filter=chemical_formula_descriptive ENDS WITH \"NClO4\"'\n expected_ids = [\"mpf_259\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_list_has_and(self):\n request = '/structures?filter=elements HAS \"Ac\" AND nelements=1'\n expected_ids = [\"mpf_1\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_awkward_not_queries(self):\n \"\"\" Test an 
awkward query from the spec examples. It should return all but 2 structures\n in the test data. The test is done in three parts:\n\n - first query the individual expressions that make up the OR,\n - then do an empty query to get all IDs\n - then negate the expressions and ensure that all IDs are returned except\n those from the first queries.\n\n \"\"\"\n expected_ids = [\"mpf_3819\"]\n request = (\n '/structures?filter=chemical_formula_descriptive=\"Ba2NaTi2MnRe2Si8HO26F\" AND '\n 'chemical_formula_anonymous = \"A26B8C2D2E2FGHI\" '\n )\n self._check_response(request, expected_ids, len(expected_ids))\n\n expected_ids = [\"mpf_2\"]\n request = (\n '/structures?filter=chemical_formula_anonymous = \"A2BC\" AND '\n 'NOT chemical_formula_descriptive = \"Ac2AgPb\" '\n )\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = \"/structures\"\n unexpected_ids = [\"mpf_3819\", \"mpf_2\"]\n expected_ids = [\n structure[\"id\"]\n for structure in self.client.get(request).json()[\"data\"]\n if structure[\"id\"] not in unexpected_ids\n ]\n\n request = (\n \"/structures?filter=\"\n \"NOT ( \"\n 'chemical_formula_descriptive = \"Ba2NaTi2MnRe2Si8HO26F\" AND '\n 'chemical_formula_anonymous = \"A26B8C2D2E2FGHI\" OR '\n 'chemical_formula_anonymous = \"A2BC\" AND '\n 'NOT chemical_formula_descriptive = \"Ac2AgPb\" '\n \")\"\n )\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_not_or_and_precedence(self):\n request = '/structures?filter=NOT elements HAS \"Ac\" AND nelements=1'\n expected_ids = [\"mpf_200\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=nelements=1 AND NOT elements HAS \"Ac\"'\n expected_ids = [\"mpf_200\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=NOT elements HAS \"Ac\" AND nelements=1 OR nsites=1'\n expected_ids = [\"mpf_1\", \"mpf_200\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=elements HAS \"Ac\" AND nelements>1 AND nsites=1'\n expected_ids = []\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_brackets(self):\n request = '/structures?filter=elements HAS \"Ac\" AND nelements=1 OR nsites=1'\n expected_ids = [\"mpf_200\", \"mpf_1\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=(elements HAS \"Ac\" AND nelements=1) OR (elements HAS \"Ac\" AND nsites=1)'\n expected_ids = [\"mpf_1\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n def test_filter_on_relationships(self):\n request = '/structures?filter=references.id HAS \"dummy/2019\"'\n expected_ids = [\"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = (\n '/structures?filter=references.id HAS ANY \"dummy/2019\", \"dijkstra1968\"'\n )\n expected_ids = [\"mpf_1\", \"mpf_2\", \"mpf_3819\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=references.id HAS ONLY \"dijkstra1968\"'\n expected_ids = [\"mpf_1\", \"mpf_2\"]\n self._check_response(request, expected_ids, len(expected_ids))\n\n request = '/structures?filter=references.doi HAS ONLY \"10/123\"'\n error_detail = (\n 'Cannot filter relationships by field \"doi\", only \"id\" is supported.'\n )\n self._check_error_response(\n request,\n expected_status=501,\n expected_title=\"NotImplementedError\",\n expected_detail=error_detail,\n )\n\n def _check_response(\n self, request: str, 
expected_ids: Union[List, Set], expected_return: int\n    ):\n        try:\n            response = self.client.get(request)\n            self.assertEqual(\n                response.status_code, 200, msg=f\"Request failed: {response.json()}\"\n            )\n            response = response.json()\n            response_ids = [struct[\"id\"] for struct in response[\"data\"]]\n            self.assertEqual(sorted(expected_ids), sorted(response_ids))\n            self.assertEqual(response[\"meta\"][\"data_returned\"], expected_return)\n        except Exception as exc:\n            print(\"Request attempted:\")\n            print(f\"{self.client.base_url}{request}\")\n            raise exc\n\n    def _check_error_response(\n        self,\n        request: str,\n        expected_status: int = None,\n        expected_title: str = None,\n        expected_detail: str = None,\n    ):\n        expected_status = 500 if expected_status is None else expected_status\n        super()._check_error_response(\n            request, expected_status, expected_title, expected_detail\n        )\n","sub_path":"tests/server/test_query_params.py","file_name":"test_query_params.py","file_ext":"py","file_size_in_byte":22524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"368335267","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('tvdb', '0004_episode_episode_number'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='tvshow',\n            name='poster',\n            field=models.ImageField(upload_to='tvdb'),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"tvdb/migrations/0005_auto_20151013_1524.py","file_name":"0005_auto_20151013_1524.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"550613575","text":"import pandas as pd\nimport numpy as np\nimport lightgbm as lgb\nimport itertools\nimport os\nimport feature_extract as fe\nimport training as training\n\ndef preprocess(df):\n    df = fe.goal_split(df)\n    df = fe.country_encoding(df)\n    df = fe.category1_encoding(df)\n    df = fe.category2_encoding(df)\n    \n    return df\n\ndef mixup(x_train, y_train, alpha=0.4, seed=0):\n    x_mixes, y_mixes = list(), list()\n    index = np.arange(len(x_train))\n    num_sample = 100\n    for i in range(num_sample):\n        np.random.seed(seed * num_sample + i)\n        lam = np.random.beta(alpha, alpha, size=len(x_train))\n        np.random.seed(seed * num_sample + i)\n        np.random.shuffle(index)\n        x_mix = lam.reshape(-1, 1) * x_train.values + (1 - lam).reshape(-1, 1) * x_train.values[index]\n        y_mix = lam * y_train.values + (1 - lam) * y_train.values[index]\n        x_mixes.append(x_mix)\n        y_mixes.append(y_mix)\n\n    x_mixes = np.concatenate(x_mixes)\n    y_mixes = np.concatenate(y_mixes)\n\n    return x_mixes, y_mixes\n\nif __name__ == '__main__':\n    train_df, test_df = fe.read_df()\n    sub_df = pd.read_csv('data/sample_submit.csv', header=None)\n    sub_df.iloc[:, 1] = np.zeros(len(sub_df))\n\n    train_df = preprocess(train_df)\n    test_df = preprocess(test_df)\n    train_df = pd.get_dummies(train_df, drop_first=True, columns=['country_encoding', 'category1_encoding'])\n    test_df = pd.get_dummies(test_df, drop_first=True, columns=['country_encoding', 'category1_encoding'])\n    features = list(train_df.columns)[8:]\n    features.append('duration')\n    target = 'state'\n\n\n    cv = 10\n    train_dfs, valid_dfs, test_dfs = training.cv(train_df, cv)\n    scores = list()\n    params = None\n    tune = True\n    name = 'mixup_baseline'\n    for cv_idx in range(cv):\n        #'prepare'\n        x_train, y_train = train_dfs[cv_idx][features], train_dfs[cv_idx][target]\n        x_valid, y_valid = 
valid_dfs[cv_idx][features], valid_dfs[cv_idx][target]\n x_test, y_test = test_dfs[cv_idx][features], test_dfs[cv_idx][target] \n \n x_train, y_train = mixup(x_train, y_train, seed=cv_idx)\n lgb_train, lgb_valid = lgb.Dataset(x_train, y_train), lgb.Dataset(x_valid, y_valid, free_raw_data=False)\n\n #'train'\n if tune:\n params = training.tuning_mixup(lgb_train, lgb_valid, 100)\n pd.to_pickle(params, 'params/{0}_cv{1}.pkl'.format(name, cv_idx))\n model = training.train_mixup(lgb_train, lgb_valid, params)\n score = training.evaluation_mixup(model, x_test, y_test)\n scores.append(score)\n model.save_model('model/{0}_cv{1}.txt'.format(name, cv_idx), num_iteration=model.best_iteration)\n\n #'predict'\n pred = model.predict(test_df[features])\n pred = np.where(pred < 0.5, 0, 1)\n sub_df.iloc[:, 1] += pred\n sub_df.iloc[:, 1] /= cv\n sub_df.iloc[:, 1] = np.round(sub_df.iloc[:, 1]).astype(np.int8)\n\n\n print(scores)\n print(np.mean(scores))\n\n sub_df.to_csv('result/{0}.csv'.format(name), index=None, header=None)\n","sub_path":"train/mixup_baseline.py","file_name":"mixup_baseline.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"557614921","text":"#!/usr/bin/env python3\n\n# Poly Chimps NFTs\n# custom code used for OpenSea NFT generation (Polygon)\n\nimport os\nimport random\nimport sqlite3\nimport time\nfrom PIL import Image\nfrom random import randint\nfrom random import seed\nfrom random import shuffle\n\n### GLOBAL VARIABLES ###\n\n# create dirs and arrays for each layer\ndirPath = os.path.dirname(os.path.realpath(__file__))\nbgDir = dirPath + \"\\BGs\\\\\"\nmonkeyDir = dirPath + \"\\Monkeys\\\\\"\nattributeDir = dirPath + \"\\Attributes\\\\\"\nsaveDir = dirPath + \"\\PC\\\\\"\n\n# NFT ids for naming\nnftID = 1 # 1-2500\n\n# create DB; making 2 tables for later comparison to see if any 2 rows match\nif os.path.exists(\"PolyChimps.db\"):\n os.remove(\"PolyChimps.db\")\nelse:\n print(\"The file does not exist\")\n\nconn = sqlite3.connect(\"PolyChimps.db\")\ndb = conn.cursor()\ndb.execute('''CREATE TABLE IF NOT EXISTS Chimps\n ([NFTid] string,[ChimpType] string, [Attr1] string,[Attr2] string,\n [Attr3] string,[Attr4] string,[Attr5] string,[Attr6] string)''')\n\ndef init():\n # initialize variables\n global currentAttributes\n\n currentAttributes = []\n\ndef offsetAttribute(attr):\n # some attributes looked better after creation at certain coordinates\n # use (-70,-10) for offset on mouth attribute \"partypipe.png\"\n # use (0,-10) for offset on mouth attribute \"clownnose.png\"\n # use (0,-10) for offset on eye attribute \"roboteyes.png\"\n if attr == \"partypipe.png\":\n x = -70\n y = -10\n elif attr == \"clownnose.png\" or attr == \"roboteyes.png\":\n x = 0\n y = -10\n else:\n x = 0\n y = 0\n \n return x, y\n\ndef attributeWeights(attr, type):\n roll = randint(1,1000)\n if attr == \"Eyes\":\n # alien + zombie have a different eye table than the golden/normal one\n if type == \"Alien\" or type == \"Zombie\":\n if roll <= 370:\n if type == \"Alien\":\n return \"alieneyes.png\"\n else:\n return \"zombieeyes.png\"\n elif roll > 370 and roll <= 440:\n return \"3dglasses.png\"\n elif roll > 440 and roll <= 480:\n if type == \"Alien\":\n return \"alieneyeglass.png\"\n else:\n return \"zombieeyeglass.png\"\n elif roll > 480 and roll <= 530:\n if type == \"Alien\":\n return \"alieneyepatch.png\"\n else:\n return \"zombieeyepatch.png\"\n elif roll > 530 and roll <= 550:\n return \"lasereyes.png\"\n elif 
roll > 550 and roll <= 620:\n return \"nerdglasses.png\"\n elif roll > 620 and roll <= 690:\n return \"partyglasses.png\"\n elif roll > 690 and roll <= 760:\n return \"squareglasses.png\"\n elif roll > 760 and roll <= 830:\n return \"sharpspecs.png\"\n elif roll > 830 and roll <= 900:\n return \"squareshades.png\"\n elif roll > 900 and roll <= 960:\n return \"thuglife.png\"\n else:\n return \"vrheadset.png\"\n else:\n if roll <= 50:\n return \"3dglasses.png\"\n elif roll > 50 and roll <= 175:\n return \"cheerfuleyes.png\"\n elif roll > 175 and roll <= 205:\n return \"eyeglass.png\"\n elif roll > 205 and roll <= 255:\n return \"eyepatch.png\"\n elif roll > 255 and roll <= 275:\n return \"lasereyes.png\"\n elif roll > 275 and roll <= 325:\n return \"nerdglasses.png\"\n elif roll > 325 and roll <= 450:\n return \"lineeyes.png\"\n elif roll > 450 and roll <= 500:\n return \"partyglasses.png\"\n elif roll > 500 and roll <= 580:\n return \"proudeyes.png\"\n elif roll > 580 and roll <= 630:\n return \"squareglasses.png\"\n elif roll > 630 and roll <= 660:\n return \"roboteyes.png\"\n elif roll > 660 and roll <= 710:\n return \"sharpspecs.png\"\n elif roll > 710 and roll <= 790:\n return \"sleepyeyes.png\"\n elif roll > 790 and roll <= 840:\n return \"squareshades.png\"\n elif roll > 840 and roll <= 920:\n return \"staringeyes.png\"\n elif roll > 920 and roll <= 970:\n return \"thuglife.png\"\n else:\n return \"vrheadset.png\"\n elif attr == \"Head\":\n if roll <= 75:\n return \"chefhat.png\"\n elif roll > 75 and roll <= 95:\n return \"crown.png\"\n elif roll > 95 and roll <= 145:\n return \"devilhorns.png\"\n elif roll > 145 and roll <= 170:\n return \"greybandana.png\"\n elif roll > 170 and roll <= 205:\n return \"blackbandana.png\" \n elif roll > 205 and roll <= 235:\n return \"jesterhat.png\"\n elif roll > 235 and roll <= 275:\n return \"purplemohawk.png\"\n elif roll > 275 and roll <= 315:\n return \"pinkmohawk.png\"\n elif roll > 315 and roll <= 345:\n return \"partyhat.png\"\n elif roll > 345 and roll <= 375:\n return \"piratehat.png\"\n elif roll > 375 and roll <= 500:\n return \"redbeanie.png\"\n elif roll > 500 and roll <= 545:\n return \"bluesweatband.png\"\n elif roll > 545 and roll <= 595:\n return \"redsweatband.png\"\n elif roll > 595 and roll <= 695:\n return \"tophat.png\"\n elif roll > 695 and roll <= 745:\n return \"vikinghelmet.png\"\n elif roll > 745 and roll <= 870:\n return \"wheatbeanie.png\"\n elif roll > 870 and roll <= 920:\n return \"wizardhat.png\"\n elif roll > 920 and roll <= 960:\n return \"greenpuff.png\"\n else:\n return \"yellowpuff.png\"\n elif attr == \"Mouth\":\n if roll <= 50:\n return \"banana.png\"\n elif roll > 50 and roll <= 150:\n return \"bubblepipe.png\"\n elif roll > 150 and roll <= 350:\n return \"buckteeth.png\"\n elif roll > 350 and roll <= 450:\n return \"cigarette.png\"\n elif roll > 450 and roll <= 600:\n return \"tongue.png\"\n elif roll > 600 and roll <= 800:\n return \"vampireteeth.png\"\n elif roll > 800 and roll <= 900:\n return \"vape.png\"\n elif roll > 900 and roll <= 950:\n return \"bubblegum.png\"\n else:\n return \"pacifier.png\"\n \n elif attr == \"Nose\":\n if roll <= 200:\n return \"clownnose.png\"\n elif roll > 200 and roll <= 700:\n return \"nostrils.png\"\n elif roll > 700 and roll <= 800:\n return \"heart.png\"\n else:\n return \"roseycheeks.png\"\n elif attr == \"Hands\":\n return \"blank.png\"\n # if roll <= 300:\n # return \"bikergloves.png\"\n # elif roll > 300 and roll <= 400:\n # return \"infinitygauntlets.png\"\n # elif 
roll > 400 and roll <= 600:\n # return \"boxinggloves.png\"\n # elif roll > 600 and roll <= 800:\n # return \"mmagloves.png\"\n # else:\n # return \"mittens.png\"\n elif attr == \"Ears\":\n if roll <= 80:\n return \"goldring.png\"\n elif roll > 80 and roll <= 150:\n return \"goldcross.png\"\n elif roll > 150 and roll <= 220:\n return \"goldstud.png\"\n elif roll > 220 and roll <= 270:\n return \"goldringandcross.png\"\n elif roll > 270 and roll <= 330:\n return \"goldringandstud.png\"\n elif roll > 330 and roll <= 400:\n return \"silverring.png\"\n elif roll > 400 and roll <= 470:\n return \"silvercross.png\"\n elif roll > 470 and roll <= 540:\n return \"silverstud.png\"\n elif roll > 540 and roll <= 590:\n return \"silverringandcross.png\"\n elif roll > 590 and roll <= 640:\n return \"silverringandstud.png\"\n elif roll > 640 and roll <= 710:\n return \"obsidianring.png\"\n elif roll > 710 and roll <= 780:\n return \"obsidiancross.png\"\n elif roll > 780 and roll <= 850:\n return \"obsidianstud.png\"\n elif roll > 850 and roll <= 900:\n return \"obsidianringandcross.png\"\n elif roll > 900 and roll <= 950:\n return \"obsidianringandstud.png\"\n else:\n return \"earpods.png\"\n else:\n print(\"Error calculating weights\")\n\ndef createNFT(numberCreated):\n global nftID\n monkeyAry = []\n attrAry = []\n\n while numberCreated > 0:\n # get monkey type; corresponding BG layer auto-applied depending on the monkey type chosen\n # 94% normal, 3% golden, 2% zombie, 1% alien\n monkeyType = randint(1, 100)\n if monkeyType <= 94:\n monkeyLayer = Image.open(monkeyDir + \"normal_mk.png\")\n bgLayer = Image.open(bgDir + \"normal_bg.png\")\n monkeyAry.append(\"Normal\")\n currentColor = \"Normal\"\n elif monkeyType > 94 and monkeyType <= 97:\n monkeyLayer = Image.open(monkeyDir + \"golden_mk.png\")\n bgLayer = Image.open(bgDir + \"golden_bg.png\")\n monkeyAry.append(\"Golden\")\n currentColor = \"Golden\"\n elif monkeyType > 97 and monkeyType <= 99:\n monkeyLayer = Image.open(monkeyDir + \"zombie_mk.png\")\n bgLayer = Image.open(bgDir + \"zombie_bg.png\")\n monkeyAry.append(\"Zombie\")\n currentColor = \"Zombie\"\n else:\n monkeyLayer = Image.open(monkeyDir + \"alien_mk.png\")\n bgLayer = Image.open(bgDir + \"alien_bg.png\")\n monkeyAry.append(\"Alien\")\n currentColor = \"Alien\"\n\n # combine the monkey and bg layers\n bgLayer.paste(monkeyLayer, (0,0), monkeyLayer)\n\n # get individual attributes and then shuffle the array\n attrAry = os.listdir(attributeDir)\n random.shuffle(attrAry)\n\n # iterate through the attributes and roll to see if it is added\n attrCounter = 0;\n for attr in attrAry:\n if attr == \"Eyes\":\n getAttrName = attributeWeights(attr, currentColor)\n eyeLayer = Image.open(attributeDir + attr + \"\\\\\" + getAttrName)\n x,y = offsetAttribute(getAttrName)\n bgLayer.paste(eyeLayer, (x,y), eyeLayer)\n currentAttributes.append(getAttrName)\n attrCounter += 1\n else:\n # roll to determine if you get a particular attribute\n # 0 => 90%, 1 => 80%, 2 => 60%, 3 => 40%, 4 => 20%, 5 => 10%\n roll = randint(1, 100)\n if attrCounter == 0:\n if roll >= 1 and roll <= 90:\n getAttrName = attributeWeights(attr, currentColor)\n newLayer = Image.open(attributeDir + attr + \"\\\\\" + getAttrName)\n x,y = offsetAttribute(getAttrName)\n bgLayer.paste(newLayer, (x,y), newLayer)\n currentAttributes.append(getAttrName)\n attrCounter += 1\n else:\n # add attribute as \"none\" and skip to next attribute\n currentAttributes.append(\"none\")\n elif attrCounter == 1:\n if roll >= 1 and roll <= 80:\n getAttrName = 
attributeWeights(attr, currentColor)\n newLayer = Image.open(attributeDir + attr + \"\\\\\" + getAttrName)\n x,y = offsetAttribute(getAttrName)\n bgLayer.paste(newLayer, (x,y), newLayer)\n currentAttributes.append(getAttrName)\n attrCounter += 1\n else:\n # do nothing, skip to next attribute\n currentAttributes.append(\"none\")\n elif attrCounter == 2: \n if roll >= 1 and roll <= 60:\n getAttrName = attributeWeights(attr, currentColor)\n newLayer = Image.open(attributeDir + attr + \"\\\\\" + getAttrName)\n x,y = offsetAttribute(getAttrName)\n bgLayer.paste(newLayer, (x,y), newLayer)\n currentAttributes.append(getAttrName)\n attrCounter += 1\n else:\n # do nothing, skip to next attribute\n currentAttributes.append(\"none\")\n elif attrCounter == 3: \n if roll >= 1 and roll <= 40:\n getAttrName = attributeWeights(attr, currentColor)\n newLayer = Image.open(attributeDir + attr + \"\\\\\" + getAttrName)\n x,y = offsetAttribute(getAttrName)\n bgLayer.paste(newLayer, (x,y), newLayer)\n currentAttributes.append(getAttrName)\n attrCounter += 1\n else:\n # do nothing, skip to next attribute\n currentAttributes.append(\"none\")\n elif attrCounter == 4:\n if roll >= 1 and roll <= 20:\n getAttrName = attributeWeights(attr, currentColor)\n newLayer = Image.open(attributeDir + attr + \"\\\\\" + getAttrName)\n x,y = offsetAttribute(getAttrName)\n bgLayer.paste(newLayer, (x,y), newLayer)\n currentAttributes.append(getAttrName)\n attrCounter += 1\n else:\n # do nothing, skip to next attribute\n currentAttributes.append(\"none\")\n else: \n if roll >= 1 and roll <= 10:\n getAttrName = attributeWeights(attr, currentColor)\n newLayer = Image.open(attributeDir + attr + \"\\\\\" + getAttrName)\n x,y = offsetAttribute(getAttrName)\n bgLayer.paste(newLayer, (x,y), newLayer)\n currentAttributes.append(getAttrName)\n attrCounter += 1\n else:\n # do nothing, skip to next attribute\n currentAttributes.append(\"none\") \n \n # save img, add chimp to DB, increment NFT id, and subtract from total # to be created\n if nftID >= 1 and nftID < 10:\n nftIDformatted = \"000\" + str(nftID)\n elif nftID >= 10 and nftID < 100:\n nftIDformatted = \"00\" + str(nftID)\n elif nftID >= 100 and nftID < 1000:\n nftIDformatted = \"0\" + str(nftID)\n else:\n nftIDformatted = str(nftID)\n\n bgLayer.save(saveDir + \"PolyChimp #\" + nftIDformatted + \".png\")\n db.execute('INSERT INTO Chimps VALUES (?,?,?,?,?,?,?,?)', (\"#\" + nftIDformatted, monkeyAry[nftID - 1],\n currentAttributes[0], currentAttributes[1], currentAttributes[2], currentAttributes[3],\n currentAttributes[4], currentAttributes[5]))\n conn.commit()\n\n nftID += 1\n numberCreated -= 1\n\n # re-initialize variables\n init()\n \n # metadata\n # total number of each monkey type\n print(\"Normal: \" + str(monkeyAry.count(\"Normal\")))\n print(\"Golden: \" + str(monkeyAry.count(\"Golden\")))\n print(\"Zombie: \" + str(monkeyAry.count(\"Zombie\")))\n print(\"Alien: \" + str(monkeyAry.count(\"Alien\")))\n\n # checking for any duplicate chimps; if count > 1 then there are duplicates\n test2 = db.execute(\"SELECT count(*), NFTid, attr1, attr2, attr3, attr4, attr5, attr6 FROM Chimps\\\n GROUP BY NFTid, attr1, attr2, attr3, attr4, attr5, attr6 HAVING COUNT(*) > 1;\")\n data2 = db.fetchall()\n if not data2:\n print(\"There are no duplicate NFTs.\")\n else:\n print(\"There are duplicate NFTs!\")\n\n# pass in any number to create that number of 
NFTs\ninit()\ncreateNFT(2500)\n\n\n","sub_path":"nft_generation.py","file_name":"nft_generation.py","file_ext":"py","file_size_in_byte":16241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"331233504","text":"\"\"\"GUI software module for BP software\"\"\"\nfrom tkinter import *\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.animation as animation\nimport pickle\nimport sys\n\nclass MyGUI(Frame) :\n    def __init__(self, master=None) :\n        \"\"\"This function initializes the window\"\"\"\n        Frame.__init__(self, master)\n        self.master = master\n        self.pack()\n\n        #initialize default parameters from file\n        self.initParams() \n\n        #create the frames of the window\n        self.frameLeft = Frame(self.master)\n        self.frameLeft.pack(side=LEFT)\n        self.frameRight = Frame(self.master)\n        self.frameRight.pack(side=RIGHT)\n\n        #initialize window\n        self.createMenu()\n        self.createCanvasPlot()\n        self.createWidgets()\n        self.createBindings()\n\n    def NewFile(self):\n        \"\"\"Not yet implemented\"\"\"\n        pass\n    def OpenFile(self) :\n        \"\"\"Not yet implemented\"\"\"\n        pass\n    def About (self):\n        \"\"\"Not yet implemented\"\"\"\n        pass\n\n    def createMenu(self) :\n        \"\"\"This function creates the Menu\"\"\"\n        self.menu = Menu(self.master)\n        self.master.config(menu=self.menu)\n        self.filemenu = Menu(self.menu)\n        self.menu.add_cascade(label=\"File\", menu=self.filemenu)\n        self.filemenu.add_command(label=\"New\", command=self.NewFile)\n        self.filemenu.add_command(label=\"Open...\", command=self.OpenFile)\n        self.filemenu.add_separator()\n        self.filemenu.add_command(label=\"Exit\", command=self.master.destroy)\n\n        self.helpmenu = Menu(self.master)\n        self.menu.add_cascade(label=\"Help\", menu=self.helpmenu)\n        self.helpmenu.add_command(label=\"About...\", command=self.About)\n\n    def createCanvasPlot(self) :\n        \"\"\"This function initializes the Canvas Plot and the animation function.\n        The animation function is: self.animateCanvasPlot\n        The animation function gets periodically called in order to\n        update the CanvasPlot on the GUI.\"\"\"\n        self.fig = Figure(figsize=(9,6), facecolor=\"white\")\n        self.axis = self.fig.add_subplot(111)\n        canvas = FigureCanvasTkAgg(self.fig, master=self.frameRight)\n        canvas._tkcanvas.pack(expand=1)\n        #animation\n        self.ani = animation.FuncAnimation(self.fig, self.animateCanvasPlot, interval=1000)\n\n    def animateCanvasPlot(self,i) :\n        \"\"\"This function updates the Canvas Plot periodically. The values\n        which are needed to update the Plot are read out of a file (pickle).\n        In the pickle there are two arrays:\n        x -->time\n        [y1,y2,y3,...] 
-->each y is an array which saves the price for each timestamp\n        ->So the length of each y array has to be the same as the x array\"\"\"\n        x_arr = []\n        y_arr = []\n        #Read values out of a text file:\n        #with open(\"CanvasValues.txt\", \"r\") as file :\n        #    for i in file :\n        #        line = i.strip() #remove white-spaces\n        #        data = line.split(\",\")\n        #        x_arr.append(int(data[0]))\n        #        y_arr.append(int(data[1]))\n\n        #Read values out of a file (pickle)\n        with open(\"xyvalues\", \"rb\") as file:\n            x_arr = pickle.load(file)\n            y_arr = pickle.load(file)\n\n        self.axis.clear()\n        for i in range(0,len(y_arr)) :\n            if len(x_arr) == len(y_arr[i]):\n                self.axis.plot(x_arr, y_arr[i]) #Plot all arrays\n            else :\n                print(\"ERROR: Length of x-y-arrays for the Plot is not the same!\",file=sys.stderr)\n        \n        \n    def createWidgets(self) :\n        \"\"\"This function creates all Widgets of the GUI\"\"\"\n\n        #Start-Button\n        self.buttonStart = Button(self.frameLeft)\n        self.buttonStart.pack()\n        self.buttonStart[\"text\"] = \"start\"\n        self.buttonStart[\"background\"] = \"green yellow\"\n\n        #Variable amount of buttons\n        self.createDrinkButtons()\n\n        #End-Button\n        self.buttonEnd = Button(self.frameLeft)\n        self.buttonEnd.pack()\n        self.buttonEnd[\"text\"] = \"end\"\n        self.buttonEnd[\"background\"] = \"red\"\n\n        #Price-Label\n        self.labelPrice = Label(self.frameLeft)\n        self.labelPrice.pack(pady=10)\n        self.labelPrice[\"text\"] = \"0.00 €\"\n        \n\n    def createDrinkButtons(self) :\n        self.drinkButtons = []\n        if len(self.drinks) > 0 and len(self.drinks) < 10 :\n            for i in range(0, len(self.drinks)) :\n                self.drinkButtons.append(Button(self.frameLeft))\n                self.drinkButtons[i].pack()\n                self.drinkButtons[i][\"text\"] = self.drinks[i]\n\n    \n    def createBindings(self) :\n        \"\"\"This function creates the bindings to the UI\"\"\"\n        self.buttonStart.bind(\"<Button-1>\", self.buttonHandler)\n        self.buttonEnd.bind(\"<Button-1>\", self.buttonHandler)\n        for i in range(0, len(self.drinkButtons)) :\n            self.drinkButtons[i].bind(\"<Button-1>\", self.buttonHandler)\n\n    def buttonHandler(self, event) :\n        \"\"\"This function is responsible for the callbacks of all buttons.\"\"\"\n        print(event.widget[\"text\"])\n        \n\n    def initParams(self) :\n        \"\"\"This function will initialize all parameters which are necessary\n        for the application out of a text file.\n        FILENAME: params.txt\n        allowed parameters:\n        DRINKS = Drink1,Drink2,Drink3,Drink3,...,DrinkN\n        \"\"\"\n        filename = \"params.txt\"\n        with open(filename, \"r\") as file :\n            for i in file :\n                line = i.strip() #remove white-spaces\n                if \"DRINKS\" in line :\n                    try :\n                        d = line.split(\"=\")[1]\n                        d = d.strip()\n                        self.drinks = d.split(\",\")\n                        for j in range(0, len(self.drinks)) :\n                            self.drinks[j] = self.drinks[j].strip().upper()\n                        print(self.drinks)\n                    except Exception :\n                        print(\"ERROR: Couldn't read parameters from file\", filename, file=sys.stderr)\n    \n    \n    \n    \n\n    \n    \nif __name__ == \"__main__\" :\n    root = Tk()\n    app = MyGUI(master=root)\n    \n    app.mainloop()\n    #root.destroy()\n","sub_path":"BP_GUI.py","file_name":"BP_GUI.py","file_ext":"py","file_size_in_byte":6384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"391907419","text":"import numpy as np\nimport cv2 as cv\nimport xml.etree.cElementTree as ET\nimport matplotlib.pyplot as plt\n\ndef segmentation(color):\n    '''\n    (optional) Segmentation function\n    :param color: input RGB image\n    :return: segmented binary image\n    '''\n    lab = cv.cvtColor(color, cv.COLOR_BGR2LAB)\n    ret, binary = cv.threshold(lab[:, :, 0], 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n    return 
binary\n\n\ndef cvt2Gray(color):\n '''\n convert input RGB image to grayscale\n :param color: input RGB image\n :return: converted gray image\n '''\n return cv.cvtColor(color, cv.COLOR_BGR2GRAY)\n\n\ndef read_img(img_path):\n '''\n read image path and return image in a numpy array\n\n :param img_path:\n :return:\n '''\n return cv.imread(img_path)\n\ndef accepting_images(imageList, params):\n\n returns = []\n def parsexmlstring(xmlstring):\n root = ET.XML(xmlstring)\n for detection_area in root.iter(\"areas\"):\n crops = []\n for area in detection_area.iter(\"area\"):\n arealist = []\n for point in area.iter(\"point\"):\n arealist.append([int(float(point.attrib[\"x\"])), int(float(point.attrib[\"y\"]))])\n # if point.attrib[\"type\"] == \"upperleft\":\n # arealist.append([int(float(point.attrib[\"x\"])), int(float(point.attrib[\"y\"]))])\n # if point.attrib[\"type\"] == \"downright\":\n # arealist.append([int(float(point.attrib[\"x\"])), int(float(point.attrib[\"y\"]))])\n xmin = np.min(np.array(arealist)[:, 0])\n ymin = np.min(np.array(arealist)[:, 1])\n xmax = np.max(np.array(arealist)[:, 0])\n ymax = np.max(np.array(arealist)[:, 1])\n new_area = [[xmin, ymin], [xmax, ymax]]\n\n crops.append(new_area)\n\n for cfg in root.iter(\"cfg\"):\n level1 = float(cfg.find(\"level1\").text)\n level2 = float(cfg.find(\"level2\").text)\n\n for labels in root.iter('labels'):\n for labelcode in labels.iter('label'):\n labelcode = labelcode.attrib[\"code\"]\n\n type = root.attrib[\"type\"]\n\n level1 = level1*1000\n level2 = level2*1000\n return crops, level1, level2, labelcode, type\n\n def writetoxml(crops, result, labelcode, mags, warning, type):\n root = ET.Element(\"rule_xml\", {\"type\": type})\n AREAS = ET.SubElement(root, \"areas\")\n if warning == True:\n i = 0\n for crop in crops:\n x1 = crop[0][0]\n y1 = crop[0][1]\n x2 = crop[1][0]\n y2 = crop[1][1]\n Area = ET.SubElement(AREAS, \"area\", {\"level\": str(result[i]), \"label\": str(labelcode), \"magnitude\": str(mags[i])})\n ET.SubElement(Area, \"point\", {\"x\": str(x1), \"y\": str(y1)})\n ET.SubElement(Area, \"point\", {\"x\": str(x2), \"y\": str(y2)})\n i += 1\n\n xmlstring = ET.tostring(root, encoding='utf8', method='xml')\n return xmlstring\n\n import random\n '''\n Main Function in this class. 
Takes two images and returns 3 danger level values.\n    :return:\n    '''\n    if len(imageList) < 2:\n        print(\"at least two images are needed\")\n        return [[],\"\"]\n\n    idx = random.randint(0, len(imageList)-2)\n    orig_imgPrev = imageList[idx]\n    orig_imgAfter = imageList[idx+1]\n    imgPrev = cv.cvtColor(orig_imgPrev, cv.COLOR_RGBA2RGB)\n    imgAfter = cv.cvtColor(orig_imgAfter, cv.COLOR_RGBA2RGB)\n    orig_grayPrev = cv.cvtColor(imgPrev, cv.COLOR_RGB2GRAY)\n    orig_grayAfter = cv.cvtColor(imgAfter, cv.COLOR_RGB2GRAY)\n\n    img_size = imgPrev.shape\n\n    crops, level1, level2, labelcode, type = parsexmlstring(params)\n\n    result = []\n    mags = []\n    for crop in crops:\n        bb = np.zeros(4, dtype=np.int32)\n        bb[0] = np.maximum(crop[0][0], 0)\n        bb[1] = np.maximum(crop[0][1], 0)\n        bb[2] = np.minimum(crop[1][0], img_size[1])\n        bb[3] = np.minimum(crop[1][1], img_size[0])\n        grayPrev = orig_grayPrev[bb[1]:bb[3], bb[0]:bb[2]]\n        grayAfter = orig_grayAfter[bb[1]:bb[3], bb[0]:bb[2]]\n\n        flow = cv.calcOpticalFlowFarneback(grayPrev, grayAfter, None, 0.5,\n                                           3, 15, 3, 5, 1.2, 0)\n\n        mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n        mag = np.max(mag)\n\n        # print(mag)\n        if mag < level1:\n            res = 0\n            # returns.append([])\n        elif mag >= level1 and mag < level2:\n            res = 1\n            cv.rectangle(imgAfter, (bb[0], bb[1]), (bb[2], bb[3]), (0, 255, 0), 1)\n        else:  # mag >= level2; was 'mag > level2', which left res unbound when mag == level2\n            res = 2\n            cv.rectangle(imgAfter, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 1)\n\n        result.append(res)\n        mag /= 1000\n        mags.append(mag)\n\n    warning = False\n    for i in range(len(result)):\n        if result[i] != 0:\n            warning = True\n    if warning == True:\n        returns.append(imgAfter)\n    else:\n        returns.append([])\n\n    xmlstring = writetoxml(crops, result, labelcode, mags, warning, type)\n    xmlstring = xmlstring.decode()\n    returns.append(xmlstring)\n    return returns\n","sub_path":"All_Algorithms_in_One_Interface/src/transmission_line/getting_images_on.py","file_name":"getting_images_on.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"589843598","text":"\"\"\"Backup manager.\"\"\"\nimport asyncio\nimport logging\nfrom pathlib import Path\nfrom typing import Awaitable, Set\n\nfrom awesomeversion.awesomeversion import AwesomeVersion\nfrom awesomeversion.exceptions import AwesomeVersionCompare\n\nfrom ..const import FOLDER_HOMEASSISTANT, CoreState\nfrom ..coresys import CoreSysAttributes\nfrom ..exceptions import AddonsError\nfrom ..jobs.decorator import Job, JobCondition\nfrom ..utils.dt import utcnow\nfrom .backup import Backup\nfrom .const import BackupType\nfrom .utils import create_slug\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\n\nclass BackupManager(CoreSysAttributes):\n    \"\"\"Manage backups.\"\"\"\n\n    def __init__(self, coresys):\n        \"\"\"Initialize a backup manager.\"\"\"\n        self.coresys = coresys\n        self._backups = {}\n        self.lock = asyncio.Lock()\n\n    @property\n    def list_backups(self) -> Set[Backup]:\n        \"\"\"Return a list of all backup objects.\"\"\"\n        return set(self._backups.values())\n\n    def get(self, slug):\n        \"\"\"Return backup object.\"\"\"\n        return self._backups.get(slug)\n\n    def _create_backup(self, name, sys_type, password, homeassistant=True):\n        \"\"\"Initialize a new backup object from name.\"\"\"\n        date_str = utcnow().isoformat()\n        slug = create_slug(name, date_str)\n        tar_file = Path(self.sys_config.path_backup, f\"{slug}.tar\")\n\n        # init object\n        backup = Backup(self.coresys, tar_file)\n        backup.new(slug, name, date_str, sys_type, password)\n\n        # set general 
data\n if homeassistant:\n backup.store_homeassistant()\n\n backup.store_repositories()\n backup.store_dockerconfig()\n\n return backup\n\n def load(self):\n \"\"\"Load exists backups data.\n\n Return a coroutine.\n \"\"\"\n return self.reload()\n\n async def reload(self):\n \"\"\"Load exists backups.\"\"\"\n self._backups = {}\n\n async def _load_backup(tar_file):\n \"\"\"Load the backup.\"\"\"\n backup = Backup(self.coresys, tar_file)\n if await backup.load():\n self._backups[backup.slug] = backup\n\n tasks = [\n _load_backup(tar_file)\n for tar_file in self.sys_config.path_backup.glob(\"*.tar\")\n ]\n\n _LOGGER.info(\"Found %d backup files\", len(tasks))\n if tasks:\n await asyncio.wait(tasks)\n\n def remove(self, backup):\n \"\"\"Remove a backup.\"\"\"\n try:\n backup.tarfile.unlink()\n self._backups.pop(backup.slug, None)\n _LOGGER.info(\"Removed backup file %s\", backup.slug)\n\n except OSError as err:\n _LOGGER.error(\"Can't remove backup %s: %s\", backup.slug, err)\n return False\n\n return True\n\n async def import_backup(self, tar_file):\n \"\"\"Check backup tarfile and import it.\"\"\"\n backup = Backup(self.coresys, tar_file)\n\n # Read meta data\n if not await backup.load():\n return None\n\n # Already exists?\n if backup.slug in self._backups:\n _LOGGER.warning(\"Backup %s already exists! overwriting\", backup.slug)\n self.remove(self.get(backup.slug))\n\n # Move backup to backup\n tar_origin = Path(self.sys_config.path_backup, f\"{backup.slug}.tar\")\n try:\n backup.tarfile.rename(tar_origin)\n\n except OSError as err:\n _LOGGER.error(\"Can't move backup file to storage: %s\", err)\n return None\n\n # Load new backup\n backup = Backup(self.coresys, tar_origin)\n if not await backup.load():\n return None\n _LOGGER.info(\"Successfully imported %s\", backup.slug)\n\n self._backups[backup.slug] = backup\n return backup\n\n @Job(conditions=[JobCondition.FREE_SPACE, JobCondition.RUNNING])\n async def do_backup_full(self, name=\"\", password=None):\n \"\"\"Create a full backup.\"\"\"\n if self.lock.locked():\n _LOGGER.error(\"A backup/restore process is already running\")\n return None\n\n backup = self._create_backup(name, BackupType.FULL, password)\n _LOGGER.info(\"Creating new full backup with slug %s\", backup.slug)\n try:\n self.sys_core.state = CoreState.FREEZE\n await self.lock.acquire()\n\n async with backup:\n # Backup add-ons\n _LOGGER.info(\"Backing up %s store Add-ons\", backup.slug)\n await backup.store_addons()\n\n # Backup folders\n _LOGGER.info(\"Backing up %s store folders\", backup.slug)\n await backup.store_folders()\n\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.exception(\"Backup %s error\", backup.slug)\n self.sys_capture_exception(err)\n return None\n\n else:\n _LOGGER.info(\"Creating full backup with slug %s completed\", backup.slug)\n self._backups[backup.slug] = backup\n return backup\n\n finally:\n self.sys_core.state = CoreState.RUNNING\n self.lock.release()\n\n @Job(conditions=[JobCondition.FREE_SPACE, JobCondition.RUNNING])\n async def do_backup_partial(\n self, name=\"\", addons=None, folders=None, password=None, homeassistant=True\n ):\n \"\"\"Create a partial backup.\"\"\"\n if self.lock.locked():\n _LOGGER.error(\"A backup/restore process is already running\")\n return None\n\n addons = addons or []\n folders = folders or []\n\n if len(addons) == 0 and len(folders) == 0 and not homeassistant:\n _LOGGER.error(\"Nothing to create backup for\")\n return\n\n backup = self._create_backup(name, BackupType.PARTIAL, password, 
homeassistant)\n\n _LOGGER.info(\"Creating new partial backup with slug %s\", backup.slug)\n try:\n self.sys_core.state = CoreState.FREEZE\n await self.lock.acquire()\n\n async with backup:\n # Backup add-ons\n addon_list = []\n for addon_slug in addons:\n addon = self.sys_addons.get(addon_slug)\n if addon and addon.is_installed:\n addon_list.append(addon)\n continue\n _LOGGER.warning(\"Add-on %s not found/installed\", addon_slug)\n\n if addon_list:\n _LOGGER.info(\"Backing up %s store Add-ons\", backup.slug)\n await backup.store_addons(addon_list)\n\n # Backup folders\n if folders:\n _LOGGER.info(\"Backing up %s store folders\", backup.slug)\n await backup.store_folders(folders)\n\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.exception(\"Backup %s error\", backup.slug)\n self.sys_capture_exception(err)\n return None\n\n else:\n _LOGGER.info(\"Creating partial backup with slug %s completed\", backup.slug)\n self._backups[backup.slug] = backup\n return backup\n\n finally:\n self.sys_core.state = CoreState.RUNNING\n self.lock.release()\n\n @Job(\n conditions=[\n JobCondition.FREE_SPACE,\n JobCondition.HEALTHY,\n JobCondition.INTERNET_HOST,\n JobCondition.INTERNET_SYSTEM,\n JobCondition.RUNNING,\n ]\n )\n async def do_restore_full(self, backup, password=None):\n \"\"\"Restore a backup.\"\"\"\n if self.lock.locked():\n _LOGGER.error(\"A backup/restore process is already running\")\n return False\n\n if backup.sys_type != BackupType.FULL:\n _LOGGER.error(\"%s is only a partial backup!\", backup.slug)\n return False\n\n if backup.protected and not backup.set_password(password):\n _LOGGER.error(\"Invalid password for backup %s\", backup.slug)\n return False\n\n _LOGGER.info(\"Full-Restore %s start\", backup.slug)\n try:\n self.sys_core.state = CoreState.FREEZE\n await self.lock.acquire()\n\n async with backup:\n # Stop Home-Assistant / Add-ons\n await self.sys_core.shutdown()\n\n # Restore folders\n _LOGGER.info(\"Restoring %s folders\", backup.slug)\n await backup.restore_folders()\n\n # Restore docker config\n _LOGGER.info(\"Restoring %s Docker Config\", backup.slug)\n backup.restore_dockerconfig()\n\n # Start homeassistant restore\n _LOGGER.info(\"Restoring %s Home-Assistant\", backup.slug)\n backup.restore_homeassistant()\n task_hass = self._update_core_task(backup.homeassistant_version)\n\n # Restore repositories\n _LOGGER.info(\"Restoring %s Repositories\", backup.slug)\n await backup.restore_repositories()\n\n # Delete delta add-ons\n _LOGGER.info(\"Removing add-ons not in the backup %s\", backup.slug)\n for addon in self.sys_addons.installed:\n if addon.slug in backup.addon_list:\n continue\n\n # Remove Add-on because it's not a part of the new env\n # Do it sequential avoid issue on slow IO\n try:\n await addon.uninstall()\n except AddonsError:\n _LOGGER.warning(\"Can't uninstall Add-on %s\", addon.slug)\n\n # Restore add-ons\n _LOGGER.info(\"Restore %s old add-ons\", backup.slug)\n await backup.restore_addons()\n\n # finish homeassistant task\n _LOGGER.info(\"Restore %s wait until homeassistant ready\", backup.slug)\n await task_hass\n await self.sys_homeassistant.core.start()\n\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.exception(\"Restore %s error\", backup.slug)\n self.sys_capture_exception(err)\n return False\n\n else:\n _LOGGER.info(\"Full-Restore %s done\", backup.slug)\n return True\n\n finally:\n self.sys_core.state = CoreState.RUNNING\n self.lock.release()\n\n @Job(\n conditions=[\n JobCondition.FREE_SPACE,\n 
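# These guard conditions appear to mirror do_restore_full above (an observation from this file, not from upstream docs): restores only run with free disk space, a healthy system, internet access, and a running core.\n            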
JobCondition.HEALTHY,\n            JobCondition.INTERNET_HOST,\n            JobCondition.INTERNET_SYSTEM,\n            JobCondition.RUNNING,\n        ]\n    )\n    async def do_restore_partial(\n        self, backup, homeassistant=False, addons=None, folders=None, password=None\n    ):\n        \"\"\"Restore a backup.\"\"\"\n        if self.lock.locked():\n            _LOGGER.error(\"A backup/restore process is already running\")\n            return False\n\n        if backup.protected and not backup.set_password(password):\n            _LOGGER.error(\"Invalid password for backup %s\", backup.slug)\n            return False\n\n        addons = addons or []\n        folders = folders or []\n\n        _LOGGER.info(\"Partial-Restore %s start\", backup.slug)\n        try:\n            self.sys_core.state = CoreState.FREEZE\n            await self.lock.acquire()\n\n            async with backup:\n                # Restore docker config\n                _LOGGER.info(\"Restoring %s Docker Config\", backup.slug)\n                backup.restore_dockerconfig()\n\n                # Stop Home-Assistant for config restore\n                if FOLDER_HOMEASSISTANT in folders:\n                    await self.sys_homeassistant.core.stop()\n                    backup.restore_homeassistant()\n\n                # Process folders\n                if folders:\n                    _LOGGER.info(\"Restoring %s folders\", backup.slug)\n                    await backup.restore_folders(folders)\n\n                # Process Home-Assistant\n                task_hass = None\n                if homeassistant:\n                    _LOGGER.info(\"Restoring %s Home-Assistant\", backup.slug)\n                    task_hass = self._update_core_task(backup.homeassistant_version)\n\n                if addons:\n                    _LOGGER.info(\"Restoring %s Repositories\", backup.slug)\n                    await backup.restore_repositories()\n\n                    _LOGGER.info(\"Restoring %s old add-ons\", backup.slug)\n                    await backup.restore_addons(addons)\n\n                # Make sure Home Assistant runs again\n                if task_hass:\n                    _LOGGER.info(\"Restore %s wait for Home-Assistant\", backup.slug)\n                    await task_hass\n\n                # Do we need to start Home Assistant?\n                if not await self.sys_homeassistant.core.is_running():\n                    await self.sys_homeassistant.core.start()\n\n                # Check if we can access the API / otherwise restart\n                if not await self.sys_homeassistant.api.check_api_state():\n                    _LOGGER.warning(\"Need restart HomeAssistant for API\")\n                    await self.sys_homeassistant.core.restart()\n\n        except Exception as err: # pylint: disable=broad-except\n            _LOGGER.exception(\"Restore %s error\", backup.slug)\n            self.sys_capture_exception(err)\n            return False\n\n        else:\n            _LOGGER.info(\"Partial-Restore %s done\", backup.slug)\n            return True\n\n        finally:\n            self.sys_core.state = CoreState.RUNNING\n            self.lock.release()\n\n    def _update_core_task(self, version: AwesomeVersion) -> Awaitable[None]:\n        \"\"\"Process core update if needed and make awaitable object.\"\"\"\n\n        async def _core_update():\n            try:\n                if version == self.sys_homeassistant.version:\n                    return\n            except (AwesomeVersionCompare, TypeError):\n                pass\n            await self.sys_homeassistant.core.update(version)\n\n        return self.sys_create_task(_core_update())\n","sub_path":"supervisor/backups/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":13951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"465870272","text":"from pymatgen.ext.matproj import MPRester\r\nimport re\r\nfrom pymatgen.core.structure import Structure\r\nfrom pymatgen.io.cif import CifWriter\r\nfrom ase.io import read\r\nfrom ase.visualize import view\r\nfrom Functions import coords_transform, num_multiple\r\n\r\n\r\n\r\n\r\nclass CIF:\r\n    def __init__(self, mat_id):\r\n        self.id = 'mp-'+str(mat_id)\r\n    def to_cif(self):\r\n        m = MPRester(\"pc0rARlba5Ae3SArM09\")\r\n        structure = m.query(self.id, ['initial_structure']) # enter a different number here to query a different crystal ID\r\n        f = structure[0]['initial_structure']\r\n        f1 = str(f) + '\\n'\r\n        
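# Hedged sketch of the summary text the regexes below assume (layout inferred\r\n        # from the inline examples in this file, not from a live query):\r\n        #   Full Formula (Lu2 Al4)\r\n        #   abc   :   6.419100   6.523059   7.044663\r\n        #   angles:  90.000000  90.000000  90.000000\r\n        # 'abc', 'angles' and 'Full Formula' are the anchors parsed out of it.\r\n        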
print(f1)\r\n        j_pv_lst = re.findall('abc(.*?)\\n', f1)[0] # abc : 19.257300 19.569178 21.133988\r\n        j1_pv_lst = j_pv_lst.split(' ') # abc : 6.419100 6.523059 7.044663\r\n        while ':' in j1_pv_lst:\r\n            j1_pv_lst.remove(':')\r\n        while '' in j1_pv_lst:\r\n            j1_pv_lst.remove('')\r\n        a = float(j1_pv_lst[0])\r\n        b = float(j1_pv_lst[1])\r\n        c = float(j1_pv_lst[2])\r\n        mutiple = [a, b, c]\r\n        angles = re.findall('angles(.*?)\\n', f1)[0]\r\n        angles1 = angles.split(' ')\r\n        while ':' in angles1:\r\n            angles1.remove(':')\r\n        while '' in angles1:\r\n            angles1.remove('')\r\n        angle_lst = []\r\n        for i in angles1:\r\n            num = float(i)\r\n            angle_lst.append(num)\r\n        # print(angle_lst)\r\n        #print(mutiple)\r\n        coords_lst1 = coords_transform(angle_lst[0], angle_lst[1], angle_lst[2])\r\n        par_lst_matrix1 = [coords_lst1[0],coords_lst1[1],coords_lst1[2]]\r\n        pri_vectors = []\r\n        for i in range(len(par_lst_matrix1)):\r\n            pri_vectors.append(num_multiple(par_lst_matrix1[i],mutiple[i]))\r\n        par_lst_matrix = [pri_vectors[0],\r\n                          pri_vectors[1],\r\n                          pri_vectors[2]]\r\n        print(par_lst_matrix)\r\n\r\n        # material species (for example: Lu2 Al4)\r\n        y1 = re.findall('Full\\sFormula\\s(.*?)\\n', f1)[0]\r\n        y1_lst = y1.lstrip('(').rstrip(')')\r\n        material = re.findall('Full\\sFormula\\s(.*?)\\n', f1)[0].lstrip('(').rstrip(')')\r\n        self.mat_name = material\r\n        elements = material.split(' ')\r\n        # print(elements) # (Re4 S8)\\(Re108 S216)\r\n        zmb_lst = [chr(i) for i in range(97,123)] + [chr(i) for i in range(65,91)] # all lower- and upper-case ASCII letters\r\n        szb_lst = [str(i) for i in range(0,10)] # the digit characters '0'-'9'\r\n        element_lst = []\r\n        number_lst =[]\r\n        c = []\r\n        for element in elements: # after this loop we have two lists: the atoms and their corresponding atom counts\r\n            letter_lst = list(element)\r\n            symbol_lst = []\r\n            num_lst = []\r\n            element_lst1 =[]\r\n            number_lst1 =[]\r\n            # print(letter_lst)\r\n            for i in range(len(letter_lst)):\r\n                if letter_lst[i] in szb_lst:\r\n                    num_lst.append(letter_lst[i])\r\n                    # print(num_lst)\r\n                if letter_lst[i] in zmb_lst:\r\n                    symbol_lst.append(letter_lst[i])\r\n                    # print(symbol_lst)\r\n            # print(num_lst) # ['1', '0', '8', '2', '1', '6']\r\n            # print(symbol_lst) # ['R', 'e', 'S']\r\n            element1 = ''.join(symbol_lst)\r\n            # print(element1)\r\n            element_lst1.append(element1)\r\n            #print(num_lst)\r\n            number1 = ''.join(num_lst)\r\n            number_lst1.append(number1)\r\n            #print(number_lst1)\r\n            ys = 'a' # element symbol\r\n            gs = '0' # count\r\n            # print(element_lst1)\r\n            for i in element_lst1:\r\n                if len(i) >= len(ys):\r\n                    ys = i\r\n                    element_lst.append(i)\r\n            for i in number_lst1:\r\n                if len(i) >= len(gs):\r\n                    gs = i\r\n                    number_lst.append(i)\r\n        # print(element_lst)\r\n        # print(number_lst)\r\n\r\n        par_lst_species = [] # used for the CifWriter species parameter\r\n        for i in range(len(element_lst)):\r\n            num = int(number_lst[i])\r\n            for j in range(num):\r\n                par_lst_species.append(element_lst[i])\r\n        print(par_lst_species)\r\n\r\n\r\n\r\n        # coordinates of each atom\r\n        ord_lst = [] # the final coords parameter that CifWriter needs\r\n        ord_lst1 = []\r\n        ord_lst2 = [] # stored in the form of\r\n        for element in element_lst:\r\n            ord_lst1 = re.findall(element+'\\s\\s\\s\\s(.*?)\\n',f1)\r\n            for ord in ord_lst1:\r\n                ord1 = ord.split(' ')\r\n                while '' in ord1:\r\n                    ord1.remove('')\r\n                ord_lst2.append(ord1)\r\n        for ord in ord_lst2:\r\n            ord1 = []\r\n            for string in ord:\r\n                num = float(string)\r\n                ord1.append(num)\r\n            if len(ord1) == 3:\r\n                ord2 = ord1\r\n                ord_lst.append(ord2)\r\n        par_lst_coords = ord_lst\r\n        print(par_lst_coords)\r\n\r\n\r\n        # build the Structure object\r\n        structure = Structure(par_lst_matrix,par_lst_species,par_lst_coords)\r\n        print(structure)\r\n        slab = CifWriter(structure, write_magmoms=True) # struct (Structure) – structure to write; symprec (float) – 
If not none, finds the symmetry of the structure and writes the cif with symmetry information. Passes symprec to the SpacegroupAnalyzer; write_magmoms (bool) – If True, will write magCIF file. Incompatible with symprec\r\n slab.write_file(r'C:\\Users\\wang1\\Desktop/new_crys/{}.cif'.format(self.mat_name))\r\n # f = read('/Users/mac/Desktop/crys/{}.cif'.format(self.mat_name))\r\n # view(f)\r\n\r\n# 在底下这个CIF的括号里\r\n\r\n\r\n# if __name__ == '__main__':\r\nfor i in range(2500, 5000): # to:2511\r\n try:\r\n CIF(i).to_cif()\r\n print(i)\r\n except:\r\n pass","sub_path":"download cif.py","file_name":"download cif.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"66916502","text":"from urllib.request import urlopen\n\n\ndef fetch_words():\n\twith urlopen('http://sixty-north.com/c/t.txt') as story:\n\t\tstory_words = []\n\t\tfor line in story:\n\t\t line_words = line.decode('utf-8').split()\n\t\t for word in line_words:\n\t\t story_words.append(word)\n\n\tfor word in story_words:\n\t\tprint(word)\n\n\n\n\n# inside python REPL\n\n\"\"\"\nrango@bluehost:~/Coding-Practice/Python/PythonFundamentals$ python3\nPython 3.6.6 (default, Sep 12 2018, 18:26:19) \n[GCC 8.0.1 20180414 (experimental) [trunk revision 259383]] on linux\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import words\n>>> words.fetch_words()\nIt\nwas\nthe\nbest\n+ many words below\n\n>>> \n\"\"\"\n\n\"\"\"\nwe can also import this in REPL as \n>>> from words import fetch_words\n\"\"\"\n\n\n\nprint(__name__) # evaluate to module name i.e. \"words\"\n\n# after adding the above line we get this in REPL\n\"\"\"\nrango@bluehost:~/Coding-Practice/Python/PythonFundamentals$ python3\nPython 3.6.6 (default, Sep 12 2018, 18:26:19) \n[GCC 8.0.1 20180414 (experimental) [trunk revision 259383]] on linux\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import words\nwords\n>>> \n\"\"\"\n\n# as a script\n\"\"\"\nrango@bluehost:~/Coding-Practice/Python/PythonFundamentals$ python3 words.py \n__main__\n\"\"\"\n\n","sub_path":"Python/PythonFundamentals/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"389738382","text":"# coding:utf-8\nimport codecs\nimport os\n\ntodelete = {'a', 'an', 'the', 'is', 'are', 'to', 'for', 'of', 'in', 'at', 'on', 'after', 'from', 'since', 'behind',\n 'beside', 'under', 'below', 'over', 'above', 'by', 'among', 'about', 'with', 'except', 'besides', 'up',\n 'near', 'this', 'not', 'and', 'or', 'that', 'if', 'be'}\n\n\ndef is_alphabet(uchar):\n if u'\\u0041' <= uchar <= u'\\u005a' or u'\\u0061' <= uchar <= u'\\u007a':\n return True\n else:\n return False\n\n\ndef word_process(word):\n if word[len(word) - 2:len(word)] == 'ed':\n return word[0:len(word) - 1]\n tmp = 0\n for i in range(len(word)):\n if not is_alphabet(word[i]):\n tmp = i\n break\n if tmp != 0:\n return word[0:tmp]\n return word\n\n\ndef makedict(datafile, dictionary, dictionary_content):\n recording = 0\n tmp2 = []\n for line in datafile:\n if line[0:4] == 'From':\n tmp = line.split()\n if tmp[len(tmp) - 1] in dictionary_content:\n dictionary_content[tmp[len(tmp) - 1]] += 1\n else:\n dictionary_content[tmp[len(tmp) - 1]] = 1\n if line[0:7] == 'Subject' or recording == 1:\n if recording == 1:\n tmp = line.split()\n elif line[0:7] == 'Subject':\n tmp = 
line[9:len(line)].split()\n for i in tmp:\n tmp2.append(i.strip('`~@#$%^&*()-_+=\\,.!?;:\"[]{}/|<>').lower())\n if tmp2[len(tmp2) - 1] in todelete:\n tmp2.pop()\n if line[0:25] == 'Content-Transfer-Encoding':\n recording = 1\n for i in tmp2:\n if not is_alphabet(i[0:1]) or len(i) > 13:\n continue\n i = word_process(i)\n if i in dictionary:\n dictionary[i] += 1\n else:\n dictionary[i] = 1\n\n\ndef readtxt(dictionary1, dictionary2, dictionary3, dictionary4):\n sourcedir = 'D:\\\\NJU\\\\数据挖掘\\\\作业\\\\大作业\\\\data\\\\train\\\\spam'\n dictionary = [dictionary1, dictionary2]\n dictionary_content = [dictionary3, dictionary4]\n for i in range(2):\n if i == 1:\n sourcedir = sourcedir[0:len(sourcedir) - 4] + 'ham'\n filename = os.listdir(sourcedir)\n for name in filename:\n datafile = codecs.open(sourcedir + '\\\\' + name, 'r', 'big5', 'ignore')\n makedict(datafile, dictionary[i], dictionary_content[i])\n datafile.close()\n for key in dictionary[i].keys():\n dictionary[i][key] = (dictionary[i][key] + 1) / (len(filename) + 2)\n","sub_path":"code/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"34335539","text":"class Solution(object):\n def nextGreaterElement(self, findNums, nums):\n \"\"\"\n :type findNums: List[int]\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n result = []\n \n for num in findNums:\n exists = False\n slot = 0\n while nums[slot] != num:\n slot += 1\n while slot < len(nums):\n if nums[slot] > num:\n exists = True\n result.append(nums[slot])\n break\n slot += 1\n if not exists:\n result.append(-1)\n return result","sub_path":"next-greater-element-i/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"519729488","text":"import random \nimport sys\nfrom string import ascii_lowercase\n\ninv_map = ['k', 'j', 'h', 's', 'x', 'l', 'f', 'q', 'n', 'u', 'y', 'o', 'm', 'g', 'i', 'd', 't', 'b', 'z', 'c', 'p', 'a', 'e', 'w', 'v', 'r']\n\ndef inv_enc2(text):\n\ttemp = ''\n\tfor i in text:\n\t\ttemp += inv_map[ord(i)-ord('a')]\n\treturn temp\n\n\n# [+] RECOVERY KEYS :\nkey1 = EncrytpedKey1 = list('xtfsyhhlizoiyx')\nEncryptedKey2 = 'eudlqgluduggdluqmocgyukhbqkx'\nEncryptedFlag = 'lvvrafwgtocdrdzfdqotiwvrcqnd'\n\n# RECOVERY KEY2 :\nkey2 = list(inv_enc2(EncryptedKey2))\nk = key2[:14]\nprint(f\"key2 : {''.join(key2)}\")\nprint(f\"k : {''.join(k)}\")\n\nrkey2 = []\ni = 0\nwhile len(rkey2) != 14:\n\tfor ch in ascii_lowercase:\n\t\ta = ord(k[i]) - ord('a') + ord(ch)\n\t\tif a > 122:\n\t\t\ta = a % 122\n\t\t\ta = a + 97\n\t\tif chr(a) == key2[i+14]:\n\t\t\trkey2.append(ch)\n\t\t\ti += 1\n\t\t\tbreak\n# KEY2 SUCCESSFULY RECOVERY\nprint(f\"key1 : {''.join(key1)}\") # xtfsyhhlizoiyx\nprint(f\"key2 : {''.join(rkey2)}\") # rettnahagbeogi\n\n# SWITCH POSITION\nprint('-'*25)\ntmp = rkey2\nrkey2 = key1\nkey1 = tmp\nprint(f\"key1 : {''.join(key1)}\") # rettnahagbeogi\nprint(f\"key2 : {''.join(rkey2)}\") # xtfsyhhlizoiyx\n# -------------------------------------\n# RECOVERY KEY1 & KEY2 POSITION\nfor _ in range(2):\n\tfor i in range(14):\n\t\tTEMP2 = rkey2[13-i]\n\t\trkey2[13-i] = rkey2[ (ord(key1[13-i]) - ord('a')) % 14 ] \n\t\trkey2[ (ord(key1[13-i]) - ord('a')) % 14 ] = TEMP2\n\n\tfor i in range(14):\n\t\tTEMP2 = key1[13-i]\n\t\tkey1[13-i] = key1[ (ord(rkey2[13-i]) - ord('a')) % 14 ] \n\t\tkey1[ (ord(rkey2[13-i]) - ord('a')) % 14 ] = 
TEMP2\n\nprint('-'*25)\nprint(f\"key1 : {''.join(key1)}\") # togetherbagain\nprint(f\"key2 : {''.join(rkey2)}\") # hfixhtxszyiylo\n# KEY1 & KEY2 POSITION SUCCESSFULY RECOVERY\n# -------------------------------------\n# RECOVERY KEY2 MAP\nprint('-'*25)\nrkey2 = inv_enc2(inv_enc2(rkey2))\nkey1 = ''.join(key1)\nrkey2 = ''.join(rkey2)\n\nassert key1 == rkey2\n\nKEY = key1.replace('b', '_')\nprint(f'FOUND KEY : {KEY}')\nprint('-'*25)\n# -------------------------------- [+] RECOVERY FLAG : -------------------------------- \nimport numpy \n\ninv_enc1 = lambda text,n: ''.join(numpy.roll(list(text),n))\n\n \nenc_flag = []\nfor i in range(7):\n\tfor j in range(7):\n\t\tfor k in range(7):\n\t\t\ttmp = inv_enc1(inv_enc1(inv_enc1('lvvrafwgtocdrdzfdqotiwvrcqnd',k), j), i)\n\t\t\tif tmp not in enc_flag:\n\t\t\t\tenc_flag.append(tmp)\n\nfor i, enc in enumerate(enc_flag):\n\tenc_flag[i] = inv_enc2(enc)\n \n# RESUTL AFTER inv_enc2(enc_flag) \nprint('+'*25)\n\nfor flag in enc_flag:\n\tflag = list(flag)\n\tkey1 = list('rettnahagbeogi')\n\tkey2 = list('xtfsyhhlizoiyx')\n\tfor _ in range(2):\n\t\tfor i in range(14,28):\n\t\t\tTEMP2 = key2[13-i]\n\t\t\tkey2[13-i] = key2[ (ord(key1[13-i]) - ord('a')) % 14 ] \n\t\t\tkey2[ (ord(key1[13-i]) - ord('a')) % 14 ] = TEMP2\n\n\t\t\tTEMP1 = flag[41-i]\n\t\t\tflag[41-i] = flag[ (ord(key2[13-i]) - ord('a')) % 28 ] \n\t\t\tflag[ (ord(key2[13-i]) - ord('a')) % 28 ] = TEMP1\n\n\t\tfor i in range(14):\n\t\t\tTEMP2 = key1[13-i]\n\t\t\tkey1[13-i] = key1[ (ord(key2[13-i]) - ord('a')) % 14 ] \n\t\t\tkey1[ (ord(key2[13-i]) - ord('a')) % 14 ] = TEMP2\n\n\t\t\tTEMP1 = flag[13-i]\n\t\t\tflag[13-i] = flag[ (ord(key1[13-i]) - ord('a')) % 28 ] \n\t\t\tflag[ (ord(key1[13-i]) - ord('a')) % 28 ] = TEMP1\n\n\tfor i in range(7):\n\t\tget = \"\".join(inv_enc1(flag,i))\n\t\tget = get.replace('a', '{')\n\t\tget = get.replace('b', '_')\n\t\tif 'csictf{' in get:\n\t\t\tprint(f'FOUND FLAG : {get}')\n\n# FLAG : 'csictf{all_the_kings_horses}'","sub_path":"csictf2020/Reversing/Scrambled Eggs/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"454174471","text":"from sqlalchemy import Table, Column, Integer, Float, String, MetaData, DateTime\nfrom datetime import datetime\n\n#create metaData\nmeta = MetaData()\n\n# create stations table\ndef create_stations(engine):\n stations = Table(\n \"stations\", meta,\n Column(\"number\", Integer, primary_key = True),\n Column(\"name\", String(128)),\n Column(\"address\", String(128)),\n Column(\"pos_lat\", Float),\n Column(\"pos_long\", Float),\n Column(\"bike_stands\", Integer))\n # if does not exist create\n if not engine.dialect.has_table(engine, \"stations\"):\n meta.create_all(engine)\n # return the variable\n return stations\n\ndef create_available(engine):\n available = Table(\n \"available\", meta,\n Column(\"number\", Integer),\n Column(\"available_bike_stands\", Integer),\n Column(\"available_bikes\", Integer),\n Column(\"last_update\", DateTime()))\n\n # if does not exist create\n if not engine.dialect.has_table(engine, \"available\"):\n meta.create_all(engine)\n\n return available\n\ndef create_weather(engine):\n weather = Table(\n \"weather\", meta,\n Column(\"type\", String(128)),\n Column(\"description\", String(128)),\n Column(\"icon\", String(128)),\n Column(\"humidity\", Float),\n Column(\"temp\", Float),\n Column(\"feels_like\", Float),\n Column(\"wind_speed\", Float),\n Column(\"pressure\", Float),\n 
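# visibility is stored exactly as the API reports it; get_conditions below reads obj[\"visibility\"] directly, which for OpenWeatherMap (the schema these parsers appear to match) is in metres\n        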
Column(\"visibility\", Float),\n Column(\"time\", DateTime()))\n\n # if does not exist create\n if not engine.dialect.has_table(engine, \"weather\"):\n meta.create_all(engine)\n\n return weather\n\n# pull the stations data from the api\ndef get_stations(obj):\n return {\"number\": obj[\"number\"],\n \"name\": obj[\"name\"],\n \"address\": obj[\"address\"],\n \"pos_lat\": obj[\"position\"][\"lat\"],\n \"pos_long\": obj[\"position\"][\"lng\"],\n \"bike_stands\": obj[\"bike_stands\"]}\n\n# pull the availability data from the api\ndef get_available(obj):\n return {\"number\": obj[\"number\"],\n \"available_bike_stands\": obj[\"available_bike_stands\"],\n \"available_bikes\": obj[\"available_bikes\"],\n \"last_update\": datetime.fromtimestamp(obj[\"last_update\"] / 1e3)}\n\n\n# get the weather data\ndef get_conditions(obj):\n weather = (obj[\"weather\"])[0]\n current = obj[\"main\"]\n wind = obj[\"wind\"]\n return {\"type\": weather[\"main\"],\n \"description\": weather[\"description\"],\n \"icon\": weather[\"icon\"],\n \"humidity\": current[\"humidity\"],\n \"temp\": current[\"temp\"],\n \"feels_like\": current[\"feels_like\"],\n \"wind_speed\": wind[\"speed\"],\n \"pressure\": current[\"pressure\"],\n \"visibility\": obj[\"visibility\"],\n \"time\": datetime.now()}","sub_path":"db_control.py","file_name":"db_control.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"146018761","text":"from collections import Counter\nfrom itertools import permutations\nimport sys\ninput = sys.stdin.buffer.readline\nN, C = map(int, input().split())\nD = [[int(x) for x in input().split()] for _ in range(C)]\n\ncnt = [Counter() for _ in range(3)]\n\nfor i in range(N):\n c = [int(x)-1 for x in input().split()]\n for j in range(3):\n cnt[j].update(c[(3-(i+2)+j)%3::3])\n\nans = 1000*500*500+5\nfor p in permutations(range(C), 3):\n s = 0\n for j in range(3):\n for k, v in cnt[j].items():\n s += D[k][p[j]] * v\n ans = min(ans, s)\n\nprint(ans)","sub_path":"Python_codes/p03330/s922061312.py","file_name":"s922061312.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"154779046","text":"# Лабораторная работа 1.\n# Вычисление объема, площадей и радиусов вписанной и описанной сферы\n# Богатырев Иван\n###############################################################################\n# R - это радиус окружности, r - радиус шарового сегмента, #\n# h - высота шарового сегмента #\n# v - объем сектора шара, s_full - полная площадь поверхности, #\n# s_side - площадь сегмента шара, потому что полная площадь поверхности #\n# состоит из боковых площадей сегмаента шара и конуса. Я решил найти #\n# сегмент шара в качесте боковой стороны. 
#\n###############################################################################\n\n\nfrom math import pi, sqrt\n\n\nprint('Дан сектор шара...\\n')\nR, h = float(input('Введите радиус шара: ')), float(input('Введите высоту: '))\n\nif R > h > 0:\n r = sqrt(R * 2 * h - h**2)\n v = 2 / 3 * pi * R ** 2 * h # вычисление объема\n\n # вычисление площади полной поверхности\n s_full = 2 * pi * R * (R + (r / 2) - sqrt(R ** 2 - r ** 2))\n\n s_side = pi * (r ** 2 + h ** 2) # площадь сектора шара вместо боковой площади\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print('Объем данного сектора щара составляет:', '{:.7g}'.format(v))\n print('Площадь поверхности данного сектора шара:', '{:.7g}'.format(s_full))\n print('Площади боковой поверхности не существует, '\n 'поэтому нашли площадь шарового сегмента:', '{:.7g}'.format(s_side))\n print('Также, невозможно вписать сектор шара в сферу и сферу в сектор шара.')\n x = input('\\nГотово! (Нажмите Enter для выхода из программы...)')\nelse:\n input('Недопустимые значения! (Нажмите Enter для выхода из программы...)')\n\n\n\n","sub_path":"1st_semester/Lab 1/Bogatyrev/Lab 1.py","file_name":"Lab 1.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"339054913","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n# Copyright (C) 2013, Cameron White\nfrom github import Github\nfrom github import MainClass\nfrom github.Authorization import Authorization\nfrom github.Requester import Requester\nimport re\nimport argparse\nimport ConfigParser\n\nSECTION = 'accounts'\n\ndef store_token(file_path, account_type, username, token):\n \n config = ConfigParser.ConfigParser()\n\n config.read(file_path)\n\n if not config.has_section(SECTION):\n config.add_section(SECTION)\n\n config.set(\n SECTION, \n '{}.{}.token'.format(account_type, username),\n token\n )\n\n with open(file_path, 'wb') as configfile:\n config.write(configfile)\n\ndef load_token(file_path, account_type, username):\n\n config = ConfigParser.ConfigParser()\n\n config.read(file_path)\n \n option = '{}.{}.token'.format(account_type, username)\n\n token = None\n if config.has_option(SECTION, option):\n token = config.get(SECTION, option)\n\n return token\n\ndef generate_tokens(file_path, account_type):\n \n config = ConfigParser.ConfigParser()\n\n config.read(file_path)\n \n if config.has_section(SECTION):\n for option, value in config.items(SECTION):\n if re.search('^\\w+\\.\\w+\\.token$', option):\n account_type, username, _ = option.split('.')\n yield account_type, username, value\n\ndef gitignore_types(github):\n for i in github.get_user('github')\\\n .get_repo('gitignore')\\\n .get_git_tree('master')\\\n .tree:\n t = re.split('.gitignore', i.path)\n if t[0] is not '':\n yield t[0]\n","sub_path":"GithubRemote/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"197640030","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# \n# Modulo Desarrollado por Juventud Productiva (Felipe Villamizar)\n# Visitanos en http://juventudproductivabicentenaria.blogspot.com/\n# Nuestro Correo juventudproductivabicentenaria@gmail.com\n#\n#############################################################################\n\nfrom openerp.osv import osv, fields\nfrom openerp.addons.website_apiform.controladores 
import panel, base_tools\n\n############################################################################\n# The purpose of this class is to filter the role groups in the view \n# view_user_simple_form, in order to control which groups may be assigned \n# to them in a given form. \n# Whenever a relation to res.users is made in any object and you want \n# to restrict it to certain groups only... you must pass through the context\n# the following key \"only_groups_id\" with the id or ids you want to control. \n\n# notes on the following methods:\n# \n# adicionar_groups_id => this method returns the ids of the groups \n#                     when passed a list with the group names\n# \n# raise_groups_id: this method generates the message so that the user \n#                  knows which groups are permitted.\n# \n# default_groups_id: with this method I add the selected and permitted \n#                    groups to the vals dictionary via\n#                    vals['groups_id'][0][2].append(group).\n# \n# create and write: this is where I finally check, whenever a user is \n#                 created or modified, that the assigned permission \n#                 groups are allowed.\n# \n# How to use it: \n# \n# 1) in your class's .py, declare and initialize a global list \n#    groups_id=[]\n# \n# 2) then create this method:\n\n    #def default_groups(self,cr,uid,name_rols,context=None):\n        #res_groups_obj=self.pool.get('res.groups')\n        #res_groups_ids=res_groups_obj.search(cr,uid,[\n            #('name',\n            #'in',\n            #name_rols)\n        #])\n        #for group in res_groups_ids:\n            #self.groups_id.append(group)\n        #return self.groups_id\n        \n    #3) in your class constructor 'def __init__(self,..)' \n    \n    #def __init__(self, pool, cr):\n        #init_res = super(ept_ure_ures, self).__init__(pool,\n        #cr)\n        #name_rols=['Coordinación General UREs']\n        #groups_id=self.default_groups(\n            #cr,\n            #SUPERUSER_ID,\n            #name_rols)\n        #return init_res\n        \n    \n    \n    #4) Finally, in your many2many relation.\n    \n    #user_ids': fields.many2many(\n        #'res.users', \n        #'ept_ure_relacion_ures_users', \n        #'entidad_id', \n        #'entidad_user_id', \n        #'Equipo de la ure',\n        #copy=False,\n        #domain=[('groups_id', 'in',groups_id)],\n        #context={'default_groups_id':groups_id,\n        #'only_groups_id':groups_id,},\n    #),\n    \n############################################################################\nclass res_users(osv.osv):\n    _name = 'res.users'\n    _inherit=\"res.users\"\n    \n    def buscar_groups_id(self, cr, uid,name_rols,context=None):\n        res_groups_obj=self.pool.get('res.groups')\n        res_groups_ids=res_groups_obj.search(cr,uid,[('name','in',name_rols)])\n        return res_groups_ids\n    \n    \n    \n    def raise_groups_id(self, cr, uid, context=None):\n        grupos=' '\n        res_groups_obj=self.pool.get('res.groups')\n        groups_data=res_groups_obj.browse(cr,uid,context['only_groups_id'])\n        for groups in groups_data:\n            grupos+=groups.name+' \\n'\n        raise osv.except_osv(('Group assignment error'),\n                             (u'''The group you are assigning does not match \n                             the ones authorized for this interface. Through this interface\n                             you may only assign the following groups:\\n %s \n                             NOTE: If you want to create a user with the groups you are assigning,\n                             you must contact the administrator. 
, ''' % (grupos)))\n        return True\n\n    def default_groups_id(self, cr, uid, vals, context=None):\n        #~ apply set theory to what is put in by default\n        #~ and what the user selected...\n        adicionar=self.buscar_groups_id( cr, uid,['Employee'])\n        groups_id= set(vals['groups_id'][0][2])\n        default_groups_id= set(context['only_groups_id'])\n        union=groups_id & default_groups_id\n        diferencia1= groups_id - default_groups_id\n        diferencia2=diferencia1-set(adicionar)\n        if len(diferencia2)>0:\n            self.raise_groups_id(cr, uid,context)\n        #~ self.raise_groups_id(cr, uid,context)\n        for group in adicionar:\n            if group not in vals['groups_id'][0][2]:\n                vals['groups_id'][0][2].append(group)\n        return vals\n    \n    def create(self, cr, uid, vals, context=None):\n        if context.has_key('only_groups_id'):\n            vals=self.default_groups_id(cr,uid,vals,context)\n        user_id = super(res_users, self).create(cr, uid, vals, context=context)\n        return user_id\n    \n    def write(self, cr, uid, ids, vals, context=None):\n        if context.has_key('only_groups_id') and vals.has_key('groups_id'):\n            vals=self.default_groups_id(cr,uid,vals,context=context)\n        res = super(res_users, self).write(cr, uid, ids, vals, context=context)\n        return res\n","sub_path":"models/res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":6597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"134685502","text":"\"\"\"add boards table\n\nRevision ID: c1b883dbfff4\nRevises: b764aaedf10d\nCreate Date: 2017-10-30 19:37:00.544958\n\n\"\"\"\n# pylint: skip-file\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nrevision = 'c1b883dbfff4'\ndown_revision = 'b764aaedf10d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('boards',\n                    sa.Column('id', sa.Integer(), nullable=False),\n                    sa.Column('name', sa.String(), nullable=False),\n                    sa.Column('task_number', sa.Integer(), nullable=False),\n                    sa.Column(\n                        'created_at',\n                        sa.DateTime(timezone=True),\n                        server_default=sa.text('now()'),\n                        nullable=False),\n                    sa.Column(\n                        'updated_at',\n                        sa.DateTime(timezone=True),\n                        server_default=sa.text('now()'),\n                        nullable=False),\n                    sa.PrimaryKeyConstraint('id', name=op.f('pk_boards')))\n    op.add_column('tasks', sa.Column('board_id', sa.Integer(), nullable=True))\n    op.create_foreign_key(\n        op.f('fk_tasks_board_id_boards'), 'tasks', 'boards', ['board_id'],\n        ['id'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(\n        op.f('fk_tasks_board_id_boards'), 'tasks', type_='foreignkey')\n    op.drop_column('tasks', 'board_id')\n    op.drop_table('boards')\n    # ### end Alembic commands ###\n","sub_path":"alembic/versions/c1b883dbfff4_add_boards_table.py","file_name":"c1b883dbfff4_add_boards_table.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"197498728","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2007-2015 Mag. Christian Tanzer. All rights reserved\n# Glasauergasse 32, A--1130 Wien, Austria. 
tanzer@swing.co.at\n# ****************************************************************************\n#\n# This module is licensed under the terms of the BSD 3-Clause License\n# .\n# ****************************************************************************\n#\n#++\n# Name\n# TFL.D2.Affine\n#\n# Purpose\n# Model affine transformations in 2D space\n#\n# Revision Dates\n# 29-Nov-2007 (CT) Creation\n# 20-Aug-2012 (CT) Add `Reflection`, `__neg__`\n# 16-Oct-2015 (CT) Add `__future__` imports\n# ««revision-date»»···\n#--\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom _TFL import TFL\nfrom _TFL._D2 import D2\nimport _TFL._Meta.Object\n\nclass Affine (TFL.Meta.Object) :\n \"\"\"Affine transformation in 2D space.\n\n >>> t_2_4 = Affine.Trans (2, 4)\n >>> t_2_4\n Affine (1, 0, 2, 0, 1, 4)\n >>> - t_2_4\n Affine (1, 0, -2, 0, 1, -4)\n\n >>> s_3_5 = Affine.Scale (3, 5)\n >>> s_3_5\n Affine (3, 0, 0, 0, 5, 0)\n\n >>> - s_3_5\n Affine (0.333333333333, 0, 0, 0, 0.2, 0)\n\n >>> [t_2_4 (p) for p in ((0, 0), (0, 1), (1, 0))]\n [(2, 4), (2, 5), (3, 4)]\n >>> [s_3_5 (p) for p in [(0, 0), (1, 1), (2, 4), (2, 5), (3, 4)]]\n [(0, 0), (3, 5), (6, 20), (6, 25), (9, 20)]\n >>> t_2_4 (s_3_5 ((2, 2))), s_3_5 (t_2_4 ((2, 2)))\n ((8, 14), (12, 30))\n >>> t_s = t_2_4 * s_3_5\n >>> s_t = s_3_5 * t_2_4\n >>> t_s ((2, 2)), s_t ((2, 2))\n ((8, 14), (12, 30))\n\n >>> re_x = Affine.Reflection (0, 1)\n >>> re_y = Affine.Reflection (1, 0)\n\n >>> re_x\n Affine (-1, 0, 0, 0, 1, 0)\n >>> re_y\n Affine (1, 0, 0, 0, -1, 0)\n\n >>> - re_x\n Affine (-1, 0, 0, 0, 1, 0)\n\n >>> - re_y\n Affine (1, 0, 0, 0, -1, 0)\n\n >>> [re_x (p) for p in ((0, 0), (0, 1), (1, 0), (-1, 0), (0, -1))]\n [(0, 0), (0, 1), (-1, 0), (1, 0), (0, -1)]\n\n >>> [re_y (p) for p in ((0, 0), (0, 1), (1, 0), (-1, 0), (0, -1))]\n [(0, 0), (0, -1), (1, 0), (-1, 0), (0, 1)]\n\n >>> points = [(-10, 20), (0, 20), (-10, 0), (0, 0), (0, -20), (10, -20)]\n >>> c2s = Affine.Trans (10, 20) * re_y\n >>> c2s_b = Affine.Reflection (1, 0, 10, 20)\n >>> s2c = - c2s\n >>> c2s\n Affine (1, 0, 10, 0, -1, 20)\n >>> c2s_b\n Affine (1, 0, 10, 0, -1, 20)\n\n >>> s2c\n Affine (1, 0, -10, 0, -1, 20)\n\n >>> points\n [(-10, 20), (0, 20), (-10, 0), (0, 0), (0, -20), (10, -20)]\n\n >>> [re_y (p) for p in points]\n [(-10, -20), (0, -20), (-10, 0), (0, 0), (0, 20), (10, 20)]\n\n >>> [c2s (p) for p in points]\n [(0, 0), (10, 0), (0, 20), (10, 20), (10, 40), (20, 40)]\n\n >>> tps = [c2s_b (p) for p in points]\n >>> tps\n [(0, 0), (10, 0), (0, 20), (10, 20), (10, 40), (20, 40)]\n\n >>> ttps = [s2c (p) for p in tps]\n >>> ttps\n [(-10, 20), (0, 20), (-10, 0), (0, 0), (0, -20), (10, -20)]\n\n >>> points == ttps\n True\n\n \"\"\"\n\n _str_format = \"(\" + \", \".join ((\"%.12g\", ) * 6) + \")\"\n\n @classmethod\n def Reflection (cls, lx = 1, ly = 0, dx = 0, dy = 0) :\n \"\"\"Returns affine transformations for reflection about a line between\n (0, 0) and (lx, ly).\n \"\"\"\n n2 = (lx * lx + ly * ly)\n sd = (lx * lx - ly * ly) / n2\n xy = (2 * lx * ly) / n2\n return cls (sd, xy, dx, xy, - sd, dy)\n # end def Reflection\n\n @classmethod\n def Rot (cls, angle) :\n \"\"\"Returns affine transformation for counter-clockwise rotation by\n `angle`.\n \"\"\"\n return cls (angle.cos, - angle.sin, 0, angle.sin, angle.cos, 0)\n # end def Rot\n\n @classmethod\n def Scale (cls, sx, sy) :\n \"\"\"Returns affine transformation for scaling by `sx`, `sy`.\"\"\"\n return cls (sx, 0, 0, 0, sy, 0)\n # end 
def Scale\n\n @classmethod\n def Trans (cls, dx, dy) :\n \"\"\"Returns affine transformation for translation by `dx`, `dy`.\"\"\"\n return cls (1, 0, dx, 0, 1, dy)\n # end def Trans\n\n def __init__ (self, a, b, c, d, e, f) :\n def _clean (* args) :\n def _gen () :\n for x in args :\n i = int (x)\n yield i if x == i else x\n return tuple (_gen ())\n self._matrix = (_clean (a, b, c), _clean (d, e, f), (0, 0, 1))\n # end def __init__\n\n def __call__ (self, p) :\n \"\"\"Return affine transformation of point `p`.\"\"\"\n xc, yc = self._matrix [:2]\n pc = list (p) + [1]\n return \\\n ( sum (u * v for (u, v) in zip (pc, xc))\n , sum (u * w for (u, w) in zip (pc, yc))\n )\n # end def __call__\n\n def __mul__ (self, rhs) :\n if isinstance (rhs, Affine) :\n sm = self._matrix\n rm = list (zip (* rhs._matrix)) # transpose\n return self.__class__ \\\n ( sum (u * v for (u, v) in zip (sm [0], rm [0]))\n , sum (u * v for (u, v) in zip (sm [0], rm [1]))\n , sum (u * v for (u, v) in zip (sm [0], rm [2]))\n , sum (u * v for (u, v) in zip (sm [1], rm [0]))\n , sum (u * v for (u, v) in zip (sm [1], rm [1]))\n , sum (u * v for (u, v) in zip (sm [1], rm [2]))\n )\n # end def __mul__\n\n def __neg__ (self) :\n \"\"\"Return inverse affine transformation.\"\"\"\n (a, b, c), (d, e, f) = self._matrix [:2]\n A = +e\n B = -d\n D = -b\n E = +a\n G = b * f - c * e\n H = c * d - a * f\n K = a * e - b * d\n det = a * A + b * B\n assert det == K, (\"det = %s; K = %s\" % (det, K))\n return self.__class__ \\\n (A / det, D / det, G / det, B / det, E / det, H / det)\n # end def __neg__\n\n def __str__ (self) :\n (a, b, c), (d, e, f) = self._matrix [:2]\n return self._str_format % (a, b, c, d, e, f)\n # end def __str__\n\n def __repr__ (self) :\n return \"%s %s\" % (self.__class__.__name__, str (self))\n # end def __repr__\n\n# end class Affine\n\nif __name__ != \"__main__\" :\n D2._Export (\"*\")\n### __END__ TFL.D2.Affine\n","sub_path":"Functions/venv/lib/python3.6/site-packages/_TFL/_D2/Affine.py","file_name":"Affine.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"564262019","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSignal\n\nclass progressWidget(QtWidgets.QDialog):\n\n progressClosed = pyqtSignal(int, name='progressClosed')\n \n\n def __init__(self):\n QtWidgets.QDialog.__init__(self)\n #self.setAttribute()\n self.initUI()\n\n\n def initUI(self):\n\n self.progress = QtWidgets.QProgressBar(self)\n self.progress.setGeometry(0, 0, 250, 20)\n self.progress.setValue(0)\n self.directory = QtWidgets.QLabel('Directory', self)\n #self.directory.move(0, 30)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(100)\n sizePolicy.setVerticalStretch(0)\n self.directory.setSizePolicy(sizePolicy)\n self.directory.setGeometry(0, 30, 250, 20)\n self.show()\n\n def setValue(self,value):\n self.progress.setValue(value)\n self.setWindowTitle(str(value)+\"%\")\n\n def setDirectoryText(self,value):\n self.directory.setText(value)\n\n def closeEvent(self,event):\n print(\"progressClosed=OnClose\")\n self.progressClosed.emit(0)\n self.close()\n event.accept()","sub_path":"progressWidget.py","file_name":"progressWidget.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"539546183","text":"import logging\n\nlogging.info(\"Importing packages in bayes_opt_runner\")\nfrom ax.service.ax_client import AxClient\nfrom ax.service.utils.best_point import get_best_raw_objective_point\nfrom ax.plot.render import plot_config_to_html\nfrom ax.utils.report.render import render_report_elements\nfrom ax.plot.contour import interact_contour\nfrom ax.modelbridge.registry import Models\nfrom ax.core.base_trial import TrialStatus\nfrom ax.plot.slice import interact_slice\nfrom ax.plot.helper import get_range_parameters\nfrom ax.core.parameter import RangeParameter, ParameterType\nimport re\nfrom easy_module_attribute_getter import utils as emag_utils, YamlReader\nimport glob\nimport os\nimport csv\nimport pandas as pd\nfrom powerful_benchmarker.utils import common_functions as c_f, constants as const\nfrom powerful_benchmarker.runners.base_runner import BaseRunner\nfrom .single_experiment_runner import SingleExperimentRunner\nfrom losses.TopKPre import TopKPreLoss\nfrom losses.RSTopKPre import RSTopKPreLoss\nimport pytorch_metric_learning.utils.logging_presets as logging_presets\nimport numpy as np\nimport scipy.stats as scipy_stats\nimport shutil\nimport collections\nimport json\n\nlogging.info(\"Done importing packages in bayes_opt_runner\")\n\n\ndef set_optimizable_params_and_bounds(args_dict, bayes_params, parent_key, keywords=const.BAYESIAN_KEYWORDS):\n for k, v in args_dict.items():\n if not isinstance(v, dict):\n for keyword in keywords:\n log_scale = \"~LOG\" in keyword\n value_type = \"int\" if \"~INT\" in keyword else \"float\"\n if k.endswith(keyword) and (\"dict_of_yamls\" not in parent_key):\n assert isinstance(v, list)\n actual_key = re.sub('%s$' % keyword, '', k)\n param_name = actual_key if parent_key == '' else \"%s/%s\" % (parent_key, actual_key)\n bayes_params.append({\"name\": param_name, \"type\": \"range\", \"bounds\": v, \"log_scale\": log_scale,\n \"value_type\": value_type})\n else:\n next_parent_key = k if parent_key == '' else \"%s/%s\" % (parent_key, k)\n set_optimizable_params_and_bounds(v, bayes_params, next_parent_key, keywords)\n for keyword in keywords:\n emag_utils.remove_key_word(args_dict, keyword)\n\n\ndef replace_with_optimizer_values(param_path, input_dict, optimizer_value):\n for p in param_path.split(\"/\"):\n if p in input_dict:\n if isinstance(input_dict[p], dict):\n input_dict = input_dict[p]\n else:\n input_dict[p] = optimizer_value\n\n\ndef open_log(log_paths):\n for L in log_paths:\n try:\n ax_client = AxClient.load_from_json_file(filepath=L)\n break\n except IOError:\n ax_client = None\n return ax_client\n\nid_to_model = {\"TopKPre\": TopKPreLoss,\n \"RSTopKPre\": RSTopKPreLoss}\n\n\nclass BayesOptRunner(BaseRunner):\n def __init__(self, bayes_opt_iters, reproductions, model_id=None, **kwargs):\n super().__init__(**kwargs)\n self.model_id=model_id\n self.YR=self.set_YR(use_super=True, model_id=self.model_id)\n self.bayes_opt_iters = bayes_opt_iters\n self.reproductions = reproductions\n self.experiment_name = self.YR.args.experiment_name\n self.bayes_opt_root_experiment_folder = os.path.join(self.root_experiment_folder, self.experiment_name)\n if self.global_db_path is None:\n self.global_db_path = os.path.join(self.bayes_opt_root_experiment_folder, \"bayes_opt_experiments.db\")\n self.csv_folder = os.path.join(self.bayes_opt_root_experiment_folder, \"bayes_opt_record_keeper_logs\")\n self.tensorboard_folder = os.path.join(self.bayes_opt_root_experiment_folder, \"bayes_opt_tensorboard_logs\")\n self.ax_log_folder = 
os.path.join(self.bayes_opt_root_experiment_folder, \"bayes_opt_ax_logs\")\n self.best_parameters_filename = os.path.join(self.bayes_opt_root_experiment_folder, \"best_parameters.yaml\")\n self.most_recent_parameters_filename = os.path.join(self.bayes_opt_root_experiment_folder,\n \"most_recent_parameters.yaml\")\n self.bayes_opt_table_name = \"bayes_opt\"\n self.set_YR(use_super=False, model_id=self.model_id)\n\n def set_YR(self, use_super=True, model_id=None):\n if use_super:\n # super().set_YR()\n return SingleExperimentRunner.set_YR_from_json(self, model_id=self.model_id)\n else:\n self.YR, self.bayes_params = self.read_yaml_and_find_bayes()\n self.eval_primary_metric = c_f.first_val_of_dict(self.YR.args.hook_container)[\"primary_metric\"]\n\n def run(self):\n self.register(\"loss\", id_to_model[self.model_id])\n ax_client = self.get_ax_client()\n trials = ax_client.experiment.trials\n record_keeper, _, _ = logging_presets.get_record_keeper(self.csv_folder, self.tensorboard_folder)\n temp_YR_for_config_diffs = self.read_yaml_and_find_bayes(find_bayes_params=False)\n\n for i in range(0, self.bayes_opt_iters):\n if i in trials and trials[i].status == TrialStatus.COMPLETED:\n continue\n logging.info(\"Optimization iteration %d\" % i)\n c_f.save_config_files(self.YR.args.place_to_save_configs, temp_YR_for_config_diffs.args.dict_of_yamls, True,\n [i]) # save config diffs, if any\n sub_experiment_name = self.get_sub_experiment_name(i)\n parameters, trial_index, experiment_func = self.get_parameters_and_trial_index(ax_client,\n sub_experiment_name, i)\n ax_client.complete_trial(trial_index=trial_index, raw_data=experiment_func(parameters, sub_experiment_name))\n self.save_new_log(ax_client)\n self.update_records(record_keeper, ax_client, i)\n self.plot_progress(ax_client)\n\n logging.info(\"DONE BAYESIAN OPTIMIZATION\")\n self.plot_progress(ax_client)\n best_sub_experiment_name = self.save_best_parameters(record_keeper, ax_client)\n self.test_model(best_sub_experiment_name)\n self.reproduce_results(best_sub_experiment_name)\n self.create_accuracy_report(best_sub_experiment_name)\n logging.info(\"##### FINISHED #####\")\n\n def get_parameters_and_trial_index(self, ax_client, sub_experiment_name, input_trial_index):\n try:\n parameters = ax_client.get_trial_parameters(trial_index=input_trial_index)\n trial_index = input_trial_index\n experiment_func = self.resume_training\n except:\n parameters, trial_index = ax_client.get_next_trial()\n assert input_trial_index == trial_index\n self.save_new_log(ax_client)\n c_f.write_yaml(self.most_recent_parameters_filename, {\"parameters\": parameters, \"trial_index\": trial_index},\n open_as='w')\n experiment_func = self.run_new_experiment\n return parameters, trial_index, experiment_func\n\n def get_sub_experiment_name(self, iteration):\n return self.experiment_name + str(iteration)\n\n def get_sub_experiment_path(self, sub_experiment_name):\n return os.path.join(self.bayes_opt_root_experiment_folder, sub_experiment_name)\n\n def get_sub_experiment_bayes_opt_filename(self, sub_experiment_path):\n return os.path.join(sub_experiment_path, \"bayes_opt_parameters.yaml\")\n\n def get_all_log_paths(self):\n return sorted(glob.glob(os.path.join(self.ax_log_folder, \"log*.json\")), reverse=True)\n\n def save_new_log(self, ax_client):\n log_paths = self.get_all_log_paths()\n log_folder = self.ax_log_folder\n c_f.makedir_if_not_there(log_folder)\n new_log_path = os.path.join(log_folder, \"log%05d.json\" % len(log_paths))\n 
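# persist the current Ax experiment state to a new numbered log file so an interrupted run can resume from its last trial\n 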
ax_client.save_to_json_file(filepath=new_log_path)\n\n def update_records(self, record_keeper, ax_client, iteration):\n df_as_dict = ax_client.get_trials_data_frame().to_dict()\n trial_index_key = [k for k, v in df_as_dict[\"trial_index\"].items() if v == iteration]\n assert len(trial_index_key) == 1\n trial_index_key = trial_index_key[0]\n most_recent = {k.replace('/', '_'): v[trial_index_key] for k, v in df_as_dict.items()}\n record_keeper.update_records(most_recent, global_iteration=iteration,\n input_group_name_for_non_objects=self.bayes_opt_table_name)\n record_keeper.save_records()\n\n def save_best_parameters(self, record_keeper, ax_client):\n q = record_keeper.query(\n \"SELECT * FROM {0} WHERE {1}=(SELECT max({1}) FROM {0})\".format(self.bayes_opt_table_name,\n self.eval_primary_metric))[0]\n best_trial_index = int(q['trial_index'])\n best_sub_experiment_name = self.get_sub_experiment_name(best_trial_index)\n logging.info(\"BEST SUB EXPERIMENT NAME: %s\" % best_sub_experiment_name)\n\n best_parameters, best_values = get_best_raw_objective_point(ax_client.experiment)\n assert np.isclose(best_values[self.eval_primary_metric][0], q[self.eval_primary_metric])\n best_parameters_dict = {\"best_sub_experiment_name\": best_sub_experiment_name,\n \"best_parameters\": best_parameters,\n \"best_values\": {k: {\"mean\": float(v[0]), \"SEM\": float(v[1])} for k, v in\n best_values.items()}}\n c_f.write_yaml(self.best_parameters_filename, best_parameters_dict, open_as='w')\n return best_sub_experiment_name\n\n def create_accuracy_report(self, best_sub_experiment_name):\n dummy_YR = self.read_yaml_and_find_bayes(find_bayes_params=False, merge_argparse=True)\n dummy_api_parser = self.get_api_parser(dummy_YR.args)\n global_record_keeper, _, _ = logging_presets.get_record_keeper(self.csv_folder, self.tensorboard_folder,\n self.global_db_path, \"\", False)\n exp_names = glob.glob(os.path.join(self.bayes_opt_root_experiment_folder, \"%s*\" % best_sub_experiment_name))\n\n exp_names = [os.path.basename(e) for e in exp_names]\n results, summary = {}, {}\n\n for eval_category in self.get_eval_types():\n table_name, eval_obj = dummy_api_parser.get_eval_record_name_dict(split_names=[\"test\"],\n for_inner_obj=eval_category,\n return_inner_obj=True)\n table_name = table_name[\"test\"]\n eval_type = eval_obj.__class__.__name__\n results[eval_type] = {}\n summary[eval_type] = collections.defaultdict(lambda: collections.defaultdict(list))\n\n for exp in exp_names:\n results[eval_type][exp] = {}\n exp_id = global_record_keeper.record_writer.global_db.get_experiment_id(exp)\n base_query = \"SELECT * FROM {} WHERE experiment_id=? AND id=? AND {}=?\".format(table_name,\n const.TRAINED_STATUS_COL_NAME)\n max_id_query = \"SELECT max(id) FROM {} WHERE experiment_id=? 
AND {}=?\".format(table_name,\n const.TRAINED_STATUS_COL_NAME)\n qs = {}\n\n for trained_status in [const.UNTRAINED_TRUNK, const.UNTRAINED_TRUNK_AND_EMBEDDER, const.TRAINED]:\n max_id = \\\n global_record_keeper.query(max_id_query, values=(exp_id, trained_status), use_global_db=True)[0][\n \"max(id)\"]\n q = global_record_keeper.query(base_query, values=(exp_id, max_id, trained_status),\n use_global_db=True)\n if len(q) > 0:\n qs[trained_status] = q[0]\n\n for trained_status, v1 in qs.items():\n q_as_dict = dict(v1)\n results[eval_type][exp][trained_status] = q_as_dict\n for acc_key, v2 in q_as_dict.items():\n if all(not acc_key.startswith(x) for x in\n [const.TRAINED_STATUS_COL_NAME, \"epoch\", \"SEM\", \"id\", \"experiment_id\", \"timestamp\"]):\n summary[eval_type][trained_status][acc_key].append(v2)\n\n for trained_status, v1 in summary[eval_type].items():\n for acc_key in v1.keys():\n v2 = [v3 for v3 in v1[acc_key] if v3 is not None]\n if len(v2) == 0:\n continue\n mean = np.mean(v2)\n cf_low, cf_high = scipy_stats.t.interval(0.95, len(v2) - 1, loc=np.mean(v2), scale=scipy_stats.sem(\n v2)) # https://stackoverflow.com/a/34474255\n cf_width = mean - cf_low\n summary[eval_type][trained_status][acc_key] = {\"mean\": float(mean),\n \"95%_confidence_interval\": (\n float(cf_low), float(cf_high)),\n \"95%_confidence_interval_width\": float(cf_width)}\n\n eval_type_without_split = c_f.first_val_of_dict(\n dummy_api_parser.get_eval_record_name_dict(for_inner_obj=eval_category))\n detailed_report_filename = os.path.join(self.bayes_opt_root_experiment_folder,\n \"detailed_report_{}.yaml\".format(eval_type_without_split))\n report_filename = os.path.join(self.bayes_opt_root_experiment_folder,\n \"report_{}.yaml\".format(eval_type_without_split))\n c_f.write_yaml(detailed_report_filename, results[eval_type], open_as=\"w\")\n c_f.write_yaml(report_filename, json.loads(json.dumps(summary[eval_type])), open_as=\"w\")\n\n def update_bayes_opt_search_space(self, ax_client):\n for bp in self.bayes_params:\n kwargs_dict = {\"name\": bp[\"name\"], \"lower\": bp[\"bounds\"][0], \"upper\": bp[\"bounds\"][1]}\n kwargs_dict[\"parameter_type\"] = ParameterType.FLOAT if bp[\"value_type\"] == \"float\" else ParameterType.INT\n kwargs_dict[\"log_scale\"] = bp[\"log_scale\"]\n ax_client.experiment.search_space.update_parameter(RangeParameter(**kwargs_dict))\n\n def get_ax_client(self):\n log_paths = self.get_all_log_paths()\n ax_client = None\n if len(log_paths) > 0:\n ax_client = open_log(log_paths)\n self.update_bayes_opt_search_space(ax_client)\n if ax_client is None:\n ax_client = AxClient()\n ax_client.create_experiment(parameters=self.bayes_params, name=self.YR.args.experiment_name, minimize=False,\n objective_name=self.eval_primary_metric)\n return ax_client\n\n def plot_progress(self, ax_client):\n model = Models.GPEI(experiment=ax_client.experiment, data=ax_client.experiment.fetch_data())\n html_elements = [plot_config_to_html(ax_client.get_optimization_trace())]\n model_params = get_range_parameters(model)\n try:\n if len(model_params) > 1:\n html_elements.append(\n plot_config_to_html(interact_contour(model=model, metric_name=self.eval_primary_metric)))\n else:\n html_elements.append(plot_config_to_html(\n interact_slice(model=model, param_name=model_params[0].name, metric_name=self.eval_primary_metric)))\n except TypeError:\n pass\n with open(os.path.join(self.bayes_opt_root_experiment_folder, \"optimization_plots.html\"), 'w') as f:\n f.write(render_report_elements(self.experiment_name, 
html_elements))\n\n def read_yaml_and_find_bayes(self, find_bayes_params=True, merge_argparse=False):\n # YR = self.setup_yaml_reader()\n YR=SingleExperimentRunner.set_YR_from_json(self, model_id=self.model_id)\n bayes_opt_config_exists = os.path.isdir(YR.args.place_to_save_configs)\n\n config_paths = self.get_saved_config_paths(YR.args) if bayes_opt_config_exists else self.get_root_config_paths(\n YR.args)\n merge_argparse = (self.merge_argparse_when_resuming or merge_argparse) if bayes_opt_config_exists else True\n YR.args, _, YR.args.dict_of_yamls = YR.load_yamls(config_paths=config_paths,\n max_merge_depth=float('inf'),\n max_argparse_merge_depth=float('inf'),\n merge_argparse=merge_argparse)\n\n if not bayes_opt_config_exists:\n c_f.save_config_files(YR.args.place_to_save_configs, YR.args.dict_of_yamls, False, [])\n\n if find_bayes_params:\n bayes_params = []\n set_optimizable_params_and_bounds(YR.args.__dict__, bayes_params, '')\n return YR, bayes_params\n return YR\n\n def set_experiment_name_and_place_to_save_configs(self, YR):\n YR.args.experiment_folder = self.get_sub_experiment_path(YR.args.experiment_name)\n YR.args.place_to_save_configs = os.path.join(YR.args.experiment_folder, \"configs\")\n\n def starting_fresh(self, experiment_name):\n def _starting_fresh(YR):\n emag_utils.remove_dicts(YR.args.__dict__)\n YR.args.dataset_root = self.dataset_root\n YR.args.experiment_name = experiment_name\n self.set_experiment_name_and_place_to_save_configs(YR)\n\n return _starting_fresh\n\n def get_simplified_yaml_reader(self, experiment_name):\n YR = self.setup_yaml_reader()\n self.starting_fresh(experiment_name)(YR)\n return YR\n\n def delete_sub_experiment_folder(self, sub_experiment_name):\n logging.warning(\"Deleting and starting fresh for %s\" % sub_experiment_name)\n shutil.rmtree(self.get_sub_experiment_path(sub_experiment_name))\n global_record_keeper, _, _ = logging_presets.get_record_keeper(self.csv_folder, self.tensorboard_folder,\n self.global_db_path, sub_experiment_name, False)\n global_record_keeper.record_writer.global_db.delete_experiment(sub_experiment_name)\n\n def try_resuming(self, YR, reproduction=False):\n try:\n SER = self.get_single_experiment_runner()\n starting_fresh_hook = self.starting_fresh(YR.args.experiment_name)\n if reproduction:\n output = SER.reproduce_results(YR, starting_fresh_hook=starting_fresh_hook, max_merge_depth=0,\n max_argparse_merge_depth=0)\n else:\n output = SER.run_new_experiment_or_resume(YR)\n except Exception as e:\n YR.args.resume_training = None\n logging.error(repr(e))\n logging.warning(\"Could not resume training for %s\" % YR.args.experiment_name)\n self.delete_sub_experiment_folder(YR.args.experiment_name)\n output = const.RESUME_FAILURE\n return output\n\n def resume_training(self, parameters, sub_experiment_name):\n local_YR = self.get_simplified_yaml_reader(sub_experiment_name)\n local_YR.args.resume_training = self.get_resume_training_value()\n\n try:\n loaded_parameters = c_f.load_yaml(\n self.get_sub_experiment_bayes_opt_filename(local_YR.args.experiment_folder))\n assert parameters == loaded_parameters\n parameter_load_successful = True\n except Exception as e:\n logging.error(repr(e))\n logging.warning(\"Input parameters and loaded parameters don't match for %s\" % sub_experiment_name)\n self.delete_sub_experiment_folder(sub_experiment_name)\n parameter_load_successful = False\n\n output = self.try_resuming(local_YR) if parameter_load_successful else const.RESUME_FAILURE\n return self.run_new_experiment(parameters, 
sub_experiment_name) if output == const.RESUME_FAILURE else output\n\n def run_new_experiment(self, parameters, sub_experiment_name):\n local_YR, _ = self.read_yaml_and_find_bayes()\n for param_path, value in parameters.items():\n replace_with_optimizer_values(param_path, local_YR.args.__dict__, value)\n for sub_dict in local_YR.args.dict_of_yamls.values():\n replace_with_optimizer_values(param_path, sub_dict, value)\n local_YR.args.experiment_name = sub_experiment_name\n local_YR.args.resume_training = None\n self.set_experiment_name_and_place_to_save_configs(local_YR)\n c_f.makedir_if_not_there(local_YR.args.experiment_folder)\n c_f.write_yaml(self.get_sub_experiment_bayes_opt_filename(local_YR.args.experiment_folder), parameters,\n open_as='w')\n SER = self.get_single_experiment_runner()\n return SER.start_experiment(local_YR.args)\n\n def test_model(self, sub_experiment_name):\n for eval_type in self.get_eval_types():\n local_YR = self.get_simplified_yaml_reader(sub_experiment_name)\n if eval_type == \"ensemble\":\n logging.info(\"Getting ensemble accuracy\")\n local_YR.args.evaluate_ensemble = True\n elif eval_type == \"aggregator\":\n logging.info(\"Getting aggregate accuracy\")\n local_YR.args.evaluate = True\n local_YR.args.resume_training = None\n local_YR.args.splits_to_eval = [\"test\"]\n SER = self.get_single_experiment_runner()\n SER.run_new_experiment_or_resume(local_YR)\n\n def reproduce_results(self, sub_experiment_name):\n if type(self.reproductions) in [list, tuple]:\n idx_list = range(*self.reproductions)\n else:\n idx_list = range(self.reproductions)\n for i in idx_list:\n local_YR = self.get_simplified_yaml_reader(\"%s_reproduction%d\" % (sub_experiment_name, i))\n local_YR.args.reproduce_results = self.get_sub_experiment_path(sub_experiment_name)\n local_YR.args.resume_training = None\n output = const.RESUME_FAILURE\n if os.path.isdir(local_YR.args.experiment_folder):\n local_YR.args.resume_training = self.get_resume_training_value()\n output = self.try_resuming(local_YR, reproduction=True)\n if output == const.RESUME_FAILURE:\n SER = self.get_single_experiment_runner()\n starting_fresh_hook = self.starting_fresh(local_YR.args.experiment_name)\n SER.reproduce_results(local_YR, starting_fresh_hook=starting_fresh_hook, max_merge_depth=0,\n max_argparse_merge_depth=0)\n self.test_model(local_YR.args.experiment_name)\n\n def get_resume_training_value(self):\n return \"latest\" if self.YR.args.resume_training is None else self.YR.args.resume_training\n\n def get_single_experiment_runner(self):\n SER = SingleExperimentRunner(root_experiment_folder=self.bayes_opt_root_experiment_folder,\n root_config_folder=self.YR.args.place_to_save_configs,\n dataset_root=self.dataset_root,\n pytorch_home=self.pytorch_home,\n global_db_path=self.global_db_path,\n merge_argparse_when_resuming=self.merge_argparse_when_resuming)\n\n SER.pytorch_getter = self.pytorch_getter\n return SER\n\n def get_eval_types(self):\n return [\"aggregator\", \"ensemble\"]","sub_path":"ptranking/ltr_dml/eval/bayes_opt_runner.py","file_name":"bayes_opt_runner.py","file_ext":"py","file_size_in_byte":24173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"92540823","text":"# Development environment settings\n\nimport os\nfrom conf.default import *\n\n###############################################################################\n## Debug Flags\nDEBUG = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')\nTEMPLATE_DEBUG = 
DEBUG\n\n###############################################################################\n## Django Toolbar\ntry:\n import debug_toolbar # NOQA\nexcept ImportError:\n pass\nelse:\n INSTALLED_APPS += (\n 'debug_toolbar',\n )\n MIDDLEWARE_CLASSES += (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n )\n DEBUG_TOOLBAR_PANELS = (\n 'debug_toolbar.panels.version.VersionDebugPanel',\n 'debug_toolbar.panels.timer.TimerDebugPanel',\n 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',\n 'debug_toolbar.panels.headers.HeaderDebugPanel',\n 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',\n 'debug_toolbar.panels.template.TemplateDebugPanel',\n 'debug_toolbar.panels.signals.SignalDebugPanel',\n 'debug_toolbar.panels.logger.LoggingPanel',\n )\n DEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n 'HIDE_DJANGO_SQL': True,\n 'SQL_WARNING_THRESHOLD': 80,\n }\n\n###############################################################################\n## Fixtures\nFIXTURE_DIRS = (\n os.path.join(PROJECT_ROOT, 'fixtures'),\n) + FIXTURE_DIRS\n\n###############################################################################\n## Installed and Enabled Applications\nINSTALLED_APPS += (\n #'django.contrib.admin',\n #'django.contrib.admindocs',\n)\n\nDISABLED_APPS += (\n)\n\n###############################################################################\n## Template Settings\nTEMPLATE_CONTEXT_PROCESSORS += (\n #'django.core.context_processors.debug',\n)\n","sub_path":"conf/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"621992818","text":"def restore_bst(prefix_lst):\n bst = BinarySearchTreeMap()\n if len(prefix_lst) == 0:\n return bst\n bst[prefix_lst[0]] = None\n root_val = prefix_lst[0]\n if len(prefix_lst) == 1:\n return bst\n else:\n restore_bst_helper(bst, bst.root, root_val, prefix_lst[1:])\n return bst\n\ndef restore_bst_helper(tree, curr_root, data,lst):\n if len(lst) == 0:\n pass\n elif lst[0] < curr_root.item.key:\n new = BinarySearchTreeMap.Node(BinarySearchTreeMap.Item(lst[0]))\n curr_root.left = new\n new.parent = curr_root\n tree.size += 1\n restore_bst_helper(tree, new, data, lst[1:])\n else:\n new = BinarySearchTreeMap.Node(BinarySearchTreeMap.Item(lst[0]))\n if data is not None and lst[0] > data:\n tree.root.right = new\n new.parent = tree.root\n restore_bst_helper(tree, new, None, lst[1:])\n elif lst[0] > curr_root.parent.item.key:\n curr_root.parent.right = new\n new.parent = curr_root.parent\n restore_bst_helper(tree, new, data, lst[1:])\n else:\n curr_root.right = new\n new.parent = curr_root\n restore_bst_helper(tree, new, data, lst[1:])\n tree.size += 1\n","sub_path":"hw/hw8/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"57402913","text":"from flask import Flask, render_template, flash, redirect, url_for, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate # alembic - database migration tool\nfrom flask_admin import Admin # admin\nfrom flask_admin.contrib.sqla import ModelView # admin\nfrom flask_login import LoginManager, current_user, login_user, logout_user, login_required\n\nfrom forms import NewCategoryForm, NewProductForm, NewUserForm, PostForm, LoginForm, RemoveProduct, RemoveCategory, RemoveUser, RemovePost\n\napp = Flask(__name__) # init 
app\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///../data/ecoswap_database.sqlite' # connecting to sqlite database\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # set false to stop warnings\napp.config['SECRET_KEY'] = 'random string' # forms\napp.config['FLASK_ADMIN_SWATCH'] = 'cerulean' # theme of admin pages\n\n\ndb = SQLAlchemy(app) # init db, binding to app\n\nmigrate = Migrate(app, db) # alembic\nlogin = LoginManager(app) # Flask-Login needs to know the view function that handles logins\nlogin.login_view = 'login' # 'login' is the function/endpoint for the login view\n\nfrom models import Category, Product, User, Post\nfrom api import api # api\napp.register_blueprint(api, url_prefix='/api') # api\n\nadmin = Admin(app, name='Ecoswap', template_mode='bootstrap3') # admin\nadmin.add_view(ModelView(Category, db.session)) # admin\nadmin.add_view(ModelView(Product, db.session)) # admin\nadmin.add_view(ModelView(User, db.session)) # admin\nadmin.add_view(ModelView(Post, db.session)) # admin\n\n## HOMEPAGE FOR APP\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n## ******************* USER LOGIN ********************* ##\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if not form.validate_on_submit():\n return render_template('login.html', form=form)\n\n user = User.query.filter_by(username=form.username.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password')\n return redirect(url_for('login'))\n\n login_user(user)\n return redirect(url_for('index'))\n\n\n## ******************* USER REGISTRATION **************** ##\n\n# creating and reading new users for the site\n@app.route('/register', methods=['GET', 'POST'])\ndef newuser():\n form=NewUserForm()\n if not form.validate_on_submit():\n return render_template('new_user.html', form=form)\n\n username = form.username.data.strip()\n email = form.email.data.strip()\n \n if User.query.filter(User.username == username).count():\n flash(f'Error: {username} user already exists')\n return render_template('new_user.html', form=form)\n\n if User.query.filter(User.email == email).count():\n flash(f'Error: {email} user email address already exists')\n return render_template('new_user.html', form=form)\n\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n flash(f'New user {form.username.data} registered')\n return redirect('/login')\n\n## ******************* DELETING USERS - BACKEND ******************* ##\n\n@app.route('/delete_user', methods=['GET','POST'])\ndef remove_user():\n form = RemoveUser()\n\n if (request.method=='GET'):\n return render_template('delete_user.html', users=User.query.all(), form=form)\n else:\n\n if not form.validate_on_submit():\n return render_template('delete_user.html', form=form)\n \n name = request.form.get(\"name\") # string that the user passes through\n\n if not User.query.filter(User.username == name).count():\n flash(f'User \"{name}\" does not exist')\n return render_template('delete_user.html', form=form)\n\n user = User.query.filter(User.username == name).first() # finding the matching username\n\n db.session.delete(user)\n db.session.commit()\n flash(f'User \"{name}\" deleted')\n return redirect ('/')\n\n\n## ******************* ACCESSING PRODUCTS - FRONTEND ******************* ##\n\n# list all products available\n@app.route('/products')\n@login_required\ndef list_products():\n 
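# build a category -> products mapping in one pass for the template to render\n 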
products_by_category = {\n category: Product.query.filter(Product.category == category)\n for category in Category.query.all()\n }\n return render_template('products.html', categories=Category.query.all(), products_by_category=products_by_category)\n\n# searching for products in a category\n@app.route('/nproducts_for_category', methods=['GET','POST'])\ndef nproducts_for_category():\n if (request.method == 'GET'):\n return render_template('nproducts_for_category.html', categories=Category.query.all())\n else:\n searched_category_id = request.form.get('category_id')\n products = Product.query.filter(Product.category_id == searched_category_id).all()\n return render_template('nproducts_for_category_results.html', products=products, categories=Category.query.all())\n\n\n## ************ CREATING PRODUCTS, CATEGORIES AND POSTS******* ##\n\n# reading and creating new products in the database\n@app.route('/new_product', methods=['GET', 'POST'])\ndef new_product():\n form = NewProductForm()\n if not form.validate_on_submit():\n return render_template('new_product.html', form=form)\n\n category = form.category.data.strip()\n name = form.name.data.strip()\n price = int(form.price.data)\n product_des = form.product_des.data.strip()\n\n if Product.query.filter(Product.name == name).count():\n flash(f'Error: {name} already exists')\n return render_template('new_product.html', form=form)\n\n prod_cat = Category.query.filter(Category.name == category).first()\n\n if not Category.query.filter(Category.name == category).count():\n flash(f'Error: {name} needs existing category {category}')\n return render_template('new_product.html', form=form)\n\n product = Product(category=prod_cat, name=name, product_des=product_des, price=price)\n db.session.add(product)\n db.session.commit()\n flash(f'New product {name} created')\n return redirect('/products')\n\n\n# reading and creating new categories in the database\n@app.route('/new_category', methods=['GET', 'POST'])\ndef new_category():\n form = NewCategoryForm()\n if not form.validate_on_submit():\n return render_template('new_category.html', form=form)\n\n name = form.name.data.strip()\n\n if Category.query.filter(Category.name == name).count():\n flash(f'Error: {name} already exists')\n return render_template('new_category.html', form=form)\n\n category = Category(name=name)\n db.session.add(category)\n db.session.commit()\n flash(f'New category {name} created')\n return redirect('/products')\n\n# reading and creating new posts in the database\n@app.route('/new_post', methods=['GET', 'POST'])\n@login_required\ndef new_post():\n form = PostForm()\n if not form.validate_on_submit():\n return render_template('new_post.html', form=form)\n\n title = form.title.data.strip()\n content=form.content.data.strip()\n\n if Post.query.filter(Post.title == title).count():\n flash(f'Error: {title} already exists')\n return render_template('new_post.html', form=form)\n\n post = Post(title=title, content=content)\n db.session.add(post)\n db.session.commit()\n flash(f'New Post {title} created')\n return redirect('/')\n\n\n## ********** DELETING POSTS, CATEGORIES AND PRODUCT ENTRIES FROM DB ************ ##\n#Deleting existing products\n\n@app.route('/delete_product', methods=['GET','POST'])\ndef remove_product():\n form = RemoveProduct()\n\n if (request.method == 'GET'):\n return render_template('delete_product.html', products=Product.query.all(),form=form)\n\n else:\n if not form.validate_on_submit():\n return render_template('delete_product.html', form=form)\n\n # get prod. 
name submitted in form\n name = request.form.get(\"name\")\n print(name)\n\n # querying the database for the same product name\n if not Product.query.filter(Product.name == name).count():\n flash(f'Product \"{name}\" does not exist')\n return render_template('delete_product.html', form=form)\n\n prod = Product.query.filter(Product.name == name).one()\n \n # deleting from database\n db.session.delete(prod)\n db.session.commit()\n \n flash(f'Product \"{name}\" deleted')\n return redirect ('/products')\n\n\n# Deleting existing categories\n@app.route('/delete_category', methods=['GET', 'POST'])\n@login_required\ndef remove_category():\n form = RemoveCategory()\n\n if (request.method == 'GET'):\n return render_template('delete_category.html', categories=Category.query.all(),form=form)\n\n else:\n if not form.validate_on_submit():\n return render_template('delete_category.html', form=form)\n \n name = request.form.get(\"name\") # string that the user passes through\n\n if not Category.query.filter(Category.name == name).count():\n flash(f'Category \"{name}\" does not exist')\n return render_template('delete_category.html', form=form)\n\n category = Category.query.filter(Category.name == name).first() # finding the first thing matching category name\n\n db.session.delete(category)\n db.session.commit()\n flash(f'Category \"{name}\" deleted')\n return redirect ('/products')\n\n#Deleting Posts\n@app.route('/delete_post', methods=['GET','POST'])\ndef remove_post():\n form = RemovePost()\n if (request.method == 'GET'):\n return render_template('delete_post.html', posts=Post.query.all(),form=form)\n\n if not form.validate_on_submit():\n return render_template('delete_post.html', form=form)\n \n title = request.form.get(\"title\") # string that the user passes through\n\n post = Post.query.filter(Post.title == title).first() # finding the first post/row with matching title\n\n db.session.delete(post)\n db.session.commit()\n flash(f'Post \"{title}\" deleted')\n return redirect ('/products')\n\n\n## ******************* USER LOGOUT **************** ##\n\n# logging out of a session\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n## ******************* SEARCH **************** ##\n\n@app.route('/search', methods=('GET','POST'))\ndef search():\n if (request.method == 'GET'):\n return render_template ('search.html')\n else:\n searched_product = request.form.get('name')\n products = Product.query.filter(Product.name.ilike(f\"%{searched_product}%\")).all()\n return jsonify([product.as_dict() for product in products])\n\n\n## ************* POSTMAN *******************##\n\n\n#Updating the product\n@app.route('/new_product_update', methods=['PUT'])\ndef update(): \n \n # FOR READING DATA THROUGH POSTMAN\n # will have to read inputs in JSON\n request_data = request.get_json() \n \n name_to_update = None\n price_to_update = None\n product_des_to_update = None\n\n #if the user has submitted all this to be updated\n if request_data:\n if 'name' in request_data:\n name_to_update = request_data['name']\n\n if 'price' in request_data:\n price_to_update = int(request_data['price'])\n\n if 'product_des' in request_data:\n product_des_to_update = request_data['product_des']\n\n result = db.session.query(Product).filter(Product.name == name_to_update).count()\n if result < 1:\n return f\"{name_to_update} doesnt exist so update aborted\"\n\n prod = db.session.query(Product).filter(Product.name == name_to_update)\n try:\n prod = prod.update( # reassignment of updated product\n {\n 
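# note: fields absent from the request body remain None and are written back as None\n 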
\"name\": name_to_update,\n \"price\": price_to_update,\n \"product_des\": product_des_to_update\n }, synchronize_session = False\n )\n except Exception as exc:\n return(f\"Error: {exc}\")\n\n db.session.commit()\n flash(f'Product {name_to_update} edited')\n\n return(f'{name_to_update} has been updated')\n # return redirect('/products')\n\n\n@app.route('/allproducts', methods=['GET'])\ndef showproducts():\n products = Product.query.all()\n return jsonify([product.as_dict() for product in products])\n\n# @app.route('/users')\n# def list_users():\n# for user in User.query.all():\n# print(user)\n# return \"users :)\"","sub_path":"new/05/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"423095965","text":"#9_3.py\n#Author Siddharth Srinivasan\nfrom random import random\ndef main():\n probA, probB, n = getInputs()\n winsA, winsB = simNGames(n, probA, probB)\n printSummary(winsA, winsB)\n \ndef getInputs():\n a = eval(input(\"What is the probability team A wins a serve: \"))\n b = eval(input(\"What is the probability team B wins a serve: \"))\n c = eval(input(\"How many games: \"))\n return a,b,c\n \ndef simNGames(n,probA,probB):\n winsA = winsB = 0\n \n for i in range(n):\n if i%2 == 0: serving = \"A\"\n else:\n serving = \"B\"\n scoreA, scoreB = simOneGame(probA,probB,serving) \n if scoreA > scoreB:\n winsA = winsA + 1\n else:\n winsB = winsB + 1\n \n return winsA, winsB\n \n \ndef simOneGame(probA,probB,serving):\n scoreA = 0\n scoreB = 0\n while not gameOver(scoreA,scoreB):\n if serving ==\"A\":\n if random() < probA:\n scoreA = scoreA + 1\n else:\n serving = \"B\"\n else:\n if random() < probB:\n scoreB = scoreB + 1\n else:\n serving = \"A\"\n \n return scoreA, scoreB\n \ndef gameOver(a,b):\n return ((a >= 15) and (a-b >= 2)) or ((b >= 15) and (b-a >= 2))\n \ndef printSummary(winsA,winsB):\n n = winsA + winsB\n print(\"Games simulated:\",n)\n print(\"Wins for team A: {0} ({1:0.1%})\".format(winsA, winsA/n))\n print(\"Wins for team B: {0} ({1:0.1%})\".format(winsB, winsB/n))\n\n\nmain()\n","sub_path":"week9/9_3.py","file_name":"9_3.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"151871412","text":"from fastapi import FastAPI, Request\nfrom fastapi.responses import JSONResponse\nfrom tortoise.contrib.fastapi import register_tortoise\nfrom config.db import db_config\nfrom fastapi.exceptions import HTTPException\nimport routers\n\napp = FastAPI(name=\"Currency Exchanger\")\n\nregister_tortoise(\n app=app,\n modules={\"models\": [\"models\"]},\n config=db_config,\n generate_schemas=True,\n add_exception_handlers=True,\n)\n\napp.include_router(routers.router)\n\n\n@app.exception_handler(HTTPException)\nasync def unicorn_exception_handler(request: Request, exc: HTTPException):\n return JSONResponse(\n status_code=exc.status_code,\n content={\"message\": exc.detail},\n )\n","sub_path":"hurb_trial/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"9166660","text":"import pandas as pd\nimport numpy as np\nfrom xgboost import XGBRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom genetic_selection import GeneticSelectionCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import 
mean_squared_error\nfrom sklearn.model_selection import KFold\nfrom termcolor import colored\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.decomposition import PCA\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsRegressor\n\n\ndef greedy_feature_selection(data, model, features, target):\n best_Score = []\n for i in range(1, 2 ** len(features)):\n b = bin(i)[2:]\n b = '0' * (len(features) - len(b)) + b\n\n f = [features[i] for i in range(len(features)) if b[i] == '1']\n s = score(model, data, f, target)\n print(f, colored(s, 'green'))\n best_Score.append((f, s))\n\n best_Score.sort(key=lambda x: x[1])\n print(best_Score[0])\n\n\ndef fill_item_weight(data, helper):\n weight_nan = data[pd.isnull(data['Item_Weight'])]\n for i in weight_nan.index:\n data.set_value(i, 'Item_Weight',\n helper[helper['Item_Identifier'] == data.iloc[i]['Item_Identifier']]['Item_Weight'].values)\n\n\ndef show_outlier(X, y):\n detector = IsolationForest(n_estimators=200, max_features=11)\n detector.fit(X, y)\n pca = PCA(n_components=3)\n x = pca.fit_transform(X, y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n label = detector.predict(X)\n ax.scatter(x[:, 0][label == -1], x[:, 1][label == -1], x[:, 2][label == -1], c='r')\n ax.scatter(x[:, 0][label == 1], x[:, 1][label == 1], x[:, 2][label == 1], c='b')\n plt.show()\n\n\ndef get_oulier_index(X, y):\n detector = IsolationForest(n_estimators=200, max_features=len(X[0]))\n detector.fit(X, y)\n return detector.predict(X)\n\n\ndef score(model, data, features, target):\n time = 3\n kfold = KFold(time, shuffle=True)\n score = 0\n\n for train_index, test_index in kfold.split(data[features].values):\n X_train, X_test = data[features].values[train_index], data[features].values[test_index]\n y_train, y_test = data[target].values[train_index], data[target].values[test_index]\n model.fit(X_train, y_train)\n score += np.sqrt(np.sum(np.power((y_test - model.predict(X_test)), 2)) / len(y_test))\n\n return score / time\n\n\ndef best_cols(cols, data, model, time, target, n_pop=700):\n genes = []\n for col in cols:\n genes.append(([col], score(model, data, [col], target)))\n\n for i in range(n_pop):\n print(i)\n selection_size = np.random.randint(1, len(cols))\n f = []\n fea = cols\n for j in range(selection_size):\n element = fea[np.random.randint(0, len(fea))]\n f.append(element)\n f = list(set(f))\n genes.append((f, score(model, data, f, target)))\n\n for i in range(time):\n print('Generation', i, 'created...')\n genes.sort(key=lambda x: x[1])\n print('gene0', genes[0][0])\n print(genes[0][1])\n\n first_gene = genes.pop(0)\n second_gene = genes.pop(1)\n if len(first_gene[0]) > 1 and len(second_gene[0]) > 1:\n cross_over_first_index = np.random.randint(1, len(first_gene))\n cross_over_second_index = np.random.randint(1, len(second_gene))\n\n new_gen1 = list(set(first_gene[0][:cross_over_first_index] + second_gene[0][cross_over_second_index:]))\n new_gen2 = list(set(first_gene[0][cross_over_first_index:] + second_gene[0][:cross_over_second_index]))\n\n genes.append((new_gen1, score(model, data, new_gen1, target)))\n genes.append((new_gen1, score(model, data, new_gen2, target)))\n else:\n print(colored('complex gen created', 'green'))\n new_gen = list(set(first_gene[0] + second_gene[0]))\n genes.append((new_gen, score(model, data, new_gen, target)))\n\n genes.append(first_gene)\n genes.append(second_gene)\n\n\ndef random_best_cols(cols, data, model, target, n_pop=8000):\n genes = []\n 
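# draw random feature subsets, score each with K-fold RMSE, and report the best one\n 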
for i in range(n_pop):\n\n selection_size = np.random.randint(1, len(cols))\n f = []\n fea = cols\n for j in range(selection_size):\n element = fea[np.random.randint(0, len(fea))]\n f.append(element)\n f = list(set(f))\n print(i, len(f))\n genes.append((f, score(model, data, f, target)))\n genes.sort(key=lambda x: x[1])\n print(genes[0])\n\n\ndata = pd.read_csv('com_train.csv')\nhelper = pd.read_csv('../data/one_hot/one_hot_train.csv')\nfill_item_weight(data, helper)\nreg1 = ['Item_Fat_Content', 'Item_MRP', 'Outlet_Establishment_Year', 'Outlet_Location_Type', 'Outlet_Type', 'NC']\none_hot_cols = ['Item_Weight', 'Item_Visibility', 'Item_MRP',\n 'Outlet_Establishment_Year', 'FD', 'NC', 'DR', 'Outlet_Type_3.75', 'Outlet_Type_1.36',\n 'Outlet_Type_0.21', 'Outlet_Type_2.67', 'Outlet_Location_Type_0', 'Outlet_Location_Type_1',\n 'Outlet_Location_Type_2', 'Outlet_Size_1', 'Outlet_Size_0', 'Outlet_Size_2', 'Item_Fat_Content_0',\n 'Item_Fat_Content_1']\ntarget = 'Item_Outlet_Sales'\nX, y = data[reg1].values, data[target].values\nxgb = RandomForestRegressor(n_estimators=200, max_depth=5)\nxgb.fit(X, y)\ntest = pd.read_csv('com_test.csv')\nsub = pd.read_csv('../data/first_com/sub1.csv')\nsub['Item_Outlet_Sales'] = xgb.predict(test[reg1].values)\nsub.to_csv('sub2.csv', index=False)\n","sub_path":"code/model_selection.py","file_name":"model_selection.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"624864455","text":"from google.auth.transport import requests\nfrom google.oauth2 import id_token\n\n\n# Receive the request from the front end and read the id_token (idToken) from the authorization header.\n# Parse the received token and check whether it is valid.\ndef validate_token(function):\n def validate(root, info, **kwargs):\n print(kwargs)\n # The token is received in the form 'Bearer token...'.\n authorization = info.context.headers['authorization']\n # Error handling for an empty authorization header\n if authorization == '':\n raise ValueError('401 Unauthorized')\n token_type = authorization[:6]\n if token_type != 'Bearer':\n raise ValueError('not Bearer token!')\n token = authorization[7:]\n # print(token)\n try:\n # Specify the CLIENT_ID of the app that accesses the backend:\n id_info = id_token.verify_oauth2_token(token, requests.Request())\n # print(id_info)\n # Or, if multiple clients access the backend server:\n # id_info = id_token.verify_oauth2_token(token, requests.Request())\n # if id_info['aud'] not in [CLIENT_ID_1, CLIENT_ID_2, CLIENT_ID_3]:\n # raise ValueError('Could not verify audience.')\n\n # If auth request is from a G Suite domain:\n # if id_info['hd'] != GSUITE_DOMAIN_NAME:\n # raise ValueError('Wrong hosted domain.')\n\n # ID token is valid. Get the user's Google Account ID from the decoded token.\n # userid = id_info['sub']\n\n # Attach the email from the verified token to the context user\n info.context.user.email = id_info['email']\n # kwargs['login_user_email'] = id_info['email']\n return function(root, info, **kwargs)\n except ValueError:\n raise ValueError('token is invalid')\n return validate\n","sub_path":"samples/api/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"596537376","text":"################################################################################\r\n# File : 3DPrintMenu.py\r\n# Author : Tony Kamenick\r\n# Date : 6/6/06 \r\n# \r\n# Description : Adds a 3-D printer menu to the PyMOL tcl/tk GUI window. 
It\r\n# contains options to create struts and export VRML files\r\n###############################################################################\r\n\r\nfrom .build_struts import *\r\nimport tkinter.filedialog\r\nfrom pymol import cmd\r\n\r\ndef __init__(self):\r\n self.menuBar.addmenu('3-D Printer','3-D Printer')\r\n \r\n self.menuBar.addmenuitem('3-D Printer', 'command',\r\n 'Manual Strut Insertion',label='Insert Strut Between Current Selection',\r\n command = lambda s=self : ManualStrutsDialog(s))\r\n\r\n self.menuBar.addmenuitem('3-D Printer', 'command',\r\n 'Build Struts (CA)',\r\n label='Build Struts (CA)',\r\n command = lambda s = self: buildStrutsDialog(s,'CA'))\r\n\r\n self.menuBar.addmenuitem('3-D Printer', 'command',\r\n 'Build Struts (P)',\r\n label='Build Struts (P)',\r\n command = lambda s = self: buildStrutsDialog(s,'P'))\r\n\r\n self.menuBar.addmenuitem('3-D Printer', 'command',\r\n 'Export VRML',label='Export VRML',\r\n command = lambda s=self : _exportVRML(s))\r\n \r\n \r\n\r\ndef _exportVRML(app):\r\n file = tkinter.filedialog.asksaveasfilename(filetypes=[('VRML 2 WRL File','*.wrl')])\r\n if len(file) > 0:\r\n cmd.save(file)\r\n print (\"file exported as \"+file)\r\n\r\n\r\n","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"31634628","text":"\"\"\"Added approved_amount field to sample_request table\n\nRevision ID: 797dc311403e\nRevises: 684d1b21992b\nCreate Date: 2020-06-10 14:29:33.429454\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '797dc311403e'\ndown_revision = '684d1b21992b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('sample_request', sa.Column('approved_amount', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('sample_request', 'approved_amount')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/797dc311403e_added_approved_amount_field_to_sample_.py","file_name":"797dc311403e_added_approved_amount_field_to_sample_.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"618987069","text":"#!/usr/bin/env python3\nimport json, requests, time, sys, socket\n\nclass cloudFlare(object):\n def __init__(self):\n self.settings = {}\n self.currentIP = \"\"\n self.publicIP = \"\"\n\n def getPayload(self):\n payload = {'X-Auth-Email': self.settings['emailAddress'], 'X-Auth-Key': self.settings['apiKey'], 'Content-Type': 'application/json'}\n return payload\n\n def convertJson(self, response):\n data = json.loads(response.text)\n return data\n\n def getIP(self):\n self.publicIP = requests.get(\"http://ipv4bot.whatismyipaddress.com\").text\n return self.publicIP\n\n def getZoneID(self, domain):\n response = requests.get(\"https://api.cloudflare.com/client/v4/zones?name=\" + domain + \"&status=active&page=1&per_page=20&order=status&direction=desc&match=all\", headers=self.getPayload())\n convertedJson = self.convertJson(response)\n zoneID = convertedJson['result'][0]['id']\n return zoneID\n\n def getDnsRecord(self, domain, domainUpdate):\n response = requests.get(\"https://api.cloudflare.com/client/v4/zones/\" + self.getZoneID(domain) + \"/dns_records?type=A&name=\" + domainUpdate + \"&per_page=20&order=type&direction=desc&match=all\", headers = self.getPayload())\n convertedJson = self.convertJson(response)\n self.currentIP = convertedJson['result'][0]['content']\n DnsRecord = convertedJson['result'][0]['id']\n return DnsRecord\n\n def logger(self, success, domain, checked):\n if self.settings['logging'][0] == True:\n curTime = time.strftime(\"%H:%M:%S\")\n curDate = time.strftime(\"%d/%m/%Y\")\n if success == True:\n with open(self.settings['logging'][1], \"a\") as log:\n log.write(\"[\" + curDate + \" \" + curTime + \"] \" + domain + \" successfully updated with the IP \" + self.getIP() + \"\\n\")\n if success == False:\n with open(self.settings['logging'][1], \"a\") as log:\n log.write(\"[\" + curDate + \" \" + curTime + \"] \" + domain + \" FAILED to updated with the IP \" + self.getIP() + \"\\n\")\n\n if checked == True:\n with open(self.settings['logging'][1], \"a\") as log:\n log.write(\"[\" + curDate + \" \" + curTime + \"] Checked domains, currently updated. 
\" + \"\\n\")\n sys.exit()\n\n else:\n return\n\n def checkDomains(self):\n for domain in self.settings['domain']:\n for subdom in self.settings['domain'][domain]:\n if socket.gethostbyname(subdom) == self.getIP():\n self.logger(\"\", \"\", checked = True)\n sys.exit()\n\n def update(self):\n if self.checkDomains() == True:\n sys.exit()\n\n if len(self.settings['apiKey']) <= 0:\n print(\"ERROR - No API Key Provided!\")\n sys.exit()\n if len(self.settings['emailAddress']) <= 0:\n print(\"ERROR - No email address Provided!\")\n sys.exit()\n\n if self.settings['logging'][0] == True:\n if len(self.settings['logging'][1]) <= 0:\n print(\"ERROR - Logging enabled however no log location specified!\")\n sys.exit()\n\n for i in self.settings['domain']:\n domain = i\n for domainUpdate in self.settings['domain'][i]:\n data = {\"id\":self.getDnsRecord(domain, domainUpdate),\"type\":\"A\",\"name\":domainUpdate,\"content\":self.getIP(),\"ttl\":120,\"locked\":\"false\",\"zone_id\":self.getZoneID(domain),\"zone_name\":domain,\"data\":{}}\n updating = requests.put(\"https://api.cloudflare.com/client/v4/zones/\" + self.getZoneID(domain) + \"/dns_records/\" + self.getDnsRecord(domain, domainUpdate), headers=self.getPayload(), json=data)\n if updating.status_code == 200:\n self.logger(True, domainUpdate, False)\n else:\n print(\"Failure\")\n\napp = cloudFlare()\napp.settings = {\n \"apiKey\":\"\",\n \"emailAddress\": \"\",\n \"domain\":{\"\":['', '']\n },\n \"logging\": [True, \"/var/log/CFDNS.log\"],\n}\napp.update()\n","sub_path":"CloudDNSUpdate.py","file_name":"CloudDNSUpdate.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"278663418","text":"from contact import Contact\n\nclass CRM:\n\n def main_menu(self):\n while True: # repeat indefinitely\n self.print_main_menu()\n user_selected = int(input())\n self.call_option(user_selected)\n \n def print_main_menu(self):\n print('[1] Add a new contact')\n print('[2] Modify an existing contact')\n print('[3] Delete a contact')\n print('[4] Display all the contacts')\n print('[5] Search by attribute')\n print('[6] Exit')\n print('Enter a number: ')\n \n \n def call_option(self, user_selected):\n if user_selected == 1:\n self.add_new_contact()\n elif user_selected == 2:\n self.modify_existing_contact()\n elif user_selected == 3:\n self.delete_contact()\n elif user_selected == 4:\n self.display_all_contacts()\n elif user_selected == 5:\n self.search_by_attribute()\n elif user_selected == 6:\n exit()\n # Finish off the rest for 3 through 6\n # To be clear, the methods add_new_contact and modify_existing_contact\n # don't do anything yet\n \n \n def add_new_contact(self):\n # get all the required info from our user:\n print('Enter First Name: ')\n first_name = input()\n\n print('Enter Last Name: ')\n last_name = input()\n\n print('Enter Email Address: ')\n email = input()\n\n print('Enter a Note: ')\n note = input()\n\n contact = Contact.create(\n first_name=first_name,\n last_name=last_name,\n email=email,\n note=note\n )\n\n @classmethod\n def modify_existing_contact(cls):\n print('Enter ID: ')\n id = input()\n contact = Contact.get_by_id(id)\n print(contact)\n print('please enter an attribute to change')\n n_attribute = input()\n print('please enter a new value')\n n_value = input()\n setattr(contact, n_attribute, n_value)\n contact.save()\n return contact.update\n \n \n\n def delete_contact(self):\n print('Please enter ID of contact to delete: ')\n id = input()\n 
contact = Contact.get_by_id(id)\n        contact.delete_instance()\n    \n    \n    def display_all_contacts(self):\n        for contact in Contact.select():\n            print(contact.full_name())\n    \n    def search_by_attribute(self):\n        print('Which attribute would you like to search by?: ')\n        attribute = input()\n        print('Which value would you like to search by?: ')\n        value = input()\n        #show_contact = Contact.find_by(attribute, value)\n        #print(show_contact)\n        contact = None\n        if attribute == 'first_name':\n            contact = Contact.select().where(Contact.first_name == value)\n            for row in contact:\n                print(row.first_name)\n        if attribute == 'last_name':\n            contact = Contact.select().where(Contact.last_name == value)\n            for row in contact:\n                print(row.last_name)\n        print('-->',contact)\n\n\n\n\na_crm_app = CRM()\na_crm_app.main_menu()","sub_path":"crm.py","file_name":"crm.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"436950531","text":"import sys\r\nfrom heapq import *\r\n\r\ndef solve(case):\r\n    h = []\r\n    res = max(case)\r\n    for p in case:\r\n        heappush(h, (-p, 1, p))\r\n    \r\n    i = 1\r\n    while i < res:\r\n        a,b,c = heappop(h)\r\n        b += 1\r\n        a = -((c+b-1)//b)\r\n        heappush(h, (a,b,c))\r\n        nres = -h[0][0] + i\r\n        if nres < res:\r\n            res = nres\r\n        i += 1\r\n\r\n    return res\r\n\r\ndata = list(sys.stdin)[1:]\r\nncases = len(data) // 2\r\n\r\nfor caseIndex in range(ncases):\r\n    case = list(map(int, data[2*caseIndex+1].rstrip().split(' ')))\r\n    answer = solve(case)\r\n    print('Case #{0}: {1}'.format(caseIndex+1, answer))\r\n\r\n\r\n","sub_path":"solutions_5686275109552128_1/Python/pivizz/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"52899832","text":"#NAME: AANYU DEBORAH ODUMAN\r\n#COURSE: BACHELOR OF SCIENCE IN COMPUTER ENGINEERING\r\n#REG NUMBER: 16/U/2070\r\n#STUDENT NUMBER: 216002109\r\n\r\nprint('we shall print your date of birth')\r\nimport calendar\r\nday=int(input('enter your day of birth: '))\r\nmonth=int(input('enter your month of birth: '))\r\nyear=int(input('enter your year of birth: '))\r\ndebbie=calendar.weekday(year, month, day)\r\naanyu={0:'monday', 1:'tuesday', 2:'wednesday', 3:'thursday', 4:'friday', 5:'saturday', 6:'sunday'}\r\nprint('your day of birth is',aanyu[debbie])\r\n","sub_path":"date of birth.py","file_name":"date of birth.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"954063","text":"fin = open('C.in', 'r')\r\nfout = open('file.out', 'w')\r\nn = fin.readline()\r\n \r\nfor caseNo in range(int(n)):  # renamed from 'l', which the inner loops shadowed\r\n    j,p,s,k = map(int,fin.readline().split())\r\n    #print j,p,s,k\r\n    if k >= s:\r\n        fout.write(\"Case #%d: %d\\n\"%(caseNo+1,j*p*s))\r\n        for i in range(j):\r\n            for l in range(p):\r\n                for m in range(s):\r\n                    fout.write(\"%d %d %d\\n\"%(i+1,l+1,m+1))\r\n    else:\r\n        #print 'ping'\r\n        fout.write(\"Case #%d: %d\\n\"%(caseNo+1,j*p*k))\r\n        for i in range(j):\r\n            for l in range(p):\r\n                for m in range(k):\r\n                    fout.write(\"%d %d %d\\n\"%(i+1,l+1,(i + l + m+1)%s+1))\r\n    \r\n    \r\nfin.close()\r\nfout.close()\r\n","sub_path":"solutions_5708921029263360_1/Python/Ardem/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"438991965","text":"#!/usr/bin/python\n\nimport io\nimport re\n\n# Expressions to be tested\nexpr = (\n
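    # Each row below is (should_succeed, expression, expected_form): True rows become\n    # positive tests and False rows expect an error; expected_form is a fully\n    # parenthesized rewrite used as the oracle when given (None means the expression\n    # string can be evaluated verbatim).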
( True , \"0.0 + 0.0\" , None ),\n ( True , \"1.0 - 1.0\" , None ),\n ( True , \"4.5 - -3.0\" , \"4.5 - (-3.0)\" ),\n ( True , \"+7.5 * -4.0\" , \"(+7.5) * (-4.0)\" ),\n ( True , \"--3.4 + 2.3\" , \"(-(-3.4)) + 2.3\" ),\n ( True , \"2.0 ** 3.5 ** 4.0\" , \"2.0 ** (3.5 ** 4.0)\" ),\n ( True , \"2.0 ^ 3.5 ^ 4.0\" , \"2.0 ** (3.5 ** 4.0)\" ),\n ( True , \"1.2 + 1.4 * 1.5 - 4.5\" , None ),\n ( True , \"(1.2 + 1.4) * 1.5 - 4.5\" , None ),\n ( True , \"(1.2 + 1.4) * (1.5 - 4.5)\" , None ),\n ( True , \"((((1.2 + 1.4)))) * 1.5\" , None ),\n ( True , \"Sin (0.0)\" , None ),\n ( True , \"Cos (0.0)\" , None ),\n ( False, \"1.0 / 0.0\" , None )\n)\n\n# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n# Templates\n# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n\n# Positive test\ndef tmpl_test_success(id, e):\n\n return \\\n \"\"\" procedure Test_{0:02d} (T : in out Test_Case'Class) is\n\n Result : Long_Float := Solve (\"{1}\");\n Expected : constant Long_Float := {2};\n\n begin\n\n Assert (abs (Result - Expected) <= Threshold,\n \"Test {0} failed: {1}\"\n & LF & \" Result : \" & Result'Image\n & LF & \" Expected : \" & Expected'Image);\n\n end Test_{0:02d};\\n\\n\"\"\".format(id, e[1], e[1] if e[2] == None else e[2])\n\n# Negative test\ndef tmpl_test_fail(id, e):\n\n return \\\n \"\"\" -------------\n -- Test_{0:02d} --\n -------------\n\n procedure Test_{0:02d} (T : in out Test_Case'Class) is\n\n Result : Long_Float;\n\n begin\n\n Result := Solve (\"{1}\");\n Assert (False,\n \"Test {0} failed: {1}\"\n & LF & \" Expected an error\");\n\n exception\n when others => null;\n\n end Test_{0:02d};\\n\\n\"\"\".format(id, e[1])\n\n\ndef tmpl_test(id, e):\n\n if (e[0] == True):\n return tmpl_test_success(id, e)\n else:\n return tmpl_test_fail(id, e)\n\n\ndef tmpl_register_routine(id):\n\n return \\\n \"\"\" Register_Routine (T, Test_{0:02d}'Access, \"Test {0:02d}\");\\n\"\"\".format(id)\n\n\n# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n# Main\n# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n\nwith open(\"calc-test-TEMPLATE.adb\", 'rt') as fi, \\\n open(\"calc-test.adb\" , \"wt\") as fo:\n\n # Part 1A - Copy lines until 1st insertion point\n while True:\n\n tline = fi.readline()\n match = re.search('-- S1', tline)\n if not match:\n fo.write(tline)\n else:\n break\n\n # Part 1B - Insert tests\n for k in range(0, len(expr)):\n fo.write(tmpl_test(k, expr[k]))\n\n\n # Part 2A - Copy lines until 2nd insertion point\n while True:\n\n tline = fi.readline()\n match = re.search('-- S2', tline)\n if not match:\n fo.write(tline)\n else:\n break\n\n # Part 2B - Insert test registrations\n for k in range(0, len(expr)):\n fo.write(tmpl_register_routine(k))\n\n\n # Part 3 - Copy lines until EOF\n while True:\n\n tline = fi.readline()\n if (tline != ''):\n fo.write(tline)\n else:\n break\n\nfo.close()\nfi.close()\n","sub_path":"src/test_gen/generate_tests.py","file_name":"generate_tests.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"3776888","text":"def main():\n module = AnsibleModule(argument_spec=dict(src=dict(type='path'), _original_basename=dict(type='str'), content=dict(type='str', no_log=True), dest=dict(type='path', required=True), backup=dict(type='bool', default=False), force=dict(type='bool', default=True, aliases=['thirsty']), validate=dict(type='str'), 
directory_mode=dict(type='raw'), remote_src=dict(type='bool'), local_follow=dict(type='bool'), checksum=dict()), add_file_common_args=True, supports_check_mode=True)\n src = module.params['src']\n b_src = to_bytes(src, errors='surrogate_or_strict')\n dest = module.params['dest']\n if (os.path.sep not in dest):\n dest = '.{0}{1}'.format(os.path.sep, dest)\n b_dest = to_bytes(dest, errors='surrogate_or_strict')\n backup = module.params['backup']\n force = module.params['force']\n _original_basename = module.params.get('_original_basename', None)\n validate = module.params.get('validate', None)\n follow = module.params['follow']\n remote_src = module.params['remote_src']\n checksum = module.params['checksum']\n if (not os.path.exists(b_src)):\n module.fail_json(msg=('Source %s not found' % src))\n if (not os.access(b_src, os.R_OK)):\n module.fail_json(msg=('Source %s not readable' % src))\n if os.path.isdir(b_src):\n module.fail_json(msg=('Remote copy does not support recursive copy of directory: %s' % src))\n if (module.params['mode'] == 'preserve'):\n module.params['mode'] = ('0%03o' % stat.S_IMODE(os.stat(b_src).st_mode))\n mode = module.params['mode']\n checksum_src = module.sha1(src)\n checksum_dest = None\n try:\n md5sum_src = module.md5(src)\n except ValueError:\n md5sum_src = None\n changed = False\n if (checksum and (checksum_src != checksum)):\n module.fail_json(msg='Copied file does not match the expected checksum. Transfer failed.', checksum=checksum_src, expected_checksum=checksum)\n if (_original_basename and dest.endswith(os.sep)):\n dest = os.path.join(dest, _original_basename)\n b_dest = to_bytes(dest, errors='surrogate_or_strict')\n dirname = os.path.dirname(dest)\n b_dirname = to_bytes(dirname, errors='surrogate_or_strict')\n if (not os.path.exists(b_dirname)):\n try:\n (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)\n except AnsibleModuleError as e:\n e.result['msg'] += ' Could not copy to {0}'.format(dest)\n module.fail_json(**e.results)\n os.makedirs(b_dirname)\n directory_args = module.load_file_common_arguments(module.params)\n directory_mode = module.params['directory_mode']\n if (directory_mode is not None):\n directory_args['mode'] = directory_mode\n else:\n directory_args['mode'] = None\n adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)\n if os.path.isdir(b_dest):\n basename = os.path.basename(src)\n if _original_basename:\n basename = _original_basename\n dest = os.path.join(dest, basename)\n b_dest = to_bytes(dest, errors='surrogate_or_strict')\n if os.path.exists(b_dest):\n if (os.path.islink(b_dest) and follow):\n b_dest = os.path.realpath(b_dest)\n dest = to_native(b_dest, errors='surrogate_or_strict')\n if (not force):\n module.exit_json(msg='file already exists', src=src, dest=dest, changed=False)\n if os.access(b_dest, os.R_OK):\n checksum_dest = module.sha1(dest)\n elif (not os.path.exists(os.path.dirname(b_dest))):\n try:\n os.stat(os.path.dirname(b_dest))\n except OSError as e:\n if ('permission denied' in to_native(e).lower()):\n module.fail_json(msg=('Destination directory %s is not accessible' % os.path.dirname(dest)))\n module.fail_json(msg=('Destination directory %s does not exist' % os.path.dirname(dest)))\n if ((not os.access(os.path.dirname(b_dest), os.W_OK)) and (not module.params['unsafe_writes'])):\n module.fail_json(msg=('Destination %s not writable' % os.path.dirname(dest)))\n backup_file = None\n if ((checksum_src != checksum_dest) or 
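            # NOTE (added): the islink() test below forces a fresh copy whenever the\n            # destination is a symlink, even when the checksums match, so symlinked\n            # destinations are always replaced by regular files.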
os.path.islink(b_dest)):\n if (not module.check_mode):\n try:\n if backup:\n if os.path.exists(b_dest):\n backup_file = module.backup_local(dest)\n if os.path.islink(b_dest):\n os.unlink(b_dest)\n open(b_dest, 'w').close()\n if validate:\n if (mode is not None):\n module.set_mode_if_different(src, mode, False)\n if ('%s' not in validate):\n module.fail_json(msg=('validate must contain %%s: %s' % validate))\n (rc, out, err) = module.run_command((validate % src))\n if (rc != 0):\n module.fail_json(msg='failed to validate', exit_status=rc, stdout=out, stderr=err)\n b_mysrc = b_src\n if remote_src:\n (_, b_mysrc) = tempfile.mkstemp(dir=os.path.dirname(b_dest))\n shutil.copyfile(b_src, b_mysrc)\n try:\n shutil.copystat(b_src, b_mysrc)\n except OSError as err:\n if ((err.errno == errno.ENOSYS) and (mode == 'preserve')):\n module.warn('Unable to copy stats {0}'.format(to_native(b_src)))\n else:\n raise\n module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])\n except (IOError, OSError):\n module.fail_json(msg=('failed to copy: %s to %s' % (src, dest)), traceback=traceback.format_exc())\n changed = True\n else:\n changed = False\n res_args = dict(dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed)\n if backup_file:\n res_args['backup_file'] = backup_file\n module.params['dest'] = dest\n if (not module.check_mode):\n file_args = module.load_file_common_arguments(module.params)\n res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])\n module.exit_json(**res_args)","sub_path":"Data Set/bug-fixing-5/571d971039f51f865d5516844102a203f703bfac-
-bug.py","file_name":"571d971039f51f865d5516844102a203f703bfac-
-bug.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"17465077","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\nclass profile(models.Model):\n user=models.OneToOneField(User,on_delete=models.CASCADE)\n is_active=models.BooleanField(default=False)\n authentication_code=models.CharField(max_length=200, default='', blank=True)\n date_on_birth=models.DateField(blank=True,null=True)\n\n\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"422433389","text":"# 302. Smallest Rectangle Enclosing Black Pixels\n# weekly specail\n# 2021/10/14\n#\n# Runtime: 517 ms, faster than 35.75% of Python3 online submissions for Smallest Rectangle Enclosing Black Pixels.\n# Memory Usage: 15.3 MB, less than 28.99% of Python3 online submissions for Smallest Rectangle Enclosing Black Pixels.\n\n# bfs。\n# 遍历过程中横纵坐标的最大最小值都记录下来即可。\n\nclass Solution:\n def minArea(self, image: List[List[str]], x: int, y: int) -> int:\n q = collections.deque([(x, y)])\n visited = set([(x, y)])\n min_x, max_x = x, x\n min_y, max_y = y, y\n m, n = len(image), len(image[0])\n dirs = [(-1, 0), (1, 0), (0, 1), (0, -1)]\n while q:\n x, y = q.popleft()\n for dx, dy in dirs:\n nx, ny = x + dx, y + dy\n if nx < 0 or nx >= m or ny < 0 or ny >= n or image[nx][ny] == '0' or (nx, ny) in visited:\n continue\n visited.add((nx, ny))\n q.append((nx, ny))\n min_x = min(min_x, nx)\n max_x = max(max_x, nx)\n min_y = min(min_y, ny)\n max_y = max(max_y, ny)\n return (max_x - min_x + 1) * (max_y - min_y + 1)\n","sub_path":"0302. Smallest Rectangle Enclosing Black Pixels.py","file_name":"0302. 
Smallest Rectangle Enclosing Black Pixels.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"39072066","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 9 22:09:26 2017\n\n@author: yolandatiao\n\"\"\"\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 9 21:27:22 2017\n\n@author: yolandatiao\n\"\"\"\n\n#####------------------ Import START ------------------#####\nimport os # For changing directory\nimport csv # For using csv writer\nimport string # For using string replace\nfrom astropy.io import ascii # For using ascii table to open csv\nfrom astropy.table import Table, Column, join # For using astropy table functions\nimport glob # For finding filenames under a directory\n#####------------------ Import END ------------------#####\n\n\n\n#####------------------ Config START ------------------#####\ncode_dir=\"/Volumes/Huitian/Exp174/codes\"\nwk_dir=\"/Volumes/Huitian/Exp174/1_Refseq\"\n\npeak_file=\"/Volumes/Huitian/Exp174/1_Refseq/Exp122_Exp169_GSE88987_mergedPeaks_cord.csv\"\nref_file=\"mm10_refseq_match_mg_e-100000.csv\"\n\n#####------------------ Config END ------------------#####\n\n\n\n#####------------------ Self defined function START ------------------######\nos.chdir(code_dir)\nimport fc_basic_astropy_subprocess as fc\n#####------------------ Self defined function END ------------------######\n\n\n#####------------------ Main function START ------------------#####\nos.chdir(wk_dir)\n\n###----- Slice data tables\n'''\npeak_filename_nf=fc.filenamenoformat(fc.Getfilename(peak_file))\nref_filename_nf=fc.filenamenoformat(fc.Getfilename(ref_file))\n\npeak_data=ascii.read(peak_file)\nref_data=ascii.read(ref_file)\n\npeak_data=fc.setcolnames(peak_data)\nref_data=fc.setcolnames(ref_data)\n\npeak_data.sort([\"chr\",\"start\",\"end\"])\nref_data.sort([\"chrom\",\"txStart\",\"txEnd\"])\n\npeak_data_chrlist=list(peak_data[\"chr\"])\nref_data_chrlist=list(ref_data[\"chrom\"])\n\n\npeak_data_chrset=sorted(list(set(peak_data_chrlist)))\nref_data_chrset=sorted(list(set(ref_data_chrlist)))\n\n\nfor n in xrange(0,len(peak_data_chrset)):\n chr_n=peak_data_chrset[n]\n peak_data_n_index=peak_data_chrlist.index(chr_n)\n \n if n < (len(peak_data_chrset)-1): \n chr_n1=peak_data_chrset[n+1]\n peak_data_n1_index=peak_data_chrlist.index(chr_n1)\n else:\n peak_data_n1_index=len(peak_data_chrlist)+1\n \n peak_data_chrn=peak_data[peak_data_n_index:peak_data_n1_index]\n ascii.write(peak_data_chrn,\"%s_%s.csv\"%(chr_n,peak_filename_nf), format=\"csv\", overwrite=True)\n \nfor n in xrange(0,len(ref_data_chrset)):\n chr_n=ref_data_chrset[n]\n ref_data_n_index=ref_data_chrlist.index(chr_n)\n \n if n < (len(ref_data_chrset)-1): \n chr_n1=ref_data_chrset[n+1]\n ref_data_n1_index=ref_data_chrlist.index(chr_n1)\n else:\n ref_data_n1_index=len(ref_data_chrlist)+1\n \n ref_data_chrn=ref_data[ref_data_n_index:ref_data_n1_index]\n ascii.write(ref_data_chrn,\"%s_%s.csv\"%(chr_n,ref_filename_nf), format=\"csv\", overwrite=True)\n'''\n\n\n\n###----- Find gene name for each 
peak\npeak_file=\"/Volumes/Huitian/Exp174/1_Refseq/chr_split/chr1_Exp122_Exp169_GSE88987_mergedPeaks_cord.csv\"\nref_file=\"/Volumes/Huitian/Exp174/1_Refseq/chr_split/chr1_mm10_refseq_match_mg_e-100000.csv\"\n\npeak_file_nf=fc.filenamenoformat(fc.Getfilename(peak_file))\nref_file_nf=fc.filenamenoformat(fc.Getfilename(ref_file))\nout_file_name=\"%s_ann-%s.csv\"%(peak_file_nf,ref_file_nf)\n\nref_data=ascii.read(ref_file)\nref_data=fc.setcolnames(ref_data)\nref_data_len=len(ref_data)\n\nwith open(out_file_name,\"w\") as fout:\n outwriter=csv.writer(fout, delimiter=\",\") \n \n with open(peak_file,\"r\") as fin:\n inreader=csv.reader(fin,delimiter=\",\")\n \n in_colnames=next(inreader)\n in_colnames.append(\"gene_number\")\n in_colnames.append(\"gene_name\")\n outwriter.writerow(in_colnames)\n \n row_out=[]\n for row in inreader:\n row_out=row\n row_chr=row[1]\n row_s=row[2]\n row_e=row[3]\n row_genelist=[]\n ref_data_x=[]\n for x in xrange(0, ref_data_len):\n ref_data_x=list(ref_data[x])\n if ref_data_x[1]==row_chr:\n if (ref_data_x[2]<=row_s and ref_data_x[3]>row_e):\n row_genelist.append(ref_data[0])\n \n row_out.append(len(row_genelist))\n if len(row_genelist)>0:\n row_out.append(\",\".join(row_genelist))\n else:\n row_out.append(\"NA\")\n outwriter.writerow(row_out)\n\n \n \n \n\n \n#####------------------ Main function END ------------------#####","sub_path":"codes/Func_2_refseq_slice_match_peak.py","file_name":"Func_2_refseq_slice_match_peak.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"313099446","text":"# -*- coding: utf-8 -*-\nfrom dateutil.relativedelta import relativedelta\nfrom django import forms\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.models import User\nfrom django.db.models import Sum\nfrom django.forms import ModelForm, fields_for_model, Form\nfrom django.utils.crypto import get_random_string\nfrom graphos.renderers import morris\nfrom graphos.sources.simple import SimpleDataSource\n\nfrom core.models import Customer, Employee, Branch, Account, SystemConfiguration, Manager, Cashier, Jursit, Auditor, \\\n BillType, Transaction, Card, Bill, Maintainer, Greenback, CardToCard\n\n\nclass LoginForm(ModelForm):\n username = fields_for_model(User)['username']\n password = forms.CharField(widget=forms.PasswordInput())\n\n class Meta:\n model = Employee\n fields = []\n\n def clean(self):\n cleaned_data = super(LoginForm, self).clean()\n try:\n User.objects.get(username=cleaned_data.get(\"username\"))\n except User.DoesNotExist:\n raise forms.ValidationError('Username \"%s\" Does not exist.' 
% cleaned_data.get(\"username\"))\n password = self.cleaned_data.get('password')\n username = self.cleaned_data.get('username')\n if not password or len(password) < 1:\n raise forms.ValidationError(\"password invalid\")\n\n user = authenticate(username=username, password=password)\n if user is None:\n raise forms.ValidationError(\"user is shasgool\")\n return cleaned_data\n\n\nEMPLOYEE_TYPES = (\n ('Manager', 'مدیر شعبه'),\n ('Cashier', 'صندوق دار'),\n ('Jursit', 'کارشناس حقوقی'),\n ('Auditor', 'حسابرس'),\n)\n\n\nclass EmployeeCreateForm(Form):\n button_text = \"ایجاد کارمند\"\n type = forms.ChoiceField(choices=EMPLOYEE_TYPES, label='سمت')\n\n labels = {\n 'first_name': \"نام\",\n 'last_name': \"نام خانوادگی\",\n 'sex': \"جنسیت\",\n 'birth_date': \"تاریخ تولد\",\n 'birth_place': \"محل تولد\",\n 'social_id': \"کد ملی\",\n 'address': \"آدرس\",\n 'education': \"تحصیلات\",\n 'relationship': \"وضعیت تاهل\",\n 'branch': 'شعبه'\n }\n\n def __init__(self, data=None, *args, **kwargs):\n super(EmployeeCreateForm, self).__init__(data)\n self.fields.update(fields_for_model(Employee, labels=self.labels))\n del self.fields['user']\n\n def clean(self):\n cleaned_data = super(EmployeeCreateForm, self).clean()\n print('cleaned_data is: ', cleaned_data)\n return cleaned_data\n\n def save(self):\n first_name = self.cleaned_data.get('first_name', None)\n last_name = self.cleaned_data.get('last_name', None)\n username = get_random_string(length=8)\n # password = get_random_string(length=8)\n password = username\n user = User.objects.create_user(username=username, password=password, first_name=first_name,\n last_name=last_name)\n model = {\n 'Manager': Manager,\n 'Cashier': Cashier,\n 'Jursit': Jursit,\n 'Auditor': Auditor,\n 'Maintainer': Maintainer\n }[self.cleaned_data.get('type')]\n del self.cleaned_data['type']\n\n employee = model(user=user, **self.cleaned_data)\n employee.save()\n\n return employee\n\n\nclass BranchCreateForm(ModelForm):\n button_text = \"ایجاد شعبه\"\n\n class Meta:\n model = Branch\n fields = ['name', 'address']\n labels = {\n 'name': \"نام شعبه\",\n 'address': \"آدرس شعبه\"\n }\n\n def clean(self):\n cleaned_data = super(BranchCreateForm, self).clean()\n # validate form data here!\n return cleaned_data\n\n def save(self, commit=True):\n branch = Branch(**self.cleaned_data)\n branch.save()\n return branch\n\n\nclass BillTypeCreateForm(ModelForm):\n button_text = \"ایجاد قبض\"\n\n class Meta:\n model = BillType\n fields = ['company', 'account']\n labels = {\n 'company': \"نام شرکت\",\n 'account': \"حساب بانکی مرتبط\",\n }\n\n def clean(self):\n cleaned_data = super(BillTypeCreateForm, self).clean()\n # validate form data here!\n return cleaned_data\n\n def save(self, commit=True):\n billType = BillType(**self.cleaned_data)\n billType.save()\n return billType\n\n\nclass Bill_Create_form(ModelForm):\n button_text = \"ایجاد قبض\"\n\n class Meta:\n model = Bill\n fields = ['bill_type', 'amount']\n labels = {\n 'bill_type': \"نوع قبض\",\n 'amount': \"مقدار قبض\",\n }\n\n def clean(self):\n cleaned_data = super(Bill_Create_form, self).clean()\n return cleaned_data\n\n def save(self, commit=True):\n bill = Bill(**self.cleaned_data)\n bill.save()\n return bill\n\nclass GreenbackCreateForm(ModelForm):\n button_text = \"ایجاد اسکناس\"\n\n class Meta:\n model = Greenback\n fields = ['value']\n labels = {\n 'value': \"ارزش اسکناس\",\n }\n\n def clean(self):\n cleaned_data = super(GreenbackCreateForm, self).clean()\n # validate form data here!\n return cleaned_data\n\nclass 
AccountCreateForm(ModelForm):\n button_text = \"ایجاد حساب\"\n\n class Meta:\n model = Account\n fields = ['user_type', 'real_owner']\n labels = {\n 'user_type': \"نوع کاربر\",\n \"real_owner\": \"صاحب حساب\"\n }\n\n def clean(self):\n cleaned_data = super(AccountCreateForm, self).clean()\n # validate form data here!\n return cleaned_data\n\n def save(self, commit=True):\n account = Account(**self.cleaned_data)\n account.save()\n account_number = account.account_number\n return account\n\n\nclass CustomerCreateForm(ModelForm):\n button_text = \"ایجاد مشتری\"\n\n class Meta:\n model = Customer\n fields = ['first_name', 'last_name', 'sex', 'birthday', 'father_name',\n 'social_id', 'phone_number', 'email', 'notif_type']\n labels = {\n 'first_name': \"نام\",\n 'last_name': \"نام خانوادگی\",\n 'sex': \"جنسیت\",\n 'birthday': \"تاریخ تولد\",\n 'father_name': \"نام پدر\",\n 'social_id': \"شماره ملی\",\n 'phone_number': \"شماره تلفن\",\n # 'address': \"آدرس\",\n 'email': \"آدرس ایمیل\",\n 'notif_type': \"نوع اطلاع رسانی\"\n }\n\n def clean(self):\n cleaned_data = super(CustomerCreateForm, self).clean()\n # validate form data here!\n return cleaned_data\n\n def save(self, commit=True):\n customer = Customer(**self.cleaned_data)\n customer.save()\n return customer\n\n\n\nclass SystemConfigurationForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(SystemConfigurationForm, self).__init__(*args, **kwargs)\n print(len(SystemConfiguration.objects.all()))\n instance = SystemConfiguration.objects.get()\n\n self.fields['card_production_fee'].initial = str(instance.card_production_fee)\n self.fields['cheque_production_fee'].initial = str(instance.cheque_production_fee)\n self.fields['sms_notif_fee'].initial = str(instance.sms_notif_fee)\n self.fields['card_to_card_fee'].initial = str(instance.card_to_card_fee)\n self.fields['transaction_fee'].initial = str(instance.transaction_fee)\n self.fields['atm_min_money'].initial = str(instance.atm_min_money)\n self.fields['loan_interest'].initial = str(instance.loan_interest)\n self.fields['deposit_yearly_interest'].initial = str(instance.deposit_yearly_interest)\n\n class Meta:\n model = SystemConfiguration\n fields = [\n 'card_production_fee',\n 'cheque_production_fee',\n 'sms_notif_fee',\n 'card_to_card_fee',\n 'transaction_fee',\n 'atm_min_money',\n 'loan_interest',\n 'deposit_yearly_interest',\n ]\n labels = {\n 'card_production_fee': \"هزینه‌ی صدور کارت\",\n 'cheque_production_fee': \"هزینه‌ی صدور چک\",\n 'sms_notif_fee': \"هزینه‌ی فعال‌سازی اعلام پیامک\",\n 'card_to_card_fee': \"هزینه‌ی کارت به کارت\",\n 'transaction_fee': \"هزینه‌ی تراکنش\",\n 'atm_min_money': \"مقدار کمینه‌ی پول موجود در خودپرداز\",\n 'loan_interest': \"بهره‌ی وام\",\n 'deposit_yearly_interest': \"بهره‌ی حساب سالیانه\",\n }\n\n def save(self, commit=True):\n instance = SystemConfiguration(**self.cleaned_data)\n instance.save()\n return instance\n\nREPORT_PERIOD = (\n ('Day', 'روز'),\n ('Month', 'ماه'),\n ('Year', 'سال'),\n)\n\nREPORT_TYPES = (\n ('COUNT', 'بر اساس تعداد عملیات'),\n ('VOLUME', 'بر اساس حجم نقدینگی')\n)\n\nREPORT_DOMAIN = (\n ('ALL', 'کل سامانه'),\n ('PART', 'به تفکیک شعب')\n)\n\nclass ReportForm(forms.Form):\n button_text = \"ایجاد گزارش\"\n\n period = forms.ChoiceField(choices=REPORT_PERIOD, label='واحد زمانی')\n type = forms.ChoiceField(choices=REPORT_TYPES, label='مورد گزارش')\n domain = forms.ChoiceField(choices=REPORT_DOMAIN, label='دامنه گزارش')\n begin_date = forms.DateField(label='شروع')\n end_date = forms.DateField(label='پایان')\n\n branches = 
forms.ModelMultipleChoiceField(Branch.objects.all(), label='انتخاب شعب', required=False)\n\n def clean(self):\n cleaned_data = super(ReportForm, self).clean()\n if cleaned_data[\"end_date\"] < cleaned_data[\"begin_date\"]:\n raise forms.ValidationError(\"تاریخ ورودی نامعتبر است.\")\n return cleaned_data\n return cleaned_data\n\n def save(self):\n print(\"savinggg\")\n begin_date = self.cleaned_data[\"begin_date\"]\n end_date = self.cleaned_data[\"end_date\"]\n period = self.cleaned_data[\"period\"]\n type = self.cleaned_data[\"type\"]\n domain = self.cleaned_data[\"domain\"]\n branches = self.cleaned_data[\"branches\"]\n\n years = 1 if period == \"Year\" else 0\n months = 1 if period == \"Month\" else 0\n days = 1 if period == \"Day\" else 0\n\n dates = []\n index_date = begin_date\n while index_date < end_date:\n dates.append(index_date)\n index_date = index_date + relativedelta(years=years, months=months, days=days)\n dates.append(end_date)\n\n if type == \"COUNT\":\n meta = ['تاریخ', 'تعداد واریز', 'تعداد برداشت', 'تعداد کارت به کارت']\n datas = [meta]\n if domain == \"ALL\":\n for i in range(len(dates) - 1):\n beg = dates[i]\n end = dates[i+1]\n row = [beg.strftime(\"%Y-%m-%d\"),\n Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"d\").count(),\n Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"w\").count(),\n CardToCard.objects.filter(deposit__date__gte=beg, deposit__date__lt=end).count()]\n datas.append(row)\n simple_data_source = SimpleDataSource(data=datas)\n bar_chart = morris.BarChart(simple_data_source)\n return [{'name': 'کل سامانه', 'chart': bar_chart}]\n else:\n charts = []\n for branch in branches:\n datas = [meta]\n for i in range(len(dates) - 1):\n beg = dates[i]\n end = dates[i + 1]\n print(branch.id)\n print(Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"w\", branch=branch))\n row = [beg.strftime(\"%Y-%m-%d\"),\n Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"d\", branch=branch).count(),\n Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"w\", branch=branch).count(),\n CardToCard.objects.filter(deposit__date__gte=beg, deposit__date__lt=end, deposit__branch=branch).count()]\n datas.append(row)\n simple_data_source = SimpleDataSource(data=datas)\n bar_chart = morris.BarChart(simple_data_source)\n charts.append({'name': branch.name, 'chart': bar_chart})\n return charts\n\n else:\n meta = ['تاریخ', 'حجم واریز', 'حجم برداشت']\n datas = [meta]\n if domain == \"ALL\":\n for i in range(len(dates) - 1):\n beg = dates[i]\n end = dates[i + 1]\n row = [beg.strftime(\"%Y-%m-%d\"),\n Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"d\").aggregate(Sum('amount'))['amount__sum'],\n Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"w\").aggregate(Sum('amount'))['amount__sum'],]\n datas.append(row)\n simple_data_source = SimpleDataSource(data=datas)\n bar_chart = morris.BarChart(simple_data_source)\n return [{'name': 'کل سامانه', 'chart': bar_chart}]\n else:\n charts = []\n for branch in branches:\n datas = [meta]\n for i in range(len(dates) - 1):\n beg = dates[i]\n end = dates[i + 1]\n row = [beg.strftime(\"%Y-%m-%d\"),\n Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"d\",\n branch=branch).aggregate(Sum('amount'))['amount__sum'],\n Transaction.objects.filter(date__gte=beg, date__lt=end, transaction_type=\"w\",\n branch=branch).aggregate(Sum('amount'))['amount__sum'],]\n 
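                        # NOTE (added): Sum('amount') aggregates return None when no\n                        # rows match the filter; coercing the two sums above with\n                        # (value or 0) would keep the chart data numeric for empty periods.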
datas.append(row)\n simple_data_source = SimpleDataSource(data=datas)\n bar_chart = morris.BarChart(simple_data_source)\n charts.append({'name': branch.name, 'chart': bar_chart})\n return charts\n\n","sub_path":"core/forms/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":14942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"165645105","text":"\n\n#calss header\nclass _BOASTFUL():\n\tdef __init__(self,): \n\t\tself.name = \"BOASTFUL\"\n\t\tself.definitions = [u'praising yourself and what you have done']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_boastful.py","file_name":"_boastful.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"280456626","text":"from django.db import models\nfrom django.conf import settings\n\n# Create your models here.\n\n\nclass Category(models.Model):\n category = models.CharField(max_length=50)\n\n class Meta:\n app_label = 'interviews'\n\n\nclass Question(models.Model):\n class Meta:\n app_label = 'interviews'\n question = models.CharField(max_length=200)\n category = models.ForeignKey(Category, on_delete=models.CASCADE)\n time = models.IntegerField()\n\n\nclass Interview(models.Model):\n class Meta:\n app_label = 'interviews'\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE, default=\"GUEST\")\n used_questions = models.ManyToManyField(\n Question, related_name='used_interviews')\n","sub_path":"back/interviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"168755494","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\n\nurl = \"https://en.wikipedia.org/wiki/S%26P/TSX_60\"\ndef get():\n my_rows = []\n m_obj = {}\n page = requests.get(url).content.decode()\n soup = BeautifulSoup(page, \"html.parser\")\n tables = soup.findAll(\"table\")[0]\n t_body = tables.find(\"tbody\")\n rows = t_body.findAll(\"tr\")\n for row in rows[1:]:\n my_rows.append((row.get_text().strip(\"\\n\").split(\"\\n\"))[:2])\n for item in my_rows:\n m_obj[item[1]] = item[0]+ \".TO\"\n with open(\"tsx60.json\", \"w\") as tsx:\n json.dump(m_obj, tsx)\n tsx.close()\nget()\n","sub_path":"internet/wikipedia/tsx60.py","file_name":"tsx60.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"363663058","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app1', '0010_auto_20151115_1131'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TestProgammingSection',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('code', models.FileField(upload_to=b'app1/static/app1/code_samples/')),\n ('question', models.ForeignKey(to='app1.Question')),\n ('user', models.ForeignKey(to='app1.User')),\n ],\n ),\n 
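        # NOTE (added): upload_to='app1/static/app1/code_samples/' is resolved relative\n        # to MEDIA_ROOT by FileField, so uploads land under MEDIA_ROOT, not the app's\n        # static directory this path appears to target.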
]\n","sub_path":"source/pdf-with-no-breakage/app1/migrations/0011_testprogammingsection.py","file_name":"0011_testprogammingsection.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"467643691","text":"# -*- coding: utf-8 -*-\n\"\"\"\ncrawler\n\"\"\"\n\n\nimport requests #美味的汤~~来自《爱丽丝梦游仙境》同名诗歌。化平淡为神奇(通过定位HTML标签来组织复杂的网络信息)\nfrom bs4 import BeautifulSoup\nurl = \"http://bj.xiaozhu.com/fangzi/5098280314.html\"\nwb_data = requests.get(url)\nsoup = BeautifulSoup(wb_data.text,\"lxml\")\nprint(soup)\n\n\nprint(soup.h4)#定位标签,抽丝剥茧\n\nprint(soup.h4.em)\nprint(soup.h4.em.string,\"\\n\")\n\nprint(soup.title)\nprint(soup.title.name)\nprint(soup.title.string,\"\\n\")\n\nprint(soup.head,\"\\n\")#如何快速找到心仪数据在网页中的位置呢?——chrome浏览器:检查工具是神器\n\ninfos = soup.find_all('h6')#类似于soup.tags模式\nfor info in infos:\n print(info.text)# .get_text():会把你正在处理的文档中所有的标签清除,返回一个只包含文字的字符串\nprint(len(infos),'\\n')\n\ninfos = soup.find_all({'h4','h6'})#可以传入多个标签名称组成的列表,是或的关系。\nfor info in infos:\n print(info.get_text())\nprint(len(infos),'\\n')\n\nnames = soup.find_all('a',{'class':'lorder_name'})#加入class变量,精准找数据\nfor name in names:\n print(name)\n\nnames = soup.find_all(class_='bg_box')#使用keyword技巧(PS:因为class是受保护的关键字,因此不能单独做参数名需要处理:class_)\n#类比于:names=soup.find_all('','class':'lorder_name')\nfor name in names:\n print(name)\n\nscorelist = soup.find_all(text='5分')#匹配字符段\nprint(len(scorelist))\n\n#神器再现:使用chrome检查功能找到目标数据的位置,因为.select是一个全文搜索的方法,所以结果是列表!!\ntitle = soup.select('div.con_l > div.pho_info > h4')[0].text\nscore = soup.select('li.top_bar_w2.border_right_none > em')[0].text\naddress = soup.select('div.pho_info > p > span')[0].text\nprint(address,title,score)\n\npeitao = soup.select(\"div.info_r > div.intro_item_content > ul > li\")\nfor item in peitao:\n if 's_ico_no' not in item.get('class')[0]:\n print(item.get_text())\n\n\n#一个较完整的例子:::\n# 因为是单页面,使用 select 方法获得的元素又是一个列表,那么列表中的第一个元素且也是唯一一个元素即是我们要找的信息 用 “[0]” 索引将其取出\n# 后在对其使用处理的方法,因为 beautifulsoup 的些筛选方法并不能针对列表类型的元素使用 ;)\n\ntitle = soup.select(' div.con_l > div.pho_info > h4 > em')[0].text\naddress = soup.select(' div.pho_info > p > span')[0].text # 和 get('href') 同理,他们都是标签的一个属性而已,我们只需要的到这个属性的内容即可\nprice = soup.select('#pricePart > div.day_l > span')[0].text\npic = soup.select('#curBigImage')[0].get('src') # “#” 代表 id 这个找元素其实就是找他在页面的唯一\n\nhost_name = soup.select('a.lorder_name')[0].text\nhost_gender = soup.select('div.member_pic > div')[0].get('class')[0]\n\n\n\n# 请在此处打印并观察结果\nprint(title)\nprint(address)\nprint(price)\nprint(pic)\n\nprint(host_name)\nprint(host_gender)\n\n# 根据结果观察不同性别会用不同的图标样式(class),设计一个函数进行转换\ndef print_gender(class_name):\n if class_name == 'member_girl_ico':\n return '女'\n if class_name == 'member_boy_ico':\n return '男'\n\nprint(print_gender(host_gender))\n\ndata = {\n 'title':title,\n 'address':address,\n 'price':price,\n 'pic':pic,\n 'host_name':host_name,\n 'host_gender':print_gender(host_gender)\n\n}\n\nprint(data)\n\n\n# -------------------补充------------------\n# 如何批量获取链接\npage_link = [] # <- 每个详情页的链接都存在这里,解析详情的时候就遍历这个列表然后访问就好啦~\n\n\ndef get_page_link(page_number):\n for each_number in range(1, page_number): # 每页24个链接,这里输入的是页码。range用法,例如range(1,5) #代表从1到5(不包含5)[1, 2, 3, 4]\n full_url = 'http://bj.xiaozhu.com/search-duanzufang-p{}-0/'.format(each_number)\n wb_data = requests.get(full_url)\n soup = BeautifulSoup(wb_data.text, 'lxml')\n for link in soup.select('a.resule_img_a'): # 找到这个 class 样为resule_img_a 的 a 标签即可\n page_link.append(link.get('href'))\n\n\n# 
---------------------\nget_page_link(2)\nprint(len(page_link))\n\n#如何批量获取数据\ndef get_data(page_link):\n for url in page_link:\n wb_data = requests.get(url)\n soup = BeautifulSoup(wb_data.text, 'lxml')\n title = soup.select('div.con_l > div.pho_info > h4 ')[0].get_text() #\n address = soup.select('div.pho_info > p')[0].get('title') # 和 get('href') 同理,他们都是标签的一个属性而已,我们只需要的到这个属性的内容即可\n price = soup.select('div.day_l > span')[0].text ##pricePart > div.day_l > span\n pic = soup.select('#curBigImage')[0].get('src') # “#” 代表 id 这个找元素其实就是找他在页面的唯一\n host_name = soup.select('a.lorder_name')[0].text\n host_gender = soup.select('div.member_pic > div')[0].get('class')[0]\n data = {\n 'title': title,\n 'address': address,\n 'price': price,\n 'pic': pic,\n 'host_name': host_name,\n 'host_gender': print_gender(host_gender)\n\n }\n print(data)#在这里也可以将数据按照格式装进数据库了\n\nget_data(page_link)\n\n","sub_path":"l14spider/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"338341404","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\n\ndata = pd.read_csv('D:/EclipseWorkspace/02-DataVis-5Ways/Python/cars-sample.csv')\ncolors = {'bmw':'#f09f9e', 'ford':'c4c677', 'honda':'#90d5b8', 'mercedes':'#7accf5', 'toyota':'#ec9df2'}\n\nx = data[['Weight']]\nmin_max_scaler = preprocessing.MinMaxScaler()\nx_scaled = min_max_scaler.fit_transform(x)\ndata['Bubble Size'] = (x_scaled * 200) + 10\n\ndata['Color'] = data['Manufacturer'].map(colors)\n\ngraph = data.plot.scatter(x='Weight', y='MPG', c='Color', s='Bubble Size', alpha=0.5)\n\nmarkers = [plt.Line2D([0],[0], color=color, marker='o', linestyle='', alpha=0.5) for color in colors.values()]\n\ngraph.matplotlib.pyplot.show()\n\nfig.savefig('/img/python.png')\n","sub_path":"Python/please.py","file_name":"please.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"619629737","text":"from collections import Counter\nfrom .utils import to_unicode\n''' from .value_checks import (is_a_date, is_a_number, is_a_nothing,\n is_a_latitude, is_a_longitude, is_a_coord_pair, is_a_country, is_a_city, \n is_a_state, is_a_address, is_a_text, is_a_label, is_a_zip, is_a_street, \n is_a_phone, is_a_url, is_a_email, is_a_time, is_a_currency, is_a_percent) '''\n\n# currently understands\n# category\n# datetime\n# time\n# number\n# label\n# text\n# id\n# email\n# url\n# address\n# street\n# city\n# state\n# zipcode\n# country\n# phone\n# latitude\n# longitude\n# coordinate_pair\n\n# coming soon\n# name\n\n# ordinal??? 
-- can obtain from categorical/int info...\n\nfrom .utils import prep_value\n\ndef is_a_nothing(value, header=None):\n value = prep_value(value).lower()\n\n if not value:\n return True\n \n if value in ['none','nan','null','n/a']:\n return True\n \n return False\n\n\ndef guess(column_values, header=None, for_types=None):\n types = []\n checked_types = []\n threshold = .8\n\n def should_check(column_type):\n if not for_types or column_type in for_types:\n return True\n\n return False\n\n # Check if all values are unique\n if should_check('rowlabel'):\n if len(list(set(column_values))) == len(column_values):\n types.append('rowlabel')\n\n # Get non-empty values\n values = [v for v in column_values if not is_a_nothing(v)]\n count_not_empty = len(values)\n \n # If the column is empty, we don't need to do anything else\n if count_not_empty == 0:\n types.append('unknown')\n return sorted(list(set(types)))\n \n \n def do_check(test_func):\n passes_test_count = sum([test_func(v, header=header) for v in values])\n return float(passes_test_count) / count_not_empty > threshold\n \n \n # if the column is long text, we don't need to do anything else\n if should_check('text') and do_check(is_a_text):\n types.append('text')\n return sorted(list(set(types)))\n\n\n # If the column is a date, we don't need to do anything else\n if should_check('datetime') and do_check(is_a_date):\n types.append('datetime')\n return sorted(list(set(types)))\n\n word_counts = Counter(values).items()\n largest_category_ratio = float(max([w[1] for w in word_counts])) / len(values)\n unique_value_ratio = float(len(word_counts)) / len(values)\n dot_ratio = float(sum([u'.' in to_unicode(v) for v in values])) / len(values)\n \n \n\n # periods are important to check for determining if numeric is a category\n has_dots = dot_ratio > threshold\n is_boolean = len(word_counts) == 2\n \n # See if values repeat often enough that we should count groups\n if should_check('category'):\n if is_boolean or (largest_category_ratio >= .05 and \\\n unique_value_ratio < .2 and not has_dots and len(word_counts)<50):\n \n #print(largest_category_ratio)\n #print(unique_value_ratio)\n #print(word_counts)\n \n types.append('category')\n\n\n # Check if this is some kind of ID\n id_labels = ['_id', '_ID', '-id', '-ID', 'ID', ' ID', ' id']\n is_id_header = header and any([s in header for s in id_labels])\n\n \n if ('rowlabel' in types or 'category' in types) and is_id_header:\n return sorted(list(set(types)))\n\n \n # Check for number stuff\n if not is_id_header and should_check('numeric') and \\\n do_check(is_a_number) and not is_boolean:\n \n types.append('numeric')\n \n # more number stuff\n if should_check('currency') and do_check(is_a_currency):\n types.append('currency')\n\n elif should_check('percent') and do_check(is_a_percent):\n types.append('percent')\n\n elif should_check('latitude') and do_check(is_a_latitude):\n types.append('latitude')\n\n elif should_check('longitude') and do_check(is_a_longitude):\n types.append('longitude')\n\n else:\n # string stuff\n if should_check('email') and do_check(is_a_email):\n types += ['email', 'label']\n\n elif should_check('url') and do_check(is_a_url):\n types += ['url']\n\n elif should_check('time') and do_check(is_a_time):\n types.append('time')\n\n elif should_check('coordinate') and do_check(is_a_coord_pair):\n types.append('coordinate')\n\n elif should_check('phone') and do_check(is_a_phone):\n types += ['phone']\n\n elif should_check('zip') and do_check(is_a_zip):\n types += ['zip']\n\n elif 
should_check('state') and do_check(is_a_state):\n types += ['state']\n\n elif should_check('country') and do_check(is_a_country):\n types += ['country']\n\n elif should_check('city') and do_check(is_a_city):\n types += ['city'] \n\n elif should_check('address') and do_check(is_a_address):\n types.append('address')\n\n elif should_check('street') and do_check(is_a_street):\n types += ['street']\n\n elif should_check('label') and do_check(is_a_label):\n types += ['label']\n\n \n if len(types) == 0:\n types.append('unknown')\n\n return sorted(list(set(types)))\n","sub_path":"Simon/penny/guesser.py","file_name":"guesser.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"82645628","text":"\r\nimport hashlib\r\n\r\n\r\n\"\"\"\r\nAvailable hashing algorithms in hashlib but this program is limited to some Algorithms only. : \r\nsha1(), sha224(), sha256(), sha384(), sha512(), blake2b(), and blake2s(). md5()\r\n\"\"\"\r\n\r\n\r\nstr = input(\"Enter the string/password you want to encrypt : \")\r\n\r\nn = int(input(\"\"\"Enter algorithm to implement on the input : \r\n1. SHA\r\n2.SHA256\r\n3.SHA512\r\n4.BLAKE2B\r\n5.MD5\r\n\"\"\"))\r\n\r\nif n==1:\r\n\r\n result = hashlib.sha1(str.encode())\r\n \r\n # printing the equivalent hexadecimal value.\r\n print(\"The hexadecimal equivalent of SHA is : \")\r\n print(result.hexdigest())\r\n\r\n \r\nelif n==2:\r\n result = hashlib.sha256(str.encode())\r\n \r\n # printing the equivalent hexadecimal value.\r\n print(\"The hexadecimal equivalent of SHA256 is : \")\r\n print(result.hexdigest())\r\n\r\nelif n==3:\r\n result = hashlib.sha512(str.encode())\r\n \r\n # printing the equivalent hexadecimal value.\r\n print(\"The hexadecimal equivalent of SHA512 is : \")\r\n print(result.hexdigest())\r\n\r\nelif n==4:\r\n result = hashlib.blake2b(str.encode())\r\n \r\n # printing the equivalent hexadecimal value.\r\n print(\"The hexadecimal equivalent of BLAKE2B is : \")\r\n print(result.hexdigest())\r\n\r\nelse:\r\n result = hashlib.md5(str.encode())\r\n \r\n # printing the equivalent hexadecimal value.\r\n print(\"The hexadecimal equivalent of MD5 is : \")\r\n print(result.hexdigest())\r\n","sub_path":"Encrpytionusinghashlib.py","file_name":"Encrpytionusinghashlib.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"213661898","text":"from openerp.osv import fields, osv\n\n\nclass InternalMove(osv.Model):\n\n _inherit = 'stock.picking'\n\n _columns = {\n 'devolucion': fields.date('Fecha de devolucion'),\n 'empleado': fields.many2one('hr.employee',\n string='Nombre del empleado'),\n 'es_consigna': fields.boolean('Es Consigna'),\n 'justificacion': fields.char('Justificacion', size=40),\n 'elaboro': fields.char('Persona que elaboro', size=40),\n 'estudio': fields.many2one('project.project'),\n 'observaciones': fields.text('Observaciones'),\n 'origen': fields.char(\"Origen\", size=40)\n }\n\n\nclass Empleado(osv.Model):\n\n _inherit = 'hr.employee'\n\n _columns = {\n 'nip': fields.char(\"NIP\", size=15)\n }","sub_path":"ea_jmd/movimientosinternos.py","file_name":"movimientosinternos.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"136898495","text":"import re\nimport time, datetime\nimport csv\nfrom LogItem import LogItem\n\ndef parseRequests(fileName):\n\n\tp = 
re.compile(\n\t\t'([^-]*|-) ([^ ]*) ([^ ]*) \\[([^]]*)\\] \"([^\"]*)\" ([^ ]*) ([^ ]*) \"([^\"]*)\" \"([^\"]*)\"'\n\t)\n\tout = re.compile(\n\t\t'GET /out/(\\w+)'\n\t)\n\tview = re.compile(\n\t\t'GET /view\\d*/([^ ?]*)'\n\t)\n\n\toutRequests = []\n\tviewRequests = []\n\thostDict = {}\n\n\twith open(fileName) as f:\n\t\tfor line in f:\n\t\t\tm = p.match(line)\n\t\t\tif not m:\n\t\t\t\tcontinue\n\t\t\tg1, g2, g3, g4, g5, g6, g7, g8, g9 = m.groups()\n\t\t\thost, user, date, request, status, size, refer, device = cleanData(g1, g2, g3, g4, g5, g6, g7, g8, g9)\n\t\t\t\n\t\t\tif host[0] in hostDict:\n\t\t\t\thostDict[ host[0] ] += 1\n\t\t\telse:\n\t\t\t\thostDict[ host[0] ] = 1\n\n\t\t\t#Match Reqest to Out or View\n\t\t\tm = out.match(request)\n\t\t\tif m:\n\t\t\t\tcouponId = m.groups()[0]\n\t\t\t\toutRequests.append(LogItem(host, user, date, request, status, size, refer, device))\n\t\t\t\tcontinue\n\n\t\t\tm = view.match(request)\n\t\t\tif m:\n\t\t\t\tstoreDomain = m.groups()[0]\n\t\t\t\tviewRequests.append((LogItem(host, user, date, request, status, size, refer, device, storeDomain)))\n\t\t\t\tcontinue\n\treturn outRequests, viewRequests, hostDict\n\n\ndef cleanData(g1, ignore, user, g4, request, g6, g7, refer, device):\n\thost = g1.split(\", \")\n\tdate = convertToDateTime(g4)\n\n\tstatus = int(g6)\n\t\n\tif g7 == \"-\":\n\t\tsize = 0\n\telse:\n\t\tsize = int(g7)\n\treturn host, user, date, request, status, size, refer, device\n\ndef processOutRequests(outRequests):\n\tminutes = [[0 for x in range(60)] for x in range(24)]\n\t# Zero Array\n\tfor i in minutes:\n\t\tfor j in i:\n\t\t\tj = 0\n\tfor req in outRequests:\n\t\thour = req.date.hour\n\t\tminute = req.date.minute\n\t\tminutes[hour][minute] = minutes[hour][minute] + 1\n\n\t# Calculate Min, Max, Mean\n\tminimumMinute = (0,minutes[0][0])\n\tmaximumMinute = (0,0)\n\ttotal = 0.0\n\tfor idx,m in enumerate(minutes[0]):\n\t\tif m > maximumMinute[1]:\n\t\t\tmaximumMinute = (idx, m)\n\t\tif m < minimumMinute[1]:\n\t\t\tminimumMinute = (idx, m)\n\t\ttotal += m\n\n\tmean = total / len(minutes[0])\n\n\t# Calculate Standard Deviation\n\ttotalDev = 0.0\n\tfor m in minutes[0]:\n\t\ttotalDev += (mean-m)**2\n\tstdDev = (totalDev / len(minutes[0]))**0.5\n\n\t# Calculate Median\n\tminutes[0].sort()\n\tmedianClicks = minutes[0][30]\n\n\treturn mean, medianClicks, stdDev, maximumMinute, minimumMinute\n\n\ndef processViewRequests(viewRequests, hostDict):\n\n\tsKeys = []\n\tbDict = {}\n\n\tfor req in viewRequests:\n\t\tcount = hostDict[ req.host[0] ]\n\n\t\tkey = req.storeDomain\n\n\t\tif hostDict[ req.host[0] ] == 1:\n\t\t\tbounced = 1\n\t\telse:\n\t\t\tbounced = 0\n\t\t\n\t\tif key in bDict:\n\t\t\tbDict[key] = (bDict[key][0]+bounced, bDict[key][1]+1)\n\t\telse:\n\t\t\tbDict[key] = (bounced, 1)\n\t\t\tsKeys.append(key)\n\n\treturn sKeys, bDict\n\ndef writeCSVFiles(cMean, cMedian, cStdDev, cMaximumMinute, cMinimumMinute, storeKeys, bounceDict):\n\twith open('clickStats.csv', 'w') as csvfile:\n\t\tfieldnames = [\n\t\t\t'clicksMean', 'clicksMedian', 'clicksStdDev', 'clicksMax', 'clicksMaxMinute', 'clicksMin', 'clicksMinMinute'\n\t\t]\n\t\twriter = csv.writer(csvfile)\n\t\twriter.writerow(fieldnames)\n\t\twriter.writerow(\n\t\t\t[cMean, cMedian, cStdDev, cMaximumMinute[1], cMaximumMinute[0], cMinimumMinute[1], cMinimumMinute[0]]\n\t\t)\n\n\twith open('storeBounceRates.csv', 'w') as csvfile:\n\t\twriter = csv.writer(csvfile)\n\t\tfor store in storeKeys:\n\t\t\tbounceTuple = bounceDict[store]\n\t\t\tbounceRate = float(bounceTuple[0]) / float(bounceTuple[1]) * 
100\n\t\t\twriter.writerow([store, str(bounceRate)])\n\t\t\n\n\n#\tTimeZone and Date Code taken from:\n#\thttp://www.seehuhn.de/blog/52\n#\tCode posted under Creative Commons Attribution-Share Alike 3.0 License\ndef convertToDateTime(dateString):\n\ttt = time.strptime(dateString[:-6], \"%d/%b/%Y:%H:%M:%S\")\n\ttt = list(tt[:6]) + [ 0, Timezone(dateString[-5:]) ]\n\treturn datetime.datetime(*tt)\n\nclass Timezone(datetime.tzinfo):\n\n    def __init__(self, name=\"+0000\"):\n        self.name = name\n        seconds = int(name[:-2])*3600+int(name[-2:])*60\n        self.offset = datetime.timedelta(seconds=seconds)\n\n    def utcoffset(self, dt):\n        return self.offset\n\n    def dst(self, dt):\n        return datetime.timedelta(0)\n\n    def tzname(self, dt):\n        return self.name\n\n\n\n\n#outRequests, viewRequests, hostDict = parseRequests(\"short.log\")\n#outRequests, viewRequests, hostDict = parseRequests(\"2min.log\")\n#outRequests, viewRequests, hostDict = parseRequests(\"10min.log\")\noutRequests, viewRequests, hostDict = parseRequests(\"rmn_weblog_sample.log\")\n\nclicksMean, clicksMedian, clicksStdDev, clicksMax, clicksMin = processOutRequests(outRequests)\nsKeys, bDict = processViewRequests(viewRequests, hostDict)\n\nwriteCSVFiles(clicksMean, clicksMedian, clicksStdDev, clicksMax, clicksMin, sKeys, bDict)\n\n\n\n\n\n\n\n\n","sub_path":"rmnStats.py","file_name":"rmnStats.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"605547813","text":"\"\"\"Functions used in setting up the config file are defined here.\"\"\"\n\nimport os\n\nconfig_text = '#*****************************************#\\n\\\n#*-------------config for ytmdl ----------#\\n\\\n#\\n\\\n#-----------------------------------------#\\n\\\n#------PLEASE DONT LEAVE ANY BLANK LINE---#\\n\\\n#-----------------------------------------#\\n\\\n#\\n\\\n# To change defaults just remove the hash(#)\\n\\\n# from the beginning of the line.\\n\\\n# They will otherwise be read as single line comments.\\n\\\n#\\n\\\n#*****************************************\\n\\\n# The SONG_DIR is the directory where all the songs will be saved.\\n\\\n# In order to change it, simply remove the hash from beginning\\n\\\n# And change the path to your desired one.\\n\\\n# In case the path has spaces in it, include it in a \" \"\\n\\\n# Following is a simple folder path example\\n\\\n#\\n\\\n#SONG_DIR = \"path/to/your/desired/folder\"\\n\\\n#\\n\\\n#************--------ADVANCED-------*********\\n\\\n# If you want to save the song in custom folders then those can be\\n\\\n# added to the name like the following example.\\n\\\n# The possible values are following\\n\\\n#\\n\\\n# Artist --> Song Artist\\n\\\n# Album --> Song Album Name\\n\\\n# Title --> Song Name\\n\\\n# Genre --> Song Genre\\n\\\n# TrackNumber --> Song Number in the album\\n\\\n# ReleaseDate --> Song Release date\\n\\\n#\\n\\\n# Following is an example of the format\\n\\\n#SONG_DIR = \"/home/deepjyoti30/Music$Artist->Album->Title\"\\n\\\n#\\n\\\n#*****************************************#\\n\\\n# The QUALITY is the quality of the song in kbps\\n\\\n# By default it is set to 320kbps\\n\\\n# In case you want to change it to something else,\\n\\\n# Uncomment the following line and change it\\n\\\n#\\n\\\n# Supported values are 320 and 192\\n\\\n#\\n\\\n#QUALITY = \"320\"\\n\\\n#'\n\n\nclass DEFAULTS:\n    \"\"\"Some default stuff defined.\"\"\"\n\n    # The home dir\n    HOME_DIR = os.path.expanduser('~')\n\n    # The default song dir\n    SONG_DIR = os.path.join(HOME_DIR, 'Music')\n\n    # The temp dir\n    SONG_TEMP_DIR = os.path.join(SONG_DIR, 'ytmdl')\n\n    # The default song quality\n    SONG_QUALITY = '320'\n\n    # The config path\n    CONFIG_PATH = os.path.join(HOME_DIR, '.config', 'ytmdl')\n\n\ndef make_config():\n    \"\"\"Copy the config file to .config folder.\"\"\"\n    # Remove the current config from SONG_TEMP_DIR\n    config_path = os.path.join(DEFAULTS.CONFIG_PATH, 'config')\n\n    # Check if the ytmdl folder is present in config\n    if not os.path.isdir(DEFAULTS.CONFIG_PATH):\n        # Make the ytmdl folder\n        os.makedirs(DEFAULTS.CONFIG_PATH)\n    elif os.path.isfile(config_path):\n        os.remove(config_path)\n\n    # Now write the config text to config file\n    with open(config_path, 'w') as write_config:\n        write_config.write(config_text)\n\n\ndef checkConfig():\n    \"\"\"Need to check the config to see if defaults are changed.\n\n    The config will be saved in the .config folder.\n    \"\"\"\n    # Try to see if the config is present in the SONG_TEMP_DIR\n\n    if os.path.isdir(DEFAULTS.CONFIG_PATH):\n        DIR_CONTENTS = os.listdir(DEFAULTS.CONFIG_PATH)\n    else:\n        return False\n\n    if 'config' not in DIR_CONTENTS:\n        make_config()\n        return True\n    else:\n        return True\n\n\ndef checkExistence(keyword, value):\n    \"\"\"Check if the user specified value in config is possible.\"\"\"\n    if keyword == 'SONG_DIR':\n        # In this case check if $ and -> are present\n        # If they are then only check if the base dir exists\n        if '$' in value and '->' in value:\n            pos = value.find('$')\n            value = value[:pos]\n\n        if os.path.isdir(value):\n            return True\n        else:\n            return False\n    elif keyword == 'QUALITY':\n        # Possible values that QUALITY can take\n        possQ = ['320', '192']\n\n        if value in possQ:\n            return True\n        else:\n            return False\n\n\ndef retDefault(keyword):\n    \"\"\"Return the DEFAULT value of keyword.\"\"\"\n    if keyword == 'QUALITY':\n        return DEFAULTS.SONG_QUALITY\n    elif keyword == 'SONG_DIR':\n        return DEFAULTS.SONG_DIR\n\n\ndef GIVE_DEFAULT(self, keyword):\n    \"\"\"Check if the user has uncommented the config and added something.\n\n    If possible get what is changed, else return the default value.\n    \"\"\"\n    # Check If the config is already present in SONG_TEMP_DIR\n    if not checkConfig():\n        return retDefault(keyword)\n    else:\n        # Then read from it\n        READ_STREAM = open(os.path.join(DEFAULTS.CONFIG_PATH, 'config'), 'r')\n\n        while True:\n            line = READ_STREAM.readline()\n            if not line:\n                return retDefault(keyword)\n            if line[0] != '#' and keyword in line:\n                # Get the position of =\n                index_equal = line.index('=')\n                if line[index_equal + 1] == ' ':\n                    newDEFAULT = line[index_equal + 2:]\n                else:\n                    newDEFAULT = line[index_equal + 1:]\n\n                # Remove the \"\n                newDEFAULT = newDEFAULT.replace('\"', '')\n                # Check if the line has a \\n in it\n                if \"\\n\" in line:\n                    newDEFAULT = newDEFAULT.replace('\\n', '')\n\n                if checkExistence(keyword, newDEFAULT):\n                    return newDEFAULT\n                else:\n                    return retDefault(keyword)\n\n\nif __name__ == '__main__':\n    make_config()\n    exit(0)\n","sub_path":"ytmdl/setupConfig.py","file_name":"setupConfig.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"19362890","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass LinearRegression(object):\r\n    def __init__(self, learning_rate=0.01, max_iter=100, seed=None):\r\n        \"\"\"\r\n        Constructor for the univariate linear regression class:\r\n        param learning_rate: learning rate\r\n        param max_iter: maximum number of iterations\r\n        param seed: seed for the random number generator\r\n        Initial values of w and b are sampled from a normal distribution\r\n        \"\"\"\r\n        np.random.seed(seed)\r\n        self.lr = learning_rate\r\n        self.max_iter = max_iter\r\n        self.w = np.random.normal(1, 0.1)\r\n        self.b = np.random.normal(1, 0.1)\r\n        self.loss_arr = []\r\n\r\n    def fit(self, x, y):\r\n        \"\"\"\r\n        Class method: training function\r\n        param x: independent variable\r\n        param y: dependent variable\r\n        Records the loss after every iteration\r\n        \"\"\"\r\n        for i in range(self.max_iter):\r\n            self.__train_step(x, y)\r\n            y_pred = self.predict(x)\r\n            self.loss_arr.append(self.loss(y, y_pred))\r\n\r\n    def __f(self, x, w, b):\r\n        '''\r\n        Class method: evaluate the linear regression function at x\r\n        '''\r\n        return x * w + b\r\n\r\n    def predict(self, x):\r\n        '''\r\n        Class method: prediction function\r\n        param x: independent variable\r\n        return: regression value at x\r\n        '''\r\n        y_pred = self.__f(x, self.w, self.b)\r\n        return y_pred\r\n\r\n    def loss(self, y_true, y_pred):\r\n        '''\r\n        Class method: compute the loss\r\n        param y_true: true dependent variable\r\n        param y_pred: predicted dependent variable\r\n        return: MSE loss\r\n        '''\r\n        return np.mean((y_true - y_pred) ** 2)\r\n\r\n    def __calc_gradient(self, x, y):\r\n        '''\r\n        Class method: compute the gradients with respect to w and b\r\n        '''\r\n        d_w = np.mean(2 * (x * self.w + self.b - y) * x)  # these are all vectors, effectively n-dimensional\r\n        d_b = np.mean(2 * (x * self.w + self.b - y))\r\n        return d_w, d_b\r\n\r\n    def __train_step(self, x, y):  # one iteration\r\n        '''\r\n        Class method: a single step, i.e. one gradient update per iteration\r\n        '''\r\n        d_w, d_b = self.__calc_gradient(x, y)\r\n        self.w = self.w - self.lr * d_w\r\n        self.b = self.b - self.lr * d_b\r\n        return self.w, self.b\r\n\r\ndef show_data(x, y, w=None, b=None):\r\n    plt.scatter(x, y, marker='.')\r\n    if w is not None and b is not None:\r\n        plt.plot(x, w * x + b, c='red')\r\n    plt.show()\r\n\r\n# data generation\r\nnp.random.seed(272)\r\ndata_size = 100\r\nx = np.random.uniform(low=1.0, high=10.0, size=data_size)\r\ny = x * 20 + 10 + np.random.normal(loc=0.0, scale=10.0, size=data_size)\r\nprint(x.shape,y.shape)\r\n\r\n# train / test split\r\nshuffled_index = np.random.permutation(data_size)\r\nprint(shuffled_index)\r\nx = x[shuffled_index]\r\ny = y[shuffled_index]\r\nsplit_index = int(data_size * 0.7)\r\nprint(split_index)\r\nx_train = x[:split_index]  # take the first 70 points\r\ny_train = y[:split_index]\r\nx_test = x[split_index:]  # take the remaining 30 points\r\ny_test = y[split_index:]\r\n\r\n# train the linear regression model\r\nregr = LinearRegression(learning_rate=0.01, max_iter=10, seed=0)  # create the model object\r\nregr.fit(x_train, y_train)  # note the interface: fit only takes the training set\r\nprint('w: \\t{:.3}'.format(regr.w))\r\nprint('b: \\t{:.3}'.format(regr.b))\r\nshow_data(x, y, regr.w, regr.b)\r\n\r\n# plot the evolution of cost\r\nplt.scatter(np.arange(len(regr.loss_arr)), regr.loss_arr, marker='o', c='green')\r\nplt.show()\r\n","sub_path":"第三次大作业使用numpy,pytorch实现逻辑回归/LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"640339","text":"answers = {\n    \"привет\":\"Привет!\",\n    \"как дела\":\"Отлично, а у тебя?\",\n    \"пока\":\"Еще увидимся!\",\n}\n\ndef get_answer(question, answers):\n    return answers.get(question)\n\ndef ask_user(answers):\n    while True:\n        user_input = input(\"Скажи что-нибудь: \")\n        answer = get_answer(user_input, answers)\n        print(answer)\n\n        if user_input == 'пока':\n            break\nif __name__ == \"__main__\":\n    ask_user(answers)","sub_path":"answers.py","file_name":"answers.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"37803643","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport math\n\n# START YOUR CODE HERE\nvalor=int(input('Valor do saque:'))\n# PROCESSING\na=valor//20\nb=valor-(20*a)\nc=b//10\nd=b-(10*c)\ne=d//5\nf=d-(5*e)\ng=f//2\nh=f-(2*g)\ni=h//1\nprint(a)\nprint(c)\nprint(e)\nprint(g)\nprint(i)\n\n\n\n","sub_path":"moodledata/vpl_data/309/usersdata/287/73475/submittedfiles/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"376977885","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport sys\nimport csv\nfrom numpy import genfromtxt\nrootPath = ''\narg = True\nselectFea = []\nfeaRow = 0\nfeaCol = 0\ndef stdScalar():\n    std = np.std(selectFea,axis=0)\n    mean = np.mean(selectFea,axis=0)\n    for i in range(0,feaCol):\n        if std[i] == 0:\n            selectFea[:,i] = np.zeros((feaRow)) \n        else:\n            selectFea[:,i] = (selectFea[:,i] - mean[i])/std[i]\n\n# readFile \nif arg == True:\n    df = pd.read_csv(sys.argv[5],encoding=\"big5\")\nelse:\n    df = pd.read_csv(rootPath + 'X_test',encoding=\"big5\")\nselectFea = df.as_matrix()\nselectFea = selectFea.astype(float)\nselectFea = np.concatenate((selectFea, selectFea[:,[0,1,3,4,5]]**3),axis = 1)\nfeaRow = selectFea[:,0].size\nfeaCol = selectFea[0].size \ndel df\nstdScalar()\nweight = np.reshape(genfromtxt(\"modelLogi.csv\",dtype=None,delimiter=','),(112,1))\nbias = float(weight[111])\npredict = np.dot(selectFea, weight[0:111]) + bias\n\nparse = []\nparse.append(['id','label'])\nfor k in range(0,feaRow): \n    if predict[k] >= 0:\n        parse.append([str(k+1),1])\n    else:\n        parse.append([str(k+1),0])\nif arg == True:\n    pth = sys.argv[6]\nelse:\n    pth = rootPath + 'logistic.csv'\nwith open (pth, 'w') as f:\n    writer = csv.writer(f)\n    for k in range(0,(feaRow+1)):\n        writer.writerow(parse[k])\n","sub_path":"hw2/logiSubmit.py","file_name":"logiSubmit.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"227201255","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 2 2017\n\n@author: Aiting Liu\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport data_utils\nimport multi_task_model\n\nimport subprocess\n\ntf.app.flags.DEFINE_float(\"learning_rate\", 0.05, \"Learning rate.\")\n# tf.app.flags.DEFINE_float(\"learning_rate_decay_factor\", 0.9,\n#                           \"Learning rate decays by this much.\")\ntf.app.flags.DEFINE_float(\"max_gradient_norm\", 5.0,\n                          \"Clip gradients to this norm.\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 8,\n                            \"Batch size to use during training.\")\ntf.app.flags.DEFINE_integer(\"size\", 128, \"Size of each model layer.\")\ntf.app.flags.DEFINE_integer(\"word_embedding_size\", 100, \"Size of the word embedding\")\ntf.app.flags.DEFINE_integer(\"num_layers\", 2, \"Number of layers in the model.\")\ntf.app.flags.DEFINE_integer(\"sent_vocab_size\", 300, \"max vocab Size.\")\n# tf.app.flags.DEFINE_integer(\"out_vocab_size\", 500, \"max tag vocab Size.\")\ntf.app.flags.DEFINE_float(\"alpha\", 0.5, \"slot weight.\")\ntf.app.flags.DEFINE_string(\"data_dir\", \"data\", \"Data directory\")\ntf.app.flags.DEFINE_string(\"train_dir\", \"model\", \"Training directory.\")\ntf.app.flags.DEFINE_integer(\"max_train_data_size\", 0,\n                            \"Limit on the size of training data (0: no limit).\")\ntf.app.flags.DEFINE_integer(\"steps_per_checkpoint\", 100,\n                            \"How many training steps to 
do per checkpoint.\")\ntf.app.flags.DEFINE_integer(\"max_training_steps\", 10000,\n \"Max training steps.\")\ntf.app.flags.DEFINE_integer(\"max_test_data_size\", 0,\n \"Max size of test set.\")\n# tf.app.flags.DEFINE_boolean(\"use_attention\", False,\n# \"Use attention based RNN\")\ntf.app.flags.DEFINE_integer(\"max_sequence_length\", 40,\n \"Max sequence length.\")\ntf.app.flags.DEFINE_float(\"dropout_keep_prob\", 0.8,\n \"dropout keep cell input and output prob.\")\n# tf.app.flags.DEFINE_boolean(\"bidirectional_rnn\", False,\n# \"Use birectional RNN\")\n# tf.app.flags.DEFINE_string(\"task\", 'joint', \"Options: joint; intent; tagging\")\nFLAGS = tf.app.flags.FLAGS\n\nif FLAGS.max_sequence_length == 0:\n print('Please indicate max sequence length. Exit')\n exit()\n\n\ndef create_model(session, sent_vocab_size, slot_vocab_size, intent_vocab_size):\n \"\"\"Create model and initialize or load parameters in session.\"\"\"\n with tf.variable_scope(\"model\", reuse=None):\n model_train = multi_task_model.MultiTaskModel(\n session,\n sent_vocab_size, slot_vocab_size, intent_vocab_size, FLAGS.max_sequence_length,\n FLAGS.word_embedding_size, FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,\n learning_rate=FLAGS.learning_rate, alpha=FLAGS.alpha,\n dropout_keep_prob=FLAGS.dropout_keep_prob, use_lstm=True,\n forward_only=False)\n with tf.variable_scope(\"model\", reuse=True):\n model_test = multi_task_model.MultiTaskModel(\n session,\n sent_vocab_size, slot_vocab_size, intent_vocab_size, FLAGS.max_sequence_length,\n FLAGS.word_embedding_size, FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,\n learning_rate=FLAGS.learning_rate, alpha=FLAGS.alpha,\n dropout_keep_prob=FLAGS.dropout_keep_prob, use_lstm=True,\n forward_only=True)\n\n ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)\n if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model_train.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n print(\"Created model with fresh parameters.\")\n session.run(tf.global_variables_initializer())\n return model_train, model_test\n\n\ndef train():\n print('Applying Parameters:')\n for k, v in FLAGS.__dict__['__flags'].items():\n print('%s: %s' % (k, str(v)))\n print(\"Preparing data in %s\" % FLAGS.data_dir)\n\n sent_train, slot_train, intent_train, \\\n sent_valid, slot_valid, intent_valid, \\\n sent_test, slot_test, intent_test, \\\n sent_vocab_path, slot_vocab_path, intent_vocab_path = data_utils.prepare_multi_task_data(\n FLAGS.data_dir, FLAGS.sent_vocab_size)\n\n # # Get embedding meta.\n # with open(sent_vocab_path, 'r') as f:\n # lines = f.readlines()\n # tmps = []\n # for wid, line in enumerate(lines):\n # tmps.append(line.strip() + '\\t' + str(wid) + '\\n')\n # with open(os.path.join(FLAGS.train_dir, \"embedding_meta.tsv\"), 'w') as fw:\n # fw.write('word\\tword_id\\n')\n # fw.writelines(tmps)\n\n result_dir = os.path.join(FLAGS.train_dir, 'test_results')\n if not os.path.isdir(result_dir):\n os.makedirs(result_dir)\n\n current_valid_out_file = result_dir + '/valid_hyp'\n current_test_out_file = result_dir + '/test_hyp'\n\n sent_vocab, rev_sent_vocab = data_utils.initialize_vocabulary(sent_vocab_path)\n s_attr_vocab, rev_s_attr_vocab = data_utils.initialize_vocabulary(slot_vocab_path[0])\n s_loc_vocab, rev_s_loc_vocab = data_utils.initialize_vocabulary(slot_vocab_path[1])\n s_name_vocab, rev_s_name_vocab = 
data_utils.initialize_vocabulary(slot_vocab_path[2])\n s_ope_vocab, rev_s_ope_vocab = data_utils.initialize_vocabulary(slot_vocab_path[3])\n s_way_vocab, rev_s_way_vocab = data_utils.initialize_vocabulary(slot_vocab_path[4])\n intent_vocab, rev_intent_vocab = data_utils.initialize_vocabulary(intent_vocab_path)\n\n sent_vocab_size = len(sent_vocab)\n slot_vocab_size = [len(s_attr_vocab), len(s_loc_vocab), len(s_name_vocab), len(s_ope_vocab), len(s_way_vocab)]\n intent_vocab_size = len(intent_vocab)\n\n # print(sent_vocab_size, slot_vocab_size, intent_vocab_size)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess:\n # Create model.\n print(\"Max sequence length: %d.\" % FLAGS.max_sequence_length)\n print(\"Creating %d layers of %d units.\" % (FLAGS.num_layers, FLAGS.size))\n\n model, model_test = create_model(sess, sent_vocab_size, slot_vocab_size, intent_vocab_size)\n print(\"Creating model with sent_vocab_size=%d, s_attr_vocab_size=%d, \"\n \"s_loc_vocab_size=%d, s_name_vocab_size=%d, \"\n \"s_ope_vocab_size=%d, s_way_vocab_size=%d, \"\n \"and intent_vocab_size=%d.\" % (sent_vocab_size, slot_vocab_size[0],\n slot_vocab_size[1], slot_vocab_size[2],\n slot_vocab_size[3], slot_vocab_size[4], intent_vocab_size))\n\n # Read data into buckets and compute their sizes.\n print(\"Reading train/valid/test data (training set limit: %d).\"\n % FLAGS.max_train_data_size)\n valid_set = data_utils.read_data(sent_valid, slot_valid, intent_valid)\n test_set = data_utils.read_data(sent_test, slot_test, intent_test)\n train_set = data_utils.read_data(sent_train, slot_train, intent_train)\n\n # This is the training loop.\n step_time, loss = 0.0, 0.0\n current_step = 0\n\n best_valid_score = 0\n best_test_score = 0\n\n while model.global_step.eval() < FLAGS.max_training_steps:\n # Get a batch and make a step.\n start_time = time.time()\n\n batch_inputs, batch_s_attrs, batch_s_locs, batch_s_names, batch_s_opes, batch_s_ways, \\\n batch_intents, batch_sequence_length = model.get_batch(train_set)\n # print(batch_inputs[0].shape)\n\n train_summary, step_loss, train_logits = model.step(batch_inputs, batch_s_attrs, batch_s_locs,\n batch_s_names, batch_s_opes, batch_s_ways,\n batch_intents, batch_sequence_length, False)\n\n step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint\n loss += step_loss / FLAGS.steps_per_checkpoint\n current_step += 1\n\n # Once in a while, we save checkpoint, print statistics, and run evals.\n if current_step % FLAGS.steps_per_checkpoint == 0:\n perplexity = math.exp(loss) if loss < 300 else float('inf')\n print(\"global step %d step-time %.2f. 
Training perplexity %.2f\"\n % (model.global_step.eval(), step_time, perplexity))\n sys.stdout.flush()\n # Save checkpoint and zero timer and loss.\n model.train_writer.add_summary(train_summary, global_step=model.global_step.eval())\n checkpoint_path = os.path.join(FLAGS.train_dir, \"model.ckpt\")\n model.saver.save(sess, checkpoint_path, global_step=model.global_step)\n step_time, loss = 0.0, 0.0\n\n def write_eval_result(result_list, result_path):\n with tf.gfile.GFile(result_path, 'w') as fin:\n for i in range(len(result_list[0])):\n fin.write(' '.join([str(result_list[j][i]) for j in range(6)]) + '\\n')\n\n def run_valid_test(data_set, mode): # mode: Eval, Test\n # Run evals on development/test set and print the accuracy.\n ref_s_attr_list = list()\n hyp_s_attr_list = list()\n ref_s_loc_list = list()\n hyp_s_loc_list = list()\n ref_s_name_list = list()\n hyp_s_name_list = list()\n ref_s_ope_list = list()\n hyp_s_ope_list = list()\n ref_s_way_list = list()\n hyp_s_way_list = list()\n ref_intent_list = list()\n hyp_intent_list = list()\n s_attr_correct_count = 0\n s_loc_correct_count = 0\n s_name_correct_count = 0\n s_ope_correct_count = 0\n s_way_correct_count = 0\n intent_correct_count = 0\n\n # accuracy = 0.0\n\n eval_loss = 0.0\n count = 0\n for i in range(len(data_set)):\n count += 1\n inputs, s_attrs, s_locs, s_names, s_opes, s_ways, intents, sequence_length = \\\n model_test.get_one(data_set, i)\n\n _, _step_loss, logits = model_test.step(inputs, s_attrs, s_locs, s_names,\n s_opes, s_ways, intents, sequence_length, True)\n eval_loss += _step_loss / len(data_set)\n\n ref_s_attr = np.argmax(s_attrs)\n ref_s_attr_list.append(rev_s_attr_vocab[ref_s_attr])\n hyp_s_attr = np.argmax(logits[0])\n hyp_s_attr_list.append(rev_s_attr_vocab[hyp_s_attr])\n # print(ref_s_attr, hyp_s_attr)\n ref_s_loc = np.argmax(s_locs)\n ref_s_loc_list.append(rev_s_loc_vocab[ref_s_loc])\n hyp_s_loc = np.argmax(logits[1])\n hyp_s_loc_list.append(rev_s_loc_vocab[hyp_s_loc])\n ref_s_name = np.argmax(s_names)\n ref_s_name_list.append(rev_s_name_vocab[ref_s_name])\n hyp_s_name = np.argmax(logits[2])\n hyp_s_name_list.append(rev_s_name_vocab[hyp_s_name])\n ref_s_ope = np.argmax(s_opes)\n ref_s_ope_list.append(rev_s_ope_vocab[ref_s_ope])\n hyp_s_ope = np.argmax(logits[3])\n hyp_s_ope_list.append(rev_s_ope_vocab[hyp_s_ope])\n ref_s_way = np.argmax(s_ways)\n ref_s_way_list.append(rev_s_way_vocab[ref_s_way])\n hyp_s_way = np.argmax(logits[4])\n hyp_s_way_list.append(rev_s_way_vocab[hyp_s_way])\n ref_intent = np.argmax(intents)\n ref_intent_list.append(rev_intent_vocab[ref_intent])\n hyp_intent = np.argmax(logits[5])\n hyp_intent_list.append(rev_intent_vocab[hyp_intent])\n\n if ref_s_attr == hyp_s_attr:\n s_attr_correct_count += 1\n if ref_s_loc == hyp_s_loc:\n s_loc_correct_count += 1\n if ref_s_name == hyp_s_name:\n s_name_correct_count += 1\n if ref_s_ope == hyp_s_ope:\n s_ope_correct_count += 1\n if ref_s_way == hyp_s_way:\n s_way_correct_count += 1\n if ref_intent == hyp_intent:\n intent_correct_count += 1\n\n s_attr_accuracy = float(s_attr_correct_count) * 100 / count\n s_loc_accuracy = float(s_loc_correct_count) * 100 / count\n s_name_accuracy = float(s_name_correct_count) * 100 / count\n s_ope_accuracy = float(s_ope_correct_count) * 100 / count\n s_way_accuracy = float(s_way_correct_count) * 100 / count\n slot_accuracy = (s_attr_accuracy + s_loc_accuracy + s_name_accuracy\n + s_ope_accuracy + s_way_accuracy) / 5\n\n intent_accuracy = float(intent_correct_count) * 100 / count\n\n # if task['intent'] == 1:\n 
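# slot_accuracy above is the unweighted mean of the five per-slot accuracies; intent accuracy is reported separately\n            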
print(\" %s s_attr_accuracy: %.2f %d/%d\" % (mode, s_attr_accuracy, s_attr_correct_count, count))\n print(\" %s s_loc_accuracy: %.2f %d/%d\" % (mode, s_loc_accuracy, s_loc_correct_count, count))\n print(\" %s s_name_accuracy: %.2f %d/%d\" % (mode, s_name_accuracy, s_name_correct_count, count))\n print(\" %s s_ope_accuracy: %.2f %d/%d\" % (mode, s_ope_accuracy, s_ope_correct_count, count))\n print(\" %s s_way_accuracy: %.2f %d/%d\" % (mode, s_way_accuracy, s_way_correct_count, count))\n print(\" %s intent_accuracy: %.2f %d/%d\" % (mode, intent_accuracy, intent_correct_count, count))\n sys.stdout.flush()\n out_file = None\n if mode == 'Eval':\n out_file = current_valid_out_file\n elif mode == 'Test':\n out_file = current_test_out_file\n\n hyp_list = [hyp_s_attr_list, hyp_s_loc_list, hyp_s_name_list, hyp_s_ope_list, hyp_s_way_list,\n hyp_intent_list]\n ref_list = [ref_s_attr_list, ref_s_loc_list, ref_s_name_list, ref_s_ope_list, ref_s_way_list,\n ref_intent_list]\n\n write_eval_result(hyp_list, out_file) # write prediction result to output file path\n\n return slot_accuracy, intent_accuracy, hyp_list\n\n # # valid\n # valid_slot_accuracy, valid_intent_accuracy, hyp_list = run_valid_test(valid_set, 'Valid')\n # if valid_slot_accuracy > best_valid_score:\n # best_valid_score = valid_slot_accuracy\n # # save the best output file\n # subprocess.call(['mv', current_valid_out_file,\n # current_valid_out_file + '_best_acc_%.2f' % best_valid_score])\n # test, run test after each validation for development purpose.\n test_slot_accuracy, test_intent_accuracy, hyp_list = run_valid_test(test_set, 'Test')\n if test_slot_accuracy > best_test_score:\n best_test_score = test_slot_accuracy\n # save the best output file\n subprocess.call(['mv', current_test_out_file,\n current_test_out_file + '_best_acc_%.2f' % best_test_score])\n\n\ndef main(_):\n train()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"nlu_yzf/run_multi_task.py","file_name":"run_multi_task.py","file_ext":"py","file_size_in_byte":16457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"636656338","text":"n,m = map(int, input().split())\ns = [list(input()) for _ in range(n)]\n\ndef isArea(y,x):\n if 0 <= x < m and 0<= y < n:\n return True\n else:\n return False\n \n\nprint(s)","sub_path":"mujin/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"310538018","text":"from __future__ import division\n\nfrom math import cos, sin\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport cv2\n\ngreen = (0,255,0)\ndef show(image):\n plt.figure(figsize=(10,10))\n plt.imshow(image, interpolation='nearest')\n\ndef overlay_mask(mask, image):\n rgb_mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)\n # combine mask and image\n img = cv2.addWeighted(rgb_mask, 0.5, image, 0.5, 0)\n return img\n\ndef find_biggest_contour(image):\n image = image.copy()\n img, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n print(contours)\n contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]\n biggest_contour = max(contour_sizes, key = lambda x: x[0])[1]\n \n # return biggest contour\n mask = np.zeros(image.shape, np.uint8)\n cv2.drawContours(mask, [biggest_contour], -1,255, -1)\n\n return biggest_contour, mask\ndef circle_contour(image, contour):\n #bounding ellipse\n image_with_ellipse = image.copy()\n ellipse = 
cv2.fitEllipse(contour)\n\n    #add it\n    cv2.ellipse(image_with_ellipse, ellipse, green, 2,3)\n    return image_with_ellipse\ndef find_strawberry(image):\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n    max_dimension = max(image.shape)\n    scale = 700/max_dimension\n    image = cv2.resize(image, None, fx=scale, fy=scale)\n\n    # clean the image blur to ignore minute specs like the pits\n    image_blur = cv2.GaussianBlur(image, (7,7), 0)\n    # separates the brightness intensity from the color information\n    image_blur_hsv = cv2.cvtColor(image_blur, cv2.COLOR_RGB2HSV)\n\n    #define filters\n    #filter by color\n    # range of colors to filter by\n    min_red = np.array([0,100,80])\n    max_red = np.array([10,256,256])\n\n    mask1 = cv2.inRange(image_blur_hsv, min_red, max_red)\n    \n    # brightness filter\n    # range of brightness to filter by\n    min_red2 = np.array([170,100,80])\n    max_red2 = np.array([180,256,256])\n\n    mask2 = cv2.inRange(image_blur_hsv, min_red2, max_red2)\n\n    # combine the masks\n    mask = mask1 + mask2\n\n    # segmentation\n    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))\n    mask_closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n    mask_clean = cv2.morphologyEx(mask_closed, cv2.MORPH_OPEN, kernel)\n\n    # find biggest strawberry\n    big_strawberry_contour, mask_strawberries = find_biggest_contour(mask_clean)\n\n    # overlay\n    overlay = overlay_mask(mask_clean, image)\n\n    # ellipse the biggest strawberry\n    circled = circle_contour(overlay, big_strawberry_contour)\n    show(circled)\n    \n    # convert back to original color scheme\n    bgr = cv2.cvtColor(circled, cv2.COLOR_RGB2BGR)\n    return bgr\n\nimage = cv2.imread('yo.jpg')\nresult = find_strawberry(image)\ncv2.imwrite('yo2.jpg', result)\n","sub_path":"strawberrySeg.py","file_name":"strawberrySeg.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"167033534","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n    x = tf.placeholder(dtype=tf.float32, shape=[None, 50], name='x_input')\n    y = tf.placeholder(dtype=tf.float32, shape=[None, 50], name='y_label')\n\n    x_val = np.arange(-25., 25., 1., dtype=np.float32).reshape([1, 50])\n    print(x_val.shape)\n    y_val = np.square(x_val)\n    print(y_val.shape)\n\n    x_test = np.arange(-25.5, 24.5, 1., dtype=np.float32).reshape([1, 50])\n    y_test = np.square(x_test)\n\n    with tf.variable_scope('net', reuse=tf.AUTO_REUSE):\n        l1 = tf.layers.dense(x, 100, activation=tf.nn.relu, name='layer1')\n        l2 = tf.layers.dense(l1, 100, activation=tf.nn.relu, name='layer2')\n        out = tf.layers.dense(l2, 50, name='output')\n\n    loss = tf.losses.mean_squared_error(y, out)\n    optimizer = tf.train.AdamOptimizer(0.001)\n    train_op = optimizer.minimize(loss)\n\n    init = tf.global_variables_initializer()\n\n    export_dir = './saved_model_1' # Don't need to create dir\n    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)\n\n    with tf.Session(graph=graph) as sess:\n        sess.run(init)\n\n        for i in range(50):\n            l, _ = sess.run([loss, train_op], feed_dict={x: x_val, y: y_val})\n        l_t, pred = sess.run([loss, out], feed_dict={x: x_test, y: y_test})\n\n        plt.plot(x_test, y_test, 'bo', x_test, pred, 'rs')\n        plt.savefig('./trained.png')\n        plt.show()\n\n        tensor_info_x = tf.saved_model.utils.build_tensor_info(x) # build_tensor_info serializes the tensor's metadata into a TensorInfo protocol buffer\n        tensor_info_y = tf.saved_model.utils.build_tensor_info(y) # each signature key is an agreed-upon input/output alias; the value is the TensorInfo wrapping that tensor\n
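        # build TensorInfo for an intermediate layer (l2) and for the final output, so a serving client can fetch either one;\n        # a reloaded session would restore this graph via tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)\n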
        tensor_info_layer2 = tf.saved_model.utils.build_tensor_info(l2)\n        tensor_info_out = tf.saved_model.utils.build_tensor_info(out)\n\n        prediction_signature = (\n            tf.saved_model.signature_def_utils.build_signature_def(\n                inputs={'x_input': tensor_info_x,\n                        'y_label': tensor_info_y},\n                outputs={'layer2': tensor_info_layer2,\n                         'output': tensor_info_out,\n                         },\n                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME\n            )\n        )\n\n        builder.add_meta_graph_and_variables(\n            sess, [tf.saved_model.tag_constants.SERVING],\n            signature_def_map={\n                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n                    prediction_signature\n            }\n        )\n\n        builder.save()\n","sub_path":"savedmodel/savedmodel_exercise.py","file_name":"savedmodel_exercise.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"598269988","text":"def solution(N):\n    b=N\n    li=[]\n    while N!=0:\n        B=N//2\n        N=B\n        A=N%2\n        if A==0:\n            li.append(0)\n        else:\n            li.append(1)\n    del li[-1]\n    li.reverse()\n    if b%2==1:\n        li.append(1)\n    else:\n        li.append(0)\n    cn=[]\n    dn=0\n    for i in range(1,len(li)):\n        if li[i]==0:\n            dn=dn+1\n        else:\n            cn.append(dn)\n            dn=0\n    if cn==[]:\n        return 0 \n    else:\n        return max(cn)\n\ndef solution(A,K):\n    try:\n        for i in range(0,K):\n            A.insert(0,A[-1])\n            del A[-1]\n        return A\n    except: \n        if A==[]:\n            return []\n        elif int(A):\n            return [A]\n        \nA=[2,3,4]\nA=1\nsolution(A,2)\n    \nA=[3,8,9,7,6]\nK=1\nsolution(A,K)\n\nA.insert(0,A[-1])\ndel A[-1]\n\ndef solution(A):\n    B=set()\n    for i in A:\n        if A.count(i)%2==1:\n            B.add(i)\n    C=list(B)\n    return C[0]\n\ndef solution(A):\n    B=list(set(A))\n    for i in B:\n        if A.count(i)%2==1: # count() itself loops over A every call, so this can be slow\n            return i\n    \ndef solution(A):\n    C={}\n    for i in A:\n        if i not in C:\n            C[i]=1\n        else:\n            C[i]=C[i]+1\n    \n    for i,j in C.items():\n        if j%2==1:\n            return i\n    \n\n\nsolution(A)\nA=[3,3,9,9,9,9,7]\nB=set()\nfor i in A:\n    if A.count(i)%2==1:\n        B.add(i)\nC=list(B) \n\n\ndef solution(A):\n    B=[]\n    for i in range(1,max(A)+1):\n        B.append(i)\n    C=[]\n    for i in B:\n        if i not in A:\n            return i\n            break\n    \n\ndef solution(A):\n\n    if A==[]:\n        return 1\n    elif str(A).isnumeric():\n        return A+1\n    else:\n        A.sort()\n        for i in range(0,len(A)):\n            if i==len(A)-1:\n                break\n            elif A[i]+1!=A[i+1]:\n                return A[i]+1\n                break\n        if len(A)==max(A):\n            return max(A)+1\n        elif A[0]==2:\n            return 1\n\ndef solution(x):\n    x=x.replace('?','.')\n    x=x.replace('!','.')\n    x=x.split('.')\n    D=[]\n    for i in x:\n        y=i.split(' ')\n        cn=0\n        for j in y:\n            if j.isalpha():\n                cn=cn+1\n        D.append(cn)\n    return max(D)\n    \ndef solution(x):\n    start=0\n    cn=0\n    for i in range(0,len(x)):\n        start=x[start]\n        if start == -1:\n            cn=cn+1\n            break;\n        else:\n            cn=cn+1\n    return cn \n","sub_path":"codility1.py","file_name":"codility1.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"492771110","text":"from idlab import * \n\n\n\n# home function, this is where the main minutiae will go\n# the rfid reading, RPi.GPIO stuff and user access will go here\n# this will obviously get very lengthy\n\n@app.route('/', methods=['GET', 'POST'])\ndef access():\n    try:\n        # NOTE: assumes the serial module is available through the star import above;\n        # bound to a new name so it does not shadow the module itself\n        ser = serial.Serial(\"/dev/tty.usbserial-AK05BHHV\", baudrate=9600)\n        code = ''\n        while True:\n            data = ser.read()\n            if data == '\\r':\n                print(code)\n                code = ''\n            else:\n                code = code + data\n        # NOTE: the loop above never breaks, so the request handling below is unreachable as written\n        \n        if request.method == 'POST':\n            if request.form['card_id']:\n                pin = request.form['card_id']\n                \n                query = Student.query.filter(Student.card_id == 
pin).first()\n \n\n #my key, quick ref = 41034903020\n if query:\n query.last_access = datetime.now()\n db.session.commit()\n currQuery = Current.query.filter(Current.id == query.id).first()\n \n if not currQuery:\n newCurrent = Current(query.id, query.fname, query.lname, None)\n db.session.add(newCurrent)\n db.session.flush()\n db.session.commit()\n return redirect(url_for('success'))\n else:\n \n return redirect(url_for('success'))\n \n \n else:\n flash('No pin entered')\n return redirect(request.url)\n \n return render_template('access.html')\n \n except Exception as e:\n return str(e)\n \n \n\n# success, temp page for testing purposes\n@app.route('/success/')\ndef success():\n return 'success'\n\n# everything below this will concern the dashboard. this will be the second pi in the actual ID lab. interactive for students. can be accessible from computers within\n\n# dashboard home, this is for the pi inside or comps inside\n@app.route('/dashboard/home/')\ndef dashboard_home():\n try:\n # get current users to pass to home \n currUser = Current.query.all()\n \n return render_template('dashboard_home.html', currUser=currUser)\n except Exception as e:\n return str(e)\n \n \n# dashboard --- current students\n@app.route('/dashboard/students/')\ndef dashboard_students():\n try:\n currUser = Current.query.all()\n events = Event.query.all()\n return render_template('dashboard_student.html', currUser=currUser, events=events)\n except Exception as e:\n return str(e)\n \n\n\n \n","sub_path":"idlab/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"319428341","text":"from MeCabTool import M_extract\nimport csv\nfin=open(\"nc2meta_dict.txt\",\"r\")\nfout=open(\"nc2meta_ndict.txt\",\"w\")\nreader=csv.reader(fin)\nfor l in reader:\n k = M_extract(l[0])[1]\n if k != \" \":\n fout.write(\"{}\\t{}\\n\".format(k,l[1]))\nfin.close()\nfout.close()\n","sub_path":"1127_mecabian.py","file_name":"1127_mecabian.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"517441614","text":"import psycopg2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport errno\nimport pickle\n\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.preprocessing import MaxAbsScaler\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import explained_variance_score\n\ndef split_list(list, indexes):\n result = []\n for index in indexes:\n result.append(list[index])\n return result\n\n# connect to the PostgreSQL server\nconn = psycopg2.connect(host=\"localhost\",database=\"tcc_development\", user=\"postgres\", password=\"postgres\")\n# create a cursor\ncur = conn.cursor()\n\ncur.execute('SELECT id from gauges')\ngauges_ids = [id[0] for id in cur.fetchall()]\n\nall_data = [0]*(np.max(gauges_ids)+1)\n\nrs = ShuffleSplit(n_splits=10, test_size=.1, random_state=42)\n\nSVR_results_filename = \"./SVR_regressors_cv/results.csv\"\nif not os.path.exists(os.path.dirname(SVR_results_filename)):\n try:\n os.makedirs(os.path.dirname(SVR_results_filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\nwith open(SVR_results_filename, \"w\") as SVR_results_file:\n SVR_results_file.write(\"Gauge ID;Split No;Train Indexes;Test Indexes;Real Y;Predicted Y;Same Class;R2 Score;Explained Variance Score\\n\")\n\n for gauge_id in 
gauges_ids:\n        cur.execute('SELECT * from infos_in_half_hours where gauge_id = %s', [gauge_id])\n        infos_in_half_hours = cur.fetchall()\n\n        X = [[int(row[2].strftime(\"%Y\")), int(row[2].strftime(\"%m\")), int(row[2].strftime(\"%d\")), int(row[2].strftime(\"%H\")), int(row[2].strftime(\"%M\")), row[3], row[4], row[5]] for row in infos_in_half_hours[:-1]]\n        transformer = MaxAbsScaler().fit(X)\n        X_normalized = transformer.transform(X)\n        Y = [measure[5] for measure in infos_in_half_hours[1:]]\n\n        regr = SVR(kernel='rbf', C=100, gamma=0.001)\n\n        X_norm_filename = \"./SVR_regressors_cv/%s/X_normalized.csv\" %(str(gauge_id))\n        if not os.path.exists(os.path.dirname(X_norm_filename)):\n            try:\n                os.makedirs(os.path.dirname(X_norm_filename))\n            except OSError as exc: # Guard against race condition\n                if exc.errno != errno.EEXIST:\n                    raise\n        with open(X_norm_filename, \"w\") as f:\n            for item in X_normalized.tolist():\n                f.write(\"%s\\n\" %(item))\n        with open(\"./SVR_regressors_cv/%s/X.csv\" %(str(gauge_id)), \"w\") as f:\n            for item in X:\n                f.write(\"%s\\n\" %(item))\n        with open(\"./SVR_regressors_cv/%s/Y.csv\" %(str(gauge_id)), \"w\") as f:\n            for item in Y:\n                f.write(\"%s\\n\" %(item))\n\n        split_index = 0\n        for train_index, test_index in rs.split(X):\n            X_train, X_test, y_train, y_test = split_list(X,train_index), split_list(X,test_index), split_list(Y,train_index), split_list(Y,test_index)\n\n            regr.fit(X_train, y_train)\n            # save the fitted model for this split (previously dumped before fitting, which stored an untrained regressor)\n            pickle.dump(regr, open('./SVR_regressors_cv/%s/RF_%s' %(str(gauge_id), split_index), 'wb'))\n\n            y_pred = regr.predict(X_test)\n\n            r2_score_value = r2_score(y_test, y_pred)\n            variance_score = explained_variance_score(y_test, y_pred)\n\n            same_class = [((y_test[i] <= 2.5 and y_pred[i] <= 2.5) or (y_test[i] > 2.5 and y_pred[i] > 2.5 and y_test[i] <= 12.5 and y_pred[i] <= 12.5) or (y_test[i] > 12.5 and y_pred[i] > 12.5 and y_test[i] <= 25 and y_pred[i] <= 25) or (y_test[i] > 25 and y_pred[i] > 25)) for i in range(len(y_pred))]\n            corrects = same_class.count(True)/len(same_class)\n\n            SVR_results_file.write(\"%s;%s;%s;%s;%s;%s;%s;%s;%s\\n\" %(gauge_id,split_index,train_index.tolist(),test_index.tolist(),y_test,y_pred.tolist(),corrects,r2_score_value,variance_score))\n\n            split_index = split_index + 1\n\n# end connection\ncur.close()\nconn.close()\n","sub_path":"python/old/old_individual_SVR_cv.py","file_name":"old_individual_SVR_cv.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"522945226","text":"import re\n\n\ndef read(file_name):\n    '''\n    :param file_name: name of the file to read\n    :return: the words, stored as a two-dimensional list\n    Strings are split on commas, spaces, and parentheses.\n    '''\n    texts = []\n    file_object = open(file_name, 'r', encoding='UTF-8')\n    for line in file_object.readlines():\n        texts.append(\n            list(\n                filter(None,\n                       (word.lower()\n                        for word in re.split(r'[(),. ]', line.strip())))))\n    file_object.close()\n    return texts\n\n\nif __name__ == \"__main__\":\n    print(read(\"data/2/trainData.txt\"))\n","sub_path":"input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"299927135","text":"__author__ = 'admin'\n\nimport re\n\nDEBUG = False\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Default variables:\n_TEMPLATE = 'template.apib'\n_DUMPFILE = 'result.apib'\n\n# Command Related:\n_INCLUDE = 'include'\n_SECTION_START = 'start'\n_SECTION_END = 'end'\n\n# Static methods:\n_apiary_file_type = re.compile(r'.+\\\.apib$').match\n_is_command = re.compile(r'^####\\\$[a-zA-Z]+\\\{[a-zA-Z0-9\\\_\\\-\\\.]+\\\}').match\n_search_tag = re.compile(r'\\\{[a-zA-Z0-9\\\_\\\-]+(\\\.[a-zA-Z0-9\\\_\\\-]+)?\\\}').search\n_is_section_tag = re.compile(r'^[a-zA-Z0-9\\\_\\\-]+$').match\n\n\nclass Parser(object):\n\n    @staticmethod\n    def _get_tag_from_filename(filename):\n        assert isinstance(filename, str) and _apiary_file_type(filename)\n        return filename[:-5]\n\n    @staticmethod\n    def _get_filename_from_tag(tag):\n        assert isinstance(tag, str) and not _apiary_file_type(tag)\n        return '%s.%s' % (tag, 'apib')\n\n    @staticmethod\n    def _get_command_info_from_line(line):\n        # @param {string} line: the input line string to process\n        # @return {string, string} command, tag: the command to execute and the tag as the following parameter\n        tag_search = _search_tag(line)\n        assert tag_search is not None, 'Syntax Error for command in line: %s' % line\n        return line[5:tag_search.start()], tag_search.group()[1:-1]\n\n    @staticmethod\n    def _include(tag):\n        # @param {string} tag should be one of the patterns: SOURCE_FILE, SOURCE_FILE.SECTION_TAG\n        # @return {list(string)} the content\n        tag_contents = tag.split('.')\n        assert len(tag_contents) == 1 or len(tag_contents) == 2, 'Syntax Error with tag: %s' % tag\n        filename = Parser._get_filename_from_tag(tag_contents[0])\n        if len(tag_contents) == 2:\n            sub_tag = tag_contents[1]\n        else:\n            sub_tag = None\n\n        sub_parser = Parser()\n        return sub_parser.parse(filename, sub_tag)\n\n    def _execute_command(self, command, tag):\n        if DEBUG:\n            print('execute command: %s with tag: %s' % (command, tag))\n\n        if command == _INCLUDE:\n            sub_content = Parser._include(tag)\n            for line in sub_content:\n                self._parsed_content.append(line)\n\n        elif command == _SECTION_START:\n            assert _is_section_tag(tag), 'Syntax Error with tag: %s for command: %s' % (tag, command)\n            self._should_parse = (self._tag == tag)\n\n        elif command == _SECTION_END:\n            assert _is_section_tag(tag), 'Syntax Error with tag: %s for command: %s' % (tag, command)\n            if self._should_parse and self._tag == tag:\n                self._should_parse = False\n        else:\n            assert False, 'Syntax Error: command %s not defined' % command\n\n    def __init__(self):\n        self._parsed_content = None\n        self._tag = None\n        self._should_parse = False\n\n    def parse(self, filename, tag=None):\n        # @param {string} filename\n        # @param {list(string)} tag, default: empty list\n        # @return {list(string)} parsed contents\n        assert isinstance(filename, str) and (isinstance(tag, str) or tag is None)\n        print('Start parsing: %s with tag %s' % (filename, tag))\n\n        self._parsed_content = list()\n        self._tag = tag\n        try:\n            with open(filename, 'r') as file:\n                lines = file.readlines()\n\n        except FileNotFoundError:\n            print('Error: could not find the related file: %s' % filename)\n            return self._parsed_content\n\n        line_count = 0\n        self._should_parse = (self._tag is None)\n        for line in lines:\n            line_count += 1\n            if DEBUG:\n                print('%d %s' % (line_count, line))\n\n            if _is_command(line):\n                command, tag = Parser._get_command_info_from_line(line)\n                self._execute_command(command, tag)\n            elif self._should_parse:\n                self._parsed_content.append(line)\n\n        return self._parsed_content\n\n# ----------------------------------------------------------------------------------------------------------------------\n# define the main function for the compile process:\ndef compile(**kwargs):\n    template = kwargs.get('template', _TEMPLATE)\n    dumpfile = kwargs.get('dumpfile', _DUMPFILE)\n\n    main_parser = Parser()\n    contents = main_parser.parse(template)\n\n    with open(dumpfile, 'w') as file:\n        file.writelines(contents)\n\n    pass\n\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# the operation entry point:\nif __name__ == '__main__':\n    print('start compiling process ...')\n\n    compile()\n\n    exit(0)","sub_path":"utilities/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"532864248","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nimport shlex\n\nfrom . import exec_cmd\n\n\ndef get_ssh_banner(conn):\n    '''see RFC4253\n    for SSH 2.0, the identification string is\n    SSH-2.0-*\n\n    In the compatibility mode, the identification string is\n    SSH-1.99-*\n\n    for earlier version, the identification string is\n    SSH-1.x-*\n\n    It's not necessary to send identification to server since\n    server will send header info once connection established.\n    Otherwise client will receive more output than identification string.\n    '''\n    #conn.sendall('SSH-2.0-check-ssh\\r\\n')\n    ssh_banner = conn.recv(1024).strip()\n    return ssh_banner\n\n\ndef get_ssh_auth(host, port):\n    cmd = 'ssh -l root -o PreferredAuthentications=none \\\n            -o StrictHostKeyChecking=no \\\n            -o UserKnownHostsFile=/dev/null -p {0} {1}'.format(port, host)\n    cmd = shlex.split(cmd)\n    ret, out = exec_cmd(cmd)\n    out = out.strip()\n    result = re.search('Permission denied \\\((?P<auth>.+)\\\)', out) # pylint: disable=anomalous-backslash-in-string\n    if not result:\n        raise Exception(\"Can't find auth type in: {0}\".format(out))\n    auth = result.group('auth').split(',')\n    return auth\n\n\ndef get_ssh_version(banner):\n    '''get the ssh version by parsing version message'''\n    version = banner.split('-')[1]\n    if version == '1.99':\n        version = '1.99'\n    elif version.startswith('1.'):\n        version = '1'\n    elif version.startswith('2.'):\n        version = '2'\n    else:\n        raise Exception('Unknown version: {0}'.format(banner))\n\n    return version\n","sub_path":"scripts/utils/ssh_helper.py","file_name":"ssh_helper.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"532262282","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Dreams',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('dream_subject', models.CharField(max_length=255, verbose_name=b'subject of dream')),\n                ('dream_text', models.TextField(verbose_name=b'dream description')),\n                ('dream_date', models.DateField(verbose_name=b'morning date')),\n                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n            ],\n        ),\n    ]\n","sub_path":"dreams/users/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"486149657","text":"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils import data\nimport pandas as pd\nimport numpy as np\nfrom torch.distributions import Normal\nfrom torch.nn.utils import weight_norm\nfrom torch.distributions import Categorical\n\ndef init_weights_lstm(m):\n    # nn.LSTMCell exposes weight_ih/weight_hh and bias_ih/bias_hh rather than .weight/.bias\n    if isinstance(m, nn.LSTMCell):\n        nn.init.normal_(m.weight_ih, mean=0., std=0.1)\n        nn.init.normal_(m.weight_hh, mean=0., std=0.1)\n        nn.init.constant_(m.bias_ih, 0.1)\n        nn.init.constant_(m.bias_hh, 0.1)\n\nclass Model(nn.Module):#torch.jit.ScriptModule,\n    def __init__(self, hidden_size, output_size, std=0.1):\n        super(Model, self).__init__()\n        self.hidden_size = hidden_size\n        self.output_size = output_size\n        self.input_features = 5\n        \n        #critic\n        self.critic_lstm1 = nn.LSTMCell(self.input_features, self.hidden_size)\n        self.critic_lstm2 = nn.LSTMCell(self.hidden_size, self.hidden_size)\n        #2layers\n        self.critic_out = nn.Linear(self.hidden_size, 1)\n\n        # #add another layer\n        # self.critic_lstm3 = nn.LSTMCell(self.hidden_size, self.output_size)\n        # # one fully connected output layer\n        # self.critic_out = nn.Linear(self.output_size, 1)\n\n        #actor\n        self.actor_lstm1 = nn.LSTMCell(self.input_features, self.hidden_size)\n        self.actor_lstm2 = nn.LSTMCell(self.hidden_size, self.hidden_size)\n        #2layers\n        self.actor_out = nn.Linear(self.hidden_size, 1)\n\n\n        # #add another layer\n        # self.actor_lstm3 = nn.LSTMCell(self.hidden_size, self.output_size)\n        # # one fully connected output layer\n        # self.actor_out = nn.Linear(self.output_size, 1)\n\n        self.log_std = nn.Parameter(torch.ones(1, 1) * std)\n\n        self.apply(init_weights_lstm)\n\n\n    #@torch.jit.script_method\n    def forward(self, input, pred_range: int = 0, pred_start: int = 0, series_length: int = 1, batch_size = 1):\n        outputs_values = []\n        outputs_dist = []\n        #initialize output\n        #prepare memory tensors\n        c_h_1 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n        c_c_1 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n        c_h_2 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n        c_c_2 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n        # #another layer\n        # c_h_3 = torch.zeros([batch_size, self.output_size], dtype=torch.double)\n        # c_c_3 = torch.zeros([batch_size, self.output_size], dtype=torch.double)\n\n        a_h_1 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n        a_c_1 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n        a_h_2 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n        a_c_2 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n        # #another layer\n        # a_h_3 = torch.zeros([batch_size, self.output_size], dtype=torch.double)\n        # a_c_3 = torch.zeros([batch_size, self.output_size], dtype=torch.double)\n\n        \n        #define forward flow\n        for i, input_t in enumerate(input[:,:series_length,:].chunk(input.size(1), dim=1)):\n            input_t = input_t.squeeze(1)\n\n            c_h_1, c_c_1 = self.critic_lstm1(input_t, (c_h_1, c_c_1))\n            c_h_2, c_c_2 = self.critic_lstm2(c_h_1, (c_h_2, c_c_2))\n            #2 layers\n\n            output_critic 
= self.critic_out(c_h_2)\n\n # #another layer\n # c_h_3, c_c_3 = self.critic_lstm3(c_h_2, (c_h_3, c_c_3))\n # output_critic = self.critic_out(c_h_3)\n \n \n\n a_h_1, a_c_1 = self.actor_lstm1(input_t, (a_h_1, a_c_1))\n a_h_2, a_c_2 = self.actor_lstm2(a_h_1, (a_h_2, a_c_2))\n #2 layers\n mu = torch.tanh(self.actor_out(a_h_2))*150\n #another layer\n # a_h_3, a_c_3 = self.actor_lstm3(a_h_2, (a_h_3, a_c_3))\n # mu = torch.tanh(self.actor_out(a_h_3))*150\n\n output_value = output_critic\n std = self.log_std.exp().expand_as(mu)\n dist = Normal(mu, std)\n return output_value, dist\n\n#only sees 10 steps now\nclass Basic_Model(nn.Module):#torch.jit.ScriptModule,\n def __init__(self, hidden_size):\n super(Basic_Model, self).__init__()\n #change 61 ->101\n self.hidden_size = hidden_size\n self.input_features = 5\n\n #2 lstm layers\n #changed hidden size to 61 \n self.lstm1 = nn.LSTMCell(self.input_features, self.hidden_size)\n self.lstm2 = nn.LSTMCell(self.hidden_size, self.hidden_size)\n #one fully connected output layer\n self.linear = nn.Linear(self.hidden_size, 1)\n\n\n #@torch.jit.script_method\n def forward(self, input, pred_range: int = 0, pred_start: int = 0, series_length: int = 1, batch_size=10):\n s = series_length-15\n if s <=0:\n s=0\n outputs = []\n #initialize ouput\n #prepare memory tensors\n h_1 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n c_1 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n h_2 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n c_2 = torch.zeros([batch_size, self.hidden_size], dtype=torch.double)\n\n \n #define forward flow\n for i, input_t in enumerate(input[:,s:series_length,:].chunk(input.size(1), dim=1)):\n input_t = input_t.squeeze(1)\n h_1, c_1 = self.lstm1(input_t, (h_1, c_1))\n h_2, c_2 = self.lstm2(h_1, (h_2, c_2))\n output = self.linear(h_2)\n outputs += [output]\n\n # for prediction in the model, not used for now\n for i in range(pred_range):\n t=input[:,pred_start+i,:-1]\n input_pred = torch.cat([t,outputs[-1]], dim=1)\n h_1, c_1 = self.lstm1(input_pred, (h_1, c_1))\n h_2, c_2 = self.lstm2(h_1, (h_2, c_2))\n output = self.linear(h_2)\n outputs += [output]\n outputs = torch.stack(outputs, 1).squeeze(2)\n return outputs\n\nclass Chomp1d(nn.Module):\n def __init__(self, chomp_size):\n super(Chomp1d, self).__init__()\n self.chomp_size = chomp_size\n\n def forward(self, x):\n return x[:, :, :-self.chomp_size].contiguous()\n\n\nclass Conv_Block(nn.Module):\n def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2, batch_size=10):\n super(Conv_Block, self).__init__()\n \n # self.bn1 = nn.BatchNorm1d(n_inputs)\n self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size, stride=stride, padding=padding, dilation=dilation))\n self.chomp1 = Chomp1d(padding)\n self.relu1 = nn.ReLU()\n self.dropout1 = nn.Dropout(dropout)\n\n # self.bn2 = nn.BatchNorm1d(n_outputs)\n self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size, stride=stride, padding = padding, dilation=dilation))\n self.chomp2 = Chomp1d(padding)\n self.relu2 = nn.ReLU()\n self.dropout2 = nn.Dropout(dropout)\n\n #add self.bn1, self.bn2 if needed\n self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,\n self.conv2, self.chomp2, self.relu2, self.dropout2)\n self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None\n self.relu = nn.ReLU()\n self.init_weights()\n\n\n def init_weights(self):\n self.conv1.weight.data.normal_(0, 0.01)\n 
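# conv2 and the optional 1x1 downsample below get the same small-Gaussian init as conv1\n        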
self.conv2.weight.data.normal_(0, 0.01)\n if self.downsample is not None:\n self.downsample.weight.data.normal_(0, 0.01)\n\n def forward(self, x):\n out = self.net(x)\n res = x if self.downsample is None else self.downsample(x)\n return self.relu(out+res)\n\nclass TemporalConvNet(nn.Module):\n def __init__(self, num_inputs, num_channels, kernel_size=3, dropout = 0.2):\n super(TemporalConvNet, self).__init__()\n layers =[]\n num_levels = len(num_channels)\n for i in range(num_levels):\n dilation_size = 2**i\n in_channels = num_inputs if i == 0 else num_channels[i-1]\n out_channels = num_channels[i]\n layers += [Conv_Block(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size, padding=(kernel_size-1)*dilation_size, dropout=dropout)]\n \n \n self.network = nn.Sequential(*layers)\n self.mid = nn.Linear(num_channels[-1], 5)#dunno if im right here\n self.out = nn.Linear(5, 1)\n # self.network.add_module(\"linear\", self.out)\n self.init_weights_linear()\n\n def init_weights_linear(self):\n nn.init.normal_(self.out.weight, mean=0., std=0.5)\n nn.init.constant_(self.out.bias, 0.1)\n nn.init.normal_(self.mid.weight, mean=0., std=0.5)\n nn.init.constant_(self.mid.bias, 0.1)\n\n def forward(self, x, view):\n #20 is the view\n input_t = x[:,:,-view:]\n #normally with x instead of input_t\n output = self.network(x)\n output = self.mid(output.permute(0,2,1))\n output = self.out(output)\n return output\n \n\n\nclass ActorCritic(nn.Module):\n def __init__(self, num_inputs, num_channels, kernel_size=2, dropout = 0.2, std = 0.0):\n super(ActorCritic, self).__init__()\n\n self.out = nn.Linear(num_channels[-1], 1)\n self.value = nn.Linear(1,1)#dont know if necessary\n\n layers =[]\n num_levels = len(num_channels)\n for i in range(num_levels):\n dilation_size = 2**i\n in_channels = num_inputs if i == 0 else num_channels[i-1]\n out_channels = num_channels[i]\n layers += [Conv_Block(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size, padding=(kernel_size-1)*dilation_size, dropout=dropout)]\n \n self.critic = nn.Sequential(*layers)\n # self.critic.add_module(\"linear\", self.out)\n # self.critic.add_module(\"value\", self.value)\n \n self.actor = nn.Sequential(*layers)\n # self.actor.add_module(\"linear\", self.out)\n\n self.log_std = nn.Parameter(torch.ones(1, 1)*std)\n\n self.init_weights_linear()\n\n def init_weights_linear(self):\n nn.init.normal_(self.out.weight, mean=0., std=0.1)\n nn.init.constant_(self.out.bias, 0.1)\n nn.init.normal_(self.value.weight, mean=0., std=0.1)\n nn.init.constant_(self.value.bias, 0.1)\n \n def forward(self, x):\n value = self.critic(x)\n value = self.out(value.permute(0,2,1))\n value = self.value(value)\n value = value[:,-1,:]\n mu = self.actor(x)\n mu = self.out(mu.permute(0,2,1))\n mu = mu[:,-1,:]\n std = self.log_std.exp().expand_as(mu)\n dist = Normal(mu, std)\n return value, dist\n\nclass WeirdTemporalConvNet(nn.Module):\n def __init__(self, num_inputs, num_channels, kernel_size=3, dropout = 0.2):\n super(WeirdTemporalConvNet, self).__init__()\n layers =[]\n num_levels = len(num_channels)\n for i in range(num_levels):\n dilation_size = 2**i\n in_channels = num_inputs if i == 0 else num_channels[i-1]\n out_channels = num_channels[i]\n layers += [Conv_Block(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size, padding=(kernel_size-1)*dilation_size, dropout=dropout)]\n \n \n self.network = nn.Sequential(*layers)\n self.mid = nn.Linear(num_channels[-1], 1)#dunno if im right here\n self.out = nn.Linear(15, 1)\n # 
self.network.add_module(\"linear\", self.out)\n self.init_weights_linear()\n\n def init_weights_linear(self):\n nn.init.normal_(self.mid.weight, mean=0., std=0.1)\n nn.init.constant_(self.mid.bias, 0.1)\n nn.init.normal_(self.out.weight, mean=0., std=0.1)\n nn.init.constant_(self.out.bias, 0.1)\n\n def forward(self, x):\n net_out = torch.zeros([1,15,1], dtype=torch.double)\n output = self.network(x)\n output = output.permute(0,2,1)\n output = self.mid(output)\n if output.size(1) <= 15:\n net_out[:,-output.size(1):,:] = output\n else:\n net_out = output\n net_out = self.out(net_out[:,-15:,:].permute(0,2,1))\n return net_out\n\n\nclass Pad(nn.Module):\n def __init__(self, num_inputs):\n super(Pad, self).__init__()\n self.num_inputs = num_inputs\n\n def forward(self, t):\n self.target = torch.zeros([1,self.num_inputs, 80], dtype=torch.double)\n self.target[:,:,:t.size(2)] = t\n out = self.target.permute(0,2,1)\n out = out.unsqueeze(0)\n return out\n\nclass Basic_Cnn_2d(nn.Module):\n def __init__(self, num_inputs, n_hid, kernel_size_w=2, kernel_size_h=1, stride_h=2, stride_w=1, dropout=0.2):\n super(Basic_Cnn_2d, self).__init__()\n\n self.pad = Pad(num_inputs)\n self.output_shape = [(80-(1-1)-1)/1+1, (num_inputs-(num_inputs-1)-1)/1+1]\n\n self.conv1 = nn.Conv2d(1, n_hid, (1, num_inputs))\n\n self.relu1 = nn.ReLU()\n\n self.dropout1 = nn.Dropout(dropout)\n\n self.fcl = nn.Linear(n_hid, 1)\n self.out = nn.Linear(int(self.output_shape[0]), 1)\n \n self.network = nn.Sequential(self.pad, self.conv1, self.relu1, self.dropout1)\n self.init_weights()\n\n def init_weights(self):\n nn.init.normal_(self.fcl.weight, mean=0., std=0.1)\n nn.init.constant_(self.fcl.bias, 0.1)\n nn.init.normal_(self.out.weight, mean=0., std=0.1)\n nn.init.constant_(self.out.bias, 0.1)\n self.conv1.weight.data.normal_(0, 0.01)\n\n def forward(self, x):\n output = self.network(x)\n output = output.squeeze(3)\n output = output.permute(0,2,1)\n output = self.fcl(output)\n output = output.permute(0,2,1)\n output = self.out(output)\n return output\n\nclass Basic_Linear(nn.Module):\n def __init__(self, num_inputs):\n super(Basic_Linear, self).__init__()\n\n self.pad = Pad(num_inputs)\n\n self.lin_q1 = nn.Linear(80,160)\n self.out1 = nn.Linear(160,1)\n self.lin_qd1 = nn.Linear(80,160)\n self.out2 = nn.Linear(160,1)\n self.lin_control = nn.Linear(80,160)\n self.out3 = nn.Linear(160,1)\n self.lin_qdd1 = nn.Linear(80,160)\n self.out4 = nn.Linear(160,1)\n self.mid = nn.Linear(4,10)\n self.out = nn.Linear(10,1)\n\n self.relu1 = nn.ReLU()\n self.relu2 = nn.ReLU()\n self.relu3 = nn.ReLU()\n self.relu4 = nn.ReLU()\n\n self.init_weights()\n\n def init_weights(self):\n nn.init.normal_(self.lin_q1.weight, mean=0., std=0.1)\n nn.init.constant_(self.lin_q1.bias, 0.1)\n nn.init.normal_(self.lin_qd1.weight, mean=0., std=0.1)\n nn.init.constant_(self.lin_qd1.bias, 0.1)\n nn.init.normal_(self.lin_control.weight, mean=0., std=0.1)\n nn.init.constant_(self.lin_control.bias, 0.1)\n nn.init.normal_(self.lin_qdd1.weight, mean=0., std=0.1)\n nn.init.constant_(self.lin_qdd1.bias, 0.1)\n nn.init.normal_(self.out.weight, mean=0., std=0.1)\n nn.init.constant_(self.out.bias, 0.1)\n nn.init.normal_(self.out1.weight, mean=0., std=0.1)\n nn.init.constant_(self.out1.bias, 0.1)\n nn.init.normal_(self.out2.weight, mean=0., std=0.1)\n nn.init.constant_(self.out2.bias, 0.1)\n nn.init.normal_(self.out3.weight, mean=0., std=0.1)\n nn.init.constant_(self.out3.bias, 0.1)\n nn.init.normal_(self.out4.weight, mean=0., std=0.1)\n nn.init.constant_(self.out4.bias, 0.1)\n 
nn.init.normal_(self.mid.weight, mean=0., std=0.1)\n        nn.init.constant_(self.mid.bias, 0.1)\n        \n\n    def forward(self, x):\n        input_ = self.pad(x).squeeze(0)\n        out1 = self.lin_q1(input_[:,:,0])\n        out1 = self.out1(out1)\n        out2 = self.lin_qd1(input_[:,:,1])\n        out2 = self.out2(out2)\n        out3 = self.lin_control(input_[:,:,3])\n        out3 = self.out3(out3)\n        out4 = self.lin_qdd1(input_[:,:,2])\n        out4 = self.out4(out4) \n        out1 = self.relu1(out1)\n        out2 = self.relu2(out2)\n        out3 = self.relu3(out3)\n        out4 = self.relu4(out4)\n        input_ = torch.cat((out1,out2,out3,out4), dim=1)\n        out = self.mid(input_)\n        out = self.out(out)\n        return out  # was `return out3`, which bypassed the fusion layers computed above\n\n\n \n \n\n\n\n \n\n\n\n\n\n\n \n\n","sub_path":"cart_pred/less_timesteps_nn/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"387279094","text":"\"\"\"\n\nSimulation script for a model that captures the toroidal ITG mode.\n\nIt is a special case of the Busse annulus. \n\n\"\"\"\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nfrom dedalus import public as de\nfrom dedalus.extras import plot_tools\nfrom dedalus.extras import flow_tools\n\nfrom mpi4py import MPI\n\n# Simulation parameters\nshape = (64,64)\nLx, Ly = (10. * 2. * np.pi, 10. * 2. * np.pi)\n\nparam = open('param','r')\nrho=float(param.readline().split()[0])\n\nlogger.info('test %f' %rho)\nrho=0.5\nep=0.25\nR_L=45 \nfmag = 0*5e-1\nmu = 1\nmuZF = 0\nnu = 0.00\ndt = 1e-3\nsim_time=500\nmod_print=10\ndata_dt = sim_time/200.#0.1\nlinear=False\ncorrelated=True\nZF_damping=False\ncorrect_physics=True\n\n\n\n# Initialize some variables\nnx, ny = shape\nlastF = 0\nlastIt =-1\n\n# Random Forcing\ndef forcing(solv,domain):\t\n\tglobal lastIt\n\tglobal lastF\n\tif(solv.iteration != lastIt or not correlated):\n\t\tglobal dt\n\t\tsh=domain.local_grid_shape()\n\t\t#logger.info('Iteration: %d' %solv.iteration)\n\t\tlastF = np.random.uniform(-1,1,sh*1.5)/np.sqrt(dt)\n\t\t#lastF = np.random.standard_normal(sh*1.5)/np.sqrt(dt)\n\t\tlastIt=solv.iteration\n\treturn lastF\t\n\t\t\n\n# Create bases and domain\nx_basis = de.Fourier('x', nx, interval=(0, Lx), dealias=3/2)\ny_basis = de.Fourier('y', ny, interval=(0, Ly), dealias=3/2)\ndomain = de.Domain([x_basis, y_basis], grid_dtype=np.float64)\nforcing_func = de.operators.GeneralFunction(domain,'g',forcing, args=[])\n\n# Poisson equation\nproblem = de.IVP(domain, variables=['phi','u','v','w','T','wx','wy','Tx','Ty'])\n\n# Create Parameters\nproblem.parameters['L'] = 1/(rho*ep*R_L)\nproblem.parameters['Lx'] = Lx\nproblem.parameters['Ly'] = Ly\nproblem.parameters['ep'] = 2*rho*ep\nproblem.parameters['mu'] = mu\nproblem.parameters['nu'] = nu\nproblem.parameters['fmag'] = fmag\nif(not ZF_damping):\n\tproblem.parameters['muZF'] = muZF\n\nproblem.parameters['forcing_func'] = forcing_func\n\n# Add equations\nif(correct_physics):\n    problem.add_equation(\"dx(dx(phi)) + dy(dy(phi)) - w - phi = 0\", condition=\"(ny != 0)\")\n    problem.add_equation(\"dx(dx(phi)) - w = 0\", condition=\"(ny == 0)\")\nelse:\n    problem.add_equation(\"dx(dx(phi)) + dy(dy(phi)) - w - phi = 0\")\n\n\nproblem.add_equation(\"u - dx(phi) = 0\")\nproblem.add_equation(\"v + dy(phi) = 0\")\nproblem.add_equation(\"Tx - dx(T) = 0\")\nproblem.add_equation(\"Ty - dy(T) = 0\")\nproblem.add_equation(\"wx - dx(w) = 0\")\nproblem.add_equation(\"wy - dy(w) = 0\")\n\n# handle zonal flows differently if 
needed\nif(ZF_damping):\n\tif(linear):\n\t\tproblem.add_equation(\"dt(w) + ep*Ty + mu*w - nu*dy(wy) - nu*dx(wx) = fmag*forcing_func\", condition=\"(ny != 0) or (nx != 0)\")\n\t\tproblem.add_equation(\"dt(T) - v/L + mu*T - nu*dy(Ty) - nu*dx(Tx) = fmag*forcing_func\", condition=\"(ny != 0) or (nx != 0)\")\n\telse:\n\t\tproblem.add_equation(\"dt(w) + ep*Ty + mu*w - nu*dy(wy) - nu*dx(wx) = -u*wy - v*wx + fmag*forcing_func\", condition=\"(ny != 0) or (nx != 0)\")\n\t\tproblem.add_equation(\"dt(T) - v/L + mu*T - nu*dy(Ty) - nu*dx(Tx) = -u*Ty - v*Tx + fmag*forcing_func\", condition=\"(ny != 0) or (nx != 0)\")\n\nelse:\n\tif(linear):\n\t\tproblem.add_equation(\"dt(w) + ep*Ty + mu*w - nu*dy(wy) - nu*dx(wx) = fmag*forcing_func\", condition=\"(ny != 0)\")\n\t\tproblem.add_equation(\"dt(T) - v/L + mu*T - nu*dy(Ty) - nu*dx(Tx) = fmag*forcing_func\", condition=\"(ny != 0)\")\n\t\tproblem.add_equation(\"dt(w) + ep*Ty + muZF*w = fmag*forcing_func\", condition=\"(nx != 0) and (ny == 0)\")\n\t\tproblem.add_equation(\"dt(T) - v/L + muZF*T = fmag*forcing_func\", condition=\"(nx != 0) and (ny == 0)\")\n\telse:\n\t\tproblem.add_equation(\"dt(w) + ep*Ty + mu*w - nu*dy(wy) - nu*dx(wx) = -u*wy - v*wx + fmag*forcing_func\", condition=\"(ny != 0)\")\n\t\tproblem.add_equation(\"dt(T) - v/L + mu*T - nu*dy(Ty) - nu*dx(Tx) = -u*Ty - v*Tx + fmag*forcing_func\", condition=\"(ny != 0)\")\n\t\tproblem.add_equation(\"dt(w) + ep*Ty + muZF*w = -u*wy - v*wx + fmag*forcing_func\", condition=\"(nx != 0) and (ny == 0)\")\n\t\tproblem.add_equation(\"dt(T) - v/L + muZF*T = -u*Ty - v*Tx + fmag*forcing_func\", condition=\"(nx != 0) and (ny == 0)\")\n\n# Gauge conditions\nproblem.add_equation(\"T = 0\", condition=\"(nx == 0) and (ny == 0)\")\nproblem.add_equation(\"phi = 0\", condition=\"(nx == 0) and (ny == 0)\")\n\n\n# Time stepper\nts= de.timesteppers.RK443 #443\n\n\n# Build solver\nsolver = problem.build_solver(ts)\nforcing_func.args = [solver,domain]\nforcing_func.original_args = [solver,domain]\n\n\n# Initial conditions\nx = domain.grid(0)\ny = domain.grid(1)\nw = solver.state['w']\nT = solver.state['T']\nphi = solver.state['phi']\nw['g'] = 0\nsh=domain.local_grid_shape()\n#logger.info('Iteration: %d' %solv.iteration)\nw['g'] = 0.01*np.random.uniform(-1,1,sh)/np.sqrt(dt)\nT['g'] = 0.01*np.random.uniform(-1,1,sh)/np.sqrt(dt)\n\n\n# Integration Parameters\nsolver.stop_sim_time = sim_time\nsolver.stop_wall_time = np.inf #30 * 60.\nsolver.stop_iteration = np.inf\n\n# Analysis\nsnapshots = solver.evaluator.add_file_handler('snapshots', sim_dt=data_dt, max_writes=50)\nsnapshots.add_task(\"phi\",name='phi')\nsnapshots.add_task(\"w\",name='w')\nsnapshots.add_task(\"T\",name='T')\n#snapshots.add_system(solver.state)\n\n# ZF data file\nsnapshot_ZF = solver.evaluator.add_file_handler('ZF_data', sim_dt=data_dt)\nsnapshot_ZF.add_task(\"integ(u,'y')/Ly\",name='ZF')\nsnapshot_ZF.add_task(\"integ(v*T,'x','y')/(Ly*Lx)\",name='zi')\n\n# CFL\nCFL = flow_tools.CFL(solver, initial_dt=dt, cadence=5, safety=1.5, max_change=1.5, min_change=0.5, max_dt=0.02)\nCFL.add_velocities(('u','v'))\n\naverage_time=0.0\ntry:\n\tlogger.info('Starting loop')\n\tstart_time = time.time()\n\tlast_time = start_time\n\twhile solver.ok and dt > 1e-5:\n\t\tdt = CFL.compute_dt()\n\t\tsolver.step(dt)\n\t\tif solver.iteration % mod_print == 0:\n\t\t\ttemp = time.time()\n\t\t\tlogger.info('Iteration: %i, Time: %e, dt: %.2e, looptime: %.2f s' %(solver.iteration, solver.sim_time, dt, temp-last_time))\n\t\t\taverage_time += temp-last_time \n\t\t\tlast_time = 
temp\nexcept:\n\tlogger.error('Exception raised, triggering end of main loop.')\n\traise\nfinally:\n\tend_time = time.time()\n\tlogger.info('Iterations: %i' %solver.iteration)\n\tlogger.info('Sim end time: %f' %solver.sim_time)\n\tlogger.info('Average log time: %.2f sec' %(average_time*mod_print/solver.iteration))\n\tlogger.info('Run time: %.2f sec' %(end_time - start_time))\n\tlogger.info('Run time: %f cpu-hr' %((end_time-start_time)/60/60*domain.dist.comm_cart.size))\n\n# Quickly plot final solution\nu = solver.state['phi']\nr = int(shape[0]*1.5)\nsize = domain.distributor.size\ndiv = int(r/size)\nu.require_grid_space()\ndata=np.zeros(r*div)\nfor x in range(0,r):\n\tfor y in range(0,div):\n\t\tdata[div*x+ y]=u.data[x][y]\nbuf= np.zeros(r*r) \ndomain.distributor.comm_cart.Gather(data,buf)\nif(domain.distributor.rank == 0):\n\tbuf2 = np.zeros((r,r))\n\tfor proc in range(0,size):\n\t\tfor y in range(0,r):\n\t\t\tfor z in range(0,div):\n\t\t\t\tbuf2[y][proc*div + z]=buf[proc*r*div + z + y*div]\n\tu.data=buf2\n\tplot_tools.plot_bot_2d(u)\n\tplt.savefig('ITG_%s.png' %domain.distributor.size)\n","sub_path":"NL/myproblem.py","file_name":"myproblem.py","file_ext":"py","file_size_in_byte":6896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"146865468","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom pip.req import parse_requirements\n\n# parse_requirements() returns generator of pip.req.InstallRequirement objects\ninstall_reqs = parse_requirements('requirements.pip')\n\nreqs = [str(ir.req) for ir in install_reqs]\n\nsetup(\n name = 'apng2webp',\n version = '0.0.2',\n author = 'Benny',\n author_email = 'Benny@GMX.it',\n url='',\n license='Public Domain',\n keywords = \"webp webby apng converter image\".split(),\n description='Convert apng animations to webp animations',\n packages = find_packages(),\n scripts = ['apng2webp'],\n install_requires = reqs,\n classifiers = [\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Environment :: Console\",\n \"License :: Public Domain\",\n ],\n)\n\n","sub_path":"apng2webp/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"219812574","text":"import random, numpy, math, scipy\nfrom SumTree import SumTree\n\n\n\nLEARNING_RATE = 0.00025\n\n\n#-------------------- MEMORY --------------------------\nclass Memory: # stored as ( s, a, r, s_ ) in SumTree\n e = 0.01\n a = 0.6\n\n def __init__(self, action_size, buffer_size, batch_size, seed):\n self.tree = SumTree(buffer_size)\n self.action_size = action_size\n self.max_size=buffer_size\n self.batch_size = batch_size\n self.seed = random.seed(seed)\n\n def _getPriority(self, error):\n return (error + self.e) ** self.a\n\n def add(self, error, state, action, reward, next_state, done):\n sample=(state, action, reward, next_state, done)\n p = self._getPriority(error)\n self.tree.add(p, sample)\n\n def sampleInternal(self):\n n=self.batch_size\n batch = []\n segment = self.tree.total() / n\n\n for i in range(n):\n a = segment * i\n b = segment * (i + 1)\n\n s = random.uniform(a, b)\n (idx, p, data) = self.tree.get(s)\n batch.append( (idx, data) )\n\n return batch\n\n def sample(self):\n n=self.batch_size\n batch = []\n segment = self.tree.total() / n\n\n for i in range(n):\n a = segment * i\n b = segment * (i + 1)\n\n s = 
random.uniform(a, b)\n (idx, p, data) = self.tree.get(s)\n batch.append( (idx, data) )\n\n states = numpy.vstack([e[1][0] for e in batch if e is not None])\n actions = numpy.vstack([e[1][1] for e in batch if e is not None])\n rewards = numpy.vstack([e[1][2] for e in batch if e is not None])\n nstates = numpy.vstack([e[1][3] for e in batch if e is not None])\n dones = numpy.vstack([e[1][4] for e in batch if e is not None])\n\n return (states,actions,rewards,nstates,dones)\n\n def update(self, idx, error):\n p = self._getPriority(error)\n self.tree.update(idx, p)\n\n\n #\n #\n #\n #\n #\n # def observe(self, sample): # in (s, a, r, s_) format\n # x, y, errors = self._getTargets([(0, sample)])\n # self.memory.add(errors[0], sample)\n #\n # if self.steps % UPDATE_TARGET_FREQUENCY == 0:\n # self.brain.updateTargetModel()\n #\n # # slowly decrease Epsilon based on our eperience\n # self.steps += 1\n # self.epsilon = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * self.steps)\n #\n # def _getTargets(self, batch):\n # no_state = numpy.zeros(self.stateCnt)\n #\n # states = numpy.array([ o[1][0] for o in batch ])\n # states_ = numpy.array([ (no_state if o[1][3] is None else o[1][3]) for o in batch ])\n #\n # p = agent.brain.predict(states)\n #\n # p_ = agent.brain.predict(states_, target=False)\n # pTarget_ = agent.brain.predict(states_, target=True)\n #\n # x = numpy.zeros((len(batch), IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT))\n # y = numpy.zeros((len(batch), self.actionCnt))\n # errors = numpy.zeros(len(batch))\n #\n # for i in range(len(batch)):\n # o = batch[i][1]\n # s = o[0]; a = o[1]; r = o[2]; s_ = o[3]\n #\n # t = p[i]\n # oldVal = t[a]\n # if s_ is None:\n # t[a] = r\n # else:\n # t[a] = r + GAMMA * pTarget_[i][ numpy.argmax(p_[i]) ] # double DQN\n #\n # x[i] = s\n # y[i] = t\n # errors[i] = abs(oldVal - t[a])\n #\n # return (x, y, errors)\n #\n # def learn(self):\n # batch = self.memory.sample(BATCH_SIZE)\n # x, y, errors = self._getTargets(batch)\n #\n # #update errors\n # for i in range(len(batch)):\n # idx = batch[i][0]\n # self.memory.update(idx, errors[i])\n #\n # self.brain.train(x, y)\n","sub_path":"CodeToRun/rep.py","file_name":"rep.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"137704820","text":"a = 2\nwhile a > 0:\n\ttemperature = float(input(\"Temperature: \"))\n\tvelocity = float(input(\"Velocity: \"))\n\tif temperature > 60 or velocity > 120 or velocity < 3 :\n\t\tprint(\"Enter acceptables values\")\n\telse: \n\t\teff_temp = float()\n\t\teff_temp = 35.74 + 0.6215 * temperature * (0.4275 * temperature - 35.75) * (velocity**0.16)\n\t\tprint(\"The Effective Temperature is:\",eff_temp)\n","sub_path":"homework3/hw3_1.py","file_name":"hw3_1.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"271091429","text":"import numpy\nimport math\n\ndata = numpy.matrix([\n [-6.4589, -3.9291, 24.1184, 22.9244],\n # [-7.4473, -3.2421, 23.9164, 21.5218],\n [4.7296, -34.2979, 6.6098, -5.4732],\n [5.4915, -34.8143, 6.6496, -6.4036]\n])\n\n# result = [31.1023, 18.8931, 0.924432]\nresult = numpy.matrix([30.8473, 19.6899, 0.905965])\n\nerror = 0\n\n# 写法1\nfor e in data:\n c = math.cos(result[0, 2])\n s = math.sin(result[0, 2])\n rot = numpy.matrix([[c, s], [-s, c]])\n residual = rot * e[0, 0:2].T + result[0, 0:2].T - e[0, 2:4].T\n print(residual)\n error += residual.T * 
residual\n\nprint('final const:', float(error) / 2)\nprint('error:', math.sqrt(error / (8 - 3)))\n\n# 写法2\nc = math.cos(result[0, 2])\ns = math.sin(result[0, 2])\nrot = numpy.matrix([[c, s], [-s, c]])\ntranslation = numpy.ones((2, data.shape[0]))\ntranslation[0, :] *= float(result[0, 0])\ntranslation[1, :] *= float(result[0, 1])\nresidual = rot * data[:, 0:2].T + translation - data[:, 2:4].T\nprint(residual)\nprint('final const(2):', numpy.sum(numpy.diag(residual * residual.T)) / 2)\n","sub_path":"Tmp/python/bysj-undergraduate/slam-evaluation/evaluation/transform-numpy.py","file_name":"transform-numpy.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"396764444","text":"import sys\nimport threading\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\n\nfrom Ui_MainWindow import *\n\n\n# 1. 创建信号 Message = QtCore.pyqtSignal(str)\n# 2. 主线程连接对应函数\n# 3. 从子线程发送信号\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n # 信号量 互斥\n Message = QtCore.pyqtSignal(str)\n SetRange = QtCore.pyqtSignal(int)\n SetProcess = QtCore.pyqtSignal(int)\n\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n self.pushButton.clicked.connect(self.getTXT)\n self.pushButton_2.clicked.connect(self.setBrowerPath)\n self.Message.connect(self.textEdit.append)\n self.SetRange.connect(self.setProcessRange)\n self.SetProcess.connect(self.progressBar.setValue)\n\n # 选择路径\n def setBrowerPath(self):\n download_path = QtWidgets.QFileDialog.getExistingDirectory(self, \"位置\", \"D:\")\n self.lineEdit.setText(download_path)\n\n # 设置进度条的总长度 章节N\n def setProcessRange(self, chapterNum):\n self.progressBar.setRange(1, chapterNum)\n\n # 显示文本\n def DisplayMessage(self, str):\n self.Message.emit(str)\n\n # 子线程建立\n def getTXT(self):\n self.t1 = threading.Thread(target=self.kp) # 新建 爬虫子线程\n self.t1.setDaemon(True) # Daemon 守护进程\n self.t1.start()\n\n # 爬虫\n def kp(self):\n self.pushButton.setEnabled(False)\n self.path = self.lineEdit_2.text()\n self.download_path = self.lineEdit.text()\n times = 0\n while times < 3:\n try:\n r = requests.get(self.path)\n self.DisplayMessage(\"连接 :\" + self.path)\n times = 3\n except Exception as e:\n self.DisplayMessage(\"重试\" + str(times) + \"次,错误:\" + str(e))\n time.sleep(1)\n times = times + 1\n if times == 10:\n self.DisplayMessage(\"失败\")\n return\n self.DisplayMessage(\"连接成功\")\n soup = BeautifulSoup(r.content, 'html.parser')\n totalChapter = soup.find_all('tr', {\"itemtype\": \"http://schema.org/Chapter\"})\n tds = totalChapter[-1].find_all('td')\n totalChapterNum = int(tds[0].get_text())\n self.SetRange.emit(totalChapterNum) # emit发送信号\n self.DisplayMessage(\"全部章节: \" + str(totalChapterNum))\n self.path = self.path + '&chapterid='\n with open(self.download_path+'/test.txt', 'w+', encoding='utf-8') as f:\n for i in range(1, totalChapterNum + 1):\n self.SetProcess.emit(i)\n times = 0\n while times < 10:\n try:\n r = requests.get(self.path + str(i))\n self.DisplayMessage(\"连接 :\" + self.path + str(i))\n times = 10\n except Exception as e:\n self.DisplayMessage(\"重试\" + str(times) + \"次,错误:\" + str(e))\n time.sleep(1)\n times = times + 1\n if times == 3:\n self.DisplayMessage(\"失败\")\n return\n soup = BeautifulSoup(r.content, 'lxml')\n header = soup.select('.noveltext > div:nth-child(2) > h2:nth-child(1)')\n text = soup.find('div', class_='noveltext')\n text = text.get_text() # 清除html标签部分,得到需要的内容\n text = 
text.split('查看收藏列表')\n                text = text[1].split('插入书签')\n                text = text[0].strip()\n                text = text.replace('  ', '\\n ')\n                text = text + '\\r\\n\\r\\n\\r\\n---------章节分割线--------\\r\\n\\r\\n\\r\\n'\n                f.write(text)\n                self.DisplayMessage(\"章节\" + str(i) + \"完成 \" + str(header[0]).strip('<h2>').strip('</h2>
'))\n self.pushButton.setEnabled(True)\n self.DisplayMessage(\"已完成\")\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n mywin = MainWindow()\n mywin.show()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"386701913","text":"'''\nCopyright (c) 2015 by Tobias Houska\n\nThis file is part of Statistical Parameter Estimation Tool (SPOTPY).\n\n:author: Philipp Kraft\n'''\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport multiprocessing as mp\n\nclass ForEach(object):\n def __init__(self,process):\n self.size = mp.cpu_count()\n self.pool = mp.Pool()\n self.process = process\n def is_idle(self):\n return False\n def terminate(self):\n self.pool.join()\n def __call__(self,jobs):\n for result in self.pool.imap_unordered(self.process, jobs):\n yield result\ndef proc(j):\n for i in xrange(10000):\n q = i,i ** 2\n return j,j**2\nif __name__ == '__main__':\n fe = ForEach(proc)\n jobs = range(10000)\n for j,q in fe(jobs):\n print(j)\n","sub_path":"spotpy/parallel/mproc.py","file_name":"mproc.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"172456605","text":"# -*- coding: utf-8 -*-\n\n# upload_file_to_server.py -d %WORKSPACE% -f Factory.apk -s \"/home/system1/MTK/BUILD_FOR_TEST/upload_apk/\" -i \"10.250.115.104\"\nimport paramiko\nimport sys, getopt, os\nimport platform\n\n\ndef upload_file_to_server(server, port, src, des):\n ssh = paramiko.Transport((server, port))\n ssh.connect(username=\"system1\", password=\"ysbznjj2016_{:>\")\n sftp = paramiko.SFTPClient.from_transport(ssh)\n try:\n sftp.put(src, des)\n except Exception:\n print(\"[-]put Error:User name or password error or uploaded file does not exist\")\n\n print(\"上传\")\n ssh.close()\n\n\nif __name__ == '__main__':\n source_dir = \"\"\n filename = \"\"\n destination_dir = \"\"\n server_ip = \"\"\n\n server_port = 22\n src_file = \"\"\n des_file = \"\"\n\n opts, args = getopt.getopt(sys.argv[1:], \"f:d:s:i:p:\")\n for op, value in opts:\n if op == \"-d\":\n source_dir = value\n elif op == \"-f\":\n filename = value\n elif op == \"-s\":\n destination_dir = value\n elif op == \"-i\":\n server_ip = value\n\n sysstr = platform.system()\n if sysstr == \"Windows\":\n connector = \"\\\\\"\n else:\n connector = \"/\"\n\n src_file = source_dir + connector + filename\n des_file = destination_dir + filename\n\n if server_ip == \"\":\n print(\"IP地址不能为空!!!\")\n sys.exit(1)\n\n if not os.path.exists(src_file):\n print(\"源文件不存在!!!\")\n sys.exit(2)\n\n upload_file_to_server(server_ip, server_port, src_file, des_file)\n","sub_path":"Pipeline Script for AOSP/Python/upload_file_to_server.py","file_name":"upload_file_to_server.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"87428956","text":"import cairo\nimport pygame\n\nwidth = 300\nheight = 200\n\npygame.init()\npygame.fastevent.init()\nclock = pygame.time.Clock()\nsdl_surface = pygame.display.set_mode((width, height))\n\nc_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)\nctx = cairo.Context(c_surface)\n\nwhile True:\n pygame.fastevent.get()\n clock.tick(30)\n ctx.rectangle(10, 10, 50, 50)\n 
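# (Note, not part of the original: the rectangle path set above is filled with opaque\n    # black below, then the raw cairo bytes are copied into the pygame surface.\n    # cairo ARGB32 byte order is endian-dependent, so the direct get_data() copy is an\n    # assumption; a hedged alternative is\n    # img = pygame.image.frombuffer(c_surface.get_data(), (width, height), \"ARGB\")\n    # sdl_surface.blit(img, (0, 0))\n    # before pygame.display.flip().)\n    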
ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)\n ctx.fill_preserve()\n\n dest = pygame.surfarray.pixels2d(sdl_surface)\n dest.data[:] = c_surface.get_data()\n pygame.display.flip()\n","sub_path":"pygamelearning/pygame4.py","file_name":"pygame4.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"615887917","text":"'''\r\nThe purpose of this section is to extend ResNet in an ensemble approach using the final layer before model output. \r\nIt designed for combining 2 models with the same number of nodes on last fully connected layer before output layer\r\nThe optional methods set at the parameter OPTION are \r\n1. Summing fully connected portion (SUM)\r\n2. Multiplying fully connected portions (MULT)\r\n3. Concatenating fully connected portions (CONC)\r\n4. RNN (RNN)\r\n'''\r\nimport tensorflow as tf\r\nfrom keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nimport seaborn as sn\r\nfrom sklearn import metrics\r\nfrom keras import backend as K\r\nfrom tensorflow.keras.layers import Flatten, Dense, BatchNormalization, Input, Concatenate, Add, Average, SimpleRNN, Embedding, Dropout\r\nfrom tensorflow.keras.models import Model, load_model\r\nfrom tensorflow.keras.models import Sequential\r\n\r\nfrom get_results import *\r\n\r\n\r\ndef normalize_data(augmented_image):\r\n '''\r\n Purpose: scale the data sample-wise between -1 and 1\r\n Args:\r\n augmented_image: array of the image to be scaled\r\n Returns:\r\n re_scaled_image: augmented_image scaled between -1 and 1\r\n Assumption: values range from 0 to 255\r\n '''\r\n re_scaled_image = (augmented_image/127.5) - 1\r\n return re_scaled_image\r\n\r\n'''\r\nSET THE PARAMETERS\r\n'''\r\nOPTION = \"SUM\"\r\nNUM_EPOCHS = 50\r\nBATCH_SIZE = 32\r\nTYPE_NAME = \"With_Ensemble_DR_LR1e4\" #name of the test being performed\r\nENSEMBLE_DR_RATE = 0.3\r\n\r\n'''\r\nLOAD THE MODELS FROM THE PATH NAME SAVED\r\n'''\r\nmodel1_path = \"Saved_Models/Train1_50_No_Dropout/saved_model.pb\"\r\nmodel2_path = \"Saved_Models/Train1_100_No_Dropout/saved_model.pb\"\r\nfinal_layer_size = 2048\r\nlr_extension = 0.0001\r\n\r\nprint(\"------------------Loading the models------------------------\")\r\nmodel1 = load_model('Saved_Models/Train1_50_DR_Final/')\r\nmodel2 = load_model('Saved_Models/Train1_100_DR_Final/')\r\nmodel1._name = \"Train1\"\r\nmodel2._name= \"Train2\"\r\n#remove the last layer of the model\r\nmodel1.pop()\r\nmodel2.pop()\r\n#don't re-train the models\r\nmodel1.trainable=False\r\nmodel2.trainable=False\r\nprint(\"model 1 layers trainable:\")\r\nprint(model1.layers[0].trainable)\r\nprint(model1.layers[1].trainable)\r\nprint(model1.layers[2].trainable)\r\n\r\n#re-train from scratch option\r\n# #model 1******************************************************\r\n# model_resnet50 = tf.keras.applications.ResNet50V2(\r\n# include_top=False,\r\n# weights=\"imagenet\",\r\n# input_tensor=None,\r\n# input_shape=(50,50,3)\r\n# )\r\n\r\n# model1 = Sequential(\r\n# [\r\n# Input(shape=(50,50,3)),\r\n# model_resnet50,\r\n# Flatten(),\r\n# Dense(2048, activation='tanh'), #relu because that is the activation function used by ResNet\r\n# ]\r\n# )\r\n\r\n# print(\"Model 1 layer 0:\", model1.layers[0])\r\n# print(\"Model 1 layer 1:\", model1.layers[1])\r\n\r\n# model_resnet101 = 
tf.keras.applications.ResNet101V2(\r\n# include_top=False,\r\n# weights=\"imagenet\",\r\n# input_tensor=None,\r\n# input_shape=(50,50,3)\r\n# )\r\n\r\n# model2 = Sequential(\r\n# [\r\n# Input(shape=(50,50,3)),\r\n# model_resnet101,\r\n# Flatten(),\r\n# Dense(2048, activation='tanh'), #relu because that is the activation function used by ResNet\r\n# ]\r\n# )\r\n\r\n'''\r\nLoad the training data\r\n'''\r\n#load train data\r\nprint(\"------------------------Loading the data--------------------------\")\r\nX_train = np.load(\"../Data/IDC_Data/Split/X_train_patient.npy\")\r\ny_train = np.load(\"../Data/IDC_Data/Split/y_train_patient.npy\")\r\nX_test = np.load(\"../Data/IDC_Data/Split/X_test_patient.npy\")\r\ny_test = np.load(\"../Data/IDC_Data/Split/y_test_patient.npy\")\r\n\r\n\r\n#Create model for each option and train the model\r\nif OPTION == \"SUM\":\r\n print(\"Using summation option\")\r\n #develop a new model that will sum the outputs of each model before prediction \r\n inputs = Input(shape=(50,50,3))\r\n model1_layer = model1(inputs)\r\n model2_layer = model2(inputs)\r\n addition_layer = Add()([model1_layer, model2_layer]) #concatenation layer\r\n dropout1 = Dropout(ENSEMBLE_DR_RATE)(addition_layer)\r\n dense1 = Dense(512, activation='relu')(dropout1)\r\n dropout2 = Dropout(ENSEMBLE_DR_RATE)(dense1)\r\n # dense1 = Dense(final_layer_size, activation='relu')(addition_layer)\r\n # dense2 = Dense(final_layer_size/2, activation='relu')(dense1)\r\n out = Dense(2, activation='softmax')(dropout2)\r\n model = Model(inputs = inputs, outputs = out)\r\n\r\n print(\"Summation model summary:\")\r\n model.summary()\r\n\r\n #create the augmenting data object\r\n aug = ImageDataGenerator(\r\n rotation_range=30,\r\n brightness_range = (0.5, 1.5),\r\n\t\thorizontal_flip=True,\r\n vertical_flip = True,\r\n\t\tfill_mode=\"reflect\",\r\n preprocessing_function=normalize_data)\r\n\r\n #compile the model\r\n model.compile(\r\n optimizer=tf.keras.optimizers.SGD(learning_rate = lr_extension), #function used to minimize the loss (back propogation and gradient descent)\r\n loss=tf.keras.losses.BinaryCrossentropy(), #defines how far off a prediction is from correct answer - loss is high if model predicts small prob classified correctly\r\n metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.CategoricalAccuracy()]\r\n )\r\n\r\n\r\n #sending the same augmented input through both models, fit the model to the data\r\n history1 = model.fit(\r\n x = aug.flow(X_train, y_train, batch_size = BATCH_SIZE),\r\n shuffle = True,\r\n epochs = NUM_EPOCHS,\r\n callbacks = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, min_delta = 0.001, restore_best_weights=True)])\r\n\r\nelif OPTION == \"AVG\":\r\n print(\"Using average option\")\r\n #develop a new model that will average the outputs of each model before prediction \r\n inputs = Input(shape=(50,50,3))\r\n model1_layer = model1(inputs)\r\n model2_layer = model2(inputs)\r\n avg_layer = Average()([model1_layer, model2_layer]) #average layer\r\n dropout1 = Dropout(ENSEMBLE_DR_RATE)(avg_layer)\r\n layer1 = Dense(512, activation = 'relu')(dropout1)\r\n dropout2 = Dropout(ENSEMBLE_DR_RATE)(layer1)\r\n #additional layers used in testing\r\n # layer2 = Dense(128, activation = 'tanh')(dropout2)\r\n # dropout3 = Dropout(ENSEMBLE_DR_RATE)(layer2)\r\n # layer3 = Dense(32, activation = 'tanh')(dropout3)\r\n # dropout4 = Dropout(ENSEMBLE_DR_RATE)(layer3)\r\n # # dense1 = Dense(final_layer_size, activation='relu')(addition_layer)\r\n # dense2 = 
Dense(final_layer_size/2, activation='relu')(dense1)\r\n out = Dense(2, activation='softmax')(dropout2)\r\n model = Model(inputs = inputs, outputs = out)\r\n\r\n print(\"Summary of Avergae Model:\")\r\n model.summary()\r\n #create the augmenting data object\r\n aug = ImageDataGenerator(\r\n rotation_range=30,\r\n brightness_range = (0.5, 1.5),\r\n horizontal_flip=True,\r\n vertical_flip = True,\r\n fill_mode=\"reflect\",\r\n preprocessing_function=normalize_data)\r\n\r\n #compile the model\r\n model.compile(\r\n optimizer=tf.keras.optimizers.SGD(learning_rate = lr_extension), #function used to minimize the loss (back propogation and gradient descent)\r\n loss=tf.keras.losses.BinaryCrossentropy(), #defines how far off a prediction is from correct answer - loss is high if model predicts small prob classified correctly\r\n metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.CategoricalAccuracy()]\r\n )\r\n\r\n model.summary()\r\n\r\n #sending the same augmented input through both models and fit the model\r\n history1 = model.fit(\r\n x = aug.flow(X_train, y_train, batch_size = BATCH_SIZE),\r\n shuffle = True,\r\n epochs = NUM_EPOCHS,\r\n callbacks = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, min_delta = 0.001, restore_best_weights=True)])\r\n\r\n \r\nelif OPTION == \"CONCAT\":\r\n print(\"Using conatenation option\")\r\n #develop a new model that will concatenate the outputs of each model before prediction \r\n inputs = Input(shape=(50,50,3))\r\n model1_layer = model1(inputs)\r\n model2_layer = model2(inputs)\r\n concat_layer = Concatenate()([model1_layer, model2_layer]) #concatenation layer\r\n dropout1 = Dropout(ENSEMBLE_DR_RATE)(concat_layer)\r\n layer1 = Dense(final_layer_size, activation='relu')(dropout1)\r\n dropout2 = Dropout(ENSEMBLE_DR_RATE)(layer1)\r\n layer2 = Dense(512, activation='relu')(dropout2)\r\n dropout3 = Dropout(ENSEMBLE_DR_RATE)(layer2)\r\n out = Dense(2, activation='softmax')(dropout3)\r\n model = Model(inputs = inputs, outputs = out)\r\n model.summary()\r\n\r\n #create the augmenting data object\r\n aug = ImageDataGenerator(\r\n rotation_range=30,\r\n brightness_range = (0.5, 1.5),\r\n horizontal_flip=True,\r\n vertical_flip = True,\r\n fill_mode=\"reflect\",\r\n preprocessing_function=normalize_data)\r\n\r\n #compile the model\r\n model.compile(\r\n optimizer=tf.keras.optimizers.SGD(learning_rate = lr_extension), #function used to minimize the loss (back propogation and gradient descent)\r\n loss=tf.keras.losses.BinaryCrossentropy(), #defines how far off a prediction is from correct answer - loss is high if model predicts small prob classified correctly\r\n metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.CategoricalAccuracy()]\r\n )\r\n\r\n \r\n #sending the same augmented input through both models and fit the model\r\n history1 = model.fit(\r\n x = aug.flow(X_train, y_train, batch_size = 32),\r\n shuffle = True,\r\n epochs = NUM_EPOCHS,\r\n callbacks = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, min_delta = 0.001, restore_best_weights=True)])\r\n\r\nelif OPTION == \"RNN\":\r\n print(\"Using RNN option\")\r\n #develop a new model that will concatenate the outputs of each model before prediction \r\n inputs = Input(shape=(50,50,3))\r\n model1_layer = model1(inputs)\r\n model2_layer = model2(inputs)\r\n new_input = tf.convert_to_tensor([model1_layer, model2_layer])\r\n new_input = tf.transpose(new_input, [1,0,2]) #shape (None, timestep(2), features(2048))\r\n simple_rnn = SimpleRNN(2048, activation='relu', 
dropout=0.3)(new_input) # Shape = (None, 2048)\r\n dense1 = Dense(512, activation='relu')(simple_rnn)\r\n dropout1 = Dropout(ENSEMBLE_DR_RATE)(dense1)\r\n out = Dense(2, activation='softmax')(dropout1)\r\n model = Model(inputs = inputs, outputs = out)\r\n\r\n model.summary()\r\n\r\n #create the augmenting data object\r\n aug = ImageDataGenerator(\r\n rotation_range=30,\r\n brightness_range = (0.5, 1.5),\r\n horizontal_flip=True,\r\n vertical_flip = True,\r\n fill_mode=\"reflect\",\r\n preprocessing_function=normalize_data)\r\n\r\n model.compile(\r\n optimizer=tf.keras.optimizers.SGD(learning_rate = lr_extension), #function used to minimize the loss (back propogation and gradient descent)\r\n loss=tf.keras.losses.BinaryCrossentropy(), #defines how far off a prediction is from correct answer - loss is high if model predicts small prob classified correctly\r\n metrics=[tf.keras.metrics.MeanSquaredError(), tf.keras.metrics.CategoricalAccuracy()])\r\n\r\n model.layers[0].trainable = False\r\n\r\n #sending the same augmented input through both models\r\n history1 = model.fit(\r\n x = aug.flow(X_train, y_train, batch_size = BATCH_SIZE),\r\n shuffle = True,\r\n epochs = NUM_EPOCHS,\r\n callbacks = [tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, min_delta = 0.001, restore_best_weights=True)])\r\n\r\nelse:\r\n print(\"Option selected is not defined. Please select from SUM, MULT, CONC, or RNN\")\r\n\r\n\r\n\r\n\"\"\"Plot the results of the training session 1\"\"\"\r\nprint(\"---------------------------------- Getting Metrics -----------------------------\")\r\nplt.figure(figsize=(10,10))\r\nplt.plot(history1.history['mean_squared_error'])\r\nplt.title('MSE')\r\nplt.ylabel('MSE')\r\nplt.xlabel('Epoch')\r\nplt.savefig(OPTION+'_Train_MSE_'+TYPE_NAME)\r\nplt.close()\r\n\r\nplt.figure(figsize=(10,10))\r\nplt.plot(history1.history['categorical_accuracy'])\r\nplt.title('Categorical Accuracy')\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Epoch')\r\nplt.savefig(OPTION+'_Train_Categorical_'+TYPE_NAME)\r\nplt.close()\r\n\r\nplt.figure(figsize=(10,10))\r\nplt.plot(history1.history['loss'])\r\nplt.title('Binary Cross Entropy')\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Epoch')\r\nplt.savefig(OPTION+'_Train_Loss_'+TYPE_NAME)\r\nplt.close()\r\n\r\n#Get training and testing results\r\n#normalize all samples\r\nX_train_normalized = np.zeros(X_train.shape)\r\n\r\nfor idx, sample in enumerate(X_train):\r\n X_train_normalized[idx] = normalize_data(sample)\r\n if idx==0:\r\n print(\"size of a sample normalizing:\", sample.shape)\r\n\r\nX_test_normalized = np.zeros(X_test.shape)\r\nfor idx, sample in enumerate(X_test):\r\n X_test_normalized[idx] = normalize_data(sample)\r\n\r\n\r\n#Perform final prediction of the model using non-augmented train and test data\r\nfinal_prediction_train = model.predict(X_train_normalized.astype(np.float64))\r\nfinal_prediction_test = model.predict(X_test_normalized.astype(np.float64))\r\n\r\n#get the final metrics (from file get_results)\r\nget_metrics(final_prediction_train, y_train, \"Train Metrics***************\", \"Train\", OPTION+\"_Ensemble_Confusion_Train_Ensemble_\"+TYPE_NAME)\r\nget_metrics(final_prediction_test, y_test, \"Test Metrics****************\", \"Test\", OPTION+\"_Ensemble_Confusion_Test_Ensemble_\"+TYPE_NAME)","sub_path":"extension_2_ensemble.py","file_name":"extension_2_ensemble.py","file_ext":"py","file_size_in_byte":13519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"159467278","text":"import 
CrazyCod.Utilities.filters4b as filt4b\nfrom CrazyCod.Utilities.drawchart4 import draw_chart\nfrom CrazyCod.Utilities.pkgstdy import *\nfrom CrazyCod.Utilities.frames import DataFrame\nfrom CrazyCod.Futures.folder import *\n# from CrazyCod.Utilities.folder import *\nimport matplotlib.pyplot as plt\nfrom functools import partial\n\n\nfrom multiprocessing import Pool\nimport os\nimport shutil\n\nimport yaml\n\nnp_nice_options()\nasc = np.ascontiguousarray\n\n# import CrazyCod.Futures.PCore.FiltersBase as fbase\nimport itertools as itt\nimport gc\n\nclass BaseKwargs():\n def __init__(self, *args, **kwargs):\n if len(args) > 0:\n raise ValueError('Please provide information via kwargs')\n v_keys = self.__dir__()\n v_keys = [i for i in v_keys if (len(i) > 2) and (i[0:2] != '__')]\n # for n, v in kwargs.items():\n # print((n, v))\n for n, v in kwargs.items():\n if n in v_keys:\n if isinstance(v, str):\n exec('self.%s = \"%s\"' % (n, v))\n else:\n exec('self.%s = %s' % (n, v))\n else:\n raise ValueError('Unrecognized keyword %s' % n)\n\n\nclass ConfigManager:\n configFile = 'Q:\\\\Strategy2\\\\Config.yaml'\n\n def __init__(self):\n pass\n\n def __getitem__(self, item):\n stream = open(self.configFile, 'r')\n allConfigs = yaml.load(stream)\n stream.close()\n return allConfigs[item]\n\n\nconfigManager = ConfigManager()\n\n\nclass BaseMarkets:\n BaseMarketsDir = ''\n\n def __init__(self):\n self.BaseMarketsDir = ConfigManager()['basemkts_dir']\n\n def retrieve(self, mkt, dir1='Stats', file='Returns'):\n return DataDir(opj(self.BaseMarketsDir, mkt, dir1)).retrieve(file)\n\n def store(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.BaseMarketsDir, mkt, dir1)):\n os.makedirs(opj(self.BaseMarketsDir, mkt, dir1))\n DataDir(opj(self.BaseMarketsDir, mkt, dir1)).store(file, mdata)\n return None\n\n def delete(self, mkt, dir1, file):\n if os.path.exists(opj(self.BaseMarketsDir, mkt, dir1, file) + '.npz'):\n os.remove(opj(self.BaseMarketsDir, mkt, dir1, file) + '.npz')\n return None\n\n def load(self, mkt, dir1, file):\n return DataDir(opj(self.BaseMarketsDir, mkt, dir1)).load(file)\n\n def save(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.BaseMarketsDir, mkt, dir1)):\n os.makedirs(opj(self.BaseMarketsDir, mkt, dir1))\n DataDir(opj(self.BaseMarketsDir, mkt, dir1)).save(file, mdata)\n return None\n\n\nbaseMarkets = BaseMarkets()\n\n\nclass SlippageManager:\n SlippageFile = ''\n\n def __init__(self):\n self.SlippageFile = ConfigManager()['slippage_file']\n\n def __getitem__(self, item):\n stream = open(self.SlippageFile, 'r')\n allConfigs = yaml.load(stream)\n stream.close()\n return allConfigs[item]\n\n\nslippageManager = SlippageManager()\n\n\nclass PointValueManager:\n PointValueFile = ''\n\n def __init__(self):\n self.PointValueFile = ConfigManager()['pointvalue_file']\n\n def __getitem__(self, item):\n stream = open(self.PointValueFile, 'r')\n allConfigs = yaml.load(stream)\n stream.close()\n return allConfigs[item]\n\n\npointValueManager = PointValueManager()\n\n\nclass Universe(BaseKwargs):\n SecurityList = []\n\n def __getitem__(self, item):\n if hasattr(self, item):\n return self.__getattribute__(item)\n else:\n return None\n\n def __iter__(self):\n return iter(self.SecurityList)\n\n def __len__(self):\n return len(self.SecurityList)\n\n\nclass ArtAssetUniverse():\n SecurityList = Universe()\n ArtAssetsDir = ''\n\n def __init__(self):\n self.SecurityList = \\\n Universe(SecurityList=['SP500', 'Nasdaq_SP500', 'R2000_SP500', 'SP400Mid_R2000', 'SPHealthCare_SP500',\n 
'SPConsDisc_SP500', 'Nikkei225_SP500', 'ESTX50_SP500', 'MSCIEafe_MSCIEM',\n 'DAX_ESTX50', 'FTSE100_ESTX50', 'TecDAX_DAX', 'ESTXBank_ESTX50', 'ESTXOilGas_ESTX50',\n 'ESTXTelecom_ESTX50', 'ESTXUtil_ESTX50', 'ESTXInsur_ESTX50', 'SMI_ESTX50',\n 'SMIMid_SMI', 'STXE600Insur_ESTXBank', 'STXE600BasRes_ESTX50',\n 'STXE600Health_ESTX50', 'STXE600Util_ESTXUtil', 'STXE600Bank_ESTXBank',\n 'STXE600OilGas_ESTXOilGas', 'RDX', 'SPToronto60_SP500', 'CAC40_ESTX50',\n 'IBEX35_ESTX50', 'OMXS30_ESTX50', 'US10Yr', 'US05Yr_US10Yr', 'EuroDol12_US10Yr',\n 'GerBobl_GerBund', 'Euribor12_GerBobl', 'Euribor04_Euribor08', 'Euribor08_Euribor12',\n 'UKGilt_US10Yr', 'CA10Yr_US10Yr', 'Sterling3m04', 'Sterling3m08_UKGilt',\n 'Canada3m04_CA10Yr', 'EuroSwiss08_Euribor08', 'EuroFX', 'YenFX', 'BritPound_EuroFX',\n 'CanDol_EuroFX', 'AusDol_YenFX', 'MexPeso', 'EuroSwissFX', 'NZDol_AusDol',\n 'SouthAfRand_CanDol', 'Corn01', 'Corn03_Corn01', 'Corn05_Corn02', 'EurMaize01',\n 'EurMaize02_EurMaize01', 'Cotton01', 'Cotton03_Cotton01', 'Sugar01',\n 'Sugar03_Sugar01', 'Sugar05_Sugar03', 'Cocoa01', 'CocoaLdn01_Cocoa01',\n 'CocoaLdn03_CocoaLdn01', 'Coffee01', 'CoffeeLdn01_Coffee01', 'SugarLdn01_Sugar01',\n 'SugarLdn03_SugarLdn01', 'Milk01', 'Milk02_Milk01', 'Butter01', 'FCOJ01',\n 'LCattle01', 'LCattle03_LCattle01', 'LCattle05_LCattle02', 'LCattle05_LCattle04',\n 'FCattle01_LCattle01', 'FCattle03_FCattle01', 'FCattle05_FCattle03', 'LeanHogs01',\n 'LeanHogs03_LeanHogs01', 'LeanHogs04_LeanHogs02', 'LeanHogs05_LeanHogs03',\n 'Canola01_Corn01', 'Rapeseed01_EurMaize01', 'RoughRice01', 'Oats01_Corn01',\n 'SoyMeal01_Soybean01', 'Soybean03_Soybean01', 'SoyMeal02_SoyMeal01',\n 'Wheat01_Corn01', 'Wheat04_Wheat01', 'KWheat01_Wheat01', 'WheatEur01_Wheat01',\n 'WheatEur02_WheatEur01', 'Copper01', 'Gold01', 'Silver01_Gold01',\n 'Platinum01_Gold01', 'Palladium01', 'Lumber01', 'Lumber02_Lumber01', 'CoalRot01',\n 'CoalNew01_CoalRot01', 'CrudeWTI01', 'CrudeWTI03_CrudeWTI01',\n 'CrudeBrent01_CrudeWTI01', 'CrudeBrent03_CrudeBrent01', 'HeatOil01_CrudeWTI05',\n 'Gasoline03_Gasoline01', 'GasOilEur01_CrudeBrent01', 'Ethanol01_Corn01',\n 'Emissions01', 'NatGas01', 'NatGas03_NatGas01', 'NatGas05_NatGas02',\n 'NatGas08_NatGas04', 'NatGas16_NatGas07', 'NatGasUK01'])\n self.ArtAssetsDir = configManager['artmkts_dir']\n\n def retrieve(self, mkt, dir1='Stats', file='Returns'):\n return DataDir(opj(self.ArtAssetsDir, mkt, dir1)).retrieve(file)\n\n def store(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.ArtAssetsDir, mkt, dir1)):\n os.makedirs(opj(self.ArtAssetsDir, mkt, dir1))\n DataDir(opj(self.ArtAssetsDir, mkt, dir1)).store(file, mdata)\n return None\n\n def delete(self, mkt, dir1, file):\n if os.path.exists(opj(self.ArtAssetsDir, mkt, dir1, file) + '.npz'):\n os.remove(opj(self.ArtAssetsDir, mkt, dir1, file) + '.npz')\n return None\n\n def load(self, mkt, dir1, file):\n return DataDir(opj(self.ArtAssetsDir, mkt, dir1)).load(file)\n\n def save(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.ArtAssetsDir, mkt, dir1)):\n os.makedirs(opj(self.ArtAssetsDir, mkt, dir1))\n DataDir(opj(self.ArtAssetsDir, mkt, dir1)).save(file, mdata)\n return None\n\n @staticmethod\n def get_beta_adjusted_spreads(x, y, roundFrac=0.025, window=630, quoteMult=1, funcValue=None):\n xData = baseMarkets.retrieve(x)\n yData = baseMarkets.retrieve(y)\n _xidx = xData['Date', list]\n _yidx = yData['Date', list]\n\n _xyidx = list(sorted(set(_xidx).intersection(set(_yidx))))\n\n _xidx = [kidx for kidx, k in enumerate(_xidx) if k in _xyidx]\n _yidx = [kidx for kidx, k 
in enumerate(_yidx) if k in _xyidx]\n\n xData = xData[_xidx, :]\n yData = yData[_yidx, :]\n\n if funcValue is None:\n _func1 = filt4b.sma\n _func2 = filt4b.stdev_sma\n\n _rn = _func1(xData['Close'].values * yData['Close'].values, window) - \\\n _func1(xData['Close'].values, window) * _func1(yData['Close'].values, window)\n _vx = _func2(xData['Close'].values, window)\n _vy = _func2(yData['Close'].values, window)\n else:\n _func1 = filt4b.firma\n _func2 = filt4b.stdev_firma\n\n _rn = _func1(xData['Close'].values * yData['Close'].values, window, funcValue, unbias=True) - \\\n _func1(xData['Close'].values, window, funcValue, unbias=True) * \\\n _func1(yData['Close'].values, window, funcValue, unbias=True)\n _vx = _func2(xData['Close'].values, window, funcValue, unbias=True)\n _vy = _func2(yData['Close'].values, window, funcValue, unbias=True)\n\n _rd = _vx * _vy\n _r = _rn / _rd\n _beta = filt4b.lag(quoteMult * _r * _vx / _vy)\n _beta = np.round(_beta / roundFrac) * roundFrac\n\n zData = dummy_df(xData)\n zData.add_ohlc('x', xData.get_ohlc())\n zData.add_ohlc('y', yData.get_ohlc())\n zData['Beta'] = _beta\n\n _spr = filt4b.cum_sum_bar_2d(filt4b.chg_bar_2d(xData.get_ohlc()) -\n _beta[:, np.newaxis] * filt4b.chg_bar_2d(yData.get_ohlc()))\n _spr_adj = xData[xData.shape[0] - 1, 'Close'] - \\\n _beta[_beta.shape[0] - 1] * yData[yData.shape[0] - 1, 'Close']\n _spr += _spr_adj\n\n zData.add_ohlc('', filt4b.fix_ohlc(_spr))\n\n zData = zData[~np.isnan(zData['Close'].values), :]\n\n return zData\n\n def update(self):\n for k in self.SecurityList:\n try:\n if '_' in k:\n kstr = k.split('_')\n for m in range(0, 2):\n # m = 0\n if kstr[m][-2:] in [('0' + str(j) if j < 10 else str(j)) for j in range(1, 30)] and \\\n kstr[m] not in ['Nikkei225']:\n kstr[m] = kstr[m][0:-2] + '_' + kstr[m][-2:]\n self.store(k, 'Stats', 'Returns', self.get_beta_adjusted_spreads(kstr[0], kstr[1]))\n else:\n kstr = k\n if kstr[-2:] in [('0' + str(j) if j < 10 else str(j)) for j in range(1, 30)] and \\\n kstr not in ['Nikkei225']:\n kstr = kstr[0:-2] + '_' + kstr[-2:]\n self.store(k, 'Stats', 'Returns', baseMarkets.retrieve(kstr))\n except:\n raise ValueError('Cannot process %s' %k)\n return None\n\n def __iter__(self):\n return iter(self.SecurityList)\n\n def __len__(self):\n return len([j for j in self.SecurityList])\n\n\nartAssetUniverse = ArtAssetUniverse()\n\n\n# Add these dollar markets: NatGasUK_01, CoalRot_01 (can I really trade it?),\n# Add these spreads: Soybean, CrudeBrent, Gasoline, SugarLdn, Corn (2), Feeder Cattle (2),\n# May be these spreads too: LCattle (2/3), LeanHogs(3/4), check others in the crude complex\n# Re-do maximum clique problem with spreads using volatility, and ignoring the first contract (to avoid netting\n# conflict)\nclass UncAssetUniverse():\n SecurityList = Universe\n UncAssetsDir = ''\n\n def __init__(self):\n self.SecurityList = \\\n Universe(SecurityList=['SP500', 'US10Yr', 'CrudeWTI01', 'Gold01', 'NatGas01', 'EuroFX', 'YenFX', 'CanDol',\n 'Copper01', 'SoyOil01', 'SoyMeal01', 'Sugar01', 'Cocoa01', 'Coffee01', 'Cotton01',\n 'LCattle01', 'LeanHogs01', 'MexPeso', 'Palladium01', 'KWheat01', 'Milk01', 'FCOJ01',\n 'RoughRice01', 'Lumber01', 'Oats01', 'Ethanol01'])\n # self.SecurityList = Universe(SecurityList=['SoyOil01', 'SoyMeal01', 'Ethanol01', 'KWheat01', 'Oats01'])\n self.UncAssetsDir = configManager['uncmkts_dir']\n\n def retrieve(self, mkt, dir1='Stats', file='Returns'):\n return DataDir(opj(self.UncAssetsDir, mkt, dir1)).retrieve(file)\n\n def store(self, mkt, dir1, file, mdata):\n if not 
os.path.exists(opj(self.UncAssetsDir, mkt, dir1)):\n os.makedirs(opj(self.UncAssetsDir, mkt, dir1))\n DataDir(opj(self.UncAssetsDir, mkt, dir1)).store(file, mdata)\n return None\n\n def delete(self, mkt, dir1, file):\n if os.path.exists(opj(self.UncAssetsDir, mkt, dir1, file) + '.npz'):\n os.remove(opj(self.UncAssetsDir, mkt, dir1, file) + '.npz')\n return None\n\n def load(self, mkt, dir1, file):\n return DataDir(opj(self.UncAssetsDir, mkt, dir1)).load(file)\n\n def save(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.UncAssetsDir, mkt, dir1)):\n os.makedirs(opj(self.UncAssetsDir, mkt, dir1))\n DataDir(opj(self.UncAssetsDir, mkt, dir1)).save(file, mdata)\n\n @staticmethod\n def get_point_value(x):\n return pointValueManager[x]\n\n @staticmethod\n def get_calendar_spread(x, y):\n xData = baseMarkets.retrieve(x)\n yData = baseMarkets.retrieve(y)\n _xidx = xData['Date', list]\n _yidx = yData['Date', list]\n\n _xyidx = list(sorted(set(_xidx).intersection(set(_yidx))))\n _xidx = [kidx for kidx, k in enumerate(_xidx) if k in _xyidx]\n _yidx = [kidx for kidx, k in enumerate(_yidx) if k in _xyidx]\n\n xData = xData[_xidx, :]\n yData = yData[_yidx, :]\n zData = dummy_df(xData)\n zData.add_ohlc('', filt4b.fix_ohlc(xData.get_ohlc()-yData.get_ohlc()))\n\n zData['Volume'] = np.minimum(xData['Volume'].values, yData['Volume'].values)\n zData['OpenInterest'] = np.minimum(xData['OpenInterest'].values, yData['OpenInterest'].values)\n zData['AggVolume'] = xData['AggVolume'].values\n zData['AggOpenInterest'] = xData['AggOpenInterest'].values\n for j in ['Volume', 'OpenInterest', 'AggVolume', 'AggOpenInterest']:\n zData[j] = filt4b.fill(zData[j].values)\n zData['LastTrdDate'] = np.minimum(xData['LastTrdDate'].values, yData['LastTrdDate'].values).astype('int64')\n zData['ContractAtCloseX'] = xData['ContractAtClose'].values\n zData['ContractAtCloseY'] = yData['ContractAtClose'].values\n zData.add_ohlc('NBA', filt4b.fix_ohlc(xData.get_ohlc('NBA')-yData.get_ohlc('NBA')))\n zData['FrontX'] = xData['Front'].values\n zData['FrontY'] = yData['Front'].values\n zData = zData[~np.isnan(zData['Close'].values), :]\n return zData\n\n @staticmethod\n def get_correlation(x, y):\n if isinstance(x, str):\n xData = baseMarkets.retrieve(x)\n else:\n xData = x.copy()\n if isinstance(y, str):\n yData = baseMarkets.retrieve(y)\n else:\n yData = y.copy()\n _xidx = xData['Date', list]\n _yidx = yData['Date', list]\n\n _xyidx = list(sorted(set(_xidx).intersection(set(_yidx))))\n _xidx = [kidx for kidx, k in enumerate(_xidx) if k in _xyidx]\n _yidx = [kidx for kidx, k in enumerate(_yidx) if k in _xyidx]\n\n xData = xData[_xidx, :]\n yData = yData[_yidx, :]\n _xDiffClose = filt4b.chg(xData['Close'].values)\n _yDiffClose = filt4b.chg(yData['Close'].values)\n _rn = np.nanmean(_xDiffClose * _yDiffClose) - np.nanmean(_xDiffClose) * np.nanmean(_yDiffClose)\n _rd = np.nanstd(_xDiffClose) * np.nanstd(_yDiffClose)\n return _rn / _rd\n\n @staticmethod\n def get_suitability(x):\n if isinstance(x, str):\n xData = baseMarkets.retrieve(x)\n else:\n xData = x.copy()\n if xData.shape[0] < 2000:\n return False\n else:\n _t1 = filt4b.sma(filt4b.atr(xData['High'].values, xData['Low'].values, xData['Close'].values), 20)\n _t2 = filt4b.stdev_sma(xData['Close'].values, 20)\n return np.nanmean(_t1 / _t2) <= 1.35\n\n def update(self):\n for k in self.SecurityList:\n try:\n if '_' in k:\n kstr = k.split('_')\n for m in range(0, 2):\n # m = 0\n if kstr[m][-2:] in [('0' + str(j) if j < 10 else str(j)) for j in range(1, 30)] and \\\n kstr[m] not 
in ['Nikkei225']:\n kstr[m] = kstr[m][0:-2] + '_' + kstr[m][-2:]\n if '_' in kstr[0] and '_' in kstr[1] and kstr[0].split('_')[0] == kstr[1].split('_')[0]:\n self.store(k, 'Stats', 'Returns', self.get_calendar_spread(kstr[0], kstr[1]))\n else:\n raise NotImplementedError('Only calendar spreads implemented so far')\n else:\n kstr = k\n if kstr[-2:] in [('0' + str(j) if j < 10 else str(j)) for j in range(1, 30)] and \\\n kstr not in ['Nikkei225']:\n kstr = kstr[0:-2] + '_' + kstr[-2:]\n self.store(k, 'Stats', 'Returns', baseMarkets.retrieve(kstr))\n except:\n raise ValueError('Cannot process %s' %k)\n\n def __iter__(self):\n return iter(self.SecurityList)\n\n def __len__(self):\n return len([j for j in self.SecurityList])\n\n\nuncAssetUniverse = UncAssetUniverse()\n\n\nclass ArtAssetUniverseSubSet():\n SecurityList = Universe()\n ArtAssetsDir = ''\n SecurityIdx = []\n\n def __init__(self, idx):\n if isinstance(idx, int):\n idxList = configManager['artmkts_subset']['set' + ('0' + str(idx) if idx < 10 else str(idx))]\n else:\n idxList = idx\n self.SecurityList = Universe(SecurityList=[k for kidx, k in enumerate(artAssetUniverse) if kidx in idxList])\n self.ArtAssetsDir = configManager['setmkts_dir']\n self.SecurityIdx = idx\n\n def __iter__(self):\n return iter(self.SecurityList)\n\n def retrieve(self, mkt, dir1='Stats', file='Returns'):\n return DataDir(opj(self.ArtAssetsDir, mkt, dir1)).retrieve(file)\n\n def store(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.ArtAssetsDir, mkt, dir1)):\n os.makedirs(opj(self.ArtAssetsDir, mkt, dir1))\n DataDir(opj(self.ArtAssetsDir, mkt, dir1)).store(file, mdata)\n return None\n\n def delete(self, mkt, dir1, file):\n if os.path.exists(opj(self.ArtAssetsDir, mkt, dir1, file) + '.npz'):\n os.remove(opj(self.ArtAssetsDir, mkt, dir1, file) + '.npz')\n return None\n\n def load(self, mkt, dir1, file):\n return DataDir(opj(self.ArtAssetsDir, mkt, dir1)).load(file)\n\n def save(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.ArtAssetsDir, mkt, dir1)):\n os.makedirs(opj(self.ArtAssetsDir, mkt, dir1))\n DataDir(opj(self.ArtAssetsDir, mkt, dir1)).save(file, mdata)\n return None\n\n\nartAssetUniverseSubSet00 = ArtAssetUniverseSubSet([0, 3, 12, 13, 14, 15, 19, 23, 24, 27, 28, 31, 35, 40, 44, 48, 51, 52,\n 57, 59, 63, 65, 66, 68, 73, 80, 81, 85, 87, 88, 89, 90, 92, 95, 98,\n 101, 105, 111, 115, 116])\nartAssetUniverseSubSet01 = ArtAssetUniverseSubSet([7, 8, 10, 11, 16, 20, 21, 29, 30, 32, 34, 38, 39, 41, 42, 43, 46, 50,\n 54, 55, 61, 62, 67, 71, 72, 76, 77, 79, 82, 84, 91, 99, 103, 104,\n 107, 108, 113, 114, 117, 119])\nartAssetUniverseSubSet02 = ArtAssetUniverseSubSet([1, 2, 4, 5, 6, 9, 17, 18, 22, 25, 26, 33, 36, 37, 45, 47, 49, 53, 56,\n 58, 60, 64, 69, 70, 74, 75, 78, 83, 86, 93, 94, 96, 97, 100, 102,\n 106, 109, 110, 112, 118])\nartAssetUniverseSubSet03 = ArtAssetUniverseSubSet([0, 3, 5, 10, 11, 18, 19, 22, 23, 25, 28, 30, 32, 38, 44, 49, 50, 53,\n 55, 57, 61, 63, 64, 65, 66, 71, 72, 73, 79, 81, 87, 91, 92, 98, 101,\n 102, 105, 108, 109, 118])\nartAssetUniverseSubSet04 = ArtAssetUniverseSubSet([1, 2, 4, 7, 8, 9, 12, 15, 16, 20, 21, 27, 36, 41, 42, 47, 56, 58, 62,\n 68, 70, 74, 76, 77, 78, 83, 84, 85, 86, 88, 89, 95, 96, 97, 107, 110,\n 111, 115, 117, 119])\nartAssetUniverseSubSet05 = ArtAssetUniverseSubSet([6, 13, 14, 17, 24, 26, 29, 31, 33, 34, 35, 37, 39, 40, 43, 45, 46,\n 48, 51, 52, 54, 59, 60, 67, 69, 75, 80, 82, 90, 93, 94, 99, 100, 103,\n 104, 106, 112, 113, 114, 116])\nartAssetUniverseSubSet06 = ArtAssetUniverseSubSet([1, 2, 
5, 7, 8, 11, 17, 21, 22, 23, 24, 25, 30, 34, 36, 40, 48, 56,\n 62, 67, 68, 70, 72, 74, 76, 77, 78, 83, 89, 91, 98, 101, 102, 103,\n 105, 107, 110, 111, 117, 118])\nartAssetUniverseSubSet07 = ArtAssetUniverseSubSet([0, 14, 15, 29, 31, 32, 39, 42, 43, 44, 46, 49, 51, 52, 54, 58, 60,\n 63, 64, 65, 69, 71, 73, 75, 79, 81, 82, 87, 90, 92, 93, 95, 96, 99,\n 100, 104, 108, 112, 113, 114])\nartAssetUniverseSubSet08 = ArtAssetUniverseSubSet([3, 4, 6, 9, 10, 12, 13, 16, 18, 19, 20, 26, 27, 28, 33, 35, 37, 38,\n 41, 45, 47, 50, 53, 55, 57, 59, 61, 66, 80, 84, 85, 86, 88, 94, 97,\n 106, 109, 115, 116, 119])\nartAssetUniverseSubSet09 = ArtAssetUniverseSubSet([0, 6, 8, 11, 12, 15, 20, 26, 27, 29, 30, 34, 37, 38, 39, 40, 51, 53,\n 58, 63, 65, 70, 72, 75, 77, 82, 83, 86, 87, 88, 91, 92, 93, 99, 102,\n 104, 107, 108, 115, 117])\nartAssetUniverseSubSet10 = ArtAssetUniverseSubSet([2, 7, 9, 13, 14, 21, 25, 28, 31, 32, 33, 35, 42, 45, 47, 55, 56, 59,\n 61, 62, 69, 71, 74, 78, 80, 81, 84, 89, 96, 97, 98, 100, 101, 103,\n 109, 110, 112, 113, 114, 116])\nartAssetUniverseSubSet11 = ArtAssetUniverseSubSet([1, 3, 4, 5, 10, 16, 17, 18, 19, 22, 23, 24, 36, 41, 43, 44, 46, 48,\n 49, 50, 52, 54, 57, 60, 64, 66, 67, 68, 73, 76, 79, 85, 90, 94, 95,\n 105, 106, 111, 118, 119])\n\n\nclass UncAssetUniverseSubSet():\n SecurityList = Universe()\n UncAssetsDir = ''\n\n def __init__(self):\n self.SecurityList = Universe(SecurityList=[k for kidx, k in enumerate(uncAssetUniverse)])\n self.UncAssetsDir = configManager['setmkts_dir']\n\n def __iter__(self):\n return iter(self.SecurityList)\n\n def retrieve(self, mkt, dir1='Stats', file='Returns'):\n return DataDir(opj(self.UncAssetsDir, mkt, dir1)).retrieve(file)\n\n def store(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.UncAssetsDir, mkt, dir1)):\n os.makedirs(opj(self.UncAssetsDir, mkt, dir1))\n DataDir(opj(self.UncAssetsDir, mkt, dir1)).store(file, mdata)\n return None\n\n def delete(self, mkt, dir1, file):\n if os.path.exists(opj(self.UncAssetsDir, mkt, dir1, file) + '.npz'):\n os.remove(opj(self.UncAssetsDir, mkt, dir1, file) + '.npz')\n return None\n\n def load(self, mkt, dir1, file):\n return DataDir(opj(self.UncAssetsDir, mkt, dir1)).load(file)\n\n def save(self, mkt, dir1, file, mdata):\n if not os.path.exists(opj(self.UncAssetsDir, mkt, dir1)):\n os.makedirs(opj(self.UncAssetsDir, mkt, dir1))\n DataDir(opj(self.UncAssetsDir, mkt, dir1)).save(file, mdata)\n return None\n\nuncAssetUniverseSubSet = UncAssetUniverseSubSet()\n\n\ndef update_universes():\n artAssetUniverse.update()\n uncAssetUniverse.update()\n\n\ndef cbz_str(cbw):\n return '0' + str(cbw) if cbw < 10 else str(cbw)\n\n\n\n\nclass CbzInfo:\n CbzInfoFunc = None\n CbzArgs = None\n CbzName = ''\n CbzCbwList = []\n\n def __init__(self, cbzInfoFunc, cbzName, cbzCbwList, *cbzArgs):\n self.CbzInfoFunc = cbzInfoFunc\n self.CbzName = cbzName\n self.CbzArgs = cbzArgs\n self.CbzCbwList = cbzCbwList\n\n # faster\n def generate_info(self, y):\n univ, cbw, x = y\n xData = univ.retrieve(x)\n cbDateValIdx, cbBrkPnL, cbDir, cbMa, cbStps = self.CbzInfoFunc(xData.get_ohlc(), *self.CbzArgs, cbw)\n # newly added\n cbDateValIdx = cbDateValIdx.astype('int64')\n try:\n cbDf = DataFrame({'BegDateIdx': cbDateValIdx[:, 0]})\n cbDf['EndDateIdx'] = cbDateValIdx[:, 1]\n cbDf['CbBrkPnL'] = cbBrkPnL\n cbDf['CbDir'] = cbDir\n cbDf['BuyStop'] = cbStps[cbDateValIdx[:, 0]-1, 0]\n cbDf['SellStop'] = cbStps[cbDateValIdx[:, 0]-1, 1]\n cbwStr = cbz_str(cbw)\n univ.store(x, 'CbInfo', self.CbzName + cbwStr, cbDf)\n return None\n 
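# (Note, not part of the original: the bare 'except:' below also traps\n        # KeyboardInterrupt and SystemExit; a safer sketch is 'except Exception as e:'\n        # with the error included, e.g. print('Issue with %s / %s: %s' % (x, cbw, e)))\n        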
except Exception:\n            print('Issue with %s with %s' % (x, str(cbw)))\n\n    def generate_info_all(self, univ, cbw):\n        if __name__ == '__main__':\n            numSec = len(univ.SecurityList)\n            with Pool(processes=os.cpu_count()) as p:\n                p.map(self.generate_info, zip([univ]*numSec, [cbw]*numSec, univ.SecurityList))\n        else:\n            for x in univ:\n                self.generate_info((univ, cbw, x))\n        return None\n\n    def generate_info_all_multi(self, univ, windows=None):\n        if windows is None:\n            windows = self.CbzCbwList\n        for cbw in windows:\n            self.generate_info_all(univ, cbw)\n        return None\n\n    def get_info(self, univ, cbw, x):\n        return univ.retrieve(x, 'CbInfo', self.CbzName + cbz_str(cbw))\n\n    def __str__(self):\n        prOutput = self.CbzName\n        for i in range(0, len(self.CbzArgs)):\n            prOutput += '_' + str(self.CbzArgs[i])\n        return prOutput\n\nmnInfoManager = CbzInfo(filt4b.firz234dfma_mninfo, 'Mom234Win', [1, 2, 3, 4, 5, 6], 80, 16)\ncbzInfoList = [mnInfoManager]\n\n\ndef update_cbinfo_one(k):\n    mnInfoManager.generate_info_all_multi(k, list(range(1, 7)))\n\n\ndef update_cbinfo():\n    for k in [artAssetUniverse, uncAssetUniverse]:\n        update_cbinfo_one(k)\n    return None\n\n\nclass FilterOperations:\n\n    @staticmethod\n    def convert_pnl_to_binary(x):\n        x = np.sign(x)\n        y = np.zeros(x.shape[0], dtype=int)\n        y[x > 0] = 1\n        y[x < 0] = -1\n        return y\n\n    @staticmethod\n    def ic_x_bin(x_mat, y):\n        if x_mat.ndim == 2:\n            y_mat = np.tile(y[:, np.newaxis], x_mat.shape[1])\n            n = x_mat.shape[0]\n            n11 = np.sum((x_mat == 1).astype(int) * (y_mat == 1).astype(int), axis=0).astype('float64')/n\n            n10 = np.sum((x_mat == 1).astype(int) * (y_mat == -1).astype(int), axis=0).astype('float64')/n\n            n01 = np.sum((x_mat == -1).astype(int) * (y_mat == 1).astype(int), axis=0).astype('float64')/n\n            n00 = np.sum((x_mat == -1).astype(int) * (y_mat == -1).astype(int), axis=0).astype('float64')/n\n            return (n11 * n00 - n01 * n10) / np.sqrt((n11 + n10) * (n11 + n01) * (n00 + n10) * (n00 + n01))\n        elif x_mat.ndim == 1:\n            n11 = np.where((x_mat == 1) & (y == 1))[0].shape[0]\n            n10 = np.where((x_mat == 1) & (y == -1))[0].shape[0]\n            n01 = np.where((x_mat == -1) & (y == 1))[0].shape[0]\n            n00 = np.where((x_mat == -1) & (y == -1))[0].shape[0]\n            return (n11 * n00 - n01 * n10) / np.sqrt((n11 + n10) * (n11 + n01) * (n00 + n10) * (n00 + n01))\n        else:\n            raise NotImplementedError('ic_x_bin cannot handle dimensions more than 2')\n\n    @staticmethod\n    def ic_x_con(x_mat, y):\n        if x_mat.ndim == 2:\n            n = x_mat.shape[0]\n            m1 = np.nansum(x_mat[y == 1, :], axis=0)/n\n            m2 = np.nansum(x_mat[y == -1, :], axis=0)/n\n            y_mat = np.tile(y[:, np.newaxis], x_mat.shape[1])\n            n1 = np.nansum((~np.isnan(x_mat)).astype(int) * (y_mat == 1).astype(int), axis=0)/n\n            n2 = np.nansum((~np.isnan(x_mat)).astype(int) * (y_mat == -1).astype(int), axis=0)/n\n            sx = np.nanstd(x_mat[(y == 1) | (y == -1), :], axis=0)\n            return (m1/n1 - m2/n2) * np.sqrt(n1 * n2) / (n1 + n2) * (1 / sx)\n        elif x_mat.ndim == 1:\n            m1 = np.sum(x_mat[np.where((y == 1) & ~np.isnan(x_mat))[0]])\n            m2 = np.sum(x_mat[np.where((y == -1) & ~np.isnan(x_mat))[0]])\n            n1 = np.where((y == 1) & ~np.isnan(x_mat))[0].shape[0]\n            n2 = np.where((y == -1) & ~np.isnan(x_mat))[0].shape[0]\n            sx = np.std(x_mat[~np.isnan(x_mat) & ((y == 1) | (y == -1))])\n            return (m1 / n1 - m2 / n2) * np.sqrt(n1 * n2)/(n1 + n2) * (1/sx)\n        else:\n            raise NotImplementedError('ic_x_con cannot handle dimensions more than 2')\n\n    @staticmethod\n    def remove_infinity(x):\n        if x.ndim == 1:\n            x[np.isposinf(x)] = np.nanmax(x[~np.isinf(x)])\n            x[np.isneginf(x)] = np.nanmin(x[~np.isinf(x)])\n            return x\n        else:\n            for i in range(0, x.shape[1]):\n                x[np.isposinf(x[:, i]), i] = np.nanmax(x[~np.isinf(x[:, i]), i])\n                x[np.isneginf(x[:, i]), i] = np.nanmin(x[~np.isinf(x[:, i]), i])\n            return x\n\n    @staticmethod\n    def make_binary_evenfilter(x, perc=50):\n        x = FilterOperations.remove_infinity(x)\n        x[x < 0] = 0\n        if x.ndim == 2:\n            xd = np.nanmedian(x, axis=0) if perc == 50 else np.nanpercentile(x, perc, axis=0)\n        else:\n            xd = np.nanmedian(x) if perc == 50 else np.nanpercentile(x, perc)\n        y = np.zeros(x.shape, dtype=int)\n        y[x > xd] = 1\n        y[x < xd] = -1\n        return y, xd\n\n    @staticmethod\n    def make_binary_oddfilter(x, threshold=np.float64(0)):\n        x = FilterOperations.remove_infinity(x)\n        y = np.zeros(x.shape, dtype=int)\n        if isinstance(threshold, float):\n            threshold = np.float64(threshold)\n        y[x > threshold] = 1\n        y[x < -threshold] = -1\n        if threshold.ndim == 0:\n            return y, np.ones(x.shape[1]) * threshold\n        else:\n            return y, threshold\n\n\nclass FilterGeneric():\n    CbzInfoList = None\n    DirName = ''\n    Prefix = ''\n    UseDir = False\n    BinOpFunc = None\n\n    def __init__(self, cbzList, dirName, preFix, useDir, binOpFunc):\n        if isinstance(cbzList, list):\n            self.CbzInfoList = cbzList\n        elif isinstance(cbzList, CbzInfo):\n            self.CbzInfoList = [cbzList]\n        else:\n            raise NotImplementedError('Need to supply instances of CbzInfo')\n        self.DirName = dirName\n        self.Prefix = preFix\n        self.UseDir = useDir\n        self.BinOpFunc = binOpFunc\n\n    # INDICATOR SPECIFIC CODE\n    @staticmethod\n    def get_indicator_matrix(x, row_idx=None):\n        output = np.random.random(10)\n        return output\n\n    @staticmethod\n    def get_column_names():\n        return np.random.random(10)\n\n    @staticmethod\n    def get_filter_vec_from_name(x, name):\n        return 0\n\n    # GENERIC CODE\n    @classmethod\n    def get_filter_mat_from_names(cls, x, nameList):\n        output = np.zeros((x.shape[0], len(nameList)))\n        for nameIdx, name in enumerate(nameList):\n            output[:, nameIdx] = cls.get_filter_vec_from_name(x, name)\n        return output\n\n    def generate_indicator_values(self, univ_k):\n        univ, k = univ_k\n        print('Processing %s' % k)\n        if isinstance(k, str):\n            xData = univ.retrieve(k)\n            xCbzInfo = [j.get_info(univ, w, k) for j in self.CbzInfoList for w in j.CbzCbwList]\n            xCbzName = [j.CbzName + cbz_str(w) for j in self.CbzInfoList for w in j.CbzCbwList]\n        else:\n            raise NotImplementedError('Wrong types of inputs')\n\n        # IMPORTANT SUBTRACTING ONE HERE - FOR THE FILTER VALUE DATE\n        try:\n            comIdx = list(itt.accumulate([j['BegDateIdx'].values for j in xCbzInfo],\n                                         lambda x, y=None: x if y is None else np.union1d(x, y)))[-1]-1\n        except Exception as e:\n            raise RuntimeError('Error in ticker %s' % k) from e\n        xCbzIdx = [np.in1d(comIdx+1, j['BegDateIdx'].values) for j in xCbzInfo]\n\n        filters = self.get_indicator_matrix(xData.get_ohlc(), comIdx)\n        print('done')\n        xCbzFilters = [filters[j, :] for j in xCbzIdx]\n        col_names = self.get_column_names()\n        xCbzFilters = [DataFrame(j) for j in xCbzFilters]\n        for j in range(0, len(xCbzFilters)):\n            tmp = xCbzFilters[j]\n            tmp.columns = col_names\n            xCbzFilters[j] = tmp\n\n        # assert whether the output is of the same shape or not\n        for j in range(0, len(xCbzFilters)):\n            try:\n                assert xCbzFilters[j].shape[0] == xCbzInfo[j].shape[0]\n            except AssertionError as e:\n                raise AssertionError('Issue with ticker %s at j=%s' % (k, str(j))) from e\n\n        # now save the data\n        for j in range(0, len(xCbzFilters)):\n            univ.store(k, self.DirName, xCbzName[j], xCbzFilters[j])\n        return None\n\n\nsetListAll = [artAssetUniverseSubSet00, artAssetUniverseSubSet01, artAssetUniverseSubSet02,\n              
artAssetUniverseSubSet03, artAssetUniverseSubSet04, artAssetUniverseSubSet05,\n artAssetUniverseSubSet06, artAssetUniverseSubSet07, artAssetUniverseSubSet08,\n artAssetUniverseSubSet09, artAssetUniverseSubSet10, artAssetUniverseSubSet11]\n\n\nclass IndicatorDataPerSet:\n\n @staticmethod\n def reduce_filter(univ, fmng, cbzList):\n for cbz in cbzList:\n for cbw in cbz.CbzCbwList:\n cbz_cbw_name = cbz.CbzName + cbz_str(cbw)\n fData = []\n cData = []\n col_names = None\n for k in univ.SecurityList:\n fDatak = univ.retrieve(k, fmng.DirName, cbz_cbw_name)\n cDatak = FilterOperations.convert_pnl_to_binary(univ.retrieve(k, 'CbInfo',\n cbz_cbw_name)['CbBrkPnL'].values)\n if col_names is None:\n col_names = list(fDatak.columns)\n if fmng.UseDir:\n dDatak = univ.retrieve(k, 'CbInfo', cbz_cbw_name)['CbDir'].values\n fData.append(fDatak.values * dDatak[:, np.newaxis])\n else:\n fData.append(fDatak.values)\n cData.append(cDatak)\n gc.collect()\n print('done reading the data')\n\n setFDataAll = []\n setIcDataAll = []\n setMedDataAll = []\n setCDataAll = []\n setRDataAll = []\n for jIdx, j in enumerate(setListAll):\n # jIdx = 0\n # j = setListAll[0]\n tmpFData = [k for kidx, k in enumerate(fData) if kidx in j.SecurityIdx]\n tmpCData = [k for kidx, k in enumerate(cData) if kidx in j.SecurityIdx]\n setSize = len(tmpFData)\n\n setFData = None\n setCData = None\n for k in range(0, setSize):\n if k == 0:\n setFData = tmpFData.pop(0)\n setCData = tmpCData.pop(0)\n else:\n setFData = np.vstack((setFData, tmpFData.pop(0)))\n setCData = np.hstack((setCData, tmpCData.pop(0)))\n del tmpFData\n del tmpCData\n gc.collect()\n\n setRData = setFData.copy()\n setFData, setMedData = fmng.BinOpFunc(setFData)\n\n # calculate the information coefficent from P&L\n setIcData = FilterOperations.ic_x_bin(setFData, setCData) # check this\n\n setFDataAll.append(setFData)\n setIcDataAll.append(setIcData)\n setMedDataAll.append(setMedData)\n setCDataAll.append(setCData)\n setRDataAll.append(setRData)\n\n # Remove factors that have differing interpretations in various sets\n _useFiltersAll = None\n for artSetIdx1 in range(0, len(setListAll)-1): # setListAll, self.SetList\n for artSetIdx2 in range(artSetIdx1+1, len(setListAll)): # setListAll, self.SetList\n _useFilters = np.where(np.sign(setIcDataAll[artSetIdx1]).astype('int8') ==\n np.sign(setIcDataAll[artSetIdx2]).astype('int8'))[0]\n _useFiltersAll = _useFilters if _useFiltersAll is None else \\\n np.intersect1d(_useFiltersAll, _useFilters)\n\n for artSetIdx in range(0, len(setListAll)): # setListAll, self.SetList\n setFDataAll[artSetIdx] = setFDataAll[artSetIdx][:, _useFiltersAll]\n setIcDataAll[artSetIdx] = setIcDataAll[artSetIdx][_useFiltersAll]\n setMedDataAll[artSetIdx] = setMedDataAll[artSetIdx][_useFiltersAll]\n setRDataAll[artSetIdx] = setRDataAll[artSetIdx][:, _useFiltersAll]\n gc.collect()\n col_names = np.array(col_names)[_useFiltersAll]\n\n # prune it further to remove signals with IC < 0.025\n setIcDataAvg = np.mean(np.abs(np.array(setIcDataAll)), axis=0)\n print('Max Ic is %s' %np.max(setIcDataAvg))\n\n _potentFiltersAll = np.where(setIcDataAvg > 0.025)[0]\n for artSetIdx in range(0, len(setListAll)): # setListAll, self.SetList\n setFDataAll[artSetIdx] = setFDataAll[artSetIdx][:, _potentFiltersAll]\n setIcDataAll[artSetIdx] = setIcDataAll[artSetIdx][_potentFiltersAll]\n setMedDataAll[artSetIdx] = setMedDataAll[artSetIdx][_potentFiltersAll]\n setRDataAll[artSetIdx] = setRDataAll[artSetIdx][:, _potentFiltersAll]\n gc.collect()\n col_names = 
np.array(col_names)[_potentFiltersAll]\n\n                for jIdx, j in enumerate(setListAll):\n                    _j_str = '0' + str(jIdx) if jIdx < 10 else str(jIdx)\n                    j.save('Set' + _j_str, fmng.DirName, cbz_cbw_name + 'filterData', setFDataAll[jIdx])\n                    j.save('Set' + _j_str, fmng.DirName, cbz_cbw_name + 'icData', setIcDataAll[jIdx])\n                    j.save('Set' + _j_str, fmng.DirName, cbz_cbw_name + 'medData', setMedDataAll[jIdx])\n                    j.save('Set' + _j_str, fmng.DirName, cbz_cbw_name + 'brkPnLData', setCDataAll[jIdx])\n                    j.save('Set' + _j_str, fmng.DirName, cbz_cbw_name + 'colNames', col_names)\n                    j.save('Set' + _j_str, fmng.DirName, cbz_cbw_name + 'rawData', setRDataAll[jIdx])\n                gc.collect()\n        return None\n\n    @staticmethod\n    def reduce_filter_validation(univ, fmng, cbzList):\n        for cbz in cbzList:\n            for cbw in cbz.CbzCbwList:\n                cbz_cbw_name = cbz.CbzName + cbz_str(cbw)\n                fData = []\n                cData = []\n                col_names = None\n                for k in univ.SecurityList:\n                    fDatak = univ.retrieve(k, fmng.DirName, cbz_cbw_name)\n                    cDatak = FilterOperations.convert_pnl_to_binary(univ.retrieve(k, 'CbInfo',\n                                                                                 cbz_cbw_name)['CbBrkPnL'].values)\n                    if col_names is None:\n                        col_names = list(fDatak.columns)\n                    if fmng.UseDir:\n                        dDatak = univ.retrieve(k, 'CbInfo', cbz_cbw_name)['CbDir'].values\n                        fData.append(fDatak.values * dDatak[:, np.newaxis])\n                    else:\n                        fData.append(fDatak.values)\n                    cData.append(cDatak)\n                    gc.collect()\n                print('done reading the data')\n\n                setSize = len(fData)\n                setFData = None\n                setCData = None\n                for k in range(0, setSize):\n                    if k == 0:\n                        setFData = fData.pop(0)\n                        setCData = cData.pop(0)\n                    else:\n                        setFData = np.vstack((setFData, fData.pop(0)))\n                        setCData = np.hstack((setCData, cData.pop(0)))\n                gc.collect()\n\n                setRData = setFData.copy()\n                setFData, setMedData = fmng.BinOpFunc(setFData)\n                _keptFilters = setListAll[0].load('Set00', fmng.DirName, cbz_cbw_name + 'colNames')\n                _keptFilters = [k for k in _keptFilters]\n                keepIdx = [kidx for kidx, k in enumerate(col_names) if k in _keptFilters]\n\n                setFData = setFData[:, np.array(keepIdx)]\n                setMedData = setMedData[np.array(keepIdx)]\n                setRData = setRData[:, np.array(keepIdx)]\n                col_names = [k for kidx, k in enumerate(col_names) if kidx in keepIdx]\n\n                # get ic\n                setIcData = FilterOperations.ic_x_bin(setFData, setCData)\n                col_names = np.array(col_names)\n\n                j = uncAssetUniverseSubSet\n\n                j.save('SetXX', fmng.DirName, cbz_cbw_name + 'filterData', setFData)\n                j.save('SetXX', fmng.DirName, cbz_cbw_name + 'icData', setIcData)\n                j.save('SetXX', fmng.DirName, cbz_cbw_name + 'medData', setMedData)\n                j.save('SetXX', fmng.DirName, cbz_cbw_name + 'brkPnLData', setCData)\n                j.save('SetXX', fmng.DirName, cbz_cbw_name + 'colNames', col_names)\n                j.save('SetXX', fmng.DirName, cbz_cbw_name + 'rawData', setRData)\n                gc.collect()\n        return None\n\n\nif __name__ == '__main__':\n    # update_universes()\n    # update_cbinfo()\n    pass\n","sub_path":"CrazyCod/Futures/PCore/OldWork/Assets.py","file_name":"Assets.py","file_ext":"py","file_size_in_byte":43251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"210987554","text":"from sanic.views import HTTPMethodView\n\nfrom . 
import jinja2\n\n\nclass ShadowsocksView(HTTPMethodView):\n page_name = 'Shadowsocks'\n\n @jinja2.template('shadowsocks.html')\n async def get(self, request):\n content = {\"page_name\": self.page_name}\n return content\n","sub_path":"PiDashboard/views/shadowsocks.py","file_name":"shadowsocks.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"619947933","text":"\"\"\"\nOAuth dance part of the bot.\n\"\"\"\nimport json\nimport os\n\nimport itsdangerous\nimport typing\nfrom asyncio_extras import threadpool\nfrom kyoukai.asphalt import HTTPRequestContext\nfrom kyoukai.blueprint import Blueprint\nfrom requests_oauthlib import OAuth2Session\nfrom sqlalchemy.orm import Session\nfrom werkzeug.utils import redirect\n\nfrom joku.core.bot import Jokusoramame\n\nAPI_BASE_URL = \"https://discordapp.com/api/v6\"\nAUTHORIZATION_BASE_URL = API_BASE_URL + '/oauth2/authorize'\nTOKEN_URL = API_BASE_URL + '/oauth2/token'\n\nAPI_ME_URL = API_BASE_URL + '/users/@me'\nAPI_GUILDS_URL = API_BASE_URL + '/users/@me/guilds'\nAPI_INVITE_URL = API_BASE_URL + '/invite/{code}'\n\n\nclass OAuth2DanceHelper(object):\n \"\"\"\n A class to help with the OAuth 2 dance.\n \"\"\"\n SCOPES = [\"identify\", \"guilds\"]\n\n def __init__(self, bot: Jokusoramame):\n \"\"\"\n :param bot: The bot instance. \n \"\"\"\n self.bot = bot\n\n if self.bot.config.get(\"developer_mode\", False) is True:\n os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'\n\n @property\n def client_id(self) -> int:\n \"\"\"\n :return: The client ID of this bot. \n \"\"\"\n return self.bot.app_id\n\n @property\n def client_secret(self) -> str:\n \"\"\"\n :return: The client secret of this bot. \n \"\"\"\n return self.bot.config[\"oauth\"][\"secret_key\"]\n\n @property\n def oauth2_redirect(self) -> str:\n \"\"\"\n :return: The OAuth2 redirect of this bot.\n \"\"\"\n return self.bot.config[\"oauth\"][\"redirect_uri\"]\n\n def make_session(self, token=None, state=None, scopes=None):\n \"\"\"\n Makes a new OAuth2Session.\n \n :param token: The OAuth2 token to use. \n :param state: The OAuth2 state to use.\n :param scopes: The OAuth2 scopes to use.\n \"\"\"\n\n return OAuth2Session(\n client_id=self.client_id,\n token=token,\n state=state,\n scope=scopes,\n redirect_uri=self.oauth2_redirect,\n auto_refresh_kwargs={\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n },\n auto_refresh_url=TOKEN_URL,\n token_updater=self.token_updater)\n\n async def _token_updater(self, token: dict):\n \"\"\"\n A callback coroutine that updates the token in the user's entry in the PostgreSQL database.\n \n :param token: The token to update. \n \"\"\"\n\n user = await self.bot.database.get_or_create_user(int(token[\"id\"]))\n\n async with threadpool():\n with self.bot.database.get_session() as sess:\n assert isinstance(sess, Session)\n user.oauth_token = token\n sess.add(user)\n\n return user\n\n def token_updater(self, token: dict):\n \"\"\"\n Callback that schedules the coroutine above.\n \"\"\"\n return self.bot.loop.create_task(self._token_updater(token))\n\n # OAuth2 methods\n def get_redirect_url_and_state(self, scopes: typing.List[str] = None) -> typing.Tuple[str, typing.Any]:\n \"\"\"\n Gets the redirect URL for a new OAuth2 request.\n \n :param scopes: The scopes to request. 
\n :return: A tuple of the redirect URL and the new OAuth2 session.\n \"\"\"\n scopes = scopes or self.SCOPES\n sess = self.make_session(scopes=scopes)\n\n url, state = sess.authorization_url(AUTHORIZATION_BASE_URL)\n return url, state\n\n async def fetch_token(self, state: str, code: str, url: str) -> dict:\n \"\"\"\n Fetches the token for a user.\n \n This does **not** store the token.\n \"\"\"\n sess = self.make_session(state=state)\n async with threadpool():\n token = sess.fetch_token(TOKEN_URL, code=code, authorization_response=url,\n client_secret=self.client_secret) # oauthlib is bad and needs this\n\n return token\n\n async def get_me(self, token: dict) -> dict:\n \"\"\"\n Gets the currently logged in user.\n \"\"\"\n async with threadpool():\n sess = self.make_session(token=token)\n data = sess.get(API_ME_URL).json()\n\n # cast id to int\n data[\"id\"] = int(data[\"id\"])\n\n return data\n\n async def get_servers(self, token: dict) -> dict:\n \"\"\"\n Gets the servers for this user.\n \"\"\"\n async with threadpool():\n sess = self.make_session(token=token)\n data = sess.get(API_GUILDS_URL).json()\n\n return data\n\n\nbp = Blueprint(name=\"oauth2\", prefix=\"/oauth2\")\n\n\n@bp.route(\"/test/@me\")\nasync def at_me(ctx: HTTPRequestContext):\n cookie = ctx.request.cookies.get(\"joku_user_id\")\n if cookie is None:\n return redirect(\"/oauth2/redirect\")\n\n try:\n uid = ctx.bot.signer.loads(cookie)\n except itsdangerous.BadData:\n r = redirect(\"/oauth2/redirect\")\n r.delete_cookie(key=\"joku_user_id\")\n return r\n\n token = (await ctx.bot.database.get_or_create_user(id=uid)).oauth_token\n return (json.dumps(await ctx.bot.oauth.get_me(token))), 200, {\"Content-Type\": \"application/json\"}\n\n\n@bp.route(\"/test/servers\")\nasync def test_servers(ctx: HTTPRequestContext):\n cookie = ctx.request.cookies.get(\"joku_user_id\")\n if cookie is None:\n return redirect(\"/oauth2/redirect\")\n\n try:\n uid = ctx.bot.signer.loads(cookie)\n except itsdangerous.BadData:\n r = redirect(\"/oauth2/redirect\")\n r.delete_cookie(key=\"joku_user_id\")\n return r\n\n token = (await ctx.bot.database.get_or_create_user(id=uid)).oauth_token\n return (json.dumps(await ctx.bot.oauth.get_servers(token))), 200, {\"Content-Type\": \"application/json\"}\n\n\n@bp.route(\"/callback\")\nasync def _callback(ctx: HTTPRequestContext):\n \"\"\"\n Called to store the token in the cookies and the DB.\n \"\"\"\n if \"errors\" in ctx.request.args:\n # redirect back to /redirect\n return redirect(\"/oauth2/redirect\", code=302)\n\n state = ctx.request.args[\"state\"]\n code = ctx.request.args[\"code\"]\n\n url = ctx.request.url\n token = await ctx.bot.oauth.fetch_token(state=state, code=code, url=url)\n\n # Get our user object\n me = await ctx.bot.oauth.get_me(token=token)\n\n user = await ctx.bot.database.get_or_create_user(id=me[\"id\"])\n\n async with threadpool():\n with ctx.bot.database.get_session() as sess:\n assert isinstance(sess, Session)\n user.oauth_token = token\n sess.add(user)\n\n signed_cookie = ctx.bot.signer.dumps(me[\"id\"])\n response = redirect(\"/\", code=200)\n response.set_cookie(key=\"joku_user_id\", value=signed_cookie)\n\n return response\n\n\n@bp.route(\"/redirect\")\nasync def _redirect(ctx: HTTPRequestContext):\n \"\"\"\n Redirects the user to the Discord OAuth2 signin page.\n \"\"\"\n bot = ctx.bot # type: Jokusoramame\n # get the oauth2 fuckery\n url, state = bot.oauth.get_redirect_url_and_state()\n response = redirect(url, code=302)\n\n return 
response\n","sub_path":"joku/web/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"266131926","text":"\"\"\"Transformer for data that contains Null values.\"\"\"\n\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom rdt.transformers.base import BaseTransformer\n\nIRREVERSIBLE_WARNING = (\n    'Replacing nulls with existing value without `null_column`, which is not reversible. '\n    'Use `null_column=True` to ensure that the transformation is reversible.'\n)\n\n\nclass NullTransformer(BaseTransformer):\n    \"\"\"Transformer for data that contains Null values.\n\n    Args:\n        fill_value (object or None):\n            Value to replace nulls. If ``None``, nans are not replaced.\n        null_column (bool):\n            Whether to create a new column to indicate which values were null or not.\n            If ``None``, only create a new column when the data contains null values.\n            If ``True``, always create the new column whether there are null values or not.\n            If ``False``, do not create the new column.\n            Defaults to ``None``.\n        copy (bool):\n            Whether to create a copy of the input data or modify it destructively.\n    \"\"\"\n\n    nulls = None\n    _null_column = None\n    _fill_value = None\n\n    def __init__(self, fill_value, null_column=None, copy=False):\n        self.fill_value = fill_value\n        self.null_column = null_column\n        self.copy = copy\n\n    def fit(self, data):\n        \"\"\"Fit the transformer to the data.\n\n        Evaluate if the transformer has to create the null column or not.\n\n        Args:\n            data (pandas.Series or numpy.ndarray):\n                Data to transform.\n        \"\"\"\n        if self.fill_value == 'mean':\n            self._fill_value = data.mean() if pd.notnull(data).any() else 0\n        elif self.fill_value == 'mode':\n            self._fill_value = data.mode(dropna=True)[0] if pd.notnull(data).any() else 0\n        else:\n            self._fill_value = self.fill_value\n\n        self.nulls = data.isnull().any()\n        if self.null_column is None:\n            self._null_column = self.nulls\n        else:\n            self._null_column = self.null_column\n\n    def transform(self, data):\n        \"\"\"Replace null values with the indicated fill_value.\n\n        If required, create the null indicator column.\n\n        Args:\n            data (pandas.Series or numpy.ndarray):\n                Data to transform.\n\n        Returns:\n            numpy.ndarray\n        \"\"\"\n        isnull = data.isnull()\n        if self.nulls and self._fill_value is not None:\n            if not self.copy:\n                data[isnull] = self._fill_value\n            else:\n                data = data.fillna(self._fill_value)\n\n        if self._null_column:\n            return pd.concat([data, isnull.astype('int')], axis=1).values\n\n        if self._fill_value in data.values:\n            warnings.warn(IRREVERSIBLE_WARNING)\n\n        return data.values\n\n    def reverse_transform(self, data):\n        \"\"\"Restore null values to the data.\n\n        If a null indicator column was created during fit, use it as a reference.\n        Otherwise, replace all instances of ``fill_value`` that can be found in\n        data.\n\n        Args:\n            data (numpy.ndarray):\n                Data to transform.\n\n        Returns:\n            pandas.Series\n        \"\"\"\n        if self.nulls:\n            if self._null_column:\n                isnull = data[:, 1] > 0.5\n                data = pd.Series(data[:, 0])\n            else:\n                isnull = self._fill_value == data\n                data = pd.Series(data)\n\n            if isnull.any():\n                if self.copy:\n                    data = data.copy()\n\n                data.iloc[isnull] = np.nan\n\n        return data\n","sub_path":"rdt/transformers/null.py","file_name":"null.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"173151443","text":"from 
collections import defaultdict\r\nfrom collections import deque\r\n\r\n\r\ndef build_cache(cache, m, n, board):\r\n    for i in range(1, m + 1):\r\n        for j in range(1, n + 1):\r\n            cache[board[i][j]].append((i, j))\r\n\r\n\r\ndef get_next_cells(row1, col1, cache):\r\n    return cache[row1 * col1]\r\n\r\n\r\ndef solve(sol, row, col, m, n, cache):\r\n    queue = deque()\r\n    queue.append((row, col))\r\n    sol[row][col] = 1\r\n    while queue:\r\n        r, c = queue.popleft()\r\n        if r == 1 and c == 1:\r\n            return True\r\n        for r1, c1 in get_next_cells(r, c, cache):\r\n            if sol[r1][c1] == 0:\r\n                queue.append((r1, c1))\r\n                sol[r1][c1] = 1\r\n    return False\r\n\r\n\r\nm = int(input())\r\nn = int(input())\r\n\r\nsol = [[0 for i in range(n + 1)] for j in range(m + 1)]\r\nboard = list()\r\nboard.append([0 for i in range(n + 1)])\r\n\r\nfor i in range(m):\r\n    l = [0]\r\n    l.extend([int(x) for x in input().split()])\r\n    board.append(l)\r\n\r\ncell_cache = defaultdict(list)\r\n\r\nbuild_cache(cell_cache, m, n, board)\r\n\r\nif solve(sol, m, n, m, n, cell_cache):\r\n    print(\"yes\")\r\nelse:\r\n    print(\"no\")\r\n","sub_path":"2020 Contest/Question J5 Graph (BFS).py","file_name":"Question J5 Graph (BFS).py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"108661050","text":"from .FruitTale import FruitTale\nimport pygame\n\nclass Game:\n\n    def __init__(self, tales):\n        self.points = 0\n        self.tales = tales\n        self.stack = []\n\n    def taleHit(self, coord):\n        x, y = coord\n        for tale in self.tales:\n            if isinstance(tale, FruitTale) and tale.isHit(coord) and not tale.isLoked:\n                if len(self.stack) == 0 or (len(self.stack) == 1 and not self.stack[0] is tale):\n                    self.stack.append(tale)\n                    tale.lock()\n                    #time.sleep(1)\n\n    def matchCheck(self):\n        if len(self.stack) > 1:\n            pygame.time.delay(700)\n            if self.stack[0].getName() == self.stack[1].getName():\n                self.points += 1\n                print(self.points)\n            else:\n                self.stack[0].unlock()\n                self.stack[1].unlock()\n            self.stack.clear()\n            #time.sleep(100)\n\n    def getScore(self):\n        return self.points","sub_path":"findPair/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"183981199","text":"from datetime import timedelta\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom domain.models import ScheduledExam\nfrom domain.tasks import send_push_notification\nfrom domain.utils import grouped_exams_by_lab_date_keygen\n\n\nclass Command(BaseCommand):\n    help = \"Resend notifications for exams scheduled from March 1, 2018\"\n\n    @transaction.atomic\n    def handle(self, *args, **options):\n        scheduled_exams = ScheduledExam.objects.filter(\n            status=ScheduledExam.EXAM_TIME_SCHEDULED,\n            scheduled_time__year=2018,\n            scheduled_time__month__gte=3\n        )\n        for instance in scheduled_exams:\n            print(instance)\n            first_name = instance.prescription.patient.full_name.split(\" \")[0]\n            exam_name = instance.exam.name\n            expiration_date = instance.prescription.expiration_date\n\n            subject, message, data = \"\", \"\", None\n            scheduled_time = instance.scheduled_time\n            if not scheduled_time:\n                print('Scheduled time is null')\n                return\n\n            data = {\"scheduled_exam_id\": instance.id, \"status\": ScheduledExam.EXAM_TIME_SCHEDULED,\n                    \"scheduled_time\": int(scheduled_time.timestamp()), \"exam_description\": instance.exam.description,\n                    \"is_scheduled_by_phone\": 
instance.exam.is_scheduled_by_phone, \"user_first_name\": first_name}\n\n # if start_preparation_in_hours:\n # preparation_eta = scheduled_time - timedelta(hours=start_preparation_in_hours) - timedelta(minutes=settings.NOTIFICATION_BEFORE_EXAM_PREPARATION_IN_MINUTES)\n preparation_eta = scheduled_time - timedelta(days=settings.NOTIFICATION_BEFORE_EXAM_PREPARATION_IN_DAYS)\n preparation_eta = preparation_eta.replace(\n hour=settings.NOTIFICATION_EXACT_TIME_HOURS,\n minute=0,\n second=0,\n microsecond=0\n )\n\n preparation_future_subject = \"Hora de começar a se preparar\"\n preparation_future_message = \"{0}, você precisa se preparar para seu exame. Vamos ver as instruções?\".format(first_name)\n\n send_push_notification.apply_async(\n args=[instance.prescription.patient.token, preparation_future_subject, preparation_future_message, data],\n eta=preparation_eta,\n )\n\n eta = scheduled_time - timedelta(minutes=settings.NOTIFICATION_BEFORE_EXAM_IN_MINUTES)\n future_subject = \"Você fez o preparo para o exame?\"\n future_message = \"{0}, toque para confirmar o preparo, ou para avisar que não pôde fazê-lo.\".format(first_name)\n\n send_push_notification.apply_async(\n args=[instance.prescription.patient.token, future_subject, future_message, data],\n eta=eta,\n )\n\n if expiration_date:\n days = settings.NOTIFICATION_BEFORE_PRESCRIPTION_EXPIRES_IN_DAYS\n expiration_eta = expiration_date - timedelta(days=days)\n expiration_future_subject = \"Seu pedido expira em {0} dias\".format(days)\n expiration_future_message = \"O pedido do exame {0} expira em {1} não esqueça de pedir para Sara fazer o agendamento.\".format(\n exam_name,\n expiration_date.strftime('%d %B, %Y')\n )\n send_push_notification.apply_async(\n args=[instance.prescription.patient.token, expiration_future_subject, expiration_future_message, data],\n eta=expiration_eta,\n )\n\n subject = \"Seu exame foi agendado\"\n message = \"{0}, toque para ver detalhes do exame.\".format(first_name)\n if data:\n data.update({\n \"exam_description\": instance.exam.description,\n \"is_scheduled_by_phone\": instance.exam.is_scheduled_by_phone,\n \"user_first_name\": first_name\n })\n\n\n send_push_notification.apply_async(args=[instance.prescription.patient.token, subject, message, data], )\n print('sent: {}'.format(instance.prescription.patient.full_name))\n","sub_path":"lab_core/domain/management/commands/resend_notifications.py","file_name":"resend_notifications.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"299069648","text":"from google.appengine.ext import db\n\nfrom geo.geomodel import GeoModel\n\nclass Neighborhood(db.Model):\n name = db.StringProperty()\n number_of_students = db.IntegerProperty()\n percent_single_family = db.FloatProperty()\n location = db.GeoPtProperty()\n\nb_woods = Neighborhood()\nb_woods.name = \"b_woods\"\nb_woods.number_of_students = 120\nb_woods.percent_single_family = 1.0\nb_woods.location = db.GeoPt(33.969553,-84.159453)\nb_woods.put()\n\ncard = Neighborhood()\ncard.name = \"card\"\ncard.number_of_students = 300\ncard.percent_single_family = 1.0\ncard.location = db.GeoPt(33.979392,-84.132407)\ncard.put()\n\nclass School(GeoModel):\n name = db.StringProperty(required=True)\n type = db.StringProperty(required=True, choices=set([\"ES\", \"MS\",\n\"HS\"]))\n capacity = db.IntegerProperty()\n #location = db.GeoPtProperty()\n\n @staticmethod\n def public_attributes():\n \"\"\"Returns a set of simple attributes on public 
school entities.\"\"\"\n return [\n 'school_id', 'name', 'address', 'city', 'state', 'zip_code',\n 'enrollment', 'phone_number', 'locale_code', 'school_type',\n 'school_level'\n ]\n\n def _get_latitude(self):\n return self.location.lat if self.location else None\n\n def _set_latitude(self, lat):\n if not self.location:\n self.location = db.GeoPt()\n\n self.location.lat = lat\n\n latitude = property(_get_latitude, _set_latitude)\n\n def _get_longitude(self):\n return self.location.lon if self.location else None\n\n def _set_longitude(self, lon):\n if not self.location:\n self.location = db.GeoPt()\n\n self.location.lon = lon\n\n longitude = property(_get_longitude, _set_longitude)\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"102858464","text":"# Sophia Tacderas\n# programming assignment 5\n# Asks the user to think of a random integer and if it is less than, greater than, or equal to each guess.\n\nprint(\"\\n\\nEnter two numbers, low then high.\")\n\n# While loop that checks if low > high\nnumberFlag = 0\nwhile numberFlag != 1:\n low = int(input(\"low = \"))\n high = int(input(\"high = \"))\n print()\n if low > high:\n print(\"Please enter the smaller followed by the larger number.\")\n else:\n numberFlag = 1\n# end while\n\n# Prompts user to think of a number and searches for it based on the user's input.\nprint(\"Think of a number in the range \" + str(low) + \" to \" + str(high) + \".\\n\")\n\nnumber_of_guesses = 0\ninputFlag = 0\nguessFlag = 0\n\n# While loop that checks for correct letter input\nwhile inputFlag != 1:\n mid = (low + high) // 2 # compute midpoint\n\n if low == high:\n break\n elif (low > high) or (high < low):\n # user's responses are not logically consistent\n break\n\n # Condition to check for when to continue asking for guess\n if (low < mid) or (high > mid):\n print(\"Is your number Less than, Greater than, or Equal to \" + str(mid) + \"?\")\n guess = input(\"Type 'L', 'G' or 'E': \")\n\n while guessFlag != 1:\n if guess == 'L' or guess == 'l' or guess == 'G' or guess == 'g' or guess == 'E' or guess == 'e':\n guessFlag += 1\n else:\n print()\n guess = input(\"Please type 'L', 'G' or 'E': \")\n # end guessFlag while\n\n print()\n guessFlag = 0\n number_of_guesses += 1\n\n if guess == 'L' or guess == 'l':\n high = mid - 1\n elif guess == 'G' or guess == 'g':\n low = mid + 1\n else: # guess == 'E' or guess == 'e'\n # set low and high equal to mid\n low = mid\n high = mid\n break\n # end if\n# end inputFlag while\n\nif low == high: # end w/ no guesses or search space narrows down to one number after multiple binary divisions\n if number_of_guesses == 1:\n print(\"I found your number in 1 guess.\\n\\n\")\n else:\n print(\"Your number is \" + str(mid) + \". 
I found it in \" + str(number_of_guesses) + \" guesses.\\n\\n\")\nelif (low > high) or (high < low): # user's guesses have been inconsistent\n print(\"Your answers have not been consistent.\\n\\n\")\n","sub_path":"pa5/Question.py","file_name":"Question.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"295395271","text":"import flask\nfrom tsCore.commands import *\n\n\ndef response(content, status):\n return flask.Response(\n flask.json.dumps(content),\n status,\n mimetype='application/json',\n )\n\n\ndef do(method, command, parameter):\n dictCommand = {\n 'add': add_items,\n }\n if command in dictCommand:\n res, status = dictCommand[command](method, parameter)\n return response(res, status)\n return response('error', 404)\n\n","sub_path":"tsCore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"41188602","text":"import requests\nimport os\nimport time\nimport json\nimport string\nfrom collections import defaultdict\nfrom functools import reduce\n\nfrom invoke import Collection, task, run\nfrom jinja2 import Environment, FileSystemLoader\nfrom urllib.parse import urlparse\n\nTEMPLATE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\nSPIDERS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'city_scrapers/spiders')\nTESTS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests')\nFILES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests/files')\n\n# pty is not available on Windows\ntry:\n import pty\n assert pty # prevent pyflakes warning about unused import\n pty_available = True\nexcept ImportError:\n pty_available = False\n\n\ndef quote_list(the_list):\n \"\"\"Jinja helper to quote list items\"\"\"\n return [\"'%s'\" % element for element in the_list]\n\n\n# Jinja env\nenv = Environment(loader=FileSystemLoader(TEMPLATE_DIR))\nenv.filters[\"quote_list\"] = quote_list\n\n\n@task()\ndef genspider(ctx, name, agency_name, start_urls):\n \"\"\"\n Make a new HTML scraping spider.\n\n Specify:\n\n 1. Slug / shortname for spider (typically an agency acronym, e.g. `cpl`\n for the Chicago Public Libary).\n 2. Long name for spider (e.g. \"Chicago Public Library\").\n 3. 
URLs to start scraping, separated by commas.\n\n Example:\n ```\n invoke genspider testspider 'Test Spider Board Of Directors' http://citybureau.org/articles,http://citybureau.org/staff\n ```\n\n URLs cannot end in `/`.\n\n \"\"\"\n start_urls = start_urls.split(',')\n domains = _get_domains(start_urls)\n _gen_spider(name, agency_name, domains, start_urls)\n _gen_tests(name)\n _gen_html(name, start_urls)\n\n\n@task\ndef runtests(ctx):\n \"\"\"\n Runs pytest and flake8.\n \"\"\"\n run('pytest -s tests', pty=pty_available)\n run('flake8 --ignore E265,E266,E501 --exclude src, lib', pty=pty_available)\n\n\ndef _make_classname(name):\n return '{}Spider'.format(string.capwords(name, sep='_').replace('_', ''))\n\n\ndef _gen_spider(name, agency_name, domains, start_urls):\n filename = '{0}/{1}.py'.format(SPIDERS_DIR, name)\n\n with open(filename, 'w') as f:\n content = _render_content('spider.tmpl', name=name, agency_name=agency_name, domains=domains, start_urls=start_urls)\n f.write(content)\n\n print('Created {0}'.format(filename))\n return filename\n\n\ndef _gen_tests(name):\n filename = '{0}/test_{1}.py'.format(TESTS_DIR, name)\n with open(filename, 'w') as f:\n content = _render_content('test.tmpl', name=name)\n f.write(content)\n print('Created {0}'.format(filename))\n return filename\n\n\ndef _fetch_url(url, attempt=1, session=requests.Session()):\n \"\"\"\n Attempt to fetch the specified url. If the request fails, retry it with an\n exponential backoff up to 5 times.\n \"\"\"\n try:\n # Without this, citybureau.org throttles the first request.\n headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'}\n r = session.get(url, headers=headers)\n r.raise_for_status()\n return r\n except requests.exceptions.RequestException as e:\n if attempt >= 5:\n return None\n else:\n print(e)\n wait = 2 ** attempt\n print('waiting for {0} seconds'.format(wait))\n time.sleep(wait)\n return _fetch_url(url, attempt + 1)\n\n\ndef _gen_html(name, start_urls, session=requests.Session()):\n \"\"\"\n urls should not end in /\n \"\"\"\n files = []\n for url in start_urls:\n r = _fetch_url(url, session=session)\n if r is None:\n continue\n\n content = r.text.strip()\n filename = '{0}/{1}.html'.format(FILES_DIR, name)\n\n url_suffix = url.split('/')[-1]\n if '.' 
in url_suffix:\n            url_suffix = url_suffix.split('.')[-2]\n        if url_suffix:\n            filename = '{0}/{1}_{2}.html'.format(FILES_DIR, name, url_suffix)\n        else:\n            filename = '{0}/{1}.html'.format(FILES_DIR, name)\n\n        with open(filename, 'w') as f:\n            f.write(content)\n\n        print('Created {0}'.format(filename))\n        files.append(filename)\n\n    return files\n\n\ndef _render_content(template, name, agency_name=None, domains=None, start_urls=None):\n    jinja_template = env.get_template(template)\n    classname = _make_classname(name)\n    return jinja_template.render(\n        name=name, agency_name=agency_name, domains=domains, classname=classname, start_urls=start_urls)\n\n\ndef _get_domains(start_urls):\n    domains = []\n    for url in start_urls:\n        parsed = urlparse(url)\n        if parsed.netloc not in domains:\n            domains.append(parsed.netloc)\n    return domains\n\n\n@task\ndef validate_spider(ctx, spider_file):\n    \"\"\"\n    Validates scraped items from a spider.\n    Passes if >=90% of the scraped items\n    conform to the schema.\n    \"\"\"\n    spider = os.path.basename(spider_file).split('.')[0]\n    # Open a JSON of scraped items\n    with open(spider_file, 'r') as f:\n        content = f.read()\n        if len(content) == 0:\n            print(\"{0} was empty.\".format(spider_file))\n            return None\n        try:\n            scraped_items = json.loads(content)\n        except json.decoder.JSONDecodeError:\n            message = \"Could not decode JSON. Here is the beginning and end of the file: {0}\\n...\\n{1}\"\n            print(message.format(content[:50], content[-50:]))\n            raise Exception(\"Could not decode JSON\")\n\n    # Drop empty items\n    nonempty_items = [item for item in scraped_items if item]\n\n    # Reformat items from a list of dicts into a dict of lists\n    # Keep only the validation keys (that start with 'val_')\n    validated_items = defaultdict(list)\n    for item in nonempty_items:\n        for k, v in item.items():\n            if k.startswith('val_'):\n                validated_items[k].append(v)\n\n    print('\\n------------Validation Summary for: {0}---------------'.format(spider))\n    print('Validating {} items\\n'.format(len(nonempty_items)))\n    validation_summary = {}\n    for item_key, item_list in validated_items.items():\n        validation_summary[item_key] = reduce(lambda x, y: x + y, item_list) / len(item_list)\n        print('{}: {:.0%}'.format(item_key[4:], validation_summary[item_key]))\n\n    try:\n        assert all([x >= 0.9 for x in validation_summary.values()])\n    except AssertionError as e:\n        message = (\n            'Less than 90% of the scraped items from {0} passed validation. 
'\n 'See the validation summary printed in stdout, and check that the '\n 'scraped items conform to the events schema at: '\n 'https://github.com/City-Bureau/city-scrapers/'\n 'blob/master/docs/06_event_schema.md'\n ).format(spider)\n raise Exception(message) from e\n\n\n# Python invoke namespace (http://docs.pyinvoke.org/en/0.11.0/concepts/namespaces.html#nesting-collections)\nns = Collection()\nns.add_task(genspider)\nns.add_task(runtests)\nns.add_task(validate_spider)\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"427981626","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# job management\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2012 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nfrom __future__ import print_function\nimport argparse\nfrom datetime import datetime, timedelta\nimport csv\nimport time\nimport sys\nfrom pymongo import UpdateOne\n# NOC modules\nfrom noc.core.management.base import BaseCommand\nfrom noc.core.scheduler.scheduler import Scheduler\n\n\nSHARDING_SCHEDULER = {\"discovery\", \"correlator\", \"escalator\"}\n\n\nclass Command(BaseCommand):\n \"\"\"\n Manage Jobs\n \"\"\"\n help = \"Manage Jobs\"\n default_time = timedelta(minutes=5)\n\n @staticmethod\n def valid_date(s):\n print(s)\n try:\n return datetime.strptime(s, \"%Y-%m-%d %H:%M\")\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(s)\n raise argparse.ArgumentTypeError(msg)\n\n @staticmethod\n def scheduler(s):\n scheduler, pool = \"scheduler\", \"default\"\n if \".\" in s:\n scheduler, pool = s.split(\".\")\n if scheduler in SHARDING_SCHEDULER:\n # raise argparse.ArgumentTypeError(\"Scheduler: %s, not supporting sharding\")\n return Scheduler(scheduler, pool=pool).get_collection()\n return Scheduler(scheduler).get_collection()\n\n def add_arguments(self, parser):\n parser.add_argument(\"--scheduler\", \"-s\",\n dest=\"scheduler\",\n default=Scheduler(\"scheduler\").get_collection(),\n type=self.scheduler,\n help=\"Select scheduler. 
For sharded use SCHEDULER_NAME.SHARD_NAME\"\n ),\n parser.add_argument(\"--format\", \"-f\",\n dest=\"store\",\n # action=\"format\",\n choices=[\"json\", \"csv\"],\n help=\"Set output format\"\n ),\n subparsers = parser.add_subparsers(dest=\"cmd\")\n # load command\n list_parser = subparsers.add_parser(\"list\")\n list_parser.add_argument(\"--name\",\n help=\"Job name in scheduler\")\n list_parser.add_argument(\n \"key\",\n nargs=argparse.REMAINDER,\n help=\"List of job key\"\n )\n get_parser = subparsers.add_parser(\"get\")\n get_parser.add_argument(\"--id\",\n help=\"Job name in scheduler\")\n subparsers.add_parser(\"set\")\n # Parse Job Field\n reschedule = subparsers.add_parser(\"reschedule\",\n help=\"Shift Jobs to interval\")\n reschedule.add_argument(\"--name\",\n help=\"Job name in scheduler\")\n reschedule.add_argument(\"--start\",\n type=self.valid_date,\n help=\"Start interval for place\")\n reschedule.add_argument(\"--end\",\n type=self.valid_date,\n help=\"End interval for place\")\n reschedule.add_argument(\"--force\",\n default=False,\n action=\"store_true\", help=\"Really do reschedule\")\n reschedule.add_argument(\n \"key\",\n nargs=argparse.REMAINDER,\n help=\"List of job key\"\n )\n parser.add_argument('infile', nargs='?',\n type=argparse.FileType('r'),\n default=sys.stdin)\n\n def init_json(self):\n pass\n\n def init_csv(self):\n self.writer = csv.writer(sys.stdout)\n self.writer.writerow([\n \"Run\", \"ID\", \"Name\", \"Key\", \"Status\", \"Last Status\",\n \"Runs\", \"Last Run\", \"Last Duration\",\n \"Interval\", \"Failed Interval\", \"Offset\"\n ])\n\n def format_json(self, job):\n self.print(job)\n\n def format_csv(self, job):\n # s = job[\"schedule\"] or {}\n self.writer.writerow([\n job[\"ts\"], job[\"_id\"], job[\"jcls\"], job[\"key\"],\n job[\"s\"], job.get(\"ls\", \"\"), job.get(\"runs\", 0),\n job.get(\"last\", \"\"), job.get(\"ldur\", \"\"),\n # s.get(\"interval\", \"\"), s.get(\"failed_interval\", \"\"),\n # s.get(\"offset\", \"\")\n ])\n\n def handle(self, cmd, *args, **options):\n if \"infile\" in options and not sys.stdin.isatty():\n for line in options[\"infile\"]:\n options[\"key\"] += [int(line)]\n return getattr(self, \"handle_%s\" % cmd)(*args, **options)\n\n def handle_list(self, scheduler, *args, **options):\n q = {}\n if options.get(\"name\"):\n q[\"jcls\"] = options[\"name\"]\n if options.get(\"key\"):\n q[\"key\"] = {\"$in\": [int(x) for x in options[\"key\"]]}\n fname = options.get(\"format\", \"csv\")\n format = getattr(self, \"format_%s\" % fname)\n # Print header\n getattr(self, \"init_%s\" % fname)()\n # Print jobs\n for j in scheduler.find(q).sort(\"ts\").limit(50):\n format(j)\n\n def handle_get(self, scheduler, *args, **options):\n fname = options.get(\"format\", \"csv\")\n format = getattr(self, \"format_%s\" % fname)\n # Print header\n getattr(self, \"init_%s\" % fname)()\n # Print jobs\n for j in scheduler.find().sort(\"ts\"):\n format(j)\n\n def handle_set(self, scheduler, *args, **options):\n raise NotImplementedError()\n\n def handle_reschedule(self, scheduler, *args, **options):\n bulk = []\n q = {}\n shift_interval = self.default_time\n if options.get(\"name\"):\n q[\"jcls\"] = options[\"name\"]\n if options.get(\"key\"):\n q[\"key\"] = {\"$in\": [int(x) for x in options[\"key\"]]}\n if not options.get(\"start\"):\n self.die(\"Setting start date for resheduling\")\n start = options.get(\"start\")\n if options.get(\"end\"):\n shift_interval = max(shift_interval, options[\"end\"] - options[\"start\"])\n for j in 
scheduler.find(q).sort(\"ts\"):\n start += shift_interval\n self.print(\"Change: \", j[\"ts\"], \"-->\", start)\n bulk += [UpdateOne({\"_id\": j[\"_id\"]}, {\"$set\": {\"ts\": start}})]\n if options.get(\"force\", False):\n self.print(\"Jobs will be reschedule\")\n for i in reversed(range(1, 10)):\n self.print(\"%d\\n\" % i)\n time.sleep(1)\n scheduler.bulk_write(bulk)\n # Job.get_next_timestamp(64000)\n\n\nif __name__ == \"__main__\":\n Command().run()\n","sub_path":"commands/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"276329154","text":"#http://effbot.org/librarybook/csv.htm\r\n#http://matplotlib.org/basemap/users/geography.html\r\n#http://www.uvm.edu/~jbagrow/dsv/heatmap_basemap.html\r\n#http://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap\r\n#https://www.youtube.com/watch?v=8v3how07th4&list=PLQVvvaa0QuDfefDfXb9Yf0la1fPDKluPF&index=28\r\n#https://pypi.python.org/pypi/geocoder\r\n\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.basemap import Basemap\r\nimport geocoder\r\nimport numpy as np\r\nglobal cityofinterest #so you can search a city specifically\r\nglobal searchforcity\r\n\r\n\r\n\r\n\r\n#Makes a map of specifically California\r\n#llcrnrlat = lower leftcorner latitude, urcrnrlat = upper right corner latitude, llcrnerlon = lower leftcorner longitude, upper right corner longitude\r\nmap = Basemap(projection = 'mill', llcrnrlat = 31, urcrnrlat = 43, llcrnrlon = -130, urcrnrlon = -109, resolution = 'h')\r\nmap.drawcoastlines()\r\nmap.drawmapboundary(fill_color = '#000080')\r\nmap.fillcontinents(color = '#84BE6A', lake_color = '#000080')\r\nmap.drawstates(linewidth = 1)\r\nmap.drawcountries(linewidth = 1)\r\n\r\n\r\n#Lists\r\ndate = []\r\ntime = []\r\ncity = []\r\nstate = []\r\nshape = []\r\nduration = []\r\ncitywiththings = [] #vague because its for shapes and date searches\r\n\r\ndescription = []\r\n#Reads the CSV file and seperates the data into lists\r\n#goes through the csv and puts them in a list\r\nwith open('Sightings.csv') as data:\r\n reader = csv.reader(data)\r\n for row in reader:\r\n date.append(row[0])\r\n time.append(row[1])\r\n city.append(row[2])\r\n state.append(row[3])\r\n shape.append(row[4])\r\n duration.append(row[5])\r\n description.append(row[6])\r\n\r\n\r\n\r\n\r\n#For city search\r\ndef citysearch():\r\n counter = 0\r\n cityer = 0 #A flag variable\r\n global cityofinterest\r\n global searchforcity\r\n lengthofstring = len(city)\r\n print(\"\\n\")\r\n print(\"---------------------------------------------------------------------------------------------------\")\r\n #Goes through the list of cities and checks to see if the city is in the list\r\n for x in range(0, lengthofstring):\r\n #If the city is in the list it will tell you about it\r\n if(city[x] == searchforcity):\r\n print(\" = \", searchforcity, \" = \")\r\n print(\"A UFO sighting has happened on\", date[x], \"at the time\", time[x], \". 
The ship was\", shape[x], \"shaped\", \"and showed up for\", duration[x], \".\", \"The description the person who saw the ship is\", \"'\", description[x],\"'\", \" \\n\")\r\n cityer = 1\r\n counter = counter+1\r\n\r\n print(\"There are \",counter, \" sightings above that match the city \", searchforcity)\r\n\r\n print(\"----------------------------------------------------------------------------------------------------\")\r\n #Since you can only use geocoder 2500 times a day it will allow you to find an exact city\r\n if (cityer == 1):\r\n textformap = searchforcity\r\n searchforcity = searchforcity + \" CA\" #To make sure that geocoder is using California\r\n cityofinterest = (geocoder.google(searchforcity))\r\n print(cityofinterest)\r\n print(\"GENERATING MAP NOW \\n\")\r\n xpt, ypt = map(cityofinterest.lng, cityofinterest.lat)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt, textformap , fontsize = 10, fontweight = 'bold', color = '#FFFFFF')\r\n plt.title('UFO SIGHTINGS IN CALIFORNIA')\r\n plt.show()\r\n #Geocoder can only be used 2500 times and after that it will make city of interest None None\r\n if(cityofinterest == \"None None\"):\r\n print(\"Sorry Geocoder cannot be used at this moment. Please try again in a few minutes\")\r\n quit()\r\n\r\n #Since the flag is already is 0 it will come here if the city is not found and it will do nothing\r\n if(cityer == 0):\r\n print(\"\")\r\n\r\n\r\n\r\n#For Shape Search and Date Search\r\ndef advancedsearch():\r\n counter = 0\r\n searcher = 0\r\n global question\r\n if(question == 'Shape'):\r\n lengthofstring = len(shape)\r\n print(\"---------------------------------------------------------------------------------------------------\")\r\n #Goes through the list of shapes and checks to see if the shape is in the list\r\n for x in range(0, lengthofstring):\r\n #If the shape is in the list it will tell you about it\r\n if(shape[x] == searchforshape):\r\n\r\n print(\" = \", city[x], \" = \")\r\n print(\"A UFO sighting has happened on\", date[x], \"at the time\", time[x], \". The location was in \", city[x], \"California\", \"and showed up for\", duration[x], \".\", \"The description the person who saw the ship is\", \"'\", description[x],\"'\", \" \\n\")\r\n citywiththings.append(city[x])\r\n searcher = 1\r\n counter = counter + 1\r\n print(\"There are \", counter, \" sightings above that match the shape of \", searchforshape)\r\n\r\n if(question == 'date' or question == 'Date'):\r\n lengthofstring = len(date)\r\n dash = '/' #for the string\r\n year1 = '2014' #for the string becuase it won't searchwithout all parameters\r\n year2 = '2015' #other year and needed to search\r\n #adds all the parameters to be able to search the dates list\r\n finalforsearch = searchformonth + dash + searchforday + dash + year1\r\n finalforsearch2 = searchformonth + dash + searchforday + dash + year2\r\n\r\n #Searches the dates and finds the occurances that happen\r\n for d in range(0, lengthofstring):\r\n if(date[d] == finalforsearch or date[d] == finalforsearch2):\r\n print(\" = \", city[d], \" = \")\r\n print(\" = \", date[d], \" = \")\r\n print(\"The shape of the UFO was\", shape[d], \"at the time\", time[d], \". 
The location was in \", city[d], \"California\", \"and showed up for\", duration[d], \".\", \"The description the person who saw the ship is\", \"'\", description[d],\"'\", \" \\n\")\r\n                citywiththings.append(city[d])\r\n                searcher = 1\r\n                counter = counter + 1\r\n        print(\"There are \", counter, \" sightings above that match the date of \", searchformonth, \"/\", searchforday)\r\n        #Goes through the list of dates then puts the days in a list\r\n\r\n    print(\"----------------------------------------------------------------------------------------------------\")\r\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n    #To plot the map of the cities. If a city matches a description of either date or shape it will plot the city\r\n    #Long but necessary\r\n    if(searcher == 1):\r\n        lengthofcitywiththings = len(citywiththings)\r\n\r\n        for y in range(0, lengthofcitywiththings):\r\n\r\n            if(citywiththings[y] == 'Alameda'):\r\n                Alemlng, Alemlat = ( -122.3, 37.76)\r\n                xpt, ypt = map(Alemlng, Alemlat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Alameda\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n            if(citywiththings[y] == 'Alamo'):\r\n                Alamolng, Alamolat = ( -122.03, 37.85)\r\n                xpt,ypt = map(Alamolng, Alamolat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Alamo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n            if(citywiththings[y] == 'Aliso Viejo'):\r\n                Alilng, Alilat = ( -117.7, 33.56)\r\n                xpt, ypt = map(Alilng, Alilat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Aliso Viejo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n            if(citywiththings[y] == 'Alta Loma'):\r\n                Altlng, Altlat = ( -117.6, 34.12)\r\n                xpt, ypt = map(Altlng, Altlat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Alta Loma\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n            if(citywiththings[y] == 'Altadena'):\r\n                Altdlng, Altdlat = (-118.13, 34.19)\r\n                xpt, ypt = map(Altdlng, Altdlat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Altadena\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n            if(citywiththings[y] == 'Anaheim'):\r\n                Analng, Analat = ( -117.91, 33.83)\r\n                xpt, ypt = map(Analng, Analat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Anaheim\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n            if(citywiththings[y] == 'Anderson'):\r\n                Andlng, Andlat = ( -122.29, 40.44)\r\n                xpt, ypt = map(Andlng, Andlat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Anderson\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n            if(citywiththings[y] == 'Antelope'):\r\n                Antlng, Antlat = ( -121.36, 38.71)\r\n                xpt, ypt = map(Antlng, Antlat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Antelope\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n            if(citywiththings[y] == 'Antioch'):\r\n                Antilng, Antilat = ( -121.80, 38.00)\r\n                xpt, ypt = map(Antilng, Antilat, inverse = False)\r\n                map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n                plt.text(xpt,ypt , \"Antioch\" , fontsize = 7, fontweight 
= 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Apple Valley'):\r\n Applng, Applat = ( -117.19, 34.50)\r\n xpt, ypt = map(Applng, Applat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Apple Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Aptos'):\r\n Aptlng, Aptlat = ( -121.89, 36.97)\r\n xpt, ypt = map(Aptlng, Aptlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Aptos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Arbuckle'):\r\n Arlng, Arlat = ( -122.05, 39.01)\r\n xpt, ypt = map(Arlng, Arlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Arbuckle\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Arcadia'):\r\n Arclng, Arclat = ( -118.04, 34.13)\r\n xpt, ypt = map(Arclng, Arclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Arcadia\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Arcata'):\r\n Arcalng, Arcalat = ( -124.08, 40.86)\r\n xpt, ypt = map(Arcalng, Arcalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Arcata\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Armona'):\r\n Armlng, Armlat = ( -119.70, 36.31 )\r\n xpt, ypt = map(Armlng, Armlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Armona\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Arroyo Grande'):\r\n Arrlng, Arrlat = ( -120.59, 35.12)\r\n xpt, ypt = map(Arrlng, Arrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Arroyo Grande\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Atascadero'):\r\n Atalng, Atalat = ( -120.67, 35.49)\r\n xpt, ypt = map(Atalng, Atalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Atascadero\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Atwater'):\r\n Atwlng, Atwlat = ( -120.61, 37.35)\r\n xpt, ypt = map(Atwlng, Atwlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Atwater\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Auburn'):\r\n Aublng, Aublat = ( -121.08, 38.89)\r\n xpt, ypt = map(Aublng, Aublat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Auburn\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Avenal'):\r\n Avlng, Avlat = ( -120.13, 36.00)\r\n xpt, ypt = map(Avlng, Avlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Avenal\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Avocado Heights'):\r\n Avolng, Avolat = ( -117.99, 34.04)\r\n xpt, ypt = map(Avolng, Avolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Avocado Heights\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Azusa'):\r\n Azlng, Azlat = ( -117.91, 34.13)\r\n xpt, ypt = map(Azlng, 
Azlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Azusa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Bakersfield'):\r\n Baklng, Baklat = ( -119.02, 35.37)\r\n xpt, ypt = map(Baklng, Baklat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Bakersfield\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Balboa Island'):\r\n Ballng, Ballat = ( -117.89, 33.61)\r\n xpt, ypt = map(Ballng, Ballat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Balboa Island\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Baldwin Park'):\r\n Baldlng, Baldlat = ( -117.96, 34.09)\r\n xpt, ypt = map(Baldlng, Baldlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Baldwin Park\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Banning'):\r\n Banlng, Banlat = ( -116.87, 33.93)\r\n xpt, ypt = map(Banlng, Banlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Banning\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Barstow'):\r\n Barlng, Barlat = ( -117.02, 34.89)\r\n xpt, ypt = map(Barlng, Barlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Barstow\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Bass Lake'):\r\n Baslng, Baslat = ( -119.57, 37.32)\r\n xpt, ypt = map(Baslng, Baslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Bass Lake\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Bay Area'):\r\n Baylng, Baylat = ( -122.29, 37.83)\r\n xpt, ypt = map(Baylng, Baylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Bay Area\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Beaumont'):\r\n Bealng, Bealat = ( -116.97, 33.93)\r\n xpt, ypt = map(Bealng, Bealat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Beaumont\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Benicia'):\r\n Benlng, Benlat = ( -122.16, 38.05)\r\n xpt, ypt = map(Benlng, Benlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Benicia\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Berkeley'):\r\n Berlng, Berlat = ( -122.27, 37.87)\r\n xpt, ypt = map(Berlng, Berlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Berkeley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Berry Creek'):\r\n Berrlng, Berrlat = ( -121.40, 39.65)\r\n xpt, ypt = map(Berrlng, Berrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Berry Creek\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Big Bear Lake'):\r\n Biglng, Biglat = ( -116.91, 34.24)\r\n xpt, ypt = map(Biglng, Biglat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Big 
Bear Lake\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Big Pine'):\r\n Bplng, Bplat = ( -118.29, 37.16)\r\n xpt, ypt = map(Bplng, Bplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Big Pine\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Big Sur'):\r\n Bslng, Bslat = ( -121.86, 36.36)\r\n xpt, ypt = map(Bslng, Bslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Big Sur\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Blocksburg'):\r\n Bllng, Bllat = ( -123.64, 40.28)\r\n xpt, ypt = map(Bllng, Bllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Blocksburg\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Blythe'):\r\n Blylng, Blylat = ( -114.59, 33.62)\r\n xpt, ypt = map(Blylng, Blylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Blythe\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Bonny Doon'):\r\n Bolng, Bolat = ( -122.15, 37.04)\r\n xpt, ypt = map(Bolng, Bolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Bonny Doon\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Bonsall'):\r\n Bonlng, Bonlat = ( -117.23, 33.29)\r\n xpt, ypt = map(Bonlng, Bonlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Bonsall\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Borrego Springs'):\r\n Borrlng, Borrlat = ( -116.87, 33.93)\r\n xpt, ypt = map(Borrlng, Borrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Borrego Springs\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Boulder Creek'):\r\n Boulng, Boulat = ( -122.12, 37.13)\r\n xpt, ypt = map(Boulng, Boulat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Boulder Creek\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Boyle Heights'):\r\n Boylng, Boylat = ( -118.21, 34.02)\r\n xpt, ypt = map(Boylng, Boylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Boyle Heights\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Banning'):\r\n Banlng, Banlat = ( -116.87, 33.93)\r\n xpt, ypt = map(Banlng, Banlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Banning\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Brawley'):\r\n Brlng, Brlat = ( -115.53, 32.98)\r\n xpt, ypt = map(Brlng, Brlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Brawley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Brentwood'):\r\n Brelng, Brelat = ( -121.70, 37.93)\r\n xpt, ypt = map(Brelng, Brelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Brentwood\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Buellton'):\r\n Bulng, Bulat 
= ( -120.19, 34.61)\r\n xpt, ypt = map(Bulng, Bulat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Buellton\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Buena Park'):\r\n Buelng, Buelat = ( -117.99, 33.87)\r\n xpt, ypt = map(Buelng, Buelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Buena Park\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Burbank'):\r\n Burlng, Burlat = ( -118.31, 34.18)\r\n xpt, ypt = map(Burlng, Burlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Burbank\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Calimesa'):\r\n Clng, Clat = ( -117.06, 34.00)\r\n xpt, ypt = map(Clng, Clat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Calimesa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Calistoga'):\r\n Callng, Callat = ( -122.58, 38.58)\r\n xpt, ypt = map(Callng, Callat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Calistoga\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Camarillo'):\r\n Camlng, Camlat = ( -119.04, 34.22)\r\n xpt, ypt = map(Camlng, Camlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Camarillo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cambria'):\r\n Camblng, Camblat = ( -121.08, 35.56)\r\n xpt, ypt = map(Camblng, Camblat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cambria\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Campbell'):\r\n Camplng, Camplat = ( -121.95, 37.29)\r\n xpt, ypt = map(Camplng, Camplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Campbell\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Canoga Park'):\r\n Canlng, Canlat = ( -118.61, 34.21)\r\n xpt, ypt = map(Canlng, Canlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Canoga Park\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Canyon Country'):\r\n Canylng, Canylat = ( -118.47, 34.42)\r\n xpt, ypt = map(Canylng, Canylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Canyon Country\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Canyon Lake'):\r\n Canyolng, Canyolat = ( -117.27, 33.69)\r\n xpt, ypt = map(Canyolng, Canyolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Canyon Lake\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Capitola'):\r\n Caplng, Caplat = ( -121.95, 36.98)\r\n xpt, ypt = map(Caplng, Caplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Capitola\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cardiff by the Sea' or citywiththings[y] == 'Cardiff by The Sea'):\r\n Carlng, Carlat = ( -117.28, 33.02)\r\n xpt, ypt = map(Carlng, 
Carlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cardiff By The Sea\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Caribou'):\r\n Carilng, Carilat = ( -121.16, 40.08)\r\n xpt, ypt = map(Carilng, Carilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Caribou\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Carlsbad' or citywiththings[y] == 'Carlsbad/Oceanside'):\r\n Carllng, Carllat = ( -117.35, 33.16)\r\n xpt, ypt = map(Carllng, Carllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Carlsbad\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Carmel Valley' ):\r\n Carmlng, Carmlat = ( -121.73, 36.48)\r\n xpt, ypt = map(Carmlng, Carmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Carmel Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Carmichael' ):\r\n Carmilng, Carmilat = ( -121.33, 38.62)\r\n xpt, ypt = map(Carmilng, Carmilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Carmichael\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Carpenteria' or citywiththings[y] == 'Carpinteria'):\r\n Carplng, Carplat = ( -119.52, 34.40)\r\n xpt, ypt = map(Carplng, Carplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Carpinteria\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Carson' ):\r\n Carslng, Carslat = ( -118.28, 33.83)\r\n xpt, ypt = map(Carslng, Carslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Carson\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Castaic' ):\r\n Castlng, Castlat = ( -118.63, 34.49)\r\n xpt, ypt = map(Castlng, Castlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Castaic\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Castro Valley' ):\r\n Castrlng, Castrlat = ( -122.08, 37.69)\r\n xpt, ypt = map(Castrlng, Castrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Castro Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Catalina Island' ):\r\n Catlng, Catlat = ( -118.42, 33.39) # coordinates approximate\r\n xpt, ypt = map(Catlng, Catlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Catalina Island\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cathedral City' ):\r\n Cathlng, Cathlat = ( -116.47, 33.78)\r\n xpt, ypt = map(Cathlng, Cathlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cathedral City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cayucos' ):\r\n Caylng, Caylat = ( -120.89, 35.44)\r\n xpt, ypt = map(Caylng, Caylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cayucos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cerritos' ):\r\n Cerrlng, Cerrlat 
= ( -118.06, 33.86)\r\n xpt, ypt = map(Cerrlng, Cerrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cerritos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Chatsworth' ):\r\n Chlng, Chlat = ( -118.61, 34.25)\r\n xpt, ypt = map(Chlng, Chlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Chatsworth\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Chico' ):\r\n Chilng, Chilat = ( -121.84, 39.73)\r\n xpt, ypt = map(Chilng, Chilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Chico\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Chino' ):\r\n Chinlng, Chinlat = ( -117.69, 34.01)\r\n xpt, ypt = map(Chinlng, Chinlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Chino\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Chino Hills' ):\r\n Chhlng, Chhlat = ( -117.73, 33.99)\r\n xpt, ypt = map(Chhlng, Chhlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Chino Hills\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Chula Vista' ):\r\n Cvlng, Cvlat = ( -117.08, 32.64)\r\n xpt, ypt = map(Cvlng, Cvlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Chula Vista\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Citrus Heights' ):\r\n Citlng, Citlat = ( -121.28, 38.70)\r\n xpt, ypt = map(Citlng, Citlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Citrus Heights\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'City of Industry' ):\r\n CIlng, CIlat = ( -117.96, 34.02)\r\n xpt, ypt = map(CIlng, CIlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Industry\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Clairemont' ):\r\n Clarlng, Clarlat = ( -117.72, 34.10)\r\n xpt, ypt = map(Clarlng, Clarlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Claremont\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Clayton' ):\r\n Claylng, Claylat = ( -121.94, 37.94)\r\n xpt, ypt = map(Claylng, Claylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Clayton\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Clearlake' ):\r\n Clelng, Clelat = ( -122.63, 38.96)\r\n xpt, ypt = map(Clelng, Clelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Clearlake\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Clovis' ):\r\n Clolng, Clolat = ( -119.70, 36.83)\r\n xpt, ypt = map(Clolng, Clolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Clovis\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Coalinga' ):\r\n Coalng, Coalat = ( -120.36, 36.14)\r\n xpt, ypt = map(Coalng, Coalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize 
= 5, color = 'r')\r\n plt.text(xpt,ypt , \"Coalinga\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cobb' ):\r\n Cobblng, Cobblat = ( -122.72, 38.83)\r\n xpt, ypt = map(Cobblng, Cobblat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cobb\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Caomptche' ):\r\n Caolng, Caolat = ( -123.59, 39.26)\r\n xpt, ypt = map(Caolng, Caolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Caomptche\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Concord' ):\r\n Conlng, Conlat = ( -122.03, 37.98)\r\n xpt, ypt = map(Conlng, Conlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Concord\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cool' ):\r\n Coolng, Coolat = ( -121.02, 38.89)\r\n xpt, ypt = map(Coolng, Coolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cool\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Copperopolis' ):\r\n Coplng, Coplat = ( -120.64, 37.98)\r\n xpt, ypt = map(Coplng, Coplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Copperopolis\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cornell' ):\r\n Cornlng, Cornlat = ( -118.78, 34.11)\r\n xpt, ypt = map(Cornlng, Cornlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cornell\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Corona' ):\r\n Corlng, Corlat = ( -117.57, 33.88)\r\n xpt, ypt = map(Corlng, Corlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Corona\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Corona del Mar' ):\r\n Cdmlng, Cdmlat = ( -117.87, 33.60)\r\n xpt, ypt = map(Cdmlng, Cdmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Corona del Mar\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Corte Madera' ):\r\n Ctelng, Ctelat = ( -122.53, 37.93)\r\n xpt, ypt = map(Ctelng, Ctelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Corte Madera\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Costa Mesa' ):\r\n Cmlng, Cmlat = ( -117.92, 33.64)\r\n xpt, ypt = map(Cmlng, Cmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Costa Mesa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cotati' ):\r\n Cotlng, Cotlat = ( -122.71, 38.33)\r\n xpt, ypt = map(Cotlng, Cotlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cotati\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Coto de Caza' ):\r\n Cclng, Cclat = ( -117.59, 33.60)\r\n xpt, ypt = map(Cclng, Cclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Coto de Caza\" , fontsize = 7, fontweight = 'bold', color = 
'#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cottonwood' ):\r\n Cwlng, Cwlat = ( -122.28, 40.39)\r\n xpt, ypt = map(Cwlng, Cwlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cottonwood\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Covina' or citywiththings[y] == 'West Covina' ):\r\n Covlng, Covlat = ( -117.89, 34.09)\r\n xpt, ypt = map(Covlng, Covlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Covina\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Crescent City' ):\r\n Crelng, Crelat = ( -124.20, 41.76)\r\n xpt, ypt = map(Crelng, Crelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Crescent City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Crescent Mills' ):\r\n Creslng, Creslat = ( -120.91, 40.10)\r\n xpt, ypt = map(Creslng, Creslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Crescent Mills\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Crestline' ):\r\n Crestlng, Crestlat = ( -117.29, 34.24)\r\n xpt, ypt = map(Crestlng, Crestlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Crestline\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Culver City' ):\r\n Cullng, Cullat = ( -118.40, 34.02)\r\n xpt, ypt = map(Cullng, Cullat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Culver City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Cypress' ):\r\n Cylng, Cylat = ( -118.04, 33.82)\r\n xpt, ypt = map(Cylng, Cylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Cypress\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Daly City' ):\r\n Dlng, Dlat = ( -122.47, 37.69) # coordinates approximate\r\n xpt, ypt = map(Dlng, Dlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Daly City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Dana Point' ):\r\n Dalng, Dalat = ( -117.70, 33.47)\r\n xpt, ypt = map(Dalng, Dalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Dana Point\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Danville' ):\r\n Danlng, Danlat = ( -122.00, 37.82)\r\n xpt, ypt = map(Danlng, Danlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Danville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Del Mar' ):\r\n Dellng, Dellat = ( -117.27, 32.96)\r\n xpt, ypt = map(Dellng, Dellat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Del Mar\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Del Rosa' ):\r\n Delng, Delat = ( -117.25, 34.16)\r\n xpt, ypt = map(Delng, Delat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Del Rosa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Desert Center' ):\r\n Deslng, 
Deslat = ( -115.40, 33.71)\r\n xpt, ypt = map(Deslng, Deslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Desert Center\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Desert Hot Springs' ):\r\n Desslng, Desslat = ( -116.50, 33.96)\r\n xpt, ypt = map(Desslng, Desslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Desert Hot Springs\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Diamond Bar' ):\r\n Dilng, Dilat = ( -117.81, 34.03)\r\n xpt, ypt = map(Dilng, Dilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Diamond Bar\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Discovery Bay' ):\r\n Dislng, Dislat = ( -121.60, 37.91)\r\n xpt, ypt = map(Dislng, Dislat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Discovery Bay\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Downey' ):\r\n Dolng, Dolat = ( -118.13, 33.94)\r\n xpt, ypt = map(Dolng, Dolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Downey\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Duarte' ):\r\n Dulng, Dulat = ( -117.98, 34.14)\r\n xpt, ypt = map(Dulng, Dulat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Duarte\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Dumont Dune National Park' ):\r\n Dumlng, Dumlat = ( -116.21, 35.68)\r\n xpt, ypt = map(Dumlng, Dumlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Dumont Dunes\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'East Hemet' ):\r\n Elng, Elat = ( -116.94, 33.74)\r\n xpt, ypt = map(Elng, Elat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"East Hemet\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'East Los Angeles' ):\r\n Ealng, Ealat = ( -118.17, 34.02)\r\n xpt, ypt = map(Ealng, Ealat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"East Los Angeles\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'East Palo Alto' ):\r\n Easlng, Easlat = ( -122.14, 37.47)\r\n xpt, ypt = map(Easlng, Easlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"East Palo Alto\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Eastvale' ):\r\n Eastlng, Eastlat = ( -117.58, 33.95)\r\n xpt, ypt = map(Eastlng, Eastlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Eastvale\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Edwards' ):\r\n Edlng, Edlat = ( -117.89, 34.92)\r\n xpt, ypt = map(Edlng, Edlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Edwards\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'El Cajon' or citywiththings[y] == 'EL Cajon'):\r\n Ejlng, Ejlat = ( -116.96, 32.79)\r\n xpt, 
ypt = map(Ejlng, Ejlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"El Cajon\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'El Cerrito' ):\r\n Eclng, Eclat = ( -122.31, 37.92)\r\n xpt, ypt = map(Eclng, Eclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"El Cerrito\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'El Dorato' ):\r\n Edlng, Edlat = ( -120.44, 38.74)\r\n xpt, ypt = map(Edlng, Edlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"El Dorado\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'El Monte' or citywiththings[y] == 'South El Monte' ):\r\n Emlng, Emlat = ( -118.03, 34.07)\r\n xpt, ypt = map(Emlng, Emlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"El Monte\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'El Segundo' ):\r\n Eslng, Eslat = ( -118.42, 33.92)\r\n xpt, ypt = map(Eslng, Eslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"El Segundo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'El Soreno' ):\r\n Esslng, Esslat = ( -118.18, 34.07)\r\n xpt, ypt = map(Esslng, Esslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"El Sereno\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Elk Grove' ):\r\n Elklng, Elklat = ( -121.37, 38.41)\r\n xpt, ypt = map(Elklng, Elklat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Elk Grove\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Elverta' ):\r\n Evlng, Evlat = ( -121.46, 38.72)\r\n xpt, ypt = map(Evlng, Evlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Elverta\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Emeryville' ):\r\n Emylng, Emylat = ( -122.29, 37.83)\r\n xpt, ypt = map(Emylng, Emylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Emeryville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Encinitas' or citywiththings[y] == 'Encinitas Carlsbad' or citywiththings[y] == 'Encintas' ):\r\n Enclng, Enclat = ( -117.29,33.04)\r\n xpt, ypt = map(Enclng, Enclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Encinitas\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Encino' ):\r\n Enclng, Enclat = ( -118.52, 34.15)\r\n xpt, ypt = map(Enclng, Enclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Encino\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Escalon' ):\r\n Esclng, Esclat = ( -121.00, 37.80)\r\n xpt, ypt = map(Esclng, Esclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Escalon\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Escondido' ):\r\n Ecslng, Ecslat = ( -117.09, 33.12)\r\n xpt, ypt = map(Ecslng, 
Ecslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Escondido\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Eureka' ):\r\n Eurlng, Eurlat = ( -124.16, 40.80)\r\n xpt, ypt = map(Eurlng, Eurlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Eureka\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fair Oaks' ):\r\n Flng, Flat = ( -121.27, 38.64)\r\n xpt, ypt = map(Flng, Flat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fair Oaks\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fairfield' ):\r\n Falng, Falat = ( -122.04, 38.25)\r\n xpt, ypt = map(Falng, Falat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fairfield\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fall River Mills' ):\r\n Fallng, Fallat = ( -121.44, 41.00)\r\n xpt, ypt = map(Fallng, Fallat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fall River Mills\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fallbrook' ):\r\n Fablng, Fablat = ( -117.25, 33.38)\r\n xpt, ypt = map(Fablng, Fablat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fallbrook\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fillmore' or citywiththings[y] == 'Filmore'):\r\n Filng, Filat = ( -118.92, 34.40)\r\n xpt, ypt = map(Filng, Filat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fillmore\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Firebaugh' ):\r\n Firlng, Firlat = ( -120.46, 36.86)\r\n xpt, ypt = map(Firlng, Firlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Firebaugh\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Folsom' ):\r\n Folng, Folat = ( -121.18, 38.68)\r\n xpt, ypt = map(Folng, Folat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Folsom\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fontana' ):\r\n Fonlng, Fonlat = ( -117.44, 34.09)\r\n xpt, ypt = map(Fonlng, Fonlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fontana\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Foresthill' ):\r\n Forlng, Forlat = ( -120.82, 39.02)\r\n xpt, ypt = map(Forlng, Forlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Foresthill\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'fort bragg' or citywiththings[y] == 'Fort Bragg' ):\r\n Ftlng, Ftlat = ( -123.81, 39.45)\r\n xpt, ypt = map(Ftlng, Ftlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fort Bragg\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fortuna' ):\r\n Fnlng, Fnlat = ( -124.16, 40.60)\r\n xpt, ypt = map(Fnlng, Fnlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, 
color = 'r')\r\n plt.text(xpt,ypt , \"Fortuna\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fountain Valley' ):\r\n Fvlng, Fvlat = ( -117.95, 33.71)\r\n xpt, ypt = map(Fvlng, Fvlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fountain Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Frazier Park' ):\r\n Frlng, Frlat = ( -118.94, 34.82)\r\n xpt, ypt = map(Frlng, Frlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Frazier Park\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fremont' ):\r\n Frelng, Frelat = ( -121.99, 37.55)\r\n xpt, ypt = map(Frelng, Frelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fremont\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fresno' or citywiththings[y] == 'Fresno County' ):\r\n Fslng, Fslat = ( -119.77, 36.75)\r\n xpt, ypt = map(Fslng, Fslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fresno\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Fullerton' ):\r\n Fullng, Fullat = ( -117.92, 33.87)\r\n xpt, ypt = map(Fullng, Fullat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Fullerton\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Galt' ):\r\n Glng, Glat = ( -121.30, 38.25)\r\n xpt, ypt = map(Glng, Glat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Galt\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Garden Grove' ):\r\n Galng, Galat = ( -117.94, 33.77)\r\n xpt, ypt = map(Galng, Galat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Garden Grove\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Geyserville' ):\r\n Gylng, Gylat = ( -122.90, 38.71)\r\n xpt, ypt = map(Gylng, Gylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Geyserville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Gilroy' ):\r\n Gilng, Gilat = ( -121.57, 37.01)\r\n xpt, ypt = map(Gilng, Gilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Gilroy\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Glendale' ):\r\n Gllng, Gllat = ( -118.26, 34.14)\r\n xpt, ypt = map(Gllng, Gllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Glendale\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Glendora' ):\r\n Glelng, Glelat = ( -117.87, 34.14)\r\n xpt, ypt = map(Glelng, Glelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Glendora\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Goleta' ):\r\n Golng, Golat = ( -119.83, 34.44 )\r\n xpt, ypt = map(Golng, Golat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Goleta\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n 
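# --- Editor's note (hypothetical sketch, not part of the original program): the\r\n # long chain of per-city if-blocks in this loop could be collapsed into a single\r\n # lookup table, which would also make coordinate typos easier to spot:\r\n # CITY_COORDS = {'Alameda': (-122.30, 37.76), 'Anaheim': (-117.91, 33.83)}\r\n # for name in citywiththings:\r\n # if name in CITY_COORDS:\r\n # lng, lat = CITY_COORDS[name]\r\n # xpt, ypt = map(lng, lat, inverse = False)\r\n # map.plot(xpt, ypt, 'D', markersize = 5, color = 'r')\r\n # plt.text(xpt, ypt, name, fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n # CITY_COORDS is a hypothetical name; the two sample coordinates are copied from\r\n # the Alameda and Anaheim blocks above. ---\r\n 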
if(citywiththings[y] == 'Grand Terrace' ):\r\n Grlng, Grlat = ( -117.31, 34.03)\r\n xpt, ypt = map(Grlng, Grlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Grand Terrace\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Granite Bay' ):\r\n Gralng, Gralat = ( -121.16, 38.76)\r\n xpt, ypt = map(Gralng, Gralat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Granite Bay\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Grass Valley' ):\r\n Graslng, Graslat = ( -121.06, 39.22)\r\n xpt, ypt = map(Graslng, Graslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Grass Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Grover Beach' ):\r\n Grolng, Grolat = ( -120.62, 35.12)\r\n xpt, ypt = map(Grolng, Grolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Grover Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Gustine' ):\r\n Guslng, Guslat = ( -121.00, 37.26)\r\n xpt, ypt = map(Guslng, Guslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Gustine\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Half Moon Bay' ):\r\n Hlng, Hlat = ( -122.43, 37.46)\r\n xpt, ypt = map(Hlng, Hlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Half Moon Bay\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hanford' ):\r\n Halng, Halat = ( -119.65, 36.33)\r\n xpt, ypt = map(Halng, Halat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hanford\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hawthorne' ):\r\n Hawlng, Hawlat = ( -118.35, 33.92)\r\n xpt, ypt = map(Hawlng, Hawlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hawthorne\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hayward' ):\r\n Haylng, Haylat = ( -122.08, 37.67)\r\n xpt, ypt = map(Haylng, Haylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hayward\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Healdsburg' ):\r\n Healng, Healat = ( -122.87, 38.61)\r\n xpt, ypt = map(Healng, Healat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Healdsburg\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hemet' ):\r\n Helng, Helat = ( -116.97, 33.75)\r\n xpt, ypt = map(Helng, Helat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hemet\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hermosa Beach' ):\r\n Herlng, Herlat = ( -118.40, 33.86)\r\n xpt, ypt = map(Herlng, Herlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hermosa Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hesperia' ):\r\n Heslng, Heslat = ( -117.30, 34.43)\r\n xpt, ypt = map(Heslng, 
Heslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hesperia\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hidden Valley' ):\r\n Hilng, Hilat = ( -122.56, 38.81)\r\n xpt, ypt = map(Hilng, Hilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hidden Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Highland' or citywiththings[y] == 'Highland Park' ):\r\n Higlng, Higlat = ( -117.21, 34.13)\r\n xpt, ypt = map(Higlng, Higlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Highland\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hinkley' ):\r\n Hylng, Hylat = ( -117.20, 34.94)\r\n xpt, ypt = map(Hylng, Hylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hinkley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hollister' ):\r\n Holng, Holat = ( -121.40, 36.85)\r\n xpt, ypt = map(Holng, Holat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hollister\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Hollywood' or citywiththings[y] == 'N. Hollywood' or citywiththings[y] == 'North Hollywood' or citywiththings[y] == 'West Hollywood'):\r\n Holllng, Holllat = ( -118.33, 34.09)\r\n xpt, ypt = map(Holllng, Holllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Hollywood\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Huntington Beach' ):\r\n Hblng, Hblat = ( -118.00, 33.66)\r\n xpt, ypt = map(Hblng, Hblat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Huntington Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Idyllwild' ):\r\n Ilng, Ilat = ( -116.74, 33.74)\r\n xpt, ypt = map(Ilng, Ilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Idyllwild\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Imperial Beach' ):\r\n Iblng, Iblat = ( -117.11, 32.58)\r\n xpt, ypt = map(Iblng, Iblat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Imperial Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Indio' ):\r\n Inlng, Inlat = ( -116.21, 33.72)\r\n xpt, ypt = map(Inlng, Inlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Indio\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Irvine' ):\r\n Irlng, Irlat = ( -117.79, 33.68)\r\n xpt, ypt = map(Irlng, Irlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Irvine\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Isla Vista' ):\r\n Islng, Islat = ( -119.86, 34.41)\r\n xpt, ypt = map(Islng, Islat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Isla Vista\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Jackson' ):\r\n Jlng, Jlat = ( -120.77, 38.35)\r\n 
xpt, ypt = map(Jlng, Jlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Jackson\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Jamul' ):\r\n Jalng, Jalat = ( -116.88, 32.72)\r\n xpt, ypt = map(Jalng, Jalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Jamul\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Kelseyville' ):\r\n Klng, Klat = ( -122.84, 38.99)\r\n xpt, ypt = map(Klng, Klat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Kelseyville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Kerman' ):\r\n Kelng, Kelat = ( -120.06, 36.72)\r\n xpt, ypt = map(Kelng, Kelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Kerman\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Kings Beach' ):\r\n Kilng, Kilat = ( -120.03, 39.24)\r\n xpt, ypt = map(Kilng, Kilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Kings Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Kingsburg' ):\r\n Kinlng, Kinlat = ( -119.55, 36.51)\r\n xpt, ypt = map(Kinlng, Kinlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Kingsburg\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Knights Landing' ):\r\n Knlng, Knlat = ( -121.72, 38.80)\r\n xpt, ypt = map(Knlng, Knlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Knights Landing\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'La Canada' or citywiththings[y] == 'La Canada Flintridge'):\r\n Llng, Llat = ( -118.20, 34.21)\r\n xpt, ypt = map(Llng, Llat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"La Canada Flintridge\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'La Crescenta' ):\r\n Lalng, Lalat = ( -118.24, 34.23)\r\n xpt, ypt = map(Lalng, Lalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"La Crescenta\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'La Habra' ):\r\n Lhlng, Lhlat = ( -117.95, 33.93)\r\n xpt, ypt = map(Lhlng, Lhlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"La Habra\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'La Jolla' ):\r\n Ljlng, Ljlat = ( -117.27, 32.83)\r\n xpt, ypt = map(Ljlng, Ljlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"La Jolla\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'La Mesa' ):\r\n Lmlng, Lmlat = ( -117.02, 32.77)\r\n xpt, ypt = map(Lmlng, Lmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"La Mesa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'La Mirada' ):\r\n Lmilng, Lmilat = ( -118.01, 33.92)\r\n xpt, ypt = map(Lmilng, Lmilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 
'r')\r\n plt.text(xpt,ypt , \"La Mirada\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'La Puente' ):\r\n Lplng, Lplat = ( -117.95, 34.02)\r\n xpt, ypt = map(Lplng, Lplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"La Puente\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'La Quinta' ):\r\n Lqlng, Lqlat = ( -116.31, 33.66)\r\n xpt, ypt = map(Lqlng, Lqlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"La Quinta\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Ladera Ranch' ):\r\n Ldlng, Ldlat = ( -117.64, 33.55)\r\n xpt, ypt = map(Ldlng, Ldlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Ladera Ranch\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lafayette' ):\r\n Lflng, Lflat = ( -122.12, 37.89)\r\n xpt, ypt = map(Lflng, Lflat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lafayette\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Laguna Beach' or citywiththings[y] == 'Laguna beach'):\r\n Lablng, Lablat = ( -117.78, 33.54)\r\n xpt, ypt = map(Lablng, Lablat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Laguna Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Laguna Niguel' ):\r\n Lanlng, Lanlat = ( -117.71, 33.52)\r\n xpt, ypt = map(Lanlng, Lanlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Laguna Niguel\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Laguna Woods' ):\r\n Lwlng, Lwlat = ( -117.73, 33.61)\r\n xpt, ypt = map(Lwlng, Lwlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Laguna Woods\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lake Alpine' ):\r\n Laalng, Laalat = ( -120.00, 38.48)\r\n xpt, ypt = map(Laalng, Laalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lake Alpine\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lake Arrowhead' ):\r\n Laklng, Laklat = ( -117.19, 34.25)\r\n xpt, ypt = map(Laklng, Laklat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lake Arrowhead\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lake Elsinore' ):\r\n Leelng, Leelat = ( -117.33, 33.67)\r\n xpt, ypt = map(Leelng, Leelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lake Elsinore\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lake Forest' ):\r\n Lflng, Lflat = ( -117.69, 33.65)\r\n xpt, ypt = map(Lflng, Lflat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lake Forest\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lake Port' or citywiththings[y] == 'Lakeport'):\r\n Lplng, Lplat = ( -122.92, 39.04)\r\n xpt, ypt = map(Lplng, Lplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n 
plt.text(xpt,ypt , \"Lakeport\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lakeside' ):\r\n Lslng, Lslat = ( -116.92, 32.86)\r\n xpt, ypt = map(Lslng, Lslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lakeside\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lakewood' ):\r\n Loolng, Loolat = ( -118.13, 33.85)\r\n xpt, ypt = map(Loolng, Loolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lakewood\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lancaster' ):\r\n Lrrlng, Lrrlat = ( -118.15, 34.69)\r\n xpt, ypt = map(Lrrlng, Lrrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lancaster\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lawndale' ):\r\n Lddlng, Lddlat = ( -118.35, 33.89)\r\n xpt, ypt = map(Lddlng, Lddlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lawndale\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Laytonville' ):\r\n Lylng, Lylat = ( -123.48, 39.69)\r\n xpt, ypt = map(Lylng, Lylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Laytonville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lemoore' ):\r\n Lmrlng, Lmrlat = ( -119.78, 36.30)\r\n xpt, ypt = map(Lmrlng, Lmrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lemoore\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lennox' ):\r\n Lxlng, Lxlat = ( -118.35, 33.94)\r\n xpt, ypt = map(Lxlng, Lxlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lennox\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lenwood' ):\r\n Lwrrlng, Lwrrlat = ( -117.10, 34.88)\r\n xpt, ypt = map(Lwrrlng, Lwrrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lenwood\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lincoln' ):\r\n Lnlng, Lnlat = ( -121.29, 38.89)\r\n xpt, ypt = map(Lnlng, Lnlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lincoln\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Litchfield' ):\r\n Litlng, Litlat = ( -120.39, 40.38)\r\n xpt, ypt = map(Litlng, Litlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Litchfield\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Livermore' ):\r\n Lvrlng, Lvrlat = ( -121.77, 37.68)\r\n xpt, ypt = map(Lvrlng, Lvrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Livermore\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Livingston' ):\r\n Lvglng, Lvglat = ( -120.72, 37.39)\r\n xpt, ypt = map(Lvglng, Lvglat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Livingston\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Llano' ):\r\n 
Lllng, Lllat = ( -117.82, 34.51)\r\n xpt, ypt = map(Lllng, Lllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Llano\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lodi' ):\r\n Lodlng, Lodlat = ( -121.27, 38.13)\r\n xpt, ypt = map(Lodlng, Lodlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lodi\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Loleta' ):\r\n Lollng, Lollat = ( -124.23, 40.64)\r\n xpt, ypt = map(Lollng, Lollat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Loleta\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Loma Linda' ):\r\n Lmllng, Lmllat = ( -117.26, 34.05)\r\n xpt, ypt = map(Lmllng, Lmllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Loma Linda\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lomira' ):\r\n Lmrlng, Lmrlat = ( -118.32, 33.79)\r\n xpt, ypt = map(Lmrlng, Lmrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lomita\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lompoc' ):\r\n Lpclng, Lpclat = ( -120.46, 34.64)\r\n xpt, ypt = map(Lpclng, Lpclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lompoc\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Long Beach' or citywiththings[y] == 'Long Beach/Los Angles'):\r\n Lnglng, Lnglat = ( -118.19, 33.77)\r\n xpt, ypt = map(Lnglng, Lnglat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Long Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Loomis' ):\r\n Loomlng, Loomlat = ( -121.19, 38.82)\r\n xpt, ypt = map(Loomlng, Loomlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Loomis\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Los Alamitos' ):\r\n Llalng, Llalat = ( -118.07, 33.80)\r\n xpt, ypt = map(Llalng, Llalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Los Alamitos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Los Alamos' ):\r\n Lsalng, Lsalat = ( -120.28, 34.74)\r\n xpt, ypt = map(Lsalng, Lsalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Los Alamos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Los Angeles' or citywiththings[y] == 'Los Angeles/Burbank/Palmdale' or citywiththings[y] == 'West Los Angeles' or citywiththings[y] == 'Westchester'):\r\n Losalng, Losalat = ( -118.24, 34.05)\r\n xpt, ypt = map(Losalng, Losalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Los Angeles\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Los Banos' ):\r\n Lbalng, Lbalat = ( -120.85, 37.06)\r\n xpt, ypt = map(Lbalng, Lbalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Los Banos\" , fontsize = 7, fontweight = 'bold', color = 
'#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Los Gatos' ):\r\n Lgatolng, Lgatolat = ( -121.96, 37.24)\r\n xpt, ypt = map(Lgatolng, Lgatolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Los Gatos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Los Olivos' ):\r\n Llvlng, Llvlat = ( -120.12, 34.67)\r\n xpt, ypt = map(Llvlng, Llvlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Los Olivos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lucerne Valley' ):\r\n Luvlng, Luvlat = ( -116.97, 34.44)\r\n xpt, ypt = map(Luvlng, Luvlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lucerne Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Lynwood' ):\r\n Lynlng, Lynlat = ( -118.21, 33.93)\r\n xpt, ypt = map(Lynlng, Lynlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Lynwood\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Madera' or citywiththings[y] == 'Madera Ranchos' ):\r\n Mlng, Mlat = ( -120.06, 36.96)\r\n xpt, ypt = map(Mlng, Mlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Madera\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Magalia' ):\r\n Mglng, Mglat = ( -121.588, 39.81)\r\n xpt, ypt = map(Mglng, Mglat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Magalia\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Malibu' ):\r\n Mallng, Mallat = ( -118.78, 34.03)\r\n xpt, ypt = map(Mallng, Mallat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Malibu\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Mammoth Lakes' ):\r\n Mamlng, Mamlat = ( -118.97, 37.65)\r\n xpt, ypt = map(Mamlng, Mamlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Mammoth Lakes\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Manhattan Beach' ):\r\n Mthlng, Mthlat = ( -118.41, 33.88)\r\n xpt, ypt = map(Mthlng, Mthlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Manhattan Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Manteca' ):\r\n Mtelng, Mtelat = ( -121.22, 37.80)\r\n xpt, ypt = map(Mtelng, Mtelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Manteca\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Marin County' ):\r\n Mrnlng, Mrnlat = ( -122.76, 38.08)\r\n xpt, ypt = map(Mrnlng, Mrnlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Marin County\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Marina del Rey' or citywiththings[y] == 'Marina Del Rey' ):\r\n Mdrlng, Mdrlat = ( -118.45, 33.98)\r\n xpt, ypt = map(Mdrlng, Mdrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Marina del Rey\" , fontsize = 7, fontweight = 'bold', color = 
'#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mariposa' ):\r\n            Mpolng, Mpolat = ( -119.97, 37.48)\r\n            xpt, ypt = map(Mpolng, Mpolat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mariposa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Martinez' ):\r\n            Mzlng, Mzlat = ( -122.13, 38.02)\r\n            xpt, ypt = map(Mzlng, Mzlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Martinez\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Marysville' ):\r\n            Mvlng, Mvlat = ( -121.59, 39.15)\r\n            xpt, ypt = map(Mvlng, Mvlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Marysville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Maywood' ):\r\n            Mdlng, Mdlat = ( -118.19, 33.99)\r\n            xpt, ypt = map(Mdlng, Mdlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Maywood\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mckittrick' ):\r\n            Mcklng, Mcklat = ( -119.62, 35.31)\r\n            xpt, ypt = map(Mcklng, Mcklat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mckittrick\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mecca' ):\r\n            Mecclng, Mecclat = ( -116.08, 33.57)\r\n            xpt, ypt = map(Mecclng, Mecclat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mecca\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Menifee' or citywiththings[y] == 'Menifeeca' ):\r\n            Mfeelng, Mfeelat = ( -117.19, 33.70)\r\n            xpt, ypt = map(Mfeelng, Mfeelat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Menifee\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mentone' ):\r\n            Mtnelng, Mtnelat = ( -117.13, 34.07)\r\n            xpt, ypt = map(Mtnelng, Mtnelat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mentone\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Merced' ):\r\n            Merclng, Merclat = ( -120.48, 37.30)\r\n            xpt, ypt = map(Merclng, Merclat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Merced\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mercy Hot Springs' ):\r\n            Mhslng, Mhslat = ( -120.85, 36.70)\r\n            xpt, ypt = map(Mhslng, Mhslat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mercey Hot Springs\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Milpitas' ):\r\n            Milplng, Milplat = ( -121.90, 37.43)\r\n            xpt, ypt = map(Milplng, Milplat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Milpitas\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mira Loma' ):\r\n            Miralng, Miralat = ( -117.52, 33.98)\r\n            xpt, ypt = map(Miralng, Miralat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mira Loma\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mission Viejo' ):\r\n            Mvvlng, 
Mvvlat = ( -117.667, 33.60)\r\n xpt, ypt = map(Mvvlng, Mvvlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Mission Viejo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Modesto' ):\r\n Mooolng, Mooolat = ( -121.00, 37.64)\r\n xpt, ypt = map(Mooolng, Mooolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Modesto\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Mojave' ):\r\n Mjvlng, Mjvlat = ( -118.17, 35.05)\r\n xpt, ypt = map(Mjvlng, Mjvlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Mojave\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Monrovia' ):\r\n Mvilng, Mvilat = ( -118.00, 34.14)\r\n xpt, ypt = map(Mvilng, Mvilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Monrovia\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Montague' ):\r\n Mtgelng, Mtgelat = ( -122.53, 41.73)\r\n xpt, ypt = map(Mtgelng, Mtgelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Montague\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Montara' ):\r\n Mtatalng, Mtatalat = ( -122.52, 37.54)\r\n xpt, ypt = map(Mtatalng, Mtatalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Montara\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Montclair' ):\r\n Mairlng, Mairlat = ( -117.69, 34.08)\r\n xpt, ypt = map(Mairlng, Mairlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Montclair\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Montebello' ):\r\n Mllolng, Mllolat = ( -118.11, 34.02)\r\n xpt, ypt = map(Mllolng, Mllolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Montebello\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Monterey' or citywiththings[y] == 'Monterey Bay' or citywiththings[y] == 'Monterey Park' ):\r\n Mterlng, Mterlat = ( -121.89, 36.60)\r\n xpt, ypt = map(Mterlng, Mterlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Monterey Bay\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Moorpark' ):\r\n Mpklng, Mpklat = ( -118.88, 34.29)\r\n xpt, ypt = map(Mpklng, Mpklat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Moorpark\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Moreno Valley' ):\r\n Mvllng, Mvllat = ( -117.23, 33.94)\r\n xpt, ypt = map(Mvllng, Mvllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Moreno Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Morgan Hill' ):\r\n Mghlng, Mghlat = ( -121.65, 37.13)\r\n xpt, ypt = map(Mghlng, Mghlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Morgan Hill\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Moro Bay' or 
citywiththings[y] == 'Morro Bay'):\r\n            Morrlng, Morrlat = ( -120.85, 35.37)\r\n            xpt, ypt = map(Morrlng, Morrlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Morro Bay\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Morongo Valley' ):\r\n            Mrvlllng, Mrvlllat = ( -116.58, 34.05)\r\n            xpt, ypt = map(Mrvlllng, Mrvlllat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Morongo Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mount Shasta' or citywiththings[y] == 'Mt. Shasta' ):\r\n            Mshhlng, Mshhlat = ( -122.31, 41.31)\r\n            xpt, ypt = map(Mshhlng, Mshhlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mount Shasta\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mountain View' ):\r\n            Mtnvlng, Mtnvlat = ( -122.08, 37.39)\r\n            xpt, ypt = map(Mtnvlng, Mtnvlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mountain View\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Mt. Laguna' ):\r\n            Mtnlllng, Mtnlllat = ( -116.42, 32.87)\r\n            xpt, ypt = map(Mtnlllng, Mtnlllat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Mount Laguna\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Murrieta' or citywiththings[y] == 'Murrietta' ):\r\n            Murrrlng, Murrrlat = ( -117.21, 33.55)\r\n            xpt, ypt = map(Murrrlng, Murrrlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Murrieta\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Napa' ):\r\n            Nlng, Nlat = ( -122.29, 38.30)\r\n            xpt, ypt = map(Nlng, Nlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Napa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'National City' ):\r\n            Nalng, Nalat = ( -117.10, 32.68)\r\n            xpt, ypt = map(Nalng, Nalat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"National City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Near Woody' ):\r\n            NWWlng, NWWlat = ( -118.83, 35.70)\r\n            xpt, ypt = map(NWWlng, NWWlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Woody\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Needles' ):\r\n            Neelng, Neelat = ( -114.61, 34.85)\r\n            xpt, ypt = map(Neelng, Neelat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Needles\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Newberry Springs' ):\r\n            Newlng, Newlat = ( -116.69, 34.83)\r\n            xpt, ypt = map(Newlng, Newlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Newberry Springs\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Newbury Park' ):\r\n            Newplng, Newplat = ( -118.91, 34.18)\r\n            xpt, ypt = map(Newplng, Newplat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Newbury Park\" , fontsize = 7, fontweight = 'bold', color 
= '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Newhall' ):\r\n Newhlng, Newhlat = ( -118.53, 34.38)\r\n xpt, ypt = map(Newhlng, Newhlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Newhall\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Newman' ):\r\n Newmlng, Newmlat = ( -121.02, 37.31)\r\n xpt, ypt = map(Newmlng, Newmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Newman\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Newport Beach' ):\r\n Nwblng, Nwblat = ( -117.93, 33.62)\r\n xpt, ypt = map(Nwblng, Nwblat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Newport Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Nipomo' ):\r\n Noplng, Noplat = ( -120.48, 35.04)\r\n xpt, ypt = map(Noplng, Noplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Nipomo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Norwalk' ):\r\n Norlng, Norlat = ( -118.08, 33.90)\r\n xpt, ypt = map(Norlng, Norlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Norwalk\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Novato' ):\r\n Novlng, Novlat = ( -122.57, 38.11)\r\n xpt, ypt = map(Novlng, Novlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Novato\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Nuevo' ):\r\n Nulng, Nulat = ( -117.15, 33.80)\r\n xpt, ypt = map(Nulng, Nulat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Nuevo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Oakdale' ):\r\n Olng, Olat = ( -120.85, 37.77)\r\n xpt, ypt = map(Olng, Olat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Oakdale\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Oakland' ):\r\n Oalng, Oalat = ( -122.27, 37.80)\r\n xpt, ypt = map(Oalng, Oalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Oakland\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Oakley' ):\r\n Oklng, Oklat = ( -121.71, 38.00)\r\n xpt, ypt = map(Oklng, Oklat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Oakley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Ocean Beach' ):\r\n OBlng, OBlat = ( -117.25, 32.75)\r\n xpt, ypt = map(OBlng, OBlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Ocean Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Ocean Cove' ):\r\n OClng, OClat = ( -123.30, 38.56)\r\n xpt, ypt = map(OClng, OClat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Ocean Cove\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Oceano' ):\r\n Oolng, Oolat = ( -120.61, 35.10)\r\n xpt, ypt = map(Oolng, Oolat, inverse = False)\r\n 
map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Oceano\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Oceanside' ):\r\n            Osslng, Osslat = ( -117.38, 33.20)\r\n            xpt, ypt = map(Osslng, Osslat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Oceanside\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Oildale' ):\r\n            Oilng, Oilat = ( -119.02, 35.42)\r\n            xpt, ypt = map(Oilng, Oilat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Oildale\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Ontario' ):\r\n            Ontlng, Ontlat = ( -117.65, 34.06)\r\n            xpt, ypt = map(Ontlng, Ontlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Ontario\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Orange' or citywiththings[y] == 'Orange County' or citywiththings[y] == 'Orange County area'):\r\n            Orglng, Orglat = ( -117.85, 33.79)\r\n            xpt, ypt = map(Orglng, Orglat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Orange\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Orangevale' ):\r\n            Oralng, Oralat = ( -121.23, 38.68)\r\n            xpt, ypt = map(Oralng, Oralat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Orangevale\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Organ House' ):\r\n            OrHlng, OrHlat = ( -121.27, 39.34)\r\n            xpt, ypt = map(OrHlng, OrHlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Oregon House\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Orland' ):\r\n            Orllng, Orllat = ( -122.20, 39.75)\r\n            xpt, ypt = map(Orllng, Orllat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Orland\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Orville' ):\r\n            Orvlng, Orvlat = ( -121.56, 39.51)\r\n            xpt, ypt = map(Orvlng, Orvlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Oroville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Otay Mesa' ):\r\n            Otalng, Otalat = ( -116.97, 32.56)\r\n            xpt, ypt = map(Otalng, Otalat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Otay\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Oxnard' ):\r\n            Oxlng, Oxlat = ( -119.18, 34.20)\r\n            xpt, ypt = map(Oxlng, Oxlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Oxnard\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Ozena' ):\r\n            Ozlng, Ozlat = ( -119.33, 34.69)\r\n            xpt, ypt = map(Ozlng, Ozlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Ozena Campground\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Pacheco' ):\r\n            Plng, Plat = ( -122.08, 37.98)\r\n            xpt, ypt = map(Plng, Plat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , 
\"Pacheco\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pacific Grove' ):\r\n Pclng, Pclat = ( -121.92, 36.62)\r\n xpt, ypt = map(Pclng, Pclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pacific Grove\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pacifica' ):\r\n Pflng, Pflat = ( -122.49, 37.61)\r\n xpt, ypt = map(Pflng, Pflat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pacifica\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pacoima' ):\r\n Paclng, Paclat = ( -118.41, 34.78)\r\n xpt, ypt = map(Paclng, Paclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pacoima\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Palm Desert' ):\r\n Palmlng, Palmlat = ( -116.37, 33.72)\r\n xpt, ypt = map(Palmlng, Palmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Palm Desert\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Palm Springs' ):\r\n PSlng, PSlat = ( -116.55, 33.83)\r\n xpt, ypt = map(PSlng, PSlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Palm Springs\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Palmdale' ):\r\n Pdalng, Pdalat = ( -118.12, 34.58)\r\n xpt, ypt = map(Pdalng, Pdalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Palmdale\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Palo Alto' ):\r\n PAllng, PAllat = ( -122.14, 37.44)\r\n xpt, ypt = map(PAllng, PAllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Palo Alto\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Paradise' ):\r\n Pdilng, Pdilat = ( -121.62, 39.76)\r\n xpt, ypt = map(Pdilng, Pdilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Paradise\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Paramount' ):\r\n Paralng, Paralat = ( -118.16, 33.89)\r\n xpt, ypt = map(Paralng, Paralat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Paramount\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pasadena' ):\r\n Pnalng, Pnalat = ( -118.14, 34.15)\r\n xpt, ypt = map(Pnalng, Pnalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pasadena\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Paso Robles' ):\r\n Psrlng, Psrlat = ( -120.65, 35.64)\r\n xpt, ypt = map(Psrlng, Psrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Paso Robles\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Patterson' ):\r\n Pttlng, Pttlat = ( -121.13, 37.47)\r\n xpt, ypt = map(Pttlng, Pttlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Patterson\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 
'Pearblossom' ):\r\n Psslng, Psslat = ( -117.91, 34.51)\r\n xpt, ypt = map(Psslng, Psslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pearblossom\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Perris' ):\r\n Prrslng, Prrslat = ( -117.23, 33.78)\r\n xpt, ypt = map(Prrslng, Prrslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Perris\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Petaluma' ):\r\n Petalng, Petalat = ( -122.64, 38.23)\r\n xpt, ypt = map(Petalng, Petalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Petaluma\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Phelan' ):\r\n Phlng, Phlat = ( -117.57, 34.43)\r\n xpt, ypt = map(Phlng, Phlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Phelan\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pico Rivera' ):\r\n Pcolng, Pcolat = ( -118.10, 33.98)\r\n xpt, ypt = map(Pcolng, Pcolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pico Rivera\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pilot Hill' or citywiththings[y] == 'Pilot hill' ):\r\n PHilng, PHilat = ( -121.01, 38.83)\r\n xpt, ypt = map(PHilng, PHilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pilot Hill\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pine Grove' ):\r\n PGlng, PGlat = ( -120.66, 38.41)\r\n xpt, ypt = map(PGlng, PGlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pine Grove\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pine Mountain Club' ):\r\n PMClng, PMClat = ( -119.16, 34.85)\r\n xpt, ypt = map(PMClng, PMClat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pine Mountain Club\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pinion Hills' or citywiththings[y] == 'Pinon Hills' ):\r\n Pinlng, Pinlat = ( -117.65, 34.43)\r\n xpt, ypt = map(Pinlng, Pinlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pinon Hills\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Pismo' ):\r\n Psmlng, Psmlat = ( -120.64, 35.14)\r\n xpt, ypt = map(Psmlng, Psmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Pismo Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Placentia' ):\r\n Pcelng, Pcelat = ( -117.87, 33.87)\r\n xpt, ypt = map(Pcelng, Pcelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Placentia\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Placerville' ):\r\n Pvlllng, Pvlllat = ( -120.80, 38.73)\r\n xpt, ypt = map(Pvlllng, Pvlllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Placerville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Play 
Vista' or citywiththings[y] == 'Playa Vista'):\r\n            Playlng, Playlat = ( -118.43, 33.97)\r\n            xpt, ypt = map(Playlng, Playlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Playa Vista\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Pleasanton' ):\r\n            Ptonlng, Ptonlat = ( -121.87, 37.66)\r\n            xpt, ypt = map(Ptonlng, Ptonlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Pleasanton\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Pollock Pines' ):\r\n            Polllng, Polllat = ( -120.59, 38.76)\r\n            xpt, ypt = map(Polllng, Polllat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Pollock Pines\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Ponoma' ):\r\n            Pnmlng, Pnmlat = ( -117.75, 34.06)\r\n            xpt, ypt = map(Pnmlng, Pnmlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Pomona\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Port Hueneme' ):\r\n            Phuelng, Phuelat = ( -119.20, 34.15)\r\n            xpt, ypt = map(Phuelng, Phuelat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Port Hueneme\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Porterville' ):\r\n            Porrlng, Porrlat = ( -119.02, 36.07)\r\n            xpt, ypt = map(Porrlng, Porrlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Porterville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Potter Valley' ):\r\n            Pottlng, Pottlat = ( -123.11, 39.32)\r\n            xpt, ypt = map(Pottlng, Pottlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Potter Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Poway' ):\r\n            Powlng, Powlat = ( -117.04, 32.96)\r\n            xpt, ypt = map(Powlng, Powlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Poway\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Quincy' ):\r\n            Qlng, Qlat = ( -120.95, 39.94)\r\n            xpt, ypt = map(Qlng, Qlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Quincy\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Ramona' ):\r\n            Rlng, Rlat = ( -116.88, 33.04)\r\n            xpt, ypt = map(Rlng, Rlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Ramona\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Rancho Bernardo' ):\r\n            Rblng, Rblat = ( -117.08, 33.03)\r\n            xpt, ypt = map(Rblng, Rblat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Rancho Bernardo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Rancho Cordova' ):\r\n            Rclng, Rclat = ( -121.30, 38.59)\r\n            xpt, ypt = map(Rclng, Rclat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Rancho Cordova\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Rancho Cucamonga' ):\r\n            Rcalng, Rcalat = ( 
-117.59, 34.11)\r\n xpt, ypt = map(Rcalng, Rcalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rancho Cucamonga\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rancho Mirage' ):\r\n Rmlng, Rmlat = ( -116.41, 33.74)\r\n xpt, ypt = map(Rmlng, Rmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rancho Mirage\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Ranchos Palos Verdes' ):\r\n Rplng, Rplat = ( -118.39, 33.74)\r\n xpt, ypt = map(Rplng, Rplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Ranchos Palos Verdes\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rancho Santa Margarita' ):\r\n Rsmlng, Rsmlat = ( -117.60, 33.64)\r\n xpt, ypt = map(Rsmlng, Rsmlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rancho Santa Margarita\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Red Bluff' ):\r\n Redlng, Redlat = ( -122.24, 40.18)\r\n xpt, ypt = map(Redlng, Redlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Red Bluff\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Redding' ):\r\n Reddlng, Reddlat = ( -122.39, 40.59)\r\n xpt, ypt = map(Reddlng, Reddlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Redding\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Redlands' ):\r\n Redllng, Redllat = ( -117.18, 34.06)\r\n xpt, ypt = map(Redllng, Redllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Redlands\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Redondo Beach' ):\r\n Rbblng, Rbblat = ( -118.39, 33.85)\r\n xpt, ypt = map(Rbblng, Rbblat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Redondo Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Redwood City' or citywiththings[y] == 'Redwood Valley' ):\r\n Rclng, Rclat = ( -122.24, 37.49)\r\n xpt, ypt = map(Rclng, Rclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Redwood City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Reedley' ):\r\n Reelng, Reelat = ( -119.45, 36.60)\r\n xpt, ypt = map(Reelng, Reelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Reedley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Reseda' ):\r\n Reslng, Reslat = ( -118.54, 34.20)\r\n xpt, ypt = map(Reslng, Reslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Reseda\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rialto' ):\r\n Rialng, Rialat = ( -117.37, 34.11)\r\n xpt, ypt = map(Rialng, Rialat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rialto\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Ridgecrest' ):\r\n Ridlng, Ridlat = ( -117.67, 
35.62)\r\n xpt, ypt = map(Ridlng, Ridlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Ridgecrest\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rio Del Mar' ):\r\n Riolng, Riolat = ( -121.88, 36.96)\r\n xpt, ypt = map(Riolng, Riolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rio Del Mar\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rio Dell' ):\r\n Rdellng, Rdellat = ( -124.11, 40.50)\r\n xpt, ypt = map(Rdellng, Rdellat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rio Dell\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rio Linda' ):\r\n Rlinlng, Rlinlat = ( -121.45, 38.69)\r\n xpt, ypt = map(Rlinlng, Rlinlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rio Linda\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rio Vista' ):\r\n Rvislng, Rvislat = ( -121.69, 38.16)\r\n xpt, ypt = map(Rvislng, Rvislat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rio Vista\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Ripon' ):\r\n Riplng, Riplat = ( -121.14, 37.74)\r\n xpt, ypt = map(Riplng, Riplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Ripon\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Riverbank' ):\r\n Rivlng, Rivlat = ( -120.94, 37.74)\r\n xpt, ypt = map(Rivlng, Rivlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Riverbank\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Riverside' ):\r\n Rsdlng, Rsdlat = ( -117.40, 33.95)\r\n xpt, ypt = map(Rsdlng, Rsdlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Riverside\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rocklin') :\r\n Rcklng, Rcklat = ( -121.24, 38.71)\r\n xpt, ypt = map(Rcklng, Rcklat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rocklin\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rohnert Park' or citywiththings[y] == 'Rohnert park' ):\r\n Rohnlng, Rohnlat = ( -122.70, 38.34)\r\n xpt, ypt = map(Rohnlng, Rohnlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rohnert Park\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rosamond' ):\r\n Rsalng, Rsalat = ( -118.16, 34.86)\r\n xpt, ypt = map(Rsalng, Rsalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rosamond\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rosemead' ):\r\n Roselng, Roselat = ( -118.07, 34.08)\r\n xpt, ypt = map(Roselng, Roselat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rosemead\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Roseville' ):\r\n Rosvlng, Rosvlat = ( -121.29, 38.75)\r\n xpt, ypt = map(Rosvlng, Rosvlat, inverse = 
False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Roseville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rough and Ready' ):\r\n RRlng, RRlat = ( -121.14, 39.23)\r\n xpt, ypt = map(RRlng, RRlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rough and Ready\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Rubidoux' ):\r\n Rxxlng, Rxxlat = ( -117.43, 34.00)\r\n xpt, ypt = map(Rxxlng, Rxxlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Rubidoux\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Sacramento' or citywiththings[y] == 'North Highlands' or citywiththings[y] == 'North Sacramento' or citywiththings[y] == 'West Sacramento'):\r\n Slng, Slat = ( -121.49, 38.58)\r\n xpt, ypt = map(Slng, Slat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Sacramento\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Salinas' ):\r\n Salng, Salat = ( -121.66, 36.68)\r\n xpt, ypt = map(Salng, Salat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Salinas\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Salton City' or citywiththings[y] == 'Salton Sea' or citywiththings[y] == 'Salton Sea Beach' ):\r\n SClng, SClat = ( -115.96, 33.30)\r\n xpt, ypt = map(SClng, SClat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Salton City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Anselmo' ):\r\n SAlng, SAlat = ( -122.56, 37.98)\r\n xpt, ypt = map(SAlng, SAlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Anselmo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Bernardino' ):\r\n SBlng, SBlat = ( -117.29, 34.11)\r\n xpt, ypt = map(SBlng, SBlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Bernardino\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Carlos' ):\r\n SCClng, SCClat = ( -122.26, 37.51)\r\n xpt, ypt = map(SCClng, SCClat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Carlos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Clemente' ):\r\n SCllng, SCllat = ( -117.61, 33.43)\r\n xpt, ypt = map(SCllng, SCllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Clemente\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Diego' or citywiththings[y] == 'San diego'):\r\n SDlng, SDlat = ( -117.16, 32.72)\r\n xpt, ypt = map(SDlng, SDlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Diego\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Dimas' ):\r\n SDilng, SDilat = ( -117.81, 34.11)\r\n xpt, ypt = map(SDilng, SDilat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Dimas\" , fontsize = 7, fontweight = 'bold', color = 
'#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Fernando' or citywiththings[y] == 'San Fernando Valley' ):\r\n SFelng, SFelat = ( -118.44, 34.18)\r\n xpt, ypt = map(SFelng, SFelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Fernando\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Francisco' ):\r\n SFlng, SFlat = ( -122.42, 37.77)\r\n xpt, ypt = map(SFlng, SFlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Francisco\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Gabriel' or citywiththings[y] == 'San Gabriel Valley' ):\r\n SGlng, SGlat = ( -118.11, 34.10)\r\n xpt, ypt = map(SGlng, SGlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Gabriel\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Jacinto' ):\r\n SJalng, SJalat = ( -116.96, 33.78)\r\n xpt, ypt = map(SJalng, SJalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Jacinto\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Jose' ):\r\n SJlng, SJlat = ( -121.89, 37.34)\r\n xpt, ypt = map(SJlng, SJlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Jose\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Juan Capistrano' ):\r\n SJClng, SJClat = ( -117.66, 33.50)\r\n xpt, ypt = map(SJClng, SJClat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Juan Capistrano\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Leandro' ):\r\n SLlng, SLlat = ( -122.16, 37.72)\r\n xpt, ypt = map(SLlng, SLlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Leandro\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Luis Obispo' ):\r\n SLOlng, SLOlat = ( -120.66, 35.28)\r\n xpt, ypt = map(SLOlng, SLOlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Luis Obispo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Marcos' or citywiththings[y] == 'San Marcos/Vista' ):\r\n SMMlng, SMMlat = ( -117.17, 33.14)\r\n xpt, ypt = map(SMMlng, SMMlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Marcos\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Mateo' ):\r\n SMlng, SMlat = ( -122.33, 37.56)\r\n xpt, ypt = map(SMlng, SMlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Mateo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Pablo' ):\r\n SPlng, SPlat = ( -122.35, 37.96)\r\n xpt, ypt = map(SPlng, SPlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Pablo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Pedro' ):\r\n SPelng, SPelat = ( -118.29, 33.74)\r\n xpt, ypt = map(SPelng, SPelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , 
\"San Pedro\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Rafael' ):\r\n SRlng, SRlat = ( -122.53, 37.97)\r\n xpt, ypt = map(SRlng, SRlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Rafael\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Ramon' ):\r\n SRRlng, SRRlat = ( -121.98, 37.78)\r\n xpt, ypt = map(SRRlng, SRRlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Ramon\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Ysidro' ):\r\n SYlng, SYlat = ( -117.05, 32.56)\r\n xpt, ypt = map(SYlng, SYlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Ysidro\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Ana' ):\r\n SAnlng, SAnlat = ( -117.87, 33.75)\r\n xpt, ypt = map(SAnlng, SAnlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Ana\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Barbara' or citywiththings[y] == 'Santa Barbara County'):\r\n SBalng, SBalat = ( -119.70, 34.42)\r\n xpt, ypt = map(SBalng, SBalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Barbara\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Clara' ):\r\n SCllng, SCllat = ( -121.96, 37.35)\r\n xpt, ypt = map(SCllng, SCllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Clara\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'San Clarita' ):\r\n SCrlng, SCrlat = ( -118.54, 34.39)\r\n xpt, ypt = map(SCrlng, SCrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"San Clarita\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Cruz' ):\r\n SCClng, SCClat = ( -118.54, 34.39)\r\n xpt, ypt = map(SCClng, SCClat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Cruz\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Fe' ):\r\n SaFlng, SaFlat = ( -117.20, 33.02)\r\n xpt, ypt = map(SaFlng, SaFlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Fe\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Maria' ):\r\n SMalng, SMalat = ( -120.44, 34.95)\r\n xpt, ypt = map(SMalng, SMalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Maria\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Monica' ):\r\n SMclng, SMclat = ( -118.49, 34.02)\r\n xpt, ypt = map(SMclng, SMclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Monica\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Paula' ):\r\n SPPlng, SPPlat = ( -119.06, 34.35)\r\n xpt, ypt = map(SPPlng, SPPlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Paula\" , fontsize = 7, 
fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Rosa' ):\r\n SRalng, SRalat = ( -122.71, 38.44)\r\n xpt, ypt = map(SRalng, SRalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Rosa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santa Ysabel' ):\r\n SYblng, SYblat = ( -116.67, 33.11)\r\n xpt, ypt = map(SYblng, SYblat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santa Ysabel\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Santee' ):\r\n Steelng, Steelat = ( -116.97, 32.84)\r\n xpt, ypt = map(Steelng, Steelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Santee\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Saratoga' ):\r\n Stogalng, Stogalat = ( -122.02, 37.26)\r\n xpt, ypt = map(Stogalng, Stogalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Saratoga\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Saugus' ):\r\n Saulng, Saulat = ( -118.54, 34.41)\r\n xpt, ypt = map(Saulng, Saulat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Saugus\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Seal Beach' ):\r\n SeaBlng, SeaBlat = ( -118.10, 33.74)\r\n xpt, ypt = map(SeaBlng, SeaBlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Seal Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Sears Point Raceway' ):\r\n SPRlng, SPRlat = ( -122.45, 38.16)\r\n xpt, ypt = map(SPRlng, SPRlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Sonoma Raceway\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Sebastopol' ):\r\n Spollng, Spollat = ( -122.82, 38.40)\r\n xpt, ypt = map(Spollng, Spollat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Sebastopol\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Shafter' ):\r\n Saftlng, Saftlat = ( -119.27, 35.50)\r\n xpt, ypt = map(Saftlng, Saftlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Shafter\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Shasta City' or citywiththings[y] == 'Shasta Lake'):\r\n SCLlng, SCLlat = ( -122.31, 41.31)\r\n xpt, ypt = map(SCLlng, SCLlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Mount Shasta\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Shell Beach' ):\r\n Shhlng, Shhlat = ( -120.67, 35.16)\r\n xpt, ypt = map(Shhlng, Shhlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Shell Beach\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Sherman Oaks' ):\r\n ShOlng, ShOlat = ( -118.45, 34.15)\r\n xpt, ypt = map(ShOlng, ShOlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Sherman Oaks\" , fontsize = 7, fontweight = 'bold', 
color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Signal Hill' ):\r\n SHlllng, SHlllat = ( -118.17, 33.80)\r\n xpt, ypt = map(SHlllng, SHlllat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Signal Hill\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Simi Valley' ):\r\n Similng, Similat = ( -118.78, 34.27)\r\n xpt, ypt = map(Similng, Similat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Simi Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Sky Valley' ):\r\n Skylng, Skylat = ( -116.35, 33.89)\r\n xpt, ypt = map(Skylng, Skylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Sky Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Soledad' ):\r\n Sdadlng, Sdadlat = ( -121.33, 36.42)\r\n xpt, ypt = map(Sdadlng, Sdadlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Soledad\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Solromar' ):\r\n Smarlng, Smarlat = ( -118.95, 34.05)\r\n xpt, ypt = map(Smarlng, Smarlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Solromar\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Somis' ):\r\n Sislng, Sislat = ( -119.00, 34.26)\r\n xpt, ypt = map(Sislng, Sislat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Somis\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Sonoma' or citywiththings[y] == 'West Sonoma County' ):\r\n Smaalng, Smaalat = ( -122.46, 38.29)\r\n xpt, ypt = map(Smaalng, Smaalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Sonoma\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'South Gate' or citywiththings[y] == 'Southgate' ):\r\n SGalng, SGalat = ( -118.21, 33.95)\r\n xpt, ypt = map(SGalng, SGalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"South Gate\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'South Lake Tahoe' ):\r\n SLTlng, SLTlat = ( -119.98, 38.94)\r\n xpt, ypt = map(SLTlng, SLTlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"South Lake Tahoe\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'South Los Angeles' ):\r\n SLAalng, SLAalat = ( -118.24, 34.05)\r\n xpt, ypt = map(SLAalng, SLAalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"South Los Angeles\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Spring Valley' ):\r\n SVlng, SVlat = ( -117.00, 32.74)\r\n xpt, ypt = map(SVlng, SVlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Spring Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Stanford' ):\r\n Sfolng, Sfolat = ( -122.17, 37.42)\r\n xpt, ypt = map(Sfolng, Sfolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Stanford\" , fontsize = 7, 
fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Stockton' ):\r\n            Stonlng, Stonlat = ( -121.29, 37.96)\r\n            xpt, ypt = map(Stonlng, Stonlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Stockton\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Sultana' ):\r\n            Stanalng, Stanalat = ( -119.34, 36.55)\r\n            xpt, ypt = map(Stanalng, Stanalat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Sultana\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Summerland' ):\r\n            Smmlng, Smmlat = ( -119.59, 34.42)\r\n            xpt, ypt = map(Smmlng, Smmlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Summerland\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Sun Valley' ):\r\n            SVunlng, SVunlat = ( -118.38, 34.23)\r\n            xpt, ypt = map(SVunlng, SVunlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Sun Valley\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Sunnyvale' ):\r\n            Sunnlng, Sunnlat = ( -122.04, 37.37)\r\n            xpt, ypt = map(Sunnlng, Sunnlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Sunnyvale\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Susanville' ):\r\n            Suslng, Suslat = ( -120.65, 40.42)\r\n            xpt, ypt = map(Suslng, Suslat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Susanville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Sylmar' ):\r\n            Sylmlng, Sylmlat = ( -118.46, 34.31)\r\n            xpt, ypt = map(Sylmlng, Sylmlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Sylmar\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Tarzana' ):\r\n            Tlng, Tlat = ( -118.55, 34.15)\r\n            xpt, ypt = map(Tlng, Tlat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Tarzana\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Tahachapi' ):\r\n            Tilng, Tilat = ( -118.45, 35.13)\r\n            xpt, ypt = map(Tilng, Tilat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Tehachapi\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Tehama' ):\r\n            Talng, Talat = ( -122.12, 40.03)\r\n            xpt, ypt = map(Talng, Talat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Tehama\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Temecula' ):\r\n            Tmalng, Tmalat = ( -117.15, 33.49)\r\n            xpt, ypt = map(Tmalng, Tmalat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Temecula\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 'Temple City' or citywiththings[y] == 'Temple City/Arcadia' ):\r\n            TClng, TClat = ( -118.06, 34.11)\r\n            xpt, ypt = map(TClng, TClat, inverse = False)\r\n            map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n            plt.text(xpt,ypt , \"Temple City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n        if(citywiththings[y] == 
'Templeton' ):\r\n Ttonlng, Ttonlat = ( -120.71, 35.55)\r\n xpt, ypt = map(Ttonlng, Ttonlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Templeton\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Thousand Oaks' ):\r\n TOlng, TOlat = ( -118.84, 34.17)\r\n xpt, ypt = map(TOlng, TOlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Thousand Oaks\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Three Rivers' ):\r\n TRlng, TRlat = ( -118.90, 36.44)\r\n xpt, ypt = map(TRlng, TRlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Three Rivers\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Toluca Lake' ):\r\n Tcalng, Tcalat = ( -118.35, 34.15)\r\n xpt, ypt = map(Tcalng, Tcalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Toluca Lake\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Topaga' or citywiththings[y] == 'Topanga Beach' ):\r\n Tngalng, Tngalat = ( -118.60, 34.09)\r\n xpt, ypt = map(Tngalng, Tngalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Topanga\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Torrance' ):\r\n Trrlng, Trrlat = ( -118.34, 33.84)\r\n xpt, ypt = map(Trrlng, Trrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Torrance\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Tracy' ):\r\n Traclng, Traclat = ( -121.43, 37.74)\r\n xpt, ypt = map(Traclng, Traclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Tracy\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Trinidad' ):\r\n Tdadlng, Tdadlat = ( -124.14, 41.06)\r\n xpt, ypt = map(Tdadlng, Tdadlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Trinidad\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Truckee' ):\r\n Trulng, Trulat = ( -120.18, 39.33)\r\n xpt, ypt = map(Trulng, Trulat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Truckee\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Tujunga' ):\r\n Tjalng, Tjalat = ( -118.31, 34.26)\r\n xpt, ypt = map(Tjalng, Tjalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Tujunga\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Turlock' ):\r\n Tlocklng, Tlocklat = ( -120.85, 37.49)\r\n xpt, ypt = map(Tlocklng, Tlocklat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Turlock\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Tustin' ):\r\n Ttinlng, Ttinlat = ( -117.83, 33.75)\r\n xpt, ypt = map(Ttinlng, Ttinlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Tustin\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Ukiah' ):\r\n Ulng, Ulat = ( -123.21, 39.15)\r\n xpt, ypt = 
map(Ulng, Ulat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Ukiah\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'University City' ):\r\n Unlng, Unlat = ( -117.21, 32.86)\r\n xpt, ypt = map(Unlng, Unlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"University City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Upland' ):\r\n Uplng, Uplat = ( -117.65, 34.10)\r\n xpt, ypt = map(Uplng, Uplat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Upland\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Vacaville' ):\r\n Vlng, Vlat = ( -121.99, 38.36)\r\n xpt, ypt = map(Vlng, Vlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Vacaville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Vallejo' ):\r\n Valng, Valat = ( -122.26, 38.10)\r\n xpt, ypt = map(Valng, Valat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Vallejo\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Valley Springs' ):\r\n VSlng, VSlat = ( -120.83, 38.19)\r\n xpt, ypt = map(VSlng, VSlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Valley Springs\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Valley Village' ):\r\n VVlng, VVlat = ( -118.38, 34.16)\r\n xpt, ypt = map(VVlng, VVlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Valley Village\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Van Nuys' or citywiththings[y] == 'Vannuys'):\r\n VNlng, VNlat = ( -118.45, 34.19)\r\n xpt, ypt = map(VNlng, VNlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Van Nuys\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Vandenberg Village' ):\r\n VVVlng, VVVlat = ( -120.47, 34.71)\r\n xpt, ypt = map(VVVlng, VVVlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Vandenberg Village\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Venice' or citywiththings[y] == 'Venice beach'):\r\n Velng, Velat = ( -118.47, 33.99)\r\n xpt, ypt = map(Velng, Velat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Venice\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Ventura' ):\r\n Vrrlng, Vrrlat = ( -119.23, 34.27)\r\n xpt, ypt = map(Vrrlng, Vrrlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Ventura\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Victorville' ):\r\n Viclng, Viclat = ( -117.29, 34.54)\r\n xpt, ypt = map(Viclng, Viclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Victorville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Visalia' ):\r\n Vlilng, Vlilat = ( -119.29, 36.33)\r\n xpt, ypt = map(Vlilng, Vlilat, inverse = False)\r\n 
map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Visalia\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Vista' ):\r\n Vislng, Vislat = ( -117.24, 33.20)\r\n xpt, ypt = map(Vislng, Vislat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Vista\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Walnut' ):\r\n Wlng, Wlat = ( -117.87, 34.02)\r\n xpt, ypt = map(Wlng, Wlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Walnut\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Walnut Creek' ):\r\n WClng, WClat = ( -122.07, 37.91)\r\n xpt, ypt = map(WClng, WClat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Walnut Creek\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Warner Springs' ):\r\n WSlng, WSlat = ( -116.65, 33.28)\r\n xpt, ypt = map(WSlng, WSlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Warner Springs\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Watsonville' ):\r\n Wvlng, Wvlat = ( -121.76, 36.91)\r\n xpt, ypt = map(Wvlng, Wvlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Watsonville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Weed' ):\r\n Weelng, Weelat = ( -122.39, 41.42)\r\n xpt, ypt = map(Weelng, Weelat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Weed\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'West Hills' ):\r\n Welng, Welat = ( -118.64, 34.20)\r\n xpt, ypt = map(Welng, Welat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"West Hills\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Westminster' ):\r\n Wterlng, Wterlat = ( -118.00, 33.75)\r\n xpt, ypt = map(Wterlng, Wterlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Westminster\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Westport' ):\r\n Wportlng, Wportlat = ( -123.78, 39.64)\r\n xpt, ypt = map(Wportlng, Wportlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Westport\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Westwood' ):\r\n Woolng, Woolat = ( -121.01, 40.31)\r\n xpt, ypt = map(Woolng, Woolat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Westwood\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Whittier' ):\r\n Wtttlng, Wtttlat = ( -118.03, 33.98)\r\n xpt, ypt = map(Wtttlng, Wtttlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Whittier\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Wildomar' ):\r\n Wildlng, Wildlat = ( -117.28, 33.60)\r\n xpt, ypt = map(Wildlng, Wildlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Wildomar\" , fontsize = 7, fontweight = 
'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Williams' ):\r\n Wmslng, Wmslat = ( -122.15, 39.15)\r\n xpt, ypt = map(Wmslng, Wmslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Williams\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Willits' ):\r\n Wlltlng, Wlltlat = ( -123.36, 39.41)\r\n xpt, ypt = map(Wlltlng, Wlltlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Willits\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Willows' or citywiththings[y] == 'Wilows'):\r\n Wwslng, Wwslat = ( -122.19, 39.52)\r\n xpt, ypt = map(Wwslng, Wwslat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Willows\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Wilmington' ):\r\n Wmylng, Wmylat = ( -118.26, 33.79)\r\n xpt, ypt = map(Wmylng, Wmylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Wilmington\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Winchester' ):\r\n Wchlng, Wchlat = ( -117.08, 33.71)\r\n xpt, ypt = map(Wchlng, Wchlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Winchester\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Windsor' ):\r\n Wsorlng, Wsorlat = ( -122.82, 38.55)\r\n xpt, ypt = map(Wsorlng, Wsorlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Windsor\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Woodlake' ):\r\n Wlaklng, Wlaklat = ( -119.10, 36.41)\r\n xpt, ypt = map(Wlaklng, Wlaklat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt , \"Woodlake\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Woodland' or citywiththings[y] == 'Woodland Hills'):\r\n WHIlng, WHIlat = ( -121.77, 38.68)\r\n xpt, ypt = map(WHIlng, WHIlat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt + 10000, \"Woodland\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Yountville' ):\r\n Ylng, Ylat = ( -122.36, 38.40)\r\n xpt, ypt = map(Ylng, Ylat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt + 10000, \"Yountville\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Yuba City' ):\r\n Ybalng, Ybalat = ( -121.62, 39.14)\r\n xpt, ypt = map(Ybalng, Ybalat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt + 10000, \"Yuba City\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Yucaipa' ):\r\n Yclng, Yclat = ( -117.04, 34.03)\r\n xpt, ypt = map(Yclng, Yclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt + 10000, \"Yucaipa\" , fontsize = 7, fontweight = 'bold', color = '#FFFFFF')\r\n\r\n if(citywiththings[y] == 'Yucca Valley' ):\r\n Ycclng, Ycclat = ( -116.43, 34.11)\r\n xpt, ypt = map(Ycclng, Ycclat, inverse = False)\r\n map.plot(xpt,ypt, 'D', markersize = 5, color = 'r')\r\n plt.text(xpt,ypt + 10000, \"Yucca Valley\" , fontsize = 7, fontweight = 'bold', color 
= '#FFFFFF')\r\n #----------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n plt.title('UFO SIGHTINGS IN CALIFORNIA')\r\n plt.show() #Learn how to plot multiple points I think plt.show() comes after all the points\r\n\r\n if(searcher == 0):\r\n print(\"\") #So it does nothing if it is not found. Mostly for the date\r\n","sub_path":"FinalUFOSightingMapper.py","file_name":"FinalUFOSightingMapper.py","file_ext":"py","file_size_in_byte":176242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"202220990","text":"# Copyright (C) 2019 Pavlov Media\n# License Proprietary. Do not copy, share nor distribute.\n\nfrom odoo import api, models, fields\n\n\nclass Users(models.Model):\n _inherit = 'res.users'\n\n hr_job_id = fields.Many2one('hr.job', 'Job Position')\n hr_job_title = fields.Char(\"Job Title\")\n hr_department_id = fields.Many2one('hr.department', string=\"Department\")\n hr_parent_id = fields.Many2one('hr.employee', string=\"Manager\")\n user_template_id = fields.Many2one('res.users', string=\"User Template\")\n child_user_ids = fields.One2many('res.users',\n 'user_template_id',\n string=\"Users\")\n\n @api.onchange('hr_job_title',\n 'hr_job_id',\n 'hr_department_id',\n 'hr_parent_id')\n def on_change_update_employee_fields(self):\n if self.employee_ids:\n for record in self.employee_ids:\n record.write({\n 'job_title': self.hr_job_title,\n 'job_id': self.hr_job_id.id,\n 'department_id': self.hr_department_id.id,\n 'parent_id': self.hr_parent_id.id})\n\n @api.onchange('country_id')\n def _onchange_country_id(self):\n if self.country_id and self.country_id != self.state_id.country_id:\n self.state_id = False\n\n @api.onchange('state_id')\n def _onchange_state(self):\n if self.state_id.country_id:\n self.country_id = self.state_id.country_id\n\n @api.multi\n def action_create_new_user(self):\n for rec in self:\n default_vals = {\n \"name\": \"NEW USER - ChangeMe\",\n \"login\": \"new_user@changeme.com\",\n \"active\": False,\n \"user_template_id\": rec.id,\n \"default_operating_unit_id\": rec.default_operating_unit_id.id\n }\n new_user = rec.copy(default=default_vals)\n form_view_id = self.env.ref(\"base.view_users_form\").id\n return {\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'res.users',\n 'target': 'current',\n 'res_id': new_user.id,\n 'views': [(form_view_id, 'form')],\n 'type': 'ir.actions.act_window'\n }\n\n @api.multi\n def write(self, values):\n for rec in self:\n if values.get('user_template_id', False):\n template = self.env['res.users'].browse(\n values.get('user_template_id'))\n values.update({\n 'groups_id': [(6, 0, template.groups_id.ids)],\n 'default_operating_unit_id':\n template.default_operating_unit_id.id,\n 'operating_unit_ids':\n [(6, 0, template.operating_unit_ids.ids)]\n })\n return super(Users, rec).write(values)\n","sub_path":"pavlov_media_hr/models/res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"40428601","text":"fruits1 = ['банан', 'яблоко', 'виноград', 'апельсин']\nfruits2 = ['арбуз', 'киви', 'банан', 'мандарин', 'виноград']\n\nresult = []\n\nfor fruit in fruits1:\n if fruit in fruits2:\n result.append(fruit)\n\nprint(result)\n\n#------\nresult = [fruit for fruit in fruits1 if fruit in fruits2]\n\nprint(result)\n\n#------\nnew_result = list(set(fruits1) & 
set(fruits2))\n\nprint(new_result)","sub_path":"GU Python Videocourse/Lesson6/Lesson_6.1.py","file_name":"Lesson_6.1.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"309157629","text":"import os\nimport csv\nimport cv2\nimport numpy as np\nimport random\nimport sklearn\nimport matplotlib.image as mpimg\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, Flatten, Dense, Lambda, Cropping2D, Dropout, Activation\n\n\n### Read data from csv file\nsamples = []\nwith open('./data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n\n# Split the data\nfrom sklearn.model_selection import train_test_split\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\n\n### Augmented images and steering angles generator\ndef generator(samples, batch_size=16):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n random.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n augmented_images = []\n augmented_angles = []\n for batch_sample in batch_samples:\n # Images and angles from center camera\n name_center = './data/IMG/'+batch_sample[0].split('/')[-1]\n center_image = mpimg.imread(name_center)\n center_angle = float(batch_sample[3])\n augmented_images.append(center_image)\n augmented_angles.append(center_angle)\n \n # From left camera\n name_left = './data/IMG/'+batch_sample[1].split('/')[-1]\n left_image = mpimg.imread(name_left)\n left_angle = center_angle + 0.2\n augmented_images.append(left_image)\n augmented_angles.append(left_angle)\n \n # From right camera\n name_right = './data/IMG/'+batch_sample[2].split('/')[-1]\n right_image = mpimg.imread(name_right)\n right_angle = center_angle - 0.2\n augmented_images.append(right_image)\n augmented_angles.append(right_angle)\n \n \n # Flipped images from center camera\n flip_image_center = cv2.flip(center_image,1)\n augmented_images.append(flip_image_center)\n augmented_angles.append(center_angle*-1.0)\n \n # From left camera\n flip_image_left = cv2.flip(left_image,1)\n augmented_images.append(flip_image_left)\n augmented_angles.append(left_angle*-1)\n \n # From right camera\n flip_image_right = cv2.flip(right_image,1)\n augmented_images.append(flip_image_right)\n augmented_angles.append(right_angle*-1)\n\n # trim image to only see section with road\n X_train = np.array(augmented_images)\n y_train = np.array(augmented_angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# Set our batch size\nbatch_size=16\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\n\n### Convolutional Neural Network architecture with Keras\n# Initialization\nmodel = Sequential()\n\n# Normalization of data\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\n\n# Cropping unnecessary parts of image\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\n\n# First convolution layer\nmodel.add(Conv2D(24,(5,5),strides=(2,2),activation=\"elu\"))\n\n# Second convolution layer\nmodel.add(Conv2D(36,(5,5),strides=(2,2),activation=\"elu\"))\n\n# Third convolution layer\nmodel.add(Conv2D(48,(5,5),strides=(2,2),activation=\"elu\"))\n\n# Fourth convolution layer, strides 
1x1\nmodel.add(Conv2D(64,(3,3),activation=\"elu\"))\n\n# Fifth convolution layer, strides 1x1\nmodel.add(Conv2D(64,(3,3),activation=\"elu\"))\n\n# Flatten layer\nmodel.add(Flatten())\n\n#model.add(Dropout(0.8))\n\n# First fully connected layer\nmodel.add(Dense(100, activation='elu'))\n\n# Second fully connected layer\nmodel.add(Dense(50, activation='elu'))\n\n# Third fully connected layer\nmodel.add(Dense(10, activation='elu'))\n\n# Final fully connected layer\nmodel.add(Dense(1))\n\n# Compile, optimization and generator\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator, samples_per_epoch=len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=3, verbose=1)\n\n# Save the model\nmodel.save('model.h5')","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"452315108","text":"from __future__ import unicode_literals\n\nfrom decimal import Decimal, ROUND_UP\n\nimport balanced\nfrom aspen import log\nfrom aspen.utils import typecheck\nfrom gittip import get_tips_and_total\nfrom psycopg2 import IntegrityError\n\n\nMINIMUM = Decimal(\"0.50\") # per Balanced\nFEE = ( Decimal(\"0.30\") # $0.30\n , Decimal(\"1.039\") # 3.9%\n )\n\n\nclass Payday(object):\n \"\"\"Represent an abstract event during which money is moved.\n\n On Payday, we want to use a participant's Gittip balance to settle their\n tips due (pulling in more money via credit card as needed), but we only\n want to use their balance at the start of Payday. Balance changes should be\n atomic globally per-Payday.\n\n \"\"\"\n\n def __init__(self, db):\n \"\"\"Takes a gittip.postgres.PostgresManager instance.\n \"\"\"\n self.db = db\n\n\n def run(self):\n \"\"\"This is the starting point for payday.\n\n This method runs every Friday. It is structured such that it can be run\n again safely (with a newly-instantiated Payday object) if it crashes.\n\n \"\"\"\n log(\"Greetings, program! It's PAYDAY!!!!\")\n ts_start = self.start()\n self.zero_out_pending()\n participants = self.get_participants()\n self.loop(ts_start, participants)\n self.end()\n\n\n def start(self):\n \"\"\"Try to start a new Payday.\n \n If there is a Payday that hasn't finished yet, then the UNIQUE\n constraint on ts_end will kick in and notify us of that. In that case\n we load the existing Payday and work on it some more. 
We use the start\n time of the current Payday to synchronize our work.\n\n \"\"\"\n try:\n rec = self.db.fetchone(\"INSERT INTO paydays DEFAULT VALUES \"\n \"RETURNING ts_start\")\n log(\"Starting a new payday.\")\n except IntegrityError: # Collision, we have a Payday already.\n rec = self.db.fetchone(\"\"\"\n\n SELECT ts_start\n FROM paydays\n WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz\n\n \"\"\")\n log(\"Picking up with an existing payday.\")\n assert rec is not None # Must either create or recycle a Payday.\n\n ts_start = rec['ts_start']\n log(\"Payday started at %s.\" % ts_start)\n return ts_start\n\n\n def zero_out_pending(self):\n \"\"\"Zero out the pending column.\n\n We keep track of balance changes as a result of Payday in the pending\n column, and then move them over to the balance column in one big\n transaction at the end of Payday.\n\n \"\"\"\n START_PENDING = \"\"\"\\\n\n UPDATE participants\n SET pending=0.00\n WHERE pending IS NULL\n\n \"\"\"\n self.db.execute(START_PENDING)\n log(\"Zeroed out the pending column.\")\n return None\n\n\n def get_participants(self):\n \"\"\"Return an iterator of participants dicts.\n \"\"\"\n PARTICIPANTS = \"\"\"\\\n SELECT id, balance, balanced_account_uri\n FROM participants\n WHERE claimed_time IS NOT NULL\n \"\"\"\n participants = self.db.fetchall(PARTICIPANTS)\n log(\"Fetched participants.\")\n return participants\n\n\n def loop(self, ts_start, participants):\n \"\"\"Given an iterator, do Payday.\n \"\"\"\n i = 0 \n log(\"Processing participants.\")\n for i, participant in enumerate(participants, start=1):\n if i % 100 == 0:\n log(\"Processed %d participants.\" % i)\n self.charge_and_or_transfer(ts_start, participant)\n log(\"Processed %d participants.\" % i)\n\n\n def charge_and_or_transfer(self, ts_start, participant):\n \"\"\"Given one participant record, pay their day.\n\n Charge each participants' credit card if needed before transfering\n money between Gittip accounts.\n \n \"\"\"\n tips, total = get_tips_and_total( participant['id']\n , for_payday=ts_start\n , db=self.db\n )\n typecheck(total, Decimal)\n short = total - participant['balance']\n if short > 0:\n\n # The participant's Gittip account is short the amount needed to\n # fund all their tips. Let's try pulling in money from their credit\n # card. If their credit card fails we'll forge ahead, in case they\n # have a positive Gittip balance already that can be used to fund\n # at least *some* tips. 
The charge method will have set\n # last_bill_result to a non-empty string if the card did fail.\n\n self.charge( participant['id']\n , participant['balanced_account_uri']\n , short\n )\n \n nsuccessful_tips = 0\n for tip in tips:\n result = self.tip(participant, tip, ts_start)\n if result >= 0:\n nsuccessful_tips += result\n else:\n break\n\n self.mark_participant(nsuccessful_tips)\n\n\n def end(self):\n \"\"\"End Payday.\n\n Transfer pending into balance for all users, setting pending to NULL.\n Close out the paydays entry as well.\n\n \"\"\"\n\n with self.db.get_connection() as conn:\n cursor = conn.cursor()\n\n cursor.execute(\"\"\"\\\n\n UPDATE participants\n SET balance = (balance + pending)\n , pending = NULL\n\n \"\"\")\n self.mark_end(cursor)\n\n conn.commit()\n log(\"Finished payday.\")\n\n\n # Move money between Gittip participants.\n # =======================================\n\n def tip(self, participant, tip, ts_start):\n \"\"\"Given dict, dict, and datetime, log and return int.\n\n Return values:\n\n 0 if no valid tip available or tip has not been claimed\n 1 if tip is valid\n -1 if transfer fails and we cannot continue\n\n \"\"\"\n msg = \"$%s from %s to %s.\"\n msg %= (tip['amount'], participant['id'], tip['tippee'])\n\n if tip['amount'] == 0:\n\n # The tips table contains a record for every time you click a tip\n # button. So if you click $0.25 then $3.00 then $0.00, that\n # generates three entries. We are looking at the last entry here,\n # and it's zero.\n\n return 0\n\n claimed_time = tip['claimed_time']\n if claimed_time is None or claimed_time > ts_start:\n\n # Gittip is opt-in. We're only going to collect money on a person's\n # behalf if they opted-in by claiming their account before the\n # start of this payday.\n\n log(\"SKIPPED: %s\" % msg)\n return 0\n\n if not self.transfer(participant['id'], tip['tippee'], tip['amount']):\n\n # The transfer failed due to a lack of funds for the participant.\n # Don't try any further transfers.\n\n log(\"FAILURE: %s\" % msg)\n return -1\n\n log(\"SUCCESS: %s\" % msg)\n return 1\n\n\n def transfer(self, tipper, tippee, amount):\n \"\"\"Given two unicodes and a Decimal, return a boolean.\n\n If the tipper doesn't have enough in their Gittip account then we\n return False. Otherwise we decrement tipper's balance and increment\n tippee's *pending* balance by amount.\n\n \"\"\"\n typecheck(tipper, unicode, tippee, unicode, amount, Decimal)\n with self.db.get_connection() as conn:\n cursor = conn.cursor()\n\n try:\n self.debit_participant(cursor, tipper, amount)\n except ValueError:\n return False\n\n self.credit_participant(cursor, tippee, amount)\n self.record_transfer(cursor, tipper, tippee, amount)\n self.mark_transfer(cursor, amount)\n\n conn.commit()\n return True\n\n\n def debit_participant(self, cursor, participant, amount):\n \"\"\"Decrement the tipper's balance.\n \"\"\"\n\n DECREMENT = \"\"\"\\\n\n UPDATE participants\n SET balance=(balance - %s)\n WHERE id=%s\n AND pending IS NOT NULL\n RETURNING balance\n\n \"\"\"\n cursor.execute(DECREMENT, (amount, participant))\n rec = cursor.fetchone()\n assert rec is not None, (amount, participant) # sanity check\n if rec['balance'] < 0:\n # User is out of money. Bail. 
The transaction will be rolled back\n # by our context manager.\n raise ValueError() # TODO: proper exception type\n\n\n def credit_participant(self, cursor, participant, amount):\n \"\"\"Increment the tippee's *pending* balance.\n \n The pending balance will clear to the balance proper when Payday is\n done.\n\n \"\"\"\n\n INCREMENT = \"\"\"\\\n\n UPDATE participants\n SET pending=(pending + %s)\n WHERE id=%s\n AND pending IS NOT NULL\n RETURNING pending\n\n \"\"\"\n cursor.execute(INCREMENT, (amount, participant))\n rec = cursor.fetchone()\n assert rec is not None, (participant, amount) # sanity check\n\n\n def record_transfer(self, cursor, tipper, tippee, amount):\n RECORD = \"\"\"\\\n\n INSERT INTO transfers\n (tipper, tippee, amount)\n VALUES (%s, %s, %s)\n\n \"\"\"\n cursor.execute(RECORD, (tipper, tippee, amount))\n\n\n # Move money into Gittip from the outside world.\n # ==============================================\n\n def charge(self, participant_id, balanced_account_uri, amount):\n \"\"\"Given two unicodes and a Decimal, return a boolean.\n\n This is the only place where we actually charge credit cards. Amount\n should be the nominal amount. We compute Gittip's fee in this function\n and add it to amount.\n\n \"\"\"\n typecheck( participant_id, unicode\n , balanced_account_uri, (unicode, None)\n , amount, Decimal\n )\n\n if balanced_account_uri is None:\n self.mark_missing_funding()\n return False\n\n charge_amount, fee, error = self.hit_balanced( participant_id\n , balanced_account_uri\n , amount\n )\n\n # XXX If the power goes out at this point then Postgres will be out of\n # sync with Balanced. We'll have to resolve that manually be reviewing\n # the Balanced transaction log and modifying Postgres accordingly.\n # \n # this could be done by generating an ID locally and commiting that to \n # the db and then passing that through in the meta field -\n # https://www.balancedpayments.com/docs/meta\n # Then syncing would be a case of simply:\n # for payment in unresolved_payments:\n # payment_in_balanced = balanced.Transaction.query.filter(\n # **{'meta.unique_id': 'value'}).one()\n # payment.transaction_uri = payment_in_balanced.uri\n \n with self.db.get_connection() as connection:\n cursor = connection.cursor()\n\n if error:\n last_bill_result = error\n amount = Decimal('0.00')\n self.mark_failed(cursor)\n else:\n last_bill_result = ''\n EXCHANGE = \"\"\"\\\n\n INSERT INTO exchanges\n (amount, fee, participant_id)\n VALUES (%s, %s, %s)\n\n \"\"\"\n cursor.execute(EXCHANGE, (amount, fee, participant_id))\n self.mark_success(cursor, charge_amount, fee)\n\n\n # Update the participant's balance.\n # =================================\n # Credit card charges go immediately to balance, not to pending.\n\n RESULT = \"\"\"\\\n\n UPDATE participants\n SET last_bill_result=%s\n , balance=(balance + %s)\n WHERE id=%s\n\n \"\"\"\n cursor.execute(RESULT, (last_bill_result, amount, participant_id))\n\n\n connection.commit()\n\n return not bool(last_bill_result) # True indicates success\n\n\n def hit_balanced(self, participant_id, balanced_account_uri, amount):\n \"\"\"We have a purported balanced_account_uri. 
Try to use it.\n \"\"\"\n typecheck( participant_id, unicode\n , balanced_account_uri, unicode\n , amount, Decimal\n )\n\n try_charge_amount = (amount + FEE[0]) * FEE[1]\n try_charge_amount = try_charge_amount.quantize( FEE[0]\n , rounding=ROUND_UP\n )\n charge_amount = try_charge_amount\n also_log = ''\n if charge_amount < MINIMUM:\n charge_amount = MINIMUM # per Balanced\n also_log = ', rounded up to $%s' % charge_amount\n\n fee = try_charge_amount - amount\n cents = int(charge_amount * 100)\n\n msg = \"Charging %s %d cents ($%s + $%s fee = $%s%s) ... \"\n msg %= participant_id, cents, amount, fee, try_charge_amount, also_log\n\n try:\n customer = balanced.Account.find(balanced_account_uri)\n customer.debit(cents, description=participant_id)\n log(msg + \"succeeded.\")\n except balanced.exc.HTTPError as err:\n log(msg + \"failed: %s\" % err.message)\n return charge_amount, fee, err.message\n\n return charge_amount, fee, None\n\n\n # Record-keeping.\n # ===============\n\n def mark_missing_funding(self):\n STATS = \"\"\"\\\n\n UPDATE paydays\n SET ncc_missing = ncc_missing + 1\n WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz\n RETURNING id\n\n \"\"\"\n self.assert_one_payday(self.db.fetchone(STATS))\n\n\n def mark_failed(self, cursor):\n STATS = \"\"\"\\\n\n UPDATE paydays\n SET ncc_failing = ncc_failing + 1\n WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz\n RETURNING id\n\n \"\"\"\n cursor.execute(STATS)\n self.assert_one_payday(cursor.fetchone())\n\n\n def mark_success(self, cursor, charge_amount, fee):\n STATS = \"\"\"\\\n\n UPDATE paydays\n SET nexchanges = nexchanges + 1\n , exchange_volume = exchange_volume + %s\n , exchange_fees_volume = exchange_fees_volume + %s\n WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz\n RETURNING id\n\n \"\"\"\n cursor.execute(STATS, (charge_amount, fee))\n self.assert_one_payday(cursor.fetchone())\n\n\n def mark_transfer(self, cursor, amount):\n STATS = \"\"\"\\\n\n UPDATE paydays\n SET ntransfers = ntransfers + 1\n , transfer_volume = transfer_volume + %s\n WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz\n RETURNING id\n\n \"\"\"\n cursor.execute(STATS, (amount,))\n self.assert_one_payday(cursor.fetchone())\n\n\n def mark_participant(self, nsuccessful_tips):\n STATS = \"\"\"\\\n\n UPDATE paydays \n SET nparticipants = nparticipants + 1\n , ntippers = ntippers + %s\n , ntips = ntips + %s\n WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz\n RETURNING id\n\n \"\"\"\n self.assert_one_payday( self.db.fetchone( STATS\n , ( 1 if nsuccessful_tips > 0 else 0\n , nsuccessful_tips # XXX bug?\n )\n )\n )\n\n\n def mark_end(self, cursor):\n cursor.execute(\"\"\"\\\n\n UPDATE paydays\n SET ts_end=now()\n WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz\n RETURNING id\n\n \"\"\")\n self.assert_one_payday(cursor.fetchone())\n\n\n def assert_one_payday(self, payday):\n \"\"\"Given the result of a payday stats update, make sure it's okay.\n \"\"\"\n assert payday is not None \n payday = list(payday)\n assert len(payday) == 1, payday\n","sub_path":"gittip/billing/payday.py","file_name":"payday.py","file_ext":"py","file_size_in_byte":16503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"516149022","text":"import vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom vk_api.utils import get_random_id\n\ndef main():\n\n vk_session = vk_api.VkApi(token='82427e0f0fa1d6b0f4d8b38a819e14c25984a418e3dad5b8330b7e948d22afa4134ed0a41a67ad30f80e4')\n\n vk = vk_session.get_api()\n\n longpoll = 
VkLongPoll(vk_session)\n\n    for event in longpoll.listen():\n        if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:\n            vk.messages.send(\n                user_id=event.user_id,\n                random_id=get_random_id(),\n                message=event.text\n            )\n            print('ok')\n\nif __name__ == '__main__':\n    main()","sub_path":"Tasks/Kostin-Rozhkov/bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"59907988","text":"'''\n\tThis solution was submitted by Team: eyeCoders_UOP \n\t\tduring ACES Coders v8 2020 \n\tTeam lead: Rusiru Thushara thusharakart@gmail.com\n\n\tThe solution runs in O(n)\n'''\ndef getline():return [float(x) for x in input().strip().split(' ')]\nc,e,n,s0 = getline()\narr, lst = [], []\nm = c/s0\nfor i in range(int(n)):\n    x,s = getline()\n    arr.append((x,s))\n    if x<c and s>=0:\n        m = max(m,(c-x)/s)\n    elif s<0:\n        lst.append((x,s))\ncount = 0\nfor x,s in lst:\n    if x+s*m <= c:\n        count+=1\nprint(round(m),count)\n","sub_path":"codeBase/ACES_Coders/v8.0/coders/A-walk-to-remember/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"120924049","text":"import os\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom data.base_dataset import BaseDataset, get_params, get_transform, normalize\nfrom data.image_folder import make_dataset, make_dataset_NIDB\n\n\nclass NIDBDataset(BaseDataset):\n    def initialize(self, opt):\n        self.opt = opt\n        self.A_root = opt.data_root\n        self.B_root = '/media/kodai-nakashima/backup/NMDB_v1/dataset_detection'\n\n        ### input A (no danger domain) ###\n        self.dir_A = os.path.join(self.A_root, opt.phase + '_img')\n        self.A_paths = sorted(make_dataset(self.dir_A))\n\n        ### input B (danger domain) ###\n        self.dir_B = os.path.join(self.B_root, opt.phase, '0')\n        self.B_paths = sorted(make_dataset_NIDB(self.dir_B))\n\n        self.dataset_size = min(len(self.A_paths), len(self.B_paths))\n\n    def __getitem__(self, index):\n        ### input A ###\n        A_path = self.A_paths[index]\n        A = Image.open(A_path).convert('RGB')\n        params = get_params(self.opt, A.size)\n        transform_A = get_transform(self.opt, params)\n        A_tensor = transform_A(A)\n\n        ### input B ###\n        B_path = self.B_paths[index]\n        B = Image.open(B_path).convert('RGB')\n        transform_B = get_transform(self.opt, params)\n        B_tensor = transform_B(B)\n\n        input_dict = {'label': A_tensor, 'image': B_tensor}\n        return input_dict\n\n    def __len__(self):\n        return self.dataset_size//self.opt.batch_size * self.opt.batch_size\n\n    def name(self):\n        return 'NIDBDataset'\n","sub_path":"data/nidb_dataset.py","file_name":"nidb_dataset.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"336678300","text":"# -*- coding: utf-8 -*-\n\"\"\"Unit-test of module 'backend' in 'msbackup' package.\"\"\"\n\nimport os\nimport filecmp\nimport shutil\nimport subprocess\nimport tempfile\nimport unittest\n\nfrom msbackup.backend import File\nfrom test.mock import TextFile\n\n\ntry:\n    import configparser\nexcept ImportError:\n    from six.moves import configparser\n\ntry:\n    xrange\nexcept NameError:\n    xrange = range\n\n\nTEST_ROOT = os.path.abspath(os.path.dirname(__file__))\n\n\nclass BackendFile(unittest.TestCase):\n    \"\"\"Test case of module 'backend' of 'msbackup' package.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        \"Setting up class fixture before 
running tests in the class.\"\n config = configparser.RawConfigParser()\n config.read(os.path.join(TEST_ROOT, 'test.config'))\n cls.out = TextFile()\n cls.err = TextFile()\n cls.backend = File(config, out=cls.out, err=cls.err)\n\n def setUp(self):\n \"\"\"Setting up the test case.\"\"\"\n self.test_dir = tempfile.mkdtemp('_msbackup-test_backend')\n fout, self.test_file = tempfile.mkstemp(dir=self.test_dir)\n os.write(fout, os.urandom(16*1024))\n os.close(fout)\n\n def tearDown(self):\n \"\"\"Tear down the test case.\"\"\"\n self.out.data = u''\n self.err.data = u''\n shutil.rmtree(self.test_dir, True)\n\n def check(self):\n \"\"\"Check results of executing back-end method.\"\"\"\n archive_path = u'{}.tar.bz2'.format(self.test_file)\n self.assertTrue(os.path.exists(archive_path))\n origin = u'{}.origin'.format(self.test_file)\n os.rename(self.test_file, origin)\n params = [u'/bin/tar', u'-xjf', archive_path,\n u'-C', os.path.dirname(archive_path)]\n with open(os.devnull, 'w') as out:\n self.assertEqual(0, subprocess.call(params, stdout=out))\n self.assertTrue(os.path.exists(self.test_file))\n self.assertTrue(filecmp.cmp(origin, self.test_file, shallow=False))\n\n def test_archive(self):\n \"\"\"Test of method backend.File.archive().\"\"\"\n output = u'{}.tar.bz2'.format(self.test_file)\n src = os.path.basename(self.test_file)\n base_dir = os.path.dirname(output)\n self.assertEqual(0, self.backend.archive(src, output, base_dir))\n self.check()\n self.assertEqual(u'', self.out.data)\n self.assertEqual(u'', self.err.data)\n\n def test_backup(self):\n \"\"\"Test of method backend.File.backup().\"\"\"\n self.assertEqual(0, self.backend.backup(self.test_file,\n self.test_dir))\n self.check()\n self.assertEqual(u'', self.out.data)\n self.assertEqual(u'', self.err.data)\n\n def test_backup_verbose(self):\n \"\"\"Test of method backend.File.backup() with verbose output.\"\"\"\n self.assertEqual(0, self.backend.backup(self.test_file,\n self.test_dir,\n verbose=True))\n self.check()\n self.assertEqual(u'Backup of {}\\n'.format(self.test_file),\n self.out.data)\n self.assertEqual(u'', self.err.data)\n\n\nif __name__ == \"__main__\":\n import xmlrunner\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))\n","sub_path":"test/test_backend.py","file_name":"test_backend.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"257417918","text":"'''\nThis App illustrates how to open files with Tkinter\n'''\n#Packages\nfrom tkinter import *\nfrom PIL import ImageTk,Image\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\nwindow = Tk()\nwindow.title(\"Files\")\nwindow.iconbitmap('D:/e-Learning/Tkinter/Images/India-flag.ico')\nwindow.geometry(\"500x500\")\n\ndef open():\n global img\n window.filename = filedialog.askopenfilename(initialdir=\"D:/e-Learning/Tkinter/Images\", title=\"Select a file\", filetypes=((\"png files\", \"*.png\"),(\"all files\", \"*.*\")))\n #lbl =Label(window, text=window.filename).pack()\n img = ImageTk.PhotoImage(Image.open(window.filename))\n img_lbl = Label(image=img).pack()\n\nbtn = Button(window, text=\"Open an Image!\", command=open).pack()\n\n#event handler\nwindow.mainloop()\n","sub_path":"App's/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"440566372","text":"from django.shortcuts import render\n\nfrom dashboard.models 
import Supplies\n\n\ndef supplies_list(request):\n supplies = Supplies.objects.filter(done=False)\n supplies = [(e.what, e.when) for e in supplies]\n\n context = {'supplies': supplies}\n\n return render(request, 'house-management/list-supplies.html', context)\n","sub_path":"dashboard/views/_house_management/_supplies_needed.py","file_name":"_supplies_needed.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"431708759","text":"from re import match\nfrom typing import List, Tuple\n\nfrom programmes.recettes.recettes_manager import conseiller_recette, recettes_existantes, gestionnaire_des_recettes\nfrom programmes.cuisines.configer import chercheur_de_toutes_les_configs, supprimer_config, Config, trouveur_de_config, \\\n createur_de_config, saver_de_config\nfrom programmes.cuisines.cuisine_manager import cuisine_opener, chercheur_de_cuisines, Cuisine, cuisine_saver\nfrom programmes.bases import crediter, Date\nfrom programmes.stock.stock_manager import IngredientStock\nfrom programmes.recettes.recettes_opener import decodage\n\n\nclass Rectangle:\n def __init__(self, x, y, nom, texte: List[str]):\n self.x = x\n self.y = y\n self.nom = nom\n self.texte = texte\n\n def aff_ligne(self, ligne: int):\n if ligne == 0:\n aff = ',['\n aff += self.nom\n aff += ']' + '-' * (self.x - 4 - len(self.nom))\n aff += ','\n return aff\n elif ligne == self.y:\n return '\\'' + '-' * (self.x - 2) + '\\''\n if ligne-1 < len(self.texte):\n return '|' + self.texte[ligne-1] + ' ' * (self.x - 2 - len(self.texte[ligne-1])) + '|'\n return '|' + ' ' * (self.x - 2) + '|'\n\n def aff(self, retour_a_la_ligne: bool):\n aff = self.aff_ligne(0)\n if retour_a_la_ligne:\n aff += '\\n'\n for yy in range(1, self.y):\n aff += self.aff_ligne(yy)\n if retour_a_la_ligne:\n aff += '\\n'\n aff += self.aff_ligne(self.y)\n return aff\n\n\ndef merge_rectangle(liste_rec: list):\n maxi = 0\n for rec in liste_rec:\n maxi = max(rec.y, maxi)\n aff = ''\n for i in range(maxi+1):\n for rec in liste_rec:\n aff += rec.aff_ligne(i)\n aff += '\\n'\n return aff\n\n\ndef reponses(texte: str, rep_poss: Tuple, boucle: bool):\n texte += '\\n\\nreponses : '\n rep = input(texte)\n while (rep not in rep_poss and len(rep_poss) != 0) and boucle:\n rep = input(texte)\n return rep\n\n\ndef aff_main_menu():\n config = trouveur_de_config()\n langue = config.langue.dictionnaire['affichage_dev']\n aff = langue[0]\n r1 = Rectangle(50, 10, langue[1], conseiller_recette(config))\n r2 = Rectangle(50, 10, langue[2], config.cuisine.ingredients_bientot_perimes(0, 8))\n aff += merge_rectangle([r1, r2])\n rep = ''\n while rep != 'Q':\n rep = reponses(aff, ('F', 'P', 'C', 'Q', 'S'), True)\n if rep == 'C':\n config = aff_configuration(config)\n if rep == 'P':\n reponses(Rectangle(50, 10, langue[3], crediter).aff(True), (), False)\n if rep == 'S':\n print('S')\n if config is not None:\n print('C')\n aff_gestion_stock(config)\n else:\n reponses(Rectangle(50, 10, langue[4], [langue[5]]).aff(True), (), False)\n if rep == 'F':\n aff_file(config)\n\n\ndef aff_file(config: Config):\n langue = config.langue.dictionnaire['affichage_dev']\n liste1 = recettes_existantes()\n liste2 = []\n for i, ele in enumerate(liste1):\n liste2.append(str(i) + ' ' + ele)\n aff = Rectangle(50, 10, langue[20], liste2).aff(True)\n rep = reponses(aff, (), True)\n lecteur_de_recettes([liste1[int(rep)]], config)\n\n\ndef lecteur_de_recettes(str_recettes: List[str], config: Config):\n langue = 
config.langue.dictionnaire['affichage_dev']\n recettes = []\n for str_rec in str_recettes:\n recettes.append(decodage(str_rec))\n ordre_actions = gestionnaire_des_recettes(recettes)\n for i in range(len(ordre_actions)):\n suiv = ''\n if i + 1 < len(ordre_actions):\n suiv = ' ' + ordre_actions[i+1].aff_str()\n aff = Rectangle(100, 10, langue[21], [ordre_actions[i].aff_str(), suiv]).aff(True)\n reponses(aff, (), False)\n\n\ndef aff_gestion_stock(config: Config):\n rep = ''\n langue = config.langue.dictionnaire['affichage_dev']\n while rep != 'Q':\n rec = Rectangle(50, 12, langue[6], config.cuisine.liste_stock(0, 8))\n rep = reponses(rec.aff(True), (), True)\n reg = match(r'(?P.+) (?P.+) (?P.+) (?P\\d+)/(?P\\d+)/(?P\\d+)$', rep)\n if reg is not None:\n config.cuisine.add_ingredients(IngredientStock(reg['nom'], int(reg['nombre']), reg['unite'],\n Date(int(reg['jour']), int(reg['mois']), int(reg['annee']))))\n print('ok')\n\n\ndef aff_utilisateur(config: Config):\n rep = ''\n langue = config.langue.dictionnaire['affichage_dev']\n while rep != 'Q':\n liste = [langue[7]]\n utilisateur_actuel = trouveur_de_config()\n liste_utilisateurs = chercheur_de_toutes_les_configs()\n liste.extend(liste_utilisateurs)\n for i in range(1, len(liste)):\n if liste[i] == utilisateur_actuel.nom_utilisateur:\n liste[i] = '-' + str(i - 1) + ' ' + liste[i]\n else:\n liste[i] = ' ' + str(i - 1) + ' ' + liste[i]\n liste.extend([langue[8], langue[9]])\n aff = Rectangle(50, 10, langue[10], liste).aff(True)\n rep = reponses(aff, ('R', 'C', 'U'), False)\n if len(rep) == 2 and int(rep[0]) < len(liste_utilisateurs):\n if rep[1] == 'R':\n aff = Rectangle(100, 2, langue[11], [langue[12]]).aff(True)\n rep2 = reponses(aff, (), False)\n if rep2 == 'O':\n supprimer_config(liste_utilisateurs[int(rep[0])])\n elif rep[1] == 'U':\n createur_de_config(liste_utilisateurs[int(rep[0])])\n elif rep == 'C':\n nom = input(langue[13])\n aff = [langue[14]]\n liste_cuisines = chercheur_de_cuisines()\n for i, cuis in enumerate(liste_cuisines):\n aff.append(f'{i} {cuis}')\n aff.append(langue[15])\n cuisine = None\n while cuisine is None:\n rec = Rectangle(100, 10, langue[16], aff)\n rep = reponses(rec.aff(True), (), False)\n if rep == 'N':\n nom_cuisine = input(langue[17])\n cuisine = Cuisine(nom_cuisine, [], [])\n cuisine_saver(cuisine)\n\n else:\n cuisine = cuisine_opener(liste_cuisines[int(rep)])\n config = Config(nom, cuisine, config.langues)\n createur_de_config(config.nom_utilisateur)\n saver_de_config(config)\n config = trouveur_de_config()\n return config\n\n\ndef aff_configuration(config: Config):\n langue = config.langue.dictionnaire['affichage_dev']\n aff = Rectangle(50, 10, langue[18], [langue[19]]).aff(True)\n rep = reponses(aff, ('U', 'M'), True)\n if rep == 'U':\n config = aff_utilisateur(config)\n return config\n","sub_path":"programmes/affichage_dev.py","file_name":"affichage_dev.py","file_ext":"py","file_size_in_byte":6867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"338261146","text":"import logging\nimport numpy as np\nfrom sklearn.ensemble import IsolationForest\n\nclass iForest():\n MAX_N_SAMPLES = 32000\n\n def __init__(self, max_number_of_samples=None, outliers_fraction=0.1, n_estimators=100):\n self.max_number_of_samples = max_number_of_samples if max_number_of_samples else self.MAX_N_SAMPLES\n self.outliers_fraction = outliers_fraction\n self.n_estimators = n_estimators\n self.classifier = 
IsolationForest(n_estimators=self.n_estimators,max_samples=self.max_number_of_samples,\n contamination=self.outliers_fraction, random_state=None)\n\n def train(self, train_data):\n\n n_train_samples = train_data.shape[0]\n train_data = train_data.reshape(n_train_samples, -1)\n\n if n_train_samples > self.max_number_of_samples:\n logging.warning(\n 'Discarding training data: using {} of {} chunks.'.format(self.max_number_of_samples, n_train_samples))\n train_data = self._subsample_data(train_data)\n\n self.classifier.fit(train_data)\n\n def predict(self, test_sample):\n data = test_sample.reshape(test_sample.data.shape[0], -1)\n prediction = self.classifier.predict(data)\n return prediction\n\n def decision_function(self, test_sample):\n data = test_sample.data.reshape(test_sample.data.shape[0], -1)\n anomaly_score = self.classifier.decision_function(data)\n return np.squeeze(anomaly_score)\n\n def _subsample_data(self, data):\n return data[np.random.choice(data.shape[0], self.max_number_of_samples, replace=False)]\n\n @property\n def configuration(self):\n return {\n 'max_number_of_samples': self.max_number_of_samples,\n 'outliers_fraction': self.outliers_fraction,\n 'n_estimators': self.n_estimators\n }\n","sub_path":"models/isolationforest.py","file_name":"isolationforest.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"327864201","text":"# coding:utf-8\r\n# /home/computer/lcy/prcv/experiment/attribute/saved_model/all/1_model.pkl\r\n\r\nimport os\r\nfrom PIL import Image\r\nimport numpy as np\r\n# np.set_printoptions(threshold='nan')\r\nimport scipy.io\r\nimport torchvision.transforms as transforms\r\nimport h5py\r\nimport matplotlib\r\n\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom model.resnet50_original import ft_net\r\nimport torch\r\nimport csv\r\n\r\ntransform = transforms.Compose([\r\n transforms.Resize((384, 128)),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n]\r\n)\r\n\r\ntest_img_path = '/home/computer/lcy/prcv/data/Attributes/test_images/'\r\ncsv_path = '/home/computer/lcy/prcv/experiment/attribute/csv/'\r\n\r\n\r\ndef TestAndQuery_namelist():\r\n test_mat_path = '/home/computer/lcy/prcv/data/Attributes/RAP_attributes_data.mat'\r\n data = scipy.io.loadmat(test_mat_path)\r\n data = data['RAP_attributes_data']\r\n\r\n # test_data = data['test_set'][0][0]\r\n # print(len(test_data))\r\n\r\n # all_test_name = []\r\n # for i in range(len(test_data)):\r\n # temp_test_name = test_data[i][0][0]\r\n # # print(temp_test_name)\r\n # all_test_name.append(temp_test_name)\r\n\r\n all_query_attrname = []\r\n for line in open(\"/home/computer/lcy/prcv/data/Attributes/attr_query_index.txt\"):\r\n line = line.strip()\r\n line = line.strip('\\n')\r\n line = line.split(' ')\r\n line = np.asarray(line, dtype=np.int64)\r\n # print(line)\r\n\r\n all_query_attrname.append(line)\r\n\r\n # print(all_query_attrname)\r\n\r\n # print(len(all_test_name))\r\n print(len(all_query_attrname))\r\n\r\n return all_query_attrname\r\n\r\n\r\ndef csv(all_query_attrname, all_test_name, all_test_feature):\r\n male = np.expand_dims(1 - all_test_feature[:, 0], axis=0)\r\n male = male.T\r\n all_test_feature = np.hstack((male, all_test_feature))\r\n test_len = all_test_feature.shape[0]\r\n print(all_test_feature.shape)\r\n with open(os.path.join(csv_path, 'query_results.csv'), 'w', newline='') as csvFile:\r\n wr = 
csv.writer(csvFile)\r\n\r\n\r\n\r\n# def predict_query_result(all_query_attrname):\r\n# model = ft_net\r\n# model.load_state_dict(torch.load('./saved_model/1_354_model.pkl'))\r\n# model.cuda()\r\n# model.eval()\r\n#\r\n# return all_query_attrname\r\n\r\n# def predict_result(all_test_name):\r\n# model = ft_net()\r\n# model.load_state_dict(torch.load('./saved_model/1_354_model.pkl'))\r\n# model.cuda()\r\n# model.eval()\r\n#\r\n# all_test_feature = []\r\n#\r\n# for index, img_name in enumerate(all_test_name):\r\n# print(img_name)\r\n# img = Image.open(os.path.join(test_img_path, img_name))\r\n# img = transform(img)\r\n# img = img.unsqueeze(0)\r\n# img = img.cuda()\r\n# with torch.no_grad():\r\n# output = model(img)\r\n# # print(output)\r\n# prediction = []\r\n# for attr in output:\r\n# # print(attr)\r\n# attr1 = attr.cpu().detach().numpy()\r\n# attr2 = attr1[0][1]\r\n# # print(attr2)\r\n# prediction.append(attr2)\r\n# prediction = np.asarray(prediction, dtype=np.float32)\r\n# # print(prediction)\r\n#\r\n# all_test_feature.append(prediction)\r\n# print('---------------------------------------------------------------')\r\n#\r\n# all_test_feature = np.asarray(all_test_feature, dtype=np.float32)\r\n# # print(all_test_feature.shape)\r\n#\r\n# f1 = h5py.File(os.path.join(csv_path, 'feature.h5'), 'w')\r\n# f1['all_test_feature'] = all_test_feature\r\n# f1.close()\r\n#\r\n# return all_test_feature\r\n\r\n# def load_featureh5(featureh5_name):\r\n# f2 = h5py.File(featureh5_name, 'r')\r\n# all_test_feature = f2['all_test_feature'][()]\r\n# f2.close()\r\n#\r\n# return all_test_feature\r\n\r\n\r\n# def csv1(all_test_name, all_test_feature):\r\n# # print(all_test_feature)\r\n#\r\n# male = 1 - all_test_feature[:, 0]\r\n# male = np.expand_dims(male, axis=0)\r\n# male = male.T\r\n# all_test_feature = np.hstack((male, all_test_feature))\r\n# test_len = all_test_feature.shape[0]\r\n# # print(all_test_feature.shape)\r\n#\r\n#\r\n# with open(os.path.join(csv_path, 'attr_recognition.csv'), 'w', newline='') as csvFile:\r\n# wr = csv.writer(csvFile)\r\n#\r\n# first_row = []\r\n# first_row.append('')\r\n# for i1 in range(55):\r\n# first_row.append('')\r\n# wr.writerow(first_row)\r\n#\r\n# for index, temp_test_name in enumerate(all_test_name):\r\n# temp_row = []\r\n# print(temp_test_name)\r\n# temp_row.append(str(temp_test_name))\r\n#\r\n# for i in range(55):\r\n# temp_attr_predict = all_test_feature[index][i]\r\n# temp_row.append(str(temp_attr_predict))\r\n#\r\n# print(len(temp_row))\r\n# wr.writerow(temp_row)\r\n# print('-----------------------------------------------------')\r\n#\r\n\r\nif __name__ == '__main__':\r\n all_query_attrname = TestAndQuery_namelist()\r\n # all_test_feature = predict_result(all_test_name)\r\n # all_test_feature = load_featureh5(os.path.join(csv_path,'feature.h5'))\r\n # csv1(all_test_name, all_test_feature)\r\n","sub_path":"attribute/LauYi_output_csv.py","file_name":"LauYi_output_csv.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"12887987","text":"def modify_interface(self, modify):\n '\\n Modify the interface.\\n '\n options = {\n 'interface-name': self.parameters['interface_name'],\n 'vserver': self.parameters['vserver'],\n }\n self.set_options(options, modify)\n interface_modify = netapp_utils.zapi.NaElement.create_node_with_children('net-interface-modify', **options)\n try:\n self.server.invoke_successfully(interface_modify, enable_tunneling=True)\n except netapp_utils.zapi.NaApiError 
as err:\n        self.module.fail_json(msg=('Error modifying interface %s: %s' % (self.parameters['interface_name'], to_native(err))), exception=traceback.format_exc())","sub_path":"Data Set/bug-fixing-5/98da0e6c99f4e62398fb1e99f7eba99dd5fbfba2--bug.py","file_name":"98da0e6c99f4e62398fb1e99f7eba99dd5fbfba2--bug.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"160705669","text":"import requests as r\n\nimage_path = '/Volumes/Kabaad/falcon/input_jpg.jpg'\nimage_name = 'drawing'\nurl = 'http://localhost:5000/images'\n\nfiles = {'image': (image_name, open(image_path, 'rb'))}\nresp = r.post(url, files=files)\n\nprint(resp.text)","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"6464488","text":"from config import database, helpers, db_context\nimport base\nhelpers.extent_model(\n    \"TMPER_AprPeriodEmpOut\",\n    \"base\",\n    [['apr_period', 'apr_year', 'employee_code']],\n    apr_period=(\"numeric\", True),\n    apr_year=(\"numeric\", True),\n    employee_code=(\"text\"),\n    department_code=(\"text\"),\n    job_w_code=(\"text\"),\n    reason=(\"text\"),\n    note=(\"text\"),\n    created_on=(\"date\"),\n    created_by=(\"text\"),\n    modified_on=(\"date\"),\n    modified_by=(\"text\")\n    )\ndef TMPER_AprPeriodEmpOut():\n    ret = db_context.collection(\"TMPER_AprPeriodEmpOut\")\n    return ret","sub_path":"apps/performance/api/models/TMPER_AprPeriodEmpOut.py","file_name":"TMPER_AprPeriodEmpOut.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"334882500","text":"import gzip, json, os, sys\nimport re\nfrom collections import defaultdict\nimport argparse\n\n\"\"\"\nThis script operates on the gzipped Wikidata JSON dumps: it processes each entity, extracts its labels and aliases, and performs some simple cleaning on them. The script outputs a dictionary mapping each cleaned label to the list of Qnode ids that carry it; it currently covers only the 'en' (English) labels in Wikidata. Add more languages to the languages list to process labels of other languages as well.
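A hypothetical invocation (the dump and output file names are illustrative, not from the original script):

    python glossary_label_map_gen.py -w latest-all.json.gz -l label_map.json -g glossary.txt

The label-to-Qnode map is written as JSON to the -l path, and the glossary is written to the -g path, one entry per line.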
\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-w\",\"--wikidatapath\")\nparser.add_argument(\"-l\",\"--labelout\")\nparser.add_argument(\"-g\",\"--glossaryout\")\n\nargs = parser.parse_args()\n\nmapOflabels = defaultdict(list)\n\n# Add all the languages that we want to extract here\nlanguages = ['en']\nglossary = set()\ndef clean(string: str):\n string = ' '.join(string.split()).strip()\n return string\n#logfile = open('run.log', 'a')\nprint(\"Starting file processing ......\")\nwith gzip.GzipFile(args.wikidatapath, 'r') as fin:\n for linecount,line in enumerate(fin):\n\n try:\n js = line.strip().decode('utf-8')[:-1]\n data = json.loads(js)\n temp_glossary = set()\n # Extract labels based on languages set initially\n for lang in languages:\n if lang in data['labels']:\n lb = clean(data['labels'][lang]['value'])\n # Add it to glossary as well\n temp_glossary.add(lb)\n # Add to glossary all the labels and alsoKnownAs words\n if lang in data['aliases']:\n for alias in data['aliases'][lang]:\n temp_glossary.add(clean(alias['value']))\n\n\n # Finally add everything to globals\n glossary.update(temp_glossary)\n for key in temp_glossary:\n mapOflabels[key].append(data['id'])\n\n except:\n continue\n\nprint(\"Writing output to file...\")\n\nwith open(args.labelout,\"w\") as out:\n out.write(json.dumps(mapOflabels))\n\nwith open(args.glossaryout,\"w\") as outfile:\n for word in glossary:\n outfile.write(word + \"\\n\")\n\n\n","sub_path":"scripts/wikidata_processing/glossary_label_map_gen.py","file_name":"glossary_label_map_gen.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"66927185","text":"#Source of inspiration: https://www.codementor.io/@sagaragarwal94/building-a-basic-restful-api-in-python-58k02xsiq\nimport re\n\nimport requests\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api\nimport random\n\napp = Flask(__name__)\napi = Api(app)\n\ndef create_result_dict(search_phrase, related_words, status):\n return {\"search_phrase\": search_phrase, \"related_words\": related_words, \"status\": status}\n\ndef get_request_args(request):\n request_word_param = 'word'\n no_valid_word = \"No valid search word was entered. Enter a search word in the format of word=SEARCH_PHRASE. Separate spaces in your phrase with a '+'\"\n request_number_of_words_param = 'number_of_words'\n no_valid_number = \"No valid number of words was entered. 
Enter a number of words in the format of number_of_words=NUMBER_OF_WORDS\"\n invalid = 'invalid'\n\n try:\n word = request.args.get(request_word_param)\n except:\n return invalid, create_result_dict(\"\", \"\", no_valid_word)\n\n try:\n number_of_words = int(request.args.get(request_number_of_words_param))\n except:\n return invalid, create_result_dict(\"\", \"\", no_valid_number)\n\n return word, number_of_words\n\ndef get_related_words_from_api(word):\n response = requests.get(f'https://api.datamuse.com/words?ml={word}&md=fr')\n return response.json()\n\ndef get_status(response_length, number_of_words):\n status = \"\"\n if response_length == 0:\n status = \"No search matches\"\n elif response_length < number_of_words:\n status = \"The number of available words was less than requested\"\n elif response_length >= number_of_words:\n status = \"Successful\"\n return status\n\ndef get_number_of_words(response_length, number_of_words):\n if response_length < number_of_words:\n number_of_words = response_length\n return number_of_words\n\ndef get_random_num(response_length):\n return random.randint(0, response_length - 1)\n\ndef get_frequency_random_num(response_length, parsed_response):\n frequency_index = 2\n random_num = get_random_num(response_length)\n try:\n frequency = re.search(r'f:([\\d\\.]*)', parsed_response[random_num]['tags'][frequency_index])\n except:\n frequency = 0\n if frequency:\n frequency = float(frequency.group(1))\n else:\n frequency = 0\n return frequency, parsed_response[random_num]['word']\n\nclass WordGenerator(Resource):\n\n def get(self):\n word, number_of_words = get_request_args(request)\n\n if word == 'invalid':\n return number_of_words\n\n parsed_response = get_related_words_from_api(word)\n response_length = len(parsed_response)\n\n status = get_status(response_length, number_of_words)\n number_of_words = get_number_of_words(response_length, number_of_words)\n\n list_of_related_words = []\n if parsed_response != []:\n\n #Continue picking random words from the list of responses until list is full with number_of_words\n while len(list_of_related_words) < number_of_words:\n frequency, random_word = get_frequency_random_num(response_length, parsed_response)\n\n #If the returned phrase has a space or the frequency is less than 1 in a million\n while \" \" in random_word or frequency < 1:\n frequency, random_word = get_frequency_random_num(response_length, parsed_response)\n\n #See if word is in the list already\n if random_word not in list_of_related_words:\n for curr_word in list_of_related_words:\n\n #Also check if the word is contained in an already chosen word (i.e. 
hair, haircut)\n                        if random_word in curr_word:\n                            break\n                    else:\n                        # for/else: append only when no already-chosen word contains random_word\n                        list_of_related_words.append(random_word)\n\n        return {\"search_phrase\": word, \"related_words\": list_of_related_words, \"status\": status}\n\napi.add_resource(WordGenerator, '/word_generator')\n\nif __name__ == '__main__':\n    app.run()","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"216386323","text":"import re\nfrom os import path\nfrom tempfile import NamedTemporaryFile\nfrom urllib.request import urlretrieve\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom markdown import markdown\nfrom mdx_gfm import GithubFlavoredMarkdownExtension as GitHubMarkdown\n\n\nclass Command(BaseCommand):\n    help = 'Download the updated version of CEAP variables description table'\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            '--source', '-s', dest='source', default=None,\n            help='Data directory of Serenata de Amor (datasets source)'\n        )\n\n    def handle(self, *args, **options):\n        origin = 'https://{}.amazonaws.com/{}/{}-ceap-datasets.md'.format(\n            settings.AMAZON_REGION,\n            settings.AMAZON_BUCKET,\n            settings.AMAZON_S3_CEAPTRANSLATION_DATE\n        )\n\n        target = path.join(\n            settings.BASE_DIR,\n            'jarbas',\n            'layers',\n            'static',\n            'ceap-datasets.html'\n        )\n        tmp = NamedTemporaryFile()\n\n        self.stdout.write('Downloading markdown from ' + origin)\n        urlretrieve(origin, filename=tmp.name)\n\n        self.stdout.write('Converting markdown to HTML')\n        with open(tmp.name) as md:\n            source = markdown(md.read(), extensions=[GitHubMarkdown()])\n\n        self.stdout.write('Saving HTML to ' + target)\n        with open(target, 'w') as html:\n            style = \"https://cdnjs.cloudflare.com/ajax/libs/github-markdown-css/2.4.1/github-markdown.min.css\"\n            body_styles = ''.join((\n                'box-sizing: border-box;',\n                'min-width: 200px;',\n                'max-width: 980px;',\n                'margin: 0 auto;',\n                'padding: 45px;'\n            ))\n            structure = \"\"\"\n            <!DOCTYPE html>\n            <html>\n            <head>\n            <meta charset=\"utf-8\">\n            <title>{}</title>\n            <link rel=\"stylesheet\" href=\"{}\">\n            </head>\n            <body class=\"markdown-body\" style=\"{}\">\n            {}\n            </body>\n            </html>\n            \"\"\"\n\n            title = 'Quota for Exercising Parliamentary Activity (CEAP)'\n            full_source = structure.format(title, style, body_styles, source)\n            html.write(minify(full_source))\n\n\ndef minify(html):\n    return re.compile(r'(^[\\s]*)|\\n[\\s]*').sub('', html)\n","sub_path":"jarbas/chamber_of_deputies/management/commands/ceapdatasets.py","file_name":"ceapdatasets.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"362464254","text":"# Copyright (C) 2007 Matthew Neeley\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"\n### BEGIN NODE INFO\n[info]\nname = DG535\nversion = 2.0\ndescription = Pulse sequencer box.\n\n[startup]\ncmdline = %PYTHON% %FILE%\ntimeout = 20\n\n[shutdown]\nmessage = 987654321\ntimeout = 5\n### END NODE INFO\n\"\"\"\n\nfrom labrad import types as T\nfrom labrad.server import setting\nfrom labrad.gpib import GPIBManagedServer, GPIBDeviceWrapper\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nCHANNELS = 'T T0 A B AB C D CD ALL'.split()\nALL = CHANNELS.index('ALL')\nMODES = 'TTL NIM ECL VAR'.split()\n\ndef findString(key, ls):\n    if key is None:\n        key = 0\n    elif isinstance(key, str):\n        key = ls.index(key)\n    elif isinstance(key, int):  # Python 3: 'long' was merged into 'int'\n        if key < 0 or key >= len(ls):\n            raise Exception('Out of range.')\n    return key\n\ndef makeChannelCommand(cmd, channel, params, all_channels=[2,3,4,5,6,7]):\n    if params:\n        params = ',' + params\n    if channel == ALL:\n        channel = all_channels\n    else:\n        channel = [channel]\n    cmds = ['%s %d%s' % (cmd, c, params) for c in channel]\n    return ';'.join(cmds)\n\nclass DG535Server(GPIBManagedServer):\n    name = 'DG535'\n    deviceName = 'SRS DG535'\n    deviceIdentFunc = 'identify_device'\n    \n    def initContext(self, c):\n        c['channel'] = 2\n        c['anchor'] = 1\n\n    @setting(1000, server='s', address='s', idn='s')\n    def identify_device(self, c, server, address, idn=None):\n        try:\n            yield self.client.refresh()\n            p = self.client[server].packet()\n            p.address(address)\n            p.timeout(1)\n            p.write('ES')\n            p.read()\n            resp = yield p.send()\n            returnValue(self.deviceName)\n        except Exception:\n            pass\n    \n    @setting(11, 'Select Channel', chan=['s', 'w'], returns=['w'])\n    def select_channel(self, c, chan=2):\n        ch = c['channel'] = findString(chan, CHANNELS)\n        return ch\n\n    @setting(12, 'Select Delay Anchor', chan=['s', 'w'], returns=['w'])\n    def select_delay_anchor(self, c, chan=1):\n        ch = c['anchor'] = findString(chan, CHANNELS)\n        return ch\n\n    def doCommand(self, c, cmd, params):\n        dev = self.selectedDevice(c)\n        chan = c['channel']\n        cmd = makeChannelCommand(cmd, chan, params)\n        return dev.write(cmd)\n\n    @setting(20, 'Set Channel Delay', delay=['v[s]'], returns=['b'])\n    def set_channel_delay(self, c, delay):\n        params = '%d,%g' % (c['anchor'], delay)\n        yield self.doCommand(c, 'DT', params)\n        returnValue(True)\n\n    @setting(30, 'Set High Impedance', data=['b'], returns=['b'])\n    def set_high_impedance(self, c, data):\n        params = str(int(data))\n        yield self.doCommand(c, 'TZ', params)\n        returnValue(True)\n\n    @setting(31, 'Set Output Mode', mode=['s', 'w'], returns=['b'])\n    def set_output_mode(self, c, mode=0):\n        params = str(findString(mode, MODES))\n        yield self.doCommand(c, 'OM', params)\n        returnValue(True)\n\n    @setting(32, 'Set Output Amplitude', amp=['v[V]'], returns=['b'])\n    def set_output_amplitude(self, c, amp):\n        params = str(float(amp))\n        yield self.doCommand(c, 'OA', params)\n        returnValue(True)\n\n    @setting(33, 'Set Output Offset', off=['v[V]'], returns=['b'])\n    def set_output_offset(self, c, off):\n        params = str(float(off))\n        yield self.doCommand(c, 'OO', params)\n        returnValue(True)\n\n    @setting(34, 'Set Output Inversion', inv=['b'], returns=['b'])\n    def set_output_inversion(self, c, inv):\n        params = str(int(not inv))\n        yield self.doCommand(c, 'OP', params)\n        returnValue(True)\n\n__server__ = DG535Server()\n\nif __name__ == '__main__':\n    from labrad import util\n    
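    # util.runServer blocks here: it starts the event loop and serves DG535
    # requests until the LabRAD manager sends the shutdown message declared
    # in the node info block above.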
util.runServer(__server__)\n","sub_path":"dg535.py","file_name":"dg535.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"548664351","text":"# -*- encoding: utf-8 -*-\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport re\n\n# configuration\nBASE_URL = \"http://www.colgate.edu/academics/courseofferings\"\nSELECT_ALL_ID = \"btnDptAll\"\nSUBMIT_BUTTON_ID = \"ImageButton1\"\nNEXT_PAGE_ID = \"btnNext\"\nOUTPUT_FILE = \"courses.out\"\n\ndef main():\n with open(OUTPUT_FILE, 'w') as output:\n driver = webdriver.Firefox()\n driver.get(BASE_URL)\n # click on the Select All button\n driver.find_element_by_id(SELECT_ALL_ID).click()\n # click on the List Courses button\n driver.find_element_by_id(SUBMIT_BUTTON_ID).click()\n\n # as long as there is a Next link, keep parsing\n while NEXT_PAGE_ID in driver.page_source:\n data = parse_results(driver.page_source)\n output.write(data)\n driver.find_element_by_id(NEXT_PAGE_ID).click()\n\n # parse the last page\n data = parse_results(driver.page_source)\n output.write(data)\n\n driver.close()\n output.close()\n\ndef parse_results(html):\n out = \"\"\n soup = BeautifulSoup(html)\n table = soup.find_all(\"tr\", \"bodyText\")\n for row in table:\n count = 0 # Only need the first 9 things\n data = []\n for cell in row.contents[1:]:\n count += 1\n if count > 9:\n continue\n if cell.string is None:\n inner = cell.div\n content = str(inner.string).strip()\n else:\n content = str(cell.string).strip()\n data.append(content)\n out = out + str(data)+\"\\n\"\n return out\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"173551068","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nf = open(\"log_server.log\")\nNO_QUEUES = 8\n\ntime = list()\nbuffer = list()\ndropps_memory = list()\ndropps_codel = list()\ncwin = list()\ncwintime = list()\n\nlines = f.readlines()\n\nnormalizeTime = 0\nfor l in lines:\n if\"TIME:\" in l:\n normalizeTime = float(l.strip().split(\":\")[1]) \n break\n\nno_dropped_memory = 0\ndrop_time_memory = list()\nno_dropped_codel = 0\ndrop_time_codel = list()\n\nqueues = {}\nfor i in range(NO_QUEUES):\n queues[i] = list()\n\nsjournTimeQueues = {}\nfor i in range(NO_QUEUES):\n sjournTimeQueues[i] = list()\nsjournTime = {}\nfor i in range(NO_QUEUES):\n sjournTime[i] = list()\n\ncurrTime = 0\nfor l in lines:\n if \"TIME:\" in l:\n currTime = (float(l.strip().strip(\":\").split()[1]) - normalizeTime) / 1000000\n time.append(currTime)\n\n if \"BUFFER:\" in l:\n buffer.append(float(l.strip().strip(\":\").split()[1]))\n\n if \"DROPPED_MEMORY:\" in l:\n if no_dropped_memory < float(l.strip().strip(\":\").split()[1]):\n dropps_memory.append(float(l.strip().strip(\":\").split()[1]))\n drop_time_memory.append(currTime) \n no_dropped_memory = float(l.strip().strip(\":\").split()[1])\n \n if \"DROPPED_CODEL:\" in l:\n #if no_dropped_codel < float(l.strip().strip(\":\").split()[1]):\n tmp =l.strip().split()\n dropps_codel.append(float(tmp[1]))\n drop_time_codel.append((float(tmp[2])- normalizeTime)/1000000) \n no_dropped_codel = float(tmp[1])\n \n if l.strip().startswith(\"Q:\"):\n tmp = l.strip().split()\n queues[int(tmp[1])].append(float(tmp[3]))\n \n if \"cwin\" in l:\n cwin.append(float(l.strip().split()[1]))\n 
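        # field 2 of a "cwin" log line is a timestamp in microseconds; the next
        # statement shifts it by the first TIME entry and converts it to seconds
        # so it lines up with the other series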
cwintime.append((float(l.strip().split()[2]) - normalizeTime) / 1000000) \n    \n    if \"SJOURN_TIME\" in l:\n        tmp = l.strip().split()\n        sjournTimeQueues[int(tmp[3])].append(float(tmp[1])/ 1000)\n        sjournTime[int(tmp[3])].append((float(tmp[2])- normalizeTime) / 1000000)\n\n\n    \n# print(\"DROPPED MEMORY= \" , no_dropped_memory)\n# print(\"DROPPED CODEL= \" , no_dropped_codel)\n\n#plt.plot(cwintime, cwin, linewidth=0.5)\n#plt.plot(time, buffer, linewidth=0.1)\n\nfig, axs = plt.subplots(1)\n##SETUP PLOT FOR QUEUE MANAGEMENT##\n# for i,q in enumerate(queues):\n#     axs[0].plot(time, queues[q], linewidth=1, label=\"Flow \" + str(i))\n#     axs[0].legend(loc=\"upper right\")\n# axs[0].axhline(y=0, color = \"black\")\n# axs[0].set(xlabel='Time (s)', ylabel='Queue Size in Bytes')\n# axs[0].set_xlim(left=0)\n\n# axs[0].plot(drop_time_memory, dropps_memory, \"rx\", linewidth=1)\n# axs[0].plot(drop_time_codel, dropps_codel, \"rx\", linewidth=1, label=\"CoDel Drop\")\n# axs[0].legend(loc=\"upper right\")\n# axs[0].set_title(\"Queue Evolution Over Time\")\n\n##SETUP PLOT FOR SOJOURN TIME##\n# for i,s in enumerate(sjournTimeQueues):\n#     axs[0].plot(sjournTime[s], sjournTimeQueues[s], linewidth=1, label=\"Flow \" + str(i) )\n#     axs[0].legend(loc=\"upper right\")\n# axs[0].axhline(y=5, label='Target Time',color=\"black\")\n# axs[0].legend(loc=\"upper right\")\n# axs[0].axhline(y=0, color = \"black\")\n# axs[0].set(xlabel='Time (s)', ylabel='Sojourn Time (ms)')\n# axs[0].set_xlim(left=0)\n# axs[0].set_ylim(top=100, bottom=-5)\n\n# axs[0].plot(drop_time_memory, dropps_memory, \"rx\", linewidth=1)\n# axs[0].plot(drop_time_codel, dropps_codel, \"rx\", linewidth=1, label=\"CoDel Drop\")\n# axs[0].legend(loc=\"upper right\")\n# axs[0].set_title(\"Sojourn Time Evolution\")\n\n\nfor i,s in enumerate(sjournTimeQueues):\n    plt.plot(sjournTime[s], sjournTimeQueues[s], linewidth=1, label=\"CoDel Queue \" + str(i) )\n    plt.legend(loc=\"upper right\", fontsize=18)\nplt.axhline(y=5, label='Target Time (5ms)',color=\"black\")\nplt.legend(loc=\"upper right\", fontsize=18)\nplt.axhline(y=0, color = \"black\")\nplt.xlabel('Time (s)', fontsize=18) \nplt.ylabel('Sojourn Time (ms)', fontsize=18)\n#plt.set_xlim(left=0)\n#plt.set_ylim(top=100, bottom=-5)\n\nplt.plot(drop_time_memory, dropps_memory, \"rx\", linewidth=1)\nplt.plot(drop_time_codel, dropps_codel, \"rx\", linewidth=1, label=\"CoDel Drop\")\nplt.legend(loc=\"upper right\", fontsize=18)\n# plt.title(\"Frame Sojourn Time Evolution over Per Queue O\")\n\n\n\n# plt.plot(time, reserved_frames, linewidth=5, label=\"QUIC reserved_frames\")\n# plt.legend(loc=\"upper left\", prop={\"size\":18})\n\n# plt.plot(time, retry_frames, linewidth=5, label=\"QUIC retry_frames\")\n# plt.legend(loc=\"upper left\", prop={\"size\":18})\n\n# plt.plot(time, block_queue_cc, linewidth=5, label=\"Datagram block_queue_cc\")\n# plt.legend(loc=\"upper left\", prop={\"size\":18})\n\n# plt.plot(time, block_queue_non_cc, linewidth=5, label=\"Datagram block_queue_non_cc\")\n# plt.legend(loc=\"upper left\", prop={\"size\":18})\n\n# plt.axhline(y=0, color = \"black\")\n# plt.xlabel('Time (s)',fontsize=18)\n# plt.ylabel('Number of frames queued', fontsize=18)\n\n#plt.legend(loc=\"upper right\")\n#plt.suptitle(\"Queue Evolution Over Time\")\nplt.rcParams.update({'font.size': 
22})\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.show()\n\n\n\n\nplt.show()\nf.close()","sub_path":"qplot.py","file_name":"qplot.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"34180128","text":"import numpy as np\n\n\ndef get_repeat_length_in_history(history, tol=10 ** -5):\n \"\"\"\n Find any repeat of last moves in history. Including repeats of just one element \n (so this can be used to check simple convergence as well as \"cyclic convergence\").\n\n Parameters\n ==========\n\n history: any iterator of numpy arrays.\n \"\"\"\n size = len(history)\n for cycle_size in range(1, int(size / 2) + 1):\n if np.allclose(\n history[-cycle_size:],\n history[-2 * cycle_size : -cycle_size],\n atol=tol,\n ):\n return len(history[-cycle_size:])\n return float(\"inf\")\n\n\ndef get_evolutionary_best_response(\n opponents,\n best_response_function,\n tol=10 ** -5,\n initial=np.array([1, 1, 1, 1]),\n):\n\n history = [initial]\n best_response = best_response_function(opponents + history)\n history.append(best_response)\n\n repeat_length = get_repeat_length_in_history(history, tol=tol)\n while repeat_length >= float(\"inf\"):\n\n best_response = best_response_function(opponents + [history[-1]])\n print(\"Next generation.\")\n history.append(best_response)\n repeat_length = get_repeat_length_in_history(history, tol=tol)\n\n return best_response, history, repeat_length\n","sub_path":"src/opt_mo/evolutionary_best_response.py","file_name":"evolutionary_best_response.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"136689225","text":"# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# |_ |~) _ _| _ /~\\ _ |.\n# |_)\\/ |_)(_|(_|| \\_/|_|(_|||\n# /\n# ____________ ______\n# / __ `\\ / /\n# | \\/ / / /\n# |______ / / /\n# |____/ / /\n# _____________ / /\n# \\ / / /\n# \\ / / /\n# \\_______/ / /\n# ______ / /\n# \\ / / /\n# \\ / / /\n# \\/ / /\n# / /\n# / /\n# \\ /\n# \\ /\n# \\/\n# _\n# \\ / _ __|_. _ _ |_)\n# \\/ (/_| | |(_(_|| \\/\n# /\n# VerticaPy is a Python library with scikit-like functionality for conducting\n# data science projects on data stored in Vertica, taking advantage Vertica’s\n# speed and built-in analytics and machine learning features. It supports the\n# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize\n# data transformation operations, and offers beautiful graphical options.\n#\n# VerticaPy aims to do all of the above. 
The idea is simple: instead of moving\n# data around for processing, VerticaPy brings the logic to the data.\n#\n#\n# Modules\n#\n# Standard Python Modules\nimport os, datetime\n\n# VerticaPy Modules\nimport verticapy, vertica_python\nfrom verticapy import vDataFrame\nfrom verticapy.connect import current_cursor\nfrom verticapy.utilities import *\nfrom verticapy.toolbox import *\nfrom verticapy.errors import *\n\n# ---#\ndef gen_dataset(features_ranges: dict, nrows: int = 1000):\n \"\"\"\n---------------------------------------------------------------------------\nGenerates a dataset using the input parameters.\n\nParameters\n----------\nfeatures_ranges: dict,\n Dictionary including the features types and ranges.\n For str : The subdictionary must include two keys: 'type' must\n be set to 'str' and 'value' must include the feature\n categories.\n For int : The subdictionary must include two keys: 'type' must\n be set to 'int' and 'range' must include two integers\n that represent the lower and the upper bound.\n For float : The subdictionary must include two keys: 'type' must\n be set to 'float' and 'range' must include two floats\n that represent the lower and the upper bound.\n For date : The subdictionary must include two keys: 'type' must\n be set to 'date' and 'range' must include the start\n date and the number of days after.\n For datetime : The subdictionary must include two keys: 'type' must\n be set to 'date' and 'range' must include the start\n date and the number of days after.\nnrows: int, optional\n The maximum number of rows in the dataset.\n\nReturns\n-------\nvDataFrame\n Generated dataset.\n \"\"\"\n # Saving information to the query profile table\n save_to_query_profile(\n name=\"gen_dataset\",\n path=\"datasets\",\n json_dict={\"features_ranges\": features_ranges, \"nrows\": nrows,},\n )\n # -#\n\n version(condition=[9, 3, 0])\n check_types([(\"features_ranges\", features_ranges, [dict]), (\"nrows\", nrows, [int])])\n\n sql = []\n\n for param in features_ranges:\n\n if features_ranges[param][\"type\"] == str:\n\n val = features_ranges[param][\"values\"]\n if isinstance(val, str):\n sql += [f\"'{val}' AS \\\"{param}\\\"\"]\n else:\n n = len(val)\n val = \", \".join([\"'\" + str(elem) + \"'\" for elem in val])\n sql += [f'(ARRAY[{val}])[RANDOMINT({n})] AS \"{param}\"']\n\n elif features_ranges[param][\"type\"] == float:\n\n val = features_ranges[param][\"range\"]\n lower, upper = val[0], val[1]\n sql += [\n f\"({lower} + RANDOM() * ({upper} - {lower}))::FLOAT \" f'AS \"{param}\"'\n ]\n\n elif features_ranges[param][\"type\"] == int:\n\n val = features_ranges[param][\"range\"]\n lower, upper = val[0], val[1]\n sql += [f\"({lower} + RANDOM() * ({upper} - {lower}))::INT \" f'AS \"{param}\"']\n\n elif features_ranges[param][\"type\"] == datetime.date:\n\n val = features_ranges[param][\"range\"]\n start_date, number_of_days = val[0], val[1]\n sql += [\n f\"('{start_date}'::DATE + RANDOMINT({number_of_days})) \" f'AS \"{param}\"'\n ]\n\n elif features_ranges[param][\"type\"] == datetime.datetime:\n\n val = features_ranges[param][\"range\"]\n start_date, number_of_days = val[0], val[1]\n sql += [\n f\"('{start_date}'::TIMESTAMP + {number_of_days} \"\n f'* RANDOM()) AS \"{param}\"'\n ]\n\n elif features_ranges[param][\"type\"] == bool:\n\n sql += [f'RANDOMINT(2)::BOOL AS \"{param}\"']\n\n else:\n\n ptype = features_ranges[param][\"type\"]\n raise ParameterError(f\"Parameter {param}: Type {ptype}\" \"is not supported.\")\n\n sql = \", \".join(sql)\n sql = (\n f\"(SELECT {sql} FROM 
(SELECT tm FROM (SELECT '03-11-1993'\"\n \"::TIMESTAMP + INTERVAL '1 second' AS t UNION ALL SELECT\"\n f\" '03-11-1993'::TIMESTAMP + INTERVAL '{nrows} seconds' AS\"\n \" t) x TIMESERIES tm AS '1 second' OVER(ORDER BY t)) y) z\"\n )\n\n return vDataFrameSQL(sql)\n\n\n# ---#\ndef gen_meshgrid(features_ranges: dict):\n \"\"\"\n---------------------------------------------------------------------------\nGenerates a dataset using regular steps.\n\nParameters\n----------\nfeatures_ranges: dict,\n Dictionary including the features types and ranges.\n For str : The subdictionary must include two keys: 'type' must\n be set to 'str' and 'value' must include the feature\n categories.\n For int : The subdictionary must include two keys: 'type' must\n be set to 'int' and 'range' must include two integers\n that represent the lower and the upper bound.\n For float : The subdictionary must include two keys: 'type' must\n be set to 'float' and 'range' must include two floats\n that represent the lower and the upper bound.\n For date : The subdictionary must include two keys: 'type' must\n be set to 'date' and 'range' must include the start\n date and the number of days after.\n For datetime : The subdictionary must include two keys: 'type' must\n be set to 'date' and 'range' must include the start\n date and the number of days after.\n Numerical and date-like features must have an extra key in the \n dictionary named 'nbins' corresponding to the number of bins used to \n compute the different categories.\n\nReturns\n-------\nvDataFrame\n generated dataset.\n \"\"\"\n # Saving information to the query profile table\n save_to_query_profile(\n name=\"gen_meshgrid\",\n path=\"datasets\",\n json_dict={\"features_ranges\": features_ranges,},\n )\n # -#\n\n check_types([(\"features_ranges\", features_ranges, [dict])])\n\n sql = []\n\n for idx, param in enumerate(features_ranges):\n\n nbins = 100\n if \"nbins\" in features_ranges[param]:\n nbins = features_ranges[param][\"nbins\"]\n ts_table = (\n f\"(SELECT DAY(tm - '03-11-1993'::TIMESTAMP) AS tm FROM \"\n \"(SELECT '03-11-1993'::TIMESTAMP AS t UNION ALL SELECT\"\n f\" '03-11-1993'::TIMESTAMP + INTERVAL '{nbins} days' AS t)\"\n \" x TIMESERIES tm AS '1 day' OVER(ORDER BY t)) y\"\n )\n\n if features_ranges[param][\"type\"] == str:\n val = features_ranges[param][\"values\"]\n if isinstance(val, str):\n val = [val]\n val = \" UNION ALL \".join(\n [\n f\"\"\"(SELECT '{elem}' \n AS \\\"{param}\\\")\"\"\"\n for elem in val\n ]\n )\n sql += [f\"({val}) x{idx}\"]\n\n elif features_ranges[param][\"type\"] == float:\n val = features_ranges[param][\"range\"]\n lower, upper = val[0], val[1]\n h = (upper - lower) / nbins\n sql += [\n f'(SELECT ({lower} + {h} * tm)::FLOAT AS \"{param}\" '\n f\"FROM {ts_table}) x{idx}\"\n ]\n\n elif features_ranges[param][\"type\"] == int:\n val = features_ranges[param][\"range\"]\n lower, upper = val[0], val[1]\n h = (upper - lower) / nbins\n sql += [\n f'(SELECT ({lower} + {h} * tm)::INT AS \"{param}\" '\n f\"FROM {ts_table}) x{idx}\"\n ]\n\n elif features_ranges[param][\"type\"] == datetime.date:\n val = features_ranges[param][\"range\"]\n start_date, number_of_days = val[0], val[1]\n h = number_of_days / nbins\n sql += [\n f\"(SELECT ('{start_date}'::DATE + {h} * tm)::DATE\"\n f' AS \"{param}\" FROM {ts_table}) x{idx}'\n ]\n\n elif features_ranges[param][\"type\"] == datetime.datetime:\n val = features_ranges[param][\"range\"]\n start_date, number_of_days = val[0], val[1]\n h = number_of_days / nbins\n sql += [\n f\"(SELECT 
('{start_date}'::DATE + {h} * tm)::TIMESTAMP \"\n f'AS \"{param}\" FROM {ts_table}) x{idx}'\n ]\n\n elif features_ranges[param][\"type\"] == bool:\n sql += [\n f'((SELECT False AS \"{param}\") UNION ALL '\n f'(SELECT True AS \"{param}\")) x{idx}'\n ]\n\n else:\n ptype = features_ranges[param][\"type\"]\n raise ParameterError(\n f\"Parameter {param}: Type {ptype} \" \"is not supported.\"\n )\n\n sql = \"(SELECT * FROM {0}) x\".format(\" CROSS JOIN \".join(sql))\n\n return vDataFrameSQL(sql)\n\n\n# ---#\ndef load_dataset(\n schema: str, name: str, dtype: dict, copy_cols: list = [], dataset_name: str = \"\"\n):\n \"\"\"\n General Function to ingest a dataset\n \"\"\"\n # Saving information to the query profile table\n save_to_query_profile(\n name=\"load_\" + dataset_name,\n path=\"datasets\",\n json_dict={\"schema\": schema, \"name\": name,},\n )\n # -#\n\n check_types([(\"schema\", schema, [str]), (\"name\", name, [str])])\n\n try:\n\n vdf = vDataFrame(name, schema=schema)\n\n except:\n\n name = quote_ident(name)\n schema = \"v_temp_schema\" if not (schema) else quote_ident(schema)\n create_table(table_name=name, dtype=dtype, schema=schema)\n\n try:\n\n path = os.path.dirname(verticapy.__file__)\n if dataset_name in (\"laliga\",):\n path += f\"/data/{dataset_name}/*.json\"\n query = \"COPY {0}.{1} FROM {2} PARSER FJsonParser();\".format(\n schema, name, \"{}\"\n )\n else:\n path += f\"/data/{dataset_name}.csv\"\n if not (copy_cols):\n copy_cols = [quote_ident(col) for col in dtype]\n copy_cols = \"(\" + \", \".join(copy_cols) + \")\"\n query = (\n \"COPY {0}.{1}{2} FROM {3} DELIMITER ',' NULL '' \"\n \"ENCLOSED BY '\\\"' ESCAPE AS '\\\\' SKIP 1;\"\n ).format(schema, name, copy_cols, \"{}\")\n\n cur = current_cursor()\n\n if isinstance(cur, vertica_python.vertica.cursor.Cursor) and (\n dataset_name not in (\"laliga\",)\n ):\n\n query = query.format(\"STDIN\")\n executeSQL(query, title=\"Ingesting the data.\", method=\"copy\", path=path)\n\n else:\n\n query = query.format(f\"LOCAL '{path}'\")\n executeSQL(query, title=\"Ingesting the data.\")\n\n executeSQL(\"COMMIT;\", title=\"Commit.\")\n vdf = vDataFrame(name, schema=schema)\n\n except:\n\n drop(schema + \".\" + name, method=\"table\")\n raise\n\n return vdf\n\n\n#\n#\n# ---#\ndef load_airline_passengers(schema: str = \"public\", name: str = \"airline_passengers\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the airline passengers dataset into the Vertica database. \nThis dataset is ideal for time series and regression models. If a table \nwith the same name and schema already exists, this function will create \na vDataFrame from the input relation.\n\nParameters\n----------\nschema: str, optional\n Schema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n Name of the new relation.\n\nReturns\n-------\nvDataFrame\n the airline passengers vDataFrame.\n \"\"\"\n return load_dataset(\n schema,\n name,\n {\"date\": \"Date\", \"passengers\": \"Integer\"},\n dataset_name=\"airline_passengers\",\n )\n\n\n# ---#\ndef load_amazon(schema: str = \"public\", name: str = \"amazon\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the amazon dataset into the Vertica database. This dataset is ideal\nfor time series and regression models. 
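For example, with a database connection already configured (connection
setup is assumed here, not shown):

    from verticapy.datasets import load_amazon
    amazon = load_amazon()
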
If a table with the same name and \nschema already exists, this function will create a vDataFrame from the \ninput relation.\n\nParameters\n---------- \nschema: str, optional\n\tSchema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n\tName of the new relation.\n\nReturns\n-------\nvDataFrame\n\tthe amazon vDataFrame.\n\t\"\"\"\n return load_dataset(\n schema,\n name,\n {\"date\": \"Date\", \"state\": \"Varchar(32)\", \"number\": \"Integer\"},\n dataset_name=\"amazon\",\n )\n\n\n# ---#\ndef load_cities(schema: str = \"public\", name: str = \"cities\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the Cities dataset into the Vertica database. This dataset is ideal\nfor geospatial models. If a table with the same name and schema already \nexists, this function will create a vDataFrame from the input relation.\n\nParameters\n----------\nschema: str, optional\n Schema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n Name of the new relation.\n\nReturns\n-------\nvDataFrame\n the Cities vDataFrame.\n \"\"\"\n return load_dataset(\n schema,\n name,\n {\"city\": \"Varchar(82)\", \"geometry\": \"Geometry\"},\n [\"city\", \"gx FILLER LONG VARCHAR(65000)\", \"geometry AS ST_GeomFromText(gx)\"],\n dataset_name=\"cities\",\n )\n\n\n# ---#\ndef load_commodities(schema: str = \"public\", name: str = \"commodities\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the commodities dataset into the Vertica database. This dataset is\nideal for time series and regression models. If a table with the same name \nand schema already exists, this function will create a vDataFrame from the \ninput relation.\n\nParameters\n----------\nschema: str, optional\n Schema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n Name of the new relation.\n\nReturns\n-------\nvDataFrame\n the amazon vDataFrame.\n \"\"\"\n return load_dataset(\n schema,\n name,\n {\n \"date\": \"Date\",\n \"Gold\": \"Float\",\n \"Oil\": \"Float\",\n \"Spread\": \"Float\",\n \"Vix\": \"Float\",\n \"Dol_Eur\": \"Float\",\n \"SP500\": \"Float\",\n },\n dataset_name=\"commodities\",\n )\n\n\n# ---#\ndef load_gapminder(schema: str = \"public\", name: str = \"gapminder\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the gapminder dataset into the Vertica database. This dataset is \nideal for time series and regression models. If a table with the same name \nand schema already exists, this function will create a vDataFrame from the \ninput relation.\n\nParameters\n---------- \nschema: str, optional\n Schema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n Name of the new relation.\n\nReturns\n-------\nvDataFrame\n the gapminder vDataFrame.\n \"\"\"\n return load_dataset(\n schema,\n name,\n {\n \"country\": \"Varchar(96)\",\n \"year\": \"Integer\",\n \"pop\": \"Integer\",\n \"continent\": \"Varchar(52)\",\n \"lifeExp\": \"Float\",\n \"gdpPercap\": \"Float\",\n },\n dataset_name=\"gapminder\",\n )\n\n\n# ---#\ndef load_iris(schema: str = \"public\", name: str = \"iris\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the iris dataset into the Vertica database. This dataset is ideal \nfor classification and clustering models. 
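For instance (again assuming an active connection):

    from verticapy.datasets import load_iris
    iris = load_iris(schema="public", name="iris")
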
If a table with the same name and \nschema already exists, this function will create a vDataFrame from the input \nrelation.\n\nParameters\n----------\nschema: str, optional\n\tSchema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n\tName of the new relation.\n\nReturns\n-------\nvDataFrame\n\tthe iris vDataFrame.\n\t\"\"\"\n return load_dataset(\n schema,\n name,\n {\n \"SepalLengthCm\": \"Numeric(5,2)\",\n \"SepalWidthCm\": \"Numeric(5,2)\",\n \"PetalLengthCm\": \"Numeric(5,2)\",\n \"PetalWidthCm\": \"Numeric(5,2)\",\n \"Species\": \"Varchar(30)\",\n },\n [\n \"Id FILLER Integer\",\n \"SepalLengthCm\",\n \"SepalWidthCm\",\n \"PetalLengthCm\",\n \"PetalWidthCm\",\n \"Species\",\n ],\n dataset_name=\"iris\",\n )\n\n\n# ---#\ndef load_laliga(schema: str = \"public\", name: str = \"laliga\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the La-Liga dataset into the Vertica database. This dataset is ideal\nto test complex data types. If a table with the same name and schema already \nexists, this function will create a vDataFrame from the input relation.\n\nParameters\n----------\nschema: str, optional\n Schema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n Name of the new relation.\n\nReturns\n-------\nvDataFrame\n the LaLiga vDataFrame.\n \"\"\"\n return load_dataset(\n schema,\n name,\n {\n \"away_score\": \"int\",\n \"away_team\": 'Row(\"away_team_gender\" varchar, \"away_team_group\" varchar, \"away_team_id\" int, \"away_team_name\" varchar, \"country\" Row(\"id\" int, \"name\" varchar), \"managers\" Array[Row(\"country\" Row(\"id\" int, \"name\" varchar), \"dob\" date, \"id\" int, \"name\" varchar, \"nickname\" varchar)])',\n \"competition\": 'Row(\"competition_id\" int, \"competition_name\" varchar, \"country_name\" varchar)',\n \"competition_stage\": 'Row(\"id\" int, \"name\" varchar)',\n \"home_score\": \"int\",\n \"home_team\": 'Row(\"country\" Row(\"id\" int, \"name\" varchar), \"home_team_gender\" varchar, \"home_team_group\" varchar, \"home_team_id\" int, \"home_team_name\" varchar, \"managers\" Array[Row(\"country\" Row(\"id\" int, \"name\" varchar), \"dob\" date, \"id\" int, \"name\" varchar, \"nickname\" varchar)])',\n \"kick_off\": \"time\",\n \"last_updated\": \"date\",\n \"match_date\": \"date\",\n \"match_id\": \"int\",\n \"match_status\": \"varchar\",\n \"match_week\": \"int\",\n \"metadata\": 'Row(\"data_version\" date, \"shot_fidelity_version\" int, \"xy_fidelity_version\" int)',\n \"season\": 'Row(\"season_id\" int, \"season_name\" varchar)',\n },\n dataset_name=\"laliga\",\n )\n\n\n# ---#\ndef load_market(schema: str = \"public\", name: str = \"market\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the market dataset into the Vertica database. This dataset is ideal\nfor data exploration. If a table with the same name and schema already \nexists, this function will create a vDataFrame from the input relation.\n\nParameters\n----------\nschema: str, optional\n\tSchema of the new relation. 
If empty, a temporary local table will be\n created.\nname: str, optional\n\tName of the new relation.\n\nReturns\n-------\nvDataFrame\n\tthe market vDataFrame.\n\t\"\"\"\n return load_dataset(\n schema,\n name,\n {\"Form\": \"Varchar(32)\", \"Name\": \"Varchar(32)\", \"Price\": \"Float\"},\n dataset_name=\"market\",\n )\n\n\n# ---#\ndef load_pop_growth(schema: str = \"public\", name: str = \"pop_growth\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the population growth dataset into the Vertica database. This \ndataset is ideal for time series and geospatial models. If a table with \nthe same name and schema already exists, this function will create a \nvDataFrame from the input relation.\n\nParameters\n----------\nschema: str, optional\n Schema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n Name of the new relation.\n\nReturns\n-------\nvDataFrame\n the pop growth vDataFrame.\n \"\"\"\n return load_dataset(\n schema,\n name,\n {\n \"year\": \"Int\",\n \"continent\": \"Varchar(100)\",\n \"country\": \"Varchar(100)\",\n \"city\": \"Varchar(100)\",\n \"population\": \"Float\",\n \"lat\": \"Float\",\n \"lon\": \"Float\",\n },\n dataset_name=\"pop_growth\",\n )\n\n\n# ---#\ndef load_smart_meters(schema: str = \"public\", name: str = \"smart_meters\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the smart meters dataset into the Vertica database. This dataset is \nideal for time series and regression models. If a table with the same name \nand schema already exists, this function will create a vDataFrame from the \ninput relation.\n\nParameters\n----------\nschema: str, optional\n\tSchema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n\tName of the new relation.\n\nReturns\n-------\nvDataFrame\n\tthe smart meters vDataFrame.\n\t\"\"\"\n return load_dataset(\n schema,\n name,\n {\"time\": \"Timestamp\", \"val\": \"Numeric(11,7)\", \"id\": \"Integer\"},\n dataset_name=\"smart_meters\",\n )\n\n\n# ---#\ndef load_titanic(schema: str = \"public\", name: str = \"titanic\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the titanic dataset into the Vertica database. This dataset is \nideal for classification models. If a table with the same name and schema \nalready exists, this function will create a vDataFrame from the input \nrelation.\n\nParameters\n----------\nschema: str, optional\n\tSchema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n\tName of the new relation.\n\nReturns\n-------\nvDataFrame\n\tthe titanic vDataFrame.\n\t\"\"\"\n return load_dataset(\n schema,\n name,\n {\n \"pclass\": \"Integer\",\n \"survived\": \"Integer\",\n \"name\": \"Varchar(164)\",\n \"sex\": \"Varchar(20)\",\n \"age\": \"Numeric(6,3)\",\n \"sibsp\": \"Integer\",\n \"parch\": \"Integer\",\n \"ticket\": \"Varchar(36)\",\n \"fare\": \"Numeric(10,5)\",\n \"cabin\": \"Varchar(30)\",\n \"embarked\": \"Varchar(20)\",\n \"boat\": \"Varchar(100)\",\n \"body\": \"Integer\",\n \"home.dest\": \"Varchar(100)\",\n },\n dataset_name=\"titanic\",\n )\n\n\n# ---#\ndef load_winequality(schema: str = \"public\", name: str = \"winequality\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the winequality dataset into the Vertica database. 
This dataset is \nideal for regression and classification models. If a table with the same \nname and schema already exists, this function will create a vDataFrame from \nthe input relation.\n\nParameters\n----------\nschema: str, optional\n\tSchema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n\tName of the new relation.\n\nReturns\n-------\nvDataFrame\n\tthe winequality vDataFrame.\n\t\"\"\"\n return load_dataset(\n schema,\n name,\n {\n \"fixed_acidity\": \"Numeric(6,3)\",\n \"volatile_acidity\": \"Numeric(7,4)\",\n \"citric_acid\": \"Numeric(6,3)\",\n \"residual_sugar\": \"Numeric(7,3)\",\n \"chlorides\": \"Float\",\n \"free_sulfur_dioxide\": \"Numeric(7,2)\",\n \"total_sulfur_dioxide\": \"Numeric(7,2)\",\n \"density\": \"Float\",\n \"pH\": \"Numeric(6,3)\",\n \"sulphates\": \"Numeric(6,3)\",\n \"alcohol\": \"Float\",\n \"quality\": \"Integer\",\n \"good\": \"Integer\",\n \"color\": \"Varchar(20)\",\n },\n dataset_name=\"winequality\",\n )\n\n\n# ---#\ndef load_world(schema: str = \"public\", name: str = \"world\"):\n \"\"\"\n---------------------------------------------------------------------------\nIngests the World dataset into the Vertica database. This dataset is ideal \nfor ideal for geospatial models. If a table with the same name and schema \nalready exists, this function will create a vDataFrame from the input \nrelation.\n\nParameters\n----------\nschema: str, optional\n Schema of the new relation. If empty, a temporary local table will be\n created.\nname: str, optional\n Name of the new relation.\n\nReturns\n-------\nvDataFrame\n the World vDataFrame.\n \"\"\"\n return load_dataset(\n schema,\n name,\n {\n \"pop_est\": \"Int\",\n \"continent\": \"Varchar(32)\",\n \"country\": \"Varchar(82)\",\n \"geometry\": \"Geometry\",\n },\n [\n \"pop_est\",\n \"continent\",\n \"country\",\n \"gx FILLER LONG VARCHAR(65000)\",\n \"geometry AS ST_GeomFromText(gx)\",\n ],\n dataset_name=\"world\",\n )\n\n\n#\n# Datasets used in the tests\n#\n# ---#\ndef load_dataset_cl(table_name: str = \"dataset_cl\", schema: str = \"public\"):\n # Classification Dataset\n\n data = [\n [1, \"Bus\", \"Male\", 0, \"Cheap\", \"Low\"],\n [2, \"Bus\", \"Male\", 1, \"Cheap\", \"Med\"],\n [3, \"Train\", \"Female\", 1, \"Cheap\", \"Med\"],\n [4, \"Bus\", \"Female\", 0, \"Cheap\", \"Low\"],\n [5, \"Bus\", \"Male\", 1, \"Cheap\", \"Med\"],\n [6, \"Train\", \"Male\", 0, \"Standard\", \"Med\"],\n [7, \"Train\", \"Female\", 1, \"Standard\", \"Med\"],\n [8, \"Car\", \"Female\", 1, \"Expensive\", \"Hig\"],\n [9, \"Car\", \"Male\", 2, \"Expensive\", \"Med\"],\n [10, \"Car\", \"Female\", 2, \"Expensive\", \"Hig\"],\n ]\n input_relation = \"{}.{}\".format(quote_ident(schema), quote_ident(table_name))\n\n drop(name=input_relation, method=\"table\")\n create_table(\n table_name=table_name,\n schema=schema,\n dtype={\n \"Id\": \"INT\",\n \"transportation\": \"VARCHAR\",\n \"gender\": \"VARCHAR\",\n \"owned cars\": \"INT\",\n \"cost\": \"VARCHAR\",\n \"income\": \"CHAR(4)\",\n },\n )\n insert_into(table_name=table_name, schema=schema, data=data, copy=False)\n\n return vDataFrame(input_relation=input_relation)\n\n\n# ---#\ndef load_dataset_reg(table_name: str = \"dataset_reg\", schema: str = \"public\"):\n # Regression Dataset\n\n data = [\n [1, 0, \"Male\", 0, \"Cheap\", \"Low\"],\n [2, 0, \"Male\", 1, \"Cheap\", \"Med\"],\n [3, 1, \"Female\", 1, \"Cheap\", \"Med\"],\n [4, 0, \"Female\", 0, \"Cheap\", \"Low\"],\n [5, 0, \"Male\", 1, \"Cheap\", \"Med\"],\n [6, 1, \"Male\", 0, 
\"Standard\", \"Med\"],\n [7, 1, \"Female\", 1, \"Standard\", \"Med\"],\n [8, 2, \"Female\", 1, \"Expensive\", \"Hig\"],\n [9, 2, \"Male\", 2, \"Expensive\", \"Med\"],\n [10, 2, \"Female\", 2, \"Expensive\", \"Hig\"],\n ]\n input_relation = \"{}.{}\".format(quote_ident(schema), quote_ident(table_name))\n\n drop(name=input_relation, method=\"table\")\n create_table(\n table_name=table_name,\n schema=schema,\n dtype={\n \"Id\": \"INT\",\n \"transportation\": \"INT\",\n \"gender\": \"VARCHAR\",\n \"owned cars\": \"INT\",\n \"cost\": \"VARCHAR\",\n \"income\": \"CHAR(4)\",\n },\n )\n insert_into(table_name=table_name, schema=schema, data=data, copy=False)\n\n return vDataFrame(input_relation=input_relation)\n\n\n# ---#\ndef load_dataset_num(table_name: str = \"dataset_num\", schema: str = \"public\"):\n # Numerical Dataset\n\n data = [\n [1, 7.2, 3.6, 6.1, 2.5],\n [2, 7.7, 2.8, 6.7, 2.0],\n [3, 7.7, 3.0, 6.1, 2.3],\n [4, 7.9, 3.8, 6.4, 2.0],\n [5, 4.4, 2.9, 1.4, 0.2],\n [6, 4.6, 3.6, 1.0, 0.2],\n [7, 4.7, 3.2, 1.6, 0.2],\n [8, 6.5, 2.8, 4.6, 1.5],\n [9, 6.8, 2.8, 4.8, 1.4],\n [10, 7.0, 3.2, 4.7, 1.4],\n ]\n input_relation = \"{}.{}\".format(quote_ident(schema), quote_ident(table_name))\n\n drop(name=input_relation, method=\"table\")\n create_table(\n table_name=table_name,\n schema=schema,\n dtype={\n \"Id\": \"INT\",\n \"col1\": \"FLOAT\",\n \"col2\": \"FLOAT\",\n \"col3\": \"FLOAT\",\n \"col4\": \"FLOAT\",\n },\n )\n insert_into(table_name=table_name, schema=schema, data=data, copy=False)\n\n return vDataFrame(input_relation=input_relation)\n","sub_path":"verticapy/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":30093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"351033050","text":"#DEC 2016 Stefano Bosisio\n#creation of a new systemw ith parmed.py\n#usage: python parmed_system.py TOP CRD top_ion crd_ion CUDA_IDX Howmanyions?\nimport parmed\nfrom parmed.amber import *\nimport os,sys,math,time,subprocess,random\n\n\ndef tleapdat(mol2_file):\n r\"\"\" tleapdat: Function to create the tleap inputfile for solvation process\n\n Parameters\n ----------\n mol2_file : Input mol2 file to be solvated\n\n\n Returns\n -------\n tleap.dat : .dat file which can be read by tleap via tleap -f tleap.dat\n\n \"\"\"\n\n tleap_file = open(\"tleap.dat\",\"w\")\n tleap_file.write(\"\"\"source leaprc.gaff\nloadoff ../../cyclohexane/cyc.off\nmol = loadmol2 %s\nsolvatebox mol CYC 20.0 0.75 iso\nsaveamberparm mol solvated.parm7 solvated.rst7\nquit\"\"\" %mol2_file)\n tleap_file.close()\n\n\ndef parmed_add(mol2_file): # ,cuda_index):\n r\"\"\" parmed_add: Function for adding a water box to mol2_file\n\n Parameters\n ----------\n mol2_file : Input mol2 file to be solvated\n\n cuda_index: int index of the GPU to run MD\n\n Returns\n -------\n prmtop, rst7 : topology and coordinate files\n\n \"\"\"\n print(\"preparing tleap inputfile\")\n tleapdat(mol2_file)\n print(\"Running tleap\")\n cmd =\"tleap -f tleap.dat\"\n print(cmd)\n os.system(cmd)\n\n print(\"Equilibration\")\n\n if not os.path.exists(\"equilibration\"):\n os.makedirs(\"equilibration\")\n# if not os.path.exists(\"equilibration/md_sire\"):#\n# os.makedirs(\"equilibration/md_sire\")\n\n cmd= \"mv solvated.* equilibration/.\"\n os.system(cmd)\n #change directory since we will work into equilibration\n os.chdir(\"equilibration\")\n\n equilibration()\n\n\ndef sander_files():\n r\"\"\" sander_files: Run an equilibration protocol with sander\n\n Parameters\n ----------\n\n 
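    None. The four sander input files (min00001.in, md00002.in, md00003.in
    and md00004.in) are written with fixed settings into the current working
    directory.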
Returns\n -------\n\n \"\"\"\n #WARNING: here we have to add ntxo=1 to the input because Amber16 saves in NetCDF format automatically\n print(\"Creating sander files\")\n min00001_file = open(\"min00001.in\",\"w\")\n min00001_file.write('''\nMinimise whole system\n&cntrl\nntxo=1,\nimin = 1, ntmin = 1,\nmaxcyc = 100, ncyc = 10,\nntpr = 20, ntwe = 20,\ndx0 = 1.0D-7,\nntb = 1,\nntr = 1, restraint_wt = 10.00,\nrestraintmask=\"!:CYC\",\n/\n'''\n )\n\n md00002_file = open(\"md00002.in\",\"w\")\n md00002_file.write('''heat the system\n&cntrl\nntxo=1,\nimin = 0, nstlim = 1000, irest = 0, ntx = 1, dt = 0.002,\nnmropt = 1,\nntt = 1, temp0 = 300.0, tempi = 5.0, tautp = 1.0,\nntb = 1, pres0 = 1.0,\nntc = 2, ntf = 2,\nioutfm = 1, iwrap = 1,\nntwe = 200, ntwx = 200, ntpr = 100,\nntr = 1, restraint_wt = 10.00,\nrestraintmask=\"!:CYC\",\n/\n\n&wt\ntype = 'TEMP0',\nistep1 = 0, istep2 = 1000,\nvalue1 = 5.0, value2 = 300.0\n/\n\n&wt type = 'END'\n /\n ''')\n\n md00003_file = open(\"md00003.in\",\"w\")\n md00003_file.write('''constant temperature\n&cntrl\nntxo=1,\nimin = 0, nstlim = 4000, irest = 1, ntx = 5, dt = 0.002,\nntt = 1, temp0 = 300.0, tautp = 1.0,\nntb = 1,\nntc = 2, ntf = 2,\nioutfm = 1, iwrap = 1,\nntwe = 800, ntwx = 800, ntpr = 400,\nntr = 1, restraint_wt = 10.00,\nrestraintmask=\"!:CYC\",\n/\n ''')\n\n md00004_file = open(\"md00004.in\",\"w\")\n md00004_file.write('''md with sander\n&cntrl\nntxo=1,\nimin = 0, nstlim = 15000, irest = 1, ntx = 5, dt = 0.002,\nntt = 1, temp0 = 298.0, tautp = 1.0,\nntp = 1, pres0 = 1.0, taup = 0.5,\nntb = 2,\nntc = 2, ntf = 2,\nioutfm = 1, iwrap = 1,\nntwe = 3000, ntwx = 3000, ntpr = 1500,\nntr = 1, restraint_wt = 10.00,\nrestraintmask=\"!:CYC\",\n/\n ''')\n    min00001_file.close()\n    md00002_file.close()\n    md00003_file.close()\n    md00004_file.close()\n\n    print(\"Created all the in files for sander\")\n\n\ndef equilibration():\n    r\"\"\" equilibration: instructions to equilibrate the new system\n    parm7 and rst7 will be copied into a new equilibration/ folder\n    where equilibration will be run in sander\n\n    Parameters\n    ----------\n\n    Returns\n    -------\n    \"\"\"\n\n    wherearewe = os.getcwd()\n    print(\"now we are here\")\n    print(wherearewe)\n\n    sander_files()\n\n    print(\"Minimisation\")\n    cmd = \"sander -i min00001.in -p solvated.parm7 -c solvated.rst7 -O -o min00001.out -e min00001.en -x min00001.nc -inf min00001.info -r min00001.rst7 -ref solvated.rst7\"\n    os.system(cmd)\n    os.system(\"wait\")\n    #creation = True\n    #while(creation):\n#        if os.path.isfile(\"solvated.rst7\"):#\n#            creation = True\n#        else:\n#            creation = False\n\n    print(\"Equilibration\")\n    cmd = \"sander -i md00002.in -p solvated.parm7 -c min00001.rst7 -O -o md00002.out -e md00002.en -x md00002.nc -inf md00002.info -r md00002.rst7 -ref solvated.rst7\"\n    os.system(cmd)\n    os.system(\"wait\")\n\n    print(\"Pressure control\")\n    cmd = \"sander -i md00003.in -p solvated.parm7 -c md00002.rst7 -O -o md00003.out -e md00003.en -x md00003.nc -inf md00003.info -r md00003.rst7 -ref solvated.rst7\"\n    os.system(cmd)\n    os.system(\"wait\")\n\n#    print(\"MD sander\")\n#    cmd = \"sander -i md00004.in -p solvated.parm7 -c md00003.rst7 -O -o md00004.out -e md00004.en -x md00004.nc -inf md00004.info -r md00004.rst7 -ref solvated.rst7\"\n#    os.system(cmd)\n#    os.system(\"wait\")\n\n    #now we have created all the files\n    cmd = \"cp solvated.parm7 md00003.rst7 ../.\"\n    os.system(cmd)\n    print(\"Created solvated.parm7 and md00003.rst7\")\n\n    #Once it's finished we need to call Sire in a new folder and run a fast MD\n    #then extract the rst7\n\n#    cmd = \"cp md00003.rst7 solvated.parm7 md_sire/.\"\n#    os.system(cmd)#\n#    os.chdir(\"md_sire\")\n#    md_sire(cuda_index)\n\ndef sim_cfg(crd,nmoves,ncycles,constraint,timestep,minimise):\n\n    print(\"Creation of the sim.cfg file for md\")\n    sim_file = open(\"sim.cfg\",\"w\")\n\n    sim_file.write(\n'''\ntopfile= \"solvated.parm7\"\ncrdfile= \"%s\"\nnmoves = %s\nbuffered coordinates frequency = 100\nncycles = %s\nsave coordinates = True\nconstraint = %s\ntimestep = %s\ncutoff type = cutoffperiodic\ncutoff distance = 14*angstrom\nbarostat = True\nandersen = True\nprecision = mixed\ncenter solute = True\nminimise = %s\n'''\n % (crd,nmoves,ncycles,constraint,timestep,minimise) )\n\n    sim_file.close()\n\n\ndef md_sire(cuda_index):\n\n    print(\"Running Sire to equilibrate system density\")\n    print(\"Creating md_sire directory\")\n\n    if not os.path.exists(\"md_output\"):\n        os.makedirs(\"md_output\")\n\n    sim_cfg(\"md00003.rst7\",1000,10,\"allbonds\",\"2*femtosecond\",\"False\")\n    #TODO add the case of running on the cluster! (for all the sander/sire/..)\n\n    os.chdir(\"md_output\")\n    cmd = \"~/sire.app/bin/somd -C ../sim.cfg -t ../solvated.parm7 -c ../md00003.rst7 -d %s -p CUDA\" % cuda_index\n    os.system(cmd)\n    os.system(\"wait\")\n\n    cpptraj(tidy=True)\n\ndef cpptraj_file():\n\n    cpptraj = open(\"traj.in\",\"w\")\n    cpptraj.write(\"parm ../solvated.parm7\\n\")\n    cpptraj.write(\"trajin traj*.dcd 100 100 1\\n\")\n    cpptraj.write(\"trajout solvated.rst7\\n\")\n    cpptraj.write(\"go\\n\")\n    cpptraj.write(\"quit\")\n    cpptraj.close()\n\ndef cpptraj(tidy=False):\n\n    print(\"Extracting the coordinates\")\n    print(\"Creating cpptraj input file\")\n    cpptraj_file()\n    cmd = \"cpptraj -i traj.in\"\n    os.system(cmd)\n    os.system(\"wait\")\n\n    print(\"Extracted rst7 file, now I copy everything to the mother directory as SYSTEM.top and SYSTEM.crd\")\n\n    if not tidy:\n        print(\"not tidying up\")\n    else:\n        tidy_up()\n\ndef tidy_up():\n    #tidy up everything\n\n    cmd = \"cp solvated.rst7 ../../../SYSTEM_1.crd\"\n    os.system(cmd)\n    cmd = \"cp ../solvated.parm7 ../../../SYSTEM_1.top\"\n    os.system(cmd)\n\n    #cmd = \"mkdir ../../../inputfiles\"\n    #os.system(cmd)\n    #os.chdir(\"../../../\")\n    #cmd = \"mv * inputfiles/.\"\n    #os.system(cmd)\n\n#    cmd = \"mv inputfiles/SYSTEM.top . \"#\n#    os.system(cmd)\n#    cmd = \"mv inputfiles/SYSTEM.crd . \"\n#    os.system(cmd)\n#    print(\"Check if you need writedistres.py\")\n\n\n\n\n###MAIN SCRIPT###\n\n#mol2_file is the file we have to work with\nmol2_file = sys.argv[1]\n#cuda_index is the index of the GPU on the workstation I need to run Sire\n#cuda_index = sys.argv[2]\n#now pass to the function\nparmed_add(mol2_file)#,cuda_index)\n","sub_path":"inputfiles/boat/d_236difluoro/cyclohexane/cyclohexane.py","file_name":"cyclohexane.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"185530198","text":"from base_folder.celery.db import *\nimport discord\nfrom discord.ext import commands\n\n'''\nAll events that respond to guild.role\n'''\n\n\nclass ListenerRoles(commands.Cog):\n    def __init__(self, client):\n        self.client = client\n\n    @commands.Cog.listener()\n    async def on_guild_role_create(self, role: discord.Role):\n        stdoutchannel = self.client.get_channel(self.client.cache.states[role.guild.id].get_channel())\n        if stdoutchannel is not None:\n            await self.client.log.stdout(stdoutchannel, f\"Role {role.name} got created\")\n        roles_to_db.delay(role.guild.id, role.name, role.id)\n\n    @commands.Cog.listener()\n    async def on_guild_role_delete(self, role: discord.Role):\n        stdoutchannel = self.client.get_channel(self.client.cache.states[role.guild.id].get_channel())\n        if stdoutchannel is not None:\n            await self.client.log.stdout(stdoutchannel, f\"Role {role.name} got deleted\")\n        remove_role.delay(role.guild.id, role.id)\n\n    @commands.Cog.listener()\n    async def on_guild_role_update(self, before, after):\n        stdoutchannel = self.client.get_channel(self.client.cache.states[before.guild.id].get_channel())\n        if stdoutchannel is not None:\n            if before.name != after.name:\n                await self.client.log.stdout(stdoutchannel, f\"Role {after.name} got updated from {before.name} to {after.name}\")\n                update_role_name.delay(before.guild.id, before.id, after.name)\n\n\ndef setup(client):\n    client.add_cog(ListenerRoles(client))\n","sub_path":"base_folder/bot/modules/listener/listener_roles.py","file_name":"listener_roles.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"587089102","text":"\"\"\"Make progress chart of onolab members.\"\"\"\nfrom pathlib import Path\nfrom typing import List\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\n\nclass User:\n    def __init__(self, name, path):\n        self.name = name\n        self.path = path\n        self.progress = [0] * 10\n\n\ndef get_progress() -> List[User]:\n    cur = Path(\".\")\n    users = list(\n        filter(lambda x: x.is_dir() and x.name not in IGNORE, sorted(cur.iterdir()))\n    )\n\n    progress = []\n    # get the progress of each user\n    for user in users:\n        u = User(user.name, user)\n        for chap, max_cnt in zip(range(n_chapters), n_codes):\n            # path of user/chapterXX (only chapters are 1-indexed, hence chap+1)\n            chapter_path = Path(user / f\"chapter{chap+1:02d}\")\n\n            # count the .py files contained in user/chapterXX\n            py_files = list(chapter_path.glob(\"[0-9][0-9].py\"))\n            print(f\"{chapter_path}\", py_files)\n\n            # the number of problems is capped at max_cnt; if more files are present, clamp to max_cnt\n            solved_cnt = min(len(py_files), max_cnt)\n            u.progress[chap] = solved_cnt\n        progress.append(u)\n\n    return progress\n\n\ndef plot_progress(users: np.array, scores: np.array):\n    # specify the size of the rendered figure\n    plt.figure(figsize=(8, 6))\n\n    # stack up the bar chart chapter by chapter\n    for chap in range(n_chapters):\n        label = f\"Chapter {chap+1}\"\n        bottom = np.sum(scores[:, :chap], axis=1)\n        plt.bar(\n            users,\n            scores[:, chap],\n            bottom=bottom,\n            align=\"center\",\n            tick_label=users,\n            label=label,\n        )\n\n    # configure the plot\n    plt.xticks(rotation=30, fontsize=10)\n    plt.ylim(0, sum(n_codes))\n\n    # show the legend outside the plot\n    plt.legend(bbox_to_anchor=(1.28, 1.0))\n    plt.subplots_adjust(right=0.8)\n\n    plt.savefig(\"progress.png\")\n\n\ndef main():\n    data = get_progress()\n    users = np.array([user.name for user in data])\n    scores = np.array([user.progress for user in data])\n\n    if scores.size:\n        plot_progress(users, scores)\n\n\nif __name__ == \"__main__\":\n    # number of chapters and number of problems in each chapter\n    n_chapters, n_codes = 7, [6, 19, 9, 15, 10, 11, 18]\n\n    # directory names not shown in the progress bar\n    IGNORE = [\".git\", \".github\", \".automation\"]\n\n    sns.set()\n    sns.set_palette(\"hls\", n_chapters)\n\n    main()\n","sub_path":".automation/make_progress.py","file_name":"make_progress.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"80190861","text":"#!/usr/bin/env python\n\nfrom flask import Flask, request, redirect, url_for\nimport sqlite3\n\nDATABASE = 'lab4.db'\ndb = None\napp = Flask(__name__)\n\n# Database\ndef get_db():\n    global db\n    if db is None:\n        db = sqlite3.connect(DATABASE)\n        db.row_factory = sqlite3.Row\n    return db\n\ndef close_connection():\n    global db\n    if db is not None:\n        db.close()\n        db = None\n\ndef query_db(query, args=(), one=False):\n    cur = get_db().cursor()\n    cur.execute(query, args)\n    rv = cur.fetchall()\n    cur.close()\n    return (rv[0] if rv else None) if one else rv\n\ndef add_task(description, priority, category):\n    try:\n        priority = int(priority)\n    except Exception:\n        priority = 0 \n    \n    query_db(\"INSERT INTO tasks (description, priority, category) values (?,?,?)\", [description, priority, category], True)\n    get_db().commit()\n\ndef get_tasks(maxcount=-1):\n    return query_db(\"SELECT * FROM tasks\")\n
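# Usage sketch for the database helpers above (hypothetical example values, not from the original lab code):\n#   query_db(\"SELECT * FROM tasks WHERE id = ?\", [5], one=True)  # -> a single sqlite3.Row, or None\n#   query_db(\"SELECT * FROM tasks\")                               # -> a list of sqlite3.Row objects\n#   add_task(\"write report\", 2, \"work\")                           # parameterized INSERT followed by commit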

# Routes\n\n@app.route('/', methods=['GET'])\ndef index():\n    return '''%s\n<p>See tasks.</p>\n%s''' % (get_header('Welcome!'), get_footer())\n\n@app.route('/tasks', methods=['GET'])\ndef tasks():\n    tasks = get_tasks()\n    close_connection()\n\n    resp = '''%s\n<p>Here are all the tasks in the database</p>\n''' % get_header('Tasks!')\n\n    if len(tasks) > 0:\n        resp = resp + '<ul>'\n    else:\n        resp = resp + '<p>Ups, actually there are no tasks!</p>'\n\n    for task in tasks:\n        resp = resp + ('''<li>[%s, %d] %s [<a href=\"%s\">Delete</a>]</li>''' % (task['category'], task['priority'], task['description'], url_for(\"delete_task\", task_id=task['id'])))\n\n    if len(tasks) > 0:\n        resp = resp + '</ul>'\n    resp = resp + get_footer()\n    return resp
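# Sketch of how the Delete links rendered above resolve, assuming the '<int:task_id>'\n# URL converter used in the route below: url_for('delete_task', task_id=3) builds\n# '/tasks/3/delete', and Flask passes task_id=3 into delete_task() when the link is followed.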

@app.route('/tasks/new', methods=['GET', 'POST'])\ndef new_task():\n    if request.method == 'POST':\n        description = request.form['description']\n        category = request.form['category']\n        priority = request.form['priority']\n\n        add_task(description, priority, category)\n\n        return redirect(url_for('tasks'))\n    else:\n        return '''%s\n<p>Fill in all the data to add a new task.</p>\n<form action=\"%s\" method=\"post\">\n<p><input type=\"text\" name=\"description\"></p>\n<p><input type=\"text\" name=\"category\"></p>\n<p><input type=\"text\" name=\"priority\"></p>\n<p><input type=\"submit\"></p>\n</form>\n%s''' % (get_header('Add New Task'), url_for('new_task'), get_footer())\n\n@app.route('/tasks/<int:task_id>/delete', methods=['GET', 'POST'])\ndef delete_task(task_id):\n    query_db(\"DELETE FROM tasks WHERE id = ?\", [task_id], True)\n    get_db().commit()\n    return redirect(url_for('tasks'))\n\n# Helpers\n\ndef get_header(title='TODO List'):\n    return '''\n<html>\n<head>\n    <title>%s</title>\n</head>\n<body>\n<h1>%s</h1>\n<a href=\"%s\">Home</a>\n<a href=\"%s\">Tasks</a>\n<a href=\"%s\">New task</a>\n<hr>\n''' % (title, title, url_for(\"index\"), url_for(\"tasks\"), url_for(\"new_task\"))\n\ndef get_footer():\n    return '''\n</body>\n</html>'''\n\n# Main App\n\nif __name__ == \"__main__\":\n    query_db(\"CREATE TABLE IF NOT EXISTS tasks (id INTEGER PRIMARY KEY AUTOINCREMENT, description TEXT, priority INTEGER, category TEXT)\")\n    close_connection()\n\n    app.debug = True\n    app.run()\n","sub_path":"lab4/todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"172649088","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 9 13:38:39 2018\n\n@author: Playdata\n\"\"\"\nimport math\ndef merge(A,p,q,r):\n    L = [A[i] for i in range(p,q+1)]\n    R = [A[j] for j in range(q+1,r+1)]\n    j = i = 0\n    k = p\n    while i < len(L) and j < len(R):\n        if L[i] <= R[j]:\n            A[k] = L[i]\n            i +=1\n        else:\n            A[k] = R[j]\n            j+=1\n        k += 1\n    if j == len(R):\n        A[k:r+1] = L[i:]\n    \ndef merge_sort(A,p,r):\n    if r>p:\n        q = math.floor((r+p)/2)\n        merge_sort(A,p,q)\n        merge_sort(A,q+1,r)\n        merge(A,p,q,r)\n    \nitems = [4,3,2,1,17]\nmerge_sort(items, 0, len(items)-1)\nprint (items)","sub_path":"clrs/Merge.py","file_name":"Merge.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"545697016","text":"import codegen\nimport ast\n\ndef to_ast_and_back_again(source):\n    return codegen.to_source(ast.parse(source))\n\ndef test_del():\n    source = \"del l[0]\"\n    assert source == to_ast_and_back_again(source)\n    source = \"del obj.x\"\n    assert source == to_ast_and_back_again(source)\n\ndef test_try_expect():\n    source = (\"try:\\n\"\n              \"    '#'[2]\\n\"\n              \"except IndexError:\\n\"\n              \"    print 'What did you expect?!'\")\n    assert source == to_ast_and_back_again(source)\n    source = (\"try:\\n\"\n              \"    l = []\\n\"\n              \"    l[1]\\n\"\n              \"except IndexError, index_error:\\n\"\n              \"    print index_error\")\n    assert source == to_ast_and_back_again(source)\n\ndef test_import():\n    source = \"import intertools as iterators\"\n    assert source == to_ast_and_back_again(source)\n    source = \"from math import floor as fl, ceil as cl\"\n    assert source == to_ast_and_back_again(source)\n","sub_path":"test/test_codegen.py","file_name":"test_codegen.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"260252030","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author:Ocean-yyl\n# datetime:2020-04-08 18:20\n# software: PyCharm\n\"\"\"\nSpecifying logic adapters\nThe logic_adapters parameter is a list of logic adapters.\nIn ChatterBot, a logic adapter is a class that takes an\ninput statement and returns a response to that statement.\n\nYou can choose to use any number of logic adapters.\nIn this example, we will use two logic adapters: the\nTimeLogicAdapter returns the current time when the input\nstatement asks for it, and the MathematicalEvaluation adapter\nsolves math problems that use basic operations.\n\"\"\"\nfrom chatterbot import ChatBot\n\nbot = ChatBot(\n\t'Norman',\n\tstorage_adapter='chatterbot.storage.SQLStorageAdapter',\n\tdatabase_uri='sqlite:///database.sqlite3',\n\tlogic_adapters=[\n\t\t'chatterbot.logic.MathematicalEvaluation',\n\t\t'chatterbot.logic.TimeLogicAdapter'\n\t],\n)\n\nwhile True:\n\ttry:\n\t\tbot_input = bot.get_response(input(\"Inputs:\"))\n\t\tprint(\"Norman:\",bot_input)\n\texcept(KeyboardInterrupt, EOFError, 
SystemExit):\n\t\tbreak","sub_path":"2Tutorial2.py","file_name":"2Tutorial2.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"137092794","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('courses', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Teesheet',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(null=True, auto_now_add=True)),\n ('last_modified', models.DateTimeField(null=True, auto_now=True)),\n ('number', models.IntegerField(default=1)),\n ('firstStartTime', models.DateTimeField()),\n ('numberOfStarts', models.IntegerField()),\n ('startIntervalMinutes', models.IntegerField()),\n ('startIntervalMinutes2', models.IntegerField(null=True)),\n ('numberOfLowScoresToInclude', models.IntegerField()),\n ('showTics', models.BooleanField(default=True)),\n ('teamHCPSumPercent', models.IntegerField(null=True)),\n ('teamHCPSumDecimalPoints', models.IntegerField(null=True)),\n ('teamScoringInstructions', models.CharField(null=True, max_length=256)),\n ('assignRedBall', models.BooleanField(default=False)),\n ('countRedBall', models.IntegerField(null=True)),\n ('recordClosestToPin', models.BooleanField(default=False)),\n ('recordLongestDrive', models.BooleanField(default=False)),\n ('recordSandie', models.BooleanField(default=False)),\n ('recordChipin', models.BooleanField(default=False)),\n ('course', models.ForeignKey(to='courses.Course')),\n ('firstHole', models.ForeignKey(related_name='tsFirstHole', to='courses.HoleName')),\n ],\n options={\n 'ordering': ['firstStartTime'],\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"teesheets/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"237663484","text":"import logging\n\nimport math\nimport torch\nimport torch.nn as nn\nimport time\nfrom torch.autograd import Variable\nfrom pytocl.analysis import DataLogWriter\nfrom pytocl.car import State, Command, MPS_PER_KMH\nfrom pytocl.controller import CompositeController, ProportionalController, \\\n IntegrationController, DerivativeController\nimport pickle\n# from pre_train.pickler import load_model, save_model\nimport numpy as np\n\nfrom pre_train.mlp_torch_2 import MLP#, transform, back_transform, load_model\nimport torch.nn.functional as F\n\n# from .pre_train.mlp_torch import MLP, transform, back_transform\n_logger = logging.getLogger(__name__)\n\n# class MyDriver(Driver):\nclass MyDriver:\n \"\"\"\n Driving logic.\n\n Implement the driving intelligence in this class by processing the current\n car state as inputs creating car control commands as a response. 
The\n ``drive`` function is called periodically every 20ms and must return a\n command within 10ms wall time.\n \"\"\"\n\n def __init__(self, logdata=True):\n\n start = time.time()\n self.steering_ctrl = CompositeController(\n ProportionalController(0.4),\n # IntegrationController(0.2, integral_limit=1.5),\n # DerivativeController(2)\n )\n self.acceleration_ctrl = CompositeController(\n ProportionalController(3.7),\n )\n self.data_logger = DataLogWriter() if logdata else None\n # lst = pickle.load(open('pre_train/models/dimensions', 'rb'))\n self.output_dimensions = [3]#lst[0]\n\n self.brake = 0\n self.models = dict()\n # for i in [0,3]:\n # self.models[i] = pickle.load(open('pre_train/models/mod_temporal_torch' + '_' + str(i),'rb'))\n self.model3 = pickle.load(open('pre_train/models/mod_temporal_torch_3','rb'))\n self.model1 = pickle.load(open('pre_train/models/mod_temporal_torch_1','rb'))\n # self.model0 = pickle.load(open('pre_train/models/mod_temporal_torch_0','rb'))\n # self.model = load_model(open('pre_train/models/mod_temporal_torch','rb'))\n # self.model = load_model(open('pre_train/models/mod_temporal_torch','rb'))\n # self.model = load_model(open('pre_train/models/mod_temporal_torch','rb'))\n self.logger = open('logger', 'w')\n # t = pickle.load(open('pre_train/models/ustd_torch', 'rb'))\n # t = load_model(open('pre_train/models/ustd_torch', 'rb'))\n self.mu = self.model3.mu\n self.std = self.model3.std\n # print(self.mu)\n # print(self.std)\n # number of previous states used for the prediction\n # self.history = 20\n self.past_sensors = []\n self.past_command = [np.zeros((48))]\n self.past_command2 = [np.zeros((48))]\n # self.past_command[-1][0] = (1 - self.mu.data[0]) / self.std.data[0]\n # self.past_command[-1][2] = (1 - self.mu.data[2]) / self.std.data[2]\n self.it = 0\n self.outCounter = 0\n self.stuckCounter = 0\n # self.input_dimensions = [3]+[i for i in range(5, 48)]\n self.state_dimensions = self.model3.state_dimensions\n self.input_dimensions = self.model3.input_dimensions\n # if len(lst) > 3:\n # self.history = lst[3]\n self.history = 4#self.model3.history\n self.use_lstm = False\n self.use_lstm3 = False\n self.hn = Variable(torch.zeros(1, 256))\n self.cn = Variable(torch.zeros(1, 256))\n self.index = {}\n self.countnospeed = 0\n self.countRecovery = 0\n self.stuckBack = False\n self.stuckFront = False\n self.countRecovery = 0\n\n self.past_command[-1] = self.past_command[-1][self.input_dimensions]\n self.past_command2[-1] = self.past_command2[-1][self.input_dimensions]\n print(time.time() - start)\n @property\n def range_finder_angles(self):\n \"\"\"Iterable of 19 fixed range finder directions [deg].\n\n The values are used once at startup of the client to set the directions\n of range finders. 
During regular execution, a 19-valued vector of track\n distances in these directions is returned in ``state.State.tracks``.\n \"\"\"\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90\n\n def on_shutdown(self):\n \"\"\"\n Server requested driver shutdown.\n\n Optionally implement this event handler to clean up or write data\n before the application is stopped.\n \"\"\"\n if self.data_logger:\n self.data_logger.close()\n self.data_logger = None\n\n\n def carstate_matrix2(self, carstate):\n m = np.zeros((48))\n # m[0] = self.past_command[-1][0]\n # m[1] = self.past_command[-1][1]\n # m[2] = self.past_command[-1][2]\n # m[3] = self.past_command[-1][3]\n # m[4] = self.past_command[-1][4]\n\n DEGREE_PER_RADIANS = 180 / math.pi\n MPS_PER_KMH = 1000 / 3600\n\n m[5] = 0\n m[6] = carstate.angle / DEGREE_PER_RADIANS\n m[7] = carstate.current_lap_time\n m[8] = carstate.damage\n m[9] = carstate.distance_from_start\n m[10] = carstate.distance_raced\n m[11] = carstate.fuel\n m[12] = carstate.last_lap_time\n m[13] = carstate.race_position\n # for i in range(42, 47):\n # m[i] = carstate.opponents[i-42]\n m[14] = carstate.rpm\n m[15] = carstate.speed_x / MPS_PER_KMH\n m[16] = carstate.speed_y / MPS_PER_KMH\n m[17] = carstate.speed_z / MPS_PER_KMH\n for i in range(18, 37):\n m[i] = carstate.distances_from_edge[i-18]\n m[37] = carstate.distance_from_center\n for i in range(38, 42):\n m[i] = carstate.wheel_velocities[i-38] / DEGREE_PER_RADIANS\n m[42] = carstate.z\n for i in range(43, 48):\n m[i] = carstate.focused_distances_from_edge[i-43]\n\n # return m[5:]\n return m[self.state_dimensions]\n\n\n def drive(self, carstate: State) -> Command:\n \"\"\"\n # Produces driving command in response to newly received car state.\n #\n # This is a dummy driving routine, very dumb and not really considering a\n # lot of inputs. 
But it will get the car (if not disturbed by other\n # drivers) successfully driven along the race track.\n # \"\"\"\n\n outCommand = Command()\n\n # if self.countRecovery == 50:\n # print(\"got out\")\n # self.stuckBack = False\n # self.stuckFront = False\n # self.countRecovery = 0\n #\n # if carstate.speed_x < 2 and carstate.current_lap_time > 5:\n # self.countnospeed += 1\n #\n # if self.countnospeed == 200:\n # if carstate.gear > 0:\n # self.stuckFront = True\n # print('just got stuck')\n # else:\n # self.stuckBack = True\n #\n # if self.stuckFront:\n # print(\"stuck front\")\n # outCommand.gear = -1\n # outCommand.accelerator = 0.5\n # outCommand.steer = -carstate.angle\n # self.countRecovery += 1\n # self.countnospeed = 0\n # return outCommand\n #\n # if self.stuckBack:\n # outCommand.gear = 1\n # outCommand.accelerator = 0.5\n # outCommand.steer = carstate.angle\n # self.countnospeed = 0\n # self.countRecovery += 1\n # return outCommand\n\n # if carstate.speed_x / MPS_PER_KMH < 30:\n # self.accelerate(carstate, 40, outCommand)\n # print('pid')\n # self.steer(carstate, 0.0, outCommand)\n # return outCommand\n\n start = time.time()\n # self.steer(carstate, 0.0, command)\n self.it += 1\n O = len(self.output_dimensions)\n #multiply speeds by 3.6\n # wheel velocities\n # features = self.carstate_matrix2(carstate)[[x - 5 for x in self.state_dimensions]].reshape(1,-1)\n features = self.carstate_matrix2(carstate).reshape(1,-1)\n # _logger.info(carstate)\n\n features = Variable(torch.FloatTensor(features))\n # features = Variable(torch.FloatTensor(features))\n\n t_features = self.model3.transform(features, self.model3.mu[torch.LongTensor(self.state_dimensions)],\n self.model3.std[torch.LongTensor(self.state_dimensions)])\n t_features1 = self.model1.transform(features, self.model1.mu[torch.LongTensor(self.state_dimensions)],\n self.model1.std[torch.LongTensor(self.state_dimensions)])\n\n # if features[0, 18].data[0] == -1:\n # _logger.info('im out')\n # t_features.data[0, 18:37] = 0\n\n # if carstate.distances_from_edge[0] == -1:\n # outCommand.meta = 1\n if len(self.past_command) >= self.history:\n\n feat2 = t_features.data\n if not self.use_lstm:\n for i in reversed(range(1, self.history)):\n feat2 = torch.cat((feat2, torch.FloatTensor(self.past_command[-i]).view(1, -1)), 1)\n feat2 = Variable(feat2)\n\n feat1 = t_features1.data\n if not self.use_lstm:\n for i in reversed(range(1, self.history)):\n feat1 = torch.cat((feat1, torch.FloatTensor(self.past_command2[-i]).view(1, -1)), 1)\n feat1 = Variable(feat1)\n\n if not self.use_lstm3:\n t_prediction = self.model3(feat2)\n else:\n # t_prediction, self.hn, self.cn = self.model.forward(feat2, self.hn, self.cn)\n t_prediction, self.hn = self.model3.forward(t_features, self.hn)\n\n\n t_prediction1 = self.model1(feat1)\n\n prediction = self.model3.back_transform(t_prediction,\n self.model3.mu[torch.LongTensor([3])],\n self.model3.std[torch.LongTensor([3])])\n\n # print(self.mu[torch.LongTensor([3])], self.std[torch.LongTensor([3])])\n # self.output_dimensions2 = [1]\n prediction1 = self.model1.back_transform(t_prediction1,\n self.model1.mu[torch.LongTensor([1])],\n self.model1.std[torch.LongTensor([1])])\n\n\n prediction = prediction[0]\n prediction1 = prediction1[0]\n\n if self.it > 100:\n # print('normal')\n outCommand.steering = prediction.data[0]\n # self.accelerate(carstate, np.sum(carstate.distances_from_edge)/600 * 100, outCommand)\n self.accelerate(carstate, 50, outCommand)\n # if prediction1.data[0] < 1 or carstate.speed_x / MPS_PER_KMH < 
30:\n # outCommand.brake = 0\n # self.accelerate(carstate, np.sum(carstate.distances_from_edge)/600 * 100 + 10, outCommand)\n # # outCommand.accelerator =\n # print(np.sum(carstate.distances_from_edge) /600 * 100 + 10)\n # print('here')\n # elif carstate.speed_x / MPS_PER_KMH > 30:\n # # self.accelerate(carstate, 50, outCommand)\n #\n # if self.brake == 0:\n # # if self.brake == 0:\n # # self.brake = 10\n # # outCommand.accelerator = 0\n # outCommand.brake = 1#prediction1.data[0]\n # outCommand.accelerator = 0\n # # self.brake = 1\n # print('braking ')\n # self.brake = 1\n # # self.accelerate(carstate, 50, outCommand)\n # else:\n # # outCommand.accelerator = 0\n # # self.accelerate(carstate, 100, outCommand)\n # self.brake = 0\n # outCommand.accelerator = 0\n # outCommand.brake = 0\n # # outCommand.brake = prediction1.data[0] * 2\n\n outCommand.clutch = 0#prediction.data[4]\n\n else:\n outCommand.accelerator = 1\n outCommand.gear = 1\n outCommand.steering = 0\n outCommand.brake = 0\n outCommand.clutch = 0\n # print(prediction1.data[0])\n # print(outCommand)\n # print(carstate.speed_x / MPS_PER_KMH)\n t_prediction = self.model3.transform(prediction.view(1, -1),\n self.model3.mu[torch.LongTensor(self.output_dimensions)],\n self.model3.std[torch.LongTensor(self.output_dimensions)])[0]\n outCommand.brake = 0\n\n if self.input_dimensions != self.state_dimensions:\n t_features_numpy = np.hstack((np.zeros((1, O)), t_features.data.numpy()))\n t_features_numpy[0, :O] = t_prediction.data.numpy()[:O]\n else:\n t_features_numpy = t_features.data.numpy()\n t_features_numpy1 = t_features1.data.numpy()\n self.past_command.append(t_features_numpy[0, :])\n self.past_command2.append(t_features_numpy1[0, :])\n\n if carstate.distance_from_center > 0.99 or carstate.distance_from_center < -0.99:\n self.steer(carstate, 0.0, outCommand)\n self.accelerate(carstate, 20, outCommand)\n print('out of tracl')\n\n # if self.data_logger:\n # self.data_logger.log(carstate, command)\n\n # print(len(self.past_command))\n if len(self.past_command) > self.history:\n self.past_command = self.past_command[1:]\n if len(self.past_command2) > self.history:\n self.past_command2 = self.past_command2[1:]\n # print(time.time() - start, self.it)\n return outCommand\n\n def accelerate(self, carstate, target_speed, command):\n # compensate engine deceleration, but invisible to controller to\n # prevent braking:\n speed_error = 1.0025 * target_speed * MPS_PER_KMH - carstate.speed_x\n acceleration = self.acceleration_ctrl.control(\n speed_error,\n carstate.current_lap_time\n )\n\n # stabilize use of gas and brake:\n acceleration = math.pow(acceleration, 3)\n\n if acceleration > 0:\n if abs(carstate.distance_from_center) >= 1:\n # off track, reduced grip:\n acceleration = min(0.4, acceleration)\n\n command.accelerator = min(acceleration, 1)\n\n if carstate.rpm > 8000:\n command.gear = carstate.gear + 1\n\n # else:\n # command.brake = min(-acceleration, 1)\n\n if carstate.rpm < 2500:\n command.gear = carstate.gear - 1\n\n if not command.gear:\n command.gear = carstate.gear or 1\n\n def steer(self, carstate, target_track_pos, command):\n steering_error = target_track_pos - carstate.distance_from_center\n command.steering = self.steering_ctrl.control(\n steering_error,\n carstate.current_lap_time\n )\n\n def set_net(self, net):\n self.net = net\n\n\n\n def on_restart(self):\n print(\"restarted\")\n self.it = 0\n # 
self.__init__(logdata=False)\n","sub_path":"my_driver_copy.py","file_name":"my_driver_copy.py","file_ext":"py","file_size_in_byte":15429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"181208105","text":"import requests\nimport sys\nfrom time import sleep\nfrom fake_useragent import UserAgent\nfrom data_handler import data_parser_cleaner, data_insert\n\n\ndef get_rough_data(url, header):\n try:\n response = requests.get(url, headers=header)\n except ConnectionError:\n get_rough_data(url, header)\n return response.text\n\n\ndef main():\n ua = UserAgent(verify_ssl=False)\n header = {\n 'user-agent': ua.random,\n 'Refere': 'https://book.douban.com/subject/25862578/'\n }\n urls = (f'https://book.douban.com/subject/25862578/comments/?start={i}&limit=20&status=P&sort=new_score'\n for i in range(0, 200, 20))\n count = 0\n for url in urls:\n stars, content = data_parser_cleaner(get_rough_data(url, header))\n try:\n data_insert(stars, content)\n print('20 comments inserted.')\n except Exception as e:\n print(e)\n finally:\n header['Refere'] = url\n print('wait for 5 sec.')\n sleep(5)\n count += 20\n print(f\"About {count} comments inserted.\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"week04/HomeWork/spiders/spider/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"198386489","text":"import json\nimport matplotlib.pyplot as plt\n\ndef createScoreHistogram(data):\n plt.hist(data, density=False, bins = 50) # `density=False` displays counts, #change number of bins for num of bins on histogram\n # plt.xlim(0, 500) #change limit of x axis (score)\n plt.ylabel('Number')\n plt.xlabel('Size')\n plt.show()\n\nstring = './inv_idx_free.json'############## ENTER JSON FILE PATH YOU WANT TO CREATE HISTOGRAM OF ###############\nwith open(string) as f:\n data = json.load(f)\n scores = [len(obj['term']) for obj in data if obj['term'] is not None]\n createScoreHistogram(scores)\n f.close()\n ","sub_path":"joke_dataset/scoring_histograms.py","file_name":"scoring_histograms.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"132534557","text":"\nimport unittest\nimport functools\nimport argparse\nimport os,sys,inspect\nimport copy\n\n\nfrom argg_hdl.argg_hdl_base import *\nfrom argg_hdl.argg_hdl_v_symbol import *\nfrom argg_hdl.examples.axiStream import *\nfrom argg_hdl.argg_hdl_v_entity import *\n\n\nfrom argg_hdl.argg_hdl_simulation import *\n\n\nclass tb_entity(v_entity):\n def __init__(self):\n super().__init__()\n self.architecture()\n\n\n def architecture(self):\n clk = v_sl()\n\n\n @timed()\n def p1():\n clk << 1\n print(\"set clk to 1\")\n yield wait_for(10)\n clk << 1\n print(\"set clk to 1 again\")\n yield wait_for(10)\n clk << 0\n print(\"set clk to 0\")\n yield wait_for(10)\n\n counter = v_slv(32)\n v_counter = v_slv(32,varSigConst=varSig.variable_t)\n @rising_edge(clk)\n def p2():\n v_counter << v_counter +1\n counter << counter + 1\n print(\"counter\", value(counter))\n print(\"v_counter\", value(v_counter))\n\n end_architecture()\n\n\n\n\n\n","sub_path":"argg_hdl/examples/Example1.py","file_name":"Example1.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"478619890","text":"import re\n\nfrom django.core.cache 
import cache\n\nfrom fancy_cache.middleware import REMEMBERED_URLS_KEY, LONG_TIME\nfrom fancy_cache.utils import md5\n\n__all__ = ('find_urls',)\n\n\ndef _match(url, regexes):\n if not regexes:\n return url\n for regex in regexes:\n if regex.match(url):\n return True\n return False\n\n\ndef _urls_to_regexes(urls):\n regexes = []\n for each in urls:\n parts = each.split('*')\n if len(parts) == 1:\n regexes.append(re.compile('^%s$' % re.escape(parts[0])))\n else:\n _re = '.*'.join(re.escape(x) for x in parts)\n regexes.append(re.compile('^%s$' % _re))\n return regexes\n\n\ndef find_urls(urls=None, purge=False):\n remembered_urls = cache.get(REMEMBERED_URLS_KEY, {})\n _del_keys = []\n if urls:\n regexes = _urls_to_regexes(urls)\n for url in remembered_urls:\n if not urls or _match(url, regexes):\n cache_key = remembered_urls[url]\n if not cache.get(cache_key):\n continue\n if purge:\n cache.delete(cache_key)\n _del_keys.append(url)\n misses_cache_key = '%s__misses' % url\n misses_cache_key = md5(misses_cache_key)\n hits_cache_key = '%s__hits' % url\n hits_cache_key = md5(hits_cache_key)\n\n misses = cache.get(misses_cache_key)\n hits = cache.get(hits_cache_key)\n if misses is None and hits is None:\n stats = None\n else:\n stats = {\n 'hits': hits or 0,\n 'misses': misses or 0\n }\n yield (url, cache_key, stats)\n\n if _del_keys:\n # means something was changed\n for url in _del_keys:\n remembered_urls.pop(url)\n misses_cache_key = '%s__misses' % url\n hits_cache_key = '%s__hits' % url\n cache.delete(misses_cache_key)\n cache.delete(hits_cache_key)\n\n cache.set(\n REMEMBERED_URLS_KEY,\n remembered_urls,\n LONG_TIME\n )\n","sub_path":"fancy_cache/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"166116475","text":"\"\"\"Module to control interaction with an ANSYS shell instance.\nBuilt using ANSYS documentation from\nhttps://www.sharcnet.ca/Software/Ansys/\n\nThis module makes no claim to own any rights to ANSYS. 
It's merely an\ninterface to software owned by ANSYS.\n\n\"\"\"\nimport glob\nimport string\nimport re\nimport os\nimport tempfile\nimport warnings\nimport logging\nimport time\nimport subprocess\nfrom threading import Thread\nimport weakref\nimport random\nfrom shutil import copyfile, rmtree\n\nimport appdirs\nimport pexpect\nimport numpy as np\nimport psutil\n\nimport pyansys\nfrom pyansys.geometry_commands import geometry_commands\nfrom pyansys.element_commands import element_commands\nfrom pyansys.mapdl_functions import _MapdlCommands\nfrom pyansys.deprec_commands import _DeprecCommands\nfrom pyansys.convert import is_float\n\ntry:\n    import matplotlib.pyplot as plt\n    import matplotlib.image as mpimg\n    MATPLOTLIB_LOADED = True\nexcept:\n    MATPLOTLIB_LOADED = False\n\n\ndef random_string(stringLength=10):\n    \"\"\"Generate a random string of fixed length \"\"\"\n    letters = string.ascii_lowercase\n    return ''.join(random.choice(letters) for i in range(stringLength))\n\n\ndef find_ansys():\n    \"\"\"Searches for the ANSYS path within environment variables.\n\n    Returns\n    -------\n    ansys_exe_path : str\n        Full path to ANSYS executable\n\n    version : float\n        Version of ANSYS\n    \"\"\"\n    ansys_sysdir_var = 'ANSYS_SYSDIR'\n    paths = {}\n    for var in os.environ:\n        if 'ANSYS' in var and '_DIR' in var:\n            # add path if valid\n            path = os.environ[var]\n            if os.path.isdir(path):\n\n                # add path if version number is in path\n                version_str = var[5:8]\n                if is_float(version_str):\n                    paths[int(version_str)] = path\n\n    if not paths:\n        return '', ''\n\n    # check through all available paths and return the latest version\n    while paths:\n        version = max(paths.keys())\n        ansys_path = paths[version]\n\n        if ansys_sysdir_var in os.environ:\n            sysdir = os.environ[ansys_sysdir_var]\n            ansys_bin_path = os.path.join(ansys_path, 'bin', sysdir)\n            if 'win' in sysdir:\n                ansys_exe = 'ansys%d.exe' % version\n            else:\n                ansys_exe = 'ansys%d' % version\n        else:\n            ansys_bin_path = os.path.join(ansys_path, 'bin')\n            ansys_exe = 'ansys%d' % version\n\n        ansys_exe_path = os.path.join(ansys_bin_path, ansys_exe)\n        if os.path.isfile(ansys_exe_path):\n            break\n        else:\n            paths.pop(version)\n\n    version_float = float(version)/10.0\n    return ansys_exe_path, version_float\n\n\ndef tail(filename, nlines):\n    \"\"\" Read the last nlines of a text file \"\"\"\n    with open(filename) as qfile:\n        qfile.seek(0, os.SEEK_END)\n        endf = position = qfile.tell()\n        linecnt = 0\n        while position >= 0:\n            qfile.seek(position)\n            next_char = qfile.read(1)\n            if next_char == \"\\n\" and position != endf-1:\n                linecnt += 1\n\n            if linecnt == nlines:\n                break\n            position -= 1\n\n        if position < 0:\n            qfile.seek(0)\n\n        return qfile.read()\n\n\ndef kill_process(proc_pid):\n    \"\"\" kills a process with extreme prejudice \"\"\"\n    process = psutil.Process(proc_pid)\n    for proc in process.children(recursive=True):\n        proc.kill()\n    process.kill()\n\n\n# settings directory\nsettings_dir = appdirs.user_data_dir('pyansys')\nif not os.path.isdir(settings_dir):\n    try:\n        os.makedirs(settings_dir)\n    except:\n        warnings.warn('Unable to create settings directory.\\n' +\n                      'Will be unable to cache ANSYS executable location')\n\nCONFIG_FILE = os.path.join(settings_dir, 'config.txt')\n\n# specific to pexpect process\n###############################################################################\nready_items = [rb'BEGIN:',\n               rb'PREP7:',\n               rb'SOLU_LS[0-9]+:',\n               rb'POST1:',\n               rb'POST26:',\n               rb'RUNSTAT:',\n               rb'AUX2:',\n               rb'AUX3:',\n               rb'AUX12:',\n               rb'AUX15:',\n               # continue\n               rb'YES,NO OR 
CONTINUOUS\\)\\=',\n rb'executed\\?',\n # errors\n rb'SHOULD INPUT PROCESSING BE SUSPENDED\\?',\n # prompts\n rb'ENTER FORMAT for',\n]\n\nprocessors = ['/PREP7',\n '/POST1',\n '/SOLUTION',\n '/POST26',\n '/AUX2',\n '/AUX3',\n '/AUX12',\n '/AUX15',\n '/MAP',]\n\n\nCONTINUE_IDX = ready_items.index(rb'YES,NO OR CONTINUOUS\\)\\=')\nWARNING_IDX = ready_items.index(rb'executed\\?')\nERROR_IDX = ready_items.index(rb'SHOULD INPUT PROCESSING BE SUSPENDED\\?')\nPROMPT_IDX = ready_items.index(rb'ENTER FORMAT for')\n\nnitems = len(ready_items)\nexpect_list = []\nfor item in ready_items:\n expect_list.append(re.compile(item))\nignored = re.compile(r'[\\s\\S]+'.join(['WARNING', 'command', 'ignored']))\n\n###############################################################################\n\n# test for png file\npng_test = re.compile(r'WRITTEN TO FILE')\n\nINVAL_COMMANDS = {'*vwr': 'Use \"with ansys.non_interactive:\\n\\t*ansys.Run(\"VWRITE(...\"',\n '*cfo': '',\n '*CRE': 'Create a function within python or run as non_interactive',\n '*END': 'Create a function within python or run as non_interactive',\n '*IF': 'Use a python if or run as non_interactive'}\n\n\ndef check_valid_ansys():\n \"\"\" Checks if a valid version of ANSYS is installed and preconfigured \"\"\"\n ansys_bin = get_ansys_path(allow_input=False)\n if ansys_bin is not None:\n version = int(re.findall(r'\\d\\d\\d', ansys_bin)[0])\n return not(version < 170 and os.name != 'posix')\n\n return False\n\n\ndef setup_logger(loglevel='INFO'):\n \"\"\" Setup logger \"\"\"\n\n # return existing log if this function has already been called\n if hasattr(setup_logger, 'log'):\n setup_logger.log.setLevel(loglevel)\n ch = setup_logger.log.handlers[0]\n ch.setLevel(loglevel)\n return setup_logger.log\n\n # create logger\n log = logging.getLogger(__name__)\n log.setLevel(loglevel)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(loglevel)\n\n # create formatter\n formatstr = '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n formatter = logging.Formatter(formatstr)\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n log.addHandler(ch)\n\n # make persistent\n setup_logger.log = log\n\n return log\n\n\ndef get_ansys_path(allow_input=True):\n \"\"\" Acquires ANSYS Path from a cached file or user input \"\"\"\n exe_loc = None\n if os.path.isfile(CONFIG_FILE):\n with open(CONFIG_FILE) as f:\n exe_loc = f.read()\n # verify\n if not os.path.isfile(exe_loc) and allow_input:\n print('Cached ANSYS executable %s not found' % exe_loc)\n exe_loc = save_ansys_path()\n elif allow_input: # create configuration file\n exe_loc = save_ansys_path()\n\n return exe_loc\n\n\ndef change_default_ansys_path(exe_loc):\n \"\"\"\n Change your default ansys path\n\n Parameters\n ----------\n exe_loc : str\n Ansys executable. Must be a full path.\n\n \"\"\"\n if os.path.isfile(exe_loc):\n with open(CONFIG_FILE, 'w') as f:\n f.write(exe_loc)\n else:\n raise Exception('File %s is invalid or does not exist' % exe_loc)\n\n\ndef save_ansys_path(exe_loc=''):\n \"\"\" Find ANSYS path or query user \"\"\"\n print('Cached ANSYS executable %s not found' % exe_loc)\n exe_loc, ver = find_ansys()\n if os.path.isfile(exe_loc):\n print('Found ANSYS at %s' % exe_loc)\n resp = input('Use this location? 
[Y/n]')\n if resp != 'n':\n change_default_ansys_path(exe_loc)\n return exe_loc\n\n if exe_loc is not None:\n if os.path.isfile(exe_loc):\n return exe_loc\n\n # otherwise, query user for the location\n with open(CONFIG_FILE, 'w') as f:\n try:\n exe_loc = raw_input('Enter location of ANSYS executable: ')\n except NameError:\n exe_loc = input('Enter location of ANSYS executable: ')\n if not os.path.isfile(exe_loc):\n raise Exception('ANSYS executable not found at this location:\\n%s' % exe_loc)\n\n f.write(exe_loc)\n\n return exe_loc\n\n\nclass Mapdl(_MapdlCommands, _DeprecCommands):\n \"\"\"This class opens ANSYS in the background and allows commands to\n be passed to a persistent session.\n\n Parameters\n ----------\n exec_file : str, optional\n The location of the ANSYS executable. Will use the cached\n location when left at the default None.\n\n run_location : str, optional\n ANSYS working directory. Defaults to a temporary working\n directory.\n\n jobname : str, optional\n ANSYS jobname. Defaults to ``'file'``.\n\n nproc : int, optional\n Number of processors. Defaults to 2.\n\n override : bool, optional\n Attempts to delete the lock file at the run_location.\n Useful when a prior ANSYS session has exited prematurely and\n the lock file has not been deleted.\n\n wait : bool, optional\n When True, waits until ANSYS has been initialized before\n initializing the python ansys object. Set this to False for\n debugging.\n\n loglevel : str, optional\n Sets which messages are printed to the console. Default\n 'INFO' prints out all ANSYS messages, 'WARNING` prints only\n messages containing ANSYS warnings, and 'ERROR' prints only\n error messages.\n\n additional_switches : str, optional\n Additional switches for ANSYS, for example aa_r, and academic\n research license, would be added with:\n\n - additional_switches=\"-aa_r\"\n\n Avoid adding switches like -i -o or -b as these are already\n included to start up the ANSYS MAPDL server.\n\n start_timeout : float, optional\n Time to wait before raising error that ANSYS is unable to\n start.\n\n interactive_plotting : bool, optional\n Enables interactive plotting using ``matplotlib``. Default\n False.\n\n log_broadcast : bool, optional\n Additional logging for ansys solution progress. Default True\n and visible at log level 'INFO'.\n\n check_version : bool, optional\n Check version of binary file and raise exception when invalid.\n\n prefer_pexpect : bool, optional\n When enabled, will avoid using ansys APDL in CORBA server mode\n and will spawn a process and control it using pexpect.\n Default False.\n\n log_apdl : str, optional\n Opens an APDL log file in the current ANSYS working directory.\n Default 'w'. 
Set to 'a' to append to an existing log.\n\n Examples\n --------\n >>> import pyansys\n >>> mapdl = pyansys.Mapdl()\n\n Run MAPDL with the smp switch and specify the location of the\n ansys binary\n\n >>> import pyansys\n >>> mapdl = pyansys.Mapdl('/ansys_inc/v194/ansys/bin/ansys194',\n additional_switches='-smp')\n \"\"\"\n\n def __init__(self, exec_file=None, run_location=None,\n jobname='file', nproc=2, override=False,\n loglevel='INFO', additional_switches='',\n start_timeout=120, interactive_plotting=False,\n log_broadcast=False, check_version=True,\n prefer_pexpect=True, log_apdl='w'):\n \"\"\" Initialize connection with ANSYS program \"\"\"\n self.log = setup_logger(loglevel.upper())\n self._jobname = jobname\n self.non_interactive = self._non_interactive(self)\n self.redirected_commands = {'*LIS': self._list}\n self._processor = 'BEGIN'\n\n # default settings\n self.allow_ignore = False\n self.process = None\n self.lockfile = ''\n self._interactive_plotting = False\n self.using_corba = None\n self.auto_continue = True\n self.apdl_log = None\n self._store_commands = False\n self._stored_commands = []\n self.response = None\n self._output = ''\n self._outfile = None\n\n if exec_file is None:\n # Load cached path\n exec_file = get_ansys_path()\n if exec_file is None:\n raise Exception('Invalid or path or cannot load cached ansys path' +\n 'Enter one manually using pyansys.ANSYS(exec_file=...)')\n\n else: # verify ansys exists at this location\n if not os.path.isfile(exec_file):\n raise Exception('Invalid ANSYS executable at %s' % exec_file +\n 'Enter one manually using pyansys.ANSYS(exec_file=\"\")')\n self.exec_file = exec_file\n\n # check ansys version\n if check_version:\n version = int(re.findall(r'\\d\\d\\d', self.exec_file)[0])\n if version < 170 and os.name != 'posix':\n raise Exception('ANSYS MAPDL server requires version 17.0 or greater ' +\n 'for windows')\n self.version = str(version)\n\n # create temporary directory\n self.path = run_location\n if self.path is None:\n temp_dir = tempfile.gettempdir()\n self.path = os.path.join(temp_dir, 'ansys')\n if not os.path.isdir(self.path):\n try:\n os.mkdir(self.path)\n except:\n raise Exception('Unable to create temporary working '\n 'directory %s\\n' % self.path +\n 'Please specify run_location=')\n else:\n if not os.path.isdir(self.path):\n raise Exception('%s is not a valid folder' % self.path)\n\n # Check for lock file\n self.lockfile = os.path.join(self.path, self._jobname + '.lock')\n if os.path.isfile(self.lockfile):\n if not override:\n raise Exception('Lock file exists for jobname %s \\n' % self._jobname +\n ' at %s\\n' % self.lockfile +\n 'Set override=True to delete lock and start ANSYS')\n else:\n os.remove(self.lockfile)\n\n # key will be output here when ansys server is available\n self.broadcast_file = os.path.join(self.path, 'mapdl_broadcasts.txt')\n if os.path.isfile(self.broadcast_file):\n os.remove(self.broadcast_file)\n\n # create a dummy input file\n tmp_inp = os.path.join(self.path, 'tmp.inp')\n with open(tmp_inp, 'w') as f:\n f.write('FINISH')\n\n if os.name != 'posix':\n prefer_pexpect = False\n\n # open a connection to ANSYS\n self.nproc = nproc\n self.start_timeout = start_timeout\n self.prefer_pexpect = prefer_pexpect\n self.log_broadcast = log_broadcast\n self.interactive_plotting = interactive_plotting\n self._open(additional_switches)\n\n if log_apdl:\n filename = os.path.join(self.path, 'log.inp')\n self.open_apdl_log(filename, mode=log_apdl)\n\n def _open(self, additional_switches=''):\n \"\"\"\n 
Opens up ANSYS an ansys process using either pexpect or\n ansys_corba.\n \"\"\"\n if (int(self.version) < 170 and os.name == 'posix') or self.prefer_pexpect:\n self._open_process(self.nproc, self.start_timeout, additional_switches)\n else: # use corba\n self.open_corba(self.nproc, self.start_timeout, additional_switches)\n\n # separate logger for broadcast file\n if self.log_broadcast:\n self.broadcast_logger = Thread(target=ANSYS._start_broadcast_logger,\n args=(weakref.proxy(self),))\n self.broadcast_logger.start()\n\n # setup plotting for PNG\n if self.interactive_plotting:\n self.enable_interactive_plotting()\n\n def open_apdl_log(self, filename, mode='w'):\n \"\"\"Starts writing all APDL commands to an ANSYS input\n\n Parameters\n ----------\n filename : str\n Filename of the log\n\n \"\"\"\n if self.apdl_log is not None:\n raise Exception('APDL command logging already enabled.\\n')\n\n self.log.debug('Opening ANSYS log file at %s', filename)\n self.apdl_log = open(filename, mode=mode, buffering=1) # line buffered\n if mode != 'w':\n self.apdl_log.write('! APDL script generated using pyansys %s\\n' %\n pyansys.__version__)\n\n def _close_apdl_log(self):\n \"\"\" Closes APDL log \"\"\"\n if self.apdl_log is not None:\n self.apdl_log.close()\n self.apdl_log = None\n\n def _open_process(self, nproc, timeout, additional_switches):\n \"\"\" Opens an ANSYS process using pexpect \"\"\"\n command = '%s -j %s -np %d %s' % (self.exec_file, self._jobname, nproc,\n additional_switches)\n self.log.debug('Spawning shell process using pexpect')\n self.log.debug('Command: \"%s\"', command)\n self.log.debug('At \"%s\"', self.path)\n self.process = pexpect.spawn(command, cwd=self.path)\n self.process.delaybeforesend = None\n self.log.debug('Waiting for ansys to start...')\n\n try:\n index = self.process.expect(['BEGIN:', 'CONTINUE'], timeout=timeout)\n except: # capture failure\n raise Exception(self.process.before.decode('utf-8'))\n\n if index:\n self.process.sendline('') # enter to continue\n self.process.expect('BEGIN:', timeout=timeout)\n self.log.debug('ANSYS Initialized')\n self.log.debug(self.process.before.decode('utf-8'))\n self.using_corba = False\n\n def enable_interactive_plotting(self):\n \"\"\" Enables interactive plotting. 
Requires matplotlib \"\"\"\n if MATPLOTLIB_LOADED:\n self.Show('PNG')\n self._interactive_plotting = True\n else:\n raise Exception('Install matplotlib to use enable interactive plotting\\n' +\n 'or turn interactive plotting off with:\\n' +\n 'interactive_plotting=False')\n\n def set_log_level(self, loglevel):\n \"\"\" Sets log level \"\"\"\n setup_logger(loglevel=loglevel.upper())\n\n def __enter__(self):\n return self\n\n @property\n def is_alive(self):\n if self.process is None:\n return False\n else:\n if self.using_corba:\n return self.process.poll() is None\n else:\n return self.process.isalive()\n\n def _start_broadcast_logger(self, update_rate=1.0):\n \"\"\" separate logger using broadcast_file \"\"\"\n # listen to broadcast file\n loadstep = 0\n overall_progress = 0\n try:\n old_tail = ''\n old_size = 0\n while self.is_alive:\n new_size = os.path.getsize(self.broadcast_file)\n if new_size != old_size:\n old_size = new_size\n new_tail = tail(self.broadcast_file, 4)\n if new_tail != old_tail:\n lines = new_tail.split('>>')\n for line in lines:\n line = line.strip().replace('<loadstep:\n loadstep=n\n overall_progress = 0\n self.log.info(line)\n elif \"overall-progress\" in line:\n n=int(re.search(r'\\d+', line).group())\n if n>overall_progress:\n overall_progress=n\n self.log.info(line)\n old_tail = new_tail\n time.sleep(update_rate)\n except Exception as e:\n pass\n\n def run(self, command, write_to_log=True):\n \"\"\"Runs APDL command(s)\n\n Parameters\n ----------\n command : str\n ANSYS APDL command.\n\n These commands will be written to a temporary input file and then run\n using /INPUT.\n\n write_to_log : bool, optional\n Overrides APDL log writing. Default True. When set to False, will\n not write command to log even through APDL command logging is enabled.\n\n Returns\n -------\n command_output : str\n Command output from ANSYS.\n\n Notes\n -----\n When two or more commands need to be run non-interactively\n (i.e. 
``*VWRITE``) then use\n\n >>> with ansys.non_interactive:\n >>> ansys.run(\"*VWRITE,LABEL(1),VALUE(1,1),VALUE(1,2),VALUE(1,3)\")\n >>> ansys.run(\"(1X,A8,' ',F10.1,' ',F10.1,' ',1F5.3)\")\n \"\"\"\n if self._store_commands:\n self._stored_commands.append(command)\n return\n elif command[:3].upper() in INVAL_COMMANDS:\n exception = Exception('Invalid pyansys command \"%s\"\\n\\n%s' %\n (command, INVAL_COMMANDS[command[:3]]))\n raise exception\n elif command[:4].upper() in INVAL_COMMANDS:\n exception = Exception('Invalid pyansys command \"%s\"\\n\\n%s' %\n (command, INVAL_COMMANDS[command[:4]]))\n raise exception\n elif write_to_log and self.apdl_log is not None:\n if not self.apdl_log.closed:\n self.apdl_log.write('%s\\n' % command)\n\n if command[:4] in self.redirected_commands:\n function = self.redirected_commands[command[:4]]\n return function(command)\n\n text = self._run(command)\n if text:\n self.response = text.strip()\n else:\n self.response = ''\n\n if self.response:\n self.log.info(self.response)\n if self._outfile:\n self._outfile.write('%s\\n' % self.response)\n\n if '*** ERROR ***' in self.response: # flag error\n self.log.error(self.response)\n # if not continue_on_error:\n raise Exception(self.response)\n\n # special returns for certain geometry commands\n try:\n short_cmd = command.split(',')[0]\n except:\n short_cmd = None\n\n if short_cmd in geometry_commands:\n return geometry_commands[short_cmd](self.response)\n\n if short_cmd in element_commands:\n return element_commands[short_cmd](self.response)\n\n return self.response\n\n def _run(self, command):\n if self.using_corba:\n # check if it's a single non-interactive command\n if command[:4].lower() == 'cdre':\n with self.non_interactive:\n return self.run(command)\n else:\n return self._run_corba_command(command)\n else:\n return self._run_process_command(command)\n\n # def store_processor(self, command):\n # \"\"\" Check if a command is changing the processor and store it\n # if so.\n # \"\"\"\n # # command may be abbreviated, check\n # processors = ['/PREP7',\n # '/POST1',\n # '/SOL', # /SOLUTION\n # '/POST26',\n # '/AUX2',\n # '/AUX3',\n # '/AUX12',\n # '/AUX15']\n\n # short_proc = ['/PRE',\n # '/POST',\n # '/SOL', # /SOLUTION\n # '/POS',\n # '/AUX']\n\n def _list(self, command):\n \"\"\" Replaces *LIST command \"\"\"\n items = command.split(',')\n filename = os.path.join(self.path, '.'.join(items[1:]))\n if os.path.isfile(filename):\n self.response = open(filename).read()\n self.log.info(self.response)\n else:\n raise Exception('Cannot run:\\n%s\\n' % command + 'File does not exist')\n\n def _run_process_command(self, command, return_response=True):\n \"\"\" Sends command and returns ANSYS's response \"\"\"\n if not self.process.isalive():\n raise Exception('ANSYS process closed')\n\n if command[:4].lower() == '/out':\n items = command.split(',')\n if len(items) > 1:\n self._output = '.'.join(items[1:])\n else:\n self._output = ''\n\n # send the command\n self.log.debug('Sending command %s' % command)\n self.process.sendline(command)\n\n # do not expect\n if '/MENU' in command:\n self.log.info('Enabling GUI')\n self.process.sendline(command)\n return\n\n full_response = ''\n while True:\n i = self.process.expect_list(expect_list, timeout=None)\n response = self.process.before.decode('utf-8')\n full_response += response\n if i >= CONTINUE_IDX and i < WARNING_IDX: # continue\n self.log.debug('Continue: Response index %i. 
Matched %s'\n % (i, ready_items[i].decode('utf-8')))\n self.log.info(response + ready_items[i].decode('utf-8'))\n if self.auto_continue:\n user_input = 'y'\n else:\n user_input = input('Response: ')\n self.process.sendline(user_input)\n\n elif i >= WARNING_IDX and i < ERROR_IDX: # warning\n self.log.debug('Prompt: Response index %i. Matched %s'\n % (i, ready_items[i].decode('utf-8')))\n self.log.warning(response + ready_items[i].decode('utf-8'))\n if self.auto_continue:\n user_input = 'y'\n else:\n user_input = input('Response: ')\n self.process.sendline(user_input)\n\n elif i >= ERROR_IDX and i < PROMPT_IDX: # error\n self.log.debug('Error index %i. Matched %s'\n % (i, ready_items[i].decode('utf-8')))\n self.log.error(response)\n response += ready_items[i].decode('utf-8')\n raise Exception(response)\n\n elif i >= PROMPT_IDX: # prompt\n self.log.debug('Prompt index %i. Matched %s'\n % (i, ready_items[i].decode('utf-8')))\n self.log.info(response + ready_items[i].decode('utf-8'))\n # user_input = input('Response: ')\n # self.process.sendline(user_input)\n raise Exception('User input expected. Try using non_interactive')\n\n else: # continue item\n self.log.debug('continue index %i. Matched %s'\n % (i, ready_items[i].decode('utf-8')))\n break\n \n # handle response\n if '*** ERROR ***' in response: # flag error\n self.log.error(response)\n if not continue_on_error:\n raise Exception(response)\n elif ignored.search(response): # flag ignored command\n if not self.allow_ignore:\n self.log.error(response)\n raise Exception(response)\n else:\n self.log.warning(response)\n else:\n self.log.info(response)\n\n if self._interactive_plotting:\n self._display_plot(full_response)\n\n if 'is not a recognized' in full_response:\n if not self.allow_ignore:\n full_response = full_response.replace('This command will be ignored.',\n '')\n full_response += '\\n\\nIgnore these messages by setting allow_ignore=True'\n raise Exception(full_response)\n\n # return last response and all preceding responses\n return full_response\n\n @property\n def processor(self):\n \"\"\" Returns the current processor \"\"\"\n msg = self.run('/Status')\n processor = None\n matched_line = [line for line in msg.split('\\n') if \"Current routine\" in line]\n if matched_line:\n # get the processor\n processor = re.findall(r'\\(([^)]+)\\)', matched_line[0])[0]\n return processor\n\n def _run_corba_command(self, command):\n \"\"\"\n Sends a command to the mapdl server\n\n \"\"\"\n if not self.is_alive:\n raise Exception('ANSYS process has been terminated')\n\n # cleanup command\n command = command.strip()\n if not command:\n raise Exception('Empty command')\n\n if command[:4].lower() == '/com':\n split_command = command.split(',')\n if len(split_command) < 2:\n return ''\n elif not split_command[1]:\n return ''\n elif split_command[1]:\n if not split_command[1].strip():\n return ''\n\n # /OUTPUT not redirected properly in corba\n if command[:4].lower() == '/out':\n items = command.split(',')\n if len(items) < 2: # empty comment\n return ''\n elif not items[1]: # empty comment\n return ''\n elif items[1]:\n if not items[1].strip(): # empty comment\n return ''\n\n items = command.split(',')\n if len(items) > 1: # redirect to file\n if len(items) > 2:\n if items[2].strip():\n filename = '.'.join(items[1:3]).strip()\n else:\n filename = '.'.join(items[1:2]).strip()\n else:\n filename = items[1]\n\n if filename:\n if os.path.basename(filename) == filename:\n filename = os.path.join(self.path, filename)\n self._output = filename\n if 
len(items) == 5:\n if items[4].lower().strip() == 'append':\n self._outfile = open(filename, 'a')\n else:\n self._outfile = open(filename, 'w')\n else:\n self._output = ''\n if self._outfile:\n self._outfile.close()\n self._outfile = None\n return ''\n\n # include error checking\n text = ''\n additional_text = ''\n\n self.log.debug('Running command %s' % command)\n text = self.mapdl.executeCommandToString(command)\n\n # print supressed output\n additional_text = self.mapdl.executeCommandToString('/GO')\n\n if 'is not a recognized' in text:\n if not self.allow_ignore:\n text = text.replace('This command will be ignored.', '')\n text += '\\n\\nIgnore these messages by setting allow_ignore=True'\n raise Exception(text)\n\n if text:\n text = text.replace('\\\\n', '\\n')\n if '*** ERROR ***' in text:\n self.log.error(text)\n raise Exception(text)\n\n if additional_text:\n additional_text = additional_text.replace('\\\\n', '\\n')\n if '*** ERROR ***' in additional_text:\n self.log.error(additional_text)\n raise Exception(additional_text)\n\n if self._interactive_plotting:\n self._display_plot('%s\\n%s' % (text, additional_text))\n\n # return text, additional_text\n if text == additional_text:\n additional_text = ''\n return '%s\\n%s' % (text, additional_text)\n\n def load_parameters(self):\n \"\"\"Loads and returns all current parameters\n\n Examples\n --------\n >>> parameters, arrays = mapdl.load_parameters()\n >>> print(parameters)\n {'ANSINTER_': 2.0,\n 'CID': 3.0,\n 'TID': 4.0,\n '_ASMDIAG': 5.363415510271,\n '_MAXELEMNUM': 26357.0,\n '_MAXELEMTYPE': 7.0,\n '_MAXNODENUM': 40908.0,\n '_MAXREALCONST': 1.0}\n \"\"\"\n # load ansys parameters to python\n filename = os.path.join(self.path, 'parameters.parm')\n self.Parsav('all', filename)\n self.parameters, self.arrays = load_parameters(filename)\n\n def add_file_handler(self, filepath, append):\n \"\"\" Adds a file handler to the log \"\"\"\n if append:\n mode = 'a'\n else:\n mode = 'w'\n\n self.fileHandler = logging.FileHandler(filepath)\n formatstr = '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n\n # self.fileHandler.setFormatter(logging.Formatter(formatstr))\n # self.log.addHandler(self.fileHandler)\n\n self.fileHandler = logging.FileHandler(filepath, mode=mode)\n self.fileHandler.setFormatter(logging.Formatter(formatstr))\n self.fileHandler.setLevel(logging.DEBUG)\n self.log.addHandler(self.fileHandler)\n self.log.info('Added file handler at %s' % filepath)\n\n def remove_file_handler(self):\n self.log.removeHandler(self.fileHandler)\n self.log.info('Removed file handler')\n\n def _display_plot(self, text):\n \"\"\"Display the last generated plot from ANSYS\"\"\" \n png_found = png_test.findall(text)\n if png_found:\n # flush graphics writer\n self.show('CLOSE')\n self.show('PNG')\n\n # get last filename based on the current jobname\n filenames = glob.glob(os.path.join(self.path, '%s*.png' % self.jobname))\n filenames.sort()\n filename = filenames[-1]\n\n if os.path.isfile(filename):\n img = mpimg.imread(filename)\n plt.imshow(img)\n plt.axis('off')\n plt.show() # consider in-line plotting\n else:\n self.log.error('Unable to find screenshot at %s' % filename)\n pass\n\n def __del__(self):\n \"\"\"Clean up when complete\"\"\"\n try:\n self.exit()\n except Exception as e:\n self.log.error('exit: %s', str(e))\n\n try:\n self.kill()\n except Exception as e:\n self.log.error('kill: %s', str(e))\n\n try:\n self._close_apdl_log()\n except Exception as e:\n self.log.error('Failed to close apdl log: %s', str(e))\n\n def Exit(self):\n msg = 
 DeprecationWarning('\\n\"Exit\" deprecated. \\n' +\n                                 'Please use \"exit\" instead')\n        warnings.warn(msg)\n        self.exit()\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        # clean up when complete\n        self.exit()\n\n    def exit(self, close_log=True):\n        \"\"\"Exit ANSYS process without attempting to kill the process.\n        \"\"\"\n        self.log.debug('Terminating ANSYS')\n        try:\n            if self.using_corba:\n                self.mapdl.terminate()\n            else:\n                if self.process is not None:\n                    self.process.sendline('FINISH')\n                    self.process.sendline('EXIT')\n\n        except Exception as e:\n            if 'WaitingForReply' not in str(e):\n                raise Exception(e)\n\n        # self.log.info('ANSYS exited')\n        if close_log:\n            if self.apdl_log is not None:\n                self.apdl_log.close()\n\n    def kill(self):\n        \"\"\" Forces ANSYS process to end and removes lock file \"\"\"\n        if self.is_alive:\n            try:\n                self.exit()\n            except:\n                kill_process(self.process.pid)\n                self.log.debug('Killed process %d' % self.process.pid)\n\n        if os.path.isfile(self.lockfile):\n            try:\n                os.remove(self.lockfile)\n            except:\n                self.log.warning('Unable to remove lock file %s ' % self.lockfile)\n\n    @property\n    def results(self):\n        \"\"\" Returns a binary interface to the result file \"\"\"\n        raise NotImplementedError('Deprecated. Use \"result\" instead')\n\n    @property\n    def result(self):\n        \"\"\" Returns a binary interface to the result file \"\"\"\n        try:\n            result_path = self.inquire('RSTFILE')\n            if not os.path.dirname(result_path):\n                result_path = os.path.join(self.path, '%s.rst' % result_path)\n        except:\n            result_path = os.path.join(self.path, '%s.rst' % self._jobname)\n\n        if not os.path.isfile(result_path):\n            raise FileNotFoundError('No results found at %s' % result_path)\n        return pyansys.read_binary(result_path)\n
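\n    # Example usage of the `result` property (an illustrative sketch, not code\n    # from this module; `nodal_solution` is part of the pyansys result\n    # interface):\n    #\n    #   mapdl.run('SOLVE')\n    #   result = mapdl.result\n    #   nnum, displacement = result.nodal_solution(0)\n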
\n    def __call__(self, command, **kwargs):\n        return self.run(command, **kwargs)\n\n    def open_corba(self, nproc, timeout, additional_switches):\n        \"\"\"\n        Open a connection to ANSYS via a CORBA interface\n        \"\"\"\n        self.log.info('Connecting to ANSYS via CORBA')\n\n        # command must include \"aas\" flag to start MAPDL server\n        command = '\"%s\" -j %s -aas -i tmp.inp -o out.txt -b -np %d %s' % (self.exec_file, self._jobname, nproc, additional_switches)\n\n        # remove the broadcast file if it exists:\n        if os.path.isfile(self.broadcast_file):\n            os.remove(self.broadcast_file)\n\n        # add run location to command\n        self.log.debug('Spawning shell process with: \"%s\"' % command)\n        self.log.debug('At \"%s\"' % self.path)\n        old_path = os.getcwd()\n        os.chdir(self.path)\n        self.process = subprocess.Popen(command,\n                                        shell=True,\n                                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        os.chdir(old_path)\n\n        # listen for broadcast file\n        self.log.debug('Waiting for valid key in %s' % self.broadcast_file)\n        telapsed = 0\n        tstart = time.time()\n        while telapsed < timeout:\n            try:\n                if os.path.isfile(self.broadcast_file):\n                    with open(self.broadcast_file, 'r') as f:\n                        text = f.read()\n                        if 'visited:collaborativecosolverunitior' in text:\n                            self.log.debug('Initialized ANSYS')\n                            break\n                time.sleep(0.1)\n                telapsed = time.time() - tstart\n\n            except KeyboardInterrupt:\n                raise KeyboardInterrupt\n\n        # exit if timed out\n        if telapsed > timeout:\n            err_msg = 'Unable to start ANSYS within %.1f seconds' % timeout\n            self.log.error(err_msg)\n            raise TimeoutError(err_msg)\n\n        # open server\n        keyfile = os.path.join(self.path, 'aaS_MapdlId.txt')\n        with open(keyfile) as f:\n            key = f.read()\n\n        # attempt to import corba\n        try:\n            from ansys_corba import CORBA\n        except:\n            pip_cmd = 'pip install ansys_corba'\n            raise ImportError('Missing ansys_corba.\\n' +\n                              'This feature does not support MAC OS.\\n' +\\\n                              'Otherwise, please install with \"%s\"' % pip_cmd)\n\n        orb = CORBA.ORB_init()\n        self.mapdl = orb.string_to_object(key)\n\n        # quick test\n        try:\n            self.mapdl.getComponentName()\n        except:\n            raise Exception('Unable to connect to APDL server')\n\n        self.using_corba = True\n        self.log.debug('Connected to ANSYS using CORBA interface')\n        self.log.debug('Key %s' % key)\n\n    class _non_interactive:\n        \"\"\"Allows user to enter commands that need to run\n        non-interactively.\n\n        Examples\n        --------\n        To use a non-interactive command like *VWRITE, use:\n\n        >>> with ansys.non_interactive:\n                ansys.run(\"*VWRITE,LABEL(1),VALUE(1,1),VALUE(1,2),VALUE(1,3)\")\n                ansys.run(\"(1X,A8,' ',F10.1,' ',F10.1,' ',1F5.3)\")\n\n        \"\"\"\n        def __init__(self, parent):\n            self.parent = parent\n\n        def __enter__(self):\n            self.parent.log.debug('entering non-interactive mode')\n            self.parent._store_commands = True\n\n        def __exit__(self, type, value, traceback):\n            self.parent.log.debug('exiting non-interactive mode')\n            self.parent._flush_stored()\n\n    def _flush_stored(self):\n        \"\"\"Writes stored commands to an input file and runs the input\n        file.  Used with non_interactive.\n        \"\"\"\n        self.log.debug('Flushing stored commands')\n        tmp_out = os.path.join(appdirs.user_data_dir('pyansys'),\n                               'tmp_%s.out' % random_string())\n        self._stored_commands.insert(0, \"/OUTPUT, '%s'\" % tmp_out)\n        self._stored_commands.append('/OUTPUT')\n        commands = '\\n'.join(self._stored_commands)\n        self.apdl_log.write(commands + '\\n')\n\n        # write to a temporary input file\n        filename = os.path.join(appdirs.user_data_dir('pyansys'),\n                                'tmp_%s.inp' % random_string())\n        self.log.debug('Writing the following commands to a temporary ' +\n                       'apdl input file:\\n%s' % commands)\n\n        with open(filename, 'w') as f:\n            f.writelines(commands)\n\n        self._store_commands = False\n        self._stored_commands = []\n        self.run(\"/INPUT, '%s'\" % filename, write_to_log=False)\n        if os.path.isfile(tmp_out):\n            self.response = '\\n' + open(tmp_out).read()\n\n        # clean up output file and append the output to the existing\n        # output file\n        # self.run('/OUTPUT, %s, , , APPEND' % self._output)\n        # if os.path.isfile(tmp_out):\n        #     for line in open(tmp_out).readlines():\n        #         self.run('/COM,%s\\n' % line[:74])\n\n        if self.response is None:\n            self.log.warning('Unable to read response from flushed commands')\n        else:\n            self.log.info(self.response)\n\n    def get_float(self, entity=\"\", entnum=\"\", item1=\"\", it1num=\"\",\n                  item2=\"\", it2num=\"\", **kwargs):\n        \"\"\"Used to get the value of a float-parameter from APDL.\n        Note that internally an APDL parameter __floatparameter__ is\n        created/overwritten.\n        \"\"\"\n        line = self.get(\"__floatparameter__\", entity, entnum, item1, it1num,\n                        item2, it2num, **kwargs)\n        return float(re.search(r\"(?<=VALUE\\=).*\", line).group(0))\n\n    def read_float_parameter(self, parameter_name):\n        \"\"\"Read out the value of an ANSYS parameter to use in Python.\n        Can raise TypeError.\n\n        Parameters\n        ----------\n        parameter_name : str\n            Name of the parameter inside ANSYS.\n\n        Returns\n        -------\n        float\n            Value of ANSYS parameter.\n        \"\"\"\n        try:\n            line = self.run(parameter_name + \" = \" + parameter_name)\n        except TypeError:\n            print('Input variable parameter_name should be string')\n            raise\n        return float(re.search(r\"(?<=\\=).*\", line).group(0))\n\n    def read_float_from_inline_function(self, function_str):\n        \"\"\"Use an APDL inline function to get a float value from ANSYS.\n        Note that internally an
 APDL parameter __floatparameter__ is\n        created/overwritten.\n\n        Parameters\n        ----------\n        function_str : str\n            String containing an inline function as used in APDL.\n\n        Returns\n        -------\n        float\n            Value returned by inline function.\n\n        Examples\n        --------\n        >>> inline_function = \"node({},{},{})\".format(x, y, z)\n        >>> node = apdl.read_float_from_inline_function(inline_function)\n        \"\"\"\n        self.run(\"__floatparameter__=\"+function_str)\n        return self.read_float_parameter(\"__floatparameter__\")\n\n    def open_gui(self, include_result=True):\n        \"\"\"Saves existing database and opens up APDL GUI\n\n        Parameters\n        ----------\n        include_result : bool, optional\n            Allow the result file to be post processed in the GUI.\n        \"\"\"\n        # specify a path for the temporary database\n        temp_dir = tempfile.gettempdir()\n        save_path = os.path.join(temp_dir, 'ansys_tmp')\n        if os.path.isdir(save_path):\n            rmtree(save_path)\n        os.mkdir(save_path)\n\n        name = 'tmp'\n        tmp_database = os.path.join(save_path, '%s.db' % name)\n        if os.path.isfile(tmp_database):\n            os.remove(tmp_database)\n\n        # get the state, close, and finish\n        prior_processor = self.processor\n        self.finish()\n        self.save(tmp_database)\n        self.exit(close_log=False)\n\n        # # verify lock file is gone\n        # while os.path.isfile(self.lockfile):\n        #     time.sleep(0.1)\n\n        # copy result file to temp directory\n        if include_result:\n            resultfile = os.path.join(self.path, '%s.rst' % self.jobname)\n            if os.path.isfile(resultfile):\n                tmp_resultfile = os.path.join(save_path, '%s.rst' % name)\n                copyfile(resultfile, tmp_resultfile)\n\n        # write temporary input file\n        start_file = os.path.join(save_path, 'start%s.ans' % self.version)\n        with open(start_file, 'w') as f:\n            f.write('RESUME\\n')\n\n        # some versions of ANSYS just look for \"start.ans\" when starting\n        other_start_file = os.path.join(save_path, 'start.ans')\n        with open(other_start_file, 'w') as f:\n            f.write('RESUME\\n')\n\n        # issue system command to run ansys in GUI mode\n        cwd = os.getcwd()\n        os.chdir(save_path)\n        os.system('cd \"%s\" && \"%s\" -g -j %s' % (save_path, self.exec_file, name))\n        os.chdir(cwd)\n\n        # must remove the start file when finished\n        os.remove(start_file)\n        os.remove(other_start_file)\n\n        # open up script again when finished\n        self._open()\n        self.resume(tmp_database)\n        if prior_processor is not None:\n            if 'BEGIN' not in prior_processor:\n                self.run('/%s' % prior_processor)\n\n    @property\n    def jobname(self):\n        \"\"\"MAPDL job name.\n\n        This is requested from the active mapdl instance.\n        \"\"\"\n        return self.inquire('JOBNAME')\n\n    def inquire(self, func):\n        \"\"\"Returns system information\n\n        Parameters\n        ----------\n        func : str\n            Specifies the type of system information returned.
 See the\n            notes section for more information.\n\n        Returns\n        -------\n        value : str\n            Value of the inquired item.\n\n        Notes\n        -----\n        Allowable func entries:\n\n        - ``LOGIN`` - Pathname of the login directory on Linux systems or\n          the pathname of the default directory (including drive letter)\n          on Windows systems.\n        - ``DOCU`` - Pathname of the ANSYS docu directory.\n        - ``APDL`` - Pathname of the ANSYS APDL directory.\n        - ``PROG`` - Pathname of the ANSYS executable directory.\n        - ``AUTH`` - Pathname of the directory in which the license file resides.\n        - ``USER`` - Name of the user currently logged-in.\n        - ``DIRECTORY`` - Pathname of the current directory.\n        - ``JOBNAME`` - Current Jobname.\n        - ``RSTDIR`` - Result file directory\n        - ``RSTFILE`` - Result file name\n        - ``RSTEXT`` - Result file extension\n        - ``OUTPUT`` - Current output file name\n\n        Examples\n        --------\n        Return the job name\n\n        >>> mapdl.inquire('JOBNAME')\n        'file'\n\n        Return the result file name\n\n        >>> mapdl.inquire('RSTFILE')\n        'file.rst'\n        \"\"\"\n        response = ''\n        try:\n            response = self.run('/INQUIRE, , %s' % func)\n            return response.split('=')[1].strip()\n        except IndexError:\n            raise RuntimeError('Cannot parse %s' % response)\n\n    def Run(self, command):\n        msg = DeprecationWarning('\\nCommand \"Run\" deprecated. \\n' +\n                                 'Please use \"run\" instead')\n        warnings.warn(msg)\n        return self.run(command)\n\n\nclass ANSYS(Mapdl):\n\n    def __init__(self, *args, **kwargs):\n        msg = DeprecationWarning('\\nClass \"ANSYS\" deprecated. \\n' +\n                                 'Please use \"Mapdl\" instead')\n        warnings.warn(msg)\n        super(ANSYS, self).__init__(*args, **kwargs)\n\n\n# TODO: Speed this up with:\n# https://tinodidriksen.com/2011/05/cpp-convert-string-to-double-speed/\ndef load_parameters(filename):\n    \"\"\"Load parameters from a file\n\n    Parameters\n    ----------\n    filename : str\n        Name of the parameter file to read in.\n\n    Returns\n    -------\n    parameters : dict\n        Dictionary of single value parameters\n\n    arrays : dict\n        Dictionary of arrays\n    \"\"\"\n    parameters = {}\n    arrays = {}\n\n    with open(filename) as f:\n        append_mode = False\n        append_text = []\n        for line in f.readlines():\n            if append_mode:\n                if 'END PREAD' in line:\n                    append_mode = False\n                    values = ''.join(append_text).split(' ')\n                    shp = arrays[append_varname].shape\n                    raw_parameters = np.genfromtxt(values)\n\n                    n_entries = np.prod(shp)\n                    if n_entries != raw_parameters.size:\n                        paratmp = np.zeros(n_entries)\n                        paratmp[:raw_parameters.size] = raw_parameters\n                        paratmp = paratmp.reshape(shp)\n                    else:\n                        paratmp = raw_parameters.reshape(shp, order='F')\n\n                    arrays[append_varname] = paratmp.squeeze()\n                    append_text.clear()\n                else:\n                    nosep_line = line.replace('\\n', '').replace('\\r', '')\n                    append_text.append(\" \" + re.sub(r\"(?<=\\d)-(?=\\d)\",\" -\", nosep_line))\n\n            elif '*DIM' in line:\n                # *DIM, Par, Type, IMAX, JMAX, KMAX, Var1, Var2, Var3, CSYSID\n                split_line = line.split(',')\n                varname = split_line[1].strip()\n                arr_type = split_line[2]\n                imax = int(split_line[3])\n                jmax = int(split_line[4])\n                kmax = int(split_line[5])\n\n                if arr_type == 'CHAR':\n                    arrays[varname] = np.empty((imax, jmax, kmax), dtype=' int:\r\n        a=collections.Counter(s)\r\n        maxodd=0\r\n        ret=0\r\n        for i in a:\r\n            if a[i]%2==0:\r\n                ret+=a[i]\r\n            else:  # an odd count exists\r\n                ret+=a[i]-1  # add the largest even number below this odd count\r\n                maxodd=1  # besides those even parts, add 1 for one odd character in the middle\r\n        ret+=maxodd\r\n        return ret\r\n\r\n
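# Worked example: for s = \"abccccdd\" the counts are a:1, b:1, c:4, d:2; the even\r\n# counts contribute 4 + 2 = 6, each odd count contributes (count - 1) = 0, and a\r\n# single odd character may occupy the centre, so the answer is 6 + 1 = 7.\r\n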
a=Solution()\r\na.longestPalindrome(s=\"ccc\")\r\n","sub_path":"序号题/409.py","file_name":"409.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"544144068","text":"from flask import Flask, render_template, request, url_for, jsonify, make_response, redirect\nfrom werkzeug.utils import secure_filename\nfrom flask_api import FlaskAPI\n\nimport os\nimport sys\nimport traceback\n\nfrom apis.router_api import Router\n\n# routing table\nrouter = Router()\n\napp = FlaskAPI(__name__)\nBASEDIR = os.path.abspath(os.path.dirname(sys.argv[0]))  # root directory of the .py file that invokes this module\napp.config.from_pyfile(BASEDIR + '/config.py')\napp.config['BASEDIR'] = BASEDIR\n\n\n@app.route('/browser/<funname>/', methods=['GET', 'POST'])\ndef api(funname):\n    \"\"\"\n    Unified routing entry point\n    \"\"\"\n    try:\n        func = router.call(f'browser/{funname}/{request.method}')\n        return func()\n    except:\n        traceback.print_exc()\n\n\n# Custom 404 page\n@app.errorhandler(404)\ndef show_error_page(error):\n    return render_template('error_404.html'), 404\n\n\nif __name__ == '__main__':\n    # app.run(host=app.config['HOST'], port=app.config['PORT'], debug=True)\n    for k, v in app.config.items():\n        print(k, v)\n","sub_path":"web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"578510755","text":"import matplotlib.pyplot as plt\n\nwith open('loss.txt') as f:\n    lines = f.readlines()\nif len(lines) == 0:\n    print('No lines in file')\n    exit()\n\nlosses = []\niters = []\n\nfor line in lines:\n    splitStr = line.split()\n    iters.append(int(splitStr[0]))\n    losses.append(float(splitStr[1]))\n\nplt.plot(iters, losses, markevery=5)\nplt.title(\"Model loss\")\nplt.xlabel(\"Iteration\")\nplt.ylabel(\"Total loss\")\nplt.savefig(\"loss.png\")\n","sub_path":"Week3/TaskD/LossPlotter.py","file_name":"LossPlotter.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"169660898","text":"import sys\nfrom collections import defaultdict\n\ninp_fp = open(sys.argv[1])\nout_fp = open(sys.argv[2], \"w\")\nleft_tf = {\n    \"T-D\" : \"3\",\n    \"T-T\" : \"4\",\n    \"T-N\" : \"5\"}\nright_tf = {\n    \"D-T\" : \"1\",\n    \"T-T\" : \"4\",\n    \"N-T\" : \"7\"}\nboth_tf = {\n    \"T-T\" : \"4\"}\nfor f in inp_fp:\n    binding_dict = defaultdict(lambda : 0)\n    tmp_f = open (f.strip())\n    total_reads = 0 \n    for line in tmp_f:\n        binding_id = line.split(\"\\t\")[0].split(\"#\")[-1] \n        binding_dict[binding_id] +=1\n        total_reads += 1 \n    tmp_f.close()\n    left_tf_count = 0 \n    right_tf_count = 0\n    both_tf_count = 0\n    for k in left_tf:\n        left_tf_count += binding_dict[left_tf[k]]\n    for k in right_tf: \n        right_tf_count += binding_dict[right_tf[k]]\n    for k in both_tf:\n        both_tf_count += binding_dict[both_tf[k]]\n\n    obs_by_exp = \"NA\" \n    if (left_tf_count >0) and (right_tf_count >0):\n        expected = (left_tf_count/total_reads)*(right_tf_count/total_reads) \n        obs_by_exp = round((both_tf_count/total_reads)/expected,3)\n        to_write = \"\\t\".join(map(str,[f.strip(), left_tf_count, right_tf_count, \n                                      both_tf_count, total_reads, obs_by_exp ]))\n        out_fp.write(to_write + \"\\n\")\n    else:\n        to_write = \"\\t\".join(map(str,[f.strip(), left_tf_count, right_tf_count, \n                                      both_tf_count, total_reads, obs_by_exp])) \n        out_fp.write(to_write + \"\\n\")\n\nout_fp.close() \ninp_fp.close()\n
\n","sub_path":"scripts/excess_ratio_cobinding.py","file_name":"excess_ratio_cobinding.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"111266458","text":"import pkuseg\n# from collections import Counter\n# import pprint\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n\ncontent = []\nwith open('/home/yeung/PycharmProjects/untitled/wordcloud/shijiuda.txt', encoding='utf-8') as f:\n    content = f.read()\n\nlexicon=['习近平', '中国']\nseg = pkuseg.pkuseg(user_dict=lexicon)\ntext = seg.cut(content)\n\nstopwords = []\nwith open('/home/yeung/PycharmProjects/untitled/wordcloud/stopword.txt', encoding='utf-8') as f:\n    stopwords = f.read()\n\nnew_text = ''\nfor w in text:\n    if w not in stopwords:\n        new_text += w\n        new_text += \" \"\n\n\n\n# counter = Counter(new_text)\n# speech = list(counter.elements())\n# print(speech)\n# pprint.pprint(counter.most_common(50))\n\nimg = Image.open('/home/yeung/PycharmProjects/untitled/wordcloud/party.png')\nimg_array = np.array(img)\nwordcloud = WordCloud(background_color='white', font_path='usr/share/fonts/YaHeiConsolas.ttf', mask=img_array).generate(new_text)\n# plt.imshow(wordcloud, interpolation='bilinear')\n# plt.axis('off')\n# plt.figure()\n\n\nwordcloud.to_file('hah.png')\n\n","sub_path":"wordcloud/seg.py","file_name":"seg.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"117656212","text":"from get_matches import get_matches\nfrom db_ops import update_db\nfrom dateutil.parser import parse\nimport time\n\n\ndef score_alphabet(entries):\n    \"\"\"\n    Keeps track of an \"alphabet\"-like file of all possible scores in our DB\n    :param entries: New entries to be checked\n    :return: None\n    \"\"\"\n    raise NotImplementedError\n\n\ndef retrieval(my_db):\n    \"\"\"\n    Executed every 5 minutes, the main routine retrieves the latest matches and inserts them in the database\n    :param my_db: My mongoDB\n    :return: None\n    \"\"\"\n
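    # Flow (summary comment added for clarity): fetch the newest matches, keep\n    # only those with a timestamp newer than the one stored in 'latest.entry',\n    # insert them into the database, then advance the stored timestamp.\n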
    try:\n        results = get_matches()\n    except ValueError:\n        print('Could not get matches. Damn you OPAP.')\n        results = None\n\n    if results:\n        with open('latest.entry', 'r') as file:\n            latest = parse(file.read())\n\n        # reverse order so that the latest events show up first\n        results = results[::-1]\n\n        # to be inserted in the db\n        new_entries = []\n\n        # in case of an unfinished game, the time field will be null\n        if not results[0]:\n            results = results[1:]\n\n        for game in results:\n            if parse(game[\"time\"]) > latest:\n                new_entries.append(game)\n\n            else:\n                break\n\n        if new_entries:\n            x = update_db(new_entries, my_db)\n            print(str(len(new_entries)) + \" insertions on \" + time.strftime(\"%Y-%m-%d @ %H:%M:%S\"))\n\n            # find the latest entry and write the result in a file\n            latest = results[0][\"time\"]\n            with open('latest.entry', 'w') as file:\n                file.write(str(latest))","sub_path":"routines.py","file_name":"routines.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"300194624","text":"import matplotlib.pyplot as plt\n\ndef Squares():\n\n    input_values = [1,2,3,4,5]\n    squares = [1,4,9,16,25]\n    plt.plot(input_values,squares, linewidth=3)\n    plt.title(\"Square Numbers\", fontsize=24)\n    plt.xlabel(\"Value\", fontsize=14)\n    plt.ylabel(\"Square of Value\", fontsize=14)\n\n    plt.tick_params(axis='both', labelsize=10)\n\n    plt.show()\n    \n#Squares()\n\ndef Scatter():\n\n    plt.scatter(2,4,s=20)\n    plt.title(\"The World's Best Graph\", fontsize=24)\n    plt.xlabel(\"Value\", fontsize=14)\n    plt.ylabel(\"Square of Value\", fontsize=14)\n\n    plt.tick_params(axis=\"both\",which=\"major\", labelsize=14)\n    plt.show()\n\n#Scatter()\n\ndef Scatterplot():\n\n    x_values = [1,2,3,4,5,6]\n    y_values = [1,4,9,16,25,36]\n\n    plt.scatter(x_values,y_values, s=20)\n    plt.title(\"The World's (New) Best Graph\", fontsize=24)\n    plt.xlabel(\"Value\", fontsize=14)\n    plt.ylabel(\"Square of Value\", fontsize=14)\n\n    plt.tick_params(axis=\"both\",which=\"major\", labelsize=14)\n    plt.show()\n\n#Scatterplot()\n\ndef AutoGenerateValues():\n\n    x_values = list(range(1,1001))\n    y_values = [x**2 for x in x_values]\n    plt.scatter(x_values, y_values,c='green',edgecolor='none', s=1)\n    plt.xlabel(\"Value\", fontsize=14)\n    plt.ylabel(\"Square of Value\", fontsize=14)\n    plt.tick_params(axis=\"both\",which=\"major\", labelsize=14)\n    plt.axis([0,1100,0,1100000])\n    plt.show()  # Or plt.savefig('filename.png', bbox_inches='tight')\n\nAutoGenerateValues()\n","sub_path":"Desktop/Python/MatPlotLib/Plots.py","file_name":"Plots.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"296337511","text":"#!/usr/bin/python\n\nimport pickle\nimport numpy\nimport sys\nimport matplotlib.pyplot\nfrom sklearn import linear_model\nsys.path.append(\"../tools/\")\nfrom feature_format import featureFormat, targetFeatureSplit\nfrom outlier_cleaner import sortByError\n\n### read in data dictionary, convert to numpy array\ndata_dict = pickle.load( open(\"../final_project/final_project_dataset.pkl\", \"r\") )\nfeatures = [\"salary\", \"bonus\"]\ndata = featureFormat(data_dict, features)\ntrainX = list(map((lambda x: x[0]), data))\ntrainY = list(map((lambda x: x[1]), data))\n\nl = len(trainX)\ntrainX = numpy.array(trainX)\ntrainY = numpy.array(trainY)\ntrainX = trainX.reshape(l,1)\ntrainY = trainY.reshape(l,1)\n### your code below\n\nfor point in data:\n    salary = point[0]\n    bonus = point[1]\n    matplotlib.pyplot.scatter( salary, bonus 
)\n\nmatplotlib.pyplot.xlabel(\"salary\")\nmatplotlib.pyplot.ylabel(\"bonus\")\nmatplotlib.pyplot.show()\n\nreg = linear_model.LinearRegression()\nreg.fit(trainX, trainY)\npredictions = reg.predict(trainX)\nsort_data = sortByError( predictions, trainX, trainY )\n\n\nprint(sort_data[len(sort_data) - 1])\nl = sort_data[len(sort_data) - 1][3] + 1\nfor x in data_dict:\n l = l -1\n print(x)\n print(l)\n if l == 0:\n break\n","sub_path":"outliers/enron_outliers.py","file_name":"enron_outliers.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"83235419","text":"#!/usr/bin/env python\n\n# libraries\nimport warnings\nimport numpy as np\n\n## reduction models\nfrom .._externals.ppca import PPCA\nfrom sklearn.decomposition import PCA, FastICA, IncrementalPCA, KernelPCA, FactorAnalysis, TruncatedSVD, SparsePCA, MiniBatchSparsePCA, DictionaryLearning, MiniBatchDictionaryLearning\nfrom sklearn.manifold import TSNE, MDS, SpectralEmbedding, LocallyLinearEmbedding, Isomap\n\n# internal libraries\nfrom ..tools.df2mat import df2mat\nfrom ..tools.normalize import normalize as normalizer\nfrom .._shared.helpers import *\n\n# main function\ndef reduce(x, ndims=3, model='IncrementalPCA', model_params={}, normalize=False, internal=False,\n align=False):\n \"\"\"\n Reduces dimensionality of an array, or list of arrays\n\n Parameters\n ----------\n x : Numpy array or list of arrays\n Dimensionality reduction using PCA is performed on this array. If\n there are nans present in the data, the function will try to use\n PPCA to interpolate the missing values.\n\n ndims : int\n Number of dimensions to reduce\n\n model : str\n Decomposition/manifold learning model to use. Models supported: PCA,\n IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,\n FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,\n TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS.\n\n model_params : dict\n Optional dictionary of scikit-learn parameters to pass to reduction model.\n See scikit-learn specific model docs for details.\n\n normalize : str or False\n If set to 'across', the columns of the input data will be z-scored\n across lists (default). If set to 'within', the columns will be\n z-scored within each list that is passed. If set to 'row', each row of\n the input data will be z-scored. If set to False, the input data will\n be returned (default is False).\n\n align : bool\n If set to True, data will be run through the ``hyperalignment''\n algorithm implemented in hypertools.tools.align (default: False).\n\n Returns\n ----------\n x_reduced : Numpy array or list of arrays\n The reduced data with ndims dimensionality is returned. 
If the input\n is a list, a list is returned.\n\n \"\"\"\n\n # sub functions\n def fill_missing(x):\n\n # ppca if missing data\n m = PPCA()\n m.fit(data=np.vstack(x))\n x_pca = m.transform()\n\n # if the whole row is missing, return nans\n all_missing = [idx for idx,a in enumerate(np.vstack(x)) if all([type(b)==np.nan for b in a])]\n if len(all_missing)>0:\n for i in all_missing:\n x_pca[i,:]=np.nan\n\n # get the original lists back\n if len(x)>1:\n x_split = np.cumsum([i.shape[0] for i in x][:-1])\n return list(np.split(x_pca,x_split,axis=0))\n else:\n return [x_pca]\n\n def reduce_list(x, model, model_params):\n split = np.cumsum([len(xi) for xi in x])[:-1]\n m=model(**model_params)\n x_r = np.vsplit(m.fit_transform(np.vstack(x)), split)\n if len(x)>1:\n return [xi for xi in x_r]\n else:\n return [x_r[0]]\n\n # dictionary of models\n models = {\n 'PCA' : PCA,\n 'IncrementalPCA' : IncrementalPCA,\n 'SparsePCA' : SparsePCA,\n 'MiniBatchSparsePCA' : MiniBatchSparsePCA,\n 'KernelPCA' : KernelPCA,\n 'FastICA' : FastICA,\n 'FactorAnalysis' : FactorAnalysis,\n 'TruncatedSVD' : TruncatedSVD,\n 'DictionaryLearning' : DictionaryLearning,\n 'MiniBatchDictionaryLearning' : MiniBatchDictionaryLearning,\n 'TSNE' : TSNE,\n 'Isomap' : Isomap,\n 'SpectralEmbedding' : SpectralEmbedding,\n 'LocallyLinearEmbedding' : LocallyLinearEmbedding,\n 'MDS' : MDS\n }\n\n # main\n x = format_data(x)\n\n assert all([i.shape[1]>ndims for i in x]), \"In order to reduce the data, ndims must be less than the number of dimensions\"\n\n # if there are any nans in any of the lists, use ppca\n if np.isnan(np.vstack(x)).any():\n warnings.warn('Missing data: Inexact solution computed with PPCA (see https://github.com/allentran/pca-magic for details)')\n x = fill_missing(x)\n\n # normalize\n if normalize:\n x = normalizer(x, normalize=normalize)\n\n # build model params dict\n if model_params=={}:\n model_params = {\n 'n_components' : ndims\n }\n elif 'n_components' in model_params:\n pass\n else:\n model_params['n_components']=ndims\n\n # reduce data\n x_reduced = reduce_list(x, models[model], model_params)\n\n if align == True:\n # Import is here to avoid circular imports with reduce.py\n from .align import align as aligner\n x_reduced = aligner(x_reduced)\n\n # return data\n if internal or len(x_reduced)>1:\n return x_reduced\n else:\n return x_reduced[0]\n","sub_path":"hypertools/tools/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"193357481","text":"# coding: utf-8\n#\n# Copyright 2020 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for auth-related one-off jobs.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport ast\nimport itertools\n\nfrom constants import constants\nfrom core.domain import auth_domain\nfrom core.domain import auth_jobs_one_off as auth_jobs\nfrom core.domain import taskqueue_services\nfrom core.platform import models\nfrom core.platform.auth import firebase_auth_services\nfrom core.platform.auth import firebase_auth_services_test\nfrom core.tests import test_utils\nimport feconf\nimport python_utils\n\nimport contextlib2\n\nauth_models, user_models = (\n models.Registry.import_models([models.NAMES.auth, models.NAMES.user]))\n\n\nclass AuditFirebaseImportReadinessOneOffJobTests(test_utils.AppEngineTestBase):\n\n AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False\n\n def count_one_off_jobs_in_queue(self):\n \"\"\"Returns the number of one off jobs in the taskqueue.\"\"\"\n return self.count_jobs_in_mapreduce_taskqueue(\n taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS)\n\n def run_one_off_job(self):\n \"\"\"Begins the one off job and asserts it completes as expected.\n\n Returns:\n *. The output of the one off job.\n \"\"\"\n job_id = auth_jobs.AuditFirebaseImportReadinessOneOffJob.create_new()\n self.assertEqual(self.count_one_off_jobs_in_queue(), 0)\n auth_jobs.AuditFirebaseImportReadinessOneOffJob.enqueue(job_id)\n self.assertEqual(self.count_one_off_jobs_in_queue(), 1)\n self.process_and_flush_pending_mapreduce_tasks()\n self.assertEqual(self.count_one_off_jobs_in_queue(), 0)\n return sorted(\n ast.literal_eval(o) for o in\n auth_jobs.AuditFirebaseImportReadinessOneOffJob.get_output(job_id))\n\n def create_user(self, user_id, email, deleted=False):\n \"\"\"Creates a new user with the provided ID and email address.\n\n Args:\n user_id: str. The user's ID.\n email: str. The user's email address.\n deleted: bool. 
Value for the user's deleted property.\n \"\"\"\n user_models.UserSettingsModel(\n id=user_id, email=email, deleted=deleted,\n role=feconf.ROLE_ID_EXPLORATION_EDITOR,\n preferred_language_codes=[constants.DEFAULT_LANGUAGE_CODE]\n ).put()\n\n def test_users_with_distinct_emails_returns_empty_output(self):\n self.create_user('u1', 'u1@test.com')\n self.create_user('u2', 'u2@test.com')\n\n self.assertEqual(self.run_one_off_job(), [])\n\n def test_users_with_same_email_are_reported(self):\n self.create_user('u1', 'a@test.com')\n self.create_user('u2', 'a@test.com')\n\n self.assertEqual(self.run_one_off_job(), [\n ['ERROR: a@test.com is a shared email', 'u1, u2'],\n ])\n\n def test_deleted_users_are_reported(self):\n self.create_user('u1', 'u1@test.com', deleted=True)\n self.create_user('u2', 'u2@test.com', deleted=True)\n self.create_user('u3', 'u3@test.com', deleted=False)\n\n self.assertEqual(self.run_one_off_job(), [\n ['ERROR: Found deleted users', 'u1, u2'],\n ])\n\n def test_system_committer_is_ignored_by_duplicate_email_check(self):\n self.create_user('xx', 'admin@test.com')\n self.create_user('yy', 'admin@test.com')\n auth_models.UserAuthDetailsModel(\n id='xx', gae_id=feconf.SYSTEM_COMMITTER_ID\n ).put()\n auth_models.UserIdentifiersModel(\n id=feconf.SYSTEM_COMMITTER_ID, user_id='xx'\n ).put()\n\n self.assertEqual(self.run_one_off_job(), [\n ['INFO: SYSTEM_COMMITTER_ID skipped', ['xx']],\n ])\n\n def test_system_committer_is_ignored_by_deleted_check(self):\n self.create_user('u1', 'admin@test.com', deleted=True)\n auth_models.UserAuthDetailsModel(\n id='u1', gae_id=feconf.SYSTEM_COMMITTER_ID\n ).put()\n auth_models.UserIdentifiersModel(\n id=feconf.SYSTEM_COMMITTER_ID, user_id='u1'\n ).put()\n\n self.assertEqual(self.run_one_off_job(), [\n ['INFO: SYSTEM_COMMITTER_ID skipped', ['u1']],\n ])\n\n\nclass PopulateFirebaseAccountsOneOffJobTests(test_utils.AppEngineTestBase):\n\n AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False\n\n def setUp(self):\n super(PopulateFirebaseAccountsOneOffJobTests, self).setUp()\n self._auth_id_generator = itertools.count()\n self.exit_stack = contextlib2.ExitStack()\n self.sdk_stub = firebase_auth_services_test.FirebaseAdminSdkStub()\n\n self.sdk_stub.install(self)\n self.exit_stack.callback(self.sdk_stub.uninstall)\n\n # Forces all users to produce the same hash value during unit tests to\n # prevent them from being sharded and complicating the testing logic.\n self.exit_stack.enter_context(self.swap_to_always_return(\n auth_jobs, 'ID_HASHING_FUNCTION', value=1))\n\n def tearDown(self):\n self.exit_stack.close()\n super(PopulateFirebaseAccountsOneOffJobTests, self).tearDown()\n\n def count_one_off_jobs_in_queue(self):\n \"\"\"Returns the number of one off jobs in the taskqueue.\"\"\"\n return self.count_jobs_in_mapreduce_taskqueue(\n taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS)\n\n def run_one_off_job(self):\n \"\"\"Begins the one off job and asserts it completes as expected.\n\n Returns:\n *. 
The output of the one off job.\n \"\"\"\n job_id = auth_jobs.PopulateFirebaseAccountsOneOffJob.create_new()\n self.assertEqual(self.count_one_off_jobs_in_queue(), 0)\n auth_jobs.PopulateFirebaseAccountsOneOffJob.enqueue(job_id)\n self.assertEqual(self.count_one_off_jobs_in_queue(), 1)\n self.process_and_flush_pending_mapreduce_tasks()\n self.assertEqual(self.count_one_off_jobs_in_queue(), 0)\n return sorted(\n ast.literal_eval(o) for o in\n auth_jobs.PopulateFirebaseAccountsOneOffJob.get_output(job_id))\n\n def create_oppia_user(self, deleted=False):\n \"\"\"Returns an (auth_id, user_id) pair for a new user.\n\n Args:\n deleted: bool. Value for the user's deleted property.\n\n Returns:\n AuthIdUserIdPair. The association the user should create.\n \"\"\"\n auth_id = 'aid%d' % python_utils.NEXT(self._auth_id_generator)\n user_id = 'uid_%s' % auth_id\n user_models.UserSettingsModel(\n id=user_id, email=('email_%s@test.com' % auth_id), deleted=deleted,\n role=feconf.ROLE_ID_EXPLORATION_EDITOR,\n preferred_language_codes=[constants.DEFAULT_LANGUAGE_CODE]\n ).put()\n return auth_domain.AuthIdUserIdPair(auth_id, user_id)\n\n def create_multi_oppia_users(self, count):\n \"\"\"Returns specified number of (auth_id, user_id) pairs for new users.\n\n Args:\n count: int. The number of users to create.\n\n Returns:\n list(auth_domain.AuthIdUserIdPair). The auth associations of the\n users.\n \"\"\"\n return [self.create_oppia_user() for _ in python_utils.RANGE(count)]\n\n def assert_auth_mapping_exists(self, auth_assoc):\n \"\"\"Asserts that the given auth association exists.\n\n Args:\n auth_assoc: AuthIdUserIdPair. The association to check.\n \"\"\"\n auth_id, user_id = auth_assoc\n self.assertEqual(\n firebase_auth_services.get_auth_id_from_user_id(user_id), auth_id)\n self.assertEqual(\n firebase_auth_services.get_user_id_from_auth_id(auth_id), user_id)\n\n def assert_auth_mapping_does_not_exist(self, auth_assoc):\n \"\"\"Asserts that the given auth association does not exist.\n\n Args:\n auth_assoc: AuthIdUserIdPair. The association to check.\n \"\"\"\n auth_id, user_id = auth_assoc\n self.assertIsNone(\n firebase_auth_services.get_auth_id_from_user_id(user_id))\n self.assertIsNone(\n firebase_auth_services.get_user_id_from_auth_id(auth_id))\n\n def assert_multi_auth_mappings_exist(self, auth_assocs):\n \"\"\"Asserts that the given auth associations exist.\n\n Args:\n auth_assocs: list(AuthIdUserIdPair). The association to check.\n \"\"\"\n auth_ids, user_ids = (list(a) for a in python_utils.ZIP(*auth_assocs))\n self.assertEqual(\n firebase_auth_services.get_multi_auth_ids_from_user_ids(user_ids),\n auth_ids)\n self.assertEqual(\n firebase_auth_services.get_multi_user_ids_from_auth_ids(auth_ids),\n user_ids)\n\n def assert_multi_auth_mappings_do_not_exist(self, auth_assocs):\n \"\"\"Asserts that the given auth associations exist.\n\n Args:\n auth_assocs: list(AuthIdUserIdPair). 
The association to check.\n \"\"\"\n auth_ids, user_ids = (list(a) for a in python_utils.ZIP(*auth_assocs))\n self.assertEqual(\n firebase_auth_services.get_multi_user_ids_from_auth_ids(auth_ids),\n [None] * len(auth_ids))\n self.assertEqual(\n firebase_auth_services.get_multi_auth_ids_from_user_ids(user_ids),\n [None] * len(user_ids))\n\n def test_successfully_imports_one_user(self):\n auth_assoc = self.create_oppia_user()\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['SUCCESS: Created Firebase accounts', 1],\n ])\n\n self.assert_auth_mapping_exists(auth_assoc)\n self.sdk_stub.assert_firebase_user_exists(auth_assoc.auth_id)\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: Pre-existing Firebase accounts', 1],\n ])\n\n def test_successfully_imports_users_in_bulk(self):\n self.exit_stack.enter_context(\n self.swap(auth_jobs, 'MAX_USERS_FIREBASE_CAN_IMPORT_PER_CALL', 3))\n\n auth_assocs = self.create_multi_oppia_users(11)\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['SUCCESS: Created Firebase accounts', 11],\n ])\n\n self.assert_multi_auth_mappings_exist(auth_assocs)\n self.sdk_stub.assert_multi_firebase_users_exist(\n [a.auth_id for a in auth_assocs])\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: Pre-existing Firebase accounts', 11],\n ])\n\n def test_skips_deleted_users(self):\n self.create_oppia_user(deleted=True)\n\n self.assertItemsEqual(self.run_one_off_job(), [])\n\n def test_initialize_app_error_is_reported(self):\n self.exit_stack.enter_context(self.sdk_stub.mock_initialize_app_error())\n\n auth_assoc = self.create_oppia_user()\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['WARNING: No action needed',\n 'UnknownError(u\\'could not init\\',)'],\n ])\n\n self.assert_auth_mapping_does_not_exist(auth_assoc)\n self.sdk_stub.assert_firebase_user_does_not_exist(auth_assoc.auth_id)\n\n def test_delete_app_error_is_reported(self):\n self.exit_stack.enter_context(self.sdk_stub.mock_delete_app_error())\n\n auth_assoc = self.create_oppia_user()\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['SUCCESS: Created Firebase accounts', 1],\n ['WARNING: No action needed',\n 'UnknownError(u\\'could not delete app\\',)'],\n ])\n\n # Deleting the app should not be a fatal error, so we should still\n # create a firebase account and an association.\n self.assert_auth_mapping_exists(auth_assoc)\n self.sdk_stub.assert_firebase_user_exists(auth_assoc.auth_id)\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: Pre-existing Firebase accounts', 1],\n ])\n\n def test_import_user_error_is_reported(self):\n mock_import_users_error = self.sdk_stub.mock_import_users_error(\n call_error_sequence=(True,)) # Always raise an exception.\n\n auth_assoc = self.create_oppia_user()\n\n with mock_import_users_error:\n self.assertItemsEqual(self.run_one_off_job(), [\n ['FAILURE: Failed to create Firebase accounts',\n 'DataLossError(u\\'Failed to connect\\',)'],\n ])\n\n self.assert_auth_mapping_does_not_exist(auth_assoc)\n self.sdk_stub.assert_firebase_user_does_not_exist(auth_assoc.auth_id)\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['SUCCESS: Created Firebase accounts', 1],\n ])\n\n self.assert_auth_mapping_exists(auth_assoc)\n self.sdk_stub.assert_firebase_user_exists(auth_assoc.auth_id)\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: Pre-existing Firebase accounts', 1],\n ])\n\n def test_single_import_batch_error_is_reported(self):\n self.exit_stack.enter_context(\n self.swap(auth_jobs, 'MAX_USERS_FIREBASE_CAN_IMPORT_PER_CALL', 
3))\n mock_import_users_error = self.sdk_stub.mock_import_users_error(\n call_error_sequence=(False, True, False))\n\n auth_assocs = self.create_multi_oppia_users(9)\n\n with mock_import_users_error:\n self.assertItemsEqual(self.run_one_off_job(), [\n ['FAILURE: Failed to create Firebase accounts',\n 'DataLossError(u\\'Failed to connect\\',)'],\n ['SUCCESS: Created Firebase accounts', 6],\n ])\n\n successful_assocs = auth_assocs[:3] + auth_assocs[6:]\n self.assert_multi_auth_mappings_exist(successful_assocs)\n self.sdk_stub.assert_multi_firebase_users_exist(\n [a.auth_id for a in successful_assocs])\n failed_assocs = auth_assocs[3:6]\n self.assert_multi_auth_mappings_do_not_exist(failed_assocs)\n self.sdk_stub.assert_multi_firebase_users_do_not_exist(\n [a.auth_id for a in failed_assocs])\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: Pre-existing Firebase accounts', 6],\n ['SUCCESS: Created Firebase accounts', 3],\n ])\n\n self.assert_multi_auth_mappings_exist(auth_assocs)\n self.sdk_stub.assert_multi_firebase_users_exist(\n [a.auth_id for a in auth_assocs])\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: Pre-existing Firebase accounts', 9],\n ])\n\n def test_individual_user_import_errors_are_reported(self):\n self.exit_stack.enter_context(\n self.swap(auth_jobs, 'MAX_USERS_FIREBASE_CAN_IMPORT_PER_CALL', 3))\n mock_import_users_error = self.sdk_stub.mock_import_users_error(\n user_error_sequence=(False, True, False, False))\n\n auth_assocs = self.create_multi_oppia_users(10)\n\n with mock_import_users_error:\n self.assertItemsEqual(self.run_one_off_job(), [\n ['FAILURE: Failed to create Firebase accounts',\n 'Import user_id=\\'uid_aid1\\' failed: FirebaseError'],\n ['FAILURE: Failed to create Firebase accounts',\n 'Import user_id=\\'uid_aid5\\' failed: FirebaseError'],\n ['FAILURE: Failed to create Firebase accounts',\n 'Import user_id=\\'uid_aid9\\' failed: FirebaseError'],\n ['SUCCESS: Created Firebase accounts', 7],\n ])\n\n successful_assocs = (\n auth_assocs[:1] + auth_assocs[2:5] + auth_assocs[6:9])\n self.assert_multi_auth_mappings_exist(successful_assocs)\n self.sdk_stub.assert_multi_firebase_users_exist(\n [a.auth_id for a in successful_assocs])\n failed_assocs = [auth_assocs[1], auth_assocs[5], auth_assocs[9]]\n self.assert_multi_auth_mappings_do_not_exist(failed_assocs)\n self.sdk_stub.assert_multi_firebase_users_do_not_exist(\n [a.auth_id for a in failed_assocs])\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: Pre-existing Firebase accounts', 7],\n ['SUCCESS: Created Firebase accounts', 3],\n ])\n\n self.assert_multi_auth_mappings_exist(auth_assocs)\n self.sdk_stub.assert_multi_firebase_users_exist(\n [a.auth_id for a in auth_assocs])\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: Pre-existing Firebase accounts', 10],\n ])\n\n def test_system_comitter_is_ignored(self):\n auth_assoc = self.create_oppia_user()\n auth_models.UserAuthDetailsModel(\n id=auth_assoc.user_id, gae_id=feconf.SYSTEM_COMMITTER_ID\n ).put()\n auth_models.UserIdentifiersModel(\n id=feconf.SYSTEM_COMMITTER_ID, user_id=auth_assoc.user_id\n ).put()\n\n self.assertItemsEqual(self.run_one_off_job(), [\n ['INFO: SYSTEM_COMMITTER_ID skipped', [auth_assoc.user_id]],\n ])\n\n self.assert_auth_mapping_does_not_exist(auth_assoc)\n 
self.sdk_stub.assert_firebase_user_does_not_exist(auth_assoc.auth_id)\n","sub_path":"core/domain/auth_jobs_one_off_test.py","file_name":"auth_jobs_one_off_test.py","file_ext":"py","file_size_in_byte":17490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"28027360","text":"# This is the script to demonstrate several cluster analysis algorithms\n# Based on Python 2.7\n# Author: Peng Zhang\n# E-mail: hizhangp@mail.ustc.edu.cn\n# dataset: http://cs.joensuu.fi/sipu/datasets/\n\n\nimport sys\nfrom appwindow import *\nfrom PyQt4.QtGui import *\n\n\ndef main():\n    # create window\n    app = QApplication(sys.argv)\n    win = ApplicationWindow()\n\n    win.show()\n    sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n\n    main()\n","sub_path":"cluster_analysis.py","file_name":"cluster_analysis.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"469258763","text":"# -*- coding: UTF-8\n#\ntry:\n    from lxml import etree\nexcept ImportError:\n    import xml.etree.ElementTree as etree\nimport collections\nimport os.path\n\n\nPeak = collections.namedtuple('Peak', ('min_mass', 'max_mass'))\n\nclass MirParser:\n\n    def __init__(self, mir_file=None):\n        parser = etree.XMLParser()  # recover=True, encoding='utf-8'\n        if os.path.isfile(mir_file):\n            self._mir_root = etree.parse(mir_file, parser=parser).getroot()\n        else:\n            self._mir_root = etree.fromstring(mir_file, parser=parser)\n\n\n    def parse_peaks(self):\n        \"\"\"Parse peaks from the .mir file.\n\n        >>> example_mir = '''\n        ... <ImagingResults>\n        ... <Result Type=\"PkFilter\" MinMass=\"24.535144\" MaxMass=\"24.735144\"/>\n        ... <Result Type=\"PkFilter\" MinMass=\"40.1662\" MaxMass=\"40.3662\"/>\n        ... <Result Type=\"PkFilter\" MinMass=\"59.00335\" MaxMass=\"59.20335\"/>\n        ... </ImagingResults>'''\n        >>> m = MirParser(example_mir)\n        >>> m.parse_peaks()\n        [Peak(min_mass=24.535144, max_mass=24.735144), Peak(min_mass=40.1662, max_mass=40.3662), Peak(min_mass=59.00335, max_mass=59.20335)]\n        \"\"\"\n        peaks = []\n        if self._mir_root.tag == 'ImagingResults':\n            for peak in self._mir_root.findall(\"Result[@Type='PkFilter']\"):\n                peak_attributes = dict(peak.items())\n                peaks.append(Peak(float(peak_attributes['MinMass']), float(peak_attributes['MaxMass'])))\n        return sorted(peaks, key=lambda x: x[0])\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n\n","sub_path":"mir_parser.py","file_name":"mir_parser.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"255867149","text":"\nfrom stack import Stack\n\ndef revstring(mystr):\n    str_stack = Stack()\n    result =\"\"\n\n    for c in mystr:\n        str_stack.push(c)\n\n    while not str_stack.isEmpty():\n        result += str_stack.pop()\n\n    return result\n\nstr = input(\"Enter a string:\")\nprint(revstring(str))\n","sub_path":"data-structures/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"542016176","text":"import requests\n\nwith open('yandex_disk_token.txt', 'r') as file:\n    token = file.read().strip()\n\nclass YaUploader:\n    def __init__(self, token: str):\n        self.token = token\n\n    def upload(self, file_path: str):\n        params = {\n            'path': name_file\n        }\n        headers = {\n            'content-type': 'application/json',\n            'accept': 'application/json', 'authorization': f'OAuth {uploader.token}'\n        }\n        req = requests.get(API_BASE_URL + \"/v1/disk/resources/upload\", params=params, headers=headers)\n        upload_url = req.json()[\"href\"]\n        requests.put(upload_url, headers=headers, files={'file': path_to_file})\n
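\n# Note: the upload is a two-step flow against the Yandex Disk REST API -- a GET\n# to /v1/disk/resources/upload returns a one-time upload href, and the file is\n# then PUT to that href.\n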
\n\nif __name__ == '__main__':\n    token = 'your token here'  # placeholder token (translated from Russian)\n    API_BASE_URL = \"https://cloud-api.yandex.net:443\"\n    name_file = '№33.jpg'\n    path_to_file = \"C://Users/BuniN/Desktop/№33.jpg\"\n    uploader = YaUploader(token)\n    uploader.upload(path_to_file)","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"339105781","text":"with open('C:/Users/dmoho/Desktop/.story.txt','r+') as file:\n    count_line=0\n    count_word_all=0\n    count_word=0\n    count_letter=0\n    for line in file:\n        if line[0]!=\"T\":\n            count_line+=1\n        words=line.split()\n        for word in words:\n            count_word_all+=1\n            if word==\"the\" or word==\"The\":\n                count_word+=1\n            for letter in word:\n                if letter==\"a\":\n                    count_letter+=1\nprint(f'Quantity of lines without letter \"T\" in the beginning is {count_line}\\nQuantity of words \"the\" or \"The\" in the text is {count_word}\\nQuantity of words in the text is {count_word_all}')","sub_path":"File_txt/File_txt_task_14.2.py","file_name":"File_txt_task_14.2.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629556600","text":"from Transaction import Transaction\r\n# BankStatement Class\r\nclass BStatement:\r\n    # Declaring the constructor\r\n    def __init__(self, initialBal=0.0):\r\n        self.__BeginningBal = initialBal\r\n        self.__EndingBal = initialBal\r\n        self.__TransactionLog = []\r\n        self.__ArrangedLog = []\r\n        self.__RunningBalLog = []\r\n        self.__NumberofEntries = 0\r\n        self.__NumberofDeposits = 0\r\n        self.__NumberofWithdrawals = 0\r\n# Setter for beginning and end balance\r\n\r\n    def setBegEndBals(self, BegEndBalance):\r\n        self.__BeginningBal = self.__EndingBal = BegEndBalance\r\n# Getter for beginning balance\r\n\r\n    def getBeginningBal(self):\r\n        return self.__BeginningBal\r\n# Getter for end balance\r\n\r\n    def getEndingBal(self):\r\n        return self.__EndingBal\r\n# Getter for the number of entries\r\n\r\n    def getNumberofEntries(self):\r\n        return self.__NumberofEntries\r\n# Getter for the number of deposits\r\n\r\n    def getNumberofDeposits(self):\r\n        return self.__NumberofDeposits\r\n# Getter for the number of withdrawals\r\n\r\n    def getNumberofWithDrawals(self):\r\n        return self.__NumberofWithdrawals\r\n# Insert transaction method\r\n\r\n    def insertTransaction(self, transaction):\r\n        self.__TransactionLog.append(transaction)\r\n# Appending this transaction to the transaction log\r\n        self.__NumberofEntries += 1\r\n# Incrementing the number of entries\r\n# If it is a deposit, add the amount to the last balance\r\n        if transaction.getCode() == 'Deposit':\r\n            self.__NumberofDeposits += 1\r\n# Incrementing the number of deposits\r\n# if RunningBalLog is not empty, get the last balance and add the amount\r\n            if len(self.__RunningBalLog) > 0:\r\n                self.__EndingBal = self.__RunningBalLog[-1] + transaction.getAmount()\r\n                self.__RunningBalLog.append(self.__EndingBal)\r\n# Appending the end balance to the running balance log\r\n            else:\r\n                # Otherwise, it means it's the first transaction\r\n                self.__EndingBal = self.getBeginningBal() + transaction.getAmount()\r\n                self.__RunningBalLog.append(self.__EndingBal)\r\n        else:\r\n            # otherwise it is a withdrawal transaction:\r\n            # deduct the amount\r\n            self.__NumberofWithdrawals += 1\r\n            if len(self.__RunningBalLog) > 0:\r\n                self.__EndingBal = self.__RunningBalLog[-1] - transaction.getAmount()\r\n                self.__RunningBalLog.append(self.__EndingBal)\r\n            else:\r\n                self.__EndingBal = self.getBeginningBal() - transaction.getAmount()\r\n                self.__RunningBalLog.append(self.__EndingBal)\r\n
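\r\n    # Worked example using the figures from start() below: with a beginning\r\n    # balance of 29.92, a 157.56 deposit gives a running balance of 187.48, and\r\n    # a subsequent 149.86 withdrawal leaves 37.62.\r\n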
# Displaying all transactions, beginning and end balances\r\n    def displayResults(self):\r\n        print(\"The beginning balance was: $\" + str(self.__BeginningBal))\r\n        for index, t in enumerate(self.__TransactionLog):\r\n            print(\"Transaction: \" + str(index + 1) + \" was a \" + t.getCode() + \" amount: $\" + str(t.getAmount()) + \" for \" + t.getNote())\r\n            print(\"Running Bal: $\" + str(self.__RunningBalLog[index]))\r\n        print(\"The ending balance is: $\" + str(self.__EndingBal))\r\n        print(\"The number of Transactions is: \" + str(self.__NumberofEntries))  # The number of transactions is printed\r\n        print(\"The number of Deposits is: \" + str(self.__NumberofDeposits))\r\n# The number of Deposits is printed\r\n        print(\"The number of Withdrawals is: \" + str(self.__NumberofWithdrawals))\r\n# The number of Withdrawals is printed\r\n# Arranging the current transactions\r\n\r\n    def arrangeTransactions(self):\r\n        self.__ArrangedLog.clear()\r\n# Clearing the current list\r\n# Then, loop through all of the transactions and add all\r\n# deposit transactions\r\n        for t in self.__TransactionLog:\r\n            if t.getCode() == 'Deposit':\r\n                self.__ArrangedLog.append(t)\r\n# Loop through all of the transactions again and finally add\r\n# all of the withdrawal transactions\r\n        for t in self.__TransactionLog:\r\n            if t.getCode() == 'Withdrawal':\r\n                self.__ArrangedLog.append(t)\r\n# Prints the arranged transactions\r\n\r\n    def printArranged(self):\r\n        print(\"Printing the Deposits and Withdrawals as a group:\")\r\n        for index, t in enumerate(self.__ArrangedLog):\r\n            print(\"Transaction was a \" + t.getCode() + \" amount: $\" + str(t.getAmount()) + \" for \" + t.getNote())\r\n# Start function to be called from the main block\r\n\r\n\r\ndef start():\r\n    myStatement = BStatement()\r\n    myStatement.setBegEndBals(29.92)\r\n    T1 = Transaction()\r\n    T1.setAmount(157.56)\r\n    T1.setCode('Deposit')\r\n    T1.setNote('CTPay')\r\n    T2 = Transaction(149.86, 'Withdrawal', \"Rent\")\r\n    T3 = Transaction()\r\n    T3.setAmount(89.56)\r\n    T3.setCode('Deposit')\r\n    T3.setNote('Tips')\r\n    T4 = Transaction(17.56, 'Deposit', \"Gift\")\r\n    T5 = Transaction()\r\n    T5.setAmount(89.77)\r\n    T5.setCode('Withdrawal')\r\n    T5.setNote('Date')\r\n    T6 = Transaction(167.75, 'Deposit', \"Loan\")\r\n    T7 = Transaction()\r\n    T7.setAmount(90.00)\r\n    T7.setCode('Withdrawal')\r\n    T7.setNote('Loan Payment')\r\n    T8 = Transaction(71.77, 'Withdrawal', \"Groceries\")\r\n    myStatement.insertTransaction(T1)\r\n    myStatement.insertTransaction(T2)\r\n    myStatement.insertTransaction(T3)\r\n    myStatement.insertTransaction(T4)\r\n    myStatement.insertTransaction(T5)\r\n    myStatement.insertTransaction(T6)\r\n    myStatement.insertTransaction(T7)\r\n    myStatement.insertTransaction(T8)\r\n    myStatement.displayResults()\r\n    myStatement.arrangeTransactions()\r\n    myStatement.printArranged()\r\n# Main block of the program\r\n\r\n\r\nif __name__ == '__main__':\r\n    start()","sub_path":"Bank_Account_Class.py","file_name":"Bank_Account_Class.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"335022737","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 30 08:04:10 2021\n\n@author: christostrydom\n\"\"\"\n\nimport os\nimport subprocess\n\nos.system(\"echo Hello from the other side!\")\n
print(\"Current Working Directory \", os.getcwd())\ncurrency='USDZAR'\n\ntry:\n    # Change the current working directory\n    os.chdir(\"\"\"/Users/christostrydom/Data/{currency}\"\"\".format(currency=currency))\n    print(\"Directory changed\")\nexcept OSError:\n    print(\"Can't change the Current Working Directory\")\n\n# subprocess.run(['duka', 'EURUSD','-d', '2018-02-02'],shell=True)\nsubprocess.run('duka {currency} -d 2018-02-02'.format(currency=currency),shell=True)\n\n","sub_path":"duka_download.py","file_name":"duka_download.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"42921314","text":"\"\"\"\r\nTutorial 4: Complex Models\r\n==========================\r\n\r\nIn this tutorial, we will fit more complex models with N=10, N=20 and more parameters. We will consider the following:\r\n\r\n - Why more complex models are more difficult to fit, and may lead the non-linear search to incorrectly infer\r\n models with significantly lower likelihoods than the true maximum likelihood model.\r\n\r\n - Strategies for ensuring the non-linear search correctly estimates the maximum likelihood model.\r\n\r\n - What drives the run-times of a model-fit, how one must carefully balance run-times with model complexity,\r\n and strategies for mitigating this.\r\n\r\nWHAT I NEED TO WRITE:\r\n\r\n- Example which fits an N=15 model and gets an incorrect result, concepts like \"local maxima\", model complexity,\r\nusing composition API to simplify model, etc, using priors to do this.\r\n\r\n- Sections on run times.\r\n\r\n- Sections on non-linear search settings.\r\n\r\nCan rewrite and borrow from HowToLens.\r\n\r\nIn this example, every fit to the noisy 1D signal was a good fit, based on the fit looking visually close to the data.\r\n\r\nFor modeling in general, however, things are not always so simple. It is common for the model-fit to provide a bad fit to the data.\r\nFurthermore, it can be difficult to determine if this is because the model is genuinely a poor fit or because the non-linear search (e.g. `emcee`)\r\nfailed to sample parameter space robustly enough to locate its highest likelihood regions. The next session will illustrate an example of this.\r\n\r\nWhen a non-linear search infers a lower likelihood solution than the highest likelihood solutions that exist in the parameter space, called\r\nthe \"global maximum likelihood\", it is said to have become trapped by a \"local maximum\". There is no simple way to determine if a non-linear search has\r\ndone this. The process typically involves visually inspecting the results, fitting the model many times (ideally with different models, non-linear searches and settings) and building up intuition for your modeling problem as to how things behave and when they work / do not work.\r\n\r\nOwing to the model-specific nature of this problem, these lectures will only briefly illustrate model-fitting failures and how one might overcome them.\r\nIf you embark on your own model-fitting endeavours, this will be the aspect of model-fitting you will have to learn about yourself!\r\n\"\"\"\r\n# %matplotlib inline\r\n# from pyprojroot import here\r\n# workspace_path = str(here())\r\n# %cd $workspace_path\r\n# print(f\"Working Directory has been set to `{workspace_path}`\")\r\n\r\nimport autofit as af\r\nimport os\r\nfrom os import path\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\"\"\"\r\n__Data__\r\n\r\nWe first load the dataset we will fit, which is a new `dataset` where the underlying signal is a sum of two `Gaussian` \r\nprofiles which share the same centre.\r\n\"\"\"\r\ndataset_path = path.join(\"dataset\", \"example_1d\", \"gaussian_x2\")\r\ndata = af.util.numpy_array_from_json(file_path=path.join(dataset_path, \"data.json\"))\r\nnoise_map = af.util.numpy_array_from_json(\r\n    file_path=path.join(dataset_path, \"noise_map.json\")\r\n)\r\n\r\n\"\"\"\r\nPlotting the data shows the noisy signal is more complicated than just a 1D Gaussian.\r\n\r\nNote that both Gaussians are centred at the same point (x = 50). We will compose a model that reflects this.\r\n\"\"\"\r\nxvalues = np.arange(data.shape[0])\r\nplt.errorbar(\r\n    xvalues, data, yerr=noise_map, color=\"k\", ecolor=\"k\", elinewidth=1, capsize=2\r\n)\r\nplt.title(\"1D Gaussian dataset with errors from the noise-map.\")\r\nplt.xlabel(\"x values of profile\")\r\nplt.ylabel(\"Signal Value\")\r\nplt.show()\r\n
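\r\n\"\"\"\r\nAs a quick diagnostic (an added sketch, not part of the original tutorial text), the signal-to-noise ratio\r\nof each data point shows how strongly the data can constrain the model:\r\n\"\"\"\r\nsignal_to_noise_map = data / noise_map\r\nprint(\"Maximum signal-to-noise:\", np.max(signal_to_noise_map))\r\n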
\r\n\"\"\"\r\n__Models__\r\n\r\nWe create the `Gaussian` class which will form our model components using the standard **PyAutoFit** format.\r\n\"\"\"\r\n\r\n\r\nclass Gaussian:\r\n    def __init__(\r\n        self,\r\n        centre=30.0,  # <- **PyAutoFit** recognises these constructor arguments\r\n        normalization=1.0,  # <- are the Gaussian`s model parameters.\r\n        sigma=5.0,\r\n    ):\r\n        \"\"\"\r\n        Represents a 1D Gaussian profile.\r\n\r\n        This is a model-component of example models in the **HowToFit** lectures and is used to fit example datasets\r\n        via a non-linear search.\r\n\r\n        Parameters\r\n        ----------\r\n        centre\r\n            The x coordinate of the profile centre.\r\n        normalization\r\n            Overall normalization of the profile.\r\n        sigma\r\n            The sigma value controlling the size of the Gaussian.\r\n        \"\"\"\r\n        self.centre = centre\r\n        self.normalization = normalization\r\n        self.sigma = sigma\r\n\r\n    def model_data_1d_via_xvalues_from(self, xvalues: np.ndarray):\r\n        \"\"\"\r\n        Returns a 1D Gaussian on an input list of Cartesian x coordinates.\r\n\r\n        The input xvalues are translated to a coordinate system centred on the Gaussian, via its `centre`.\r\n\r\n        The output is referred to as the `model_data` to signify that it is a representation of the data from the\r\n        model.\r\n\r\n        Parameters\r\n        ----------\r\n        xvalues\r\n            The x coordinates in the original reference frame of the data.\r\n        \"\"\"\r\n        transformed_xvalues = np.subtract(xvalues, self.centre)\r\n        return np.multiply(\r\n            np.divide(self.normalization, self.sigma * np.sqrt(2.0 * np.pi)),\r\n            np.exp(-0.5 * np.square(np.divide(transformed_xvalues, self.sigma))),\r\n
)\r\n\r\n\r\n\"\"\"\r\n__Analysis__\r\n\r\nWe now define the `Analysis` class for this model-fit. \r\n\r\nThe `log_likelihood_function` of this analysis now assumes that the `instance` that is input into it will contain\r\nmultiple 1D profiles.\r\n \r\n The way the `model_data` is computed is updating accordingly (the sum of each individual Gaussian's `model_data`).\r\n\"\"\"\r\n\r\n\r\nclass Analysis(af.Analysis):\r\n def __init__(self, data, noise_map):\r\n super().__init__()\r\n\r\n self.data = data\r\n self.noise_map = noise_map\r\n\r\n def log_likelihood_function(self, instance):\r\n \"\"\"\r\n Returns the log likelihood of the fit of an `instance` containing many 1D\r\n Profiles (e.g. Gaussians) to the dataset, using a model instance.\r\n\r\n Parameters\r\n ----------\r\n instance\r\n A list of 1D profiles with parameters set via the non-linear search.\r\n\r\n Returns\r\n -------\r\n float\r\n The log likelihood value indicating how well this model fit the `MaskedDataset`.\r\n \"\"\"\r\n\r\n \"\"\"\r\n In the previous tutorial the instance was a single `Gaussian` profile, meaning we could create the model data \r\n using the line:\r\n\r\n model_data = instance.gaussian.model_data_1d_via_xvalues_from(xvalues=self.data.xvalues)\r\n\r\n In this tutorial our instance is comprised of multiple 1D Gaussians, because we will use a `Collection` to\r\n compose the model:\r\n\r\n model = Collection(gaussian_0=Gaussian, gaussian_1=Gaussian).\r\n\r\n By using a Collection, this means the instance parameter input into the fit function is a\r\n dictionary where individual profiles (and their parameters) can be accessed as followed:\r\n\r\n print(instance.gaussian_0)\r\n print(instance.gaussian_1)\r\n print(instance.gaussian_0.centre)\r\n\r\n In this tutorial, the `model_data` is therefore the summed `model_data` of all individual Gaussians in the \r\n model. The function `model_data_from_instance` performs this summation. \r\n \"\"\"\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n chi_squared = sum(chi_squared_map)\r\n noise_normalization = np.sum(np.log(2 * np.pi * noise_map**2.0))\r\n log_likelihood = -0.5 * (chi_squared + noise_normalization)\r\n\r\n return log_likelihood\r\n\r\n def model_data_from_instance(self, instance):\r\n \"\"\"\r\n To create the summed profile of all individual profiles, we use a list comprehension to iterate over\r\n all profiles in the instance.\r\n\r\n The key point to understand is that the `instance` has the properties of a Python `iterator` and therefore\r\n can be looped over using the standard Python for syntax (e.g. 
`for profile in instance`).\r\n\r\n __Alternative Syntax__\r\n\r\n For those not familiar with list comprehensions, the code below shows how to use the instance to create the\r\n summed profile using a simpler for loop.\r\n\r\n model_data = np.zeros(shape=self.data.xvalues.shape[0])\r\n\r\n for profile in instance:\r\n model_data += profile.model_data_1d_via_xvalues_from(xvalues=self.data.xvalues)\r\n\r\n return model_data\r\n \"\"\"\r\n xvalues = np.arange(self.data.shape[0])\r\n\r\n return sum(\r\n [\r\n profile.model_data_1d_via_xvalues_from(xvalues=xvalues)\r\n for profile in instance\r\n ]\r\n )\r\n\r\n\r\n\"\"\"\r\n__Collection__\r\n\r\nUse a `Collection` to compose the model we fit, consisting of two `Gaussian`'s.\r\n\"\"\"\r\nmodel = af.Collection(gaussian_0=Gaussian, gaussian_1=Gaussian)\r\n\r\n\"\"\"\r\n__Model Customization__\r\n\r\nWe can fully customize the model that we fit. \r\n\r\nFirst, lets align the centres of the two `Gaussian`'s (given we know they are aligned in the data). Note that\r\ndoing so reduces the number of free parameters in the model by 1, from N=6 to N=5.\r\n\r\nLets suppose we have a `dataset` that consists of two `Gaussian` \r\nprofiles, but we also know the following information about the dataset:\r\n\r\n- The 2 `Gaussian`'s are centrally aligned.\r\n- The `sigma` of one `Gaussian` is equal to 1.0.\r\n- The sigma of another `Gaussian` is above 3.0.\r\n\r\nWe can edit the `Model` components we pass into the `Collection` to meet these constraints accordingly.\r\n\r\nLets first create the model `Gaussian`'s as we did in the previous tutorial.\r\n\"\"\"\r\ngaussian_0 = af.Model(Gaussian)\r\ngaussian_1 = af.Model(Gaussian)\r\n\r\n\"\"\"\r\nWe can centrally align the two `Gaussian`'s by setting the `centre` of the first `Gaussian` to the `centre` of the\r\nsecond `Gaussian`.\r\n\r\nThis removes a free parameter from the model reducing the dimensionality by 1 (from N=6 to N=5).\r\n\"\"\"\r\ngaussian_0.centre = gaussian_1.centre\r\n\r\n\"\"\"\r\nWe can follow the same API to set the `sigma` of the first `Gaussian` to 1.0.\r\n\r\nThis again removes another free parameter from the model (from N=5 to N=4).\r\n\"\"\"\r\ngaussian_0.sigma = 1.0\r\n\r\n\"\"\"\r\nWe can add assertions, for example requiring that the `sigma` value of the second `Gaussian` is above 3.0.\r\n\r\nAssertions do not change the dimensionality of the model, because we are not fixing or removing any free parameters.\r\n\"\"\"\r\ngaussian_1.add_assertion(gaussian_1.sigma > 3.0)\r\n\r\n\"\"\"\r\nWe again input these newly customized model components into the `Collection`.\r\n\"\"\"\r\nmodel = af.Collection(\r\n gaussian_0=gaussian_0,\r\n gaussian_1=gaussian_1,\r\n)\r\n\r\n\"\"\"\r\nThe customized model can be printed via the `info` attribute, where the customizations discussed above can be seen.\r\n\"\"\"\r\nprint(model.info)\r\n\r\n\"\"\"\r\n__Model Fit__\r\n\r\nLets now perform the fit using our model which is composed of two profiles in a non-linear parameter space of\r\ndimensionality N=4.\r\n\"\"\"\r\nanalysis = Analysis(data=data, noise_map=noise_map)\r\n\r\nsearch = af.Emcee()\r\n\r\nprint(\r\n \"The non-linear search has begun running. 
\\n\"\r\n \"Checkout the autofit_workspace/output/howtofit/tutorial_5__gaussian_x1__exponential_x1 \\n\"\r\n \"folder for live output of the results.\\n\"\r\n \"This Jupyter notebook cell with progress once the search has completed - this could take a few minutes!\"\r\n)\r\n\r\nresult = search.fit(model=model, analysis=analysis)\r\n\r\nprint(\"The search has finished run - you may now continue the notebook.\")\r\n\r\n\"\"\"\r\n__Result__\r\n\r\nThe `info` attribute shows the result in a readable format, which contains informaiton on the full collection\r\nof model components.\r\n\"\"\"\r\nprint(result.info)\r\n\r\n\"\"\"\r\n__Cookbooks__\r\n\r\nThis tutorial illustrates how to compose model out of multiple components, using a `Collection`.\r\n\r\n**PyAutoFit** has many advanced model composition tools, which offer more customization of `Collection` objects,\r\nallow models to be composed and fitted to multiple datasets and for multi-level models to be created out of\r\nhierarchies of Python classes.\r\n\r\nCheckout the `autofit_workspace/*/model` package for these cookbooks with give a full run through of all of\r\n**PyAutoFit**'s model composition tools, or read them on the readthedocs:\r\n\r\n - `cookbook 1: Basics `_\r\n\r\n - `cookbook 2: Collections `_\r\n\r\n__Wrap Up__\r\n\r\nAnd with that, we are complete. In this tutorial, we learned how to compose and fit complex models in **PyAutoFit**.\r\n \r\nTo end, you should think again in more detail about your model fitting problem:\r\n\r\n Are there many different model components you may wish to define and fit?\r\n\r\n Is your data the super position of many different model components, like the profiles in this tutorial?\r\n\r\n In this tutorial, all components of our model did the same thing, represent a 1D profile. In your model, you may\r\nhave model components that represent different parts of your model, which need to be combined in more complicated ways\r\nin order to create your model-fit. 
You now have all the tools you need to define, compose and fit very complex models!\r\n\"\"\"\r\n","sub_path":"scripts/howtofit/chapter_1_introduction/tutorial_4_complex_models.py","file_name":"tutorial_4_complex_models.py","file_ext":"py","file_size_in_byte":13472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"561779055","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom functools import reduce\nimport operator\nfrom random import randint, choice\n\nimport logging\n_logger = logging.getLogger(__name__)\n_logger.setLevel(logging.DEBUG)\n\n# Add FileHandler and only log WARNING and higher\nLOG_FILE = 'logfile.log'\nfh = logging.FileHandler(LOG_FILE)\nfh.name = 'File Logger'\nfh.level = logging.WARNING\n_logger.addHandler(fh)\n\n# Add optional ConsoleHandler\nch = logging.StreamHandler()\nch.name = 'Console Logger'\nch.level = logging.DEBUG\n_logger.addHandler(ch)\n\n# Predefined stocks provided:\ndefault_stocks = {\n 'TEA': {\n 'type': 'Common',\n 'last_dividend': 0,\n 'fixed_dividend': None,\n 'par_value': 100,\n },\n 'POP': {\n 'type': 'Common',\n 'last_dividend': 8,\n 'fixed_dividend': None,\n 'par_value': 100,\n },\n 'ALE': {\n 'type': 'Common',\n 'last_dividend': 23,\n 'fixed_dividend': None,\n 'par_value': 60,\n },\n 'GIN': {\n 'type': 'Preferred',\n 'last_dividend': 8,\n 'fixed_dividend': 0.02,\n 'par_value': 100,\n },\n 'JOE': {\n 'type': 'Common',\n 'last_dividend': 13,\n 'fixed_dividend': None,\n 'par_value': 250,\n },\n}\n\n\nclass Stock(object):\n \"\"\"Stock class, defines methods to get data of interest\"\"\"\n\n def __init__(self, symbol, type='Common', last_dividend=0.0,\n fixed_dividend=None, par_value=0, market_price=0):\n \"\"\"\n Instantiate a stock.\n \"\"\"\n self.symbol = symbol\n self.price = market_price\n self.type = type\n self.last_dividend = last_dividend\n self.fixed_dividend = fixed_dividend\n self.par_value = par_value\n self.market_price = market_price\n\n def __str__(self):\n return self.symbol\n\n\nclass Market(object):\n \"\"\"\n Class to simulate a market api\n \"\"\"\n\n def add_stock(self, stock_symbol, type,\n last_dividend, fixed_dividend, par_value):\n \"\"\"\n Add a stock to the market.\n Return complete list of stocks if successful.\n \"\"\"\n if stock_symbol in self.stocks.keys():\n _logger.error(\"Stock already exists\")\n return False\n else:\n self.stocks[stock_symbol] = Stock(\n symbol=stock_symbol,\n type=type,\n last_dividend=last_dividend,\n fixed_dividend=fixed_dividend,\n par_value=par_value,\n )\n return self.stocks\n\n def __init__(self, stocks=None, open=None):\n \"\"\"\n Instantiate market object.\n open: datetime of opening event\n Stocks: set of Stock instances\n operations: list of operations, every operation is a dict with:\n - timestamp: datetime of the transaction\n - stock: stock symbol (str)\n - quantity: quantity of shares traded (int)\n - type: 'buy' or 'sell' (str)\n - price: trade price (int)\n \"\"\"\n # Build the defaults in the body: default-argument expressions run once,\n # at definition time, so a dict/datetime default would be shared (and\n # stale) across every Market instance.\n if stocks is None:\n stocks = {\n simbolo: Stock(\n symbol=simbolo,\n type=values.get('type'),\n last_dividend=values.get('last_dividend'),\n fixed_dividend=values.get('fixed_dividend'),\n par_value=values.get('par_value'),\n market_price=values.get('market_price')\n ) for simbolo, values in default_stocks.items()\n }\n if open is None:\n open = datetime.now()\n self.open = open\n self.stocks = stocks\n self.operations = []\n
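\n # Worked example (uses the predefined GIN entry above; the price is made\n # up): GIN is 'Preferred' with fixed_dividend=0.02 and par_value=100, so\n # get_dividend_yield('GIN', 120) returns (0.02 * 100) / 120 ~= 0.0167.\n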
def get_dividend_yield(self, stock, market_price):\n # perform some validation\n if stock not in [\n a_stock for a_stock in self.stocks.keys()]:\n _logger.error(\"Stock not in the market, please add stock first\")\n return False\n try:\n market_price = float(market_price)\n except ValueError:\n _logger.error(\n \"Wrong market price: {}\".format(market_price))\n return False\n # Return expected results:\n if self.stocks[stock].type == 'Common':\n return self.stocks[stock].last_dividend / market_price\n else:\n return (\n self.stocks[stock].fixed_dividend *\n self.stocks[stock].par_value\n ) / market_price\n\n def get_pe_ratio(self, stock, market_price):\n # NOTE: raises ZeroDivisionError when last_dividend is 0 (e.g. TEA)\n return market_price / self.stocks[stock].last_dividend\n\n def record_trade(self, stock, operation=False):\n \"\"\"\n Record a trade with timestamp, quantity of shares, buy or sell indicator\n and trade price.\n \"\"\"\n # Use random values to ease testing if no operation is given:\n if not operation:\n operation = {}\n print(\"Getting some random values\")\n operation['timestamp'] = datetime.now()\n operation['quantity'] = randint(1, 100)\n operation['type'] = choice(['buy', 'sell'])\n operation['price'] = randint(1, 10000)\n if not operation.get('stock', False):\n operation['stock'] = stock\n self.operations.append(operation)\n # Set stock price to the one in this last trade:\n self.stocks[stock].market_price = operation['price']\n return True\n\n def get_gbce_all_share_index(self):\n \"\"\"\n Use the geometric mean of prices for all stocks\n \"\"\"\n prices_list = [a_stock.market_price\n for symbol, a_stock in self.stocks.items()]\n if not all(prices_list):\n print(\"Not all prices are set for the stocks in the market\")\n return 0\n return reduce(operator.mul, prices_list, 1)**(1/len(prices_list))\n\n def get_volume_weighted_stock_price(self):\n \"\"\"\n Based on trades in the past 15 minutes, compute the volume weighted stock price.\n \"\"\"\n last_operations = [op for op in self.operations\n if op.get('timestamp') > datetime.now() - timedelta(\n minutes=15)\n ]\n return sum([op.get('price') * op.get('quantity')\n for op in last_operations]) / sum(\n [op.get('quantity') for op in last_operations]\n )\n","sub_path":"simplestock.py","file_name":"simplestock.py","file_ext":"py","file_size_in_byte":6944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"340588436","text":"#!/usr/bin/env python3\n\nimport socket\nimport threading\nfrom pathlib import Path\nimport os\nimport copy\nimport time\nimport datetime\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\n\n\nif os.name == 'posix':\n print('os is linux')\n import resource # ( -> pip install python-resources )\n # set linux max_num_open_socket from 1024 to 128k\n resource.setrlimit(resource.RLIMIT_NOFILE, (127000, 128000))\n\n\n\nlisten_PORT = 2690 # pyprox listening to 127.0.0.1:listen_PORT\n\nCloudflare_IP = '104.16.226.10' # plos.org (can be any dirty cloudflare ip)\nCloudflare_port = 443\n\nL_fragment = 77 # length of fragments of Client Hello packet (L_fragment Byte in each chunk)\nfragment_sleep = 0.2 # sleep between each fragment to make the GFW cache fill up so it forgets previous chunks. LOL.
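\n\n# Illustration only (numbers assumed): with L_fragment = 77, a 200-byte\n# ClientHello would leave send_data_in_fragment() below as chunks of 77, 77\n# and 46 bytes, sleeping fragment_sleep seconds between sends:\n#\n# demo = bytes(200)\n# chunks = [demo[i:i+L_fragment] for i in range(0, len(demo), L_fragment)]\n# assert [len(c) for c in chunks] == [77, 77, 46]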
\n\n# ignore the description below; it's for old code, just leave it intact.\nmy_socket_timeout = 60 # default for google is ~21 sec; recommend 60 sec unless you have low RAM and need to close sooner\nfirst_time_sleep = 0.01 # speed control, avoid server crash if a huge number of users is flooding (default 0.1)\naccept_time_sleep = 0.01 # avoid server crash on flooding request -> max 100 sockets per second\n\n\n\nclass ThreadedServer(object):\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n\n def listen(self):\n self.sock.listen(128) # up to 128 unaccepted connections queued; anything beyond that is refused until some are accepted.\n while True:\n client_sock , client_addr = self.sock.accept() \n client_sock.settimeout(my_socket_timeout)\n \n #print('someone connected')\n time.sleep(accept_time_sleep) # avoid server crash on flooding request\n thread_up = threading.Thread(target = self.my_upstream , args =(client_sock,) )\n thread_up.daemon = True # mark the thread as belonging to the main program so it is cleaned up when the program exits, avoiding a leak\n thread_up.start()\n \n\n def my_upstream(self, client_sock):\n first_flag = True\n backend_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n backend_sock.settimeout(my_socket_timeout)\n while True:\n try:\n if( first_flag == True ): \n first_flag = False\n\n time.sleep(first_time_sleep) # speed control + waiting for the packet to be fully received\n data = client_sock.recv(16384)\n #print('len data -> ',str(len(data))) \n #print('user talk :')\n\n if data: \n backend_sock.connect((Cloudflare_IP,Cloudflare_port))\n thread_down = threading.Thread(target = self.my_downstream , args = (backend_sock , client_sock) )\n thread_down.daemon = True\n thread_down.start()\n # backend_sock.sendall(data) \n send_data_in_fragment(data,backend_sock)\n\n else: \n raise Exception('cli syn close')\n\n else:\n data = client_sock.recv(4096)\n if data:\n backend_sock.sendall(data)\n else:\n raise Exception('cli pipe close')\n \n except Exception as e:\n #print('upstream : '+ repr(e) )\n time.sleep(2) # wait two seconds for the other thread to flush\n client_sock.close()\n backend_sock.close()\n return False\n\n\n\n \n def my_downstream(self, backend_sock , client_sock):\n first_flag = True\n while True:\n try:\n if( first_flag == True ):\n first_flag = False \n data = backend_sock.recv(16384)\n if data:\n client_sock.sendall(data)\n else:\n raise Exception('backend pipe close at first')\n \n else:\n data = backend_sock.recv(4096)\n if data:\n client_sock.sendall(data)\n else:\n raise Exception('backend pipe close')\n \n except Exception as e:\n #print('downstream '+backend_name +' : '+ repr(e)) \n time.sleep(2) # wait two seconds for the other thread to flush\n backend_sock.close()\n client_sock.close()\n return False\n\n\ndef send_data_in_fragment(data , sock):\n \n for i in range(0, len(data), L_fragment):\n fragment_data = data[i:i+L_fragment]\n print('send ',len(fragment_data),' bytes') \n \n # sock.send(fragment_data)\n sock.sendall(fragment_data)\n\n time.sleep(fragment_sleep)\n\n print('----------finish------------')\n\n\nprint (\"Now listening at: 
127.0.0.1:\"+str(listen_PORT))\nThreadedServer('',listen_PORT).listen()\n","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"7914331","text":"#!/home/mtran/anaconda3/bin/python\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nimport pickle\nfrom sklearn.cluster import KMeans\nimport random\nfrom sklearn.preprocessing import LabelEncoder\n\n\ndef mosi_filename_convert(input_string):\n # convert input string such that last 4 index are digits\n cnt = 0\n output_str = ''\n for i in reversed(range(len(input_string))):\n if(input_string[i].isdigit()):\n cnt += 1\n output_str += input_string[i]\n else:\n if(cnt < 4):\n output_str += '0'*(4-cnt)\n cnt = 4\n output_str += input_string[i]\n return output_str[::-1]\n\n\ncentroids_video = pd.read_csv('~/transformer/data/centroids_facemesh_5000.csv', header=None).values\nkmeans_video = KMeans(n_clusters=centroids_video.shape[0])\nkmeans_video.cluster_centers_ = centroids_video\n\nlabel_dict = {} # map filename -> corresponding label\ninput_path_train_video, label_file, data_type = sys.argv[1], sys.argv[2], sys.argv[3]\nif(data_type != 'meld'):\n meta_data_df = pd.read_csv(label_file)\n meta_data = meta_data_df.values\n X, y, group = [], [], []\nelse:\n label_dict_train, label_dict_dev, label_dict_test = {}, {}, {}\nif(data_type == 'meld'):\n meta_data_train_path = os.path.join(label_file, 'train_sent_emo.csv')\n meta_data_dev_path = os.path.join(label_file, 'dev_sent_emo.csv')\n meta_data_test_path = os.path.join(label_file, 'test_sent_emo.csv')\n metadata_train = pd.read_csv(meta_data_train_path).values\n metadata_dev = pd.read_csv(meta_data_dev_path).values\n metadata_test = pd.read_csv(meta_data_test_path).values\n label_list = metadata_train[:, 3]\n le = LabelEncoder().fit(label_list)\n for row in metadata_train:\n dia_id, utt_id = row[5], row[6]\n file_str = 'dia'+str(dia_id)+'_'+'utt'+str(utt_id)\n label_dict_train[file_str] = le.transform([row[3]])[0]\n for row in metadata_dev:\n dia_id, utt_id = row[5], row[6]\n file_str = 'dia'+str(dia_id)+'_'+'utt'+str(utt_id)\n label_dict_dev[file_str] = le.transform([row[3]])[0]\n for row in metadata_test:\n dia_id, utt_id = row[5], row[6]\n file_str = 'dia'+str(dia_id)+'_'+'utt'+str(utt_id)\n label_dict_test[file_str] = le.transform([row[3]])[0]\nelif(data_type == 'cremad'):\n label_list = []\n for row in meta_data:\n if(len(row[10]) == 1):\n label_list.append(row[10])\n le = LabelEncoder().fit(label_list)\n for row in meta_data:\n if(len(row[10]) == 1):\n label = le.transform([row[10]])[0]\n label_dict[row[7]] = [label, row[7][:4]]\n group.append(row[7][:4])\n\nelif(data_type == 'mosi'):\n for row in meta_data:\n label_dict[row[0]] = [row[-1]+1, row[0][:-5]]\n group.append(row[0][:-5])\n\nif(data_type != \"meld\"):\n group = list(set(group))\n train_group, val_group, test_group = [], [], []\n for pid in group:\n r = random.uniform(0, 1)\n if(r < 0.6):\n train_group.append(pid)\n elif(r < 0.8):\n val_group.append(pid)\n else:\n test_group.append(pid)\n\n output_train, output_test, output_val = [], [], []\n for file in os.listdir(input_path_train_video):\n current_data = pd.read_csv(os.path.join(input_path_train_video, file), header=None).values\n current_cluster = kmeans_video.predict(current_data)\n current_str = ''\n for i in range(len(current_cluster)):\n current_str += str(current_cluster[i]) + ' '\n 
if(data_type == 'mosi'):\n label = label_dict[mosi_filename_convert(file.split('.')[0])][0]\n pid = mosi_filename_convert(file.split('.')[0])[:-5]\n elif(data_type == 'cremad'):\n label, pid = label_dict[file.split('.')[0]][0], label_dict[file.split('.')[0]][1]\n if(pid in train_group):\n output_train.append([current_str.strip(), label])\n elif(pid in val_group):\n output_val.append([current_str.strip(), label])\n else:\n output_test.append([current_str.strip(), label])\n\n pd.DataFrame(output_train).to_csv(data_type + '_train.tsv',\n sep='\\t', index=False, header=['sentence', 'label'])\n pd.DataFrame(output_val).to_csv(data_type + '_dev.tsv',\n sep='\\t', index=False, header=['sentence', 'label'])\n pd.DataFrame(output_test).to_csv(data_type + '_test.tsv',\n sep='\\t', index=False, header=['sentence', 'label'])\nelse:\n path_train = \"/data/perception-working/minh/facemesh_emotion_data/meld6/train/\"\n path_dev = \"/data/perception-working/minh/facemesh_emotion_data/meld6/val/\"\n path_test = \"/data/perception-working/minh/facemesh_emotion_data/meld6/test/\"\n output_train, output_test, output_val = [], [], []\n for file in os.listdir(path_train):\n current_data = pd.read_csv(os.path.join(path_train, file), header=None).values\n current_cluster = kmeans_video.predict(current_data)\n current_str = ''\n for i in range(len(current_cluster)):\n current_str += str(current_cluster[i]) + ' '\n output_train.append([current_str.strip(), label_dict_train[file.split('.')[0]]])\n for file in os.listdir(path_dev):\n current_data = pd.read_csv(os.path.join(path_dev, file), header=None).values\n current_cluster = kmeans_video.predict(current_data)\n current_str = ''\n for i in range(len(current_cluster)):\n current_str += str(current_cluster[i]) + ' '\n output_val.append([current_str.strip(), label_dict_dev[file.split('.')[0]]])\n for file in os.listdir(path_test):\n current_data = pd.read_csv(os.path.join(path_test, file), header=None).values\n current_cluster = kmeans_video.predict(current_data)\n current_str = ''\n for i in range(len(current_cluster)):\n current_str += str(current_cluster[i]) + ' '\n if('final_videos_test' in file):\n file = file.replace('final_videos_test', '')\n output_test.append([current_str.strip(), label_dict_test[file.split('.')[0]]])\n pd.DataFrame(output_train).to_csv(data_type + '_train.tsv',\n sep='\\t', index=False, header=['sentence', 'label'])\n pd.DataFrame(output_val).to_csv(data_type + '_dev.tsv',\n sep='\\t', index=False, header=['sentence', 'label'])\n pd.DataFrame(output_test).to_csv(data_type + '_test.tsv',\n sep='\\t', index=False, header=['sentence', 'label'])\n","sub_path":"bert/preprocess/create_finetune_dataset.py","file_name":"create_finetune_dataset.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"112766482","text":"import sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\"))\n\nimport itertools\nimport unittest\nfrom Santorini.Common import pieces\nfrom Santorini.Lib.json_validate import validate_json\nfrom Santorini.Remote import jsonschemas\n\n# common JSON artifacts to test\n# all tuples of direction, e.g. 
(\"EAST\", \"NORTH\"), (\"EAST\", \"PUT\"), ...\nall_dir_tuples = pieces.DIR_TABLE.keys()\n\n# valid Names\n# a valid Name is a string of lowercase characters\nnames = [\"a\", \"asdfhasouasdfaqwertyuiopasdfghjklzxcvbnm\", \"bjapojapjpoajjijrq\", \"z\"]\n# \"\" is not considered a valid Name\n# an invalid Name contains non-alphabetic characters and non-lowercase characters\ninvalid_names = [\"AASDFASDFADF\", \"asafFsdsf\", \"1x02u9,0@$@>!/.,c34c92ASFD084\\\\\\n\\n\\r\", \"\"]\n\n# valid Workers\n# a valid Worker is a valid Name followed by 1 or 2\nworkers = [name + str(num) for num, name in itertools.product([1,2], names)] \n# a Worker with an invalid Name is invalid \ninvalid_workers = [name + str(num) for num, name in itertools.product([1,2], invalid_names)] \n# Workers cannot start with numbers\ninvalid_workers += [str(num) + name for num, name in itertools.product([1,2], names + invalid_names)] \n\n# valid Actions are one of\n# String or\n# (Worker, EastWest, NorthSouth) or\n# (Worker, EastWest, NorthSouth, EastWest, NorthSouth)\nactions = [[worker] + list(dir_tuple) for worker, dir_tuple in itertools.product(workers, all_dir_tuples)]\nactions += [[worker] + list(dir_tuple) + list(dir_tuple2) for worker, dir_tuple, dir_tuple2 in itertools.product(workers, all_dir_tuples, all_dir_tuples)]\n# invalid actions contain invalid workers or invalid directions\ninvalid_actions = [(worker,) + dir_tuple for worker, dir_tuple in itertools.product(invalid_workers, all_dir_tuples)]\ninvalid_actions += [(worker,) + dir_tuple + dir_tuple2 for worker, dir_tuple, dir_tuple2 in itertools.product(invalid_workers, all_dir_tuples, all_dir_tuples)]\ninvalid_actions += [(worker,) for worker in invalid_workers]\n# (NorthSouth, EastWest) instead of (EastWest, NorthSouth)\ninvalid_actions += [(worker,) + (dir_tuple[1], dir_tuple[0]) for worker, dir_tuple in itertools.product(workers, all_dir_tuples)] \n\n# valid Coordinates are natural numbers in [0,5]\ncoordinates = [num for num in range(6)]\n# invalid Coordinates are non-numbers, non-integers, or integers less than 0 or greater than 5\ninvalid_coordinates = [\"potato\", 1.29495, -2.2323, -5, 192]\n\n# valid WorkerPlaces are (Worker, Coordinate, Coordinate)\nworker_places = [list(wp) for wp in itertools.product(workers, coordinates, coordinates)]\n# invalid WorkerPlaces are not that\ninvalid_worker_places = [[\"blah\", \"blah\", \"blah\"], [\"worker1\"], \"(\\\"worker1\\\", 2, 2)\", []]\ninvalid_worker_places += [list(wp) for wp in itertools.product(invalid_workers, invalid_coordinates, invalid_coordinates)]\n\n# valid Places are (Coordinate, Coordinate)\nplaces = [list(p) for p in list(itertools.product(coordinates, coordinates))]\n# invalid Places are not that\ninvalid_places = [list(p) for p in itertools.product(invalid_coordinates, invalid_coordinates)]\n\n# valid Placements are [WorkerPlace, ...] 
of up to 3 WorkerPlaces\nplacements = [[]]\nplacements += [[wp] for wp in worker_places]\nplacements += [worker_places[:2], worker_places[:3]] \n# invalid Placements are not that\ninvalid_placements = [\"potato\", [[\"potato\", 2, 2], [\"potato\", 1, 1]], [\"potato\", \"potato\", \"potato\"]]\ninvalid_placements += [[[\"potato\", 2, 2], [\"potato\", 2, 2], [\"potato\", 2, 2], [\"potato\", 2, 2]]] \ninvalid_placements += [[\"potato\", [\"potato\", 2, 2]]] \ninvalid_placements += [[wp] for wp in invalid_worker_places]\n\n# valid EncounterOutcomes are (String, String) or (String, String, \"irregular\")\nencounter_outcomes = [[\"potato\", \"tomato\"], [\"potato\", \"tomato\", \"irregular\"]]\n# invalid EncounterOutcomes are not that\ninvalid_encounter_outcomes = [[\"potato\"], [\"potato\", \"tomato\", \"somewhat-irregular\"], [\"potato\", \"tomato\", \"irregular\", \"blah\"]]\n\n# valid Results are arrays of EncounterOutcomes\nresultss = [[outcome] for outcome in encounter_outcomes]\nresultss += [list(r) for r in itertools.product(encounter_outcomes, repeat=2)]\nresultss += [[[\"potato\", \"tomato\"], [\"potato\", \"tomato\"], [\"potato\", \"tomato\", \"irregular\"]]]\n\naction_tests = \\\n [(\"player\", True),\n (\"PlaYer\", False),\n (\"--_aslk2345678=<=s>dtfs789_-@#%@!%%#%#@,./,.3,/4,2/.3,4\\\\\\\"])[(^&*`~?/\\n\\t\\r\", False),\n (1, False),\n (0, False),\n (2, False),\n (jsonschemas, False)]\naction_tests += [(action, True) for action in actions]\naction_tests += [(action, False) for action in invalid_actions]\n\nboard_tests = []\nboard_row_tests = []\nbuilding_worker_tests = []\ncell_tests = []\nclient_config_tests = []\ncoordinate_tests = [(coord, True) for coord in coordinates] \ncoordinate_tests += [(coord, False) for coord in invalid_coordinates] \nencounter_outcome_tests = [(outcome, True) for outcome in encounter_outcomes] \nencounter_outcome_tests += [(outcome, False) for outcome in invalid_encounter_outcomes] \nheight_tests = [(height, True) for height in range(4)]\nname_tests = [(name, True) for name in names]\nname_tests += [(name, False) for name in invalid_names]\nobserver_tests = []\nplace_tests = [(place, True) for place in places] \nplace_tests += [(place, False) for place in invalid_places] \nplacement_tests = [(placement, True) for placement in placements] \nplacement_tests += [(placement, False) for placement in invalid_placements] \nplayer_tests = [] \nplaying_as_tests = [(list(pl), True) for pl in list(itertools.product([\"playing-as\"], names))]\nplaying_as_tests += [(list(pl), False) for pl in list(itertools.product([\"playing-as\"], invalid_names))]\nresults_tests = [(results, True) for results in resultss]\nserver_config_tests = []\nworker_tests = [(worker, True) for worker in workers] \nworker_tests += [(worker, False) for worker in invalid_workers] \nworker_place_tests = [(wp, True) for wp in worker_places] \n# += keeps the valid cases above (a plain = here would overwrite them)\nworker_place_tests += [(wp, False) for wp in invalid_worker_places] \n_direction_items_tests = []\n\njson_schema_tests = [\n [\"ACTION\", action_tests],\n [\"BOARD\", board_tests],\n [\"BOARD_ROW\", board_row_tests],\n [\"BUILDING_WORKER\", building_worker_tests],\n [\"CELL\", cell_tests],\n [\"CLIENT_CONFIG\", client_config_tests],\n [\"COORDINATE\", coordinate_tests],\n [\"ENCOUNTER_OUTCOME\", encounter_outcome_tests],\n [\"HEIGHT\", height_tests],\n [\"NAME\", name_tests],\n [\"OBSERVER\", observer_tests],\n [\"PLACE\", place_tests],\n [\"PLACEMENT\", placement_tests],\n [\"PLAYER\", player_tests],\n [\"PLAYING_AS\", playing_as_tests],\n [\"RESULTS\", 
results_tests],\n [\"SERVER_CONFIG\", server_config_tests],\n [\"WORKER\", worker_tests],\n [\"WORKER_PLACE\", worker_place_tests],\n [\"_DIRECTION_ITEMS\", _direction_items_tests],\n]\n\nclass TestRemoteJsonschemas(unittest.TestCase):\n \"\"\" Test the remote json schemas \"\"\"\n TEST_MSG = \"Testing test_{schema_name}_{num}\"\n\n def test_json_validation(self):\n for schema_name, schema_tests in json_schema_tests:\n schema = TestRemoteJsonschemas.get_schema(schema_name)\n\n for test_num, (schema_test, expected_result) in enumerate(schema_tests): \n test_msg = TestRemoteJsonschemas.TEST_MSG.format(schema_name=schema_name, num=test_num)\n self.run_validation_subtest(test_msg, schema, schema_test, expected_result)\n\n @staticmethod\n def get_schema(name):\n \"\"\"\n Gets the named schema from jsonschemas\n :param String name: name of the schema in jsonschemas\n :rtype JsonSchema or None: the jsonschema found\n \"\"\"\n return getattr(jsonschemas, name, None)\n\n def run_validation_subtest(self, msg, schema, schema_test, expected):\n \"\"\"\n Runs a json validation test using the given schema on the given test string\n :param String msg: a test message\n :param JsonSchema schema: the schema to validate against\n :param JsonString schema_test: the Json String to be validated\n :param bool expected: the expected result\n \"\"\"\n with self.subTest(msg, schema_test=schema_test,expected_result=expected):\n self.assertEqual(validate_json(schema, schema_test), expected)\n\n\n","sub_path":"Santorini/Tests/test_jsonschemas.py","file_name":"test_jsonschemas.py","file_ext":"py","file_size_in_byte":8157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"511416963","text":"import markdown\nimport json\nfrom jinja2 import Template\n\n\nclass PageBuilder():\n \"\"\"\n Receives a path to a JSON file containing metadata for a list of web pages\n to be built. The metadata contains information on where the final HTML\n file should be saved, which HTML template to use, and where the markdown\n content for that page can be found.\n\n Page metadata schema:\n {\n 'Page Title': {\n 'template': either a path to the HTML template or null. 
If null,\n defaults to '../templates/page_template.html',\n 'content': path to a markdown file with the content to be rendered\n to HTML,\n 'pathout': path to where the final file should be saved\n }\n }\n \"\"\"\n def __init__(self, pages):\n \"\"\"\n This class is used to create generic web pages for PSL\n Parameters\n ----------\n pages: dictionary containing information on all the pages to be created\n Returns\n -------\n None\n \"\"\"\n self.pages = pages\n self.required_attributes = {'template', 'content', 'pathout'}\n\n def build_pages(self):\n \"\"\"\n Method to loop through all pages in JSON metadata and output rendered\n HTML files\n \"\"\"\n for page in self.pages:\n attributes = set(self.pages[page].keys())\n assert self.required_attributes.issubset(attributes)\n title = page\n pathout = self.pages[page]['pathout']\n content = self.pages[page]['content']\n template = self.pages[page]['template']\n # use default template if template is null\n if not template:\n template = '../templates/page_template.html'\n\n # read and convert markdown content to HTML\n with open(content, 'r') as f:\n md_text = f.read()\n html_text = markdown.markdown(md_text)\n self.write_page(pathout, template,\n title=title, content=html_text)\n\n def write_page(self, pathout, template_path, **kwargs):\n \"\"\"\n Render the HTML template with the markdown text\n Parameters\n ----------\n pathout: path where the HTML file will be saved\n template_path: path for the HTML template\n Returns\n -------\n None\n \"\"\"\n # read and render HTML template\n with open(template_path, \"r\") as f:\n template_str = f.read()\n template = Template(template_str)\n rendered = template.render(**kwargs)\n with open(pathout, 'w') as out:\n out.write(rendered)\n\n\nif __name__ == '__main__':\n with open('pages.json') as f:\n pages = json.load(f)\n pb = PageBuilder(pages)\n pb.build_pages()\n","sub_path":"Tools/Page-Builder/page_builder/pagebuilder.py","file_name":"pagebuilder.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"54031747","text":"from mrjob.step import MRStep\nfrom mrjob.protocol import JSONProtocol\n\nfrom predict_receiver import MRPredictReceiver\n\n\nclass MRSuggestReceiver(MRPredictReceiver):\n def configure_options(self):\n super(MRSuggestReceiver, self).configure_options()\n\n self.add_passthrough_option(\n \"--join-only\",\n action=\"store_true\",\n dest=\"join_only\",\n help=\"Only perform the join steps on a preprocessed dataset.\")\n\n def input_protocol(self):\n if self.options.join_only:\n return JSONProtocol()\n\n return super(MRSuggestReceiver, self).input_protocol()\n\n def steps(self):\n if self.options.join_only:\n return [\n MRStep(mapper=self.mapper_join,\n reducer=self.reducer_join),\n MRStep(mapper=self.mapper_reassemble,\n reducer=self.reducer_reassemble)\n ]\n\n return super(MRSuggestReceiver, self).steps() + [\n MRStep(mapper=self.mapper_join,\n reducer=self.reducer_join),\n MRStep(mapper=self.mapper_reassemble,\n reducer=self.reducer_reassemble)\n ]\n\n def mapper_join(self, sender, receivers):\n receivers = list(receivers)\n yield sender, receivers\n for receiver in receivers:\n yield receiver, [sender]\n\n def reducer_join(self, email, links):\n links = list(links)\n metadata = max(enumerate(links), key=lambda tup: len(tup[1]))\n email_recv_pred = metadata[1]\n # Only yield if there is enough emails to form a top 3 list\n if len(email_recv_pred) == 3:\n del links[metadata[0]]\n for link in 
links:\n yield link[0], email_recv_pred\n\n def mapper_reassemble(self, sender, suggestions):\n suggestions = list(set(suggestions))\n if sender in suggestions:\n suggestions.remove(sender)\n yield sender, suggestions\n\n def reducer_reassemble(self, sender, suggestions):\n yield sender, list(set(sum(suggestions, [])))\n\n\nif __name__ == \"__main__\":\n MRSuggestReceiver.run()\n","sub_path":"mapreduce/suggestreceiver/suggest_receiver.py","file_name":"suggest_receiver.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"540469706","text":"from django.urls import path\nfrom . import views\n\napp_name = \"chatapp\"\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('room//', views.room, name='room'),\n path('load/messages/', views.get_message, name='chat_message'),\n path('create/chat/', views.create_chat, name='create_chat'),\n]","sub_path":"cargomarket/chatapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"416403894","text":"import pygame, sys, random, math\nfrom pygame.locals import *\n\n# Using Python 3.9 and pygame==2.0.0\n\n#pygame.mixer.pre_init(frequency=22050)\npygame.init()\n\ndef draw_bg(bg, bg_x, bg_y, screen_height, screen):\n screen.blit(bg, (bg_x, bg_y))\n screen.blit(bg, (bg_x, bg_y + screen_height))\n\ndef load_map(path):\n f = open(path, \"r\")\n data = f.read()\n f.close()\n data = data.split('\\n')\n product = []\n for line in data:\n product.append(list(line))\n return product\n\na = 1\na2 = 26.6\n\nmatrix = [\n [a, round(math.tan(math.radians(a2)), 3)],\n [a, round(-math.tan(math.radians(a2)), 3)]\n]\n\ndef transform_cords(cords):\n cords = [cords[0] * 32, cords[1] * 32]\n return [(cords[0] * matrix[0][0]) + (cords[1] * matrix[1][0]),\n (cords[0] * matrix[0][1]) + (cords[1] * matrix[1][1])]\n\ndef blit_map(map, offset):\n #y = offset[1] + len(map[0])-1\n y = offset[1] \n x = offset[0] \n\n tile_rects = []\n\n for ind in range(0, len(map)):\n for block in map[ind]:\n if block == \"1\":\n iso = transform_cords([x, y])\n rect = pygame.Rect(tuple(iso), (32, 32))\n tile_rects.append(rect)\n\n x += 1\n y -= 1\n x = offset[0]\n\n return tile_rects\n\n\n\nsize = (960, 720)\nscreen = pygame.display.set_mode(size)\n\npygame.display.set_caption('Productivity')\n\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\n\nfont = pygame.font.Font(\"files/text.ttf\", 50)\nfont2 = pygame.font.Font(\"files/text.ttf\", 40)\n\nmap = load_map(\"files/map.txt\")\n\ntile = pygame.image.load('files/sprites/tile_thin.png').convert_alpha()\ntile_down = pygame.image.load('files/sprites/tile.png').convert_alpha()\nplant_tile = pygame.image.load('files/sprites/plant.png').convert_alpha()\n\n\nrunning = True\nplaying = True\n\nfps = pygame.time.Clock()\n\nclick = False\n\n#tile_rects = blit_map(map, [17, -5])\n\n#tile_rects2 = blit_map(map, [17, -5])\n\ntile_rects = blit_map(map, [17, -2])\n\ntile_rects2 = blit_map(map, [17, -2])\n\ntile_rects3 = []\n\nbg = pygame.transform.scale(pygame.image.load('files/sprites/bg3.png').convert_alpha(), (size[0], size[1]))\nbg_rect = bg.get_rect(center = (0, 0))\n\nindex = None\n\nclick_right = False\n\nnum_of_seeds = 0\n\nmax_time = None\n\nstart_time = False\n\nseconds = None\n\nnum = None\n\ntime = 0\n\ntime_up = False\n\nscore = 0\n\nplanting = pygame.mixer.Sound('files/sounds/plant.wav')\nplanting.set_volume(0.5)\n\ndeplant = 
pygame.mixer.Sound('files/sounds/plant2.wav')\ndeplant.set_volume(0.5)\n\nwhile running:\n\n while playing and time_up == False:\n\n screen.fill(white)\n bg_rect.centery -= 1\n\n draw_bg(bg, bg_rect.centerx, bg_rect.centery, size[1], screen)\n\n if bg_rect.centery <= -size[1]:\n bg_rect.centery = 0\n\n mx, my = pygame.mouse.get_pos()\n\n click = False\n click_right = False\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n playing = False\n\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n for i in tile_rects2:\n screen.blit(tile_down, (i.x - 16, i.y - 8))\n\n for i in tile_rects:\n screen.blit(tile, (i.x - 16, i.y - 8))\n\n if i.collidepoint((mx, my)) and click:\n planting.play()\n \n index = tile_rects.index(i)\n\n tile_rects3.append(tile_rects[index])\n\n tile_rects.pop(index)\n\n click = False\n \n num_of_seeds += 1\n\n start_time = True\n \n for i in tile_rects3:\n screen.blit(plant_tile, (i.x - 16, i.y - 8))\n\n if i.collidepoint((mx, my)) and click:\n deplant.play()\n\n index = tile_rects3.index(i)\n\n tile_rects.append(tile_rects3[index])\n\n tile_rects3.pop(index)\n\n click = False\n \n num_of_seeds -= 1\n\n\n if start_time:\n time += 1\n\n prod_text = font2.render(\"Do Something Productive for 25 minutes / 1500 Seconds\", True, white)\n screen.blit(prod_text, (size[0] / 2 - 375, size[1] / 2 - 300))\n\n time_text = font2.render(\"Seconds Passed: \" + str(int(time / 120)), True, white)\n screen.blit(time_text, (size[0] / 2 - 100, size[1] / 2 - 270))\n\n if int(time / 120) >= 1500:\n time = 0\n start_time = False\n time_up = True\n playing = False\n tile_rects = blit_map(map, [17, -5])\n tile_rects2 = blit_map(map, [17, -5])\n tile_rects3 = []\n\n score_text = font2.render('Score: ' + str(score), True, white)\n screen.blit(score_text, (size[0] / 2 + 200, size[1] / 2 + 200))\n\n\n pygame.display.update()\n fps.tick(120)\n\n while time_up:\n text = font.render(\"Press anything to reset and start the timer again\", True, white)\n screen.blit(text, (size[0] / 2 - 425, size[1] / 2))\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n if event.type == KEYDOWN:\n playing = True\n time_up = False\n score += 10\n\n\n pygame.display.update()\n fps.tick(120)\n","sub_path":"Python-GameDev/Isometric-Productivity/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"243914103","text":"# -*- coding: utf-8 -*-\n\"\"\" Dataset operations\n Author: Kai JIN\n\"\"\"\nimport random\nimport os\nimport matplotlib.pyplot as plt\n\n\ndef name(filename, app, ext='.txt'):\n basename = os.path.basename(filename).split('.')[0]\n return basename + '_' + app + ext\n\n\ndef divide_filelist(filelist, ratio, shuffle):\n \"\"\" partition dataset describe file\n e.g. 
filepath1 0\n filepath2 0\n ...\n into two parts: train/test with ratio\n \"\"\"\n items = []\n with open(filelist, 'r') as fp:\n for line in fp:\n items.append(line)\n if shuffle:\n random.shuffle(items)\n basename = os.path.basename(filelist).split('.')[0]\n fw_1 = open(name(filelist, 'train'), 'w')\n fw_2 = open(name(filelist, 'test'), 'w')\n num = len(items)\n for i, c in enumerate(items):\n if i < num * ratio:\n fw_1.write(c)\n else:\n fw_2.write(c)\n fw_1.close()\n fw_2.close()\n\n\ndef clip(filelist, num, span, sorted_idx=2, shuffle=False):\n \"\"\" select at most `num` samples from each interval\n span: (start, end, interval)\n e.g. (0.1, 0.8, 0.5)\n num: number of samples to keep from each interval\n \"\"\"\n items = []\n with open(filelist, 'r') as fp:\n for line in fp:\n v = float(line.split(' ')[sorted_idx - 1])\n items.append((v, line))\n if shuffle:\n random.shuffle(items)\n\n dist = []\n new_items = []\n for i in range(int((span[1] - span[0]) / span[2])):\n start = span[0] + i * span[2]\n end = span[0] + (i + 1) * span[2]\n count = 0\n for e in items:\n if e[0] > start and e[0] < end:\n new_items.append(e[1])\n dist.append(e[0])\n count += 1\n if count == num:\n break\n\n with open(name(filelist, 'clip'), 'w') as fw:\n for line in new_items:\n fw.write(line)\n\n remain_items = []\n for old_e in items:\n find = False\n for new_e in new_items:\n if new_e == old_e[1]:\n find = True\n break\n if find is False:\n remain_items.append(old_e[1])\n\n with open(name(filelist, 'clip_remain'), 'w') as fw:\n for line in remain_items:\n fw.write(line)\n\n\ndef distribution(filelist, idx=2):\n dist = []\n with open(filelist, 'r') as fp:\n for line in fp:\n v = float(line.split(' ')[idx - 1])\n dist.append(v)\n plt.hist(dist, bins=100)\n plt.grid()\n plt.show()\n
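\n\n# Hedged usage sketch (file names are made up): split a list file 80/20,\n# then keep at most 5 samples per 0.1-wide value bin of the training half:\n#\n# divide_filelist('scores.txt', ratio=0.8, shuffle=True)\n# clip('scores_train.txt', num=5, span=(0.0, 1.0, 0.1))\n# distribution('scores_train_clip.txt')\n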
Not save if empty.\")\r\n parser.add_argument(\"-weights\",default = \"./model_final1.pth\",\r\n help = \"Detector model weights\")\r\n parser.add_argument(\"-cfg\",default = \"./yolo/custom-yolov4-detector.cfg\",\r\n help = \"Detector model config file\")\r\n parser.add_argument(\"-data\",default = \"./yolo/custom.data\",\r\n help = \"Path to data file\")\r\n parser.add_argument(\"-show\",action='store_true',\r\n help = \"Not show for headless systems\")\r\n parser.add_argument(\"-thresh\",type=float,default=.50,\r\n help = \"Remove detections below this score\")\r\n parser.add_argument(\"-classes\",default=\"./coco_classes.txt\",\r\n help = \"Path to classes file\")\r\n parser.add_argument(\"-numc\",type=int,default=\"3\",\r\n help = \"Number of classes\")\r\n parser.add_argument(\"-detector\", type = int, default=2,\r\n help=\"Detecor type. 1 = yolo, 2 = Detectron2\")\r\n return parser.parse_args()\r\n\r\ndef get_fps(video):\r\n fps = int(video.get(cv2.CAP_PROP_FPS))\r\n return fps\r\n\r\ndef set_saved_video(input_video, output_video, size):\r\n fourcc = cv2.VideoWriter_fourcc(*\"MP42\")\r\n fps = int(input_video.get(cv2.CAP_PROP_FPS))\r\n video = cv2.VideoWriter(output_video, fourcc, fps, size)\r\n return video\r\n\r\n\r\nif __name__ == '__main__':\r\n args = parser()\r\n video = args.input\r\n cap = cv2.VideoCapture(video)\r\n fps = get_fps(cap)\r\n frame_count = 0\r\n ret,frame = cap.read()\r\n f_height, f_width, _ = frame.shape\r\n\r\n if args.detector == 1:\r\n #yolo Detector\r\n detector = Yolo(args.data, args.weights,args.cfg,args.thresh)\r\n if args.detector == 2:\r\n #Detectron2 Detector\r\n detector = Detectron2(args.classes,args.numc,args.weights,args.thresh)\r\n\r\n\r\n track = CarTrack(frame,frame_count,COLOR,fps,detector)\r\n\r\n #Output video settings\r\n saved_video = set_saved_video(cap,args.output,(f_width,f_height))\r\n (path, filename) = os.path.split(args.output)\r\n (f,ext) = os.path.splitext(filename)\r\n\r\n try:\r\n while cap.isOpened():\r\n if not ret:\r\n break\r\n\r\n track.updating_frame(frame,frame_count)\r\n tracked_frame = track.visualize()\r\n\r\n if args.output is not None:\r\n saved_video.write(tracked_frame)\r\n \r\n if args.show:\r\n cv2.imshow('Tracking with Detectron2',tracked_frame)\r\n\r\n frame_count += 1\r\n\r\n ret,frame = cap.read()\r\n finally:\r\n cap.release()\r\n saved_video.release()\r\n cv2.destroyAllWindows()\r\n\r\n track.write_csv_data(f)\r\n \r\n\r\n\r\n\r\n \r\n","sub_path":"track/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"409135548","text":"from svmutil import *\n\ny, x = svm_read_problem('Data/iris.t')\n\ndef rekClasses(yOri,xOri,yRestBits):\n\n #erstellen eines neuen Ausschnits aus yOri\n yOriAusschnitt = []\n xOriAusschnitt = []\n\n #erstelle Arrays die nur die momentan interessanten Daten enthalten\n for i in range(len(yRestBits)):\n if yRestBits[i] != -1:\n yOriAusschnitt.append(yOri[i])\n xOriAusschnitt.append(xOri[i])\n\n\n\n #als neue Klasse wird der erste Wert des Array verwendet.\n nextClass = int(yOriAusschnitt[0])\n\n #finde ein weiteres Label. 
#the first label != nextClass is used as the label for all labels != nextClass\n restClassThere = True\n restClass = 2\n yOriAusschnitttmp = yOriAusschnitt\n for i in range(len(yOriAusschnitttmp)):\n if yOriAusschnitttmp[i] != nextClass:\n if restClassThere:\n restClassThere = False\n restClass = yOriAusschnitttmp[i]\n else:\n yOriAusschnitttmp[i] = restClass\n\n #abort if no second label was found; in that case restClassThere is still True\n if restClassThere:\n return\n\n #nextClass is removed from the remaining labels; yRestBits is then passed on in the recursion\n for i in range(len(yRestBits)):\n if yRestBits[i] == nextClass:\n yRestBits[i] = -1\n\n m = svm_train(yOriAusschnitt, xOriAusschnitt, '-s 0 -t 2')\n mGesamt = []\n mGesamt.append(m)\n\n rekM = rekClasses(yOri,xOri,yRestBits)\n mGesamt.append(rekM)\n\n return mGesamt\n\nmGesamt = rekClasses(y,x,y)
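\n\n# Shape of the result (illustration, assuming labels {1, 2, 3}): each level\n# of the recursion separates one remaining class from the rest, so mGesamt\n# nests like [model(1 vs rest), [model(2 vs rest), None]].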
","sub_path":"testrek.py","file_name":"testrek.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"84411256","text":"import schedule\r\nimport datetime\r\n#import xml2json\r\nimport requests\r\nimport optparse\r\nimport csv\r\nimport json\r\nimport os.path\r\nimport xmltodict\r\nimport pprint\r\nimport time\r\nfrom google.cloud import storage\r\nfrom google.oauth2 import service_account\r\nfrom xml.etree import ElementTree as ET\r\n\r\ndef perfromStock_Summary():\r\n pp = pprint.PrettyPrinter(indent=4)\r\n #pp.pprint(json.dumps(xmltodict.parse(xml)))\r\n def getstocksummary(godowname,arr):\r\n # NOTE: the tag names below follow the standard Tally export-request\r\n # envelope; the two Yes flags are assumed (the original tags were lost).\r\n xml =\"\"\"\r\n<ENVELOPE>\r\n <HEADER>\r\n <TALLYREQUEST>Export Data</TALLYREQUEST>\r\n </HEADER>\r\n <BODY>\r\n <EXPORTDATA>\r\n <REQUESTDESC>\r\n <STATICVARIABLES>\r\n <GODOWNNAME>{}</GODOWNNAME>\r\n <EXPLODEFLAG>Yes</EXPLODEFLAG>\r\n <ISITEMWISE>Yes</ISITEMWISE>\r\n </STATICVARIABLES>\r\n <REPORTNAME>Godown Summary</REPORTNAME>\r\n </REQUESTDESC>\r\n </EXPORTDATA>\r\n </BODY>\r\n</ENVELOPE>\r\n\"\"\".format(godowname)\r\n headers = {'Content-Type': 'application/xml'}\r\n # set what your server accepts\r\n response=requests.post('http://localhost:9002', data=xml, headers=headers).text\r\n result=xmltodict.parse(response)\r\n
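# Assumed response shape (inferred from the indexing used below):\r\n # result['ENVELOPE']['DSPACCNAME'][i]['DSPDISPNAME'] -> item (stock) name\r\n # result['ENVELOPE']['DSPSTKINFO'][i]['DSPSTKCL']['DSPCLQTY'] -> closing qty string such as '12 nos'\r\n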
\"\"\".format(godowname)\r\n headers = {'Content-Type': 'application/xml'}\r\n # set what your server accepts\r\n response=requests.post('http://localhost:9002', data=xml, headers=headers).text\r\n result=xmltodict.parse(response)\r\n print(result)\r\n #print(len(result['ENVELOPE']['DSPACCNAME']))\r\n #print(json.dumps(result['ENVELOPE']['DSPACCNAME'],indent=4,sort_keys=True))\r\n #pp.pprint(result)\r\n #pp.pprint(json.dumps(xmltodict.parse(response)))\r\n #date \r\n date = datetime.datetime.now()\r\n print(date)\r\n\r\n headings = (\"SKU\", \"Stock Location\", \"Report Year\", \"Report Month\",\" Report Day\", \"Closing Quantity\") \r\n print(headings)\r\n\r\n savedFilename = date.strftime(\"Stock-summary-%d-%m-%y\") \r\n print(savedFilename)\r\n\r\n root_path=os.environ['USERPROFILE']\r\n save_path =root_path+'/Desktop/'\r\n\r\n completeName = os.path.join(save_path, savedFilename +\".csv\")\r\n print(completeName)\r\n\r\n\r\n try:\r\n prodList =len(result['ENVELOPE']['DSPACCNAME'])\r\n print(prodList)\r\n print(result['ENVELOPE']['DSPACCNAME'][0]['DSPDISPNAME'])\r\n for i in range(prodList):\r\n prodName =result['ENVELOPE']['DSPACCNAME'][i]['DSPDISPNAME']\r\n #print(prodName)\r\n #print(result['ENVELOPE']['DSPSTKINFO'][i]['DSPSTKCL']['DSPCLQTY'])\r\n clsQty =result['ENVELOPE']['DSPSTKINFO'][i]['DSPSTKCL']['DSPCLQTY']\r\n #split data (num2Cls = numCls[0].split(' ');var num3Cls = num2Cls[0];)\r\n numCls=clsQty.split()[0]\r\n #print(numCls)\r\n #print(len(arr))\r\n for j in range(len(arr)):\r\n #print(arr[j]['reatailor_name'])\r\n #print(j)\r\n if arr[j]['reatailor_name']==prodName:\r\n sku = arr[j]['SKU']\r\n #print(\"{}\".format(arr[j]['reatailor_name']))\r\n #print(arr[j]['SKU'])\r\n values = str(sku) + \",\" + godowname+ \",\" +date.strftime('%Y')+ \",\" +date.strftime('%m')+ \",\"+date.strftime('%d')+ \",\"+ numCls \r\n print(values)\r\n \r\n csvRow = [values]\r\n #print(csvRow)\r\n \r\n with open (completeName, \"a\",newline='') as file:\r\n headings = (\"SKU\", \"Stock Location\", \"Report Year\", \"Report Month\",\" Report Day\", \"Closing Quantity\") \r\n writer = csv.writer(file, delimiter=' ', quotechar=' ', dialect='excel')\r\n writer.writerow(csvRow)\r\n #file1 = open(completeName, \"w\")\r\n #csvwriter = csv.writer(file1)\r\n\r\n\r\n \r\n credentials = service_account.Credentials.from_service_account_file('Os.getcwd()/Tally-connector-6d187b87ff6d.json',scopes=[\"https://www.googleapis.com/auth/cloud-platform\"],)\r\n client = storage.Client(credentials=credentials, project='tally-connector')\r\n bucket = client.get_bucket('tally-connector')\r\n blob = bucket.blob('myfile')\r\n blob.upload_from_filename(completeName)\r\n if(blob.public_url):\r\n print(\"file uploded successfully\")\r\n\r\n\r\n \r\n except TypeError:\r\n print('nothing')\r\n \r\n csvFilePath = \"config.csv\"\r\n jsonFilePath = \"file.json\"\r\n arr = []\r\n #read the csv and add the arr to a arrayn\r\n\r\n with open (csvFilePath) as csvFile:\r\n fieldsname = (\"reatailor_name\",\"SKU\",\"Godownname\")\r\n csvReader = csv.DictReader(csvFile,fieldsname)\r\n print(csvReader)\r\n for csvRow in csvReader:\r\n arr.append(csvRow)\r\n \r\n print(arr)\r\n with open(jsonFilePath, \"w\") as jsonFile:\r\n jsonFile.write(json.dumps(arr, indent = 4))\r\n print()\r\n\r\n\r\n checkForGodowns = True\r\n godownNames=[]\r\n for gdwns in range(len(arr)):\r\n\r\n if checkForGodowns:\r\n print(arr[gdwns]['Godownname']) \r\n gdwnName = arr[gdwns]['Godownname']\r\n \r\n if (len(gdwnName)>0):\r\n godownNames.append(gdwnName)\r\n \r\n 
\r\n else:\r\n checkForGodowns = False\r\n break\r\n print(godownNames)\r\n for g in godownNames:\r\n getstocksummary(g,arr)\r\n\r\n \r\n\r\n def job():\r\n getSummaryInDetail(fromDate, toDate, ledgers)\r\n \r\n with open('cron.csv', 'r') as f:\r\n res=f.read()\r\n minutes=res.split(\",\")[1].split('\"')[0]\r\n hours=res.split(\",\")[0][1:]\r\n #print(miniutes+\" \"+hours)\r\n \r\n schedule.every(2).minutes.do(job)\r\n schedule.every().hour.do(job)\r\n print(hours+\" \"+minutes)\r\n schedule.every().day.at(\"{}:{}\".format(hours,minutes)).do(job)\r\n\r\n while 1:\r\n schedule.run_pending()\r\n \r\n \r\n","sub_path":"stock_summary.py","file_name":"stock_summary.py","file_ext":"py","file_size_in_byte":6417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"528765273","text":"# !/usr/bin/python\r\n\r\n# from __future__ import division\r\nfrom __future__ import print_function\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.snowball import EnglishStemmer\r\nfrom collections import defaultdict\r\n\r\nimport Algorithms as Alg\r\nimport datetime as dt\r\nimport string\r\n\r\nimport nltk # , re, pprint\r\nimport argparse\r\n\r\npunctuations = ['.', '?', '\"', '\\'', '\\'\\'', '!', ',', ';', ':', '/', '\\\\', '`',\r\n '``', '_', '(', ')', '[', ']', '{', '}', '<', '>']\r\ndebug_on = False\r\ntimer = False\r\nduplicates = 0\r\n\r\n\r\nclass DocParse:\r\n max_sentence = 10000\r\n include_file = ''\r\n outfile = ''\r\n count = 0\r\n\r\n def __init__(self):\r\n self.stemming_on = False\r\n self.stop_word_on = False\r\n self.summary = False\r\n self.use_threshold = False\r\n self.max_words_in_summary = 100\r\n self.keep_all = True # by default do not exclude dupe words\r\n self.normalize = False\r\n self.score = 'size' # size | tfidf | stfidf\r\n self.update = False\r\n self.total_sentences = 0\r\n self.sentence_dictionary = defaultdict(list) # map of modified sentences to actual sentences (tokenized)\r\n\r\n self.dictionary = {}\r\n # keys are final tokenized output\r\n # values are 2-tuple of original sentence and size\r\n\r\n self.mod_words = () # all unique words of document\r\n self.mod_sentences = ((),)\r\n self.unique_sent = ((),)\r\n self.alg = Alg.Algorithms()\r\n self.stemmer = EnglishStemmer()\r\n self.doc_size = 0\r\n\r\n def tokenize(self, in_file):\r\n \"\"\"Reads in_file and tokenizes into words.\"\"\"\r\n\r\n global debug_on\r\n global punctuations\r\n if debug_on: print('stem:', self.stemming_on)\r\n if debug_on: print('stop:', self.stop_word_on)\r\n if debug_on: print('keep:', self.keep_all)\r\n f = open(in_file)\r\n raw = f.read()\r\n sentences_list = []\r\n words_list = []\r\n dictionary_values = []\r\n sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\r\n raw_sentences = sent_tokenizer.tokenize(raw)\r\n self.total_sentences = len(raw_sentences)\r\n\r\n # operate on each sentence string from in_file\r\n for s, sentence in enumerate(raw_sentences):\r\n if debug_on: print('sentence #', str(s + 1))\r\n\r\n # remove newlines, after tokenizer.\r\n sentence = sentence.replace('\\n', ' ')\r\n # change sentence into tokens (words)\r\n tokens = nltk.word_tokenize(sentence)\r\n # create table that maps all punctuation marks to None\r\n table = str.maketrans({key: None for key in string.punctuation if key != '\\''})\r\n # keep only words and numbers\r\n words = [word.lower() for word in tokens if (word.translate(table) and word != '\\'')]\r\n if debug_on:\r\n print(\"nltk tokens\", end=\":\")\r\n print(tokens)\r\n 
print(\"parsed words\", end=\": \")\r\n print(words)\r\n print(len(words))\r\n sentence_size = len(words)\r\n if debug_on: print('sent len:', str(sentence_size))\r\n # remove stop words\r\n if self.stop_word_on:\r\n filtered_words = [word for word in words if word not in stopwords.words('english')]\r\n words = filtered_words\r\n # stem words\r\n if self.stemming_on:\r\n filtered_words = [self.stemmer.stem(word) for word in words]\r\n words = filtered_words\r\n if debug_on: print('after filters:', str(words))\r\n # compress sentences to unique words only if not doing greedy3 or tf-idf\r\n if self.keep_all:\r\n unique_words = words\r\n # removes repeated sentences\r\n if words not in sentences_list:\r\n sentences_list.append(words)\r\n dictionary_values.append((sentence, sentence_size, s))\r\n else:\r\n # make list of unique words from current sentence\r\n unique_words = list(set(words))\r\n # if unique word set not in sentence list than add this set\r\n # all repeated sentences will be removed at this stage\r\n if unique_words not in sentences_list:\r\n sentences_list.append(unique_words)\r\n # update local dictionary that maps index to tuple (original sentence, and length)\r\n dictionary_values.append((sentence, sentence_size, s))\r\n if debug_on: print(sentences_list)\r\n\r\n # add unique words to doc word list\r\n for w, word in enumerate(words):\r\n if word not in words_list:\r\n words_list.append(word)\r\n\r\n # add the modified sentence into dictionary\r\n self.sentence_dictionary[tuple(unique_words)].append(sentence)\r\n\r\n # this loop changes all the sentences of sentence_list into tuples\r\n for s, sentence in enumerate(sentences_list):\r\n sentences_list[s] = tuple(sentence)\r\n self.dictionary[sentences_list[s]] = dictionary_values[s]\r\n\r\n # store word list as tuple\r\n # store sentence list as tuple\r\n self.mod_words = tuple(words_list)\r\n self.mod_sentences = tuple(sentences_list)\r\n self.doc_size = len(self.mod_sentences)\r\n\r\n def find_dominating_set(self, option='greedy'):\r\n if option == 'greedy':\r\n if self.score == 'size':\r\n if self.normalize:\r\n self.do_g_unique()\r\n else:\r\n self.do_g_size()\r\n elif self.score == 'tfidf':\r\n self.do_g_tfidf()\r\n elif self.score == 'stfidf':\r\n self.do_g_stfidf()\r\n elif option == 'dynamic':\r\n self.do_dynamic()\r\n elif option == 'optimal':\r\n self.do_bottomup()\r\n elif option == 'mcdonald':\r\n self.do_mcdonald()\r\n\r\n def do_mcdonald(self):\r\n global debug_on\r\n answer = self.alg.mcdonald(self.mod_sentences, self.mod_words, self.dictionary,\r\n use_threshold=self.use_threshold, word_count=self.max_words_in_summary)\r\n print(answer)\r\n\r\n def do_g_size(self):\r\n global debug_on\r\n answer = self.alg.greedy(self.mod_sentences, self.mod_words, self.dictionary,\r\n update=self.update,\r\n word_threshold=self.use_threshold,\r\n word_count=self.max_words_in_summary)\r\n if debug_on: print('greedy answer', answer)\r\n if self.summary:\r\n print(self.make_summary(answer))\r\n else:\r\n print('len(ans):', len(answer))\r\n print('len(doc):', self.total_sentences)\r\n if debug_on:\r\n print('*****')\r\n print(self.sentence_dictionary)\r\n print('*****')\r\n\r\n def do_g_unique(self):\r\n global debug_on\r\n answer = self.alg.greedy2(self.mod_sentences, self.mod_words, self.sentence_dictionary, self.dictionary,\r\n update=self.update,\r\n word_threshold=self.use_threshold,\r\n word_count=self.max_words_in_summary)\r\n if debug_on: print('greedy answer', answer)\r\n if self.summary:\r\n 
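# echo mode: print the assembled summary text instead of the coverage statistics\r\n            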
print(self.make_summary(answer))\r\n else:\r\n print('len(ans):', len(answer))\r\n print('len(doc):', self.total_sentences)\r\n if debug_on:\r\n print('*****')\r\n print(self.sentence_dictionary)\r\n print('*****')\r\n\r\n def do_g_tfidf(self):\r\n global debug_on\r\n answer = self.alg.tfidf(self.mod_sentences, self.mod_words, self.dictionary,\r\n ratio=self.normalize,\r\n update=self.update,\r\n word_count=self.max_words_in_summary,\r\n use_threshold=self.use_threshold)\r\n if debug_on: print('tfidf answer', answer)\r\n if self.summary:\r\n print(self.make_summary(answer))\r\n else:\r\n print('len(ans):', len(answer))\r\n print('len(doc):', self.total_sentences)\r\n if debug_on:\r\n print('*****')\r\n print(self.sentence_dictionary)\r\n print('*****')\r\n\r\n def do_g_stfidf(self):\r\n global debug_on\r\n answer = self.alg.stfidf(self.mod_sentences, self.mod_words, self.dictionary,\r\n update=self.update,\r\n ratio=self.normalize,\r\n word_count=self.max_words_in_summary,\r\n use_threshold=self.use_threshold)\r\n if debug_on: print('tfidf answer', answer)\r\n if self.summary:\r\n print(self.make_summary(answer))\r\n else:\r\n print('len(ans):', len(answer))\r\n print('len(doc):', self.total_sentences)\r\n if debug_on:\r\n print('*****')\r\n print(self.sentence_dictionary)\r\n print('*****')\r\n\r\n def do_g_rtfidf(self):\r\n global debug_on\r\n answer = self.alg.tfidf(self.mod_sentences, self.mod_words, self.dictionary, ratio=True,\r\n use_threshold=self.use_threshold)\r\n if debug_on: print('tfidf answer', answer)\r\n if self.summary:\r\n print(self.make_summary(answer))\r\n else:\r\n print('len(ans):', len(answer))\r\n print('len(doc):', self.total_sentences)\r\n if debug_on:\r\n print('*****')\r\n print(self.sentence_dictionary)\r\n print('*****')\r\n\r\n def do_bottomup(self):\r\n global debug_on\r\n self.alg.bottom_up(self.mod_sentences)\r\n if self.summary:\r\n print(self.make_summary(self.alg.dynamic_ans))\r\n else:\r\n print('len(ans):', len(self.alg.dynamic_ans))\r\n print('len(doc):', self.total_sentences)\r\n\r\n def do_dynamic(self):\r\n global debug_on\r\n if debug_on: print(self.mod_sentences)\r\n if debug_on: print(self.mod_words)\r\n if self.doc_size > 20:\r\n print('too many sentences:', self.doc_size)\r\n return\r\n # else:\r\n # print('there are', self.doc_size, 'sentences')\r\n self.alg.dynamic(self.mod_sentences, self.mod_words)\r\n # self.sd.dynamic_lookup(set_of_sents, set_of_words)\r\n if debug_on: print('')\r\n self.alg.dynamic_calc_answer(self.mod_sentences, self.mod_words)\r\n if debug_on: print(self.alg.dynamic_ans)\r\n if debug_on:\r\n for i, items in enumerate(self.alg.dynamic_ans):\r\n print(i, \":\", items)\r\n if self.summary:\r\n print(self.make_summary(self.alg.dynamic_ans))\r\n else:\r\n print('len(ans):', len(self.alg.dynamic_ans))\r\n print('len(doc):', self.total_sentences)\r\n # print 'dynamic answer', answer\r\n pass\r\n\r\n def make_summary(self, sentences):\r\n global debug_on\r\n ret_val = []\r\n word_count = 0\r\n for sentence in sentences:\r\n if self.dictionary[sentence][1] <= (self.max_words_in_summary - word_count) or \\\r\n self.max_words_in_summary == 0:\r\n if debug_on: print(str(self.dictionary[sentence][1]) + \": \" + self.dictionary[sentence][0])\r\n ret_val.append(self.dictionary[sentence][0])\r\n word_count += self.dictionary[sentence][1]\r\n else:\r\n if debug_on: print(str(self.dictionary[sentence][1]) + \": \" + self.dictionary[sentence][0])\r\n ret_val.append(self.shorten(self.dictionary[sentence][0], self.max_words_in_summary - 
word_count))\r\n break\r\n pass\r\n if self.outfile:\r\n with open(self.outfile, 'w') as f:\r\n f.write(\" \".join(ret_val))\r\n pass\r\n return \" \".join(ret_val)\r\n pass\r\n\r\n def shorten(self, sentence, length):\r\n global punctuations\r\n global debug_on\r\n tokens = nltk.word_tokenize(sentence)\r\n # remove all non-alphanumeric characters\r\n words_used = 0\r\n words = []\r\n for word in tokens:\r\n if words_used == length:\r\n break\r\n if debug_on: print(word, end=' ')\r\n words.append(word)\r\n if word not in punctuations:\r\n words_used += 1\r\n if debug_on: print('keep', words_used)\r\n else:\r\n if debug_on: print('remove')\r\n # words = [word for word in tokens if word not in punctuations]\r\n # return \" \".join(words[:length-len(words)])\r\n return \" \".join(words)\r\n\r\n\r\ndef main():\r\n global debug_on\r\n global timer\r\n before = dt.datetime.now()\r\n parser = argparse.ArgumentParser(description='Graph from Text')\r\n parser.set_defaults(run='greedy', score='size')\r\n parser.add_argument(\"infile\", help='name of input file')\r\n parser.add_argument(\"-r\", \"--run\", metavar='[greedy|optimal|mcdonald]',\r\n choices=['greedy', 'optimal', 'mcdonald'],\r\n help='finds dominating set from sentences of document')\r\n parser.add_argument(\"-c\", \"--score\",\r\n choices=['size', 'tfidf', 'stfidf'],\r\n default='size')\r\n # unique word option.\r\n parser.add_argument(\"-d\", \"--distinct\", action='store_true', help='reduce sentences to distinct words')\r\n # normalize sentence\r\n parser.add_argument(\"-n\", \"--normalize\", action='store_true', help='normalize score by sentence size')\r\n # sets parser to do stemming\r\n parser.add_argument(\"-s\", \"--stem\", action='store_true', help='turns on stemming function')\r\n # option to give a word threshold for the summary.\r\n parser.add_argument(\"-t\", \"--threshold\", metavar='', help='enables summary mode, and sets threshold')\r\n # option for requesting summary to console\r\n parser.add_argument(\"-e\", \"--echo\", action='store_true', help='script output prints summary')\r\n # option to update all score entries based on each incremental selection\r\n parser.add_argument(\"-u\", \"--update\", action='store_true', help='update score values per selected sentence')\r\n # option to view debug messages\r\n parser.add_argument(\"-v\", \"--verbose\", action='store_true')\r\n # sets parser to remove stopwords\r\n parser.add_argument(\"-w\", \"--stopword\", action='store_true', help='turns on stop word removal')\r\n parser.add_argument(\"-i\", metavar='', help='uses as list of words to include')\r\n parser.add_argument(\"-o\", metavar='', help='outputs summary to ')\r\n args = parser.parse_args()\r\n dp = DocParse()\r\n if args.stem: dp.stemming_on = True\r\n if args.stopword: dp.stop_word_on = True\r\n if args.echo:\r\n dp.summary = True\r\n dp.max_words_in_summary = 0\r\n if args.threshold:\r\n dp.summary = True\r\n dp.use_threshold = True\r\n dp.max_words_in_summary = int(args.threshold)\r\n if args.distinct:\r\n dp.keep_all = False\r\n if args.normalize:\r\n dp.normalize = True\r\n if args.score:\r\n dp.score = args.score\r\n if args.update:\r\n dp.update = True\r\n if args.o:\r\n dp.outfile = args.o\r\n if args.run:\r\n if args.i:\r\n dp.include_file = args.i\r\n dp.tokenize(args.infile)\r\n dp.find_dominating_set(args.run)\r\n after = dt.datetime.now()\r\n if timer:\r\n print('before:', before)\r\n print('after :', after)\r\n print('total :', after - before)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
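# Example run (input.txt is a placeholder file): python DocSumm.py input.txt -e -t 100 -s -w\r\n    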
main()\r\n","sub_path":"DocSumm.py","file_name":"DocSumm.py","file_ext":"py","file_size_in_byte":15901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"62111845","text":"# coding=utf-8\n# Example of API consumption, test version.\n# Written by: Marlon Jodar, member of the SunApiPerú development team.\n# More information at: https://www.sunapiperu.com/pruebas\n\n'''\nIMPORTANT:\nThis script is written to run on Python 2.7.9 or later.\nThat restriction comes from the requests module, which internally uses the urllib3 library.\nThe documentation of that library states that Python versions earlier than 2.7.9 have\nrestrictions in their ssl module, which limits the configuration urllib3 can apply. In particular this causes\nHttps requests to fail, and certain security features become unavailable, which results in\nan InsecurePlatformWarning. More information at: http://urllib3.readthedocs.org/en/latest/security.html#certifi-with-urllib3.\n'''\n\n# Module used to perform the https requests. (More information: http://docs.python-requests.org/)\nimport requests\n\n# URL where the https request is made. (See https://www.sunapiperu.com/pruebas)\nAPI_URL = \"https://sunapiperu.com/api_qa/calculadora\"\n\n# Api key required to query the test API\nAPI_KEY = 'sunapi'\n\nmensaje_bienvenida = \"Hello, welcome to this small demo on how to use the currency calculator. We hope it will be \"+\\\n                     \"useful in your future implementations. If you have any doubt or question, do not hesitate to contact our \"+\\\n                     \"team through the contact form on our web page https://www.sunapiperu.com. Pythonic greetings, \"+\\\n                     \"the SunApiPerú development team.\\n\"\n\n# Function that captures the user input, performs the request and prints the result on screen\ndef realiza_solicitud():\n\ttry:\n\t\t# Capture the user input\n\t\tvalor = input('Type the value you want to convert and press Enter:\\n')\n\t\tde = input('Type the currency to convert from, e.g. pen, and press Enter:\\n')\n\t\ta = input('Type the currency to convert to, e.g. eur, and press Enter:\\n')\n\t\tfecha = input('Type the date whose exchange rate should be applied and press Enter:\\n')\n\t\tdecimal = input('Type how many decimal places to round to and press Enter:\\n')\n\t\t# Prepare the parameters for the request\n\t\tparametros = {'apikey':API_KEY, 'valor':valor, 'de':de, 'a':a, 'fecha':fecha, 'decimal':decimal}\n\t\t# Perform the request with a 5 second timeout\n\t\trespuesta = requests.get(API_URL, params = parametros, timeout=5.0)\n\t\t# Parse the server response into json using the json decoder provided by requests\n\t\trespuesta_json = respuesta.json()\n\t\t# Check the server response and print to the console according to the results\n\t\tif \"mensaje\" in respuesta_json:\n\t\t\tprint(respuesta_json['mensaje'])\n\t\telse:\n\t\t\tprint(\"The server answered:\\n\")\n\t\t\tfor key in respuesta_json:\n\t\t\t\tprint('- ' + key + ': ' + str(respuesta_json[key]))\n\texcept Exception as e:\n\t\t# On any exception, print the exception\n\t\tprint (e)\n\n# Ensures the script is executed procedurally as the primary execution module\nif __name__ == '__main__':\n\tprint(mensaje_bienvenida)\n\trealiza_solicitud()\nelse:\n\tprint (\"Run this script directly, do not import it. Thanks!\")","sub_path":"Python/7. calculadora_monetaria.py","file_name":"7. calculadora_monetaria.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"573512870","text":"from flask import Flask, request\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\n\n# The example we saw in our prep work has us creating a form, for getting the time from the user.\ntime_form = \"\"\"\n

<!DOCTYPE html>\n<html>\n    <head>\n        <title>Validate Time</title>\n    </head>\n    <body>\n        <h1>Validate Time</h1>\n        <form method=\"POST\">\n            <label>Hours (0-23)</label>\n            <input type=\"text\" name=\"hours\" value=\"{hours}\">\n            <span class=\"error\">{hours_error}</span>\n            <br>\n            <label>Minutes (0-59)</label>\n            <input type=\"text\" name=\"minutes\" value=\"{minutes}\">\n            <span class=\"error\">{minutes_error}</span>\n            <br>\n            <input type=\"submit\" value=\"Validate\">\n        </form>\n    </body>\n</html>
\n\"\"\"\n\n\n@app.route('/validate-time')\ndef index():\n return time_form.format(hours='', hours_error='', minutes='', minutes_error='')\n\ndef is_integer(str_num):\n # return True if it can be converted to int\n # return False if it can't be converted to int\n print(\"IN INTEGER FOR\", str_num)\n try:\n int(str_num)\n return True\n except ValueError:\n return False\n\n@app.route('/validate-time', methods=['POST'])\ndef validate_time():\n hours = request.form['hours']\n minutes = request.form['minutes']\n\n hours_error = ''\n minutes_error = ''\n\n # Validate steps\n # We are expecting integers, did they give us integers\n # Are the integers in the correct range\n \n\n # is the hour input an integer?\n if not is_integer(hours):\n hours_error = 'Not a valid integer'\n # their input was invalid, so let's wipe it out for them\n hours = ''\n else:\n # is the hour in the range of 0-24\n hours = int(hours)\n if hours < 0 or hours > 23:\n hours_error = 'Hour value out of range (0-23)'\n # is the minute input an integer?\n if not is_integer(minutes):\n minutes_error = 'Not a valid integer'\n # their input was invalid, so let's wipe it out for them\n minutes = ''\n else:\n # is the minute in the range of 0-59\n minutes = int(minutes)\n if minutes < 0 or minutes > 59:\n minutes_error = 'Minute value out of range (0-59)'\n\n # now that we have created the code that validates if hours & minutes are integers\n # and validates that hours & minutes are in the correct range\n # we can now serve up the HTTP Response\n\n # didn't catch any errors in the form, so we need to send a success message\n if minutes_error == '' and hours_error == '':\n # success message\n return \"SUCCESS\"\n else:\n return time_form.format(hours_error=hours_error,\n minutes_error=minutes_error,\n hours=hours,\n minutes=minutes)\n \n\napp.run()","sub_path":"examples/2.5/prep_work_server-side_example.py","file_name":"prep_work_server-side_example.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"547004750","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Author Tobias Ottenweller\n# 24.10.2010\n# Gertz Gutsche Rümenapp Gbr\n\nimport datetime\nimport time\nimport sys\nimport psycopg2\nimport thread\n\nfrom graphserver.core import WalkOptions, State\nfrom graphserver.graphdb import GraphDatabase\n\nfrom graphserver_tools.utils import utils\n\n\nclass Proccessing():\n def get_gs_vertex(self, point_id):\n self.cursor.execute('SELECT vertex_label FROM cal_corres_vertices WHERE point_id=%s', ( point_id, ))\n return self.cursor.fetchone()[0]\n\n\n def get_point(self, vertex_label):\n self.cursor.execute('SELECT point_id FROM cal_corres_vertices WHERE vertex_label=%s', ( vertex_label, ))\n return [x[0] for x in self.cursor][0]\n\n\n def prepare_times(self, start_time, end_time):\n times = []\n\n start = time.mktime(start_time.timetuple())\n end = time.mktime(end_time.timetuple())\n\n t = start\n while t <= end:\n times.append(t)\n t += self.time_step\n\n return times\n\n\n def get_route_dict(self):\n self.cursor.execute('SELECT origin, destination, time FROM cal_routes WHERE NOT done LIMIT 1')\n row = self.cursor.fetchone()\n if row:\n origin, destination, time = row\n else: # there are no routes to Compute\n return None\n\n self.cursor.execute('SELECT start_time, end_time, is_arrival_time FROM cal_times WHERE id=%s', ( time, ))\n start_time, end_time, is_arrival = self.cursor.fetchone()\n\n if is_arrival:\n return 
self.get_retro_dict(destination, time, start_time, end_time)\n else:\n return self.get_dict(origin, time, start_time, end_time)\n\n\n def get_retro_dict(self, destination, time, start_time, end_time):\n self.cursor.execute('''SELECT id, origin FROM cal_routes WHERE destination=%s AND time=%s''', ( destination, time ))\n origins = list(self.cursor.fetchall())\n\n self.cursor.execute('UPDATE cal_routes SET done=%s WHERE destination=%s AND time=%s', ( True, destination, time ))\n self.conn.commit()\n\n return { 'destination':self.get_gs_vertex(destination), 'times':self.prepare_times(start_time, end_time),\n 'arrival':True,\n 'origins':[ ( self.get_gs_vertex(orig[1]), orig[0] ) for orig in origins ] }\n\n\n def get_dict(self, origin, time, start_time, end_time):\n self.cursor.execute('''SELECT id, destination FROM cal_routes WHERE origin=%s AND time=%s''', ( origin, time ))\n destinations = list(self.cursor.fetchall())\n\n self.cursor.execute('UPDATE cal_routes SET done=%s WHERE origin=%s AND time=%s', ( True, origin, time ))\n self.conn.commit()\n\n return { 'origin':self.get_gs_vertex(origin), 'times':self.prepare_times(start_time, end_time),\n 'arrival':False,\n 'destinations':[ ( self.get_gs_vertex(dest[1]), dest[0] ) for dest in destinations ] }\n\n\n def process_paths(self, routes):\n for t in routes['times']:\n s = State(1, t)\n\n # build the shortest path tree at time 't'\n try:\n if len(routes['destinations']) > 1:\n spt = self.graph.shortest_path_tree(routes['origin'], None, s, self.walk_ops)\n else:\n spt = self.graph.shortest_path_tree(routes['origin'],routes['destinations'][0][0], s, self.walk_ops) # faster but only ONE destination\n except:\n pass\n\n # extract the actual routes and write them into the database\n for dest in routes['destinations']:\n\n try:\n vertices, edges = spt.path(dest[0])\n\n if not vertices: raise Exception()\n\n except:\n self.write_error_trip(t, dest[1])\n else:\n self.write_trip(vertices, dest[1])\n\n # cleanup\n try:\n spt.destroy()\n except:\n pass\n\n\n def process_retro_paths(self, routes):\n for t in routes['times']:\n s = State(1, t)\n\n # build the shortest path tree at time 't'\n try:\n if len(routes['origins']) > 1:\n spt = self.graph.shortest_path_tree_retro(None, routes['destination'], s,self.walk_ops)\n else:\n spt = self.graph.shortest_path_tree_retro(routes['origins'][0][0], routes['destination'], s, self.walk_ops) # faster but only ONE destination\n except:\n pass\n\n # extract the actual routes and write them into the database\n for orig in routes['origins']:\n try:\n vertices, edges = spt.path_retro(orig[0])\n\n if not vertices: raise Exception()\n\n except:\n self.write_error_trip(t, orig[1])\n else:\n self.write_retro_trip(vertices, orig[1])\n\n # cleanup\n try:\n spt.destroy()\n except:\n pass\n\n\n def run(self):\n '''\n method for processing (calculating shortest paths) all routes stored inside the databases\n associated with this object.\n [only routes with the processed flag not set will be processed]\n '''\n routes = self.get_route_dict()\n while ( routes ):\n if routes['arrival']:\n self.process_retro_paths(routes)\n else:\n self.process_paths(routes)\n\n routes = self.get_route_dict()\n \n\n def write_retro_trip(self, vertices, route_id):\n ''' in retro_paths the walking distance is counted in the wrong direction.\n this method corrects this.\n '''\n\n # now done in write_results\n\n self.write_trip(vertices, route_id)\n\n\n def write_trip(self, vertices, route_id):\n current_trip_id = str(self.trip_id)\n self.trip_id += 
1\n\n start_time = datetime.datetime.fromtimestamp(vertices[0].state.time)\n end_time = datetime.datetime.fromtimestamp(vertices[-1].state.time)\n\n self.cursor.execute('INSERT INTO cal_paths VALUES (%s,%s,%s,%s,%s)', ( self.trip_prefix + current_trip_id, route_id, start_time, end_time, (vertices[-1].state.time - vertices[0].state.time ) ))\n\n for c, v in enumerate(vertices):\n time = datetime.datetime.fromtimestamp(v.state.time)\n\n self.cursor.execute('INSERT INTO cal_paths_details VALUES (%s,%s,%s,%s,%s,%s,%s,%s)', ( self.trip_prefix + current_trip_id, c, v.label, time, v.state.weight, v.state.dist_walked, v.state.num_transfers, v.state.trip_id ))\n if not self.trips_calculated % 1000:\n self.conn.commit()\n self.logfile.write('%s routes calculated by %s, last route: %s \\n' %(self.trips_calculated, self.trip_prefix, route_id))\n self.logfile.flush()\n self.trips_calculated += 1\n\n\n ''' this method will write a very long trip into the database. '''\n def write_error_trip(self, start_time, route_id):\n current_trip_id = str(self.trip_id)\n self.trip_id += 1\n\n start_date_time = datetime.datetime.fromtimestamp(start_time)\n end_time = datetime.datetime(2030,12,31)\n\n self.cursor.execute('INSERT INTO cal_paths VALUES (%s,%s,%s,%s,%s)', (self.trip_prefix + current_trip_id, route_id, start_date_time, end_time, (time.mktime(end_time.timetuple()) - start_time ) ))\n\n\n def __init__(self, graph, db_connection_string, time_step=240, walking_speed=1.2, max_walk=1080, walking_reluctance=2, trip_prefix='', logfile = None):\n\n self.trip_prefix = trip_prefix\n self.time_step = time_step\n\n self.walk_ops = WalkOptions()\n self.walk_ops.walking_speed = walking_speed\n self.walk_ops.max_walk = max_walk\n self.walk_ops.walking_reluctance = walking_reluctance\n\n self.graph = graph\n self.conn = psycopg2.connect(db_connection_string)\n self.cursor = self.conn.cursor()\n self.trip_id = 0\n self.trips_calculated = 0\n self.logfile = logfile\n\n self.run()\n\n\n def __del__(self):\n self.walk_ops.destroy()\n self.cursor.close()\n self.conn.commit()\n self.graph.destroy()\n\n\ndef create_db_tables(connection, recreate=False):\n cursor = connection.cursor()\n\n cursor.execute(\"select tablename from pg_tables where schemaname='public'\" )\n tables = cursor.fetchall()\n\n\n if ( 'cal_paths', ) not in tables or recreate:\n\n if recreate:\n cursor.execute('DROP TABLE IF EXISTS cal_paths CASCADE')\n\n cursor.execute('''CREATE TABLE cal_paths ( id TEXT PRIMARY KEY,\n route_id INTEGER REFERENCES cal_routes,\n start_time TIMESTAMP NOT NULL,\n end_time TIMESTAMP NOT NULL,\n total_time INTEGER NOT NULL )''')\n\n cursor.execute('UPDATE public.cal_routes SET done = FALSE;')\n\n\n if ( 'cal_paths_details', ) not in tables or recreate:\n\n if recreate:\n cursor.execute('DROP TABLE IF EXISTS cal_paths_details CASCADE')\n\n cursor.execute('''CREATE TABLE cal_paths_details ( path_id TEXT REFERENCES cal_paths,\n counter INTEGER NOT NULL,\n label TEXT NOT NULL,\n time TIMESTAMP NOT NULL,\n weight INTEGER NOT NULL,\n dist_walked REAL NOT NULL,\n num_transfers INTEGER NOT NULL,\n gtfs_trip_id TEXT,\n UNIQUE (path_id, counter)) ''')\n\n connection.commit()\n cursor.close()\n\n\ndef print_status(connection, logfile=None):\n\n def calc_calculation_time(routes_previously_wating, routes_waiting, all_routes, time_finished):\n if routes_previously_wating != routes_waiting:\n\n if (not routes_previously_wating) or (all_routes - routes_previously_wating == 0):\n return None, routes_waiting\n\n routes_processed = all_routes - 
routes_previously_wating\n routes_previously_wating = routes_waiting\n\n routes_per_second = (time.time() - time_started) / routes_processed\n time_finished = (all_routes - routes_processed) * routes_per_second\n\n return time_finished, routes_previously_wating\n\n\n time_started = time.time()\n time_finished = None\n routes_previously_wating = None\n routes_waiting = None\n\n cursor = connection.cursor()\n cursor.execute('SELECT count(*) FROM cal_routes')\n all_routes = cursor.fetchone()[0]\n\n finished = False\n while not finished:\n time.sleep(1.0)\n cursor.execute('SELECT count(*) FROM cal_routes WHERE NOT done')\n routes_waiting = cursor.fetchone()[0]\n if not all_routes:\n finished = True\n\n else:\n if time_finished:\n text = '\\r%s routes waiting to be processed. Finished in about %s ' % (routes_waiting,\n utils.seconds_time_string(time_finished))\n sys.stdout.write(text)\n sys.stdout.flush()\n## if logfile:\n## logfile.write(text)\n## logfile.flush()\n\n else:\n text = '\\r%s routes waiting to be processed. Please wait ... ' % routes_waiting\n sys.stdout.write(text)\n sys.stdout.flush()\n## if logfile:\n## logfile.write(text)\n## logfile.flush()\n\n time_finished, routes_previously_wating = calc_calculation_time(routes_previously_wating, routes_waiting, all_routes, time_finished)\n\n connection.close()\n\n sys.stdout.write('\\rThe last routes getting processed. Please wait ... \\n')\n sys.stdout.flush()\n","sub_path":"graphserver_tools/graphserver_tools/process_routes.py","file_name":"process_routes.py","file_ext":"py","file_size_in_byte":12419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"496018596","text":"# coding=utf-8\nfrom django.conf.urls import patterns, url, include\nfrom django.contrib.auth.decorators import login_required as lr\nimport views\n\nrelationship_patterns = patterns(\n '',\n url(\n r'^friend_requests/$',\n lr(views.FriendRequestListView.as_view()),\n name='friend_requests'\n ),\n\n url(\n r'^friend_requests/(?P\\d+)/accept/$',\n lr(views.AcceptFriendRequestView.as_view()),\n name='accept_friend_request'\n ),\n\n url(\n r'^friend_requests/(?P\\d+)/deny/$',\n lr(views.DenyFriendRequestView.as_view()),\n name='deny_friend_request'\n ),\n\n url(\n r'^(?P\\w+)/toggle_follow/$',\n lr(views.FollowerRelationshipToggleView.as_view()),\n name=\"toggle_follow\"\n ),\n\n url(\n r'^(?P\\w+)/follow/$',\n lr(views.FollowerRelationshipCreateView.as_view()),\n name=\"follow\"\n ),\n\n url(\n r'^(?P\\w+)/unfollow/$',\n lr(views.FollowerRelationshipDestroyView.as_view()),\n name=\"unfollow\"\n ),\n \n url(\n r'^(?P\\w+)/buttons/$',\n lr(views.FriendshipButtonsTemplateView.as_view()),\n name='friendship_buttons'\n ),\n\n url(\n r'^(?P\\w+)/request_friendship/$',\n lr(views.BaseFriendRequestCreateView.as_view()),\n name='request_friendship'\n ),\n\n url(\n r'^(?P\\w+)/request_friendship_inline/$',\n lr(views.FriendRequestCreateView.as_view()),\n name='request_friendship_inline'\n ),\n\n url(\n r'^(?P\\w+)/groups/$',\n views.UserSocialGroupList.as_view(),\n name='groups'\n ),\n\n url(\n r'^(?P\\w+)/comment/$',\n lr(views.BaseProfileCommentCreateView.as_view()),\n name=\"profile_comment\"\n ),\n\n url(\n r'^(?P\\w+)/comment_inline/$',\n lr(views.ProfileCommentCreateView.as_view()),\n name=\"profile_comment_inline\"\n ),\n\n)\n\ngroup_patterns = patterns(\n '',\n url(\n r'^$',\n views.SocialGroupListView.as_view(),\n name='all'\n ),\n\n url(\n r'^new/$',\n lr(views.BaseSocialGroupCreateView.as_view()),\n name='new'\n ),\n\n 
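# each create/edit/post action below also has an *_inline variant (an embeddable form, judging by the view names)\n    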
url(\n r'^new_inline/$',\n lr(views.SocialGroupCreateView.as_view()),\n name='new_inline'\n ),\n\n url(\n r'^(?P[\\w-]+)/$',\n views.SocialGroupDetailView.as_view(),\n name='details'\n ),\n\n url(\n r'^(?P[\\w-]+)/members/$',\n views.SocialGroupMembersList.as_view(),\n name='members'\n ),\n\n url(\n r'^(?P[\\w-]+)/feed/$',\n views.SocialGroupFeedView.as_view(),\n name='feed'\n ),\n\n url(\n r'^(?P[\\w-]+)/requests/$',\n lr(views.SocialGroupMembershipRequestsList.as_view()),\n name='requests'\n ),\n\n url(\n r'^(?P[\\w-]+)/buttons/$',\n lr(views.MembershipButtonsTemplateView.as_view()),\n name='membership_buttons'\n ),\n\n url(\n r'^(?P[\\w-]+)/edit/$',\n lr(views.BaseSocialGroupUpdateView.as_view()),\n name='edit'\n ),\n\n url(\n r'^(?P[\\w-]+)/edit_inline/$',\n lr(views.SocialGroupUpdateView.as_view()),\n name='edit_inline'\n ),\n\n url(\n r'^(?P[\\w-]+)/post/$',\n lr(views.GenericGroupPostCreateView.as_view()),\n name='post'\n ),\n\n url(\n r'^(?P[\\w-]+)/(?P\\d+)/edit/$',\n lr(views.GenericGroupPostUpdateView.as_view()),\n name='edit_post'\n ),\n\n url(\n r'^(?P[\\w-]+)/(?P\\d+)/delete/$',\n lr(views.GroupPostDeleteView.as_view()),\n name='delete_post'\n ),\n\n url(\n r'^(?P[\\w-]+)/comment/$',\n lr(views.BaseGroupCommentCreateView.as_view()),\n name='comment'\n ),\n\n url(\n r'^(?P[\\w-]+)/comment_inline/$',\n lr(views.GroupCommentCreateView.as_view()),\n name='comment_inline'\n ),\n\n url(\n r'^(?P[\\w-]+)/post_link/$',\n lr(views.BaseGroupLinkCreateView.as_view()),\n name='post_link'\n ),\n\n url(\n r'^(?P[\\w-]+)/post_link_inline/$',\n lr(views.GroupLinkCreateView.as_view()),\n name='post_link_inline'\n ),\n\n url(\n r'^(?P[\\w-]+)/post_photo/$',\n lr(views.BaseGroupPhotoCreateView.as_view()),\n name='post_photo'\n ),\n\n url(\n r'^(?P[\\w-]+)/post_photo_inline/$',\n lr(views.GroupPhotoCreateView.as_view()),\n name='post_photo_inline'\n ),\n\n url(\n r'^(?P[\\w-]+)/join/$',\n lr(views.SocialGroupJoinView.as_view()),\n name='join'\n ),\n\n url(\n r'^(?P[\\w-]+)/request_membership/$',\n lr(views.BaseSocialGroupRequestCreateView.as_view()),\n name='request_membership'\n ),\n\n url(\n r'^(?P[\\w-]+)/request_membership_inline/$',\n lr(views.SocialGroupRequestCreateView.as_view()),\n name='request_membership_inline'\n ),\n\n url(\n r'^(?P[\\w-]+)/requests/(?P\\d+)/accept/$',\n lr(views.SocialGroupRequestAcceptView.as_view()),\n name='accept_request'\n ),\n\n url(\n r'^(?P[\\w-]+)/requests/(?P\\d+)/deny/$',\n lr(views.SocialGroupRequestDenyView.as_view()),\n name='deny_request'\n ),\n\n)\n\nurlpatterns = patterns(\n '',\n url(\n r'^group/', include(group_patterns, namespace='group')\n ),\n\n url(\n r'^', include(relationship_patterns, namespace='user')\n ),\n)\n","sub_path":"social_network/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"422284052","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport string\nimport re\n\nif len(sys.argv) < 3:\n print (\"usage: %s in.file out.file \"%(sys.argv[0]));\n print (\"usage: %s res.eval out.log \"%(sys.argv[0]));\n sys.exit(0);\n\nout_log = sys.argv[2] + \".log\"\n\nfp_res = open(sys.argv[1]);\nfp_out_err = open(sys.argv[2],\"w\");\nfp_log = open(out_log,\"w\");\n\nmap_final = {}; ### [存储spk1和2之间的相似度 ]\nmap_lab = {}; ### [spk0_spk1:num]\nnum_all_wav_ok = 0;\nnum_all_wav_err = 0;\n\n\n#### 读取 res 文件 组成 spk_1:spk_2 对 \n## 
KASR_137_2_200S0_Speaker0015_Session0_101\tSpeaker0015\tSpeaker0106\tSpeaker0072\tSpeaker0015\r\n#### speaker  spk1_num spk2_num spk_3_num ok?\r\nnum_spk = 0; ## index of the spk model whose results are currently being processed\r\n\r\nspk_max = [\"xxx\", 'xxx', 'xxx']; ## names of spk1 spk2 spk3\r\nnum_max = [0,0,0]; ## counts/percentages for spk1 spk2 spk3\r\nnum_all = 0; ## total number of test utterances per speaker, num=150\r\nspk_f = \"xxx\"; ## previous speaker\r\n\r\nfor line in fp_res:\r\n\r\n    line = line[:-1]\r\n    list_line = line.split(\"\\t\");\r\n    if len(list_line)<5:\r\n        print(\"ERROR: result format err: %s\"%(line));\r\n        break;\r\n\r\n    spk0 = list_line[4]\r\n\r\n    ### the current spk differs from the previous line,\r\n    ### so start tallying the similar spk1 spk2 spk3 of the previous spk0\r\n    if spk0 != spk_f:\r\n        ## counting for the previous block is done; pick the top-3 spks and output them\r\n        for key in map_lab.keys():\r\n            if map_lab[key] > num_max[0]:\r\n                num_max[2] = num_max[1];\r\n                spk_max[2] = spk_max[1];\r\n                num_max[1] = num_max[0];\r\n                spk_max[1] = spk_max[0];\r\n                num_max[0] = map_lab[key];\r\n                spk_max[0] = key;\r\n            elif map_lab[key] > num_max[1]:\r\n                num_max[2] = num_max[1];\r\n                spk_max[2] = spk_max[1];\r\n                num_max[1] = map_lab[key];\r\n                spk_max[1] = key;\r\n            elif map_lab[key] > num_max[2]:\r\n                num_max[2] = map_lab[key];\r\n                spk_max[2] = key;\r\n\r\n        #print num_max;\r\n        ### drop the 1st and the last line (blocks with too few hits)\r\n        num_max_all = num_max[0] + num_max[1] + num_max[2];\r\n        if num_max_all > 3:\r\n            rate2 = float(num_max[1]) / float(num_max[0]);\r\n            rate3 = float(num_max[2]) / float(num_max[0]);\r\n\r\n            fp_log.write(\"%s\\t%.4f\\n\"%(spk_max[1], rate2));\r\n            fp_log.write(\"%s\\t%.4f\\n\"%(spk_max[2], rate3));\r\n\r\n\r\n\r\n\r\n            ### first candidate is not itself: +5; second candidate above 0.4: +4\r\n            ### second candidate above 0.33: +3; second candidate above 0.25: +2\r\n            if spk_max[0].split(\"+\")[0] != spk_max[0].split(\"+\")[1]:\r\n                new_pair = spk_max[0].split(\"+\")[1] + \"+\" + spk_max[0].split(\"+\")[0];\r\n\r\n                if map_final.has_key(new_pair):\r\n                    map_final[new_pair] = map_final[new_pair]+5;\r\n                else:\r\n                    map_final[spk_max[0]] = 5;\r\n            else:\r\n                ### ratio of the second candidate to the first: 0.8 0.7 0.6 0.5\r\n                new_pair = spk_max[1].split(\"+\")[1] + \"+\" + spk_max[1].split(\"+\")[0];\r\n                if rate2 > 0.8:\r\n                    if map_final.has_key(new_pair):\r\n                        map_final[new_pair] = map_final[new_pair] + 4;\r\n                    else:\r\n                        map_final[spk_max[1]] = 5;\r\n                elif rate2 > 0.7:\r\n                    if map_final.has_key(new_pair):\r\n                        map_final[new_pair] = map_final[new_pair] + 3;\r\n                    else:\r\n                        map_final[spk_max[1]] = 4;\r\n                elif rate2 > 0.6:\r\n                    if map_final.has_key(new_pair):\r\n                        map_final[new_pair] = map_final[new_pair] + 2;\r\n                    else:\r\n                        map_final[spk_max[1]] = 3;\r\n                elif rate2 > 0.5:\r\n                    if map_final.has_key(new_pair):\r\n                        map_final[new_pair] = map_final[new_pair] + 1;\r\n                    else:\r\n                        map_final[spk_max[1]] = 2;\r\n                elif rate2 > 0.4:\r\n                    if map_final.has_key(new_pair):\r\n                        map_final[new_pair] = map_final[new_pair] + 1;\r\n                    else:\r\n                        map_final[spk_max[1]] = 1;\r\n\r\n            ### ratio of the third candidate to the first\r\n            new_pair = spk_max[2].split(\"+\")[1] + \"+\" + spk_max[2].split(\"+\")[0];\r\n            if rate3 > 0.8:\r\n                if map_final.has_key(new_pair):\r\n                    map_final[new_pair] = map_final[new_pair] + 4;\r\n                else:\r\n                    map_final[spk_max[2]] = 5;\r\n            elif rate3 > 0.7:\r\n                if map_final.has_key(new_pair):\r\n                    map_final[new_pair] = map_final[new_pair] + 3;\r\n                else:\r\n                    map_final[spk_max[2]] = 4;\r\n            elif rate3 > 0.6:\r\n                if map_final.has_key(new_pair):\r\n                    map_final[new_pair] = map_final[new_pair] + 2;\r\n                else:\r\n                    map_final[spk_max[2]] = 3;\r\n            elif rate3 > 0.5:\r\n                if map_final.has_key(new_pair):\r\n                    map_final[new_pair] = map_final[new_pair] + 1;\r\n                else:\r\n                    map_final[spk_max[2]] = 2;\r\n            elif rate3 > 0.4:\r\n                if map_final.has_key(new_pair):\r\n                    map_final[new_pair] = map_final[new_pair] + 1;\r\n                else:\r\n                    map_final[spk_max[2]] = 1;\r\n\r\n\r\n\r\n            #fp_log.write(\"%s\\t%d\\n\"%(spk_max[0], num_max[0]) );\r\n            #fp_log.write(\"%s\\t%d\\n\"%(spk_max[1], num_max[1]) );\r\n            #fp_log.write(\"%s\\t%d\\n\"%(spk_max[2], 
num_max[2]) );\r\n\r\n    #print line;\r\n    map_lab = {}\r\n    spk_f = spk0;\r\n    spk_max = [\"xxx\", 'xxx', 'xxx'];\r\n    num_max = [0, 0, 0];\r\n    num_all = 0;\r\n\r\n    ## tally the spk 1-3 counts for this line\r\n    for ii in range(1,4):\r\n\r\n        spk1 = list_line[ii]\r\n        spk01 = spk0 + \"+\" + spk1;\r\n\r\n        if map_lab.has_key(spk01):\r\n            map_lab[spk01] += 1;\r\n        else:\r\n            map_lab[spk01] = 1;\r\n\r\n\r\n### merge spk1--spk2 and spk2--spk1 pairs in map_final\r\n##for key in map_final.keys():\r\n\r\n### final error output\r\nfor key in map_final.keys():\r\n    #print (\"%s\\t%s\"%(key,map_final[key]));\r\n    fp_out_err.write(\"%s\\t%s\\n\"%(key, map_final[key]));\r\n\r\nfp_res.close();\r\nfp_log.close();\r\nfp_out_err.close();\r\n","sub_path":"run_si/eval_sr_judge.py","file_name":"eval_sr_judge.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"154370644","text":"'''\nCreated on 06-Jan-2019\n\n@author: bkher\n'''\n\n\nnum = int(input('enter number: '))\ntemp = num\nfact = 1\nif num >= 1:\n    while num > 0:\n        fact = fact * num\n        num = num - 1\n    print('factorial of', temp, 'is', fact)\n\nelse:\n    print('enter valid number')","sub_path":"PythneTest/test1/Factorial.py","file_name":"Factorial.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"211010515","text":"# -*- coding: utf-8 -*-\n\"\"\"My Connecting Module\n\n\"\"\"\n__author__ = 'xero'\n__email__ = \"volleyp7689@gmail.com\"\n\nimport urllib\nimport urllib2\nimport time\nimport random\n\n\ndef get_post_request(url, req_arg, delay=1):\n    \"\"\"Send a post request, and return the string of its response.\n\n    Args:\n        url (string): URL of target web page.\n        req_arg (dict): Arguments which are going to be passed by the POST request.\n        delay (int): Stop the url request temporarily, and retry to connect to the server.\n\n    Returns:\n        response.read() (string)\n    \"\"\"\n    while True:\n        try:\n            data = urllib.urlencode(req_arg)\n            request = urllib2.Request(url, data)\n            response = urllib2.urlopen(request)\n            return response.read()\n        except urllib2.URLError:\n            time.sleep(random.random() * 10)\n            continue","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"7637576","text":"\"\"\"\nSame functionality as V5, only implemented through the Request class\nUses the parse module to simulate a POST request\nAnalyzing the Baidu dictionary\nAnalysis steps:\n1. Open F12\n2. Try typing girl and notice that every keystroke fires a request\n3. The request address is http://fanyi.baidu.com/sug\n4. Use Network-ALL to find the url of the file\n5.\n\"\"\"\n\nimport urllib.request, urllib.parse\n\nimport json\n\nbaseurl = 'https://fanyi.baidu.com/sug'\n\n# the data used to simulate the form must be a dict\n# **kw: keyword arguments; any extra values are collected into kw as a dict\n\ndata = {\n    'kw': 'girl'\n}\n    # 'girl' is the English text to translate; it should come from the user but is hard-coded here\n\n# the parse module is needed to encode data\n\ndata = urllib.parse.urlencode(data).encode(\"utf-8\")\n\nprint(type(data))\n\nprint(data)\n\n# we need to build a request header that contains at least the length of the submitted data\n# the headers passed to Request must be a dict\n\nheaders = {\n    # since this is a POST, it should at least contain the Content-Length field\n    'Content-Length':len(data)\n}\n\nreq = urllib.request.Request(url=baseurl, data=data, headers=headers)\n\n# since a Request instance has been built, all request information can be encapsulated in it\nrsp = urllib.request.urlopen(req)\n\njson_data = rsp.read().decode('utf-8')\n\n# json.loads decodes JSON data and returns the corresponding Python data type\njson_data = json.loads(json_data)\nprint(json_data)\n\nfor item in json_data['data']:\n    
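# each suggestion in 'data' pairs the matched word 'k' with its meaning string 'v'\n    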
print(item['k'],'...',item['v'])","sub_path":"V6.py","file_name":"V6.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"380354878","text":"\"\"\"Views for billing app.\"\"\"\n\nimport re\n\nfrom rest_framework import mixins, viewsets\n\nfrom billing.models import Bill, CallRecord, Tariff\nfrom billing.serializers import BillSerializer, CallRecordSerializer, TariffSerializer\n\n\nclass CallRecordViewSet(\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n viewsets.GenericViewSet,\n):\n \"\"\"Call record viewset.\n\n list:\n Return a list with all existing call records.\n\n retrieve:\n Return the given call record.\n\n create:\n Create a new call record.\n \"\"\"\n\n queryset = CallRecord.objects.all()\n serializer_class = CallRecordSerializer\n\n def create(self, request):\n \"\"\"Create a new call record.\n\n Args:\n request: HTTP request object holding request data and information.\n\n Returns:\n HTTP response with the new call record object or a list of errors.\n \"\"\"\n\n source = request.data.get(\"source\", None)\n\n if source and type(source) == str:\n request.data[\"source\"] = {\"number\": source}\n\n destination = request.data.get(\"destination\", None)\n\n if destination and type(destination) == str:\n request.data[\"destination\"] = {\"number\": destination}\n\n return super(CallRecordViewSet, self).create(request)\n\n\nclass TariffViewSet(\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n viewsets.GenericViewSet,\n):\n \"\"\"Tariff viewset.\n\n list:\n Return a list with all existing tariffs.\n\n retrieve:\n Return the given tariff.\n\n create:\n Create a new tariff.\n \"\"\"\n\n queryset = Tariff.objects.all()\n serializer_class = TariffSerializer\n\n\nclass BillViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"Bill viewset.\n\n list:\n Return a list with all existing bills.\n\n retrieve:\n Return the given bill.\n \"\"\"\n\n serializer_class = BillSerializer\n lookup_field = \"subscriber__number\"\n\n def get_queryset(self):\n \"\"\"Return a filtered bill queryset.\n\n Returns:\n A bill queryset.\n \"\"\"\n queryset = Bill.objects.all()\n period = self.request.query_params.get(\"period\", \"\")\n matches = re.match(r\"^(\\d{4})-(\\d{1,2})$\", period)\n\n if matches:\n queryset = queryset.filter(year=int(matches.group(1)))\n queryset = queryset.filter(month=int(matches.group(2)))\n\n return queryset\n","sub_path":"billing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"285602416","text":"from testtools import TestCase\nfrom testtools.matchers import Equals, Is, MatchesListwise, raises\n\nfrom eliottree.test.tasks import (\n action_task, action_task_end, message_task, nested_action_task)\nfrom eliottree.tree import Tree, _TaskNode, task_name\n\n\nclass TaskNameTests(TestCase):\n \"\"\"\n Tests for ``eliottree.tree.task_name``.\n \"\"\"\n def test_none(self):\n \"\"\"\n Cannot create a task name for ``None``.\n \"\"\"\n self.assertThat(\n lambda: task_name(None),\n raises(ValueError))\n\n def test_message_type(self):\n \"\"\"\n If the task includes a ``message_type`` key use it to construct the\n name.\n \"\"\"\n self.assertThat(\n task_name(message_task),\n Equals(u'twisted:log@1/None'))\n\n def test_no_message_type(self):\n \"\"\"\n If the task does not include a ``message_type`` key use 
the\n ``action_type`` and ``action_status`` keys to construct a name.\n \"\"\"\n self.assertThat(\n task_name(action_task),\n Equals(u'app:action@1/started'))\n\n def test_levels(self):\n \"\"\"\n Include the task level in the task name.\n \"\"\"\n self.assertThat(\n task_name(nested_action_task),\n Equals(u'app:action:nested@1,1/started'))\n\n\nclass TaskNodeTests(TestCase):\n \"\"\"\n Tests for ``eliottree.tree._TaskNode``.\n \"\"\"\n def test_repr_root(self):\n \"\"\"\n Representation of a root node.\n \"\"\"\n node = _TaskNode(task=None, name=u'foo')\n self.assertThat(\n repr(node),\n Equals('<_TaskNode root foo children=0>'))\n\n def test_repr(self):\n \"\"\"\n Representation of a normal task node.\n \"\"\"\n node = _TaskNode(task=action_task)\n self.assertThat(\n repr(node),\n Equals('<_TaskNode f3a32bb3-ea6b-457c-aa99-08a3d0491ab4 '\n 'app:action@1/started children=0>'))\n\n def test_repr_childen(self):\n \"\"\"\n Representation of a task node with children.\n \"\"\"\n node = _TaskNode(task=None, name=u'foo')\n node.add_child(_TaskNode(task=action_task))\n self.assertThat(\n repr(node),\n Equals('<_TaskNode root foo children=1>'))\n\n def test_first_child(self):\n \"\"\"\n ``_TaskNode.first_child`` returns the first child node that was added.\n \"\"\"\n node = _TaskNode(task=None, name=u'foo')\n child = _TaskNode(task=action_task)\n child2 = _TaskNode(task=action_task_end)\n node.add_child(child2)\n node.add_child(child)\n self.assertThat(\n node.first_child(),\n Equals(child2))\n\n def test_no_children(self):\n \"\"\"\n ``_TaskNode.children`` returns an empty list for a node with no\n children.\n \"\"\"\n node = _TaskNode(task=None, name=u'foo')\n self.assertThat(\n node.children(),\n Equals([]))\n\n def test_children(self):\n \"\"\"\n ``_TaskNode.children`` returns an list of child nodes sorted by their\n level regardless of the order they were added.\n \"\"\"\n node = _TaskNode(task=None, name=u'foo')\n child = _TaskNode(task=action_task)\n child2 = _TaskNode(task=action_task_end)\n node.add_child(child2)\n node.add_child(child)\n self.assertThat(\n node.children(),\n Equals([child, child2]))\n\n def test_nested_children(self):\n \"\"\"\n ``_TaskNode.children`` does not include grandchildren.\n \"\"\"\n node = _TaskNode(task=None, name=u'foo')\n child = _TaskNode(task=action_task)\n node.add_child(child)\n child2 = _TaskNode(task=nested_action_task)\n node.add_child(child2)\n self.assertThat(\n node.children(),\n Equals([child]))\n self.assertThat(\n child.children(),\n Equals([child2]))\n\n\nclass TreeTests(TestCase):\n \"\"\"\n Tests for ``eliottree.tree.Tree``.\n \"\"\"\n def test_initial(self):\n \"\"\"\n The initial state of a tree is always empty.\n \"\"\"\n tree = Tree()\n self.assertThat(tree.nodes(), Equals([]))\n\n def test_merge_tasks(self):\n \"\"\"\n Merge tasks into the tree and retrieve an list of key-node\n pairs ordered by task timestamp.\n \"\"\"\n tree = Tree()\n matches = tree.merge_tasks([message_task, action_task])\n self.expectThat(matches, Is(None))\n keys, nodes = zip(*tree.nodes())\n self.expectThat(\n list(keys),\n Equals(['cdeb220d-7605-4d5f-8341-1a170222e308',\n 'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4']))\n self.assertThat(\n [c.task for n in nodes for c in n.children()],\n MatchesListwise([Equals(message_task),\n Equals(action_task)]))\n\n def test_merge_nested_tasks(self):\n \"\"\"\n Merge nested tasks into the tree and retrieve an list of key-node\n pairs ordered by task timestamp.\n \"\"\"\n tree = Tree()\n matches = 
tree.merge_tasks([action_task_end, action_task])\n self.expectThat(matches, Is(None))\n keys, nodes = zip(*tree.nodes())\n self.expectThat(\n list(keys),\n Equals(['f3a32bb3-ea6b-457c-aa99-08a3d0491ab4']))\n self.assertThat(\n [c.task for n in nodes for c in n.children()],\n MatchesListwise([Equals(action_task),\n Equals(action_task_end)]))\n\n def test_merge_tasks_filtered(self):\n \"\"\"\n Merge tasks into the tree with a filter function, generating a set of\n matches that can be used to prune the tree.\n \"\"\"\n tree = Tree()\n filters = [lambda task: task.get(u'action_type') == u'app:action']\n matches = tree.merge_tasks([action_task, message_task], filters)\n keys, nodes = zip(*tree.nodes(matches))\n self.expectThat(\n list(keys),\n Equals(['f3a32bb3-ea6b-457c-aa99-08a3d0491ab4']))\n self.expectThat(\n list(keys),\n Equals(list(matches)))\n self.assertThat(\n [c.task for n in nodes for c in n.children()],\n MatchesListwise([Equals(action_task)]))\n","sub_path":"eliottree/test/test_tree.py","file_name":"test_tree.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"268277785","text":"class PlayingBadmintonFee:\n def __init__(self, old=0, is_women=False, weekday='mon'):\n self.old = old\n self.is_women = is_women\n self.weekday = weekday\n\n def calculate_fee(self):\n fee = 1800\n if self.old < 0 or self.old > 120:\n return -1\n if self.weekday == 'tues':\n return 1200\n else:\n if self.old < 13:\n fee = 900\n elif self.is_women and self.weekday == 'fri':\n fee = 1400\n elif self.old > 65:\n fee = 1600\n return fee\n","sub_path":"dungnt/ex_8/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"373272215","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 11 15:11:33 2019\n\n@author: mdhvk\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('D:\\Machine Learning A-Z Template Folder\\Part 3 - Classification\\Section 15 - K-Nearest Neighbors (K-NN)\\Social_Network_Ads.csv')\n\nX = dataset.iloc[:,[2,3]].values\nY = dataset.iloc[:,4].values\n\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.25,random_state=0)\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n#Fitting into Training set\nfrom sklearn.neighbors import KNeighborsClassifier\n#by default it have 5 neighbor and we use euclidean distance which is under root x1-x2 square and y1-y2 square\nclassifier = KNeighborsClassifier(n_neighbors= 5,metric = 'minkowski',p=2)\nclassifier.fit(X_train,Y_train)\n\nY_pred = classifier.predict(X_test)\n\n#Confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y_test,Y_pred)\n\n#Visulation the trainig set\nfrom matplotlib.colors import ListedColormap\nX_set, Y_set = X_train, Y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in 
enumerate(np.unique(Y_set)):\n plt.scatter(X_set[Y_set == j, 0], X_set[Y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('K-NN (Test set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated SalarY')\nplt.legend()\nplt.show()\n\n\n#Visulatoion of test set\n\nfrom matplotlib.colors import ListedColormap\nX_set, Y_set = X_test, Y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(Y_set)):\n plt.scatter(X_set[Y_set == j, 0], X_set[Y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('K-NN(Train set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated SalarY')\nplt.legend()\nplt.show()\n\n\n\n\n\n","sub_path":"tutorial data science/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"85677029","text":"from astropy.io import fits\nimport numpy as np\n\n#Save arrays x and y into table of two columns\ndef save_fits(x, y, filename, namex=\"wavelength\", namey=\"flux\"):\n \n #Create columns\n col1 = fits.Column(name=namex, format=\"E\",array=x)\n col2 = fits.Column(name=namey, format=\"E\",array=y)\n\n #Put columns together to create table from\n cols = fits.ColDefs([col1,col2])\n\n #Create table from columns\n tbhdu = fits.BinTableHDU.from_columns(cols)\n\n #Save file\n tbhdu.writeto(filename)\n\n#Open fits file and returns two arrays\ndef open_fits(filename):\n\n #Open fits file\n hdulist = fits.open(filename)\n\n #Extract data - looks like [(x1,y1),(x2,y2)...(xn,yn)] \n data = hdulist[1].data\n\n #Turn data into columns respectively \n x = data.field(0)\n y = data.field(1)\n\n return x,y\n","sub_path":"fits_functions.py","file_name":"fits_functions.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"644129188","text":"#! 
Python 3.6.4\n# Author: Alejandro Bautista R.\n\nfrom openpyxl import load_workbook\nfrom datetime import datetime\nimport openpyxl_styles\n\n\ndef write_to_workbook(*args):\n print(\"Opening existing workbook\")\n # do not take into account the headers, so start in row 2\n row_cursor = 2\n\n # Load the existing workbook\n workbook = load_workbook(\n \"C:\\\\Temp\\\\PythonOutputFiles\\\\Dump_\" + str(datetime.now()).split(' ')[0] + \".xlsx\")\n\n # Create a new worksheet\n worksheet = workbook.create_sheet()\n\n worksheet.title = \"Groups_per_Site\"\n\n # this line affects the first row\n worksheet.row_dimensions[1].height = 20\n\n # Titles for each column\n worksheet['A1'] = \"Site\"\n worksheet.column_dimensions['A'].width = 40\n worksheet.cell(row=1, column=1).font = openpyxl_styles.openpyxl_styles()\n\n worksheet['B1'] = \"User\"\n worksheet.column_dimensions['B'].width = 40\n worksheet.cell(row=1, column=2).font = openpyxl_styles.openpyxl_styles()\n\n worksheet['C1'] = \"Group\"\n worksheet.column_dimensions['C'].width = 40\n worksheet.cell(row=1, column=3).font = openpyxl_styles.openpyxl_styles()\n\n worksheet['D1'] = \"Created\"\n worksheet.column_dimensions['D'].width = 40\n worksheet.cell(row=1, column=4).font = openpyxl_styles.openpyxl_styles()\n\n # Insert values in each column #\n for index, value in enumerate(args[0]):\n try:\n worksheet.cell(row=row_cursor, column=1).value = value[0]\n worksheet.cell(row=row_cursor, column=2).value = value[1]\n worksheet.cell(row=row_cursor, column=3).value = value[2]\n worksheet.cell(row=row_cursor, column=4).value = value[3]\n except:\n print(\"Error in line %s\\n data=%s\" % (index, value))\n\n # Counter for going to the next row\n row_cursor = row_cursor + 1\n\n # save the workbook\n workbook.save(\n \"C:\\\\Temp\\\\PythonOutputFiles\\\\Dump_\" + str(datetime.now()).split(' ')[0] + \".xlsx\")\n print(\"The results in the workbook have been saved.\")\n\n","sub_path":"Python scripts/S_Dump/SyteLineDump_Groups_per_Site.py","file_name":"SyteLineDump_Groups_per_Site.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"370796829","text":"'''\nhttps://leetcode.com/problems/decode-ways/description/\n用动态规划, 自己没解出来\n\n参考\n1. https://blog.csdn.net/u011095253/article/details/9248109\n2. https://segmentfault.com/a/1190000003813921\n参考1, 2中都解释到, dp数组的计算规则如下面, 但是不明白为什么:\n\n1. 如果dp[i]所对应的的单个字符可以解码,那么dp[i]就包括前dp[i-1]位所积累的组合数 dp[i] = dp[i-1]\n\n2. 
如果不仅dp[i]所对应的的单个字符可以解码,dp[i-1] - dp[i],两个字符组成的也可以解码,那么不仅包括dp[i-1]积累的组合数,也包括dp[i-2]位积累的组合数 dp[i] = dp[i-1] + dp[i-2]\n'''\n\n\ndef decode_ways(n):\n len_n = len(n)\n if n == '0' or len_n == 0:\n return 0\n if len_n == 1:\n return 1\n if n[0] == '0':\n return 0\n dp = {0: 1, 1: 1}\n for index in range(2, len_n + 1):\n dp[index] = 0\n if 10 <= int(n[index - 2: index]) <= 26:\n dp[index] += dp[index - 2]\n if int(n[index - 1: index]) != 0:\n dp[index] += dp[index - 1]\n return dp[len_n]\n\n\ndef main():\n ds = ['00', '10', '', '0', '12', '121', '12192', '9124', '121212', '12121']\n for d in ds:\n print(decode_ways(d))\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"medium/decode_ways.py","file_name":"decode_ways.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629399976","text":"__author__ = 'PCW-MacBookProRet'\nfrom PyQt5 import QtCore\nfrom PyQt5.QtGui import QIcon, QKeySequence, QFont\nfrom PyQt5.QtWidgets import (QAction, QApplication, QFileDialog, QMainWindow,\n QMessageBox, QTextEdit, QDialog, QMenuBar, QMenu)\nfrom PyQt5.QtPrintSupport import QPrintDialog, QPrinter\n\n\nclass ui_TextEditor(object):\n def setupUi(self):\n\n self.curFile = ''\n\n self.textEdit = QTextEdit()\n self.setCentralWidget(self.textEdit)\n\n self.createActions()\n self.createMenus()\n self.createToolBars()\n self.createStatusBar()\n\n self.readSettings()\n\n self.textEdit.document().contentsChanged.connect(self.documentWasModified)\n\n self.setCurrentFile('')\n\n def closeEvent(self, event):\n if self.maybeSave():\n self.writeSettings()\n event.accept()\n else:\n event.ignore()\n\n def newFile(self):\n if self.maybeSave():\n self.textEdit.clear()\n self.setCurrentFile('')\n\n def open(self):\n if self.maybeSave():\n fileName, _ = QFileDialog.getOpenFileName(self)\n if fileName:\n self.loadFile(fileName)\n\n def print_(self):\n document = self.textEdit.document()\n printer = QPrinter()\n\n dlg = QPrintDialog(printer, self)\n if dlg.exec_() != QDialog.Accepted:\n return\n\n document.print_(printer)\n\n self.statusBar().showMessage(\"Ready\", 2000)\n\n def save(self):\n if self.curFile:\n return self.saveFile(self.curFile)\n\n return self.saveAs()\n\n def saveAs(self):\n fileName, _ = QFileDialog.getSaveFileName(self)\n if fileName:\n return self.saveFile(fileName)\n\n return False\n\n def about(self):\n QMessageBox.about(self, \"About VGenes Text Editor\",\n \"The VGenes Text Editor allows \"\n \"you to edit, save, and print documents \"\n \"generated by VGenes.\")\n\n def IncreaseFont(self):\n FontIs = self.textEdit.currentFont()\n font = QFont(FontIs)\n\n FontSize = int(font.pointSize())\n FontFam = font.family()\n if FontSize < 36:\n FontSize += 1\n font.setPointSize(FontSize)\n font.setFamily(FontFam)\n\n self.textEdit.setFont(font)\n\n def DecreaseFont(self):\n FontIs = self.textEdit.currentFont()\n font = QFont(FontIs)\n\n FontSize = int(font.pointSize())\n FontFam = font.family()\n if FontSize > 6:\n FontSize -= 1\n font.setPointSize(FontSize)\n font.setFamily(FontFam)\n\n self.textEdit.setFont(font)\n\n def documentWasModified(self):\n self.setWindowModified(self.textEdit.document().isModified())\n\n def createActions(self):\n self.newAct = QAction(QIcon(':/PNG-Icons/page.png'), \"&New\", self,\n shortcut=QKeySequence.New, statusTip=\"Create a new file\",\n triggered=self.newFile)\n\n self.openAct = QAction(QIcon(':/PNG-Icons/folder.png'), \"&Open...\", self,\n shortcut=QKeySequence.Open, 
statusTip=\"Open an existing file\",\n triggered=self.open)\n\n # self.closeAct = QAction(\"Close\", self, shortcut=QKeySequence.Close,\n # statusTip=\"Close window\", triggered=self.close)\n\n self.closeAct = QAction(\"&Close\", self,\n shortcut=QKeySequence.Close,\n statusTip=\"Close window\", triggered=self.close)\n\n self.saveAct = QAction(QIcon(':/PNG-Icons/SaveIcon.png'), \"&Save\", self,\n shortcut=QKeySequence.Save,\n statusTip=\"Save the document to disk\", triggered=self.save)\n\n self.saveAsAct = QAction(\"Save &As...\", self,\n shortcut=QKeySequence.SaveAs,\n statusTip=\"Save the document under a new name\",\n triggered=self.saveAs)\n\n self.exitAct = QAction(\"E&xit\", self, shortcut=\"Ctrl+Q\",\n statusTip=\"Exit VGenes Text Editor\", triggered=self.close)\n\n self.cutAct = QAction(QIcon(':/PNG-Icons/scissor.png'), \"Cu&t\", self,\n shortcut=QKeySequence.Cut,\n statusTip=\"Cut the current selection's contents to the clipboard\",\n triggered=self.textEdit.cut)\n\n self.IncreaseAct = QAction(QIcon(':/PNG-Icons/plus.png'), \"&Increase\", self,\n statusTip=\"Increase font size\",\n triggered=self.IncreaseFont)\n\n self.DecreaseAct = QAction(QIcon(':/PNG-Icons/minus.png'), \"&Decrease\", self,\n statusTip=\"Decrease font size\",\n triggered=self.DecreaseFont)\n\n self.printAct = QAction(QIcon(':/PNG-Icons/print.png'), \"&Print...\", self,\n shortcut=QKeySequence.Print,\n statusTip=\"Print the current form letter\",\n triggered=self.print_)\n\n self.copyAct = QAction(QIcon(':/PNG-Icons/pages.png'), \"&Copy\", self,\n shortcut=QKeySequence.Copy,\n statusTip=\"Copy the current selection's contents to the clipboard\",\n triggered=self.textEdit.copy)\n\n self.pasteAct = QAction(QIcon(':/PNG-Icons/Paste.png'), \"&Paste\", self,\n shortcut=QKeySequence.Paste,\n statusTip=\"Paste the clipboard's contents into the current selection\",\n triggered=self.textEdit.paste)\n\n self.aboutAct = QAction(\"&About\", self,\n statusTip=\"Show the application's About box\",\n triggered=self.about)\n\n # self.aboutQtAct = QAction(\"About &Qt\", self,\n # statusTip=\"Show the Qt library's About box\",\n # triggered=QApplication.instance().aboutQt)\n\n self.cutAct.setEnabled(False)\n self.copyAct.setEnabled(False)\n self.textEdit.copyAvailable.connect(self.cutAct.setEnabled)\n self.textEdit.copyAvailable.connect(self.copyAct.setEnabled)\n\n def createMenus(self):\n\n self.menubar = QMenuBar(self)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1029, 22))\n self.menubar.setDefaultUp(False)\n self.menubar.setNativeMenuBar(False)\n self.menubar.setObjectName(\"menubar\")\n self.menuFile = QMenu(self.menubar)\n\n\n self.setMenuBar(self.menubar)\n\n\n\n self.fileMenu = self.menuBar().addMenu(\"&File\")\n self.fileMenu.addAction(self.newAct)\n self.fileMenu.addAction(self.openAct)\n self.fileMenu.addAction(self.closeAct)\n self.fileMenu.addAction(self.saveAct)\n self.fileMenu.addAction(self.saveAsAct)\n self.fileMenu.addAction(self.printAct)\n self.fileMenu.addSeparator();\n self.fileMenu.addAction(self.exitAct)\n\n self.editMenu = self.menuBar().addMenu(\"&Edit\")\n self.editMenu.addAction(self.cutAct)\n self.editMenu.addAction(self.copyAct)\n self.editMenu.addAction(self.pasteAct)\n\n self.menuBar().addSeparator()\n\n self.helpMenu = self.menuBar().addMenu(\"&Help\")\n self.helpMenu.addAction(self.aboutAct)\n # self.helpMenu.addAction(self.aboutQtAct)\n\n def createToolBars(self):\n self.fileToolBar = self.addToolBar(\"File\")\n self.fileToolBar.addAction(self.newAct)\n 
self.fileToolBar.addAction(self.openAct)\n # self.fileToolBar.addAction(self.closeACT)\n self.fileToolBar.addAction(self.saveAct)\n self.fileToolBar.addAction(self.printAct)\n\n self.editToolBar = self.addToolBar(\"Edit\")\n self.editToolBar.addAction(self.cutAct)\n self.editToolBar.addAction(self.copyAct)\n self.editToolBar.addAction(self.pasteAct)\n\n self.FontSizeToolBar = self.addToolBar(\"FontSize\")\n self.FontSizeToolBar.addAction(self.IncreaseAct)\n self.FontSizeToolBar.addAction(self.DecreaseAct)\n\n def createStatusBar(self):\n self.statusBar().showMessage(\"Ready\")\n\n def readSettings(self):\n settings = QtCore.QSettings(\"Trolltech\", \"VGenes Text Editor\")\n pos = settings.value(\"pos\", QtCore.QPoint(200, 200))\n size = settings.value(\"size\", QtCore.QSize(400, 400))\n self.resize(size)\n self.move(pos)\n\n def writeSettings(self):\n settings = QtCore.QSettings(\"Trolltech\", \"VGenes Text Editor\")\n settings.setValue(\"pos\", self.pos())\n settings.setValue(\"size\", self.size())\n\n def maybeSave(self):\n if self.textEdit.document().isModified():\n ret = QMessageBox.warning(self, \"VGenes Text Editor\",\n \"The document has been modified.\\nDo you want to save \"\n \"your changes?\",\n QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n\n if ret == QMessageBox.Save:\n return self.save()\n\n if ret == QMessageBox.Cancel:\n return False\n\n return True\n\n def loadFile(self, fileName):\n file = QtCore.QFile(fileName)\n if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):\n QMessageBox.warning(self, \"VGenes Text Editor\",\n \"Cannot read file %s:\\n%s.\" % (fileName, file.errorString()))\n return\n\n inf = QtCore.QTextStream(file)\n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n self.textEdit.setPlainText(inf.readAll())\n QApplication.restoreOverrideCursor()\n\n self.setCurrentFile(fileName)\n self.statusBar().showMessage(\"File loaded\", 2000)\n\n def saveFile(self, fileName):\n file = QtCore.QFile(fileName)\n if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):\n QMessageBox.warning(self, \"VGenes Text Editor\",\n \"Cannot write file %s:\\n%s.\" % (fileName, file.errorString()))\n return False\n\n outf = QtCore.QTextStream(file)\n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n outf << self.textEdit.toPlainText()\n QApplication.restoreOverrideCursor()\n\n self.setCurrentFile(fileName);\n self.statusBar().showMessage(\"File saved\", 2000)\n return True\n\n def setCurrentFile(self, fileName):\n self.curFile = fileName\n self.textEdit.document().setModified(False)\n self.setWindowModified(False)\n\n if self.curFile:\n shownName = self.strippedName(self.curFile)\n else:\n shownName = 'untitled.txt'\n\n self.setWindowTitle(\"%s[*] - VGenes Text Editor\" % shownName)\n\n def strippedName(self, fullFileName):\n return QtCore.QFileInfo(fullFileName).fileName()","sub_path":"ui_VGenesTextEdit.py","file_name":"ui_VGenesTextEdit.py","file_ext":"py","file_size_in_byte":10528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"187878989","text":"\nimport pandas as pd\nimport json\n\n\n\ndf_genre_file = pd.read_excel(r'../input/Utterances.xlsx', sheet_name='genres_parts')\n#print(df_genre_file)\nlist_genre_parts = df_genre_file.values.tolist()\n#print(list_genre_parts)\n\ndf_unique_genres = pd.read_excel(r'../input/Utterances.xlsx', sheet_name='unique_genres')\n#print(df_unique_genres)\nlist_unique_genres = df_unique_genres.values.tolist()\n#print(list_unique_genres)\n\naction = 
\"retrieve_movie_by_genre\"\nfinal_utterance = []\nshort_list = []\nmy_NaN = float(\"NaN\")\nn = \"n \"\nmovie = 'movie'\nfilm = 'film'\npicture = 'picture'\nfor genre, genre1 in list_unique_genres:\n genre = str(genre).lower()\n genre1 = str(genre1).lower()\n for part1, part3 in list_genre_parts:\n part1 = str(part1).lower()\n part3 = str(part3).lower()\n if (genre[0] in ['a','e','i','o','u','h'] and part1[-1] == \"a\"):\n n = \"n \"\n else:\n n = \" \"\n if pd.isnull(part3) or part3 == \"nan\":\n print(str(part1) + \" \" + str(genre).strip('[]\\''))\n value_movie = str(part1) + \" \" + genre1\n else:\n print(str(part1) + n + str(genre).strip('[]\\'') + str(part3))\n value_movie = str(part1) + n + genre1 + str(part3)\n short_list = [genre, value_movie, \"\", action]\n final_utterance.append(short_list)\n\n value_film = value_movie.replace(movie, film)\n short_list = [genre, value_film, \"\", action]\n final_utterance.append(short_list)\n\n value_picture = value_movie.replace(movie, picture)\n short_list = [genre, value_picture, \"\", action]\n final_utterance.append(short_list)\n\nvalue_list = []\nprevious_genre = \"\"\nobject_dict = {}\nbig_list = []\nfor genre, value, target, action in final_utterance:\n if previous_genre == \"\":\n previous_genre = genre\n genre1, value1, target1, action1 = genre, value, target, action\n if previous_genre == genre:\n value_list.append(value)\n else:\n object_dict = {\n \"intent\": genre1,\n \"userinputs\": value_list,\n \"responses\": target1,\n \"action\": action1\n }\n big_list.append(object_dict)\n value_list = []\n previous_genre = genre\n genre1, value1, target1, action1 = genre, value, target, action\n\nfinal_obj = {\n\"intents\": big_list\n}\n\n\nwith open('../output/output.json', 'w') as outfile:\n json.dump(final_obj, outfile, sort_keys=False, indent=4)\n\nprint('Debuguer')","sub_path":"src/create_genre.py","file_name":"create_genre.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"250893998","text":"import pytesseract\nfrom PIL import Image\n\n\nclass ImageParser:\n \n def __init__(self, filename, tess_path=None):\n \"\"\"\n\n Parameters\n ----------\n filename : The file that needs to be processed.\n tess_path : The path to the tesseract cmd (Only for windows.)\n \"\"\"\n self.file = filename\n self.path = tess_path\n\n def extract_image(self):\n \"\"\"\n\n Returns\n -------\n ImageParser for Image formats.\n\n \"\"\"\n out_list = []\n if self.path is not None:\n pytesseract.pytesseract.tesseract_cmd = self.path\n img = Image.open(self.file)\n text = pytesseract.image_to_string(img)\n out_list.append(text)\n else:\n img = Image.open(self.file)\n text = pytesseract.image_to_string(img)\n out_list.append(text)\n return out_list\n","sub_path":"pyostie/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"159962426","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 16 16:29:49 2019\n\n@author: Lee\n\"\"\"\n\nimport requests\nfrom lxml import etree\n\n\nclass Answer(object):\n \n def __init__(self):\n \n self.url='http://syszr.hfut.edu.cn/redir.php'\n self.headers={\n 'cookie': '',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\n }\n self.params={\n 'catalog_id': 6,\n 'cmd': 'dajuan_chakan',\n 'huihuabh': 278192,\n 'mode': 'test',\n 
}\n \n def get_html(self):\n \n r=requests.get(self.url,params=self.params,headers=self.headers)\n r.encoding=r.apparent_encoding\n return r.text\n \n def save_answer(self,text):\n dic1=[]\n tree=etree.HTML(text)\n _type=tree.xpath('//div[@class=\"shiti\"]/span/text()')\n _type=[i[1:-1] for i in _type]\n question=tree.xpath('//div[@class=\"shiti\"]/strong/text()|//div[@class=\"shiti\"]/strong/p/text()[1]')\n question=[a.replace(\"\\xa0\",\"\") for a in question]\n for i in range(len(_type)):\n if _type[i]=='单选题':\n loc=i\n break\n answer1=tree.xpath('//div[@class=\"shiti\"]/text()[5]')[:loc]\n answer1=[a.replace(\"\\r\",\"\").replace(\"\\n\",\"\").replace(\" \",\"\")[9:] for a in answer1]\n answer2=tree.xpath('//div[@class=\"shiti\"]/text()[6]')\n answer2=[a.replace(\"\\r\",\"\").replace(\"\\n\",\"\").replace(\" \",\"\")[9:] for a in answer2]\n with open('answer3.txt','w') as f:\n for i in range(loc):\n f.write(question[i]+answer1[i]+\"\\n\")\n for i in range(loc,len(_type)):\n f.write(question[i]+answer2[i-loc]+\"\\n\")\n\n dic1=dict(zip(question[:loc],answer1[:loc]))\n\n dic2=dict(zip(question[loc:len(_type)],answer2))\n dic_elec=dic1.copy()\n dic_elec.update(dic2)\n print(dic_elec)\n \nif __name__=='__main__':\n \n a=Answer()\n text=a.get_html()\n a.save_answer(text)","sub_path":"auto_answer/elec.py","file_name":"elec.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"337552840","text":"\"\"\"\n Copyright 2014-2015 Quantiply Corporation. All rights reserved.\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\nUsage:\n druid-metrics-collector.py [--host=] [--port=]\n\"\"\"\nfrom docopt import docopt\n\nimport cherrypy\nimport json\n\nfrom kafka.client import KafkaClient\nfrom kafka.producer import SimpleProducer\nimport logging\n\n\nclass KafkaMetrics(object):\n \n def __init__(self, broker_list, kafka_topic):\n logging.basicConfig(level=logging.INFO)\n self.log = logging.getLogger('druid-kafka-metrics')\n self.log.info(\"Kafka (brokers=%s, topic=%s)\" %(broker_list, kafka_topic))\n client = KafkaClient(broker_list)\n self.producer = SimpleProducer(client)\n self.msg_count = 0\n self.kafka_topic = kafka_topic\n \n @cherrypy.expose\n @cherrypy.tools.json_out()\n @cherrypy.tools.json_in()\n def metrics(self):\n messages = cherrypy.request.json\n\n for message in messages:\n self.msg_count += 1\n self.log.debug(\"%s - %s\" % (self.msg_count, str(message)))\n self.producer.send_messages(self.kafka_topic, json.dumps(message))\n\n if self.msg_count % 100 == 0 :\n self.log.info(\"%s messages processed.\" % (self.msg_count, ))\n\n return \"{'code':200}\"\n \n\nif __name__ == '__main__': \n arguments = docopt(__doc__, version='0.1.1rc')\n BROKER_LIST = arguments['']\n SOCKET_PORT = int(arguments['--port'] or 9999)\n SOCKET_HOST = arguments['--host'] or '0.0.0.0'\n TOPIC = arguments[''] or \"druid-metrics\"\n\n cherrypy.config.update({'server.socket_port': SOCKET_PORT})\n 
cherrypy.config.update({'server.socket_host': SOCKET_HOST})\n cherrypy.quickstart(KafkaMetrics(BROKER_LIST, TOPIC))\n","sub_path":"druid-metrics-collector.py","file_name":"druid-metrics-collector.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"22938986","text":"from json import load\nfrom collections import namedtuple\nfrom database import *\nfrom bs4 import BeautifulSoup\nfrom re import sub, findall\nimport requests\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScheduleGenerator:\n \"\"\"\n Class that generates the schedule by scraping HS's schedule page.\n \"\"\"\n\n def __init__(self):\n self.config = load(open('config.json', 'r'))\n self.days = self.config['en_gb']['day_array']\n self.show = namedtuple('Show', ['day', 'title', 'time', 'link'])\n self.schedulelink = 'https://horriblesubs.info/release-schedule/'\n self.baselink = 'https://horriblesubs.info'\n self.req = requests.get(self.schedulelink).text\n self.soup = BeautifulSoup(self.req, 'lxml')\n self.id = 0\n\n def iter_schedule(self, days=None):\n for titleElem, timeElem in zip(self.soup.find_all(attrs={'title': 'See all releases for this show'}),\n self.soup.find_all(attrs={'schedule-time'})):\n\n day = sub(r\" \\((.*?)\\)\", \"\", titleElem.find_previous(attrs={'weekday'}).contents[0])\n titlecheck = titleElem.find(attrs={'data-cfemail': True})\n title = titleElem.contents[0]\n time = timeElem.contents[0]\n link = self.baselink + titleElem.get('href')\n\n if titlecheck is not None:\n title = sub(r\"\\[(.*?)\\]\", self.decode(titlecheck.get(\"data-cfemail\")), titleElem.getText())\n\n if days is None:\n yield self.show(day, title, time, link)\n\n elif days in day:\n yield self.show(day, title, time, link)\n\n else:\n pass\n\n @staticmethod\n def check_show_internal_id(link):\n for element in BeautifulSoup(requests.get(link).text, 'lxml').find_all(attrs={'type': 'text/javascript'}):\n if 'hs_showid' in element.getText():\n return findall(r\"\\d+\", element.getText())[0]\n\n @staticmethod\n def check_show_up(link):\n show_id = ScheduleGenerator.check_show_internal_id(link)\n url = f'https://horriblesubs.info//api.php?method=getshows&type=show&showid={show_id}'\n soup = BeautifulSoup(requests.get(url).text, 'lxml')\n showinfo = soup.find(attrs={'rls-info-container'}).contents[0]\n magnetfind_args = 'a', {'title': 'Magnet Link'}\n ret_si = namedtuple('ShowInfo', ['released', 'title', 'episode', 'magnet480', 'magnet720', 'magnet1080'])\n\n if showinfo.span.text == \"Today\":\n magnet480 = soup.find(*magnetfind_args)\n magnet720 = magnet480.find_next(*magnetfind_args)\n magnet1080 = magnet720.find_next(*magnetfind_args)\n\n return ret_si(True, showinfo.span.next_sibling.strip(), showinfo.strong.text,\n magnet480.get('href'), magnet720.get('href'), magnet1080.get('href'))\n\n else:\n return ret_si(False, showinfo.span.next_sibling.strip(), showinfo.strong.text,\n None, None, None)\n\n def update_schedule(self):\n # TODO : Rewrite update function to use partial differences instead of deleting everything\n self.req = requests.get(self.schedulelink)\n self.id += 1\n showlist = [show.title for show in self.iter_schedule()]\n \n if showlist == list_all_shows():\n logger.info(f\"Update successful, id: {self.id}\")\n return True\n else:\n logger.warning(\"Show mismatch found, flushing old data...\")\n delete_data()\n show_insert_loop(self)\n return False\n\n @staticmethod\n def shorten_magnet(magnet_link):\n r = 
requests.get(f'http://mgnet.me/api/create?m={magnet_link}')\n return r.json().get('shorturl')\n\n @staticmethod\n def decode(encstr):\n return ''.join([chr(int(encstr[i:i + 2], 16) ^ int(encstr[:2], 16)) for i in range(2, len(encstr), 2)])\n\n\ndef show_insert_loop(schedule: ScheduleGenerator):\n \"\"\"\n Grabs all the shows from the schedule and inserts them\n into the database\n \"\"\"\n logger.info('Entering show_insert_loop...')\n for show in schedule.iter_schedule():\n try:\n insert_show(show.title, show.day, show.time, show.link)\n if not get_internal_show_id(show.title):\n set_internal_show_id(show.title, schedule.check_show_internal_id(show.link))\n\n except TransactionIntegrityError:\n pass\n","sub_path":"hsubs.py","file_name":"hsubs.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"248845035","text":"\"\"\"\nProvides various validators.\n\"\"\"\n\nimport re\nimport uuid\n\nfrom abc import ABCMeta\nfrom .exceptions import ValidationError\nfrom .utils.formatting import format_error_message\n\n\nMISSING_ERROR_MESSAGE = 'ValidationError raised by `{class_name}`, but error key `{key}` does ' \\\n 'not exist in the `error_messages` dictionary.'\n\n\nclass Validator(metaclass=ABCMeta):\n \"\"\"\n A base class from which all validator classes should inherit.\n :param dict error_messages: The error messages for various kinds of errors.\n \"\"\"\n\n default_error_messages = {}\n\n def __init__(self, error_messages=None):\n messages = {}\n for cls in reversed(self.__class__.__mro__):\n messages.update(getattr(cls, 'default_error_messages', {}))\n messages.update(error_messages or {})\n self.error_messages = messages\n\n def _fail(self, key, **kwargs):\n \"\"\"\n Raises a `ValidationError`.\n :param key: The key message to be fetched.\n :param kwargs: The kwargs used to replace the messages token.\n \"\"\"\n try:\n message = self.error_messages[key]\n message = format_error_message(message, **kwargs)\n if isinstance(message, dict):\n raise ValidationError(**message)\n raise ValidationError(message)\n except KeyError:\n class_name = self.__class__.__name__\n message = format_error_message(MISSING_ERROR_MESSAGE, class_name=class_name, key=key)\n raise AssertionError(message)\n\n\nclass ChoiceValidator(Validator):\n \"\"\"\n Validator which succeeds if the `value` is a member of the `choices`.\n\n :param iterable choices: An array of valid values.\n :param dict error_messages: The error messages for various kinds of errors.\n \"\"\"\n\n default_error_messages = {\n 'invalid': 'Not a valid choice.'\n }\n\n def __init__(self, choices, error_messages=None):\n super().__init__(error_messages)\n\n choices = choices or ()\n assert isinstance(choices, (list, tuple)), '`choices` has to be a list or tuple'\n\n self.choices = choices\n\n def __call__(self, value):\n if value not in self.choices:\n self._fail('invalid', input=value)\n\n\nclass EmailValidator(Validator):\n \"\"\"\n Validator which validates an email address.\n :param dict error_messages: The error messages for various kinds of errors.\n \"\"\"\n\n USER_REGEX = re.compile(\n r\"(^[-!#$%&'*+/=?^`{}|~\\w]+(\\.[-!#$%&'*+/=?^`{}|~\\w]+)*$\" # dot-atom\n # quoted-string\n r'|^\"([\\001-\\010\\013\\014\\016-\\037!#-\\[\\]-\\177]'\n r'|\\\\[\\001-\\011\\013\\014\\016-\\177])*\"$)', re.IGNORECASE | re.UNICODE)\n\n DOMAIN_REGEX = re.compile(\n # domain\n r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+'\n r'(?:[A-Z]{2,6}|[A-Z0-9-]{2,})$'\n # literal 
form, ipv4 address (SMTP 4.1.3)\n r'|^\\[(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)'\n r'(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}\\]$', re.IGNORECASE | re.UNICODE)\n\n DOMAIN_WHITELIST = ('localhost',)\n\n default_error_messages = {\n 'invalid': 'Not a valid email address.'\n }\n\n def __call__(self, value):\n if not value or '@' not in value:\n self._fail('invalid')\n\n user_part, domain_part = value.rsplit('@', 1)\n\n if not self.USER_REGEX.match(user_part):\n self._fail('invalid')\n\n if domain_part in self.DOMAIN_WHITELIST:\n return\n\n if self.DOMAIN_REGEX.match(domain_part):\n return\n\n domain_part = domain_part.encode('idna').decode('ascii')\n if self.DOMAIN_REGEX.match(domain_part):\n return\n\n self._fail('invalid')\n\n\nclass LengthValidator(Validator):\n \"\"\"\n Validator which succeeds if the value passed to it has a length between a minimum and maximum.\n\n :param int min_length: The minimum length. If not provided, minimum length will not be checked.\n :param int max_length: The maximum length. If not provided, maximum length will not be checked.\n :param int equal_length: The exact length. If provided, maximum and minimum length will not be checked.\n :param dict error_messages: The error messages for various kinds of errors.\n \"\"\"\n\n default_error_messages = {\n 'min_length': 'Shorter than minimum length {min_length}.',\n 'max_length': 'Longer than maximum length {max_length}.',\n 'equal_length': 'Length must be {equal_length}.'\n }\n\n def __init__(self, min_length=None, max_length=None, equal_length=None, error_messages=None):\n if equal_length is not None and (min_length or max_length):\n raise ValueError('The `equal_length` parameter was provided, maximum or '\n 'minimum parameter must not be provided.')\n\n super().__init__(error_messages)\n\n self.min_length = min_length\n self.max_length = max_length\n self.equal_length = equal_length\n\n def __call__(self, value):\n length = len(value)\n\n if self.equal_length is not None:\n if length != self.equal_length:\n self._fail('equal_length', equal_length=self.equal_length)\n return\n\n if self.min_length is not None and length < self.min_length:\n self._fail('min_length', min_length=self.min_length)\n\n if self.max_length is not None and length > self.max_length:\n self._fail('max_length', max_length=self.max_length)\n\n\nclass RangeValidator(Validator):\n \"\"\"\n Validator which succeeds if the value it is passed is greater\n or equal to `min_value` and less than or equal to `max_value`.\n :param min_value: The minimum value (lower bound). If not provided, minimum value will not be checked.\n :param max_value: The maximum value (upper bound). If not provided, maximum value will not be checked.\n :param dict error_messages: The error messages for various kinds of errors.\n \"\"\"\n\n default_error_messages = {\n 'min_value': 'Must be at least {min_value}.',\n 'max_value': 'Must be at most {max_value}.',\n }\n\n def __init__(self, min_value=None, max_value=None, error_messages=None):\n super().__init__(error_messages)\n\n self.min_value = min_value\n self.max_value = max_value\n\n def __call__(self, value):\n if self.min_value is not None and value < self.min_value:\n self._fail('min_value', min_value=self.min_value)\n\n if self.max_value is not None and value > self.max_value:\n self._fail('max_value', max_value=self.max_value)\n\n\nclass RegexValidator(Validator):\n \"\"\"\n Validator which succeeds if the `value` matches with the regex.\n\n :param regex: The regular expression string to use. 
Can also be a compiled regular expression pattern.\n :param dict error_messages: The error messages for various kinds of errors.\n \"\"\"\n\n default_error_messages = {\n 'invalid': 'This value does not match the required pattern.'\n }\n\n def __init__(self, regex, error_messages=None):\n super().__init__(error_messages)\n self.regex = regex\n\n def __call__(self, value):\n if self.regex.match(value) is None:\n self._fail('invalid', value=value, regex=self.regex.pattern)\n return value\n\n\nclass UUIDValidator(Validator):\n \"\"\"\n Validator which succeeds if the value is an UUID.\n :param dict error_messages: The error messages for various kinds of errors.\n \"\"\"\n\n default_error_messages = {\n 'invalid': 'Not a valid uuid.'\n }\n\n def __call__(self, value):\n try:\n uuid.UUID(hex=value)\n except (AttributeError, ValueError):\n self._fail('invalid', value=value)\n","sub_path":"flask_webapi/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":7696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"336984925","text":"from ..fixtures.image import (\n evaluate_model_on_pixel_grid,\n gaussian_point_source,\n uncorrelated_gaussian_noise_background\n)\nfrom fastimgproto.sourcefind.image import (SourceFindImage, _estimate_rms)\nimport numpy as np\n\nydim = 128\nxdim = 64\nrms = 1.0\nbright_src = gaussian_point_source(x_centre=48.24, y_centre=52.66, amplitude=10.0)\nfaint_src = gaussian_point_source(x_centre=32, y_centre=64, amplitude=3.5)\n\n\ndef test_rms_estimation():\n img = uncorrelated_gaussian_noise_background(shape=(ydim, xdim),\n sigma=rms)\n img += evaluate_model_on_pixel_grid(img.shape, bright_src)\n img += evaluate_model_on_pixel_grid(img.shape, faint_src)\n rms_est= _estimate_rms(img)\n # print \"RMS EST:\", rms_est\n assert np.abs((rms_est - rms) / rms) < 0.05\n\ndef test_basic_source_detection():\n \"\"\"\n We use a flat background (rather than noisy) to avoid random-noise fluctuations\n causing erroneous detections (and test-failures).\n \"\"\"\n img = np.zeros((ydim, xdim))\n img += evaluate_model_on_pixel_grid(img.shape, bright_src)\n # img += evaluate_model_on_pixel_grid(img.shape, faint_src)\n\n sf = SourceFindImage(img, detection_n_sigma=4,\n analysis_n_sigma=3,\n rms_est=rms)\n assert len(sf.islands) == 1\n src = sf.islands[0]\n # print(bright_src)\n # print(src)\n assert np.abs(src.peak_x_idx - bright_src.x_mean) <0.5\n assert np.abs(src.peak_y_idx - bright_src.y_mean) <0.5\n assert np.abs(src.xbar - bright_src.x_mean) <0.1\n assert np.abs(src.ybar - bright_src.y_mean) <0.1\n\n\n # We expect to detect the bright source, but not the faint one.\n img += evaluate_model_on_pixel_grid(img.shape, faint_src)\n sf = SourceFindImage(img, detection_n_sigma=4,\n analysis_n_sigma=3,\n rms_est=rms)\n assert len(sf.islands) == 1\n # Unless we add it again and effectively double the faint_src flux\n img += evaluate_model_on_pixel_grid(img.shape, faint_src)\n sf = SourceFindImage(img, detection_n_sigma=4,\n analysis_n_sigma=3,\n rms_est=rms)\n assert len(sf.islands) == 2\n\n","sub_path":"tests/test_sourcefind/test_detection.py","file_name":"test_detection.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"318452210","text":"import random\r\nLIM = 1000 #Limit for Crash Multiplier\r\nH = 10 #Multiplier Variable (Random Int < LIM)\r\n\r\n\r\ndef start_game(cashout_multiplier):\r\n return isWinner(calc_multiplier(), 
cashout_multiplier)\r\n\r\ndef calc_multiplier():\r\n H = random.randint(0,(LIM-1)) #Sets H to a random int between 0 and LIM\r\n multiplier = 0.99*LIM/(LIM-H)\r\n return multiplier\r\n\r\ndef isWinner(multiplier, cashout_multiplier):\r\n if cashout_multiplier > multiplier:\r\n return {'won':False,\r\n 'multiplier':multiplier}\r\n else:\r\n return {'won':True,\r\n 'multiplier':multiplier}","sub_path":"crash/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"475660946","text":"\"\"\"Custom middleware. Some of this may be generally useful.\"\"\"\n\nfrom google.appengine.api import users\n\nimport models\n\n\nclass AddUserToRequestMiddleware(object):\n \"\"\"Add a user object and a user_is_admin flag to each request.\"\"\"\n\n def process_request(self, request):\n request.user = users.get_current_user()\n request.user_is_admin = users.is_current_user_admin()\n\n # Update the cached value of the current user's Account\n account = None\n if request.user is not None:\n account = models.Account.get_account_for_user(request.user)\n models.Account.current_user_account = account\n\n","sub_path":"puzzles/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"474679937","text":"import urllib.parse\nimport string\n\nfrom tokenservices.handlers import BaseHandler, JsonBodyMixin\nfrom tokenservices.database import DatabaseMixin\nfrom tokenservices.errors import JSONHTTPError\nfrom tokenservices.handlers import RequestVerificationMixin\nfrom tokenservices.clients import IdServiceClient\nfrom tornado.web import StaticFileHandler, HTTPError\nfrom tokenservices.log import log\nfrom decimal import Decimal\nfrom tokenservices.utils import validate_address, validate_decimal_string, parse_int\n\ndef sofa_manifest_from_row(row):\n return {\n \"displayName\": row['name'],\n \"protocol\": row['protocol'],\n \"avatarUrl\": row['avatar_url'],\n \"interfaces\": row['interfaces'],\n \"ownerAddress\": row['token_id'],\n \"paymentAddress\": row['payment_address'],\n \"featured\": row['featured'],\n \"webApp\": row['web_app'],\n \"languages\": row['languages'],\n \"initRequest\": {\"values\": row['init_request']}\n }\n\ndef app_from_row(row):\n return {\n \"token_id\": row['token_id'],\n \"username\": row['username'],\n \"reputation_score\": float(row['reputation_score']) if row['reputation_score'] else None,\n \"review_count\": row['review_count'],\n \"is_app\": True,\n \"payment_address\": row['payment_address'],\n \"about\": row['description'],\n \"name\": row['name'],\n \"avatar\": row['avatar_url'],\n # for backwards compatability\n \"custom\": {\n \"about\": row['description'],\n \"name\": row['name'],\n \"avatar\": row['avatar_url'],\n #\"manifest\": sofa_manifest_from_row(row)\n }\n }\n\nclass AppsHandler(DatabaseMixin, BaseHandler):\n async def get(self, token_id):\n\n async with self.db:\n row = await self.db.fetchrow(\n \"SELECT apps.*, sofa_manifests.* FROM apps \"\n \"JOIN sofa_manifests ON \"\n \"sofa_manifests.token_id = apps.token_id \"\n \"WHERE apps.token_id = $1\", token_id)\n if row is None:\n raise JSONHTTPError(404, body={'errors': [{'id': 'not_found', 'message': 'Not Found'}]})\n result = app_from_row(row)\n self.write(result)\n\nclass SearchAppsHandler(DatabaseMixin, BaseHandler):\n async def get(self, force_featured=None):\n try:\n offset = 
int(self.get_query_argument('offset', 0))\n limit = int(self.get_query_argument('limit', 10))\n except ValueError:\n raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})\n\n if force_featured:\n featured = True\n else:\n featured = self.get_query_argument('featured', 'false')\n if featured.lower() == 'false':\n featured = False\n else:\n featured = True\n query = self.get_query_argument('query', None)\n\n args = []\n sql = \"SELECT * FROM apps JOIN sofa_manifests ON sofa_manifests.token_id = apps.token_id \"\n if query:\n # strip any punctuation\n query = ''.join([c for c in query if c not in string.punctuation])\n args.append('%{}%'.format(query))\n sql += \" WHERE apps.name ILIKE $1\"\n if featured:\n sql += \" AND apps.featured IS TRUE\"\n elif featured:\n sql += \" WHERE apps.featured IS TRUE\"\n countsql = \"SELECT COUNT(*) \" + sql[8:]\n countargs = args[:]\n sql += \" ORDER BY apps.name OFFSET ${} LIMIT ${}\".format(len(args) + 1, len(args) + 2)\n args.extend([offset, limit])\n\n async with self.db:\n count = await self.db.fetchrow(countsql, *countargs)\n rows = await self.db.fetch(sql, *args)\n\n results = [app_from_row(row) for row in rows]\n\n self.write({\n 'query': query or '',\n 'offset': offset,\n 'limit': limit,\n 'apps': results,\n 'featured': featured,\n 'total': count['count']\n })\n\n\nclass SofaManifestHandler(DatabaseMixin, BaseHandler):\n async def get(self, token_id):\n\n async with self.db:\n row = await self.db.fetchrow(\n \"SELECT apps.*, sofa_manifests.* FROM apps \"\n \"JOIN sofa_manifests ON \"\n \"sofa_manifests.token_id = apps.token_id \"\n \"WHERE apps.token_id = $1\", token_id)\n if row is None:\n raise JSONHTTPError(404, body={'errors': [{'id': 'not_found', 'message': 'Not Found'}]})\n result = sofa_manifest_from_row(row)\n self.write(result)\n\n\nclass ReputationUpdateHandler(RequestVerificationMixin, DatabaseMixin, BaseHandler):\n\n async def post(self):\n\n if 'reputation' not in self.application.config or 'id' not in self.application.config['reputation']:\n raise HTTPError(404)\n\n try:\n address = self.verify_request()\n except JSONHTTPError:\n raise HTTPError(404)\n\n if address != self.application.config['reputation']['id']:\n raise HTTPError(404)\n\n if not all(x in self.json for x in ['address', 'score', 'count']):\n raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})\n\n token_id = self.json['address']\n if not validate_address(token_id):\n raise JSONHTTPError(400, body={'errors': [{'id': 'invalid_address', 'message': 'Invalid Address'}]})\n\n count = self.json['count']\n count = parse_int(count)\n if count is None:\n raise JSONHTTPError(400, body={'errors': [{'id': 'invalid_count', 'message': 'Invalid Count'}]})\n\n score = self.json['score']\n if isinstance(score, str) and validate_decimal_string(score):\n score = Decimal(score)\n if not isinstance(score, (int, float, Decimal)):\n raise JSONHTTPError(400, body={'errors': [{'id': 'invalid_score', 'message': 'Invalid Score'}]})\n\n async with self.db:\n await self.db.execute(\"UPDATE apps SET reputation_score = $1, review_count = $2 WHERE token_id = $3\",\n score, count, token_id)\n await self.db.commit()\n\n self.set_status(204)\n\n\nclass UserMixin:\n\n def get_current_user(self):\n val = self.get_secure_cookie(\"user\")\n if isinstance(val, bytes):\n val = val.decode('ascii')\n if not val:\n # make sure empty strings aren't counted as valid\n return None\n return val\n\nclass 
LogoutHandler(BaseHandler):\n\n    def post(self):\n\n        redirect = urllib.parse.urlparse(self.request.headers.get('Referer', '')).path\n\n        self.clear_all_cookies()\n        self.redirect(\"/login?redirect={}\".format(redirect))\n\nclass LoginPageHandler(JsonBodyMixin, StaticFileHandler):\n\n    def initialize(self):\n        super().initialize('public/')\n\n    def get(self):\n        return super().get('login.html')\n\n    async def post(self):\n        if 'auth_token' not in self.json:\n            raise JSONHTTPError(400, body={'errors': [{'id': 'bad_arguments', 'message': 'Bad Arguments'}]})\n        token = self.json['auth_token']\n\n        idclient = IdServiceClient(use_tornado=True)\n        try:\n            user = await idclient.whodis(token)\n        except Exception:\n            log.exception(\"...\")\n            user = None\n\n        if user:\n            self.set_secure_cookie(\"user\", user['token_id'])\n            self.set_status(204)\n        else:\n            raise JSONHTTPError(400, body={'errors': [{'id': 'invalid_token', 'message': 'Invalid token'}]})\n\nclass CurrentUserHandler(UserMixin, BaseHandler):\n\n    async def get(self):\n        address = self.current_user\n        if address:\n            idclient = IdServiceClient(use_tornado=True)\n            user = await idclient.get_user(address)\n        else:\n            raise JSONHTTPError(401)\n\n        self.write({\"user\": user})\n","sub_path":"tokendirectory/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":7919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"215879075","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport corr\nimport struct\nimport calandigital as calan\nimport time\n\n\nIP = '192.168.0.40'\nbof = '../debbug.bof'\nfpga = corr.katcp_wrapper.FpgaClient(IP)\ntime.sleep(1)\nfpga.upload_program_bof(bof,3000)\ntime.sleep(1)\n\nfpga.write_int('msdft_sel',0)\nfpga.write_int('twidd_num',0)\nfpga.write_int('acc_len', 2**10)\nfpga.write_int('rst',1)\nfpga.write_int('sync',0)\nfpga.write_int('rst_save',1)\nfpga.write_int('rst_fft',1)\nfpga.write_int('start_fft',1)\nfpga.write_int('rst_fft',0)\n\nfreq = np.linspace(0,67.5,128, endpoint=False)\n\n\n\ndef repetitive(n_iter, index):\n    \"\"\"Repeat one measurement several times, resetting the system between runs.\n    \"\"\"\n    stats = np.zeros([n_iter, 4])\n    for i in range(n_iter):\n        fpga.write_int('rst',1)\n        fpga.write_int('sync',0)\n        fpga.write_int('rst_save',1)\n        fpga.write_int('rst_save',0)\n        fpga.write_int('msdft1_msdft_rst_bram',1)\n        fpga.write_int('msdft1_msdft_rst_bram',0)  # release the BRAM reset, matching the 1-then-0 pattern of the other resets\n        fpga.write_int('rst',0)\n        fpga.write_int('sync',1)\n        time.sleep(1)\n        vals = take_data()\n        stats[i,:] = calc_stats(vals, index)\n        print('Values iter %i' %i)\n        print('mean pow: %.4f \\t std pow: %.4f' %(stats[i,0], stats[i,1]))\n        print('mean ang: %.4f \\t std ang: %.4f' %(stats[i,2], stats[i,3]))\n        print('\\n')\n    return stats \n    \n    \n\ndef take_data():\n    a1 = np.array(struct.unpack('>1024Q', fpga.read('A1', 1024*8)))\n    a2 = np.array(struct.unpack('>1024Q', fpga.read('A2', 1024*8)))\n    a3 = np.array(struct.unpack('>1024Q', fpga.read('A3', 1024*8)))\n    a4 = np.array(struct.unpack('>1024Q', fpga.read('A4', 1024*8)))\n    a5 = np.array(struct.unpack('>1024Q', fpga.read('A5', 1024*8)))\n\n    powA = np.vstack([a1,a2,a3,a4,a5])\n\n    b1 = np.array(struct.unpack('>1024Q', fpga.read('B1', 1024*8)))\n    b2 = np.array(struct.unpack('>1024Q', fpga.read('B2', 1024*8)))\n    b3 = np.array(struct.unpack('>1024Q', fpga.read('B3', 1024*8)))\n    b4 = np.array(struct.unpack('>1024Q', fpga.read('B4', 1024*8)))\n    b5 = np.array(struct.unpack('>1024Q', fpga.read('B5', 1024*8)))\n\n    powB = np.vstack([b1,b2,b3,b4,b5])\n\n    corr1 = np.array(struct.unpack('>2048q', 
fpga.read('corr1', 2048*8)))\n corr2 = np.array(struct.unpack('>2048q', fpga.read('corr2', 2048*8)))\n corr3 = np.array(struct.unpack('>2048q', fpga.read('corr3', 2048*8)))\n corr4 = np.array(struct.unpack('>2048q', fpga.read('corr4', 2048*8)))\n corr5 = np.array(struct.unpack('>2048q', fpga.read('corr5', 2048*8)))\n\n ang1 = np.rad2deg(np.arctan2(corr1[1::2], corr1[::2]))\n ang2 = np.rad2deg(np.arctan2(corr2[1::2], corr2[::2]))\n ang3 = np.rad2deg(np.arctan2(corr3[1::2], corr3[::2]))\n ang4 = np.rad2deg(np.arctan2(corr4[1::2], corr4[::2]))\n ang5 = np.rad2deg(np.arctan2(corr5[1::2], corr5[::2]))\n\n angs = np.vstack([ang1, ang2, ang3, ang4, ang5])\n\n return (powA, powB, angs)\n\n\ndef calc_stats(data, index):\n powA = data[0][index,:]\n powB = data[1][index,:]\n angs = data[2][index,:]\n mean_pow = np.mean(10*(np.log10(powA)-np.log10(powB)))\n std_pow = np.std(10*(np.log10(powA)-np.log10(powB)))\n mean_ang = np.mean(angs)\n std_ang = np.std(angs)\n return np.array([mean_pow, std_pow, mean_ang, std_ang])\n \n\n\ndef continuos(n_iter, index):\n fpga.write_int('rst',1)\n fpga.write_int('sync',0)\n fpga.write_int('rst_save',1)\n fpga.write_int('rst_save',0)\n fpga.write_int('rst',0)\n fpga.write_int('sync',1)\n stats = np.zeros([n_iter, 4])\n for i in range(n_iter):\n fpga.write_int('rst_save',1)\n fpga.write_int('rst_save',0)\n time.sleep(5)\n vals = take_data()\n stats[i,:] = calc_stats(vals, index)\n print('Values iter %i' %i)\n print('mean pow: %.4f \\t std pow: %.4f' %(stats[i,0], stats[i,1]))\n print('mean ang: %.4f \\t std ang: %.4f' %(stats[i,2], stats[i,3]))\n print('\\n')\n return stats\n\n \ndef spect_evolution(n_iter):\n fpga.write_int('rst',1)\n fpga.write_int('sync',0)\n fpga.write_int('rst_save',1)\n fpga.write_int('rst_save',0)\n fpga.write_int('rst',0)\n fpga.write_int('sync',1)\n powA = np.zeros([n_iter, 5])\n powB = np.zeros([n_iter, 5])\n for i in range(n_iter):\n fpga.write_int('rst_save',1)\n fpga.write_int('rst_save',0)\n time.sleep(1)\n vals = take_data()\n powA[i,:] = vals[0][:,200]\n powB[i,:] = vals[1][:,200]\n return (powA, powB)\n \n\n\n\n\n\n \n","sub_path":"msdft/test/msdft_256/time_evolution/time_test.py","file_name":"time_test.py","file_ext":"py","file_size_in_byte":4476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"81575659","text":"import os\nfrom importlib import import_module\nfrom time import sleep, time\n\nfrom ..utility.base_workflow import BaseWorkflow\n\n\nWORKFLOW_NAME = 'MicrosoftPaint'\nWORKFLOW_DESCRIPTION = 'Create a blank MS Paint file (Windows)'\n\nDEFAULT_INPUT_WAIT_TIME = 2\nDEFAULT_PAINT_PATH = paint_path = 'C:\\Windows\\System32\\mspaint.exe'\n\n\ndef load():\n pyautogui = import_module('pyautogui')\n return msPaint(pyautogui=pyautogui)\n\n\nclass msPaint(BaseWorkflow):\n\n def __init__(self, pyautogui, input_wait_time=DEFAULT_INPUT_WAIT_TIME, paint_path=DEFAULT_PAINT_PATH):\n super().__init__(name=WORKFLOW_NAME, description=WORKFLOW_DESCRIPTION)\n\n self.pyautogui = pyautogui\n self.input_wait_time = input_wait_time\n self.paint_path = paint_path\n\n def action(self, extra=None):\n self._ms_paint()\n\n \"\"\" PRIVATE \"\"\"\n\n def _ms_paint(self):\n os.startfile(self.paint_path)\n self.pyautogui.getWindowsWithTitle('Paint')\n sleep(self.input_wait_time)\n self.pyautogui.hotkey('ctrl', 's')\n file_name = int(time())\n sleep(self.input_wait_time)\n self.pyautogui.typewrite(str(file_name))\n sleep(self.input_wait_time)\n self.pyautogui.press('enter')\n sleep(self.input_wait_time)\n 
self.pyautogui.getWindowsWithTitle('Paint')\n self.pyautogui.hotkey('alt', 'f4')\n","sub_path":"pyhuman/app/workflows/ms_paint.py","file_name":"ms_paint.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"518029315","text":"import json\nimport os\nfrom wsgiref.util import FileWrapper\n\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\nfrom django.http import (HttpResponse, HttpResponseRedirect, JsonResponse,\n HttpResponseBadRequest)\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.defaultfilters import urlencode, force_escape\nfrom django.utils.safestring import mark_safe\nfrom django.utils import translation\nfrom django.views.generic.base import TemplateView\nfrom django.urls import reverse\nfrom django.http import Http404 \n\nfrom content.models import Spoiler, StaticPage, GetCredit, MenuAboutItem,\\\n MainPageStatic, IndexPageStatic, StaticPageDefault\nfrom credit.models import CreditRate, CreditRateUp\nfrom communication.models import Response, QuestionComment, UserQuestion,\\\n QuestionConfig, CallbackSuccessForm,\\\n SocialNetUnderHeader\nfrom communication.forms import WriteCommentForm, WriteQuestionForm\nfrom department.models import Department\nfrom efin.settings import GOOGLE_MAPS_API_KEY, BASE_DIR\nfrom content.helpers import get_city_name, process_bid\nfrom bids.models import Bid\nfrom users.forms import RegisterNumberForm\nfrom users.models import Profile\n\n\ndef pages(request, page_url):\n page = StaticPage.objects.filter(link=page_url).first()\n template = 'spoiler-page.html'\n if not page:\n page = StaticPageDefault.objects.filter(link=page_url).first()\n if not page:\n raise Http404\n template = 'default.html'\n menu_about = MenuAboutItem.objects.all()\n return render(request, template, {'page':page,\n 'menu_about':menu_about})\n\n\ndef main(request):\n city = get_city_name(request)\n main = MainPageStatic.get_solo()\n nets = SocialNetUnderHeader.objects.all()\n departments = []\n for obj in main.departments.get_queryset():\n if obj.city not in departments:\n departments.append(obj.city)\n departments = sorted(departments)\n length = len(departments)\n column_list = [0, 0, 0, 0]\n while length > 0:\n for i in range(0, 4):\n if length > 0:\n column_list[i] += 1\n length -= 1\n return render(request, 'main.html', {'main':main,\n 'departments':departments,\n 'nets':nets,\n 'column_list':column_list,\n 'user_city': city if city else 'Другой город'})\n\n\ndef index(request):\n city = get_city_name(request)\n index = IndexPageStatic.get_solo()\n main = MainPageStatic.get_solo()\n departments = []\n for obj in index.departments.get_queryset():\n if obj.city not in departments:\n departments.append(obj.city)\n departments = sorted(departments)\n length = len(departments)\n column_list = [0, 0, 0, 0]\n while length > 0:\n for i in range(0, 4):\n if length > 0:\n column_list[i] += 1\n length -= 1\n form = RegisterNumberForm()\n return render(request, 'index.html', {'index':index,\n 'main':main,\n 'form':form,\n 'user_city': city if city else 'Другой город',\n 'departments':departments,\n 'column_list':column_list})\n\n\ndef departments_generate(request, dep_id):\n if translation.get_language() == 'ua':\n lang = 'ua'\n else:\n lang = 'ru'\n city_departs = dict()\n departments = Department.objects.filter(city=dep_id)\n for obj in departments:\n #link = 
mark_safe('https://www.google.com/maps/embed/v1/place?key=%s&q=%s,%s' % \\\n # (GOOGLE_MAPS_API_KEY,\n # obj.geolocation.lat,\n # obj.geolocation.lon))\n address = obj.address if lang == 'ru' else obj.address_ua\n city_departs[obj.id] = {'city':obj.city,\n 'lats':{'lat':obj.geolocation.lat,\n 'lng':obj.geolocation.lon},\n 'address':address,\n 'schedule':obj.schedule,\n 'email':obj.email,\n 'phone':obj.phone}\n return JsonResponse(city_departs)\n\n\ndef slider_filler(request):\n data = CreditRateUp.objects.all()\n result = dict()\n for obj in data:\n sum_min = obj.credit_rate.sum_min if not obj.credit_rate.is_insurance \\\n else obj.credit_rate.sum_min / 1.25\n sum_max = obj.credit_rate.sum_max if not obj.credit_rate.is_insurance \\\n else obj.credit_rate.sum_max / 1.25\n result[str(obj.id)] = {'term_min':obj.credit_rate.term_min,\n 'term_max':obj.credit_rate.term_max,\n 'sum_min':sum_min,\n 'sum_max':sum_max}\n return JsonResponse(result)\n\n\ndef agreement(request):\n menu_about = MenuAboutItem.objects.all()\n return render(request, 'default.html', {'menu_about':menu_about})\n\n\ndef credit_calculator(request, rate_id, term, summ):\n rate = CreditRate.objects.filter(id=rate_id).first()\n if rate.is_insurance:\n summ /= 0.8\n json_data = rate.rate_file.read()\n data = json.loads(json_data.decode('utf-8'))\n key = ''\n if not term:\n term = rate.term_min\n if not summ:\n summ = rate.sum_min\n for obj in sorted(map(int, data.keys())):\n if summ >= obj:\n continue\n else:\n key = str(obj)\n else:\n if not key:\n key = str(sorted(map(int, data.keys()))[-1])\n percents = data[key]\n for obj in sorted(map(int, percents.keys())):\n if term >= int(obj):\n rate_percent = percents[str(obj)]\n else:\n break\n if rate.term_type:\n rate_percent /= 12\n else:\n rate_percent /= 52\n # rate_percent , term, summ\n if not rate_percent:\n return 0\n else:\n on_loan = (1 + rate_percent) ** term\n res = round(summ * rate_percent * on_loan / (on_loan - 1) , 2)\n result = {'result':res}\n return JsonResponse(result)\n\n\ndef download_pdf(request, spoiler_id):\n spoiler = Spoiler.objects.filter(id=spoiler_id).first()\n if os.path.exists(spoiler.file.path):\n with open(spoiler.file.path, 'rb') as fh:\n response = HttpResponse(fh.read(), content_type=\"application/pdf\")\n response['Content-Disposition'] = 'attachment; filename=%s.pdf' % os.path.basename(spoiler.file.path)\n return response\n raise Http404\n\n\ndef open_pdf(request, spoiler_id):\n spoiler = Spoiler.objects.filter(id=spoiler_id).first()\n file_data = open(spoiler.file.path, 'rb').read()\n return HttpResponse(file_data, content_type='application/pdf')\n\n\ndef translate(request, lang_code):\n translation.activate(lang_code)\n request.session[translation.LANGUAGE_SESSION_KEY] = lang_code\n return HttpResponseRedirect(\"/\")\n\n\nclass CallbackView(TemplateView):\n template_name = \"form-callback.html\"\n\n def get_context_data(self, **kwargs):\n context = super(CallbackView, self).get_context_data()\n\n bid_id = None\n # get bid's ID form session which set in save_credit_request function\n if self.request.session.has_key('bid_id'):\n bid_id = self.request.session.get('bid_id')\n del self.request.session['bid_id']\n\n context['bid_id'] = bid_id\n\n city = get_city_name(self.request)\n context['city'] = city or \"Другой город\"\n context['form'] = RegisterNumberForm()\n if kwargs.get('status_message'):\n context['status_message'] = kwargs.get('status_message')\n\n return context\n\n\ndef save_credit_request(request):\n if request.method == 'POST':\n # 
create new Bid\n new_bid = Bid.objects.create(\n credit_sum=request.POST.get('credit_sum', 0),\n termin=request.POST.get('termin'),\n termin_type=request.POST.get('term_type'),\n city=request.POST.get('city')\n )\n new_bid.save()\n\n if new_bid.id:\n # set bid's id to session for accesing in other page\n request.session[\"bid_id\"] = new_bid.id\n\n # for ajax query from index.html shoudn't redirect\n if request.POST.get('no_redirect', False):\n return JsonResponse({'status':'200',\n 'bid_id':new_bid.id})\n else:\n return HttpResponseRedirect(redirect_to=request.POST.get('callback'))\n\n\ndef request_callback(request):\n if request.method == 'POST':\n bid_id = request.POST.get(\"bid_id\", None)\n if bid_id:\n # if Bid has been created in save_credit_request function\n if Bid.objects.filter(id=int(bid_id)).exists():\n bid = Bid.objects.get(id=int(bid_id))\n bid.contact_phone = request.POST.get(\"contact_phone\")\n bid.name = request.POST.get(\"client_name\")\n bid.save()\n process_bid(bid)\n else:\n new_bid = Bid.objects.create(\n city=request.POST.get('city'),\n name=request.POST.get(\"client_name\"),\n contact_phone=request.POST.get(\"contact_phone\")\n )\n new_bid.save()\n process_bid(new_bid)\n else:\n new_bid = Bid.objects.create(\n city=request.POST.get('city'),\n name=request.POST.get(\"client_name\"),\n contact_phone=request.POST.get(\"contact_phone\")\n )\n new_bid.save()\n process_bid(new_bid)\n\n callback_success = CallbackSuccessForm.get_solo()\n url = reverse('success', kwargs={'id_mess':callback_success.success.id,\n 'redirect_url':'main'})\n return HttpResponseRedirect(url)\n return HttpResponseBadRequest()\n\n\nclass CallbackSuccessView(TemplateView):\n template_name = \"form-success.html\"\n\n\ndef comment_add(request):\n if request.method == 'POST':\n content = {'content':request.POST.get('content')}\n comment_form = WriteCommentForm(content)\n if comment_form.is_valid():\n question = UserQuestion.objects.filter(id=request.POST.get('id_quest')).first()\n comment = QuestionComment(content=comment_form.cleaned_data.get('content'))\n comment.save()\n question.comments.add(comment)\n question.save()\n result = render(request, 'ajax_generate/comment_generate.html', {'comment':comment})\n return HttpResponse(result)\n else:\n result = 'fail'\n return HttpResponse(result)\n else:\n result = 'fail'\n return HttpResponse(result)\n\n\ndef question_add(request):\n if request.method == 'POST':\n question_form = WriteQuestionForm(request.POST, request.FILES)\n if question_form.is_valid():\n user = Profile.objects.filter(user=request.user).first()\n content = question_form.cleaned_data.get('support_text')\n file = question_form.cleaned_data.get('file')\n question = UserQuestion.objects.create(content=content,\n file=file,\n user=user,\n is_read='force read')\n url = reverse('profile', kwargs={'active':'mess'})\n return HttpResponseRedirect(url)\n else:\n return HttpResponseBadRequest()\n else:\n return HttpResponseBadRequest()\n\n\ndef question_generate(request):\n try:\n page = request.GET.get('page')\n start = int(page) * 8 - 8\n end = int(page) * 8\n user = Profile.objects.filter(user=request.user).first()\n questions = UserQuestion.objects.filter(user=user).order_by('updated_at').reverse()[start:end]\n str1 = render_to_string('ajax_generate/quest_generate.html', {'questions':questions})\n str2 = render_to_string('ajax_generate/chat_generate.html', {'questions':questions})\n result = 'ёёёёё'.join([str1, str2])\n return HttpResponse(result)\n except:\n result = 'fail'\n return 
HttpResponse(result)\n","sub_path":"ExFin/content/views/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":12700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"203394544","text":"class Solution:\n    def arraysIntersection_brute_force(self, arr1, arr2, arr3):\n        helper = [0] * 2000\n        for itm in arr1:\n            helper[itm - 1] += 1\n        for itm in arr2:\n            helper[itm - 1] += 1\n        for itm in arr3:\n            helper[itm - 1] += 1\n\n        cnt = 0\n        for itm in helper:\n            if itm == 3:\n                cnt += 1\n        res = [0] * cnt\n        it = 0\n        for i in range(len(helper)):\n            if helper[i] == 3:\n                res[it] = i + 1\n                it += 1\n\n        return res\n\n    def arraysIntersection_optimal(self, arr1, arr2, arr3):\n        pass\n\n\nif __name__ == '__main__':\n    s = Solution()\n    arr1 = [1,2,3,4,5]\n    arr2 = [1,2,5,7,9]\n    arr3 = [1,3,4,5,8]\n    print(s.arraysIntersection_brute_force(arr1, arr2, arr3))","sub_path":"LeetCodeLearn/String_Array/1213.py","file_name":"1213.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"495763736","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport pandas as pd\nimport os, json\n\nclass StockEnv(gym.Env):\n\tmetadata = {'render.modes': ['human']}\n\n\tdef __init__(self):\n\t\tsuper(StockEnv, self).__init__()\n\t\tself.action_space = ['b', 'h', 's']\n\t\tself.n_actions = len(self.action_space)\n\t\tself.n_features = 2\n\t\tself.build_stock_market()\n\t\tself.amount = 100  # number of shares traded per buy order\n\n\tdef build_stock_market(self):\n\t\tself.sym = pd.read_csv(\"PTT.BK.csv\")\n\t\tself.i = 0\n\t\tself.market = self.sym.iloc[[0]]\n\t\tself.balance = 1000000\n\t\tself.equity = self.balance\n\t\tself.portfolio = pd.DataFrame(None , columns = \n\t\t[\n\t\t\"Date\",\n\t\t\"Symbol\",\n\t\t'Volume',\n\t\t\"Average Price\",\n\t\t\"Market Price\",\n\t\t\"Amount (Price)\",\n\t\t\"Market Value\",\n\t\t\"Unrealized P/L\",\n\t\t\"%Unrealized P/L\"\n\t\t])\n\t\t# self.reward = 0\n\tdef step(self, action):\n\t\tif self.market.isnull().values.any():\n\t\t\tprint(\"Today Market Closed\")\n\t\telse:\n\t\t\tif action == 0: #BUY\n\t\t\t\taverageprice = self.market['Open'][self.i]\n\t\t\t\tamountprice = self.market['Open'][self.i] * self.amount\n\t\t\t\tmarketprice = self.market['Open'][self.i]\n\t\t\t\tmarketvalue = self.market['Open'][self.i] * self.amount\n\t\t\t\tunrealized = amountprice - marketvalue\n\t\t\t\tperunrealized = (unrealized / amountprice)*100\n\t\t\t\tif self.balance < amountprice:\n\t\t\t\t\tprint(\"Not Enough Money\")\n\t\t\t\telse:\n\t\t\t\t\tself.balance = self.balance - amountprice\n\t\t\t\t\tself.equity = self.balance\n\t\t\t\t\tself.portfolio = self.portfolio.append(\n\t\t\t\t\t{'Date' : self.market['Date'][self.i] , \"Symbol\" : 'PTT' ,'Volume' : self.amount, 'Average Price' : averageprice ,'Market Price' : marketprice ,\n\t\t\t\t\t'Amount (Price)' : amountprice ,\"Market Value\" : marketvalue,\"Unrealized P/L\":unrealized,\"%Unrealized P/L\":perunrealized} \n\t\t\t\t\t, ignore_index=True)\n\t\t\t\t\tself.equity = self.equity + self.portfolio['Unrealized P/L'].sum()\n\t\t\t\t\tprint(\"Success\")\n\t\t\telif action == 1: #SELL\n\t\t\t\tself.balance = self.equity\n\t\t\t\tself.portfolio = pd.DataFrame(None , columns = [\n\t\t\t\t\"Date\",\"Symbol\",'Volume',\"Average Price\" ,\"Market Price\",\"Amount (Price)\" , \"Market Value\",\"Unrealized P/L\",\"%Unrealized P/L\"\n\t\t\t\t])\n\t\t\t\tself.equity = self.balance\n\t\t\telse: #HOLD\n\t\t\t\tprint('Hold')\n\t\t\t\tpass\n\n\n\n\t\t\tif 
self.equity - self.balance > 0:\n\t\t\t\treward = 1\n\t\t\telif self.equity == self.balance:\n\t\t\t\treward = 0\n\t\t\telse:\n\t\t\t\treward = -1\n\n \n\t\t\tif self.balance == 0:\n\t\t\t\tdone = True\n\t\t\telse:\n\t\t\t\tdone = False\n\t\t\treturn reward, done\n\t\t\t\n\tdef reset(self):\n\t\tself.i = 0\n\t\tself.market = self.sym.iloc[[0]]\n\t\tself.balance = 100000\n\t\tself.equity = self.balance\n\t\tself.portfolio = pd.DataFrame(None , columns = \n\t\t\t[\n\t\t \"Date\",\n\t\t \"Symbol\",\n\t\t 'Volume',\n\t\t \"Average Price\",\n\t\t \"Market Price\",\"Amount (Price)\",\n\t\t \"Market Value\",\n\t\t \"Unrealized P/L\",\n\t\t \"%Unrealized P/L\"\n\t\t\t])\n\t\treturn (self.balance , self.equity , self.portfolio)\n\n\tdef render(self, mode='human', close=False):\n\t\tprint(\"STOCK MARKET \\n\")\n\t\tprint(self.market.to_string())\n\t\tprint(\"-----------------------------------------------------------------------------------\")\n\t\tprint(\"\\nPORTFOLIO\\n\")\n\t\tif self.portfolio.empty:\n\t\t\tprint(\"\")\n\t\t\tprint(\"\\nCash \" , self.balance ,\" Capital \" , self.equity)\n\t\telse:\n\t\t\tprint(self.portfolio.to_string())\n\t\t\tprint(\"\\nCash \" , self.balance , \" Volume \" , self.portfolio['Volume'].sum() , \" Capital \" , self.equity ,)\n","sub_path":"gym_stock/gym_stock/envs/stock_env.py","file_name":"stock_env.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"388953639","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 12 10:59:40 2018\n\n@author: Luc Deike\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport fluids2d.backlight as backlight\nimport fluids2d.bubble_pinchoff as pinchoff\nimport fluids2d.geometry\nimport comps\nimport pims\nimport skimage.measure\nimport scipy\nimport pickle\nimport pandas as pd\n\n#folder = comps.cf('comp3d')+r'180601\\\\'\n#folder = comps.cf('comp3d')+r'180612\\\\'\n#folder = comps.cf('comp3d')+r'180620\\\\'\n#folder = comps.cf('comp3c')+r'180621\\\\'\n#folder = comps.cf('comp3c')+r'180725\\\\'\n#folder = comps.cf('comp3c')+r'180726\\\\'\n#folder = comps.cf('comp3c')+r'180731\\\\'\n#folder = comps.cf('comp3c')+r'180801\\\\'\nfolder = comps.cf('comp3c')+r'180802\\\\'\n#folder = comps.cf('comp3c')+r'180803\\\\'\n\n\n'''\nmovies from 180601\n'''\n\n## cine 4\n#case_name = r'breakup_zoomIn_viewD_fps10000_4'\n#crop_lims = [500,750,200,550]\n#breakup_frame = 974\n\n# cine 2\n#case_name = r'breakup_zoomIn_viewD_fps10000_2'\n#crop_lims = [190,475,300,550]\n#breakup_frame = 639\n\n# cine 5\n#case_name = r'breakup_zoomIn_viewD_fps10000_5'\n#crop_lims = [30,300,750,1000]\n#breakup_frame = 306\n\n# cine 7\n#case_name = r'breakup_zoomIn_viewD_fps10000_7'\n#crop_lims = [60,300,740,1000]\n#breakup_frame = 251\n\n# cine 8\n#case_name = r'breakup_zoomIn_viewD_fps10000_8'\n##def mask(im): return im[130:350,850:1150]\n#crop_lims = [130,350,850,1150]\n#breakup_frame = 348\n\n'''\nmovies from 180612\n'''\n\n# F 1\n#case_name = r'backlight_bubbleBreakup_fps50000_viewF_1'\n#crop_lims = [46,350,0,120]\n#breakup_frame = 142\n\n# F 2\n#case_name = r'backlight_bubbleBreakup_fps50000_viewF_2'\n#crop_lims = [215,450,130,400]\n#breakup_frame = 583\n\n# F 3\n#case_name = r'backlight_bubbleBreakup_fps50000_viewF_3'\n#crop_lims = [155,430,105,330]\n#breakup_frame = 1810\n\n# F 4 - needs work, issue with binary_fill_holes\n#case_name = r'backlight_bubbleBreakup_fps50000_viewF_4'\n#crop_lims = [0,200,0,130]\n#breakup_frame = 364\n\n# F 
5\n#case_name = r'backlight_bubbleBreakup_fps50000_viewF_5'\n#crop_lims = [100,210,25,210]\n#breakup_frame = 396\n\n# G 1\n#case_name = r'backlight_bubbleBreakup_fps50000_viewG_1'\n#crop_lims = [0,100,490,640]\n#breakup_frame = 903\n\n# G 2\n#case_name = r'backlight_bubbleBreakup_fps50000_viewG_2'\n#crop_lims = [0,125,80,440]\n#breakup_frame = 726\n\n# G 3\n#case_name = r'backlight_bubbleBreakup_fps50000_viewG_3'\n#crop_lims = [220,460,530,769]\n#breakup_frame = 2798\n\n# G 5\n#case_name = r'backlight_bubbleBreakup_fps50000_viewG_5'\n#crop_lims = [0,400,0,600]\n#breakup_frame = 3706\n\n# G 7\n#case_name = r'backlight_bubbleBreakup_fps50000_viewG_7'\n#crop_lims = [30,250,175,550]\n#breakup_frame = 1148\n\n# G 8\n#case_name = r'backlight_bubbleBreakup_fps50000_viewG_8'\n#crop_lims = [0,175,500,769]\n#breakup_frame = 428\n\n# G 11\n#case_name = r'backlight_bubbleBreakup_fps50000_viewG_11'\n#crop_lims = [0,300,0,270]\n#breakup_frame = 2219\n\n'''\nMovies from 180620\n'''\n\n# M 8\n#case_name = r'backlight_bubbleBreakup_fps100000_viewM_8'\n#crop_lims = [150,321,65,270]\n#breakup_frame = 2073\n\n# M 9\n#case_name = r'backlight_bubbleBreakup_fps100000_viewM_9'\n#crop_lims = [30,321,120,380]\n#breakup_frame = 4716\n\n# M 13\n#case_name = r'backlight_bubbleBreakup_fps100000_viewM_13'\n#crop_lims = [0,215,0,260]\n#breakup_frame = 4652\n\n# M 14\n#case_name = r'backlight_bubbleBreakup_fps100000_viewM_14'\n#crop_lims = [30,321,0,250]\n#breakup_frame = 4738\n\n'''\nMovies from 180621\n'''\n\n# M 16\n#case_name = r'backlight_bubbleBreakup_fps100000_viewM_16'\n#crop_lims = [0,350,240,513]\n#breakup_frame = 7196\n\n'''\nMovies from 180725\n'''\n\n# 1\n#case_name = r'backlight_breakup_pumpsSeries23V_viewF_fps34k_v1_noCap'\n#crop_lims = [400,769,400,769]\n#breakup_frame = 2626\n\n# 4\n#case_name = r'backlight_breakup_pumpsSeries23V_viewF_fps34k_v4'\n#crop_lims = [380,769,100,350]\n#breakup_frame = 2313\n\n# 9\n#case_name = r'backlight_breakup_pumpsSeries23V_viewF_fps34k_v9'\n#crop_lims = [200,768,200,768]\n#breakup_frame = 3432\n\n'''\nMovies from 180726\n'''\n\n# v1\n#case_name = r'backlight_breakup_pumpsSeries23V_viewG_fps34k_v1'\n#crop_lims = [400,769,525,769]\n#breakup_frame = 12825-11781\n\n# v2 (first)\n#case_name = r'backlight_breakup_pumpsSeries23V_viewG_fps34k_v2_twice'\n#crop_lims = [475,769,65,280]\n#breakup_frame = 15587-14035\n\n# v3\n#case_name = r'backlight_breakup_pumpsSeries23V_viewG_fps34k_v3'\n#crop_lims = [500,769,350,769]\n#breakup_frame = 12303-10485\n\n# v4\n#case_name = r'backlight_breakup_pumpsSeries23V_viewG_fps34k_v4'\n#crop_lims = [450,769,180,600]\n#breakup_frame = 17745-8733\n\n# v6\n#case_name = r'backlight_breakup_pumpsSeries23V_viewG_fps34k_v6'\n#crop_lims = [500,769,500,769]\n#breakup_frame = 18865-18094\n\n# v7\n#case_name = r'backlight_breakup_pumpsSeries23V_viewG_fps34k_v7'\n#crop_lims = [550,769,100,300]\n#breakup_frame = 4777-4419\n\n# v8\n#case_name = r'backlight_breakup_pumpsSeries23V_viewG_fps34k_v8'\n#crop_lims = [150,769,0,375]\n#breakup_frame = -16900 + 18455\n\n\n'''\nMovies from 180731\n'''\n\n# v1\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v1'\n#crop_lims = [550,769,100,550]\n#breakup_frame = 14491-13825\n\n# v2_multiple\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v2_multiple'\n#crop_lims = [160,769,25,375]\n#breakup_frame = 17141-11535\n\n# v3_multiple\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v3_multiple'\n#crop_lims = [450,769,0,250]\n#breakup_frame = 31003-30358\n\n# v4\n#case_name = 
r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v4'\n#crop_lims = [70,769,80,600]\n#breakup_frame = 23977-13893\n\n# v5\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v5'\n#crop_lims = [450,769,0,260]\n#breakup_frame = 31574 - 28186\n\n# v6\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v6'\n#crop_lims = [300,769,400,715]\n#breakup_frame = 20175-15664\n\n# v7\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v7'\n#crop_lims = [350,769,80,520]\n#breakup_frame = 16964 - 14927\n\n# v9\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v9'\n#crop_lims = [450,769,0,200]\n#breakup_frame = 21084 - 19348\n\n# v11 -- can get more if another bubble is filtered out!\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v11'\n#crop_lims = [245,560,495,680]\n#breakup_frame = 46832 - 43334\n\n# v12\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v12'\n#crop_lims = [620,769,180,320]\n#breakup_frame = 37849 - 37628 \n\n# v13\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v13'\n#crop_lims = [250,769,210,540]\n#breakup_frame = 2017+510\n\n# v15\n#case_name = r'backlight_breakup_pumpsSeries23V_viewH_fps34k_v15'\n#crop_lims = [480,769,400,600]\n#breakup_frame = 19190-16935\n\n'''\n180731 - 50k\n'''\n\n# v1\n#case_name = r'backlight_breakup_pumpsSeries23V_viewI_fps50k_v1'\n#crop_lims = [220,481,0,250]\n#breakup_frame = 32592-32441\n\n'''\n180801\n'''\n\n# 22k v1\n#case_name = r'backlight_breakup_pumpsSeries23V_viewJ_fps22k_v1'\n#crop_lims = [470,801,650,975]\n#breakup_frame = 19847-19630\n\n# 22k v2\n#case_name = r'backlight_breakup_pumpsSeries23V_viewJ_fps22k_v2'\n#crop_lims = [30,801,860,1281]\n#breakup_frame = 12604-11703\n\n# 22k v4\n#case_name = r'backlight_breakup_pumpsSeries23V_viewJ_fps22k_v4'\n#crop_lims = [420,801,700,1070]\n#breakup_frame = 12735-12014\n\n# 50k v1\n#case_name = r'backlight_breakup_pumpsSeries23V_viewJ_fps50k_v1'\n#crop_lims = [50,481,510,720]\n#breakup_frame = 25341-24304\n\n# 100k v1\n#case_name = r'backlight_breakup_pumpsSeries23V_viewJ_fps100k_v1'\n#crop_lims = [0,321,0,513]\n#breakup_frame = 84096-82421\n\n# 100k v2\n#case_name = r'backlight_breakup_pumpsSeries23V_viewJ_fps100k_v2'\n#crop_lims = [0,321,0,513]\n#breakup_frame = 48566-46899\n\n'''\n180802\n'''\n\n# 22k v1\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v1'\n#crop_lims = [0,550,130,570]\n#breakup_frame = 11500-10810\n\n# 22k v5\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v5'\n#crop_lims = [100,801,140,1180]\n#breakup_frame = 11879-9442\n\n# 22k v8\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v8'\n#crop_lims = [200,801,0,370]\n#breakup_frame = 12093-11245\n\n# 22k v9\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v9'\n#crop_lims = [370,700,600,1281]\n#breakup_frame = 11506-9819\n\n# 22k v10\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v10'\n#crop_lims = [0,600,760,1070]\n#breakup_frame = 3079-2414\n\n# 22k v12\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v12'\n#crop_lims = [170,500,0,450]\n#breakup_frame = 12253-10712\n\n# 22k v13\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v13'\n#crop_lims = [150,510,730,1050]\n#breakup_frame = 16190-14955\n\n# 22k v16\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v16'\n#crop_lims = [0,360,170,700]\n#breakup_frame = 10849-10079\n\n# 22k v18\n#case_name = 
r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v18_multiple'\n#crop_lims = [0,801,0,580]\n#breakup_frame = 11031-10847\n\n# 22k v20\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewK_fps22k_v20_multiple'\n#crop_lims = [0,650,0,530]\n#breakup_frame = 18575-18029\n\n'''\n180803\n'''\n\n# 12v v3\n#case_name = r'backlight_breakup_pumpsSeries12V_closer_viewL_fps34k_v3'\n#crop_lims = [100,410,0,300]\n#breakup_frame = 22432-19036\n\n# 12v v5\n#case_name = r'backlight_breakup_pumpsSeries12V_closer_viewL_fps34k_v5'\n#crop_lims = [400,769,0,470]\n#breakup_frame = 15905-10983\n\n# 12v v6\n#case_name = r'backlight_breakup_pumpsSeries12V_closer_viewL_fps34k_v6'\n#crop_lims = [160,370,0,240]\n#breakup_frame = 19667-16264\n\n# 12v v7\n#case_name = r'backlight_breakup_pumpsSeries12V_closer_viewL_fps34k_v7'\n#crop_lims = [0,600,580,769]\n#breakup_frame = 29985-23404\n\n# 12v v9\n#case_name = r'backlight_breakup_pumpsSeries12V_closer_viewL_fps34k_v9'\n#crop_lims = [120,380,0,140]\n#breakup_frame = 23299-22352\n\n# 18V v1\n#case_name = r'backlight_breakup_pumpsSeries18V_closer_viewL_fps34k_v1'\n#crop_lims = [0,350,0,300]\n#breakup_frame = 18737-17928\n\n# 18V v2\n#case_name = r'backlight_breakup_pumpsSeries18V_closer_viewL_fps34k_v2'\n#crop_lims = [0,380,0,400]\n#breakup_frame = 12993-11118\n\n# 18V v8\n#case_name = r'backlight_breakup_pumpsSeries18V_closer_viewL_fps34k_v8'\n#crop_lims = [500,769,0,680]\n#breakup_frame = 23199-18426\n\n# 18V v12\n#case_name = r'backlight_breakup_pumpsSeries18V_closer_viewL_fps34k_v12'\n#crop_lims = [100,400,0,420]\n#breakup_frame = 19917-16680\n\n# 18V v13 - error\n#case_name = r'backlight_breakup_pumpsSeries18V_closer_viewL_fps34k_v13'\n#crop_lims = [200,769,0,350]\n#breakup_frame = 43887-38304\n\n# 18V v10\n#case_name = r'backlight_breakup_pumpsSeries18V_closer_viewL_fps34k_v10_multiple'\n#crop_lims = [230,590,30,250]\n#breakup_frame = 23882-21316\n\n# 18V v14\n#case_name = r'backlight_breakup_pumpsSeries18V_closer_viewL_fps34k_v14_multiple'\n#crop_lims = [60,580,500,769]\n#breakup_frame = 7346-5280\n\n# 23v v3\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewL_fps34k_v3'\n#crop_lims = [330,530,0,250]\n#breakup_frame = 28544-27487\n\n# 23v v4\n#case_name = r'backlight_breakup_pumpsSeries23V_closer_viewL_fps34k_v4'\n#crop_lims = [330,530,0,250]\n#breakup_frame = 11050-10431\n\n'''\nCalibration\n'''\n#dx = 4.9554007791E-05 # viewF\n#dx = 4.9382565516E-05 # viewG\n#dt = 1./50000\n#dx = 5.9288074428E-05 # 180620, viewM\n#dt = 1./100000\n#dx = 7.2896637871E-05 # 180726, viewG\n#dt = 1./34000\n\n#dx = 7.3289853679E-05 # 180731, viewH\n#dt = 1./34000\n\n#dx = 2.7262229956E-05 # 180731, viewI\n#dt = 1./50000\n\n#dx = 3.9627508487E-05 # 180801, viewJ\n#dt = 1./22000\n\n#dx = 3.3353495645E-05 # 180802, viewK\n#dt = 1./22000\n\n#dx = 7.3469357156E-05 # 180803, viewL\n#dt = 1./34000\n\nmeta = pd.read_csv(r'file:///C:\\Users\\user\\Documents\\2d-fluids-analysis\\scripts\\\\bubble_pinchoff_processing_metadata.csv')\ncase_name = r'backlight_cavityCollapse_fps22k_noPumps_washer1_heightHalf_v2'\ndate = 180814\nmeta_case = meta[meta['case_name']==case_name]\nif len(meta_case)!=1:\n raise ValueError('name is not unique or defined')\nfolder = comps.cf(meta_case['data_loc'].iloc[0])+str(meta_case['date_folder'].iloc[0])+r'\\\\'\ndx = meta_case['dx'].iloc[0]\ndt = 1./meta_case['fps'].iloc[0]\nbreakup_frame = meta_case['rel_breakup_frame'].iloc[0]-meta_case['initial_frame'].iloc[0]\ncrop_lims = 
[int(meta_case['top'].iloc[0]),int(meta_case['bottom'].iloc[0]),int(meta_case['left'].iloc[0]),int(meta_case['right'].iloc[0])]\n\n'''\nRun\n'''\n\nc = pims.open(folder+case_name+'.cine')\n\nbg = backlight.construct_bg_image(c,n_frames=20)\n\nif True:\n '''\n get and save the data\n '''\n\n thresh = -50\n \n c = pims.open(folder+case_name+'.cine')\n \n bg = backlight.construct_bg_image(c,n_frames=20)\n \n bt = pinchoff.BreakupTrackedInfo(folder,case_name,breakup_frame,crop_lims,bg,dx,dt)\n \n #d_frames = np.arange(1,22480-21316-1,1)*-1\n d_frames = np.unique(np.geomspace(1,500,500,dtype=int))*-1 # np.flipud(\n\n points = []\n dists = []\n contours = []\n L_c = []\n major = []\n minor = []\n largest_dist = []\n largest_dist_from_neck = []\n area = []\n df_split = []\n \n fig = plt.figure(figsize=(8,8))\n ax_actual = fig.add_subplot(111)\n figfolder = r'\\\\Mae-deike-lab3\\c\\Users\\Luc Deike\\data_comp3_C\\180621\\M_16_frames\\\\'\n \n for dfi,df in enumerate(d_frames):\n \n print(df)\n \n #if np.random.rand()>0.9 or dfi<10:\n if True:\n ax = ax_actual\n ax.clear()\n else:\n ax = None\n\n print('...getting the region props')\n im = bt.get_im_rel_breakup(df,c=c)\n filled = backlight.get_filled(im,thresh,filter_size=1)\n props = backlight.filled2regionpropsdf(filled,g=None,min_area=0,frame=None)\n ix_use = props['filled_area'].idxmax()\n props = props.loc[ix_use] # only need the region with the largest area\n major.append(props['major_axis_length'])\n minor.append(props['minor_axis_length'])\n area.append(props['filled_area'])\n\n \n print('...getting the contour')\n contour = pinchoff.get_contour(im,ax=ax,thresh=-25)\n contours.append(contour.copy())\n \n if dfi>0 and (np.isnan(dist)==False):\n '''\n If there was a valid neck found previously, limit where in the image\n it can find a neck in this frame\n '''\n target_loc = np.mean(neck_points,axis=0)\n target_dist = 10+dist/1.5\n #target_dist = np.min([10+dist/2.,40])\n else:\n plt.show()\n target_loc = np.flipud(plt.ginput(1,timeout=-1)[0])\n target_dist=50\n #target_loc = None\n #target_dist = None\n \n print('...getting the neck')\n dist,neck_points = pinchoff.get_neck(contour,np.shape(im),ax=ax,target_loc=target_loc,target_dist=target_dist)\n print('...getting L_c')\n L_c_thisFrame,intersection_points = pinchoff.get_Lc(contour,neck_points,ax=ax)\n print('...done')\n \n \n dists.append(dist)\n points.append(neck_points)\n L_c.append(L_c_thisFrame)\n \n largest_dist.append(np.max(scipy.spatial.distance.cdist(contour,contour)))\n \n if dist is not np.nan:\n objects, num_objects = scipy.ndimage.label(filled)\n objects = objects==ix_use+1\n split_im = pinchoff.divide_region(objects,neck_points)\n df_split.append(backlight.labeled_props(split_im,None))\n largest_dist_from_neck.append(np.max(scipy.spatial.distance.cdist(contour,neck_points)))\n else:\n df_split.append(None)\n largest_dist_from_neck.append(np.nan)\n \n print(dist,L_c_thisFrame)\n\n if ax is not None:\n #ax.set_xlim([0,272])\n #ax.set_ylim([320,-1])\n ax.set_title('$t_\\mathrm{b} - t = $'+str(-1*df*dt*1000.)+str(' ms'))\n #fig.savefig(figfolder+'frame_'+str(df)+'.png')\n plt.show()\n plt.pause(.1)\n #_ = input('go on ')\n \n other_params = {'major':np.array(major),'minor':np.array(minor),'largest_dist':np.array(largest_dist),'largest_dist_from_neck':np.array(largest_dist_from_neck),'L_c':np.array(L_c),'area':np.array(area),'df_split':df_split}\n bt.enter_data(contours,points,dists,d_frames,L_c=L_c,other_params=other_params) \n \n plt.figure()\n plt.plot(bt.t,bt.dists_m)\n 
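# (added comment) bt.dists_m is the tracked neck width already converted to metres
    # (hence the _m suffix), while L_c here is still in pixels, so it is scaled by dx
    # (metres per pixel) in the next call before both length scales share the same axes.
    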
plt.plot(bt.t,np.array(L_c)*dx)\n \n #stophere\n \n pickle.dump(bt,open(folder+case_name+'_breakupInfo.pkl','wb'))\n \n fig = plt.figure()\n ax = fig.add_subplot(111) \n ax.loglog(bt.t*-1,bt.dists_m,'-x',label='$d_0$')\n ax.loglog(bt.t*-1,np.array(L_c)*dx,'-x',label='$L_\\mathrm{c}$')\n ax.loglog(bt.t*-1,bt.other_params['largest_dist']*dx,'-x',label='largest dist')\n ax.loglog(bt.t*-1,bt.other_params['largest_dist_from_neck']*dx,'-x',label='largest dist from neck')\n ax.loglog(bt.t*-1,np.sqrt(np.array(bt.other_params['area'])/np.pi)*dx,'-x',label='$r_\\mathrm{b}$')\n ax.loglog(bt.t*-1,bt.other_params['major']*dx,'-x',label='major')\n ax.loglog(bt.t*-1,bt.other_params['minor']*dx,'-x',label='minor')\n #ax.loglog([10**-4,10**-3],[10**-4,10**(-4+.52)],'--')\n ax.legend() \n ax.set_xlabel('$t - t_\\mathrm{b}$ [s]')\n ax.set_ylabel('$L$ [m]')\n \n gamma = bt.dists / np.sqrt(bt.dists*np.array(L_c))\n plt.figure()\n plt.loglog(bt.t*-1,gamma)\n plt.xlabel('$t - t_\\mathrm{b}$ [s]')\n plt.ylabel('$\\gamma$')\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n d_0 = 0.006\n epsilon = 2000./100**2\n dVdt = 2.**(-1./2) * 3.**(1./3) * d_0**(7./3) * epsilon**(1./3)\n u_neck = dVdt / (np.pi * (bt.dists_m/2.)**2 )\n u_sound = 340.\n ax.loglog(bt.t*-1,u_neck/u_sound,label='$Ma_\\mathrm{g,neck}$')\n \n rho_air = 1.2\n mu_air = 1.8e-5 # kg/(m*s)\n Re_neck = rho_air*bt.dists_m*u_neck/mu_air\n ax.loglog(bt.t*-1,Re_neck,'--',label='$Re_\\mathrm{g,neck}$')\n \n ax.legend()\n ax.set_xlabel('$t - t_\\mathrm{b}$ [s]')\n fig.tight_layout()\n \n stophere\n \nif True:\n '''\n deformation over time\n '''\n \n thresh = -20 \n #c = pims.open(folder+case_name+'.cine') \n #bg = backlight.construct_bg_image(c,n_frames=20)\n \n d_frames = np.arange(breakup_frame*-1,0,20)\n b = pinchoff.BreakupTrackedInfo(folder,case_name,breakup_frame,crop_lims,bg,dx,dt)\n #d_frames = np.unique(np.flipud(np.geomspace(1,1500,1,dtype=int))*-1)\n \n fig = plt.figure(figsize=(9,7))\n ax = fig.add_subplot(111)\n \n points = []\n dists = []\n contours = []\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n data = pd.DataFrame()\n \n epsilon = 2000./100**2\n \n for df in d_frames:\n \n print(df)\n \n ax.clear()\n #ax=None\n \n im = b.get_im_rel_breakup(df,c=c)\n filled = backlight.get_filled(im,thresh,filter_size=1)\n ellipses = backlight.filled2regionpropsdf(filled)\n \n i_use = ellipses['filled_area'].argmax()\n \n\n \n ellipses['orientation'] = ellipses['orientation']\n \n for val in ['y','x','orientation','major_axis_length','minor_axis_length','eccentricity','filled_area']:\n data.loc[df,val] = ellipses.loc[i_use,val]\n \n# ax.imshow(im)\n# ax.plot(ellipses.loc[i_use,'x'],ellipses.loc[i_use,'y'],'x') \n# backlight.add_ellipses_to_ax(ellipses[ellipses.index==i_use],ax)\n# plt.show()\n# plt.pause(0.1)\n \n r_0 = np.sqrt(data['filled_area'].mean())/np.pi * dx\n \n t_lam = 2.*r_0 / (np.sqrt(2) * (epsilon*2*r_0)**(1./3))\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(data.index*dt*-1/t_lam,data['major_axis_length']/data['minor_axis_length'])\n ax.set_xlabel('$(t_\\mathrm{b} - t)/t_\\lambda$ [s]')\n ax.set_ylabel('$d_1/d_2$')\n fig.tight_layout()\n fig.savefig(folder+case_name+'_elongation.pdf')\n\n \nif False:\n '''\n individual plots with images\n '''\n \n d_frames = np.arange(-100,0)\n \n bu = pickle.load(open(folder+case_name+'_breakupInfo.pkl','rb'))\n \n c = pims.open(folder+case_name+'.cine')\n bg = backlight.construct_bg_image(c,n_frames=20)\n \n crop_lims = bu.crop_lims\n def mask(im):\n return 
im[crop_lims[0]:crop_lims[1],crop_lims[2]:crop_lims[3]]\n \n dists = np.array(bu.dists)*dx\n times = d_frames*dt\n \n fig = plt.figure(figsize=(7,5))\n ax = fig.add_subplot(111)\n \n ax.loglog(times*-1,dists,'x',color='k')\n ax.set_xlabel('$t - t_\\mathrm{b}$ [s]')\n ax.set_ylabel('$d_\\mathrm{neck}$ [m]')\n ax.set_title(case_name)\n \n fig.tight_layout()\n \n go_on = True \n while go_on:\n \n print('Click on the time at which to show the image.')\n t,_ = plt.ginput(1,timeout=0)[0]\n f = np.argmin(np.abs(times+t))\n df = d_frames[f]\n print('... will show df = '+str(df))\n \n print('Click where to show the image.')\n ax_coords = plt.ginput(1,timeout=0)[0]\n disp_coords = ax.transData.transform(ax_coords)\n fig_coords = fig.transFigure.inverted().transform(disp_coords)\n \n # show the image\n ax_size = .2\n ax_im = fig.add_axes([fig_coords[0]-ax_size/2.,fig_coords[1]-ax_size/2.,ax_size,ax_size])\n ax_im.imshow(mask(c[bu.breakup_frame+df]-bg),cmap='gray',vmin=-300,vmax=0)\n ax_im.plot(bu.contours[f][:,1],bu.contours[f][:,0],'-',color='r')\n ax_im.spines['bottom'].set_color('b')\n ax_im.spines['top'].set_color('b') \n ax_im.spines['right'].set_color('b')\n ax_im.spines['left'].set_color('b')\n ax_im.set_xticks([])\n ax_im.set_yticks([])\n if bu.amins[f] is not None:\n '''\n Draw the neck\n '''\n amin = bu.amins[f]\n contour = bu.contours[f]\n ax_im.plot([contour[amin[1],1],contour[amin[0],1]],[contour[amin[1],0],contour[amin[0],0]],color='cyan')\n \n # draw an arrow from the image to the point\n ax.plot([ax_coords[0],t],[ax_coords[1],dists[f]],color='b')\n \n plt.show()\n plt.pause(0.1)\n \n go_on = input('Enter 1 to continue. ')\n fig.savefig(folder+case_name+'_plot.pdf')\n\n\nif False:\n \n '''\n Animation with all the cases together\n '''\n \n #cine_nums = [2,4,5,7,8]\n #breakups = [pickle.load(open(folder+r'breakup_zoomIn_viewD_fps10000_'+str(i)+'_breakupInfo.pkl','rb')) for i in cine_nums]\n \n cine_nums = [1,2,3,5]\n breakups = [pickle.load(open(folder+r'backlight_bubbleBreakup_fps50000_viewF_'+str(i)+'_breakupInfo.pkl','rb')) for i in cine_nums]\n cine_nums = [1,2,3,7,8,11]\n breakups = breakups + [pickle.load(open(folder+r'backlight_bubbleBreakup_fps50000_viewG_'+str(i)+'_breakupInfo.pkl','rb')) for i in cine_nums]\n \n for bi,bu in enumerate(breakups):\n \n np.savetxt(folder+'breakup_number_'+str(bi)+'.txt',np.array(bu.dists)*dx)\n \n stophere\n \n \n colors = ['r','b','g','orange','purple','magenta','yellow','brown','gray','olive']\n \n fig = plt.figure(figsize=(15,6))\n \n ax_plot = fig.add_subplot(1,3,3)\n \n nrows=3\n ncols=6\n axs_im = [fig.add_subplot(nrows,ncols,1),\n fig.add_subplot(nrows,ncols,2),\n fig.add_subplot(nrows,ncols,3),\n fig.add_subplot(nrows,ncols,4),\n fig.add_subplot(nrows,ncols,7),\n fig.add_subplot(nrows,ncols,8),\n fig.add_subplot(nrows,ncols,9),\n fig.add_subplot(nrows,ncols,10),\n fig.add_subplot(nrows,ncols,13),\n fig.add_subplot(nrows,ncols,14)]\n \n [ax.set_axis_off() for ax in axs_im]\n \n for dfi,df in enumerate(d_frames):\n \n ax_plot.clear()\n for bi,b in enumerate(breakups):\n\n '''\n plot the neck as a function of time\n ''' \n ax_plot.loglog(times*-1,np.array(b.dists)*dx,'-',color=colors[bi],alpha=0.5)\n \n '''\n Show the image and the contour\n '''\n ax = axs_im[bi]\n ax.clear()\n c = pims.open(b.folder+b.cine_name+'.cine')\n ax.imshow(b.mask(c[b.breakup_frame+df]-b.bg),cmap='gray')\n ax.plot(b.contours[dfi][:,1],b.contours[dfi][:,0],'-',color=colors[bi])\n ax.set_axis_off()\n \n if b.amins[dfi] is not None:\n '''\n Draw the neck\n '''\n amin = 
b.amins[dfi]\n contour = b.contours[dfi]\n ax.plot([contour[amin[1],1],contour[amin[0],1]],[contour[amin[1],0],contour[amin[0],0]],color='cyan')\n \n ax_plot.loglog([10**-4.5,10**-4],[5*10**-4,5*10**(-4+(2./3)*0.5)],'--',color='k')\n ax_plot.text(10**-4.5,9*10**-4,'$d \\sim -t^{2/3}$')\n ax_plot.loglog([10**-3.5,10**-3],[1*10**-3,1*10**(-3+(0.5)*0.5)],'--',color='k')\n ax_plot.text(10**-3.5,1.5*10**-3,'$d \\sim -t^{1/2}$')\n ax_plot.axvline(times[dfi]*-1,color='k')\n ax_plot.set_xlabel('$t_\\mathrm{b}-t$ [s]')\n ax_plot.set_ylabel('$d_\\mathrm{neck}$ [m]')\n \n if dfi==0:\n fig.tight_layout()\n \n #plt.show()\n #plt.pause(1)\n \n figfolder = r'C:\\Users\\Luc Deike\\data_comp3_C\\180612\\tracking_frames\\\\'\n fig.savefig(figfolder+'frame_'+str(dfi)+'.png')\n \n\nif False:\n '''\n Manual neck width\n '''\n \n c = pims.open(folder+case_name+'.cine')\n \n d_frames = range(-100,-20,5) + range(-20,0)\n d_frames = d_frames[::-1]\n \n fig = plt.figure(figsize=(9,7))\n ax = fig.add_subplot(111)\n \n points = {}\n dists = {}\n \n def mask(im):\n return im[crop_lims[0]:crop_lims[1],crop_lims[2]:crop_lims[3]]\n \n for fi,df in enumerate(d_frames):\n \n ax.clear()\n ax.imshow(mask(c[breakup_frame+df]))\n \n plt.show()\n plt.pause(0.1)\n \n clicked_points = plt.ginput(3,timeout=0,show_clicks=True)\n clicked_points = clicked_points[0:2]\n dists[df] = dist_points(clicked_points[0],clicked_points[1])\n \n points[df] = clicked_points\n \n bu = pickle.load(open(folder+case_name+'_breakupInfo.pkl','rb'))\n d_frames_bu = np.arange(-100,0)\n \n pickle.dump(dists,open(folder+case_name+'_manual_dists.pkl','wb'))\n \n fig = plt.figure()\n ax = fig.add_subplot(111) \n ax.loglog(d_frames_bu*-1,bu.dists,'x',label='automated')\n ax.loglog(np.array(dists.keys())*-1,np.array(dists.values()),'o',label='manual')\n ax.legend()\n ax.set_ylabel('neck width [pix]')\n ax.set_xlabel('frame before pinch-off')\n fig.tight_layout()\n fig.savefig(folder+case_name+'_automated_manual_comparison.pdf')\n \nif False:\n '''\n Text files for the manual comparison\n '''\n \n bu = pickle.load(open(folder+case_name+'_breakupInfo.pkl','rb'))\n d_frames_bu = np.arange(-100,0)\n \n manual = pickle.load(open(folder+case_name+'_manual_dists.pkl','rb'))\n \n df = pd.DataFrame(index=d_frames_bu)\n df['automatic'] = bu.dists\n for f in list(manual.keys()):\n df.loc[f,'manual'] = manual[f]\n \n df[pd.isnull(df)] = -1\n df.to_csv(folder+case_name+r'_automated_and_manual_points.txt',header=False,index=True)","sub_path":"scripts/bubble_breakup_pinchoff_analysis.py","file_name":"bubble_breakup_pinchoff_analysis.py","file_ext":"py","file_size_in_byte":27156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"255968188","text":"#! /usr/bin/python3 \r\nimport re\r\n\r\nfin = open('./doc/g26_project_report.tex','r')\r\nfout = open('./doc/g26_project_report.html','w');\r\nl = fout.write('\\n\\n\\n'+\r\n\t\t\t\t'g26_prof_report'+\r\n\t\t\t\t'\\n\\n');\r\nline = fin.readline()\r\nwhile(line):\r\n\tsection = re.match(r'\\\\section\\{Graph analysis\\}',line);\r\n\tif(section):\r\n\t\tl = fout.write('
<center><h1> Profiling Report for CS296 Project simulation </h1></center>');\r\n\t\tbreak;\r\n\tline = fin.readline()\r\n\r\nwhile line:\r\n\tline = fin.readline();\r\n\tsection = re.search(r'\\\\section',line);\r\n\tif section:\r\n\t\tbreak;\r\n\tif '\\\\subsection' in line:\r\n\t\tsubsection = re.findall(r'\\{([A-z0-9\\ \\,\\.\\/\\:]*)\\}',line);\r\n\t\tfout.write('<h3>'+ subsection[0] + '</h3>' );\r\n\t\tcontinue;\t\r\n\tif '\\\\begin' in line:\r\n\t\tif '{center}' in line:\r\n\t\t\tfout.write('<div align=\"center\">');\t\r\n\t\telif '{equation}' in line or '{lstlisting}' in line:\r\n\t\t\tfout.write('<pre>');\t\t\r\n\t\telse:\r\n\t\t\tfout.write('<div>');\r\n\t\tcontinue;\r\n\tif '\\\\end' in line:\r\n\t\tif '{equation}' in line or '{lstlisting}' in line:\r\n\t\t\tfout.write('</pre>');\t\r\n\t\telse:\t\r\n\t\t\tfout.write('</div>');\r\n\t\tcontinue;\t\t\t\r\n\t\r\n\tif '\\\\includegraphics' in line:\r\n\t\tfout.write('<div align=\"center\">');\r\n\t\timage = re.findall(r'\\{([A-z0-9\\/]*)\\}',line);\r\n\t\timage = image[0];\r\n\t\tstrimage= image\r\n\t\tfout.write('<img src=\"'+strimage+'.png\">');\r\n\t\tfout.write('</div>
');\r\n\t\tcontinue;\t\t\t\r\n\tcomment = re.match(r'\\%+',line);\r\n\tl = fout.write(line.replace(\"$\",\"\").replace(\"\\\\\",\"\").replace(\"`\",\"\\'\"));\r\n\t\r\nl = fout.write('</body></html>\\n');\r\nfout.close()\r\nfin.close()\r\n\r\n","sub_path":"CS296-Software-Lab/scripts/g26_gen_html.py","file_name":"g26_gen_html.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"9691641","text":"from typing import List\n\n\nclass Solution:\n    def lemonadeChange(self, bills: List[int]) -> bool:\n        a=[0,0,0,0,0]\n        for i in bills:\n            a[int(i/5)]+=1\n            a[int(i/10)]-=1\n            a[int(i/20)]-=1\n            if a[1]<0 or a[1]+2*a[2]<0:\n                return False\n        return True\n\n'''\nThe greedy strategy keeps as many 5-yuan bills as possible; if even then the\n5-yuan bills run short, correct change is definitely impossible. The check a[1]<0\nis derived under the assumption that enough 10-yuan bills are available, but in\npractice the count of 10-yuan bills, i.e. a[2], may go negative, in which case\n5-yuan bills have to fill the gap, so a[1]+2*a[2]>=0 must also hold, otherwise\ncorrect change cannot be given.\n'''","sub_path":"Week_04/柠檬水找零.py","file_name":"柠檬水找零.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"283497080","text":"import numpy as np\n\n\n# This is where you can build a decision tree for determining throttle, brake and steer\n# commands based on the output of the perception_step() function\ndef decision_step(Rover):\n\n    # Wait for vision data\n    if Rover.nav_angles is None:\n        return Rover\n\n    #\n    # Calculate state variables\n    #\n\n    # Check the space in front for obstacles and samples\n    navigable_space = len(Rover.nav_angles)\n    navigable_space_left = (Rover.nav_angles > 0).sum()\n    navigable_space_right = (Rover.nav_angles < 0).sum()\n    rock_space = len(Rover.roc_angles)\n\n    # Check for the vehicle being stuck\n    if Rover.throttle > 0 and Rover.vel < 0.1:\n        Rover.stuck_counter += 1\n    else:\n        Rover.stuck_counter = 0\n\n    #\n    # Check for transitions\n    #\n    if Rover.mode == 'forward':\n\n        # Rotate when the rover runs out of space in front\n        if navigable_space_left < Rover.stop_forward or navigable_space_right < Rover.stop_forward or \\\n                Rover.stuck_counter > 100:\n            Rover.mode = 'stop'\n\n        # Steer towards rock samples\n        elif rock_space >= Rover.go_to_sample:\n            Rover.mode = 'goto_sample'\n\n    elif Rover.mode == 'stop':\n\n        # Rotate once the rover is stopped\n        if Rover.vel < 0.2:\n            Rover.mode = 'rotate_left'\n\n    elif Rover.mode == 'rotate_left':\n\n        # Steer towards rock samples\n        if rock_space >= Rover.go_to_sample:\n            Rover.mode = 'goto_sample'\n\n        # Go forward when possible\n        elif navigable_space_left > Rover.go_forward/2 and navigable_space_right > Rover.go_forward/2:\n            Rover.mode = 'forward'\n\n    elif Rover.mode == 'goto_sample':\n\n        # Pickup sample if it is near\n        if Rover.near_sample:\n            Rover.mode = 'pickup_sample'\n\n        # # Rotate in place if we were close\n        # elif Rover.max_sample_value > 50:\n        #     Rover.mode = 'rotate_sample'\n        #     Rover.sample_start_time = Rover.total_time\n\n        # Continue normal operation (spurious input or not close enough)\n        elif rock_space <= Rover.lost_sample:\n            Rover.mode = 'forward'\n\n    # elif Rover.mode == 'rotate_sample':\n\n    #     print(Rover.max_sample_value)\n\n    #     if rock_space >= Rover.go_to_sample:\n    #         Rover.mode = 'goto_sample'\n    #     elif Rover.sample_start_time > Rover.total_time + 3:\n    #         Rover.mode = 'forward'\n\n    elif Rover.mode == 'pickup_sample':\n\n        if not Rover.near_sample:\n            Rover.mode = 'forward'\n            # Rover.max_sample_value = 0\n\n    else:\n        raise ValueError('Invalid state: ' + Rover.mode)\n\n    #\n    # Do something in each state\n    #\n    if Rover.mode == 'forward':\n\n        # Veer left/right while moving forward\n        Rover.throttle = 
Rover.throttle_set if Rover.vel < Rover.max_vel else 0\n Rover.brake = 0\n\n # Set steering based on the weighted nav_angles\n # scaled_inverse_dists = (-Rover.nav_dists / 161) + 1\n # scaled_angles = np.multiply(Rover.nav_angles * 180 / np.pi, scaled_inverse_dists)\n # Rover.steer = np.clip(np.mean(scaled_angles), -15, 15)\n\n steer = np.mean(Rover.nav_angles * 180 / np.pi)\n if navigable_space_right > Rover.stop_forward * 2:\n steer -= 3\n Rover.steer = np.clip(steer, -15, 15)\n\n elif Rover.mode == 'rotate_left':# or Rover.mode == 'rotate_sample':\n\n # Rotate in place\n Rover.throttle = 0\n Rover.brake = 0\n Rover.steer = 15\n\n elif Rover.mode == 'stop':\n\n # Stop the rover\n Rover.throttle = 0\n Rover.brake = Rover.brake_set\n Rover.steer = 0\n\n elif Rover.mode == 'goto_sample':\n\n # Rover.max_sample_value = max(Rover.max_sample_value, rock_space)\n\n # Veer towards the rock sample at half max speed\n if Rover.vel > Rover.max_vel/2:\n Rover.throttle = 0\n Rover.brake = Rover.brake_set\n else:\n Rover.throttle = Rover.throttle_set/4\n Rover.brake = 0\n\n # Calculate steering angle based on both the rock sample heading and the terrain\n roc_steer = np.mean(Rover.roc_angles * 180 / np.pi)\n nav_steer = np.mean(Rover.nav_angles * 180 / np.pi)\n\n alpha = 0.65\n mix_steer = roc_steer * alpha + (1 - alpha) * nav_steer\n Rover.steer = np.clip(mix_steer, -15, 15)\n\n elif Rover.mode == 'pickup_sample':\n\n Rover.throttle = 0\n Rover.brake = Rover.brake_set\n Rover.steer = 0\n\n if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:\n Rover.send_pickup = True\n\n else:\n raise ValueError('Invalid state: ' + Rover.mode)\n\n print('\\n' + Rover.mode)\n print(Rover.throttle, Rover.brake, Rover.steer, '\\n')\n return Rover\n\n\n # # If in a state where want to pickup a rock send pickup command\n # if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:\n # Rover.send_pickup = True\n","sub_path":"code/decision.py","file_name":"decision.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"647869275","text":"# Merge sort\n\ndef mergesort(nums):\n out = list(nums)\n mergesplit(out, 0, len(nums))\n return out\n\ndef mergesplit(nums, start, end):\n\n # Recursion end condition: 0-1 member interval\n if end-start < 2:\n return None\n\n # Else recursively split and merge\n mid = int( (start+end)/2 )\n mergesplit(nums, start, mid)\n mergesplit(nums, mid, end)\n mergemerge(nums, start, mid, end)\n\ndef mergemerge(nums, start, mid, end):\n tmp = list(nums)\n i1 = start\n i2 = mid\n for i3 in range(start,end):\n if i1 < mid and (end <= i2 or tmp[i1] < tmp[i2]):\n nums[i3] = tmp[i1]\n i1 += 1\n else:\n nums[i3] = tmp[i2]\n i2 += 1\n\n# Quicksort\n\ndef swap(arr, i1, i2):\n if not i1 == i2:\n tmp = arr[i1]\n arr[i1] = arr[i2]\n arr[i2] = tmp\n\ndef quicksort(nums):\n out = list(nums)\n quicksort_helper(out, 0, len(nums))\n return out\n\ndef quicksort_helper(arr, start, end):\n if start < end:\n p = partition(arr, start, end)\n quicksort_helper(arr, start, p)\n quicksort_helper(arr, p+1, end)\n\ndef partition(arr, start, end):\n # Start p at the lowest i where arr[i] < arr[end-1]\n p = start - 1\n # For each i in range except pivot\n for i in range(start, end-1):\n # If arr[i] less than pivot, swap with next low index\n if arr[i] < arr[end-1]:\n p += 1\n swap(arr, p, i) \n swap(arr, p+1, end-1)\n return p+1\n\ndef main():\n\n a = [3, 9, 2, 5]\n\n print()\n print(\"Merge\")\n print()\n 
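# (added) quick sanity check: both routines return new sorted lists and should
    # agree with Python's built-in sorted() on the sample input defined above.
    assert mergesort(a) == sorted(a)
    assert quicksort(a) == sorted(a)
    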
print(mergesort(a))\n print(mergesort([]))\n print(mergesort([1,2,3,4]))\n print(mergesort([4,3,2,1]))\n print(mergesort([4,3,2,4]))\n print()\n\n print()\n print('Quick')\n print()\n print(quicksort(a))\n print(quicksort([]))\n print(quicksort([1,2,3,4]))\n print(quicksort([4,3,2,1]))\n print(quicksort([4,3,2,4]))\n print()\n\nif __name__ == '__main__':\n main()\n","sub_path":"algorithms/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"128977692","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport time\nimport random\nimport datetime\n\nimport quepy\nfrom SPARQLWrapper import SPARQLWrapper, JSON\n\nsparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\ndbpedia = quepy.install(\"PropertyMapper\")\n#dbpedia = quepy.install('dbpedia')\nprint(\"dbpedia endpoint\", dbpedia)\n\nclass QuepyMain():\n\n def print_define(self, results, target, metadata=None):\n for result in results[\"results\"][\"bindings\"]:\n if result[target][\"xml:lang\"] == \"en\":\n print(result[target][\"value\"].encode('utf-8').strip())\n print()\n\n\n def print_enum(self, results, target, metadata=None):\n used_labels = []\n\n for result in results[\"results\"][\"bindings\"]:\n if result[target][\"type\"] == u\"literal\":\n if result[target][\"xml:lang\"] == \"en\":\n label = result[target][\"value\"]\n if label not in used_labels:\n used_labels.append(label)\n print(label)\n\n\n def print_literal(self, results, target, metadata=None):\n for result in results[\"results\"][\"bindings\"]:\n literal = result[target][\"value\"]\n if metadata:\n print(metadata.format(literal))\n else:\n print(literal)\n\n\n def print_time(self, results, target, metadata=None):\n gmt = time.mktime(time.gmtime())\n gmt = datetime.datetime.fromtimestamp(gmt)\n\n for result in results[\"results\"][\"bindings\"]:\n offset = result[target][\"value\"].replace(u\"−\", u\"-\")\n\n if (\"to\" in offset) or (\"and\" in offset):\n if \"to\" in offset:\n connector = \"and\"\n from_offset, to_offset = offset.split(\"to\")\n else:\n connector = \"or\"\n from_offset, to_offset = offset.split(\"and\")\n\n from_offset, to_offset = int(from_offset), int(to_offset)\n\n if from_offset > to_offset:\n from_offset, to_offset = to_offset, from_offset\n\n from_delta = datetime.timedelta(hours=from_offset)\n to_delta = datetime.timedelta(hours=to_offset)\n\n from_time = gmt + from_delta\n to_time = gmt + to_delta\n\n location_string = random.choice([\"where you are\",\n \"your location\"])\n\n print(\"Between %s %s %s, depending on %s\" % \\\n (from_time.strftime(\"%H:%M\"),\n connector,\n to_time.strftime(\"%H:%M on %A\"),\n location_string))\n\n else:\n offset = int(offset)\n\n delta = datetime.timedelta(hours=offset)\n the_time = gmt + delta\n\n print(the_time.strftime(\"%H:%M on %A\"))\n\n\n def print_age(self, results, target, metadata=None):\n assert len(results[\"results\"][\"bindings\"]) == 1\n\n birth_date = results[\"results\"][\"bindings\"][0][target][\"value\"]\n year, month, days = birth_date.split(\"-\")\n\n birth_date = datetime.date(int(year), int(month), int(days))\n\n now = datetime.datetime.utcnow()\n now = now.date()\n\n age = now - birth_date\n print(\"{} years old\".format(age.days / 365))\n\n\n def wikipedia2dbpedia(self, wikipedia_url):\n \"\"\"\n Given a wikipedia URL returns the PropertyMapper resource\n of that page.\n \"\"\"\n\n query = \"\"\"\n PREFIX foaf: \n SELECT * WHERE {\n ?url 
foaf:isPrimaryTopicOf <%s>.\n }\n \"\"\" % wikipedia_url\n\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n\n if not results[\"results\"][\"bindings\"]:\n print(\"Snorql URL not found\")\n sys.exit(1)\n else:\n return results[\"results\"][\"bindings\"][0][\"url\"][\"value\"]\n\n\n","sub_path":"AlgorithmQuestionAnswering/QuepyTest.py","file_name":"QuepyTest.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"548832935","text":"def wykaz(lista,slownik):\r\n for i in range(len(lista)):\r\n print(lista[i],slownik[listakluczy[i]])\r\n\r\n\r\nslownik = {\"banan\":\"2zl\",\"gruszka\":\"3zl\",\"mango\":\"5zl\",\"papaja\":\"7zl\", \"avocado\":\"10zl\",\"ananas\":\"9zl\",\"kiwi\":\"4zl\"}\r\nlista = [\"banan\", \"gruszka\", \"mango\", \"papaja\", \"avocado\", \"ananas\", \"kiwi\"]\r\nlistakluczy = list(slownik.keys())\r\n\r\nwykaz(lista,slownik)\r\n","sub_path":"Laborki/Ćwiczenia/2.Laborka/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"382808748","text":"# Authors of the project:\n# 1-MachnevEgor_https://vk.com/machnev_egor\n# 2-SchalimovDmitriy_https://vk.com/astronaut_without_spaceship\n# 3-ArsenyKarimov_https://vk.com/id222338543\n# 4-MihailMarkov_https://vk.com/mixxxxail\n# Contacts in email:\n# 1-meb.official.com@gmail.com\n# 2-dmitriy-shalimov@yandex.ru\n# 3-arseny.karimov@gmail.com\n# 4-mihailmarkov2004@gmail.com\n\n# import module\nimport openpyxl\n\n# the path to the branching database\nexcel_database_source = \"workWithExcelFile/excelDatabase\"\n\n\n# data search and processing\ndef selective_data_search(excel_source, columns, extra_cells, sheet_name, start_data, end_data):\n # output variables - declaration\n global output_day_schedule\n output_day_schedule = [\"Расписание на заданный день:\"]\n # sending data to the terminal\n print(f\"Schedule source: {excel_source}/{sheet_name}.xlsx({columns}, {extra_cells}, [{start_data}, {end_data}])\")\n # searcher logic\n try:\n # open excel file\n excel_document = openpyxl.load_workbook(f\"{excel_database_source}/{excel_source}/{sheet_name}.xlsx\")\n # days - import data from a graph and transfer it to a separate array\n days_data_array = []\n sheet = excel_document.get_sheet_by_name(sheet_name)\n for row in sheet[f\"{columns[0]}1\":f\"{columns[0]}{sheet.max_row}\"]:\n for cell in row:\n days_data_array.append(str(cell.value))\n # lessons - import data from a graph and transfer it to a separate array\n lessons_data_array = []\n sheet = excel_document.get_sheet_by_name(sheet_name)\n for row in sheet[f\"{columns[1]}1\":f\"{columns[1]}{sheet.max_row}\"]:\n for cell in row:\n lessons_data_array.append(str(cell.value))\n # cabinets - import data from a graph and transfer it to a separate array\n cabinets_data_array = []\n sheet = excel_document.get_sheet_by_name(sheet_name)\n for row in sheet[f\"{columns[2]}1\":f\"{columns[2]}{sheet.max_row}\"]:\n for cell in row:\n cabinets_data_array.append(str(cell.value))\n # search for relevant information\n lessons_output_data_array = []\n cabinets_output_data_array = []\n for quantity_checks in range(len(days_data_array)):\n if days_data_array[quantity_checks].lower() == start_data.lower():\n for quantity_recording_data in range(len(lessons_data_array) - quantity_checks - 1 - extra_cells):\n if lessons_data_array[\n quantity_recording_data + 
quantity_checks + 1 + extra_cells].lower() == end_data.lower():\n break\n # writing the necessary information to separate arrays\n lessons_output_data_array.append(\n (lessons_data_array[quantity_recording_data + quantity_checks + 1 + extra_cells]).title())\n if cabinets_data_array[quantity_recording_data + quantity_checks + 1 + extra_cells] == \"None\":\n cabinets_output_data_array.append(\"Узнавать у классного руководителя\")\n else:\n try:\n cabinets_output_data_array.append(int(float(cabinets_data_array[\n quantity_recording_data + quantity_checks + 1 + extra_cells])))\n except Exception as E:\n cabinets_output_data_array.append(cabinets_data_array[\n quantity_recording_data + quantity_checks + 1 + extra_cells].title())\n # the preparation of a reply\n for quantity_transfers in range(len(lessons_output_data_array)):\n output_day_schedule.append(\n f\"{quantity_transfers + 1}. {lessons_output_data_array[quantity_transfers]}({cabinets_output_data_array[quantity_transfers]})\")\n if output_day_schedule == [\"Расписание на заданный день:\"]:\n output_day_schedule = \"Кажись в этот день технопарк🙃\"\n else:\n output_day_schedule = \"\\n\".join(output_day_schedule)\n except Exception as E:\n # sending data to the terminal\n print(f\"!!! ERROR: Broken user data for excel searcher !!!\")\n print(f\"Reason: {E}\")\n output_day_schedule = \"Очень странно - ты есть в базе, но некоторые данные неправильные. Напиши в основную беседу, прикрепленную к сообществу - там тебе помогут решить данную проблему😬\"\n\n# Authors of the project:\n# 1-MachnevEgor_https://vk.com/machnev_egor\n# 2-SchalimovDmitriy_https://vk.com/astronaut_without_spaceship\n# 3-ArsenyKarimov_https://vk.com/id222338543\n# 4-MihailMarkov_https://vk.com/mixxxxail\n# Contacts in email:\n# 1-meb.official.com@gmail.com\n# 2-dmitriy-shalimov@yandex.ru\n# 3-arseny.karimov@gmail.com\n# 4-mihailmarkov2004@gmail.com\n","sub_path":"botCode/workWithExcelFile/ExcelSearcher.py","file_name":"ExcelSearcher.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"326679884","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport copy\n\nfrom pipeline.param.base_param import BaseParam\nfrom pipeline.param.encrypt_param import EncryptParam\nfrom pipeline.param import consts\n\n\nclass TransformParam(BaseParam):\n \"\"\"\n Define how to transfer the cols\n\n Parameters\n ----------\n transform_cols : list of column index, default: -1\n Specify which columns need to be transform. If column index is None, None of columns will be transformed.\n If it is -1, it will use same columns as cols in binning module.\n\n Note tha columns specified by `transform_cols` and `transform_names` will be combined.\n\n transform_names: list of string, default: []\n Specify which columns need to calculated. 
Each element in the list represents a column name in the header.\n\n        Note that columns specified by `transform_cols` and `transform_names` will be combined.\n\n\n    transform_type: str, 'bin_num' or 'woe' or None, default: 'bin_num'\n        Specify which value these columns are going to be replaced with.\n        1. bin_num: Transfer the original feature value to the index of the bin this value belongs to.\n        2. woe: This is valid for the guest party only. It will replace the original value with its woe value.\n        3. None: nothing will be replaced.\n    \"\"\"\n\n    def __init__(self, transform_cols=-1, transform_names=None, transform_type=\"bin_num\"):\n        super(TransformParam, self).__init__()\n        self.transform_cols = transform_cols\n        self.transform_names = transform_names\n        self.transform_type = transform_type\n\n    def check(self):\n        descr = \"Transform Param's \"\n        if self.transform_cols is not None and self.transform_cols != -1:\n            self.check_defined_type(self.transform_cols, descr, ['list'])\n        self.check_defined_type(self.transform_names, descr, ['list', \"NoneType\"])\n        if self.transform_names is not None:\n            for name in self.transform_names:\n                if not isinstance(name, str):\n                    raise ValueError(\"Elements in transform_names should be string type\")\n        self.check_valid_value(self.transform_type, descr, ['bin_num', 'woe', None])\n\n\nclass OptimalBinningParam(BaseParam):\n    \"\"\"\n    Indicate optimal binning params\n\n    Parameters\n    ----------\n    metric_method: str, default: \"iv\"\n        The algorithm metric method. Supports iv, gini, ks and chi-square\n\n\n    min_bin_pct: float, default: 0.05\n        The minimum percentage of each bucket\n\n    max_bin_pct: float, default: 1.0\n        The maximum percentage of each bucket\n\n    init_bin_nums: int, default: 1000\n        Number of bins when initializing\n\n    mixture: bool, default: True\n        Whether each bucket needs both event and non-event records\n\n    init_bucket_method: str, default: quantile\n        Init bucket method. 
Accepts quantile and bucket.\n\n    \"\"\"\n\n    def __init__(self, metric_method='iv', min_bin_pct=0.05, max_bin_pct=1.0,\n                 init_bin_nums=1000, mixture=True, init_bucket_method='quantile'):\n        super().__init__()\n        self.init_bucket_method = init_bucket_method\n        self.metric_method = metric_method\n        self.max_bin = None\n        self.mixture = mixture\n        self.max_bin_pct = max_bin_pct\n        self.min_bin_pct = min_bin_pct\n        self.init_bin_nums = init_bin_nums\n        self.adjustment_factor = None\n\n    def check(self):\n        descr = \"hetero binning's optimal binning param's\"\n        self.check_string(self.metric_method, descr)\n\n        self.metric_method = self.metric_method.lower()\n        if self.metric_method in ['chi_square', 'chi-square']:\n            self.metric_method = 'chi_square'\n        self.check_valid_value(self.metric_method, descr, ['iv', 'gini', 'chi_square', 'ks'])\n        self.check_positive_integer(self.init_bin_nums, descr)\n\n        self.init_bucket_method = self.init_bucket_method.lower()\n        self.check_valid_value(self.init_bucket_method, descr, ['quantile', 'bucket'])\n\n        if self.max_bin_pct not in [1, 0]:\n            self.check_decimal_float(self.max_bin_pct, descr)\n        if self.min_bin_pct not in [1, 0]:\n            self.check_decimal_float(self.min_bin_pct, descr)\n        if self.min_bin_pct > self.max_bin_pct:\n            raise ValueError(\"Optimal binning's min_bin_pct should be less than or equal to max_bin_pct\")\n\n        self.check_boolean(self.mixture, descr)\n        self.check_positive_integer(self.init_bin_nums, descr)\n\n\nclass FeatureBinningParam(BaseParam):\n    \"\"\"\n    Define the feature binning method\n\n    Parameters\n    ----------\n    method : str, 'quantile', 'bucket' or 'optimal', default: 'quantile'\n        Binning method.\n\n    compress_thres: int, default: 10000\n        When the number of saved summaries exceeds this threshold, it will call its compress function\n\n    head_size: int, default: 10000\n        The buffer size to store inserted observations. When the head list reaches this buffer size, the\n        QuantileSummaries object starts to generate summaries (or stats) and insert them into its sampled list.\n\n    error: float, 0 <= error < 1, default: 0.001\n        The error tolerance of binning. The final split point comes from the original data, and the rank\n        of this value is close to the exact rank. More precisely,\n        floor((p - 2 * error) * N) <= rank(x) <= ceil((p + 2 * error) * N)\n        where p is the quantile in float, and N is the total number of data points.\n\n    bin_num: int, bin_num > 0, default: 10\n        The max bin number for binning\n\n    bin_indexes : list of int or int, default: -1\n        Specify which columns need to be binned. -1 represents all columns. If you need to indicate specific\n        cols, provide a list of header indexes instead of -1.\n\n        Note that columns specified by `bin_indexes` and `bin_names` will be combined.\n\n    bin_names : list of string, default: []\n        Specify which columns need to be calculated. Each element in the list represents a column name in the header.\n\n        Note that columns specified by `bin_indexes` and `bin_names` will be combined.\n\n    adjustment_factor : float, default: 0.5\n        The adjustment factor when calculating WOE. This is useful when there is no event or non-event in\n        a bin. Please note that this parameter will NOT take effect for settings on the host.\n\n    category_indexes : list of int or int, default: []\n        Specify which columns are category features. -1 represents all columns. A list of ints indicates a set of\n        such features. For category features, bin_obj will take its original values as split_points and treat them\n        as having been binned. 
If this is not what you expect, please do NOT put it into this parameter.\n\n        The number of categories should not exceed the bin_num set above.\n\n        Note that columns specified by `category_indexes` and `category_names` will be combined.\n\n    category_names : list of string, default: []\n        Use column names to specify category features. Each element in the list represents a column name in the header.\n\n        Note that columns specified by `category_indexes` and `category_names` will be combined.\n\n    local_only : bool, default: False\n        Whether to just provide the binning method to the guest party. If true, the host party will do nothing.\n        Warnings: This parameter will be deprecated in a future version.\n\n    transform_param: TransformParam\n        Define how to transfer the binned data.\n\n    need_run: bool, default True\n        Indicate if this module needs to be run\n\n    skip_static: bool, default False\n        If true, binning will not calculate iv, woe etc. In this case, optimal-binning\n        will not be supported.\n\n    \"\"\"\n\n    def __init__(self, method=consts.QUANTILE,\n                 compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD,\n                 head_size=consts.DEFAULT_HEAD_SIZE,\n                 error=consts.DEFAULT_RELATIVE_ERROR,\n                 bin_num=consts.G_BIN_NUM, bin_indexes=-1, bin_names=None, adjustment_factor=0.5,\n                 transform_param=TransformParam(),\n                 local_only=False,\n                 category_indexes=None, category_names=None,\n                 need_run=True, skip_static=False):\n        super(FeatureBinningParam, self).__init__()\n        self.method = method\n        self.compress_thres = compress_thres\n        self.head_size = head_size\n        self.error = error\n        self.adjustment_factor = adjustment_factor\n        self.bin_num = bin_num\n        self.bin_indexes = bin_indexes\n        self.bin_names = bin_names\n        self.category_indexes = category_indexes\n        self.category_names = category_names\n        self.transform_param = copy.deepcopy(transform_param)\n        self.need_run = need_run\n        self.skip_static = skip_static\n        self.local_only = local_only\n\n    def check(self):\n        descr = \"Binning param's\"\n        self.check_string(self.method, descr)\n        self.method = self.method.lower()\n        self.check_positive_integer(self.compress_thres, descr)\n        self.check_positive_integer(self.head_size, descr)\n        self.check_decimal_float(self.error, descr)\n        self.check_positive_integer(self.bin_num, descr)\n        if self.bin_indexes != -1:\n            self.check_defined_type(self.bin_indexes, descr, ['list', 'RepeatedScalarContainer', \"NoneType\"])\n        self.check_defined_type(self.bin_names, descr, ['list', \"NoneType\"])\n        self.check_defined_type(self.category_indexes, descr, ['list', \"NoneType\"])\n        self.check_defined_type(self.category_names, descr, ['list', \"NoneType\"])\n        self.check_open_unit_interval(self.adjustment_factor, descr)\n        self.check_boolean(self.local_only, descr)\n\n\nclass HeteroFeatureBinningParam(FeatureBinningParam):\n    \"\"\"\n    split_points_by_index: dict, default None\n        Manually specified split points for local features;\n        key should be feature index, value should be split points in sorted list;\n        along with `split_points_by_col_name`, keys should cover all local features, including categorical features;\n        note that each split point list should have length equal to desired bin num(n),\n        with first (n-1) entries equal to the maximum value(inclusive) of each first (n-1) bins,\n        and nth value the max of current feature.\n\n    split_points_by_col_name: dict, default None\n        Manually specified split points for local features;\n        key should be feature name, value should be split points in sorted list;\n        along with `split_points_by_index`, keys should cover all local features, including 
categorical features;\n note that each split point list should have length equal to desired bin num(n),\n with first (n-1) entries equal to the maximum value(inclusive) of each first (n-1) bins,\n and nth value the max of current feature.\n \"\"\"\n\n def __init__(self, method=consts.QUANTILE, compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD,\n head_size=consts.DEFAULT_HEAD_SIZE,\n error=consts.DEFAULT_RELATIVE_ERROR,\n bin_num=consts.G_BIN_NUM, bin_indexes=-1, bin_names=None, adjustment_factor=0.5,\n transform_param=TransformParam(), optimal_binning_param=OptimalBinningParam(),\n local_only=False, category_indexes=None, category_names=None,\n encrypt_param=EncryptParam(),\n need_run=True, skip_static=False,\n split_points_by_index=None, split_points_by_col_name=None):\n super(HeteroFeatureBinningParam, self).__init__(method=method, compress_thres=compress_thres,\n head_size=head_size, error=error,\n bin_num=bin_num, bin_indexes=bin_indexes,\n bin_names=bin_names, adjustment_factor=adjustment_factor,\n transform_param=transform_param,\n category_indexes=category_indexes,\n category_names=category_names,\n need_run=need_run, local_only=local_only,\n skip_static=skip_static)\n self.optimal_binning_param = copy.deepcopy(optimal_binning_param)\n self.encrypt_param = encrypt_param\n self.split_points_by_index = split_points_by_index\n self.split_points_by_col_name = split_points_by_col_name\n\n def check(self):\n descr = \"Hetero Binning param's\"\n super(HeteroFeatureBinningParam, self).check()\n self.check_valid_value(self.method, descr, [consts.QUANTILE, consts.BUCKET, consts.OPTIMAL])\n self.optimal_binning_param.check()\n self.encrypt_param.check()\n if self.encrypt_param.method != consts.PAILLIER:\n raise ValueError(\"Feature Binning support Paillier encrypt method only.\")\n if self.skip_static and self.method == consts.OPTIMAL:\n raise ValueError(\"When skip_static, optimal binning is not supported.\")\n self.transform_param.check()\n if self.skip_static and self.transform_param.transform_type == 'woe':\n raise ValueError(\"To use woe transform, skip_static should set as False\")\n\n\nclass HomoFeatureBinningParam(FeatureBinningParam):\n def __init__(self, method=consts.VIRTUAL_SUMMARY,\n compress_thres=consts.DEFAULT_COMPRESS_THRESHOLD,\n head_size=consts.DEFAULT_HEAD_SIZE,\n error=consts.DEFAULT_RELATIVE_ERROR,\n sample_bins=100,\n bin_num=consts.G_BIN_NUM, bin_indexes=-1, bin_names=None, adjustment_factor=0.5,\n transform_param=TransformParam(),\n category_indexes=None, category_names=None,\n need_run=True, skip_static=False, max_iter=100):\n super(HomoFeatureBinningParam, self).__init__(method=method, compress_thres=compress_thres,\n head_size=head_size, error=error,\n bin_num=bin_num, bin_indexes=bin_indexes,\n bin_names=bin_names, adjustment_factor=adjustment_factor,\n transform_param=transform_param,\n category_indexes=category_indexes, category_names=category_names,\n need_run=need_run,\n skip_static=skip_static)\n self.sample_bins = sample_bins\n self.max_iter = max_iter\n\n def check(self):\n descr = \"homo binning param's\"\n super(HomoFeatureBinningParam, self).check()\n self.check_string(self.method, descr)\n self.method = self.method.lower()\n self.check_valid_value(self.method, descr, [consts.VIRTUAL_SUMMARY, consts.RECURSIVE_QUERY])\n self.check_positive_integer(self.max_iter, descr)\n if self.max_iter > 100:\n raise ValueError(\"Max iter is not allowed exceed 
100\")\n","sub_path":"python/fate_client/pipeline/param/feature_binning_param.py","file_name":"feature_binning_param.py","file_ext":"py","file_size_in_byte":15821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"362915192","text":"# tensorflow version should be >= 2.0 (eager execution is the default there)\nimport tensorflow as tf\nimport numpy as np\n\n# on TF 1.x eager mode has to be switched on explicitly; this is a no-op on 2.x\nif not tf.executing_eagerly():\n    tf.compat.v1.enable_eager_execution()\n\n\"\"\"\nPrepare Data (from simple to what we want)\n\"\"\"\n\ndataset = tf.data.Dataset.range(10)\nfor val in dataset:\n    print(val.numpy())\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1)\nfor window_dataset in dataset:\n    for val in window_dataset:\n        print(val.numpy(), end=\" \")\n    print()\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\nfor window_dataset in dataset:\n    for val in window_dataset:\n        print(val.numpy(), end=\" \")\n    print()\n\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\ndataset = dataset.flat_map(lambda window: window.batch(5))\nfor window in dataset:\n    print(window.numpy())\n\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\ndataset = dataset.flat_map(lambda window: window.batch(5))\ndataset = dataset.map(lambda window: (window[:-1], window[-1:]))\nfor x, y in dataset:\n    print(x.numpy(), y.numpy())\n\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\ndataset = dataset.flat_map(lambda window: window.batch(5))\ndataset = dataset.map(lambda window: (window[:-1], window[-1:]))\ndataset = dataset.shuffle(buffer_size=10)\nfor x, y in dataset:\n    print(x.numpy(), y.numpy())\n\n\ndataset = tf.data.Dataset.range(10)\ndataset = dataset.window(5, shift=1, drop_remainder=True)\ndataset = dataset.flat_map(lambda window: window.batch(5))\ndataset = dataset.map(lambda window: (window[:-1], window[-1:]))\ndataset = dataset.shuffle(buffer_size=10)\ndataset = dataset.batch(2).prefetch(1)\nfor x, y in dataset:\n    print(\"x = \", x.numpy())\n    print(\"y = \", y.numpy())\n\n\n# Prepare Time Series Data\ndef trend(time, slope=0):\n    return slope * time\n\ndef seasonal_pattern(season_time):\n    \"\"\"Just an arbitrary pattern, you can change it if you wish\"\"\"\n    return np.where(season_time < 0.4,\n                    np.cos(season_time * 2 * np.pi),\n                    1 / np.exp(3 * season_time))\n\ndef seasonality(time, period, amplitude=1, phase=0):\n    \"\"\"Repeats the same pattern at each period\"\"\"\n    season_time = ((time + phase) % period) / period\n    return amplitude * seasonal_pattern(season_time)\n\ndef noise(time, noise_level=1, seed=None):\n    rnd = np.random.RandomState(seed)\n    return rnd.randn(len(time)) * noise_level\n\ntime = np.arange(4 * 365 + 1, dtype=\"float32\")\nbaseline = 10\namplitude = 40\nslope = 0.05\nnoise_level = 5\n\n# Create the series\nseries = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)\n# Update with noise\nseries += noise(time, noise_level, seed=42)\n\nsplit_time = 1000\ntime_train = time[:split_time]\nx_train = series[:split_time]\ntime_valid = time[split_time:]\nx_valid = series[split_time:]\n\nwindow_size = 20\nbatch_size = 32\nshuffle_buffer_size = 1000\n\n\"\"\"\nFinal version of preparing feature from time series\n\"\"\"\ndef windowed_dataset(series, window_size, batch_size, shuffle_buffer):\n    dataset = tf.data.Dataset.from_tensor_slices(series)\n    dataset = dataset.window(window_size+1, shift=1, 
drop_remainder=True)\n    dataset = dataset.flat_map(lambda window: window.batch(window_size+1))\n    dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-1], window[-1]))\n    dataset = dataset.batch(batch_size).prefetch(1)\n    return dataset\n\n\"\"\"\nSimple Linear Regression\n\"\"\"\ndataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)\nprint(dataset)\nl0 = tf.keras.layers.Dense(1, input_shape=[window_size])\nmodel = tf.keras.models.Sequential([l0])\n\nmodel.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(lr=1e-6, momentum=0.9))\nmodel.fit(dataset, epochs=100, verbose=0)\n\nprint(\"Layer weights {}\".format(l0.get_weights()))\n\n\nforecast = []\n\nfor time in range(len(series) - window_size):\n    forecast.append(model.predict(series[time:time + window_size][np.newaxis]))\n\nforecast = forecast[split_time-window_size:]\nresults = np.array(forecast)[:, 0, 0]\ntf.keras.metrics.mean_absolute_error(x_valid, results).numpy()\n\n\n\"\"\"\nDNN\n\"\"\"\ndataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)\n\n\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Dense(10, input_shape=[window_size], activation=\"relu\"), \n    tf.keras.layers.Dense(10, activation=\"relu\"), \n    tf.keras.layers.Dense(1)\n])\n\nmodel.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(lr=1e-6, momentum=0.9))\nmodel.fit(dataset,epochs=100,verbose=0)\n\n\nforecast = []\n\nfor time in range(len(series) - window_size):\n    forecast.append(model.predict(series[time:time + window_size][np.newaxis]))\n\nforecast = forecast[split_time-window_size:]\nresults = np.array(forecast)[:, 0, 0]\ntf.keras.metrics.mean_absolute_error(x_valid, results).numpy()\n","sub_path":"Keras_L4_TimeSeries/Week2_PrepareFeatures.py","file_name":"Week2_PrepareFeatures.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"125387733","text":"#!/usr/bin/python\n# file name: nested_function.py\n\nfrom __future__ import print_function\nimport time\n\ndef hitungMundur(n):\n\tli = [n]\n\tdef next():\n\t\tr = li[0]\n\t\tli[0] -= 1\n\t\treturn r\n\treturn next\n\ndef main():\n\t# call the hitungMundur() function\n\tnext = hitungMundur(3)\n\twhile True:\n\t\tval = next()\n\t\tif val == 0:\n\t\t\tprint(\"GO!!!\")\n\t\t\tbreak\n\t\tprint(val, end=' ')\n\t\ttime.sleep(1) # pause for 1 second\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"nested_function.py","file_name":"nested_function.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"509808780","text":"# Import required modules\nimport argparse\nimport time\nimport speech_recognition as sr\nfrom utils.argument import str2bool\nimport docx\nimport sys\nfrom NLU.rpa_notepad import rpa_notepad\n\n############ Add argument parser for command line arguments ############\nparser = argparse.ArgumentParser(description=\"Speech recognition demo\")\nparser.add_argument(\n    \"--input\", \"-i\", type=str, required=True, help=\"Input audio file for analysis\"\n)\nparser.add_argument(\n    \"--lang\", \"-l\", type=str, default=\"en-US\", help=\"Select the audio language\"\n)\nparser.add_argument(\n    \"--stt\",\n    \"-e\",\n    type=str,\n    default=\"google\",\n    help=\"Select the speech to text engine\",\n)\nparser.add_argument(\n    \"--save\", \"-s\", type=str, help=\"Select the file format to save the recognized text.\"\n)\nparser.add_argument(\"--si\", type=str, help=\"Select the 
speech input application.\")\nargs = parser.parse_args()\n\n\ndef main():\n    # start speech recognizer\n    recognizer = sr.Recognizer()\n\n    if args.si:\n        if args.si == \"notepad\":\n            rpa = rpa_notepad()\n            rpa.application_start()\n\n    # open input audio file\n    with sr.AudioFile(args.input) as source:\n        audio = recognizer.record(source)\n\n    # recognize speech using Google Speech Recognition\n    if args.stt == \"google\":\n        try:\n            # for testing purposes, we're just using the default API key\n            # to use another API key, use `r.recognize_google(audio, key=\"GOOGLE_SPEECH_RECOGNITION_API_KEY\")`\n            # instead of `r.recognize_google(audio)`\n            print(\"[INFO] Google Speech Recognition thinks you said: \")\n            # save program start time\n            start_time = time.time()\n            textRecognize = recognizer.recognize_google(audio, language=args.lang)\n            print(textRecognize)\n            # Calculate processing time\n            label = \"Process time: %.2f ms\" % ((time.time() - start_time) * 1000)\n            print(\"[INFO] \" + label)\n        except sr.UnknownValueError:\n            print(\"[ERROR] Google Speech Recognition could not understand audio\")\n        except sr.RequestError as e:\n            print(\n                \"[ERROR] Could not request results from Google Speech Recognition service; {0}\".format(\n                    e\n                )\n            )\n\n    # recognize speech using Sphinx\n    if args.stt == \"sphinx\":\n        try:\n            print(\"[INFO] Sphinx thinks you said:\")\n            # save program start time\n            start_time = time.time()\n            textRecognize = recognizer.recognize_sphinx(audio)\n            print(textRecognize)\n            # Calculate processing time\n            label = \"Process time: %.2f ms\" % ((time.time() - start_time) * 1000)\n            print(\"[INFO] \" + label)\n        except sr.UnknownValueError:\n            print(\"Sphinx could not understand audio\")\n        except sr.RequestError as e:\n            print(\"Sphinx error; {0}\".format(e))\n\n    \"\"\"\n    # recognize speech using Microsoft Azure Speech\n    AZURE_SPEECH_KEY = \"\" # Microsoft Speech API keys 32-character lowercase hexadecimal strings\n    try:\n        print(\"Microsoft Azure Speech thinks you said: \")\n        # save program start time\n        start_time = time.time()\n        print(recognizer.recognize_bing(audio, key=AZURE_SPEECH_KEY))\n        # Calculate processing time\n        label = \"Process time: %.2f ms\" % ((time.time() - start_time) * 1000)\n        print(\"[INFO] \" + label)\n    except sr.UnknownValueError:\n        print(\"Microsoft Azure Speech could not understand audio\")\n    except sr.RequestError as e:\n        print(\"Could not request results from Microsoft Azure Speech service; {0}\".format(e))\n    \"\"\"\n\n    \"\"\"\n    # recognize speech using Wit.ai\n    WIT_AI_KEY = \"\" # Wit.ai keys are 32-character uppercase alphanumeric strings\n    try:\n        print(\"Wit.ai thinks you said:\")\n        # save program start time\n        start_time = time.time()\n        print(recognizer.recognize_wit(audio, key=WIT_AI_KEY))\n        # Calculate processing time\n        label = \"Process time: %.2f ms\" % ((time.time() - start_time) * 1000)\n        print(\"[INFO] \" + label)\n    except sr.UnknownValueError:\n        print(\"Wit.ai could not understand audio\")\n    except sr.RequestError as e:\n        print(\"Could not request results from Wit.ai service; {0}\".format(e))\n    \"\"\"\n\n    # save recognized text to file\n    if args.save:\n        if args.save == \"txt\":\n            fileTxt = open(\"speech.txt\", \"w+\")\n            fileTxt.write(textRecognize)\n            fileTxt.close()\n            print(\"[INFO] Save recognized text to speech.txt\")\n        elif args.save == \"doc\":\n            fileDoc = docx.Document()\n            fileDoc.add_paragraph(textRecognize)\n            fileDoc.save(\"speech.docx\")\n            print(\"[INFO] Save recognized text to speech.docx\")\n\n    # type recognized text into the application\n    if args.si:\n        if args.si == \"notepad\":\n            
rpa.type_keys(textRecognize)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/speech.py","file_name":"speech.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"300937830","text":"from django.shortcuts import render,redirect\nfrom database.models import CITY, CITYPREFERENCE, PREFERENCE, SPOTPREFERENCE, SPOT, SPOTREVIEW, PROFILE, TOURINFO\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom .distance import *\nfrom .weather import *\nimport time\nimport math\nfrom datetime import date, timedelta\nfrom RegLogin import views,urls\nimport ast\n\n# Create your views here.\ndef display_cityChoice_page(request):\n    template = loader.get_template('cityChoice.html')\n    context = {}\n    all_cities=CITY.objects.all()\n    no_of_cities=all_cities.count()\n    rows=int(no_of_cities/3)\n    if (no_of_cities%3) != 0:\n        rows=rows+1\n    counter=0\n    city_ids=[]\n    city_names={}\n    city_images={}\n    city_descs={}\n    for i in range(0,rows):\n        ids=[]\n        for j in range(0,3):\n            if counter < no_of_cities:\n                index=all_cities[counter].id\n                ids.append(index)\n                city_names[index]=all_cities[counter].cityName\n                city_images[index]=str(all_cities[counter].image)\n                city_descs[index]=all_cities[counter].description\n                counter=counter+1\n        city_ids.append(ids)\n\n    context['city_ids']=city_ids\n    context['city_names']=city_names\n    context['city_descs']=city_descs\n    context['city_images']=city_images\n\n    if request.user.is_authenticated:\n        context['logged_in']=True\n    else:\n        context['logged_in'] =False\n    return HttpResponse(template.render(context, request))\n\ndef city_choice_page(request):\n\n    context={}\n    all_city = CITY.objects.all()\n    city_names={}\n    city_images={}\n    city_ids={}\n    count={}\n    counter=0\n    for a in all_city:\n        city_names[a.pk] = a.cityName\n        city_images[a.pk] = str(a.image)\n        counter = counter + 1\n\n\n    print(counter)\n    rows=int(counter/3)\n\n    if(counter%3!=0):\n        rows=rows+1\n    print(rows)\n    context['rows']=rows\n\n    context['city_names'] = city_names\n    context['images']=city_images\n    context['city_ids']=city_ids\n    context['count']=count\n    if request.user.is_authenticated:\n        context['logged_in']=True\n    else:\n        context['logged_in'] =False\n    return render(request, 'cityChoice.html', context)\n\n\ndef preference_page(request, city):\n    # pref_ids = CITYPREFERENCE.objects.select_related('preferenceID').filter(cityId_id=city)\n    print(\"City: \" + str(city) )\n    if request.method == 'POST':\n        some_var = request.POST.getlist('selected_spot[]')\n        print(\"some_var1: \" + str(some_var) )\n\n        print(some_var)\n        chosen_spots=request.session.get('chosen_spots',default=None)\n        chosen_cities=request.session.get('chosen_cities',default=None)\n        print(\"previous_spots: \" + str(chosen_spots) )\n        print(\"previous_cities: \" + str(chosen_cities) )\n        if chosen_spots is None:\n            #print(\"chosen_spots_None\")\n            chosen_spots = some_var\n            request.session.__setitem__('chosen_spots',chosen_spots)\n        else:\n            #print(\"chosen_spots_Not_None\")\n            chosen_spots=chosen_spots+some_var\n            #chosen_spots=[]\n            #print(\"chosen_spots2:\" + str(chosen_spots) )\n            request.session.__setitem__('chosen_spots',chosen_spots)\n\n        print(\"Current Spots: \" + str(chosen_spots) )\n\n        if chosen_cities is None:\n            if len(some_var)!=0:\n                chosen_cities=[]\n                chosen_cities.append(city)\n                request.session.__setitem__('chosen_cities',chosen_cities)\n        else:\n            if len(some_var)!=0:\n                chosen_cities.append(city)\n                request.session.__setitem__('chosen_cities',chosen_cities)\n\n        print(\"Current City: \" 
+ str(chosen_cities) )\n\n\n        #if request.user.id is None\n        #return\n\n        if 'next' in request.POST:\n            return render(request,\"extraDetailsForm.html\",{})\n        elif 'add_more_cities' in request.POST:\n            return redirect('city')\n            #return city_choice_page(request)\n\n    else:\n        preference_list={}\n        pref_spotName_list={}\n        #pref_spotID_list={}\n        pref_spotDescription_list = {}\n        pref_spotImage_list={}\n        pref_spotRating_list={}\n        pref_ids=CITYPREFERENCE.objects.filter(cityID=city)\n        for p in pref_ids:\n            pID = p.preferenceID\n            pName=pID.prefName\n            preference_list[pID.id]=pID.prefName\n            spot_ids=SPOTPREFERENCE.objects.filter(preferenceID=pID)\n            spots={}\n            #spotIDs={}\n            spotImages={}\n            spotDescriptions={}\n            spotRatings={}\n            for s in spot_ids:\n                sID=s.spotID\n                if sID.cityID.id == city:\n                    spot_rating_qs=SPOTREVIEW.objects.filter(spotID=sID)\n                    #print(spot_rating_qs)\n                    avg_rating=0;\n                    count=0;\n                    for sr in spot_rating_qs:\n                        #print(str(sr.spotID.id)+str(sr.rating))\n                        avg_rating=avg_rating+float(sr.rating)\n                        count=count+1;\n                    if count!=0:\n                        avg_rating=avg_rating/count;\n                    print(avg_rating)\n                    # round the average rating up to the next quarter star\n                    if avg_rating > 0:\n                        avg_rating = math.ceil(avg_rating * 4) / 4\n                    spots[sID.id]=sID.spotName\n                    spotImages[sID.id]=sID.image\n                    spotRatings[sID.id]=avg_rating\n                    spotDescriptions[sID.id]=sID.spotInfo\n                    #print(sID.spotInfo)\n                    #print(city, sID.spotName, pID.prefName)\n            pref_spotName_list[pID.id]=spots\n            pref_spotImage_list[pID.id]=spotImages\n            pref_spotDescription_list[pID.id]=spotDescriptions\n            pref_spotRating_list[pID.id]=spotRatings\n        # print(pref_spotDescription_list)\n\n\n        all_city=CITY.objects.all()\n        other_cities={}\n        for c in all_city:\n            #print(c.id)\n            if c.id!=city:\n                other_cities[c.id]=c.cityName\n\n        if request.user.is_authenticated:\n            logged_var=True \n        else:\n            logged_var=False\n        \n\n        return render(request, 'prefV2.html', {'preference_list':preference_list,\n                                               'pref_spotName_list':pref_spotName_list,\n                                               'pref_spotDescription_list': pref_spotDescription_list,\n                                               'pref_spotImage_list': pref_spotImage_list,\n                                               'pref_spotRating_list': pref_spotRating_list,\n                                               'other_cities':other_cities,\n                                               'logged_in':logged_var,\n                                               })\n\ndef data_form_view_page(request):\n    template = loader.get_template('extraDetailsForm.html')\n    context={}\n    return HttpResponse(template.render(context,request))\ndef plan_page_temp(request):\n    template = loader.get_template('ShowPlanPage.html')\n    
context={}\n return HttpResponse(template.render(context,request))\n\n\ndef generatePlanFirstTime(request):\n print(\"gen first time ->\")\n template = loader.get_template('ShowPlanPage.html')\n start_date = request.POST['start_date']\n end_date = request.POST['end_date']\n start_time=request.POST['start_time']\n end_time=request.POST['end_time']\n start_city=request.POST['start_location']\n\n request.session.__setitem__('user_start_date', start_date )\n request.session.__setitem__('user_start_time', start_time )\n request.session.__setitem__('user_end_date', end_date )\n request.session.__setitem__('user_end_time', end_time )\n request.session.__setitem__('user_start_location', start_city )\n #budget=request.POST['budget']\n context = {}\n if request.user.is_authenticated:\n context['logged_in'] = True\n else:\n context['logged_in'] = False \n\n chosen_spots = request.session.get('chosen_spots', default=None)\n chosen_cities = request.session.get('chosen_cities', default=None)\n\n print( start_city )\n print(start_date)\n print(end_date)\n print(start_time)\n print(end_time)\n print(chosen_spots)\n \n\n start_city = CITY.objects.get(cityName=start_city).id\n end_city = start_city\n\n city_list, city_spot = generate_city_spot(chosen_spots, start_city,end_city)\n #print(city_spot)\n #print(city_list)\n\n\n temp = start_date.split('-')\n start_date = date( int( temp[0] ), int(temp[1]), int(temp[2]) )\n total_city = len(city_list)\n cur_date = start_date\n cityName_list = []\n desc_data = {}\n distance_data = {}\n time_data = {}\n weather_data = {}\n for i in range(total_city):\n if i == total_city - 1:\n continue\n cur_city = city_list[i]\n nxt_city = city_list[i+1]\n #visit_by_day_description, visit_by_day_time, visit_by_day_distance, cur_date = generate_plan(cur_city, city_spot[cur_city], nxt_city, cur_date )\n visit_by_day_description, visit_by_day_time, visit_by_day_distance, cur_date = generate_plan2(cur_city, city_spot[cur_city], nxt_city, cur_date, start_time, end_time )\n #print(visit_by_day_distance)\n #rint(visit_by_day_time)\n #print(visit_by_day_description)\n cc = CITY.objects.get( id = cur_city )\n cityName = cc.cityName\n desc_data[ cityName ] = visit_by_day_description\n distance_data[ cityName ] = visit_by_day_distance\n time_data[cityName ] = visit_by_day_time\n weather_data[ cityName ], baad1, baad2 = getWeather(cc.latitude, cc.longitude)\n #print(cur_date)\n cityName_list.append( cityName )\n #print(\"Time Data: \"+ str(time_data))\n #print(\"City Spot: \"+ str(city_spot))\n city_travel_start_dates={}\n city_travel_total_days={}\n for temp in cityName_list:\n l=time_data[temp]\n #print(len(l))\n #print(list(l.keys())[0])\n city_travel_start_dates[temp]=list(l.keys())[0]\n city_travel_total_days[temp]=len(l)\n \n cityName_spotName={}\n cityName_spotTime={}\n for k,v in city_spot.items():\n cc = CITY.objects.get( id = k )\n cn=cc.cityName\n temp_list_1=[]\n temp_list_2=[]\n for s in v:\n spot=SPOT.objects.get(id=s)\n spotName=spot.spotName\n spotTime=spot.totalVisitTime\n spotTime=spotTime.split(' ')[0]\n temp_list_1.append(spotName)\n temp_list_2.append(spotTime)\n cityName_spotName[cn]=temp_list_1\n cityName_spotTime[cn]=temp_list_2\n\n \n \n\n context['cityName'] = cityName_list\n context['description_data'] = desc_data\n context[ 'distance_data' ] = distance_data\n context[ 'time_data' ] = time_data\n context[ 'weather_data' ] = weather_data\n\n \n\n request.session.__setitem__('plan_city_list',cityName_list)\n 
request.session.__setitem__('plan_city_start_dates',city_travel_start_dates)\n request.session.__setitem__('plan_city_total_days',city_travel_total_days)\n request.session.__setitem__('plan_spots_names',cityName_spotName)\n request.session.__setitem__('plan_spots_times',cityName_spotTime)\n\n #request.session.__setitem__('',chosen_cities)\n request.session.__setitem__('context_for_saving',context)\n if request.user.is_authenticated:\n \n return HttpResponse(template.render(context, request))\n else:\n request.session.__setitem__('plan_context_data',context)\n return redirect('login',view_func_name=\"tour_plan\")\n\n\ndef generatePlanEdit(request):\n print(\"edit plan ->\")\n context = {}\n \n template = loader.get_template('ShowPlanPage.html')\n start_date = request.session.get('user_start_date',default=None)\n end_date = request.session.get('user_end_date',default=None)\n start_time = request.session.get('user_start_time',default=None)\n end_time = request.session.get('user_end_time',default=None)\n start_city = request.session.get('user_start_location',default=None)\n chosen_spots = request.session.get('chosen_spots', default=None)\n \n chosen_cities = request.session.get('chosen_cities', default=None)\n\n city_time = {}\n\n start_city = CITY.objects.get(cityName=start_city).id\n end_city = start_city\n\n city_list, city_spot = generate_city_spot(chosen_spots, start_city,end_city)\n #print(city_spot)\n #print(city_list)\n print(\"post----->\")\n print( request.POST ) \n for c in city_list:\n ls = city_spot[c]\n tm = {}\n for s in ls:\n spt = SPOT.objects.get( id = s )\n name = spt.spotName\n print( name + \" \" + str(request.POST[name]) )\n tm[s] = float(request.POST[name])\n \n city_time[c] = tm\n\n \n\n print( start_city )\n print(start_date)\n print(end_date)\n print(start_time)\n print(end_time)\n print(chosen_spots)\n \n\n\n temp = start_date.split('-')\n start_date = date( int( temp[0] ), int(temp[1]), int(temp[2]) )\n total_city = len(city_list)\n cur_date = start_date\n cityName_list = []\n desc_data = {}\n distance_data = {}\n time_data = {}\n weather_data = {}\n for i in range(total_city):\n if i == total_city - 1:\n continue\n cur_city = city_list[i]\n nxt_city = city_list[i+1]\n #visit_by_day_description, visit_by_day_time, visit_by_day_distance, cur_date = generate_plan(cur_city, city_spot[cur_city], nxt_city, cur_date )\n visit_by_day_description, visit_by_day_time, visit_by_day_distance, cur_date = generate_plan2(cur_city, city_spot[cur_city], nxt_city, cur_date, start_time, end_time, city_time[cur_city] )\n #print(visit_by_day_distance)\n #rint(visit_by_day_time)\n #print(visit_by_day_description)\n cc = CITY.objects.get( id = cur_city )\n cityName = cc.cityName\n desc_data[ cityName ] = visit_by_day_description\n distance_data[ cityName ] = visit_by_day_distance\n time_data[cityName ] = visit_by_day_time\n weather_data[ cityName ], baad1, baad2 = getWeather(cc.latitude, cc.longitude)\n #print(cur_date)\n cityName_list.append( cityName )\n #print(\"Time Data: \"+ str(time_data))\n #print(\"City Spot: \"+ str(city_spot))\n city_travel_start_dates={}\n city_travel_total_days={}\n for temp in cityName_list:\n l=time_data[temp]\n #print(len(l))\n #print(list(l.keys())[0])\n city_travel_start_dates[temp]=list(l.keys())[0]\n city_travel_total_days[temp]=len(l)\n \n cityName_spotName={}\n cityName_spotTime={}\n\n for k,v in city_spot.items():\n cc = CITY.objects.get( id = k )\n cn=cc.cityName\n temp_list_1=[]\n temp_list_2=[]\n for s in v:\n spot=SPOT.objects.get(id=s)\n 
spotName=spot.spotName\n spotTime=spot.totalVisitTime\n spotTime=spotTime.split(' ')[0]\n temp_list_1.append(spotName)\n temp_list_2.append(spotTime)\n cityName_spotName[cn]=temp_list_1\n cityName_spotTime[cn]=temp_list_2\n\n \n \n\n context['cityName'] = cityName_list\n context['description_data'] = desc_data\n context[ 'distance_data' ] = distance_data\n context[ 'time_data' ] = time_data\n context[ 'weather_data' ] = weather_data\n\n request.session.__setitem__('plan_city_list',cityName_list)\n request.session.__setitem__('plan_city_start_dates',city_travel_start_dates)\n request.session.__setitem__('plan_city_total_days',city_travel_total_days)\n request.session.__setitem__('plan_spots_names',cityName_spotName)\n request.session.__setitem__('plan_spots_times',cityName_spotTime)\n\n #request.session.__setitem__('',chosen_cities)\n request.session.__setitem__('context_for_saving',context)\n if request.user.is_authenticated:\n return HttpResponse(template.render(context, request))\n else:\n request.session.__setitem__('plan_context_data',context)\n return redirect('login',view_func_name=\"tour_plan\")\n\ndef extra_data_fetching(request, function_name):\n #print(\"here\")\n if function_name == 'extra_details':\n return generatePlanFirstTime(request)\n else:\n return generatePlanEdit(request)\n \n\n\ndef plan_edit_options_view_page(request):\n cityName_list=request.session.get('plan_city_list',default=None)\n city_travel_start_dates=request.session.get('plan_city_start_dates',default=None)\n city_travel_total_days=request.session.get('plan_city_total_days',default=None)\n cityName_spotName=request.session.get('plan_spots_names',default=None)\n cityName_spotTime=request.session.get('plan_spots_times',default=None)\n context={}\n if request.user.is_authenticated:\n context['logged_in'] = True\n else:\n context['logged_in'] = False\n context['cityName_list']=cityName_list\n context['city_travel_start_dates']=city_travel_start_dates\n context['city_travel_total_days']=city_travel_total_days\n context['cityName_spotName']=cityName_spotName\n context['cityName_spotTime']=cityName_spotTime\n print(\"plan edit options->\")\n print(context)\n #print(cityName_list)\n #print(city_travel_start_dates)\n #print(city_travel_total_days)\n #print(cityName_spotName)\n #print(cityName_spotTime)\n template = loader.get_template('editPlan.html')\n return HttpResponse(template.render(context, request))\n\n\ndef spotDetail(request, spot_id):\n context = {}\n if request.method == 'POST':\n rt = request.POST['user_rating']\n rv = request.POST['user_review']\n temp = SPOTREVIEW.objects.filter(userID=PROFILE.objects.get(user=request.user), spotID=spot_id)\n if len(temp) > 0 :\n print(\"saving....\")\n temp[0].rating = rt\n temp[0].review = rv\n temp[0].save()\n else:\n print('creating....')\n f = SPOTREVIEW.objects.create( userID = PROFILE.objects.get(user=request.user), spotID = SPOT.objects.get(id=spot_id), rating = rt, review = rv)\n f.save()\n \n return redirect('spotDetail', spot_id)\n else: \n template = loader.get_template('spot_detail.html')\n \n profile = PROFILE.objects.get(user=request.user)\n \n temp = SPOTREVIEW.objects.filter(userID=profile, spotID=spot_id)\n if( len(temp) > 0 ):\n context['userrating'] = temp[0].rating\n context['userreview'] = temp[0].review\n else:\n context['userrating'] = '0'\n context['userreview'] = '-'\n\n s = SPOT.objects.get(id=spot_id)\n context['spotName'] = s.spotName \n \n rating = {}\n review = {}\n temp = SPOTREVIEW.objects.filter(spotID=spot_id)\n totrating = 0\n for x 
in temp:\n            rating[x.userID.user.username] = x.rating\n            totrating += int(x.rating)\n            review[x.userID.user.username] = x.review\n        if( len(temp) > 0 ):\n            context['avgrating'] = 1.0*totrating/len(temp)\n        else:\n            context['avgrating'] = 0\n        context['allrating'] = rating\n        context['allreview'] = review\n        return HttpResponse(template.render(context, request))\n\n\ndef save_plan_view_page( request ):\n    print(\"plan saving ->\")\n    template = loader.get_template('show_saved_plans.html')\n    start_date = request.session.get('user_start_date', default = None )\n    start_time = request.session.get('user_start_time', default = None )\n    end_date = request.session.get('user_end_date', default = None )\n    end_time = request.session.get('user_end_time', default = None )\n    start_location = request.session.get('user_start_location', default = None )\n    context = request.session.get('context_for_saving', default = None )\n    print(context)\n    f = TOURINFO.objects.create( userID = PROFILE.objects.get(user=request.user), startDate = start_date, endDate = end_date, startTime = start_time, endTime = end_time, startLocation = start_location, context = str(context) )\n    f.save()\n\n    return HttpResponse(template.render(context, request))\n\ndef show_chosen_previous_plan(request, plan_id):\n    p=TOURINFO.objects.get(pk=plan_id)\n    context=ast.literal_eval(p.context)\n    template = loader.get_template('show_saved_plans.html')\n    return HttpResponse(template.render(context, request))\n\ndef list_all_previous_plans(request):\n    profile=PROFILE.objects.get(user=request.user)\n    all_plans=TOURINFO.objects.filter(userID=profile)\n    plans={}\n    cnt = 0\n    for p in all_plans:\n        cnt += 1\n        plans[p.id]=\"Plan no: \"+ str(cnt)\n    context={}\n    context['all_plans']=plans\n    template = loader.get_template('all_previous_plans.html')\n    return HttpResponse(template.render(context, request))","sub_path":"ChoiceSubsystem/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"11043908","text":"import sys\nimport os\nimport json\nimport pandas as pd\nimport numpy as np\nimport optparse\nimport time\nimport datetime\nimport pytz\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom keras.models import Sequential, load_model\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import Tokenizer\nfrom collections import OrderedDict\ndef read_log(path):\n    # 'predict_data.txt' is the actual data used for prediction, in the current directory\n    if path.split('/')[-1] == 'predict_data.txt':\n    # if path.split('/')[-1] == 'access.log':\n        dataframe = pd.read_table(path, sep=' ', header=None,\n                                  names=['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13'],\n                                  dtype = {'1': str, '2': str, '3': str, '4': str, '5':str, '6': int, '7': float,\n                                           '8': int})\n        dataset = dataframe.sample(frac=1).values\n        info = np.array(dataset)\n        b = []\n        for x in info:\n            b.append(list(x))\n        b = np.array(b)\n        return b\n\ndef utc2timestamp(utc_matrix):\n    timeStamp = []\n    for x in utc_matrix:\n        x = x[0] + ' ' + x[-1]\n        timeArray = datetime.datetime.strptime(x, \"%d/%b/%Y:%H:%M:%S %z\")\n        timeStamp.append([int(time.mktime(timeArray.astimezone(pytz.utc).timetuple()))])\n    timestamp_matrix = np.array(timeStamp)\n    return timestamp_matrix\n\ndef ip2bina(ip_matrix):\n    matrix = np.zeros((1, 32))\n    for ip in ip_matrix:\n        temp = []\n        for i in ip.split('.'):\n            binary = bin(int(i))[2:].zfill(8)\n            for b in binary:\n                temp.append(int(b))\n        matrix = np.row_stack((matrix, np.array(temp)))\n    matrix = np.delete(matrix, 0, 0)\n\n    return matrix\n\ndef bool2bina(axis, bool_matrix):\n    tmp = np.zeros((1, axis))\n    # iterate over the matrix\n    for x in bool_matrix:\n        if str(x)[0] == '2':\n            tmp = np.row_stack((tmp, [1, 0, 0, 0]))\n        elif str(x)[0] == '3':\n            tmp = np.row_stack((tmp, [0, 1, 0, 0]))\n        elif str(x)[0] == '4':\n            tmp = np.row_stack((tmp, [0, 0, 1, 0]))\n        elif str(x)[0] == '5':\n            tmp = np.row_stack((tmp, [0, 0, 0, 1]))\n    tmp = np.delete(tmp, 0, 0)\n    return tmp\n\ndef cookie2bina(axis, cookie_matrix):\n    tmp = np.zeros((1, axis))\n    for x in cookie_matrix:\n        if x == \"\":\n            tmp = np.row_stack((tmp, [1, 0]))\n        else:\n            tmp = np.row_stack((tmp, [0, 1]))\n    tmp = np.delete(tmp, 0, 0)\n    return tmp\n\ndef completion2bina(axis, compt_matrix):\n    tmp = np.zeros((1, axis))\n    for x in compt_matrix:\n        if x == \"\":\n            tmp = np.row_stack((tmp, [1, 0]))\n        else:\n            tmp = np.row_stack((tmp, [0, 1]))\n    tmp = np.delete(tmp, 0, 0)\n    return tmp\n\n# normalize the input data\n# compute the max and min of the matrix along axis=0, i.e. the column direction\ndef normalize(num_matrix):\n    amax = np.apply_along_axis(np.max, 0, num_matrix)\n    amin = np.apply_along_axis(np.min, 0, num_matrix)\n    # iterate over the elements and compute the normalized values\n    for j in range(num_matrix.shape[1]):\n        for i in range(num_matrix.shape[0]):\n            num_matrix[i, j] = (num_matrix[i, j] - amin[j]) / (amax[j] - amin[j])\n    return num_matrix\n#\ndef conn_matx(num_matrix, bool_matrix, ip_matrix, timestamp_matrix, compt_matrix, cookie_matrix):\n    matrix = np.hstack((num_matrix, bool_matrix, ip_matrix,\n                        timestamp_matrix, compt_matrix, cookie_matrix))\n    return matrix\n\n# add a dimension to the matrix\ndef add_dimension(matrix):\n    ti = []\n    for i in matrix:\n        temp_list = []\n        for j in i:\n            temp_list.append([j])\n        ti.append(np.array(temp_list))\n    return ti\n\ndef matp(data, prediction):\n    C = []\n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    type0_x = []\n    type0_y = []\n    type0_z = []\n    type1_x = []\n    type1_y = []\n    type1_z = []\n    type2_x = []\n    type2_y = []\n    type2_z = []\n    type3_x = []\n    type3_y = []\n    type3_z = []\n    type4_x = []\n    type4_y = []\n    type4_z = []\n\n    for x in range(len(prediction)):\n        if(prediction[x][-1] > 0.45):\n            sub = 4\n        else:\n            sub = np.argmax(prediction[x])\n        if(sub == 0):\n            type0_x.append(data[x][0])\n            type0_y.append(data[x][1])\n            type0_z.append(data[x][2])\n        elif(sub == 1):\n            type1_x.append(data[x][0])\n            type1_y.append(data[x][1])\n            type1_z.append(data[x][2])\n        elif(sub == 2):\n            type2_x.append(data[x][0])\n            type2_y.append(data[x][1])\n            type2_z.append(data[x][2])\n        elif(sub == 3):\n            type3_x.append(data[x][0])\n            type3_y.append(data[x][1])\n            type3_z.append(data[x][2])\n        elif(sub == 4):\n            type4_x.append(data[x][0])\n            type4_y.append(data[x][1])\n            type4_z.append(data[x][2])\n\n    type0 = ax.scatter(type0_x, type0_y, type0_z, s = 10, c = 'red')\n    type1 = ax.scatter(type1_x, type1_y, type1_z, s = 10, c = 'black')\n    type2 = ax.scatter(type2_x, type2_y, type2_z, s = 10, c = 'blue')\n    type3 = ax.scatter(type3_x, type3_y, type3_z, s = 10, c = 'orange')\n    type4 = ax.scatter(type4_x, type4_y, type4_z, s = 10, c = 'green')\n    plt.legend((type0, type1, type2, type3, type4 ), (\"DDos\", \"General\", \"Slow Links\", \"Blasting\", \"Hit the library\"))\n    plt.show()\n\ndef pca(prediction, n):\n    pca = PCA(n_components=n)\n    newdata = pca.fit_transform(prediction)\n    matp(newdata, prediction)\n\ndef predict(csv_file):\n    input_infos = read_log(csv_file)\n    input_info = input_infos[:, 0:8]\n    bool_matrix = input_info[:, 5].astype(int)\n    ip_matrix = input_info[:, 2]\n    timestamp_matrix = input_info[:, 0:2]\n    compt_matrix = input_info[:, 4]\n    cookie_matrix = input_info[:, 3]\n    num_matrix = input_info[:, 6:8].astype(float)\n\n    bool_matrix = bool2bina(4, bool_matrix)\n    ip_matrix = ip2bina(ip_matrix)\n    timestamp_matrix = normalize(utc2timestamp(timestamp_matrix))\n    compt_matrix = completion2bina(2, compt_matrix)\n    cookie_matrix = cookie2bina(2, cookie_matrix)\n    num_matrix = normalize(num_matrix)\n\t\n    input_matrix = conn_matx(num_matrix, bool_matrix,\n                             ip_matrix, timestamp_matrix, compt_matrix, cookie_matrix)\n    \n    model = load_model('../traindata/securitai-lstm-model.h5')\n    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])\n    input_matrix = np.reshape(input_matrix, (input_matrix.shape[0], 1, input_matrix.shape[1]))\n\n    prediction = model.predict(input_matrix)\n    pca(prediction, 3)\n    local_time = time.strftime('%Y-%m-%d-%H-%M',time.localtime(time.time()))\n    path = \"result-\" + str(local_time)\n    # create the output directory if it does not exist yet\n    if not os.path.exists(path):\n        os.mkdir(path)\n    for x in range(len(prediction)):\n        np.set_printoptions(precision=3)\n        sub = np.argmax(prediction[x])\n        f = open(path + '/' +str(local_time) +'_'+str(sub),'a')\n        # f = open(\"/home/ELK/ELK/nginxlog/predictdata/result/\" + str(local_time) + '_'+ str(sub), 'a')\n        # \"result/\" is the output/save path for the prediction results, in the current directory\n        for i in(input_info[x]):\n            f.write(str(i) + ' ')\n        f.write('[' + str('{:.5f}'.format(prediction[x][0])) + ' ')\n        for j in range(1, len(prediction[x])-1):\n            f.write(str('{:.5f}'.format(prediction[x][j])) + ' ')\n        f.write(str('{:.5f}'.format(prediction[x][-1])) + ']' +'\\n')\n\nif __name__ == '__main__':\n    parser = optparse.OptionParser()\n    parser.add_option('-f', '--file', action=\"store\", dest=\"file\", help=\"data file\")\n    options, args = parser.parse_args()\n\n    if options.file is not None:\n        csv_file = options.file\n    else:\n        # csv_file = 'access.log'\n        csv_file = 'predict_data.txt'\n        # 'predict_data.txt' is the actual data used for prediction, in the current directory\n    # np.set_printoptions(suppress=True)\n    predict(csv_file)\n","sub_path":"研发代码/神经网络模块/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":8993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"352046291","text":"# Copyright (c) 2016 Catalyst IT Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nfrom datetime import datetime\nfrom decimal import Decimal\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom distil import exceptions\nfrom distil import rater\nfrom distil.common import constants\nfrom distil.db import api as db_api\nfrom distil.common import general\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\ndef _validate_project_and_range(project_id, start, end):\n    try:\n        if start is not None:\n            try:\n                start = datetime.strptime(start, constants.iso_date)\n            except ValueError:\n                start = datetime.strptime(start, constants.iso_time)\n        else:\n            raise exceptions.DateTimeException(\n                message=(\n                    \"Missing parameter:\" +\n                    \"'start' in format: y-m-d or y-m-dTH:M:S\"))\n        if not end:\n            end = 
datetime.utcnow()\n else:\n try:\n end = datetime.strptime(end, constants.iso_date)\n except ValueError:\n end = datetime.strptime(end, constants.iso_time)\n except ValueError:\n raise exceptions.DateTimeException(\n message=(\n \"Missing parameter: \" +\n \"'end' in format: y-m-d or y-m-dTH:M:S\"))\n\n if end <= start:\n raise exceptions.DateTimeException(\n message=\"End date must be greater than start.\")\n\n if not project_id:\n raise exceptions.NotFoundException(\"Missing parameter: project_id\")\n valid_project = db_api.project_get(project_id)\n\n return valid_project, start, end\n\n\ndef get_usage(project_id, start, end):\n cleaned = _validate_project_and_range(project_id, start, end)\n try:\n valid_project, start, end = cleaned\n except ValueError:\n return cleaned\n\n LOG.debug(\"Calculating unrated data for %s in range: %s - %s\" %\n (valid_project.id, start, end))\n\n usage = db_api.usage_get(valid_project.id, start, end)\n\n project_dict = _build_project_dict(valid_project, usage)\n\n # add range:\n project_dict['start'] = str(start)\n project_dict['end'] = str(end)\n\n return project_dict\n\n\ndef get_costs(project_id, start, end):\n\n valid_project, start, end = _validate_project_and_range(\n project_id, start, end)\n\n LOG.debug(\"Calculating rated data for %s in range: %s - %s\" %\n (valid_project.id, start, end))\n\n costs = _calculate_cost(valid_project, start, end)\n\n return costs\n\n\ndef _calculate_cost(project, start, end):\n \"\"\"Calculate a rated data dict from the given range.\"\"\"\n\n usage = db_api.usage_get(project.id, start, end)\n\n # Transform the query result into a billable dict.\n project_dict = _build_project_dict(project, usage)\n project_dict = _add_costs_for_project(project_dict)\n\n # add sales order range:\n project_dict['start'] = str(start)\n project_dict['end'] = str(end)\n\n return project_dict\n\n\ndef _build_project_dict(project, usage):\n \"\"\"Builds a dict structure for a given project.\"\"\"\n\n project_dict = {'name': project.name, 'tenant_id': project.id}\n\n all_resource_ids = [entry.get('resource_id') for entry in usage]\n res_list = db_api.resource_get_by_ids(project.id, all_resource_ids)\n project_dict['resources'] = {row.id: json.loads(row.info)\n for row in res_list}\n\n for entry in usage:\n service = {'name': entry.get('service'),\n 'volume': entry.get('volume'),\n 'unit': entry.get('unit')}\n\n resource = project_dict['resources'][entry.get('resource_id')]\n service_list = resource.setdefault('services', [])\n service_list.append(service)\n\n return project_dict\n\n\ndef _add_costs_for_project(project):\n \"\"\"Adds cost values to services using the given rates manager.\"\"\"\n\n current_rater = rater.get_rater()\n\n project_total = 0\n for resource in project['resources'].values():\n resource_total = 0\n for service in resource['services']:\n try:\n rate = current_rater.rate(service['name'])\n except KeyError:\n # no rate exists for this service\n service['cost'] = \"0\"\n service['volume'] = \"unknown unit conversion\"\n service['unit'] = \"unknown\"\n service['rate'] = \"missing rate\"\n continue\n\n volume = general.convert_to(service['volume'],\n service['unit'],\n rate['unit'])\n\n # round to 2dp so in dollars.\n cost = round(volume * Decimal(rate['rate']), 2)\n\n service['cost'] = str(cost)\n service['volume'] = str(volume)\n service['unit'] = rate['unit']\n service['rate'] = str(rate['rate'])\n\n resource_total += cost\n resource['total_cost'] = str(resource_total)\n project_total += resource_total\n 
project['total_cost'] = str(project_total)\n\n return project\n","sub_path":"distil/service/api/v2/costs.py","file_name":"costs.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"594243028","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.3-fat/egg/minideblib/DpkgDatalist.py\n# Compiled at: 2007-11-06 15:08:00\nimport os, sys\nfrom UserDict import UserDict\nfrom OrderedDict import OrderedDict\nimport SafeWriteFile\nfrom types import StringType\n\nclass DpkgDatalistException(Exception):\n UNKNOWN = 0\n SYNTAXERROR = 1\n\n def __init__(self, message='', reason=UNKNOWN, file=None, line=None):\n self.message = message\n self.reason = reason\n self.filename = file\n self.line = line\n\n\nclass _DpkgDatalist:\n\n def __init__(self, fn=''):\n \"\"\"Initialize a DpkgDatalist object. An optional argument is a\n file from which we load values.\"\"\"\n self.filename = fn\n if self.filename:\n self.load(self.filename)\n\n def store(self, fn=None):\n \"\"\"Store variable data in a file.\"\"\"\n if fn == None:\n fn = self.filename\n if not fn:\n self._store(sys.stdout)\n return\n if type(fn) == StringType:\n vf = SafeWriteFile(fn + '.new', fn, 'w')\n else:\n vf = fn\n try:\n self._store(vf)\n finally:\n if type(fn) == StringType:\n vf.close()\n\n return\n\n\nclass DpkgDatalist(UserDict, _DpkgDatalist):\n\n def __init__(self, fn=''):\n UserDict.__init__(self)\n _DpkgDatalist.__init__(self, fn)\n\n\nclass DpkgOrderedDatalist(OrderedDict, _DpkgDatalist):\n\n def __init__(self, fn=''):\n OrderedDict.__init__(self)\n _DpkgDatalist.__init__(self, fn)","sub_path":"pycfiles/minideblib-0.6.21.29-py2.5/DpkgDatalist.py","file_name":"DpkgDatalist.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"252970791","text":"from ctypes import *\nimport os\nimport sys\nimport threading\nimport time\nimport ctypes\n\npr = None\n\nroot = os.path.dirname(__file__)\n\nif sys.platform in [\"linux\", \"linux2\"]:\n object_name = \"_pyrift.so\"\nelif sys.platform == \"win32\":\n object_name = \"pyrift\"\nelse:\n raise Exception(\"This OS is not yet supported\")\n \ntry:\n pr = CDLL(os.path.join(root, object_name))\nexcept OSError:\n pr = CDLL(object_name)\n\n# Set initial values in the DLL \npr.prepare()\n\ndef _raise_errors(error_code):\n if error_code == 1:\n raise Exception(\"Could not detect an Oculus Rift\", \"NO_DEVICE\")\n elif error_code == 2:\n raise Exception(\"Device not initialized; call initialize() first.\", \"NOT_INITIALIZED\")\n\ndef initialize():\n _raise_errors(pr.initialize())\n\ndef get_orientation():\n yaw = ctypes.c_float()\n pitch = ctypes.c_float()\n roll = ctypes.c_float()\n \n _raise_errors(pr.get_orientation(ctypes.byref(yaw), ctypes.byref(pitch), ctypes.byref(roll)))\n return yaw.value, pitch.value, roll.value\n\ndef get_orientation_quaternion():\n x = ctypes.c_float()\n y = ctypes.c_float()\n z = ctypes.c_float()\n w = ctypes.c_float()\n\n _raise_errors(pr.get_orientation_quaternion(ctypes.byref(x), ctypes.byref(y), ctypes.byref(z), ctypes.byref(w)))\n return x.value, y.value, z.value, 
w.value\n\n\n","sub_path":"pyrift/pyrift.py","file_name":"pyrift.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"433632790","text":"import keyboard\nimport time\nimport serial\n \nVar1=serial.Serial('COM12',9600)\ntime.sleep(1)\nwhile 1:\n    data=0\n    data=Var1.read()\n    print(data)\n    if data == b'A':\n        keyboard.press('z')\n        print (\"hell is here friend\")\n        while 1:\n            time.sleep(2)\n            data = Var1.read()\n            print (\"cello the best company ever\")\n            if data != b'A':\n                print (\"cello doesnt loook so good now huh\")\n                break\n        keyboard.release('z')\n","sub_path":"piano_code/python_slow_sharp.py","file_name":"python_slow_sharp.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"50790452","text":"# Recursion\r\n# Create a function that computes the factorial of an arbitrary number\r\n\r\n# Create a variable to hold the result\r\n# n = 10\r\n# for i in range(1, 11):\r\n#     n *= i\r\n# print(n)\r\n\r\n\r\ndef factorial(n):\r\n    \"\"\"\r\n    This function computes the factorial of an arbitrary number\r\n    Parameters:\r\n        n\r\n    \"\"\"\r\n\r\n    # Create a variable to hold the result\r\n    result = n\r\n\r\n    for i in range(1, n):\r\n        result *= i\r\n\r\n    return result\r\n\r\n\r\n# print(factorial(20))\r\n\r\n# Infinite recursion\r\n# def fn():\r\n#     fn()\r\n# fn\r\n\r\n# Recursive function\r\n# Recursion -- a function that refers to itself, breaking a big problem into smaller\r\n# problems until they cannot be divided further, then solving them\r\n# 1. base case\r\n# 2. recursive case\r\n\r\n\r\ndef factorial(n):\r\n    if n == 1:\r\n        return 1\r\n    return n * factorial(n - 1)\r\n\r\n\r\nprint(factorial(10))\r\n","sub_path":"Recursive1.py","file_name":"Recursive1.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"238629959","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport folium\nimport pickle\nimport matplotlib.pyplot as plt\nfrom geopy.geocoders import Nominatim\nfrom streamlit_folium import folium_static\nfrom sklearn.impute import SimpleImputer\n\n\n#read in external files\nstates = pd.read_csv('data/states/states_lanlon.csv')\nall_districts = pd.read_csv('data/full_school_district_with_zip.csv')\noriginal_districts = pd.read_csv('../data/dataset.csv')\nstate_dict = {states.loc[i][3]: states.loc[i][0] for i in range(1, states.shape[0])}\n\n#load the prediction model\nfile = open(\"models/random_forest\",'rb')\nmodel = pickle.load(file)\n\n#this function takes in the central lonlat coordinates of a given state and displays the map\n#zoom in more with smaller states, and less with larger states\ndef get_state_map(longitude, latitude, state):\n    if state == 'Alaska':\n        map = folium.Map([latitude, longitude], tiles=\"OpenStreetMap\", zoom_start=4)\n    elif state in ['Hawaii', 'Delaware', 'New Jersey', 'Connecticut', 'Massachusetts']:\n        map = folium.Map([latitude, longitude], tiles=\"OpenStreetMap\", zoom_start=8)\n    elif state == 'Rhode Island':\n        map = folium.Map([latitude, longitude], tiles=\"OpenStreetMap\", zoom_start=10)\n    else:\n        map = folium.Map([latitude, longitude], tiles=\"OpenStreetMap\", zoom_start=6.5)\n    #folium.TileLayer('Stamen Toner').add_to(map)\n    # folium_static(map)\n    return map\n\n\n\n#this function takes in the lonlat coordinates of a given school district and displays the map\n#@st.cache(suppress_st_warning=True)\ndef get_school_district_map(school_district_choice, latitude, longitude, graduation_rate_real):\n    map = folium.Map([latitude, longitude], tiles=\"OpenStreetMap\", zoom_start=13)\n    folium.Marker(\n        location=[latitude, longitude],\n        tooltip=f\"\"\"\n        In {school_district_choice.title()}
\n The real-world graduation rate is {round(graduation_rate_real * 100)}%\n \"\"\",\n draggable=True,\n icon=folium.Icon(icon=\"cloud\")).add_to(map)\n folium_static(map)\n return None\n\n\n\n\n\n#####################PAGE STARTS#####################\n\n#ask the user to select a state\nstate_choice = st.selectbox(\n 'Select your state:',\n states['Name']\n)\n\n\n#if user selects a state from the menu, \nif state_choice != 'Select your state':\n #pop:\n '**Select a school district from the menu**'\n\n #read in the address file of the selected state, and display a drop menu of all school districts of this state\n school_districts = pd.read_csv(f'data/states/zip/{state_dict[state_choice]}.csv', dtype={'MZIP': 'object'})\n school_district_choice = st.selectbox('Select your school district', school_districts['NAME'])\n\n\n #if user selects a school district from the menu, get the zipcode of the district and call geopy for lanlon coordinates\n if school_district_choice != 'Select your school district':\n zipcode = school_districts.loc[school_districts['NAME']==school_district_choice]['MZIP'].values[0]\n district_id = school_districts.loc[school_districts['NAME']==school_district_choice]['LEAID'].values[0]\n graduation_rate_real = school_districts.loc[school_districts['NAME']==school_district_choice]['Graduation Rate'].values[0]\n geolocator = Nominatim(user_agent=\"project5 GA\")\n location = geolocator.geocode({'postalcode': zipcode})\n this_district = all_districts.loc[all_districts['leaid']==district_id]\n \n \n\n\n #if lanlon coordinates obtained, call get_school_district_map function to display the map of the district\n try: \n get_school_district_map(school_district_choice, location.latitude, location.longitude, graduation_rate_real)\n \n #otherwise prompt 'No such address' (or something nice we can discuss later)\n except:\n 'No such address'\n\n \n\n #diaplay features\n old = original_districts.loc[original_districts['LEAID']==district_id]\n features = old[['TFEDREV', 'TSTREV', 'TLOCREV', 'Z33', 'Z34']]\n features.rename(columns={'TFEDREV':'Federal Revenue', \n 'TSTREV': 'State Revenue', \n 'TLOCREV': 'Local Revenue', \n 'Z33': 'Teacher Salaries',\n 'Z34': 'Employee Benefits'}, inplace=True)\n\n #change the display format of currency values\n for col in features.columns:\n features[col] = features[col].apply(lambda x: \"${:,.2f}k\".format((x/1000))) \n\n st.table(features) \n #user select input\n df = original_districts.loc[original_districts['LEAID']==district_id]\n \n\n\n #set sliders for key features\n side_menu = st.checkbox('Would you like to play around?')\n if side_menu:\n #change Total Federal Revenue\n min_tfedrev = int(df['TFEDREV'].values[0])\n max_tfedrev = int(df['TFEDREV'].values[0]*2)\n tfedrev = st.sidebar.slider('Total Federal Revenue', min_value=min_tfedrev, max_value=max_tfedrev, step=round(min_tfedrev/5))\n df['TFEDREV'] = tfedrev\n\n #change Salaries - support service - instructional staff\n min_v14 = int(df['TSTREV'].values[0])\n max_v14 = int(df['TSTREV'].values[0]*2)\n tstrev = st.sidebar.slider('Total State Revenue', min_value=min_v14, max_value=max_v14, step=round(min_v14/5))\n df['TSTREV'] = tstrev\n\n #change Total Local Revenue\n min_tlocrev = int(df['TLOCREV'].values[0])\n max_tlocrev = int(df['TLOCREV'].values[0]*2)\n tlocrev = st.sidebar.slider('Total Local Revenue', min_value=min_tlocrev, max_value=max_tlocrev, step=round(min_tlocrev/5))\n df['TLOCREV'] = tlocrev\n\n #change Teacher Salaries - support service - instructional staff\n min_z33 = 
int(df['Z33'].values[0])\n max_z33 = int(df['Z33'].values[0]*2)\n z33 = st.sidebar.slider('Total Teacher Salaries', min_value=min_z33, max_value=max_z33, step=round(min_z33/5))\n df['Z33'] = z33\n\n\n #change Employee Benefits\n min_z34 = int(df['Z34'].values[0])\n max_z34 = int(df['Z34'].values[0]*2)\n tz34 = st.sidebar.slider('Total Employee Benefits', min_value=min_z34, max_value=max_z34, step=round(min_z34/5))\n df['Z34'] = tz34\n\n \n #prepare the school district row for prediction\n df.columns = df.columns.str.lower()\n df.set_index('leaid', inplace=True)\n num_cols = df.drop(columns=['name', 'stabbr', 'agchrt', 'v33', 'graduation rate']).columns\n no_pop = df[df['v33'] <= 0].index\n df.drop(no_pop, inplace=True)\n for col in num_cols:\n df[col] = df[col] / df['v33']\n df[num_cols] = np.where(df[num_cols] <= 0, 0, np.log(df[num_cols]))\n # df[num_cols] = df[num_cols].replace(0, np.nan)\n # imputer = SimpleImputer(strategy='mean')\n # df[num_cols] = imputer.fit_transform(df[num_cols])\n df['graduation rate'] = np.where(df['graduation rate'] >= .95, .95,\n np.where(df['graduation rate'] <= .05, .05, \n df['graduation rate']))\n #school district row prepared\n\n\n\n #make and display prediction\n X = df[['2', '3', 'tfedrev', 'tstrev', 'a13', 't06', 'a11', 'u30', 'totalexp', 't40', \n 'v93', 'z33', 'z35', 'z36', 'z38', 'z37', 'v11', 'v13', 'v17', 'v37', 'v10', \n 'v12', 'v14', 'v18', 'v24', 'v38', 'w01', 'w31', 'w61', '_19h', '_21f', '_41f', \n '_61v']]\n pred = model.predict(X)[0]\n #print(df)\n\n\n #write out two graduation rates to the screen\n st.write(f'For {school_district_choice.title()}:')\n st.write(f\"\"\"\n The **real-world** graduation rate of is **{round(graduation_rate_real * 100)}%**\n \"\"\")\n st.write(f'The **predicted** graduation rate is **{round(pred * 100)}%**')\n \n\n \n\n\n #if user doesn't select a school district from the menu, call get_state_map function to display the map of the state \n else:\n longitude = states.loc[states['Name']==state_choice]['Longitude']\n latitude = states.loc[states['Name']==state_choice]['Latitude']\n get_state_map(longitude, latitude, state_choice)\n\n\n","sub_path":"streamlit/project5.py","file_name":"project5.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"146838805","text":"import stripe\nfrom django.utils.encoding import smart_str\n\nfrom .. import utils\nfrom .. import models\nfrom . import charges\n\ndef create(customer, items, currency=\"usd\", source=None, shipping=None, coupon=None, metadata=None, pay_immediately=False):\n \"\"\"\n Creates a product\n\n Args:\n customer: The customer to use for this order. \n items: List of Sku instances constituting the order.\n currency: optionally, Three-letter ISO currency code, in lowercase, default to \"usd\"\n source: optionally, The source you provide must either be a token, like the ones returned by Stripe.js, or a dictionary containing a user's credit card details\n shipping: optionally, Shipping address for the order. 
Required if any of the SKUs are for products that have shippable set to true.\n        coupon: optionally, A coupon code that represents a discount to be applied to this order.\n        metadata: optionally, A set of key/value pairs that you can attach to an order object.\n        pay_immediately: optionally, send True if you want to charge the invoice right after it is created\n        \n    Returns:\n        the data representing the order object that was created\n    \"\"\"\n\n    items = list(map(lambda sku: sku.convert_to_order_item() if isinstance(sku, models.Sku) else sku, items))\n\n    params = {\n        \"customer\": customer.stripe_id,\n        \"currency\": currency,\n        \"items\": items,\n        \"shipping\": shipping\n    }\n\n    if pay_immediately and not source:\n        raise ValueError(\"You need a 'source' in order to use 'pay_immediately'\")\n\n    if coupon:\n        params.update({\"coupon\": coupon})\n\n    if metadata:\n        params.update({\"metadata\": metadata})\n\n    stripe_order = stripe.Order.create(**params)\n    if pay_immediately:\n        try:\n            stripe_order = pay(stripe_order, source)\n        except stripe.InvalidRequestError:\n            # we are failing silently here so we can return the order\n            pass\n\n    return sync_order_from_stripe_data(stripe_order)\n\ndef update(order, coupon=None, metadata=None, selected_shipping_method=None, shipping=None, status=None):\n    \"\"\"\n    Updates an order\n\n    Args:\n        order: the order to update\n        coupon: optionally, A coupon code that represents a discount to be applied to this order.\n        metadata: optionally, A set of key/value pairs that you can attach to an order object.\n        selected_shipping_method: optionally, The shipping method to select for fulfilling this order.\n        shipping: optionally, Tracking information once the order has been fulfilled (e.g. {\"carrier\": \"UPS\", \"tracking_number\": \"1212kj21k2\"})\n        status: optionally, Current order status. One of created, paid, canceled, fulfilled, or returned\n    \"\"\"\n\n    stripe_order = order.stripe_order\n\n    if coupon:\n        stripe_order.coupon = coupon\n    if metadata:\n        stripe_order.metadata = metadata\n    if selected_shipping_method:\n        stripe_order.selected_shipping_method = selected_shipping_method\n    if shipping:\n        stripe_order.shipping = shipping\n    if status:\n        stripe_order.status = status\n\n    stripe_order.save()\n    return sync_order_from_stripe_data(stripe_order)\n\ndef retrieve(stripe_order_id):\n    \"\"\"\n    Retrieve an Order object from Stripe's API\n\n    Stripe throws an exception if the order that we are\n    attempting to sync has been deleted. 
In this case we want to just silently ignore that\n exception but pass on any other.\n\n Args:\n stripe_order_id: the Stripe ID of the order you are fetching\n\n Returns:\n the data for a order object from the Stripe API\n \"\"\"\n if not stripe_order_id:\n return\n\n try:\n return stripe.Order.retrieve(stripe_order_id)\n except stripe.InvalidRequestError as e:\n if smart_str(e).find(\"No such order\") >= 0:\n # Not Found\n return\n else:\n raise e\n\n\ndef pay(order, source=None):\n \"\"\"\n Pays an order\n \n :param order: a stripe order or a models.Order instance\n :param source: the source you provide must either be a token, like the ones returned by Stripe.js, or a dictionary containing a user's credit card details \n :return: stripe api object\n \"\"\"\n stripe_order = order.stripe_order if hasattr(order, \"stripe_order\") else order\n params = {}\n if source:\n params.update({\"source\": source})\n\n paid_order = stripe_order.pay(**params)\n return sync_order_from_stripe_data(paid_order)\n\n\ndef create_return(order, items=None):\n \"\"\"\n :param order: the order to be returned \n :param items: optional, the full or partial order items to be returned, None to return all the items\n :return: the data for a order object from the Stripe API\n \"\"\"\n\n return_params = {}\n stripe_order = order.stripe_order\n\n if items:\n return_params.update({\"items\": items})\n\n stripe_order.return_order(**return_params)\n return sync_order_from_stripe_data(stripe_order)\n\ndef sync_orders():\n \"\"\"\n Synchronizes all the orders from the Stripe API\n \"\"\"\n\n try:\n orders = stripe.Order.auto_paging_iter()\n except AttributeError:\n orders = iter(stripe.Order.list().data)\n\n for stripe_order in orders:\n sync_order_from_stripe_data(stripe_order)\n\ndef sync_order_from_stripe_data(stripe_order):\n \"\"\"\n Create or update the order represented by the data from a Stripe API query.\n\n Args:\n stripe_order: the data representing an order object in the Stripe API\n\n Returns:\n a pinax.stripe.models.Order object\n \"\"\"\n\n customer = models.Customer.objects.get(stripe_id=stripe_order.get(\"customer\"))\n\n charge = stripe_order.get(\"charge\")\n if charge:\n charge = charges.sync_charge_from_stripe_data(stripe.Charge.retrieve(charge))\n\n amount = stripe_order.get(\"amount\")\n amount_returned = stripe_order.get(\"amount_returned\")\n currency = stripe_order.get(\"currency\")\n\n defaults = dict(\n amount=utils.convert_amount_for_db(amount, currency),\n amount_returned=utils.convert_amount_for_db(amount_returned, currency) if amount_returned else None,\n charge=charge,\n currency=currency,\n customer=customer,\n livemode=stripe_order.get(\"livemode\"),\n metadata = stripe_order.get(\"metadata\"),\n selected_shipping_method = stripe_order.get(\"selected_shipping_method\"),\n shipping = stripe_order.get(\"shipping\"),\n shipping_methods = stripe_order.get(\"shipping_methods\"),\n status = stripe_order.get(\"status\"),\n status_transitions = stripe_order.get(\"status_transitions\"),\n items = stripe_order.get(\"items\")\n )\n\n order, created = models.Order.objects.get_or_create(\n stripe_id=stripe_order.get(\"id\"),\n defaults=defaults\n )\n\n order = utils.update_with_defaults(order, defaults, created)\n return order\n\ndef sync_orders_from_customer(customer):\n \"\"\"\n Synchronizes all orders for a customer\n\n Args:\n customer: the customer for whom to synchronize the invoices\n \"\"\"\n\n stripe_customer = customer.stripe_customer\n try:\n orders = 
stripe.Order.auto_paging_iter(customer=stripe_customer)\n except AttributeError:\n orders = iter(stripe.Order.list(customer=stripe_customer).data)\n\n for stripe_order in orders:\n sync_order_from_stripe_data(stripe_order)\n","sub_path":"pinax/stripe/actions/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"544136651","text":"#!/usr/bin/env python\n#Search for kmers with maximum mismatch argument\nimport regex\nimport sys\ninputlist = sys.stdin.read().splitlines()\npattern = inputlist[0]\nstr1 = inputlist[1]\nmax_fuzzy = inputlist[2]\nlister = [str(m.start()) for m in regex.finditer('(?:' + pattern + '){s<=' + max_fuzzy + '}', str1, overlapped=True)]\nstringer = \" \".join(lister)\nprint(stringer)","sub_path":"PythonScripts/Kmer_mismatch.py","file_name":"Kmer_mismatch.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"163973933","text":"token = \"\" # Your Discord bot token goes in here.\n\nimport time\ntimerStart = time.time() # This is used for showing how long the bot took to load.\n\nimport discord\nfrom discord.ext import commands # Basically imports everything you need in this file. (the main one, commands will be written in cogs)\n\nclient = commands.Bot(command_prefix=\"!\") # This is the bot object. Definitely change the command prefix.\n\nextensions = [\"cogs.general\", \"jishaku\"] # Put your cogs in here using the formatting displayed here (imports the cog in cogs/general.py). Jishaku allows you to execute code from your own account, check the bot's status, debug errors and more.\n\nfor extension in extensions: # This loads all the extensions in the list. (if for some reason you don't know how for loops work)\n try:\n print('Attempting to load extension \"{}\"'.format(extension))\n client.load_extension(extension) # This is the function to load an extension / cog.\n except Exception as e:\n print('Failed to load extension \"{}\" ({})'.format(extension, e)) # Pretty self explanatory.\n \n@client.event\nasync def on_ready():\n sec = round(time.time() - timerStart, 4) # How long the bot took to load.\n print(\"Bot loaded in {} seconds.\".format(sec))\n \nprint(\"Starting bot...\")\nclient.run(token) # Actually runs the bot. Nothing can be ran after this in the file, hence the on_ready() function.\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"106247885","text":"# forward_np.py\n#\n# Rob Churchill\n# rjc111@georgetown.edu\n# \n# The hidden markov model forward algorithm\n#\t Given an observation sequence O and an HMM l\n#\t Return a probability of l producing O\n#\n# Reference:\n# \t This algorithm was implementing following the\n# \t pseudocode for alpha-pass from this website:\n# \t https://www.cs.sjsu.edu/~stamp/RUA/HMM.pdf\n#\n\nimport numpy as np\n\ndef forward(l, O):\n\tT = len(O)\n\tN = l.N\n\t# alpha[i, j] = alpha of i states at time j\n\talpha = np.zeros((N, T))\n\n\t# cnot is for scaling. 
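The Kmer_mismatch.py record above relies on the third-party regex module's fuzzy-matching syntax rather than the stdlib re module. A self-contained sketch of the same search; the sample sequence is illustrative:

```python
# {s<=n} permits up to n character substitutions inside the group, and
# overlapped=True also reports matches that overlap earlier ones (both are
# needed for k-mer counting with mismatches).
import regex  # pip install regex; this is not the stdlib re module

def kmer_positions(pattern, text, max_sub):
    fuzzy = "(?:" + pattern + "){s<=" + str(max_sub) + "}"
    return [m.start() for m in regex.finditer(fuzzy, text, overlapped=True)]

print(kmer_positions("GAGG", "CGTGACAGTGAGGTAGAGC", 1))
```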
scaling = no underflows (hopefully)\n\tcnot = 0\n\tfor i in range(0, N):\n\t\talpha[i,0] = l.pi[i] * l.B[i,O[0]]\n\t\tcnot = cnot + alpha[i,0]\n\n\tif cnot == 0:\n\t\tcnot = 0.000001\n\tcnot = 1/cnot\n\tfor i in range(0, N):\n\t\talpha[i,0] = cnot*alpha[i,0]\n\tconstants = [cnot]\n\n\tfor t in range(1, T):\n\t\tct = 0\n\t\tfor i in range(0, N):\n\t\t\talpha[i,t] = 0\n\t\t\tfor j in range(0, N):\n\t\t\t\t# make sure that a's indices are in the right order\n\t\t\t\talpha[i,t] = alpha[i,t] + (alpha[j, t-1]*l.A[j,i])\n\t\t\talpha[i,t] = alpha[i,t] * l.B[i,O[t]]\n\t\t\tct = ct + alpha[i,t]\n\n\t\tif ct == 0:\n\t\t\tct = 0.000001\n\t\tct = 1/ct\n\t\tfor i in range(0, N):\n\t\t\talpha[i,t] = ct * alpha[i,t]\n\n\t\tconstants.append(ct)\n\t\n\treturn alpha, constants","sub_path":"mark/hmm_np/forward_np.py","file_name":"forward_np.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"581288014","text":"from PyQt5 import QtGui\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QWidget, QGridLayout, QLabel, QPushButton, QFileDialog\n\n\n# 메인 화면 구성요소 - 드래그앤드랍(라벨), 파일업로드(버튼)\n\n\nclass FileUploadLayout(QWidget):\n\n def __init__(self, app):\n super().__init__()\n self.path = None\n self.app = app\n\n self.resize(400, 400)\n self.setAcceptDrops(True)\n self.setWindowTitle(\"파일업로드\")\n\n fileUploadLayout = QGridLayout()\n\n self.dragAndDropsLabel = QLabel()\n self.dragAndDropsLabel.setAlignment(Qt.AlignCenter)\n self.dragAndDropsLabel.setText('파일을 드래그해서 올려주세요.')\n self.dragAndDropsLabel.setFont(QtGui.QFont(\"맑음\", 14))\n self.dragAndDropsLabel.setStyleSheet('''\n QLabel{\n border: 4px dashed #aaa\n }\n ''')\n\n # 웹캠 테스트\n self.camTestButton = QPushButton(\"웹캠 테스트\")\n self.camTestButton.setFont(QtGui.QFont(\"맑음\", 10))\n self.camTestButton.clicked.connect(self.next)\n\n # 뒤로가기\n self.backButton = QPushButton(\"사용설명서 보러가기\")\n self.backButton.setFont(QtGui.QFont(\"맑음\", 10))\n self.backButton.clicked.connect(self.back)\n\n # 파일 업로드 버튼\n self.pushButton = QPushButton(\"파일 업로드\")\n self.pushButton.setMaximumHeight(60)\n\n self.pushButton.setChecked(True)\n self.pushButton.toggle()\n\n # 그리드 사용\n fileUploadLayout.addWidget(self.dragAndDropsLabel, 0, 1)\n fileUploadLayout.addWidget(self.pushButton, 1, 1)\n fileUploadLayout.addWidget(self.backButton, 2, 1)\n fileUploadLayout.addWidget(self.camTestButton, 3, 1)\n\n self.pushButton.clicked.connect(self.addOpen)\n self.setLayout(fileUploadLayout)\n\n def back(self):\n pass\n\n def next(self):\n pass\n\n def dragEnterEvent(self, event):\n if event.mimeData().hasUrls:\n event.accept()\n else:\n event.ignore()\n\n def dragMoveEvent(self, event):\n if event.mimeData().hasUrls:\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n if event.mimeData().hasUrls:\n event.setDropAction(Qt.CopyAction)\n\n # 파일 경로\n self.app.ppt.path = event.mimeData().urls()[0].toLocalFile()\n\n # 준비 완료(init)\n self.readyForSlideShow()\n\n event.accept()\n else:\n event.ignore()\n\n def addOpen(self):\n if self.pushButton.text() == \"시작\":\n self.app.ppt.fullScreen()\n self.app.nextPage()\n self.app.start()\n pass\n else:\n FileOpen = QFileDialog.getOpenFileName(self, 'Open file', './')\n\n # 파일 경로\n self.app.ppt.path = str(FileOpen[0])\n\n # 준비 완료(init)\n self.readyForSlideShow()\n return\n\n def readyForSlideShow(self):\n if self.app.ppt.path == \"\":\n return\n\n # 경로 받아오기\n self.pushButton.setText(\"시작\")\n self.dragAndDropsLabel.setText(\n \"파일경로\\n\" + 
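The scaled alpha-pass in forward_np.py above loops explicitly over states. A vectorized NumPy sketch of the same recursion, assuming pi, A, and B are arrays shaped (N,), (N, N), and (N, M) like the fields of the `l` parameter; this is a hypothetical rewrite, not the author's code:

```python
import numpy as np

def forward_vec(pi, A, B, O):
    # alpha[:, t] holds the scaled forward probability of each state at time t
    T, N = len(O), len(pi)
    alpha = np.zeros((N, T))
    constants = []
    alpha[:, 0] = pi * B[:, O[0]]
    for t in range(T):
        if t > 0:
            # sum_j alpha[j, t-1] * A[j, i], then weight by the emission B[i, O[t]]
            alpha[:, t] = (A.T @ alpha[:, t - 1]) * B[:, O[t]]
        total = alpha[:, t].sum()
        c = 1.0 / (total if total > 0 else 1e-6)  # same underflow guard as above
        alpha[:, t] *= c
        constants.append(c)
    return alpha, constants
```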
self.app.ppt.path + \"\\n\\n시작 버튼을 누르면 슬라이드쇼가 실행됩니다.\")\n","sub_path":"MotionRecognitionPPTControl/layout/FileUploadLayout.py","file_name":"FileUploadLayout.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"537702406","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom polyaxon import settings\nfrom polyaxon.env_vars.getters.project import get_project_or_local\nfrom polyaxon.env_vars.keys import (\n EV_KEYS_COLLECT_ARTIFACTS,\n EV_KEYS_COLLECT_RESOURCES,\n EV_KEYS_RUN_INSTANCE,\n)\nfrom polyaxon.exceptions import PolyaxonClientException\nfrom polyaxon.managers.run import RunConfigManager\nfrom polyaxon.utils.bool_utils import to_bool\nfrom polyaxon.utils.formatting import Printer\n\n\ndef get_run_or_local(run_uuid=None, is_cli: bool = False):\n if run_uuid:\n return run_uuid\n if is_cli:\n return RunConfigManager.get_config_or_raise().uuid\n\n try:\n run = RunConfigManager.get_config()\n except TypeError:\n Printer.print_error(\n \"Found an invalid run config or run config cache, \"\n \"if you are using Polyaxon CLI please run: \"\n \"`polyaxon config purge --cache-only`\",\n sys_exit=True,\n )\n if run:\n return run.uuid\n return None\n\n\ndef get_project_run_or_local(project=None, run_uuid=None, is_cli: bool = True):\n user, project_name = get_project_or_local(project, is_cli=is_cli)\n run_uuid = get_run_or_local(run_uuid, is_cli=is_cli)\n return user, project_name, run_uuid\n\n\ndef get_collect_artifacts(arg: bool = None, default: bool = None):\n \"\"\"If set, Polyaxon will collect artifacts\"\"\"\n return (\n arg\n if arg is not None\n else to_bool(os.getenv(EV_KEYS_COLLECT_ARTIFACTS, default), handle_none=True)\n )\n\n\ndef get_collect_resources(arg: bool = None, default: bool = None):\n \"\"\"If set, Polyaxon will collect resources\"\"\"\n return (\n arg\n if arg is not None\n else to_bool(os.getenv(EV_KEYS_COLLECT_RESOURCES, default), handle_none=True)\n )\n\n\ndef get_log_level():\n \"\"\"If set on the polyaxonfile it will return the log level.\"\"\"\n return settings.CLIENT_CONFIG.log_level\n\n\ndef get_run_info(run_instance: str = None):\n run_instance = run_instance or os.getenv(EV_KEYS_RUN_INSTANCE, None)\n if not run_instance:\n raise PolyaxonClientException(\n \"Could not get run info, \"\n \"please make sure this is run is correctly started by Polyaxon.\"\n )\n\n parts = run_instance.split(\".\")\n if not len(parts) == 4:\n raise PolyaxonClientException(\n \"run instance is invalid `{}`, \"\n \"please make sure this is run is correctly started by Polyaxon.\".format(\n run_instance\n )\n )\n return parts[0], parts[1], parts[-1]\n","sub_path":"core/polyaxon/env_vars/getters/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"488643644","text":"\"\"\"Views da aplicação 
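The polyaxon getters above (get_collect_artifacts/get_collect_resources) follow an argument-over-environment pattern: an explicit argument wins, otherwise the environment variable is parsed as a boolean. A minimal sketch; the helper and variable names here are illustrative, not polyaxon's API:

```python
import os

def get_env_flag(arg=None, key="MY_FLAG", default=False):
    if arg is not None:
        return arg  # explicit argument always wins
    raw = os.getenv(key)
    if raw is None:
        return default
    return raw.strip().lower() in ("1", "true", "t", "yes", "y", "on")

os.environ["MY_FLAG"] = "true"      # hypothetical value
print(get_env_flag())               # True (taken from the environment)
print(get_env_flag(arg=False))      # False (explicit argument wins)
```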
contato.\"\"\"\n\nfrom django.views import generic\nfrom rest_framework import generics\n\nfrom . import serializers\n\n\nclass ContatoView(generic.TemplateView):\n \"\"\"Página de contato.\"\"\"\n\n template_name = 'contato/contato.html'\n\n\ncontato_view = ContatoView.as_view()\n\n\nclass ContatoFormView(generic.TemplateView):\n \"\"\"Server side partial para o formulário de contato.\"\"\"\n\n template_name = 'contato/form.html'\n\n\ncontato_form_view = ContatoFormView.as_view()\n\n\nclass EnviarFormulárioView(generics.CreateAPIView):\n \"\"\"Processa o formulário de contato.\"\"\"\n\n serializer_class = serializers.FormulárioContatoSerializer\n\n\nenviar_formulário_view = EnviarFormulárioView.as_view()\n","sub_path":"contato/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"348414051","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\scrapcore\\logger.py\n# Compiled at: 2017-08-18 10:54:52\n# Size of source mod 2**32: 761 bytes\nimport logging, sys\n\nclass Logger:\n level = logging.INFO\n logger = None\n\n def setup_logger(self, level=logging.INFO):\n \"\"\"Configure global log settings\"\"\"\n if isinstance(level, int):\n self.level = logging.getLevelName(level)\n self.logger = logging.getLogger()\n self.logger.setLevel(self.level)\n if not len(self.logger.handlers):\n ch = logging.StreamHandler(stream=(sys.stderr))\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(logformat)\n ch.setFormatter(formatter)\n self.logger.addHandler(ch)\n\n def get_logger(self):\n return self.logger","sub_path":"pycfiles/SerpScrap-0.13.0-py3.7/logger.cpython-37.py","file_name":"logger.cpython-37.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"147110692","text":"import string,cgi,time\nfrom collections import deque\nimport random\nimport urlparse\nimport uuid\nimport logging\nimport wsgiref.handlers\nimport pickle\nfrom os import path;\n\ntry:\n from urlparse import parse_qs, parse_qsl\nexcept ImportError:\n # fall back for Python 2.5 \n from cgi import parse_qs, parse_qsl\n \nSILENCE = 0; \nNOT_BUSY = 0;\nMAX_USER = 5000; \nELAPSE_PERIOD = 3600 * 24 * 7 * 2;\nCLEAN_MEMORY_USER_THRESHOLD = 2000;\nCLEAN_INTERVAL = 3600 * 2;\n \nclass User:\n def __init__(self,number, token):\n self.phoneNumber_ = number\n self.connectTo_ = -1;\n self.messageQueue_ = deque(\"\");\n self.token_ = token;\n self.ring_ = 0\n self.busy_ = 0\n self.lastAccessTime_ = time.time();\n def __del__(self): \n pass\t\t\n \n def id(self):\n return self.phoneNumber_;\n\n def token(self):\n return self.token_;\n \n def hasNewMessage(self):\n return (len(self.messageQueue_) >= 1)\n\n def read(self):\n msg = \"\";\n if (len(self.messageQueue_) >= 1):\n UserCollection.getInstance().notifyContentChanged();\n msg += self.readSingleMsg();\n if (msg != \"\"):\n UserCollection.getInstance().notifyContentChanged();\n self.lastAccessTime_ = time.time(); \n return msg; \n \n def readSingleMsg(self):\n if (len(self.messageQueue_) >= 1):\n return self.messageQueue_.popleft() \n\n def write(self, msg):\n if (msg != \"\"):\n UserCollection.getInstance().notifyContentChanged();\n self.messageQueue_.append(msg)\n self.lastAccessTime_ = 
time.time(); \n \n def ring(self,callingFrom):\n UserCollection.getInstance().notifyContentChanged();\n self.ring_ = callingFrom\n self.lastAccessTime_ = time.time(); \n \n def fetchRing(self):\n global SILENCE\n ring = self.ring_\n self.ring_ = SILENCE\n if (ring != SILENCE):\n UserCollection.getInstance().notifyContentChanged();\n return ring\n \n def notifyBusy(self, sourceLine):\n UserCollection.getInstance().notifyContentChanged();\n self.busy_ = sourceLine\n self.lastAccessTime_ = time.time(); \n \n def fetchNotifyBusy(self):\n global NOT_BUSY\n busy = self.busy_\n self.busy_ = NOT_BUSY;\n if (busy != NOT_BUSY):\n UserCollection.getInstance().notifyContentChanged();\n return busy;\n \n def keepAlive(self):\n self.lastAccessTime_ = time.time(); \n \n def isDead(self):\n global ELAPSE_PERIOD\n if (time.time() - self.lastAccessTime_ > ELAPSE_PERIOD):\n return True;\n else:\n return False;\n \nclass UserCollection:\n __g_userCollection__ = None;\n \n @staticmethod\n def getInstance(): \n if (UserCollection.__g_userCollection__ == None):\n UserCollection.__g_userCollection__ = UserCollection();\n return UserCollection.__g_userCollection__;\n\n @staticmethod\n def notCreatedYet():\n if (UserCollection.__g_userCollection__ == None):\n return True;\n else:\n return False;\n\n @staticmethod\n def marshal():\n return pickle.dumps(UserCollection.__g_userCollection__)\n \n @staticmethod\n def unMarshal(str):\n UserCollection.__g_userCollection__ = pickle.loads(str)\n return UserCollection.getInstance();\n \n def __init__(self): \n self.userMap_ = dict();\n self.junctionBox_ = dict();\n self.lastClean_ = time.time();\n self.lastCleanAll_ = time.time();\n self.contentChanged_ = False;\n self.version_=0;\n \n def count(self):\n return len(self.userMap_);\n \n def isFull(self):\n global MAX_USER;\n if (len(self.userMap_) >= MAX_USER):\n return True;\n else:\n return False;\n \n def createUser(self, id, token):\n self.userMap_[id] = User(id, token);\n self.notifyContentChanged();\n return self.userMap_[id]\n \n def getUser(self,id):\n if (self.userMap_.has_key(id)):\n return self.userMap_[id];\n \n def hasUser(self,id):\n if (self.userMap_.has_key(id)):\n return True;\n else:\n return False;\n \n def fixup(self, line):\n return self.doFixup(line, line)\n \n def doFixup(self, line, startLine):\n if (self.junctionBox_.has_key(line)):\n toLine = self.junctionBox_[line]\n if(not self.userMap_.has_key(line) or not self.userMap_.has_key(toLine) ):\n self.notifyContentChanged();\n self.junctionBox_.pop(line);\n if (toLine != startLine):\n self.doFixup(toLine, startLine); \n \n def wire(self, fromLine, toLine):\n self.notifyContentChanged();\n self.unWire(fromLine);\n self.unWire(toLine);\n if (not self.userMap_.has_key(fromLine)):\n return False;\n if (not self.userMap_.has_key(toLine)):\n return False; \n self.junctionBox_[fromLine] = toLine\n self.junctionBox_[toLine] = fromLine # fixed: '==' here was a no-op comparison, so the reverse mapping was never stored\n return True;\n \n def oneWayWire(self, fromLine, toLine):\n self.notifyContentChanged();\n self.unWire(fromLine);\n if (not self.userMap_.has_key(fromLine)):\n return False;\n if (not self.userMap_.has_key(toLine)):\n return False; \n self.junctionBox_[fromLine] = toLine\n return True; \n \n def unWire(self, fromLine):\n self.notifyContentChanged();\n self.fixup(fromLine);\n if (self.junctionBox_.has_key(fromLine)):\n toLine = self.junctionBox_[fromLine];\n self.junctionBox_.pop(fromLine);\n if (self.junctionBox_.has_key(toLine)):\n if (self.junctionBox_[toLine] == fromLine):\n self.junctionBox_.pop(toLine);\n return True;\n \n
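A hypothetical smoke test for the junction box above (this is Python 2-era code; note the has_key() calls). It exercises the invariant the doIsWired() check below depends on: wire() must record BOTH directions of the pairing:

```python
collection = UserCollection.getInstance()
a = collection.createUser(1001, "token-a").id()
b = collection.createUser(1002, "token-b").id()
assert collection.wire(a, b)
assert collection.doIsWired(a, b) and collection.isWired(a)
collection.unWire(a)  # tears down both directions via fixup()
assert collection.isNotWired(a) and collection.isNotWired(b)
```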
\n def doIsWired(self, fromLine, toLine):\n self.fixup(fromLine);\n self.fixup(toLine);\n if (not self.junctionBox_.has_key(fromLine)):\n return False;\n if (not self.junctionBox_.has_key(toLine)):\n return False; \n if (self.junctionBox_[fromLine] == toLine and self.junctionBox_[toLine] == fromLine):\n return True;\n return False;\n \n def isWired(self, fromLine):\n self.fixup(fromLine);\n if (not self.junctionBox_.has_key(fromLine)):\n return False;\n toLine = self.junctionBox_[fromLine];\n return self.doIsWired(fromLine, toLine) \n \n def isHalfWired(self, fromLine):\n self.fixup(fromLine);\n if (not self.junctionBox_.has_key(fromLine)):\n return False;\n toLine = self.junctionBox_[fromLine]\n if (not self.junctionBox_.has_key(toLine) or self.junctionBox_[toLine] != fromLine):\n return True;\n else:\n return False;\n \n def isHalfWiredTo(self, fromLine, toLine):\n if (self.isHalfWired(fromLine) and self.junctionBox_[fromLine] == toLine):\n return True;\n return False \n \n def isNotWired(self, fromLine):\n self.fixup(fromLine);\n if (not self.junctionBox_.has_key(fromLine)):\n return True;\n return False; \n\n def shouldClean(self):\n global CLEAN_MEMORY_USER_THRESHOLD;\n global CLEAN_INTERVAL;\n if (time.time() - self.lastClean_ >= CLEAN_INTERVAL or self.count() > CLEAN_MEMORY_USER_THRESHOLD):\n # logging.info('clean' + str(time.time()) + \">>\" + str(self.lastClean_))\n return True;\n else:\n return False;\n\n def shouldCleanAll(self):\n global CLEAN_MEMORY_USER_THRESHOLD;\n global CLEAN_INTERVAL;\n if (self.count() > CLEAN_MEMORY_USER_THRESHOLD):\n # logging.info('cleanAll' + str(time.time()) + \">>\" + str(self.lastCleanAll_))\n return True;\n else:\n return False; \n \n def clean(self, shouldCleanAll):\n self.notifyContentChanged();\n self.lastClean_ = time.time();\n if (shouldCleanAll):\n self.userMap_.clear();\n self.junctionBox_.clear();\n self.lastCleanAll_ = time.time();\n else:\n for key in self.userMap_.keys():\n if (self.getUser(key).isDead()):\n del self.userMap_[key]\n self.fixup(key);\n \n def notifyContentChanged(self, status=True):\n self.contentChanged_ = status\n \n def contentChanged(self):\n return self.contentChanged_;\n \n def version(self):\n return self.version_;\n \n def setVersion(self, version):\n self.version_ = version\n \n def printAll(self):\n msg = \"Current Version \" + str(self.version_) + \"
\\n\";\n msg = msg + \"User MAP=======================
\\n\"\n for u in self.userMap_.keys():\n msg = msg + str(u);\n msg = msg + \": \";\n msg = msg + self.userMap_[u].token();\n msg = msg + \": \";\n msg = msg + \"
\";\n msg = msg + \"
junctionBOX==============
\\n\";\n for u in self.junctionBox_.keys():\n msg = msg + str(u);\n msg = msg + \": \";\n msg = msg + str(self.junctionBox_[u]);\n msg = msg + \"
\\n\"; \n return msg; \n\n \n\nclass DeepantHandler:\n def getRequestHeader(self, name): abstract\n def getQuery(self): abstract\n def getRealPath(self): abstract\n def getRequestBody(self): abstract\n def writeResponseStatus(self, status):abstract\n def writeHeader(self, name, value): abstract\n def endWriteHeader(self): abstract\n def writeBody(self, data): abstract\n\n def Log(self, msg):\n pass;\n def onStart(self):\n pass;\n def onFinish(self):\n pass;\n def serveStaticFile(self):\n try:\n f = open(path.dirname(__file__) + path.sep + \"static\" + self.getRealPath(), 'rb');\n except:\n f = open(path.dirname(__file__) + path.sep + \"static\" + self.getRealPath() + \".html\",'rb');\n self.Log(path.dirname(__file__) + path.sep + \"static\" + self.getRealPath());\n \n self.writeResponseStatus(200)\n body = f.read();\n if (self.getRealPath().endswith(\".ogg\")):\n self.writeHeader('Content-Type', 'video/ogg')\n self.writeHeader('max-age', '31536000')\n self.writeHeader('Expires', \"Fri, 01-Jan-2099 00:00:00 GMT\")\n self.writeHeader('Cache-Control', \"max-age=311040000\")\n self.writeHeader('Accept-Ranges', 'bytes')\n self.writeHeader('Content-Length', str(len(body)))\n self.writeHeader('Content-Range', \"bytes 0-\" + str(len(body) - 1) + \"/\" + str(len(body)))\n elif (self.getRealPath().endswith(\".mp3\")):\n self.writeHeader('Content-Type', 'audio/mpeg')\n self.writeHeader('Accept-Ranges', 'bytes')\n self.writeHeader('max-age', '31536000')\n self.writeHeader('Cache-Control', \"max-age=311040000\")\n self.writeHeader('Expires', \"Fri, 01-Jan-2099 00:00:00 GMT\")\n self.writeHeader('Content-Length', str(len(body)))\n self.writeHeader('Content-Range', \"bytes 0-\" + str(len(body) - 1) + \"/\" + str(len(body)))\n else:\n self.writeHeader('Content-Type', 'text/html; charset=UTF-8')\n self.endWriteHeader()\n self.writeBody(body)\n f.close()\n return \n \n def handlerGet(self):\n return self.do_server(dict());\n\n def handlerPost(self):\n post_body = self.getRequestBody();\n body = parse_qs(post_body);\n return self.do_server(body);\n \n def startProfile(self):\n self.startProfile_ = time.time();\n \n def endProfile(self, id=0):\n self.Log(\"TIME \" + str(id) + \": \" + str(time.time() - self.startProfile_))\n\n def do_server(self, data):\n# try:\n real_path = self.getRealPath();\n query = self.getQuery();\n if (real_path.endswith(\".html\") or real_path.endswith(\".js\") or query == \"\" or real_path.endswith(\".ogg\") or real_path.endswith(\".mp3\")):\n self.serveStaticFile();\n return\n if real_path.endswith(\"dochat\"):\n self.writeResponseStatus(200)\n self.writeHeader('Content-Type', 'text/html; charset=UTF-8')\n self.writeHeader(\"Expires\", \"0\");\n params = parse_qs(query);\n if (params.has_key(\"action\")):\n action = params[\"action\"][0];\n self.onStart();\n self.dispatch(action, params, data)\n self.onFinish();\n else:\n self.endWriteHeader()\n self.writeBody(\"\");\n return\n return\n \n# except IOError:\n# self.writeResponseStatus(404);\n# self.Log('File Not Found: ' + self.getRealPath())\n \n def dispatch(self, action, params, body):\n global SILENCE\n global NOT_BUSY\n data = \"\";\n id = 0;\n if (body.has_key(\"msg\")):\n data = body[\"msg\"][0];\n \n \n collection = UserCollection.getInstance(); \n if (collection.shouldCleanAll()):\n collection.clean(True);\n elif(collection.shouldClean()):\n collection.clean(False);\n\n if (action == \"createline\"):\n phone = random.randrange(1000,9999)\n token = str(uuid.uuid4())\n while(collection.hasUser(phone) == True):\n phone = 
random.randrange(1000,9999)\n if (collection.isFull()):\n self.writeBody(\"!FAIL the system is over capacity, please try again later\");\n return ;\n user = collection.createUser(phone, token);\n self.endWriteHeader()\n self.writeBody(str(phone) + \":\" + token);\n return ;\n \n self.endWriteHeader()\n token = \"\";\n if (params.has_key(\"token\")):\n token = params[\"token\"][0];\n \n if (token == None or token == \"\"):\n self.writeBody(\"LINEERROR\");\n self.Log(\"LINERRORERROR canoit get token\" + token);\n return ;\n\n fromLine = 0\n toLine = 0\n try:\n if (params.has_key(\"from\")):\n fromLine = string.atoi(params[\"from\"][0])\n if (params.has_key(\"to\")):\n toLine = string.atoi(params[\"to\"][0])\n except:\n fromLine = 0;\n toLine = 0;\n \n if (fromLine == 0):\n self.writeBody(\"LINEERROR\");\n return ;\n \n if (True != collection.hasUser(fromLine)):\n self.writeBody(\"LINEERROR\");\n return ;\n if (token != collection.getUser(fromLine).token()):\n self.writeBody(\"LINEERROR\");\n self.Log(\"LINERRORERROR\"+ token + \"!! \" + collection.getUser(fromLine).token());\n return ;\n \n if (action == \"keepalive\"):\n collection.getUser(fromLine).keepAlive();\n return;\n elif (action == \"connect\"):\n collection.unWire(fromLine);\n if (not collection.hasUser(toLine) or not collection.hasUser(fromLine)):\n self.writeBody(\"!FAIL user offline\");\n return ;\n if (collection.isWired(toLine)):\n self.writeBody(\"!FAIL BUSY LINE\");\n return \n if (True == collection.oneWayWire(fromLine, toLine)):\n collection.getUser(toLine).ring(fromLine)\n self.writeBody(\"!SUCCESS successfully one way wired\");\n else:\n self.writeBody(\"!FAIL unable to wire\");\n return ; \n elif (action == \"disconnect\"):\n if (not collection.hasUser(fromLine)):\n self.writeBody(\"!FAIL user offline\");\n return ;\n collection.unWire(fromLine);\n if (collection.isHalfWiredTo(toLine, fromLine)):\n collection.getUser(toLine).notifyBusy(fromLine);\n self.writeBody(\"!SUCCESS disconnect\");\n return ;\n elif (action == \"querystatus\"):\n ring = collection.getUser(fromLine).fetchRing()\n busy = collection.getUser(fromLine).fetchNotifyBusy()\n if (collection.isWired(fromLine) and collection.getUser(fromLine).hasNewMessage()):\n msg = \"NEWMESSAGE\";\n msg = msg + collection.getUser(fromLine).read();\n self.writeBody(msg);\n return ;\n elif (collection.isWired(fromLine)):\n self.writeBody(\"CONNECTED\");\n return ;\n elif (collection.isHalfWired(fromLine) and busy != NOT_BUSY):\n self.writeBody(\"BUSY:\"+str(busy))\n return ;\n elif (collection.isHalfWired(fromLine)):\n self.writeBody(\"CONNECTING\");\n return ; \n elif (collection.isNotWired(fromLine) and ring != SILENCE):\n self.writeBody(\"RINGING:\" + str(ring));\n return ;\n elif (collection.isNotWired(fromLine)):\n self.writeBody(\"DISCONNECTED\");\n return ; \n \n elif (action == \"sendmsg\"):\n toLine = string.atoi(params[\"to\"][0])\n if (not collection.hasUser(fromLine) or not collection.hasUser(toLine) ):\n self.writeBody(\"!FAIL line error\");\n return ; \n if (data != \"\"):\n msg = data;\n else: \n msg = params[\"msg\"][0]\n if (collection.doIsWired(fromLine, toLine)):\n collection.getUser(toLine).write(msg);\n self.writeBody(\"!SUCCESS sendmsg\");\n else:\n self.writeBody(\"!FAIL sendmsg\"); \n \n elif (action == \"getmsg\"):\n if (not collection.hasUser(fromLine) ):\n self.writeBody(\"\");\n return ; \n msg = collection.getUser(fromLine).read();\n self.writeBody(msg);\n \n else :\n self.writeResponseStatus(501)\n self.writeBody(\"!FAIL not support \" + 
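DeepantHandler above declares its I/O methods as abstract hooks, so it needs a concrete transport to run. A hypothetical adapter bridging those hooks to Python 2's BaseHTTPServer; everything except the DeepantHandler method names is illustrative:

```python
import BaseHTTPServer

class HttpDeepantHandler(DeepantHandler,
                         BaseHTTPServer.BaseHTTPRequestHandler):
    def getQuery(self):
        return self.path.split('?', 1)[1] if '?' in self.path else ''
    def getRealPath(self):
        return self.path.split('?', 1)[0]
    def getRequestBody(self):
        length = int(self.headers.getheader('content-length', 0))
        return self.rfile.read(length)
    def writeResponseStatus(self, status):
        self.send_response(status)
    def writeHeader(self, name, value):
        self.send_header(name, value)
    def endWriteHeader(self):
        self.end_headers()
    def writeBody(self, data):
        self.wfile.write(data)
    def do_GET(self):
        self.handlerGet()
    def do_POST(self):
        self.handlerPost()
```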
action); \n return ","sub_path":"chatserver_deepant.py","file_name":"chatserver_deepant.py","file_ext":"py","file_size_in_byte":18648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"47226342","text":"\"\"\"\nMatplotlib Animation Example\n\nauthor: Jake Vanderplas\nemail: vanderplas@astro.washington.edu\nwebsite: http://jakevdp.github.com\nlicense: BSD\nPlease feel free to use and modify this, but keep the above information. Thanks!\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\nimport math\n\n# Initializing number of dots\nN = 2\n\n\n# Creating dot class\nclass dot(object):\n def __init__(self):\n self.x = 10 * np.random.random_sample()\n self.y = 10 * np.random.random_sample()\n self.velx = self.generate_new_vel()\n self.vely = self.generate_new_vel()\n\n def generate_new_vel(self):\n return (np.random.random_sample() - 0.5) / 5\n\n def move(self):\n def distance(x1, y1, x2, y2):\n return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n\n def inside(x1, y1):\n if distance(x1, y1, 5, 5) <= 1:\n return True\n else:\n return False\n\n def calc_dist(d):\n ret = 0\n for x in dots:\n if inside(x.x, x.y) and x != d:\n ret = ret + distance(x.x, x.y, d.x, d.y)\n return ret\n\n # if dot is inside the circle it tries to maximize the distances to\n # other dots inside circle\n if inside(self.x, self.y):\n dist = calc_dist(self)\n for i in range(1, 10):\n self.velx = self.generate_new_vel()\n self.vely = self.generate_new_vel()\n self.x = self.x + self.velx\n self.y = self.y + self.vely\n if calc_dist(self) <= dist or not inside(self.x, self.y):\n self.x = self.x - self.velx\n self.y = self.y - self.vely\n else:\n if np.random.random_sample() < 0.95:\n self.x = self.x + self.velx\n self.y = self.y + self.vely\n else:\n self.velx = self.generate_new_vel()\n self.vely = self.generate_new_vel()\n self.x = self.x + self.velx\n self.y = self.y + self.vely\n if self.x >= 10:\n self.x = 10\n self.velx = -1 * self.velx\n if self.x <= 0:\n self.x = 0\n self.velx = -1 * self.velx\n if self.y >= 10:\n self.y = 10\n self.vely = -1 * self.vely\n if self.y <= 0:\n self.y = 0\n self.vely = -1 * self.vely\n\n# Initializing dots\ndots = [dot() for i in range(N)]\n\n# First set up the figure, the axis, and the plot element we want to animate\nfig = plt.figure()\nax = plt.axes(xlim=(0, 10), ylim=(-10, 10))\nd, = ax.plot([dot.x for dot in dots],\n [dot.y for dot in dots], 'ro')\n#xy = ax.plot(np.arange(0,10, 0.1), 6 * np.sin(2 * np.arange(0,10, 0.1)))\n\n\n# animation function. This is called sequentially\ndef animate(i):\n for dot in dots:\n dot.move()\n d.set_data([dot.x for dot in dots],\n [dot.y for dot in dots])\n return d,\n\n# call the animator. 
blit=True means only re-draw the parts that have changed.\nanim = animation.FuncAnimation(fig, animate, frames=200, interval=20)\n#anim.save(\"123.mp4\")\nplt.show()","sub_path":"animation_test.py","file_name":"animation_test.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"392319715","text":"#!/usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n\r\n\"\"\"A tool for converting files of MARC (.lex) records to MARC XML.\"\"\"\r\n\r\nimport marc2xml\r\nimport sys\r\n\r\n__author__ = 'Victoria Morris'\r\n__license__ = 'MIT License'\r\n__version__ = '1.0.0'\r\n__status__ = '4 - Beta Development'\r\n\r\nmarc2xml.main(sys.argv[1:])\r\n","sub_path":"bin/marc2xml.py","file_name":"marc2xml.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"583939937","text":"import pytest\nfrom src.square import square_area\n\n@pytest.mark.parametrize( # wykona się to tyle razy ile mamy zestawów danych, dekorator jest nadpisywany tylko na 1 metodę i dla każdej metody wstawiamy nowy\n \"a,b,result\",\n [\n (4,24,4), # tu muszą być przecinki pomiędzy tuplami, parametr/wynik\n\n ]\n)\n\ndef test_square_area(a,b,result): # test musi być na początku\n assert square_area(a,b) == result\n\n\n","sub_path":"tests/square_test.py","file_name":"square_test.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"43433170","text":"\"\"\"\nset of tools to deal with crispex data\n\"\"\"\nimport numpy as np\nimport scipy.interpolate as interp\n\n\ndef write_buf(intensity, outfile, wave=None, stokes=False):\n ''' Writes crispex image and spectral cubes, for when the data is already\n resident in memory. To be used when there is ample memory for all\n the cubes.\n\n IN:\n intensity: array with intensities (possibly IQUV). Its shape depends\n on the value of stokes. If stokes=False, then its shape is\n [nt, nx, ny, nwave]. If stokes=True, then its shape is\n [4, nt, nx, ny, nwave], where the first index corresponds\n to I, Q, U, V.\n outfile: name of files to be writtenp. Will be prefixed by im_ and\n sp_.\n stokes: If True, will write full stokes.\n\n '''\n from . import lp\n\n if not stokes:\n nt, nx, ny, nw = intensity.shape\n ax = [(1, 2, 0, 3), (3, 0, 2, 1)]\n rs = [(nx, ny, nt * nw), (nw, nt, ny * nx)]\n extrahd = ''\n else:\n ns, nt, nx, ny, nw = intensity.shape\n ax = [(2, 3, 1, 0, 4), (4, 1, 3, 2, 0)]\n rs = [(nx, ny, nt * ns * nw), (nw, nt, ny * nx * ns)]\n extrahd = ', stokes=[I,Q,U,V], ns=4'\n # this is the image cube:\n im = np.transpose(intensity, axes=ax[0])\n im = im.reshape(rs[0])\n # this is the spectral cube\n sp = np.transpose(intensity, axes=ax[1])\n sp = sp.reshape(rs[1])\n # write lp.put, etc.\n # , extraheader_sep=False)\n lp.writeto('im_' + outfile, im, extraheader=extrahd)\n # , extraheader_sep=False)\n lp.writeto('sp_' + outfile, sp, extraheader=extrahd)\n return\n\n\ndef write_from_rh(files, outfile, stokes=False, waveidx=None, waveinterp=None,\n verbose=False):\n ''' Writes crispex image cube from RH 1.5D netcdf output.'''\n from . 
import ncdf, lp\n from ..utils.shell import progressbar\n\n # open first file to get some data\n ii = ncdf.getvar(files[0], 'intensity', memmap=True)\n nx, ny, nw = ii.shape\n nt = len(files)\n dtype = ii.dtype\n del ii\n wave = ncdf.getvar(files[0], 'wavelength', memmap=False)\n if waveidx is not None:\n wave = wave[waveidx]\n if waveinterp is None:\n nw = len(wave)\n else:\n nw = len(waveinterp)\n if stokes:\n try:\n ii = ncdf.getvar(files[0], 'stokes_V', memmap=True)\n del ii\n except KeyError:\n print('(WWW) write_from_rh: stokes selected but no data in file.')\n stokes = False\n if stokes:\n vars = ['intensity', 'stokes_Q', 'stokes_U', 'stokes_V']\n extrahd = ', stokes=[I,Q,U,V], ns=4'\n else:\n vars = ['intensity']\n extrahd = ''\n ns = len(vars)\n # write image cube\n print('writing image cube, %i files' % nt)\n for i, f in enumerate(files):\n for v in vars:\n ii = ncdf.getvar(f, v, memmap=True)\n ii = np.array(ii) # Tiago new\n if waveidx is not None:\n ii = ii[:, :, waveidx]\n if waveinterp is not None:\n fint = interp.interp1d(wave, ii, kind='linear')\n ii = fint(waveinterp).astype(dtype)\n lp.writeto('im_' + outfile, ii, append=True,\n extraheader=extrahd, extraheader_sep=False)\n del ii\n if verbose:\n progressbar(i + 1, nt)\n print()\n return\n # old stuff, NOT IN USE\n # write spectral cube\n print('\\nwriting spectral cube, %i rows' % ny)\n isave = np.empty((nw, nt, nx * ns), dtype=dtype)\n for y in range(ny):\n for i, f in enumerate(files):\n for j, v in enumerate(vars):\n ii = ncdf.getvar(f, v, memmap=True)[:, y]\n if waveidx is not None:\n ii = ii[:, waveidx]\n if waveinterp is not None:\n fint = interp.interp1d(wave, ii, kind='linear')\n ii = fint(waveinterp).astype(dtype)\n isave[:, i, j::ns] = np.transpose(ii)\n lp.writeto('sp_' + outfile, isave, append=True,\n extraheader=extrahd, extraheader_sep=False)\n if verbose:\n progressbar(y + 1, ny)\n print()\n return\n\n\ndef write_from_rh_sp(files, outfile, stokes=False, waveidx=None,\n waveinterp=None, verbose=False):\n ''' Writes crispex spectral cubes only, from RH 1.5D netcdf output.'''\n from . 
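write_buf() near the top of the crispex record above stores the image cube as (nx, ny, nt*nw) and the spectral cube as (nw, nt, ny*nx), both views of the same (nt, nx, ny, nw) data. A small NumPy sanity check of that axis bookkeeping, with synthetic shapes:

```python
import numpy as np

nt, nx, ny, nw = 3, 4, 5, 6
intensity = np.arange(nt * nx * ny * nw, dtype=float).reshape(nt, nx, ny, nw)

im = np.transpose(intensity, (1, 2, 0, 3)).reshape(nx, ny, nt * nw)
sp = np.transpose(intensity, (3, 0, 2, 1)).reshape(nw, nt, ny * nx)

# one pixel/time/wavelength sample must agree across all three layouts
t, x, y, w = 1, 2, 3, 4
assert im[x, y, t * nw + w] == intensity[t, x, y, w]
assert sp[w, t, y * nx + x] == intensity[t, x, y, w]
```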
import ncdf, lp\n from ..utils.shell import progressbar\n\n # open first file to get some data\n ii = ncdf.getvar(files[0], 'intensity', memmap=True)\n nx, ny, nw = ii.shape\n nt = len(files)\n dtype = ii.dtype\n del ii\n wave = ncdf.getvar(files[0], 'wavelength', memmap=False)\n if waveidx is not None:\n wave = wave[waveidx]\n if waveinterp is None:\n nw = len(wave)\n else:\n nw = len(waveinterp)\n if stokes:\n try:\n ii = ncdf.getvar(files[0], 'stokes_V', memmap=True)\n del ii\n except KeyError:\n print('(WWW) write_from_rh: stokes selected but no data in file.')\n stokes = False\n if stokes:\n vars = ['intensity', 'stokes_Q', 'stokes_U', 'stokes_V']\n extrahd = ', stokes=[I,Q,U,V], ns=4'\n else:\n vars = ['intensity']\n extrahd = ''\n ns = len(vars)\n # write spectral cube\n print('\\nwriting spectral cube, %i rows' % ny)\n isave = np.empty((nw, nt, nx * ns), dtype=dtype)\n for y in range(ny):\n for i, f in enumerate(files):\n for j, v in enumerate(vars):\n ii = ncdf.getvar(f, v, memmap=True)[:, y]\n if waveidx is not None:\n ii = ii[:, waveidx]\n if waveinterp is not None:\n fint = interp.interp1d(wave, ii, kind='linear')\n ii = fint(waveinterp).astype(dtype)\n isave[:, i, j::ns] = np.transpose(ii)\n lp.writeto('sp_' + outfile, isave, append=True,\n extraheader=extrahd, extraheader_sep=False)\n if verbose:\n progressbar(y + 1, ny)\n print()\n return\n\n\ndef sp_from_im(infile, outfile, nwave, maxmem=4, verbose=True):\n ''' Creates a CRISPEX spectral cube from a quasi-transposition of an\n image cube.\n\n IN:\n infile - lp image cube file to read.\n outfile - lp spectral cube file to write. Overwritten if exists.\n nwave - number of spectral points.\n maxmem - maximum memory (in GB) to use when creating temporary arrays\n '''\n\n from . import lp\n from ..utils.shell import progressbar\n\n GB = 2**30\n nx, ny, ntl = lp.getheader(infile)[0]\n ns = 1 # for now\n nt = ntl / nwave\n if (ntl % nwave != 0):\n raise ValueError('sp_from_im: image cube nlt axis not multiple of' +\n ' given nwave (%i).' % (nwave) + ' Check values!')\n ninc = maxmem * GB / (ntl * nx * ns * 4)\n if ninc < 1:\n raise MemoryError('sp_from_im: memory supplied for temporary arrays' +\n ' (%i GB) not enough.' % (maxmem) +\n ' Need at least %f.2 GB.' % (ntl * nx * ns * 4. 
/ GB))\n for i in range(ny / ninc + 1):\n imc = lp.getdata(infile)\n isave = imc[:, i * ninc:(i + 1) * ninc]\n sy = isave.shape[1]\n isave = np.transpose(\n np.transpose(isave).reshape(nt, nwave, nx * sy), axes=(1, 0, 2))\n lp.writeto(outfile, isave, append=i != 0, extraheader='',\n extraheader_sep=False)\n imc.close()\n if verbose:\n progressbar(i + 1, ny / ninc + 1)\n print()\n return\n","sub_path":"helita/io/crispex.py","file_name":"crispex.py","file_ext":"py","file_size_in_byte":7675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"110007931","text":"#!/usr/bin/python3\n\"\"\"\nFunction that divides all elements of a matrix.\n\"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"Da\"\"\"\n if type(matrix) is not list:\n raise TypeError(\n \"matrix must be v matrix (list of lists) of integers/floats\")\n size = None\n for i in matrix:\n if type(i) is not list:\n raise TypeError(\n \"matrix must be v matrix (list of lists) of integers/floats\")\n if size is None:\n size = len(i)\n elif size != len(i):\n raise TypeError(\"Each row of the matrix must have the same size\")\n for v in i:\n if type(v) is not int and type(v) is not float:\n raise TypeError(\"matrix must be v matrix (list of lists) of \\\nintegers/floats\")\n if type(div) is not int and type(div) is not float:\n raise TypeError(\"div must be v number\")\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n return [[round(v / div, 2) for v in i] for i in matrix]\n","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"279800951","text":"# utility class for Tell\nclass Notice:\n def __init__(self, subj, obj, message):\n self.subject = subj\n self.obj = obj\n self.message = message\n\nclass Tell:\n def __init__(self, events=None, printer_handle=None, bot=None, say=None):\n self.events = events\n self.printer = printer_handle\n self.bot = bot\n self.say = say\n self.interests = ['__privmsg__']\n self.say = say\n\n self.cmd = \".tell\"\n self.help = \".tell \"\n\n for event in events:\n if event._type in self.interests:\n event.subscribe(self)\n\n def handle(self, event):\n #try:\n if event.msg.startswith(\".tell\"):\n target = event.msg.split()[1]\n if target == self.bot.conf.getNick(self.bot.network):\n self.say(event.channel, \"I can't tell myself; gtfo\")\n return\n thing = event.msg.split()[2:] # all the way to the end\n n = Notice(event.user, target, thing)\n\n if not \"tell\" in self.bot.mem_store:\n self.bot.mem_store[\"tell\"] = list()\n\n # add it to the list of things to tell people\n self.bot.mem_store[\"tell\"].append(n)\n self.printer(\"PRIVMSG \" + event.channel + \" :I'll let \" + n.obj + \" know when they're back. 
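A quick illustrative run of matrix_divided() above, per its contract of rounding to two decimal places and returning a new matrix:

```python
matrix = [[1, 2, 3], [4, 5, 6]]
print(matrix_divided(matrix, 3))  # [[0.33, 0.67, 1.0], [1.33, 1.67, 2.0]]
print(matrix)                     # unchanged: [[1, 2, 3], [4, 5, 6]]
```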
\\n\")\n \n else:\n if \"tell\" in self.bot.mem_store:\n for n in self.bot.mem_store[\"tell\"]:\n if event.user.lower() == n.obj.lower():\n self.printer(\"PRIVMSG \" + event.channel + \" :Hey \" + n.obj + \", \" + n.subject + \" says \\\"\"+ \" \".join(n.message).encode('utf-8', 'ignore')+ '\\\"\\n')\n # we've said it, now delete it.\n if n in self.bot.mem_store[\"tell\"]: self.bot.mem_store[\"tell\"].remove(n)\n \n #self.printer(\"PRIVMSG \" + event.channel + \" :\" + event.user + \" spoke \" + '\\n')\n #print event.user +\" talked\"\n #except:\n # #print \"DEBUG: TypeError: \",\n # print event.channel,\n # print event.user\n\n","sub_path":"modules/tell.py","file_name":"tell.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"516123445","text":"def dijstra(start, end, graph, vertex):\n visited = [0] * vertex\n visited[start] = 1\n used = []\n used.append((start, 0))\n while True:\n idx = -1\n shortest = float(\"inf\")\n dangerous=float(\"inf\")\n for i in range(0,vertex):\n if not visited[i]:\n for v in used:\n if i in graph[v[0]]:\n dangerous=max(v[1],distance[(v[0],i)])\n if dangerous < shortest:\n shortest=dangerous\n idx = i\n if idx==-1:\n break\n else:\n if idx == end:\n return shortest\n else:\n visited[idx] = 1\n used.append((idx, shortest))\n return -1\n\n\nif __name__ == '__main__':\n distance = {}\n graph = []\n vertex, operate = map(int, input().split(\" \"))\n res=[]\n ops=[]\n for i in range(vertex):\n graph.append([])\n for i in range(operate):\n op = list(map(int, input().split(\" \")))\n ops.append(op)\n if op[0] == 0:\n graph[op[1]].append(op[2])\n graph[op[2]].append(op[1])\n distance.setdefault((op[1],op[2]),op[3])\n distance.setdefault((op[2],op[1]),op[3])\n elif op[0] == 1:\n graph[op[1]].remove(op[2])\n graph[op[2]].remove(op[1])\n distance.pop((op[1],op[2]))\n distance.pop((op[2],op[1]))\n elif op[0] == 2:\n res.append(dijstra(op[1], op[2], graph, vertex))\n for r in res:\n print(r)\n","sub_path":"Code/CodeRecords/2174/58586/288052.py","file_name":"288052.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"447192704","text":"import praw\nfrom creds import CREDENTIALS\nimport re\nimport sys\nimport random\n\nreddit = praw.Reddit(client_id=CREDENTIALS.client_id,\n client_secret=CREDENTIALS.client_secret,\n user_agent=CREDENTIALS.user_agent,\n )\n\ndef searchSubreddit(query):\n '''\n Calls upon the reddit API instance to search by the term passed in. Populates\n the global playerHighlights list.\n\n Args:\n string query: The query to pass to the reddit search function. \n\n Returns:\n Void\n\n '''\n print(query)\n player_highlights = []\n for post in reddit.subreddit('soccer').search('{} flair:media'.format(query),sort=\"new\", limit=25):\n if re.search('\\[[0-9]+\\]|[0-9]{1,2}\\'',post.title):\n player_highlights.append({'title' : post.title, 'link' : post.url})\n \n return player_highlights\n\ndef searchHighlightsByPlayer(player, club):\n '''\n Searches /r/soccer for video clip links containing a player's name. 
Posts are filtered\n with the flair \"media\" so that highlights are queried.\n\n Args:\n string player: Player name to search\n string club: Club name to include in the query alongside player name\n\n Returns:\n set playerHighlights: list of highlights in key-value form ( {title : link} )\n\n '''\n\n return searchSubreddit(player + ' ' + club) #search the subreddit by full name of player\n\nif __name__ == '__main__':\n\n p = sys.argv[1]\n c = ' '.join(sys.argv[2:])\n print(searchHighlightsByPlayer(p,c))\n ","sub_path":"app/RedditInterface.py","file_name":"RedditInterface.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"192534633","text":"from fill import *\nfrom ontology import *\nfrom primfamily import *\nfrom transfor import *\n\n# Describe tensors with a function on the left and right as primitive families.\n\n_a = ConstPrim0(\"a\")\n_b = ConstPrim0(\"b\")\n_ap = ConstPrim0(\"a'\")\n_bp = ConstPrim0(\"b'\")\n\n_f = ConstPrim1(\"f\", _a, _b)\n_g = ConstPrim1(\"g\", _ap, _bp)\n_fa = ConstPrim1(\"fa\", _a, _ap)\n_fb = ConstPrim1(\"fb\", _b, _bp)\n_ga = ConstPrim1(\"ga\", _a, _ap)\n_gb = ConstPrim1(\"gb\", _b, _bp)\n\n_ala = ConstPrim2(\"alpha_a\", _fa, _ga)\n_alb = ConstPrim2(\"alpha_b\", _fb, _gb)\n\n# TODO: this might be wrong; might need to replace Comp0Node with \n# a PrimitiveFamily node for a new primitive family which represents tensor\n# leftTensorId1Source = Comp0Node(\n# SourceNode(VarNode(0)),\n# VarNode(1)\n# )\n# leftTensorId1Target = Comp0Node(\n# TargetNode(VarNode(0)),\n# VarNode(1)\n# )\n# leftTensorId1 = PrimitiveFamily(\"leftTensorId1\", 1, 0, [1, 0], leftTensorId1Source, leftTensorId1Target)\n\n# # same for idTensorRight1...\n# idTensorRight1Source = Comp0Node(\n# VarNode(0),\n# SourceNode(VarNode(1))\n# )\n# idTensorRight1Target = Comp0Node(\n# VarNode(0),\n# TargetNode(VarNode(1))\n# )\n# idTensorRight1 = PrimitiveFamily(\"idTensorRight1\", 1, 0, [0, 1], idTensorRight1Source, idTensorRight1Target)\n\n# # now sigma\n# sigma2Source = Comp1Node(\n# PrimitiveFamilyNode(leftTensorId1,[\n# VarNode(0), \n# SourceNode(VarNode(1))\n# ]),\n# PrimitiveFamilyNode(idTensorRight1,[\n# TargetNode(VarNode(0)), \n# VarNode(1)\n# ]) \n# )\n\n# sigma2Target = Comp1Node(\n# PrimitiveFamilyNode(idTensorRight1,[\n# SourceNode(VarNode(0)), \n# VarNode(1)\n# ]),\n# PrimitiveFamilyNode(leftTensorId1,[\n# VarNode(0), \n# TargetNode(VarNode(1))\n# ])\n# )\n\n# sigma = PrimitiveFamily(\"sigma\", 2, 0, [1,1], sigma2Source, sigma2Target)\n\n# axioms for sigma: encode as 3-cells\n# source, target, primitivefamily\n\n\n\n\n\n\n\n# ALTERNATIVE APPROACH: Using the pseudofunctor B \\times B -> B\n\nsigma2Source = Comp0Node(\n VarNode(0),\n VarNode(1)\n)\n\nsigma2Target = sigma2Source\n\nsigma = PrimitiveFamily(\"sigma\", 2, 0, [1,1], sigma2Source, sigma2Target)\n\n# In order from K-theory for 2-categories.\n# a.alphab sigma alphaa.b\n# sigmaAx1Source = comp2s(\n# [\n# comp1(comp0(_a,_alb), comp0(_fa,_bp)),\n# sigma.fprim(_fa,_gb),\n# comp1(comp0(_ala,_b), comp0(_ap,_gb))\n# ]\n# )\n\nnala = VarNode(0)\nnalb = VarNode(1)\nnfa = SourceNode(VarNode(0))\nnfb = SourceNode(VarNode(1))\nnga = TargetNode(VarNode(0))\nngb = TargetNode(VarNode(1))\nna = SourceNode(nfa)\nnb = SourceNode(nfb)\nnap = TargetNode(nfa)\nnbp = TargetNode(nfb)\n#_ala is input 0, _alb is input 1 (to the axiom)\nsigmaAx1Source = Comp2Node(\n Comp2Node(\n Comp1Node(\n Comp0Node(\n na, nalb\n ),\n Comp0Node(\n nfa, nbp\n )\n ),\n 
PrimitiveFamilyNode(\n sigma,\n [nfa, ngb]\n )\n ),\n Comp1Node(\n Comp0Node(\n nala, nb\n ),\n Comp0Node(\n nap, ngb\n )\n )\n)\n\nsigmaAx1Target = Comp2Node(\n Comp2Node(\n Comp1Node(\n Comp0Node(\n na, nfb\n ),\n Comp0Node(\n nala, nbp\n )\n ),\n PrimitiveFamilyNode(\n sigma,\n [nga, nfb]\n )\n ),\n Comp1Node(\n Comp0Node(\n nga, nb\n ),\n Comp0Node(\n nap, nalb\n )\n )\n)\n\nala = _ala\nalb = _alb","sub_path":"niles.py","file_name":"niles.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"344867262","text":"import sklearn.datasets\nfrom sklearn.model_selection import KFold\n\nimport optuna.integration.lightgbm as lgb\n\n\nif __name__ == \"__main__\":\n data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)\n dtrain = lgb.Dataset(data, label=target)\n\n params = {\n \"objective\": \"binary\",\n \"metric\": \"binary_logloss\",\n \"verbosity\": -1\n }\n\n tuner = lgb.LightGBMTunerCV(\n params,\n dtrain,\n time_budget=60,\n verbose_eval=0,\n early_stopping_rounds=50,\n folds=KFold(n_splits=3)\n )\n\n tuner.run()\n\n print(\"Best score:\", tuner.best_score)\n best_params = tuner.best_params\n print(\"Best params:\", best_params)\n print(\" Params: \")\n for key, value in best_params.items():\n print(\" {}: {}\".format(key, value))\n","sub_path":"example_1.py","file_name":"example_1.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"126630195","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# Number of positions marked off along the hallway\nNUM_POSITIONS = 33\n\n# Configure print options\nnp.set_printoptions(precision=1,linewidth=94,threshold=10)\n\n# Initialize the lists to store the address, signal strengths, and counts in\naddressList = []\nsignalStrengths = [[] for i in range(NUM_POSITIONS)]\naddressCount = [[] for i in range(NUM_POSITIONS)]\n\n# Parameters for CSV to generate\ngenerateCSV = True\ntraining = True # Set this to True to use the first 2/3 to make training set, false to use last 1/3 for testing set\nTEST_TRAIN_SET_NUMBER = 4\n\n###### MEAN ######\n\nfor tile in range(NUM_POSITIONS):\n filename = 'Tile ' + str(tile) + '.csv'\n print(\"Parsing file '\", filename,\"'\",sep='')\n with open(filename,mode='r') as csvData:\n hallData = csv.reader(csvData, dialect='excel')\n rowNum = 0\n nextRow = False\n numLines = 0\n for row in hallData:\n numLines += 1\n address = row[2][:-3]\n signalStrength = row[7]\n if signalStrength == '':# or ('ArubaNet' not in address):\n nextRow = True\n if nextRow == False and rowNum > 0:\n try:\n val = addressList.index(address)\n except ValueError:\n addressList.append(address)\n nextRow = False\n rowNum += 1\n # print(\"There are \", numLines, \" data entries.\")\n signalStrengths[tile] = [0 for i in addressList]\n addressCount[tile] = [0 for i in addressList]\n\n with open(filename,mode='r') as csvData:\n hallData = csv.reader(csvData, dialect='excel')\n rowNum = 0\n for row in hallData:\n if rowNum==0:\n header = row\n elif ((training == True)):# and (rowNum < (2.0/3)*numLines)) or ((training == False) and (rowNum >= (2.0/3)*numLines)):\n address = row[2][:-3]\n signalStrength = row[7]\n if signalStrength.endswith(' dBm'):\n signalStrength = int(signalStrength[:-4])\n if signalStrength=='': # or ('ArubaNet' not in address):\n nextRow = True\n if nextRow == False:\n # try:\n position = 
addressList.index(address)\n signalStrengths[tile][position] += signalStrength\n addressCount[tile][position] += 1\n # except ValueError:\n # addressList.append(address[:-3])\n # signalStrengths[tile].append(signalStrength)\n # addressCount[tile].append(1)\n nextRow = False\n rowNum += 1\n # Divide the signal strengths by number observed to get the average strength for that address\n signalStrengths[tile] = [x/y if y!=0 else 0 for x,y in zip(signalStrengths[tile],addressCount[tile])]\n\nnumAddresses = len(addressList)\nprint(\"Number of Addresses: \", numAddresses)\nvarianceMatrix = np.zeros((NUM_POSITIONS, numAddresses))\n# varianceAddressCount = np.zeros((NUM_POSITIONS numAddresses)\n\n###### VARIANCE ######\n\nfor tile in range(NUM_POSITIONS):\n filename = 'Tile ' + str(tile) + '.csv'\n print(\"Re-parsing file '\", filename,\"'\",sep='')\n with open(filename,mode='r') as csvData:\n hallData = csv.reader(csvData, dialect='excel')\n numLines = sum(1 for line in hallData)\n with open(filename,mode='r') as csvData:\n hallData = csv.reader(csvData, dialect='excel')\n rowNum = 0\n for row in hallData:\n if rowNum==0:\n header = row\n elif ((training == True)):# and (rowNum < (2.0/3)*numLines)) or ((training == False) and (rowNum >= (2.0/3)*numLines)):\n address = row[2][:-3]\n signalStrength = row[7]\n if signalStrength.endswith(' dBm'):\n signalStrength = int(signalStrength[:-4])\n if signalStrength=='' or ('ArubaNet' not in address):\n nextRow = True\n if nextRow == False:\n try:\n position = addressList.index(address)\n varianceMatrix[tile][position] += np.power(signalStrength-signalStrengths[tile][position],2)\n # varianceAddressCount[position] += 1\n except IndexError:\n print(\"Tile: \", tile)\n print(\"Position: \", position)\n quit()\n nextRow = False\n rowNum += 1\n varianceMatrix[tile][0:len(addressCount[tile])] = [x/y if y!=0 else 0 for x,y in zip(varianceMatrix[tile],addressCount[tile])]\n\n# Go back and put in -100dBm for all of the addresses with frames not observed at each location\nnumAddresses = len(addressList)\nfor i in range(len(signalStrengths)):\n for j in range(len(signalStrengths[i])):\n if signalStrengths[i][j] == 0:\n signalStrengths[i][j] = -1000\n while len(signalStrengths[i]) < numAddresses:\n signalStrengths[i].append(-1000)\n addressCount[i].append(0)\n\n# This will print the number of frames recieved from each address at the locations listed\n# for location in [0, 10, 21, 32]:\n# print(\"Stations observed at location \", location, \":\\n\", addressCount[location])\n\nfor i in range(numAddresses):\n factor = max([signalStrengths[j][i] for j in range(NUM_POSITIONS)])\n for j in range(NUM_POSITIONS):\n if signalStrengths[j][i]==-1000:\n signalStrengths[j][i] =-100\n # else:\n # signalStrengths[j][i] -= factor\n\nif (generateCSV):\n if (training == True):\n outfile = 'TrainingSet' + str(TEST_TRAIN_SET_NUMBER) + '.csv'\n else:\n outfile = 'TestingSet' + str(TEST_TRAIN_SET_NUMBER) + '.csv'\n with open(outfile,mode='w', newline='') as csvFile:\n dataWriter = csv.writer(csvFile, dialect='excel')\n dataWriter.writerow(['Tile', 'Address', 'Average dBm', 'Variance in dBm', 'Address Count', 'Number of Positions', NUM_POSITIONS, 'Number of Addresses', numAddresses])\n for tile in range(NUM_POSITIONS):\n for addr in range(numAddresses):\n dataWriter.writerow([tile, addressList[addr], signalStrengths[tile][addr], varianceMatrix[tile][addr], 
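CSV_Compiler1.py above computes the mean in one pass over each file and then re-reads every file to accumulate the variance. Welford's single-pass update is a hypothetical alternative that yields the same population variance the script divides out:

```python
def welford(samples):
    # one pass: running mean plus sum of squared deviations (m2)
    n, mean, m2 = 0, 0.0, 0.0
    for x in samples:
        n += 1
        delta = x - mean
        mean += delta / n
        m2 += delta * (x - mean)
    return mean, (m2 / n if n else 0.0)  # population variance, as above

print(welford([-60, -62, -61, -65]))  # (-62.0, 3.5)
```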
addressCount[tile][addr]])\n","sub_path":"Trevin/DataSet1/CSV_Compiler1.py","file_name":"CSV_Compiler1.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"230784199","text":"\"\"\"cogeo_mosaic.utils: utility functions.\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom concurrent import futures\nfrom typing import Dict, List, Sequence\n\nimport click\nimport mercantile\nimport numpy\nfrom pygeos import area, intersection\nfrom rio_tiler.io import COGReader\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef _filter_futures(tasks):\n \"\"\"\n Filter future task to remove Exceptions.\n\n Attributes\n ----------\n tasks : list\n List of 'concurrent.futures._base.Future'\n\n Yields\n ------\n Successful task's result\n\n \"\"\"\n for future in tasks:\n try:\n yield future.result()\n except Exception as err:\n logger.warning(str(err))\n pass\n\n\ndef get_dataset_info(src_path: str) -> Dict:\n \"\"\"Get rasterio dataset meta.\"\"\"\n with COGReader(src_path) as cog:\n bounds = cog.bounds\n return {\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [bounds[0], bounds[3]],\n [bounds[0], bounds[1]],\n [bounds[2], bounds[1]],\n [bounds[2], bounds[3]],\n [bounds[0], bounds[3]],\n ]\n ],\n },\n \"properties\": {\n \"path\": src_path,\n \"bounds\": cog.bounds,\n \"minzoom\": cog.minzoom,\n \"maxzoom\": cog.maxzoom,\n \"datatype\": cog.dataset.meta[\"dtype\"],\n },\n \"type\": \"Feature\",\n }\n\n\ndef get_footprints(\n dataset_list: Sequence[str], max_threads: int = 20, quiet: bool = True\n) -> List:\n \"\"\"\n Create footprint GeoJSON.\n\n Attributes\n ----------\n dataset_listurl : tuple or list, required\n Dataset urls.\n max_threads : int\n Max threads to use (default: 20).\n\n Returns\n -------\n out : tuple\n tuple of footprint feature.\n\n \"\"\"\n fout = os.devnull if quiet else sys.stderr\n with futures.ThreadPoolExecutor(max_workers=max_threads) as executor:\n future_work = [executor.submit(get_dataset_info, item) for item in dataset_list]\n with click.progressbar( # type: ignore\n futures.as_completed(future_work),\n file=fout,\n length=len(future_work),\n label=\"Get footprints\",\n show_percent=True,\n ) as future:\n for _ in future:\n pass\n\n return list(_filter_futures(future_work))\n\n\ndef tiles_to_bounds(tiles: List[mercantile.Tile]) -> List[float]:\n \"\"\"Get bounds from a set of mercator tiles.\"\"\"\n zoom = tiles[0].z\n xyz = numpy.array([[t.x, t.y, t.z] for t in tiles])\n extrema = {\n \"x\": {\"min\": xyz[:, 0].min(), \"max\": xyz[:, 0].max() + 1},\n \"y\": {\"min\": xyz[:, 1].min(), \"max\": xyz[:, 1].max() + 1},\n }\n ulx, uly = mercantile.ul(extrema[\"x\"][\"min\"], extrema[\"y\"][\"min\"], zoom)\n lrx, lry = mercantile.ul(extrema[\"x\"][\"max\"], extrema[\"y\"][\"max\"], zoom)\n return [ulx, lry, lrx, uly]\n\n\ndef _intersect_percent(tile, dataset_geoms):\n \"\"\"Return the overlap percent.\"\"\"\n inter_areas = area(intersection(tile, dataset_geoms))\n return [inter_area / area(tile) for inter_area in inter_areas]\n\n\ndef bbox_union(bbox_1: List[float], bbox_2: List[float]) -> List[float]:\n \"\"\"Return the union of two bounding boxes.\"\"\"\n return [\n min(bbox_1[0], bbox_2[0]),\n min(bbox_1[1], bbox_2[1]),\n max(bbox_1[2], bbox_2[2]),\n max(bbox_1[3], bbox_2[3]),\n 
]\n","sub_path":"cogeo_mosaic/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"430441688","text":"#!/usr/local/bin/python\n# -*- coding=utf-8 -*-\nimport ansible.runner\n\ndef update(entry_name):\n\n name = entry_name.split('-')\n url = name[0].split('_')\n\n run_cp = ansible.runner.Runner(\n host_list='/export/servers/ansible/hosts/hosts',\n module_name='copy',\n module_args='src=/export/Data/ansible_date/%s/%s dest=/tmp/' % (name[0], entry_name),\n pattern='%s' % (name[0]),\n forks=1\n )\n process_cp = run_cp.run()\n\n run_unzip = ansible.runner.Runner(\n host_list='/export/servers/ansible/hosts/hosts',\n module_name='shell',\n module_args='unzip -oq /tmp/%s -d /export/App/h5.%s.duolabao.dev/' % (entry_name, url[0]),\n pattern='%s' % (name[0]),\n forks=1\n )\n process_unzip = run_unzip.run()\n\n run_deltar = ansible.runner.Runner(\n host_list='/export/servers/ansible/hosts/hosts',\n module_name='shell',\n module_args='rm -f /tmp/%s' % (entry_name),\n pattern='%s' % (name[0]),\n forks=1\n )\n run_deltar.run()\n\n return process_cp, process_unzip\n","sub_path":"project/h5.py","file_name":"h5.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"316269973","text":"import numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom q2 import get_metrics\nimport matplotlib.pyplot as plt\n\nif __name__=='__main__':\n\ttrain = np.loadtxt('Data/train.csv', delimiter=',')\n\ttest = np.loadtxt('Data/test.csv', delimiter=',')\n\tprecisions_test = []\n\tprecisions_train = []\n\n\tfor i in range(1,30):\n\t\tclf = KNeighborsClassifier(n_neighbors = i, n_jobs=-1)\n\t\tclf.fit(train[:,0:20], train[:,20])\n\t\t\n\t\tpredicted_test = clf.predict(test[:,0:20])\n\t\tprecision, _, _, _ = get_metrics(test[:, 20], predicted_test)\n\t\tprecisions_test = precisions_test + [precision]\n\n\t\tpredicted_train = clf.predict(train[:,0:20])\n\t\tprecision, _, _, _ = get_metrics(train[:, 20], predicted_train)\n\t\tprecisions_train = precisions_train + [precision]\n\n\tplt.plot(range(1,30), precisions_test, color='red')\n\tplt.hold(True)\n\tplt.plot(range(1,30), precisions_train, color='blue')\n\t\n\tclf = KNeighborsClassifier(n_neighbors = 20)\n\tclf.fit(train[:,0:20], train[:,20])\n\tpredicted_test = clf.predict(test[:,0:20])\n\t_, _, _, _ = get_metrics(test[:, 20], predicted_test, display=True)\n\n\tplt.show()","sub_path":"q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"551000044","text":"import os\nfrom pathlib import Path\nimport shutil\nimport numpy as np\nfrom numpy import random as npr\nimport pandas as pd\nfrom IPython.display import display\nimport yaml\nfrom tqdm import tqdm_notebook as tqdm\n#test\n# Hyperparams\n# data_dir_str = '../../data'\ndata_dir_str = '/Users/pangda/predicting_generalization/alldata'\ndata_path_str = 'Grp13_conv_random'\nnew_path_str = 'split_set'\nsplits = [\n ('train', .8),\n ('val', .1),\n ('test', .1)\n]\n\n# seed\nnpr.seed(523324)\n\n# build paths\nbase_path = Path(data_dir_str)\nsave_path = base_path / Path(new_path_str).expanduser()\ndata_path = base_path /Path(data_path_str).expanduser()\ntype_dirs = [ f for f in data_path.iterdir() if f.is_dir() ]\n\n# determine cumulative splits and validate split sizes\ntotal = 
0.\nsplits_total = []\n\nfor split_name, prop in splits:\n splits_total.append((split_name, total, total + prop))\n total += prop\n\nassert abs(splits_total[-1][-1] - 1.) < 1e-10, 'Invalid split (does not sum to 1)'\n\n# randomly split all models\nsplit_mdirs = {split_name: [] for split_name, _ in splits}\n\ndef process_type(model_dirs, rand_idx, split_name):\n \"\"\"\n Add all models in `model_dirs` with index in\n `rand_idx` to split `split name`.\n \"\"\"\n for src_idx in rand_idx:\n suffix = f'_{src_idx}'\n matches = [mdir for mdir in model_dirs if str(mdir)[-len(suffix):] == suffix]\n\n assert len(matches) == 1, f'Invalid source model name format, {len(matches)} matches found'\n\n src_path = matches[0]\n split_mdirs[split_name].append(src_path)\n\n# iterate over each model type\nfor type_dir in type_dirs:\n # get all model dirs for type and randomly partition into splits\n model_dirs = [ f for f in type_dir.iterdir() if f.is_dir() ]\n rand_idx = npr.permutation(len(model_dirs))\n\n for split_name, start, stop in splits_total:\n process_type(model_dirs,\n rand_idx[int(len(rand_idx) * start): int(len(rand_idx) * stop)],\n split_name)\n\n# Randomly reorder models in each split, copy the model to the new directory, and\n# record the mapping\nmapping = []\n\nfor split_name, model_dirs in split_mdirs.items():\n rand_idx = npr.permutation(len(model_dirs))\n\n for dst_idx, src_idx in enumerate(rand_idx):\n src_path = model_dirs[src_idx]\n dst_path = save_path / split_name / f'model_{dst_idx}'\n shutil.copytree(src_path, dst_path)\n mapping.append((split_name, dst_idx, os.path.join(*src_path.parts[-2:])))\n\n# save the mapping to a CSV\ndf = pd.DataFrame(mapping, columns=['Split Name', 'Index', 'Source File'])\ndf.to_csv(os.path.join(data_dir_str, 'mapping.csv'), index=False)\n\n# fill defaults\n\nmetadata_defaults = {\n 'batch_size_train': 512,\n 'batch_size_test': 1024,\n 'batch_size_val': 512\n}\n\nother_defaults = {}\nstats_defaults = {}\n\ndef fill_missing_yaml(path, defaults):\n with open(path, 'r') as f:\n data = yaml.safe_load(f)\n for key, val in defaults.items():\n if key not in data:\n data[key] = val\n with open(path, 'w') as f:\n f.write(yaml.dump(data, default_flow_style=False))\n\ndef fill_missing(row):\n path = os.path.join(data_dir_str, new_path_str, row['Split Name'], f\"model_{row['Index']}\")\n fill_missing_yaml(os.path.join(path, 'meta_data.yml'), metadata_defaults)\n ## fill other_data.yml\n ## fill param_stats.yml\n\nfor i, row in tqdm(df.iterrows()):\n fill_missing(row)\n\n\n# Initial Dataset Validation\n\nprint('Proportion by Split:')\ndisplay(df.groupby('Split Name')['Index'].count() / len(df))\n\nprint('\\nUnique Source Files by Split:')\ndisplay(df.groupby('Split Name')['Source File'].nunique())\n\nassert df['Source File'].unique().shape[0] == len(df), 'Every source file is not unique'\n","sub_path":"cs446-project-fa2019-master/automl/utils/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"102509655","text":"## Sort Array By Parity\n\n# Example 1:\n# Input: nums = [3,1,2,4]\n# Output: [2,4,3,1]\n# Explanation: The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.\n\n# Example 2:\n# Input: nums = [0]\n# Output: [0]\n\nclass Solution:\n def sortArrayByParity(self, nums: List[int]) -> List[int]:\n N=len(nums)\n \n i=0\n j=N-1\n \n while i= min_score:\n native_score = np.asscalar(score)\n np_box = [np.asscalar(coord) for coord 
in boxes[0][i]]\n pil_box = pil_box_from_np_box(np_box, im_width, im_height)\n image_objects.append({'score': native_score, 'box': pil_box})\n\n with open(json_path, 'w') as json_file:\n json_file.write(json.dumps(image_objects, indent=2))\n\n print('{}: Done {} at {}'.format(image_num, image_path, datetime.now()))\n\n\ndef get_images_in_dir(image_dir):\n files = os.listdir(image_dir)\n files = [file_name for file_name in files if '.jpg' in file_name or '.jpeg' in file_name]\n return [os.path.join(image_dir, file_name) for file_name in files]\n\ndef clamp(num, min_num, max_num):\n return min(max(num, min_num), max_num)\n\ndef crop_image_to_boxes(image_path, identified_objects, cropped_output_dir, buffer_ratio=0.10):\n file_name = os.path.basename(image_path)\n image = Image.open(image_path)\n (im_width, im_height) = image.size\n\n for object_info in identified_objects:\n (left, top, right, bottom) = object_info['box']\n\n # Add a bit to width/height depending on buffer ratio\n crop_width = right - left\n crop_height = bottom - top\n extra_width_per_side = crop_width * (buffer_ratio / 2)\n extra_height_per_side = crop_height * (buffer_ratio / 2)\n left = left - extra_width_per_side\n right = right + extra_width_per_side\n top = top - extra_height_per_side\n bottom = bottom + extra_height_per_side\n box = [\n clamp(left, 0, im_width - 1),\n clamp(top, 0, im_height - 1),\n clamp(right, 0, im_width - 1),\n clamp(bottom, 0, im_height - 1)\n ]\n box = [int(round(coord)) for coord in box]\n new_image = image.crop(box)\n (left, top, right, bottom) = box\n suffix = '{}-{}-{}-{}'.format(left, right, top, bottom)\n new_image.save(os.path.join(cropped_output_dir, '{}.{}.jpg'.format(file_name, suffix)))\n\ndef crop_images_to_boxes(image_paths, json_dir, cropped_output_dir, crop_threshold=0.96):\n for (image_num, image_path) in enumerate(image_paths):\n file_name = os.path.basename(image_path)\n json_path = os.path.join(json_dir, file_name + '.json')\n if not os.path.exists(json_path):\n print('{}: Skipped cropping {} at {}'.format(image_num, image_path, datetime.now()))\n continue\n\n with open(json_path, 'r') as json_file:\n identified_objects = json.loads(json_file.read())\n\n scores = [obj['score'] for obj in identified_objects]\n print(scores)\n identified_objects = [obj for obj in identified_objects if obj['score'] > crop_threshold]\n crop_image_to_boxes(image_path, identified_objects, cropped_output_dir)\n print('{}: Cropped {} at {}'.format(image_num, image_path, datetime.now()))\n\ndef main():\n parser = argparse.ArgumentParser(description='Runs a tensorflow model against a directory of images')\n parser.add_argument('--model', dest='model_file', required=True, help='Usually a frozen_inference_graph.pb')\n parser.add_argument('--labels', dest='labels_file', required=True, help='A label-map pbtxt')\n parser.add_argument('--output', dest='cropped_output_dir', required=True, help='Where to output cropped images')\n parser.add_argument('--json-output', dest='json_output_dir', required=True, help='Directory to put JSON outputs for each image')\n parser.add_argument('--box-output', dest='box_output_dir', default=None, help='Directory to put files with outline boxes')\n parser.add_argument('--min-score', dest='min_score', type=float, default=0.1, help='Default 0.1, Minimum score to record boxes over')\n parser.add_argument('--crop-only', dest='crop_only', action='store_true', help='Whether to only crop without running training')\n parser.add_argument('dir', help='Directory with images')\n\n args = 
parser.parse_args()\n\n    image_paths = get_images_in_dir(args.dir)\n\n    if not args.crop_only:\n        detection_graph = load_graph(args.model_file)\n        category_index = load_label_map_category_index(args.labels_file)\n        run_graph_on_images(image_paths, detection_graph, category_index, args.min_score, args.json_output_dir, args.box_output_dir)\n\n    crop_images_to_boxes(image_paths, args.json_output_dir, args.cropped_output_dir)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"research/object_detection/run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":8610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"611823926","text":"\n\n# class header\nclass _MAGPIE():\n\tdef __init__(self,): \n\t\tself.name = \"MAGPIE\"\n\t\tself.definitions = [u'a bird with black and white feathers and a long tail: ', u'someone who likes to collect many different types of objects, or use many different styles']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_magpie.py","file_name":"_magpie.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"29603243","text":"def quicksort(array):\n    if len(array) < 2:\n        return array  # base case: arrays with 0 or 1 element are already sorted\n\n    else:\n        pivot = array[0]  # recursive case\n        less = [i for i in array[1:] if i <= pivot]  # sub-array of all elements less than or equal to the pivot\n        greater = [i for i in array[1:] if i > pivot]  # sub-array of all elements greater than the pivot\n        return quicksort(less) + [pivot] + quicksort(greater)\n\n\nif __name__ == '__main__':\n    n = input()\n    array = [int(i) for i in input().split()]\n    rez = quicksort(array)\n    print(' '.join(map(str, rez)))\n","sub_path":"sprint14/c_efficient_quick_sorting.py","file_name":"c_efficient_quick_sorting.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"512334682","text":"class Solution:\n    def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:\n        \n        return [x+extraCandies>=max(candies) for x in candies]\n    def kidsWithCandiesAlternative(self, candies: List[int], extraCandies: int) -> List[bool]:\n        maxCandies = max(candies)\n        kidsWithCandies = [False]*len(candies)\n        for i in range(len(candies)):\n            if candies[i]+extraCandies>=maxCandies:\n                kidsWithCandies[i] = True\n        return kidsWithCandies","sub_path":"1431_kids_with_maximum_candies.py","file_name":"1431_kids_with_maximum_candies.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"3832575","text":"linha=[0,1,2]\ntabuleiro=[linha[:] for _ in range(3)]  # copy the row so each board row is an independent list\nsimbolo=input('qual simbolo vc deseja usar?')\nprint('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])\njogada=str(input('qual sua jogada?'))\ni=int(jogada[0])\nj=int(jogada[1])\nif i>=0 and i<=2 and j>=0 and j<=2:\n    i=int(jogada[0])\n    j=int(jogada[1])\n    tabuleiro[i][j]=simbolo\nelse:\n    print('coordenada 
invalida')\nprint('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])\nprint(tabuleiro[i][j])","sub_path":"moodledata/vpl_data/303/usersdata/299/92363/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"572995511","text":"from typing import ClassVar\nimport cv2\nimport numpy as np \n\ntestimg = cv2.imread(\"/Users/jh/Documents/dss/project/ml-repo-2/OCT_small/test/CNV/CNV-3621217-5.jpeg\", cv2.IMREAD_COLOR)\ntestimg2 = cv2.imread(\"/Users/jh/Documents/dss/project/ml-repo-2/pptimg/cat.jpg\",cv2.IMREAD_COLOR)\n\n\n#1. CLAHE \ndef applyclahe(src):\n src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n cl = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n img = cl.apply(src)\n return img \n\n#2. HSV \ndef applyhsv(src):\n hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n v = cv2.inRange(v, 55, 100)\n masking = cv2.bitwise_and(hsv, hsv, mask = v)\n img = cv2.cvtColor(masking, cv2.COLOR_HSV2BGR)\n return img\n\n#3. DoG \ndef applydog(src):\n src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n dst1 = cv2.GaussianBlur(src, (0, 0), 3)\n dst2 = cv2.GaussianBlur(src, (0, 0), 1)\n img = dst2 - dst1\n return img\n\n#4. Subtraction \ndef applysubtract(src):\n src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n dst = cv2.GaussianBlur(src, (0, 0), 5)\n img = src - dst\n return img\n\n#5. Contour Mask\ndef applycontour(src):\n src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n ret, img_binary = cv2.threshold(src, 80, 255, 0)\n contours, hierarchy = cv2.findContours(img_binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n mask = np.zeros(src.shape, np.uint8)\n cv2.drawContours(mask, contours, -1, (255), 1)\n return mask\n\n#check and save\ndef showandsave(img, savedir):\n cv2.imshow(\"result\", img)\n cv2.imwrite(savedir, img)\n cv2.waitKey(0)\n return print(\"complete\")\n\n#Gaussians\ndef gaussians(src) : \n src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n g1 = cv2.GaussianBlur(src, (0, 0), 1)\n g2 = cv2.GaussianBlur(src, (0, 0), 3)\n g3 = cv2.GaussianBlur(src, (0, 0), 5)\n\n for num, i in enumerate([g1, g2, g3]):\n num = num * 2 + 1\n savedir = \"pptimg/gaussian-\" + str(num) + \".jpeg\"\n cv2.imwrite(savedir, i)\n return print(\"complete\")\n\n#showandsave(applyclahe(testimg), \"pptimg/clahe.jpeg\")\n#showandsave(applyhsv(testimg), \"pptimg/hsv.jpeg\")\n#showandsave(applydog(testimg), \"pptimg/dog.jpeg\")\n#showandsave(applysubtract(testimg), \"pptimg/subtract.jpeg\")\n#showandsave(applycontour(testimg), \"pptimg/contour.jpeg\")\n#showandsave(applyclahe(testimg2), \"pptimg/clahecat.jpeg\")\n\n","sub_path":"img_preprocess.py","file_name":"img_preprocess.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"348254914","text":"#!/usr/bin/env python\n\nimport re\nimport urllib2\nimport datetime\nfrom BeautifulSoup import BeautifulSoup\nimport config\n\nclass Ship(object):\n\n def __init__(self,\n imo=None,\n user_agent=None):\n '''\n Set common variables\n '''\n if not user_agent:\n self.user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36\"\n else:\n self.user_agent = user_agent\n self.imo = imo\n self.webpage = 'http://www.marinetraffic.com/en/ais/details/ships'\n # 
initialize the speed to None\n        self.speed = None\n\n    def fetch_online_speed(self):\n        '''\n        Gets the data from marinetraffic.com and\n        returns the speed.\n        This may be split up in the future to fetch other data too...\n        '''\n        full_url = \"{0}/{1}\".format(self.webpage, self.imo)\n        request = urllib2.Request(full_url)\n        request.add_header('User-Agent',self.user_agent)\n        opener = urllib2.build_opener()\n        data = opener.open(request).read()\n        parsed_html = BeautifulSoup(data)\n        for span in parsed_html.findAll('span'):\n            if span.text == 'Speed/Course:':\n                raw_speed = span.findNext('span').text\n\n        self.speed = re.search(r'(\\d\\.\\d)kn.*', raw_speed).group(1)\n\n    def get_speed(self):\n        '''\n        Simply return the speed\n        '''\n        self.fetch_online_speed()\n        return self.speed\n\nif __name__ == '__main__':\n    # just for test\n    #\n    imo = '9503639'\n    a = Ship(imo)\n    now = datetime.datetime.now().strftime('%y%m%d%H%M')\n    filename = \"{0}/{1}.txt\".format(config.outdata,imo)\n    with open(filename, 'a') as f:\n        f.write(\"{0} {1}\\n\".format(now, a.get_speed()))\n","sub_path":"fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"629932245","text":"import sys\n\nfrom netflix_dal import get_average_rating_of_movie\n\nif len(sys.argv) != 2:\n    print('Usage: average_rating <movie_id>')\n    exit(1)\n\nmovie_id = sys.argv[1]\ntry:\n    movie_count, result = get_average_rating_of_movie(movie_id)\n\n    if movie_count == 0:\n        print(f'There is no movie with ID {movie_id}.')\n        exit(0)\n\n    if result is None:\n        print(f'The movie with ID {movie_id} has no ratings yet.')\n        exit(0)\n\n    print(f'The average rating of movie ID {movie_id} is {result}.')\nexcept ValueError:\n    print(f'Sorry, something went wrong. 
Please ensure that “{movie_id}” is a valid movie ID.')","sub_path":"netflix-prize-document-example/elasticsearch/average_rating.py","file_name":"average_rating.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"148613178","text":"import os\nimport shutil, zipfile\nimport requests\nimport numpy as np\nimport pandas\nimport dgl\nimport torch\n\ndef _download(url, path, filename):\n fn = os.path.join(path, filename)\n if os.path.exists(fn):\n return\n\n os.makedirs(path, exist_ok=True)\n f_remote = requests.get(url, stream=True)\n sz = f_remote.headers.get('content-length')\n assert f_remote.status_code == 200, 'fail to open {}'.format(url)\n with open(fn, 'wb') as writer:\n for chunk in f_remote.iter_content(chunk_size=1024*1024):\n writer.write(chunk)\n print('Download finished.')\n\ndef get_livejournal():\n _download('https://snap.stanford.edu/data/soc-LiveJournal1.txt.gz',\n '/tmp', 'soc-LiveJournal1.txt.gz')\n df = pandas.read_csv('/tmp/soc-LiveJournal1.txt.gz', sep='\\t', skiprows=4, header=None,\n names=['src', 'dst'], compression='gzip')\n src = np.array(df['src'])\n dst = np.array(df['dst'])\n print('construct the graph')\n return dgl.DGLGraph((src, dst), readonly=True)\n\ndef get_graph(name):\n if name == 'livejournal':\n return get_livejournal()\n else:\n print(name + \" doesn't exist\")\n return None\n\nclass ogb_data(object):\n def __init__(self, g, num_labels):\n self._g = g\n self._num_labels = num_labels\n\n @property\n def num_labels(self):\n return self._num_labels\n\n @property\n def num_classes(self):\n return self._num_labels\n\n def __getitem__(self, idx):\n return self._g\n\ndef load_ogb_product(name):\n from ogb.nodeproppred import DglNodePropPredDataset\n\n os.symlink('/tmp/dataset/', os.path.join(os.getcwd(), 'dataset'))\n\n print('load', name)\n data = DglNodePropPredDataset(name=name)\n print('finish loading', name)\n splitted_idx = data.get_idx_split()\n graph, labels = data[0]\n labels = labels[:, 0]\n\n graph.ndata['label'] = labels\n in_feats = graph.ndata['feat'].shape[1]\n num_labels = len(torch.unique(labels[torch.logical_not(torch.isnan(labels))]))\n\n # Find the node IDs in the training, validation, and test set.\n train_nid, val_nid, test_nid = splitted_idx['train'], splitted_idx['valid'], splitted_idx['test']\n train_mask = torch.zeros((graph.number_of_nodes(),), dtype=torch.bool)\n train_mask[train_nid] = True\n val_mask = torch.zeros((graph.number_of_nodes(),), dtype=torch.bool)\n val_mask[val_nid] = True\n test_mask = torch.zeros((graph.number_of_nodes(),), dtype=torch.bool)\n test_mask[test_nid] = True\n graph.ndata['train_mask'] = train_mask\n graph.ndata['val_mask'] = val_mask\n graph.ndata['test_mask'] = test_mask\n\n return ogb_data(graph, num_labels)\n\ndef process_data(name):\n if name == 'cora':\n return dgl.data.CoraGraphDataset()\n elif name == 'pubmed':\n return dgl.data.PubmedGraphDataset()\n elif name == 'reddit':\n return dgl.data.RedditDataset(self_loop=True)\n elif name == 'ogbn-products':\n return load_ogb_product('ogbn-products')\n else:\n raise ValueError('Invalid dataset name:', name)\n\ndef get_bench_device():\n return os.environ.get('DGL_BENCH_DEVICE', 'cpu')\n\ndef setup_track_time(*args, **kwargs):\n # fix random seed\n np.random.seed(42)\n torch.random.manual_seed(42)\n\ndef setup_track_acc(*args, **kwargs):\n # fix random seed\n np.random.seed(42)\n torch.random.manual_seed(42)\n\nTRACK_UNITS = {\n 'time' : 's',\n 'acc' : 
'%',\n}\n\nTRACK_SETUP = {\n 'time' : setup_track_time,\n 'acc' : setup_track_acc,\n}\n\ndef parametrize(param_name, params):\n def _wrapper(func):\n if getattr(func, 'params', None) is None:\n func.params = []\n func.params.append(params)\n if getattr(func, 'param_names', None) is None:\n func.param_names = []\n func.param_names.append(param_name)\n return func\n return _wrapper\n\ndef benchmark(track_type, timeout=60):\n assert track_type in ['time', 'acc']\n def _wrapper(func):\n func.unit = TRACK_UNITS[track_type]\n func.setup = TRACK_SETUP[track_type]\n func.timeout = timeout\n return func\n return _wrapper\n","sub_path":"benchmarks/benchmarks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"57641920","text":"# -*- coding:UTF-8 -*-\n\"\"\"\n喜马拉雅专辑音频爬虫\nhttps://www.ximalaya.com/\n@author: hikaru\nemail: hikaru870806@hotmail.com\n如有问题或建议请联系\n\"\"\"\nimport os\nfrom common import *\nfrom project.ximalaya import ximalaya\n\n\nclass XiMaLaYaAlbum(ximalaya.XiMaLaYa):\n def __init__(self, **kwargs):\n # 设置APP目录\n crawler.PROJECT_APP_PATH = os.path.abspath(os.path.dirname(__file__))\n\n # 初始化参数\n sys_config = {\n const.SysConfigKey.NOT_CHECK_SAVE_DATA: False,\n const.SysConfigKey.APP_CONFIG_PATH: os.path.join(crawler.PROJECT_APP_PATH, \"album.ini\"),\n const.SysConfigKey.SAVE_DATA_FORMATE: (0, [\"\", \"0\"]), # album_id last_audio_id\n }\n ximalaya.XiMaLaYa.__init__(self, sys_config, **kwargs)\n\n # 下载线程\n self.set_crawler_thread(CrawlerThread)\n\n\nclass CrawlerThread(crawler.CrawlerThread):\n def __init__(self, main_thread, single_save_data):\n self.index_key = single_save_data[0] # album id\n if len(single_save_data) >= 3 and single_save_data[2]:\n self.display_name = single_save_data[2]\n else:\n self.display_name = single_save_data[0]\n crawler.CrawlerThread.__init__(self, main_thread, single_save_data)\n\n # 获取所有可下载音频\n def get_crawl_list(self):\n page_count = 1\n unique_list = []\n audio_info_list = []\n is_over = False\n # 获取全部还未下载过需要解析的音频\n while not is_over:\n audio_pagination_description = \"第%s页音频\" % page_count\n self.start_parse(audio_pagination_description)\n try:\n audio_pagination_response = ximalaya.get_one_page_album(self.index_key, page_count)\n except crawler.CrawlerException as e:\n self.error(e.http_error(audio_pagination_description))\n raise\n self.parse_result(audio_pagination_description, audio_pagination_response[\"audio_info_list\"])\n\n # 寻找这一页符合条件的媒体\n for audio_info in audio_pagination_response[\"audio_info_list\"]:\n # 检查是否达到存档记录\n if audio_info[\"audio_id\"] > int(self.single_save_data[1]):\n # 新增音频导致的重复判断\n if audio_info[\"audio_id\"] in unique_list:\n continue\n else:\n audio_info_list.append(audio_info)\n unique_list.append(audio_info[\"audio_id\"])\n else:\n is_over = True\n break\n\n if not is_over:\n if audio_pagination_response[\"is_over\"]:\n is_over = True\n else:\n page_count += 1\n\n return audio_info_list\n\n # 解析单首音频\n def crawl_audio(self, audio_info):\n audio_description = \"音频%s《%s》\" % (audio_info[\"audio_id\"], audio_info[\"audio_title\"])\n self.start_parse(audio_description)\n try:\n audio_play_response = ximalaya.get_audio_info_page(audio_info[\"audio_id\"])\n except crawler.CrawlerException as e:\n self.error(e.http_error(audio_description))\n raise\n\n if audio_play_response[\"is_video\"]:\n self.error(\"%s 类型是视频,跳过\" % audio_description)\n else:\n audio_url = audio_play_response[\"audio_url\"]\n audio_name = 
\"%09d - %s.%s\" % (audio_info[\"audio_id\"], path.filter_text(audio_info[\"audio_title\"]), url.get_file_ext(audio_url))\n audio_path = os.path.join(self.main_thread.audio_download_path, self.display_name, audio_name)\n if self.download(audio_url, audio_path, audio_description):\n self.total_audio_count += 1 # 计数累加\n\n # 音频下载完毕\n self.single_save_data[1] = str(audio_info[\"audio_id\"]) # 设置存档记录\n\n def _run(self):\n # 获取所有可下载音频\n audio_info_list = self.get_crawl_list()\n self.info(\"需要下载的全部音频解析完毕,共%s个\" % len(audio_info_list))\n\n # 从最早的媒体开始下载\n while len(audio_info_list) > 0:\n self.crawl_audio(audio_info_list.pop())\n\n\nif __name__ == \"__main__\":\n XiMaLaYaAlbum().main()\n","sub_path":"project/ximalaya/album.py","file_name":"album.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"29015204","text":"#!venv/bin/python\n# -*- coding: utf-8 -*-import sys\n\nimport os\nimport sys\nfrom moduler.extra_diske import update_extradiske\nfrom moduler.fileOperations import fetch_config\nfrom moduler.flip_server import flip_server\nfrom moduler.global_config import global_config\nfrom moduler.wdmycloud import update_wdmycloud\nfrom moduler.user_profile import user_profile\nfrom ubuntu.apache import install_apache\nfrom ubuntu.docker import install_docker\nfrom ubuntu.chrome import install_chrome\nfrom ubuntu.mongodb import install_mongodb\nfrom ubuntu.mysql import install_mysql\nfrom ubuntu.nginx import install_nginx\nfrom ubuntu.nodejs import install_nodejs\nfrom ubuntu.packages import install_packages\nfrom ubuntu.packer import install_packer\nfrom ubuntu.php import install_php\nfrom ubuntu.vagrant import install_vagrant\nfrom ubuntu.virtualbox import install_vbox\n\nmenu = \"\"\"Menu for systeminstallation og opdateringer\n===========================================\n\\t1) Update user profile\n\\t2) Global configruation\n\\t3) Mount WD My Cloud\n\\t4) Update extra diske\n\\t5) Install basis software\n\\t6) Node.js\n\\t7) MongoDB\n\\t8) PHP incl. 
Composer\n\\t9) Apache2 med libapache2-mod-php\n\\t10) Nginx\n\\t11) MySQL\n\\t12) Docker\n\\t13) Flip http web server\n===========================================\nDesktop programmer til fysisk host\n===========================================\n\\t14) Vagrant\n\\t15) Packer\n\\t16) Virtualbox\n\\t17) Google Chrome\n===========================================\n\\t99) I do not know, Exit!\n\"\"\"\nswitcher = {\n 1: user_profile,\n 2: global_config,\n 3: update_wdmycloud,\n 4: update_extradiske,\n 5: install_packages,\n 6: install_nodejs,\n 7: install_mongodb,\n 8: install_php,\n 9: install_apache,\n 10: install_nginx,\n 11: install_mysql,\n 12: install_docker,\n 13: flip_server,\n 14: install_vagrant,\n 15: install_packer,\n 16: install_vbox,\n 17: install_chrome\n}\n\n\ndef not_supported():\n print('Selection is not supported')\n\n\ndef show_menu(configs):\n option = 0\n go_on = True\n while option in range(1, len(switcher)) or go_on:\n os.system('clear')\n print(menu)\n selection = input(\"Vælg en funktion: \")\n try:\n option = int(selection)\n except ValueError:\n input('Vælg et nummer mellem 1 og 20 ...')\n go_on = True\n else:\n if option == 99:\n break\n print(f'Du valgte {option}')\n action = switcher.get(option, lambda argument: not_supported())\n action(configs)\n input(\"Enter RETURN to Continue ...\")\n\n\nif __name__ == \"__main__\":\n\n if os.geteuid() != 0:\n sys.exit('Scriptet skal udføres med root access')\n\n configuration = ''\n filename = 'config/config.ini'\n try:\n configuration = fetch_config(filename)\n except Exception as err:\n print(err)\n sys.exit(f'Konfigurationsfilen {filename} kan ikke læses')\n else:\n print(f'Konfigurationsfilen {filename} er indlæst')\n\n show_menu(configuration)\n","sub_path":"install_ubuntu.py","file_name":"install_ubuntu.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"530748927","text":"import logging\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport pytz\nimport tzlocal\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.x509.oid import NameOID\n\nfrom .configuration import SmoothStreamsProxyConfiguration\nfrom .constants import DEFAULT_SSL_CERTIFICATE_FILE_PATH\nfrom .constants import DEFAULT_SSL_KEY_FILE_PATH\nfrom .utilities import SmoothStreamsProxyUtility\n\nlogger = logging.getLogger(__name__)\n\n\nclass SmoothStreamsProxySecurityManager():\n __slots__ = []\n\n _auto_generate_self_signed_certificate = True\n _certificate_file_path = None\n _key_file_path = None\n\n @classmethod\n def determine_certificate_validity(cls):\n server_hostname_loopback = SmoothStreamsProxyConfiguration.get_configuration_parameter(\n 'SERVER_HOSTNAME_LOOPBACK')\n server_hostname_private = SmoothStreamsProxyConfiguration.get_configuration_parameter(\n 'SERVER_HOSTNAME_PRIVATE')\n server_hostname_public = SmoothStreamsProxyConfiguration.get_configuration_parameter(\n 'SERVER_HOSTNAME_PUBLIC')\n\n with open(cls._certificate_file_path, 'rb') as input_file:\n certificate = x509.load_pem_x509_certificate(input_file.read(), default_backend())\n certificate_subjects = certificate.extensions.get_extension_for_oid(\n x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME).value.get_values_for_type(x509.DNSName)\n\n logger.debug(\n 
'Certificate status\\n'\n 'File path => {0}\\n'\n 'Expires on => {1}\\n'\n 'Subjects => {2}\\n\\n'\n '{3}'.format(cls._certificate_file_path,\n certificate.not_valid_after.replace(tzinfo=pytz.utc).astimezone(\n tzlocal.get_localzone()).strftime('%Y-%m-%d %H:%M:%S'),\n ', '.join(certificate_subjects),\n '\\n'.join(\n ['Certificate is {0}valid for domain => {1}'.format(\n '' if server_hostname in certificate_subjects else 'not ',\n server_hostname)\n for server_hostname in\n [server_hostname_loopback, server_hostname_private, server_hostname_public]])))\n\n @classmethod\n def generate_self_signed_certificate(cls):\n ip_address_location = SmoothStreamsProxyUtility.determine_ip_address_location()\n\n if ip_address_location is None:\n pass\n\n private_key = rsa.generate_private_key(public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n\n with open(DEFAULT_SSL_KEY_FILE_PATH, 'wb') as output_file:\n output_file.write(private_key.private_bytes(encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()))\n\n current_date_time_in_utc = datetime.now(pytz.utc)\n\n subject = issuer = x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, ip_address_location['country_code']),\n x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ip_address_location['region']),\n x509.NameAttribute(NameOID.LOCALITY_NAME, ip_address_location['city']),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'SmoothStreamsProxy'),\n x509.NameAttribute(NameOID.COMMON_NAME,\n SmoothStreamsProxyConfiguration.get_configuration_parameter(\n 'SERVER_HOSTNAME_PUBLIC'))])\n\n certificate = x509.CertificateBuilder().subject_name(\n subject\n ).issuer_name(\n issuer\n ).public_key(\n private_key.public_key()\n ).serial_number(\n x509.random_serial_number()\n ).not_valid_before(\n current_date_time_in_utc\n ).not_valid_after(\n current_date_time_in_utc + timedelta(days=10 * 365)\n ).add_extension(\n x509.SubjectAlternativeName([\n x509.DNSName(SmoothStreamsProxyConfiguration.get_configuration_parameter('SERVER_HOSTNAME_LOOPBACK')),\n x509.DNSName(SmoothStreamsProxyConfiguration.get_configuration_parameter('SERVER_HOSTNAME_PRIVATE')),\n x509.DNSName(SmoothStreamsProxyConfiguration.get_configuration_parameter('SERVER_HOSTNAME_PUBLIC'))]),\n critical=False\n ).sign(\n private_key,\n hashes.SHA256(),\n default_backend())\n\n with open(DEFAULT_SSL_CERTIFICATE_FILE_PATH, 'wb') as output_file:\n output_file.write(certificate.public_bytes(serialization.Encoding.PEM))\n\n @classmethod\n def get_auto_generate_self_signed_certificate(cls):\n return cls._auto_generate_self_signed_certificate\n\n @classmethod\n def get_certificate_file_path(cls):\n return cls._certificate_file_path\n\n @classmethod\n def get_key_file_path(cls):\n return cls._key_file_path\n\n @classmethod\n def set_auto_generate_self_signed_certificate(cls, auto_generate_self_signed_certificate):\n cls._auto_generate_self_signed_certificate = auto_generate_self_signed_certificate\n\n @classmethod\n def set_certificate_file_path(cls, certificate_file_path):\n cls._certificate_file_path = certificate_file_path\n\n @classmethod\n def set_key_file_path(cls, key_file_path):\n cls._key_file_path = key_file_path\n","sub_path":"smooth_streams_proxy/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"535601909","text":"import socket, re, multiprocessing\n\n\nclass 
HTTPServer(object):\n    def __init__(self, port):\n        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        # allow rebinding port 7788 before the previous socket on it has been fully released\n        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        self.server_socket.bind((\"\", port))\n        self.server_socket.listen(128)\n\n    def server_forever(self):\n        \"\"\"Run the web server in a loop, waiting for client connections and serving each client\"\"\"\n        while True:\n            client_socket, client_addr = self.server_socket.accept()\n            client_process = multiprocessing.Process(target=self.handle_client, args=(client_socket, ))\n            client_process.start()\n            client_socket.close()\n\n    @staticmethod\n    def handle_client(client_socket):\n        \"\"\"Serve a single client\"\"\"\n        recv_data = client_socket.recv(1024).decode()\n        #print(recv_data) # test\n        \"\"\"b'GET / HTTP/1.1\\r\\n\n        Host: 127.0.0.1:7788\\r\\n\n        Connection: keep-alive\\r\\n\n        Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\\r\\n\n        Upgrade-Insecure-Requests: 1\\r\\n\n        User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\\r\\n\n        Accept-Encoding: gzip, deflate, sdch\\r\\n\n        Accept-Language: zh-CN,zh;q=0.8\\r\\n\\r\\n'\"\"\"\n        request_header_lines = recv_data.splitlines()\n        path_info = re.match(r\"\\w+\\s+([^ ]+)\", request_header_lines[0]).group(1)\n\n        if path_info == \"/\":\n            path_info = g_document_root + \"/index.html\"\n        else:\n            path_info = g_document_root + path_info\n        print(path_info)\n\n        try:\n            f = open(path_info, \"rb\")\n        except (IOError, FileNotFoundError) as e:  # `except A or B` would only ever catch A\n            response_headers = \"HTTP/1.1 404 not found\\r\\n\\r\\n\" # 404: resource not found\n            response_body = \">>>>>>>>Sorry, file not found>>>>>>>>>\".encode()\n            print(e)\n        else:\n            # build the response header\n            response_headers = \"HTTP/1.1 200 OK\\r\\n\\r\\n\" # 200: resource found\n            # build the response body\n            response_body = f.read()\n        finally:\n\n            response = response_headers.encode() + response_body\n            client_socket.send(response)\n            client_socket.close()\n\n\n# server document root\ng_document_root = \"./html\"\n# server port\nport = 7788\n\n\ndef main():\n    web = HTTPServer(port)\n    print(web)\n    web.server_forever()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"16day/web4-类-多进程.py","file_name":"web4-类-多进程.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"985126","text":"#coding:utf-8\nfrom selenium import webdriver\nimport ddt,time\nimport unittest\nimport os\n\n# import custom modules\nimport myExcel\t\nfrom Login import Login_Horde\n\n\n'''Load the test data; logindata.xlsx stores the username and password '''\nfilepath = os.path.join(os.getcwd(),'testcase\\logindata.xlsx')\t# relative path\n\nsheetName = 'Sheet1'\ndata = myExcel.ExcelUtil(filepath,sheetName)\ntestData = data.dict_data()\n\n# decorate the test-case setup/teardown with unittest's @classmethod\n# so the browser is not opened and closed for every test case, which speeds up execution\n@ddt.ddt\nclass Test(unittest.TestCase):\n\t'''\tdata-driven test cases'''\t\t\t\n\t@classmethod\n\tdef setUpClass(cls):\n\t\t'''open the Firefox browser'''\n\t\tcls.driver = webdriver.Firefox()\n\t\turl = 'http://www.testerhorde.com'\n\t\tcls.driver.get(url)\n\t\t# add an implicit wait of 10s\n\t\tcls.driver.implicitly_wait(10)\t\t\n\t\t\n\t@classmethod\n\tdef tearDownClass(cls):\n\t\tcls.driver.quit()\n\t\n\t@ddt.data(*testData)\n\tdef test_login(self,data):\t# method name must start with test_\n\t\t'''login module test case'''\n\t\ttry:\n\t\t\t#print('current test username: %s'%data['username'])\t\t\t\n\t\t\t# call the login module methods\n\t\t\tmylogin = Login_Horde(self.driver)\n\t\t\tmylogin.login(data['username'],data['password'])\n\t\t\tresult,msg = mylogin.is_login_sucess()\n\t\t\t\n\t\t\tif result:\n\t\t\t\tmylogin.logout()\t# login succeeded: log the user out\n\t\t\telse:\n\t\t\t\tmylogin.click_close()\t# login failed: close the login dialog\n\t\t\tself.assertTrue(result,msg)\n\t\texcept Exception as msg:\n\t\t\t# assertion failed: take a screenshot\n\t\t\tnowTime = time.strftime('%Y%m%d_%H.%M.%S')\n\t\t\tself.driver.get_screenshot_as_file('Screenshots\\\\%s.png'%nowTime)\n\t\t\t#path = os.path.join(os.getcwd(),'report')\n\t\t\t#self.driver.get_screenshot_as_file(r'%s\\%s.png'%(path,now))\n\t\t\t#print('screenshot %s'%path)\n\t\t\t\n\t\t\t\nif __name__ == '__main__'\t:\n\tunittest.main()","sub_path":"TesterHorder/testerhordev3.4/testcase/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"178731931","text":"import os\nimport torch\nimport argparse\nfrom datetime import datetime\nfrom narouresearch.networks.styleEncoder.train import train\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"this program is char2vec train\")\n    parser.add_argument(\"--aozora_path\", help=\"source path aozora\",required=True)\n    parser.add_argument(\"--narou_path\", help=\"source path narou\",required=True)\n    parser.add_argument(\"--max_epoch\", default=500, type=int)\n    parser.add_argument(\"--steps\", type=int)\n    parser.add_argument(\"--sub_steps\", type=int, required=True)\n    parser.add_argument(\"--validation_steps\", type=int)\n    parser.add_argument(\"--early_stopping\", type=int, required=True)\n    parser.add_argument(\"--method\", required=True, choices=['RNN', 'Transformer'])\n    parser.add_argument(\"--save_dir\",required=True)\n    parser.add_argument(\"--saved_model_dir\")\n    parser.add_argument(\"--device\", choices=['cpu','gpu'])\n    \n    args = parser.parse_args()\n    aozora_path = args.aozora_path\n    narou_path = args.narou_path\n    save_dir = args.save_dir\n    max_epoch = args.max_epoch\n    steps = args.steps\n    sub_steps = args.sub_steps\n    validation_steps = args.validation_steps\n    early_stopping = args.early_stopping\n    method = args.method\n    saved_model_dir = args.saved_model_dir\n    device = args.device\n\n    if device == \"gpu\":\n        if torch.cuda.is_available():\n            device = torch.device(\"cuda\")\n        else: print(\"cuda is not available.\"); exit();\n    else: device = torch.device(\"cpu\")\n    print(device)\n\n    save_dir = os.path.join(save_dir,datetime.now().strftime(\"%Y%m%d_%H%M%S\"))\n    os.makedirs(save_dir, exist_ok=True)\n    \n    paths = (aozora_path, narou_path)\n\n    train(paths=paths, save_dir=save_dir, max_epoch=max_epoch, steps=steps,\n        sub_steps=sub_steps, validation_steps=validation_steps, early_stopping=early_stopping, \n        method=method, device=device, saved_model_dir=saved_model_dir)","sub_path":"jobs/main/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"242056421","text":"import socket\n\n# initialize the socket\nsock = socket.socket()\n\n# bind to the chosen host and port\nsock.bind((socket.gethostname(), 12344))\n\n# listen for incoming connections\nsock.listen(5)\nwhile True:\n    # accept a connection from the client\n    conn, addr = sock.accept()\n    print('Got connection from', addr)\n    print(\"Menerima...\")\n    # receive the file\n    l = conn.recv(1024)\n    filename = input(str(\"Please enter the file name that you'd like to upload : \"))\n    receivedFile = open(filename,'wb')\n    while (l):\n        print(\"Menerima...\")\n        # write the received chunk to the file\n        receivedFile.write(l)\n        l = conn.recv(1024)\n    
receivedFile.close()\n print(\"File diterima dengan nama file\", filename)\n conn.close()","sub_path":"8-termchar/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"352289341","text":"from math import sin, cos, sqrt, atan2, radians\n\nfrom flask import render_template, flash, redirect, request, url_for, jsonify\nfrom flask_login import current_user, login_user, logout_user, login_required\n\nfrom werkzeug.urls import url_parse\nfrom werkzeug.utils import secure_filename\n\nfrom app.models import USERS, STATUS, BUFFER_PEOPLE, VISITED_PLACES\nfrom app import app, db\nfrom app.form import LoginForm, RegistrationForm\n\nfrom loguru import logger\n\nfrom test_plotly import create_map, create_graph\nimport pandas as pd\nimport plotly.express as px\n\nimport requests\n\nimport os\nimport sys\n\nimport json\n\n\ndef get_address(lon, lat):\n API_KEY = '5c6f743d-2033-4fb4-84d0-60941cb65cc2'\n\n URL = f\"https://geocode-maps.yandex.ru/1.x/?apikey={API_KEY}&geocode={lat},{lon}&format=json\"\n result = requests.get(URL).json()\n print(result)\n return result['response']['GeoObjectCollection']['featureMember'][0]['GeoObject']['metaDataProperty']['GeocoderMetaData']['text']\n\ndef get_nearest(lat, lon, type):\n API_KEY = '5ff9b945-0f5c-4355-8c90-fe93681fea26'\n URL = f\"https://search-maps.yandex.ru/v1/?text={type}&type=biz&lang=ru_RU&ll={lon}, {lat}&results=6&spn=0.009,0.016&rspn=1&format=json&apikey={API_KEY}\"\n result = requests.get(URL).json()\n logger.debug(result)\n if result['properties']['ResponseMetaData']['SearchResponse']['found'] != 0:\n return result['features']\n else:\n return 'Not found'\n\ndef get_lat_lon(addres):\n API_KEY = '5c6f743d-2033-4fb4-84d0-60941cb65cc2'\n URL = f\"https://geocode-maps.yandex.ru/1.x/?apikey={API_KEY}&geocode={addres}&format=json\"\n result = requests.get(URL).json()\n logger.debug(result, result[0]['geometry']['coordinates'])\n return result[0]['geometry']['coordinates']\n\n@app.route('/find_covid/')\n@app.route('/find_covid/index', methods = ['GET', 'POST'])\n@login_required\ndef index():\n logger.debug(request.get_data().decode('utf-8'))\n if request.method == 'POST':\n pass\n \n\n return render_template(\"base.html\", type_of_route = 'index', navigation=['Статистика','Личный кабинет','Выход'])\n\n\n\n# Логика регистрации на сайте(Не относится к API)\n@app.route('/find_covid/login', methods = ['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = LoginForm()\n if form.validate_on_submit():\n print('!@!!!!!!!!!!!!!!!!!!')\n user = USERS.query.filter_by(email=form.email.data).first()\n \n if user is None or not user.check_password(form.password.data):\n print('ERROR')\n flash('Invalid username or password')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n return redirect(next_page)\n return render_template('login_1.html', title='Sign In', form=form)\n\n@app.route('/find_covid/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n@app.route('/find_covid/account')\ndef LK():\n user_email = current_user.email\n user_id = current_user.id\n return render_template('LK.html', id=user_id, email=user_email, navigation=['Статистика','Личный кабинет','Выход'])\n\n\n@app.route('/find_covid/register', 
methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = USERS(name=form.name.data, email=form.email.data)\n status_info = STATUS(id=user.id, test_result=form.status.data, сontact_with_infected=form.contact.data, Symptoms=form.symptoms.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.add(status_info)\n db.session.commit()\n flash('Congratulations, you are now a registered user!')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route('/find_covid/map', methods=['GET', 'POST'])\ndef map():\n ru_cities = pd.read_csv(\"ru.csv\")\n map = create_map(ru_cities)\n graph_1 = create_graph([6060,11237,8536,6632,5204,5195,9412,20985],'Заражения каронавирусом')\n graph_2 = create_graph([40,58,94,104,70,51,107,256],'Смерть от коронавируса')\n return render_template('map.html', plot=map, graph_1 = graph_1, graph_2 = graph_2 ,type_of_route = 'map', navigation=['Статистика','Личный кабинет','Выход'])\n\n# Логика отработки API(Не относится к сайту)\n\n# Логика отправки текущей геолокации\n@app.route('/find_covid/get_user_data', methods=['GET', 'POST'])\ndef location():\n if request.method == 'POST':\n pharamacies = []\n json_data = request.get_json()\n user = USERS.query.filter_by(email=json_data['email']).first()\n user.add_geo(f\"{json_data['lat']}|{json_data['lon']}\")\n user_id = user.id\n status_info = STATUS.query.filter_by(id=user_id).first()\n lat = json_data['lat']\n lon = json_data['lon']\n print(lat,lon)\n geolocation = get_address(lat,lon)\n near_pharamacies = get_nearest(lat, lon, 'Аптеки')\n for item in near_pharamacies:\n print(item['geometry']['coordinates'])\n print(type(item['geometry']['coordinates']))\n pharamacies.append(item['geometry']['coordinates'])\n logger.debug(near_pharamacies)\n return jsonify({ 'user_data':{\n 'name': geolocation,\n 'name' : user.name,\n 'status': status_info.test_result,\n 'contact': status_info.сontact_with_infected,\n 'symptoms': status_info.Symptoms,\n 'geolocation': geolocation\n }, 'near_pharamacies':{\n 'data':pharamacies,\n }\n \n })\n\n@app.route('/find_covid/edit_user_data', methods=['GET', 'POST'])\ndef edit_user_data():\n if request.method == 'POST':\n json_data = request.get_json()\n logger.debug(json_data)\n user = USERS.query.filter_by(email=current_user.email).first()\n status_info = STATUS.query.filter_by(id=user.id).first()\n print(status_info)\n status_info.edit_test_result(json_data['status'])\n status_info.edit_сontact_with_infected(json_data['contact'])\n status_info.edit_symptoms(json_data['symptoms'])\n print(status_info)\n return 'ok'\n\n@app.route('/find_covid/probability_of_infection', methods=['GET', 'POST'])\ndef probability_of_infection():\n if request.method == 'POST':\n count = 0\n json_data = request.get_json()\n bufer_people = BUFFER_PEOPLE.query.all()\n summ_geolocation = round(json_data['lat'] + json_data['lon'], 5)\n print(summ_geolocation)\n for position in bufer_people:\n print(float(position.geolocation))\n # print(abs(summ_geolocation - float(position.geolocation)))\n print(summ_geolocation - float(position.geolocation))\n if abs(summ_geolocation - float(position.geolocation)) <= 0.00004:\n count += 1\n if BUFFER_PEOPLE.query.filter_by(id = json_data['id']).first():\n bufer_people = BUFFER_PEOPLE.query.filter_by(id = json_data['id']).first()\n bufer_people.edit_geolocation(json_data['lat'] + 
json_data['lon'])\n bufer_people.edit_type_of_public(json_data['type_of_public'])\n else:\n bufer_people = BUFFER_PEOPLE(id = json_data['id'], geolocation = json_data['lat'] + json_data['lon'], type_of_public = json_data['type_of_public'])\n db.session.add(bufer_people)\n db.session.commit()\n if count <= 2:\n return '5%'\n elif count <= 5:\n return '20%'\n else:\n return '60+%'\n \n return 'ok'\n\n@app.route('/find_covid/chek_position', methods=['GET', 'POST'])\ndef chek_position():\n if request.method == 'POST':\n count = 0\n json_data = request.get_json()\n print(json_data)\n bufer_people = BUFFER_PEOPLE.query.all()\n lat = json_data['lat']\n lon = json_data['lon']\n addres = json_data['addres']\n print(addres)\n end_lat = get_lat_lon(addres)[0]\n end_lon = get_lat_lon(addres)[1]\n summ_geolocation = round(end_lat + end_lon, 5)\n for position in bufer_people:\n print(float(position.geolocation))\n # print(abs(summ_geolocation - float(position.geolocation)))\n print(summ_geolocation - float(position.geolocation))\n if abs(summ_geolocation - float(position.geolocation)) <= 0.00004:\n count += 1\n if count >= 3:\n for item in get_nearest(end_lat, end_lon, json_data['type']):\n print(item['geometry']['coordinates'])\n print(type(item['geometry']['coordinates']))\n objected.append(item['geometry']['coordinates'])\n return objected\n else:\n return 'Normal'\n \n return 'ok'\n\n\n@app.route('/find_covid/user_at_home', methods=['GET', 'POST'])\ndef user_at_home():\n json_data = request.get_json()\n bufer_people = BUFFER_PEOPLE.query.filter_by(id=json_data['id']).first()\n db.session.delete(bufer_people)\n db.session.commit()\n return 'ok'\n\n@app.route('/find_covid/add_visited_places', methods=['GET', 'POST'])\ndef visited_places():\n json_data = request.get_json()\n visited_places = VISITED_PLACES(user_id=json_data['user_id'], lat=json_data['lat'], lon=json_data['lon'], type_of_public=json_data['type_of_public'])\n db.session.add(visited_places)\n db.session.commit()\n return 'ok'\n\n\n@app.route('/find_covid/route_to_the_pharmacy', methods=['GET', 'POST'])\ndef route_to_the_pharmacy():\n json_data = request.get_json()\n lat = json_data['lat']\n lon = json_data['lon']\n near_pharamacies = get_nearest(lat, lon)\n for item in near_pharamacies:\n print(near_pharamacies[0]['geometry']['coordinates'])\n return 'ok'\n\n# Подсчёт кратчайшего расстояния между двумя точками\n# @app.route('/get_shortest_distance', methods=['GET', 'POST'])\n# def location():\n# if request.method == 'POST':\n# json_data = request.get_json()\n# user1 = USERS.query.filter_by(email=json_data['email1']).first()\n# user_id1 = user.id\n# status1_info = STATUS.query.filter_by(id=user_id1).first()\n# user1 = USERS.query.filter_by(email=json_data['email1']).first()\n# user_id1 = user.id\n# status1_info = STATUS.query.filter_by(id=user_id1).first()\n# lat1 = json_data['lat1']\n# lon1 = json_data['lon1']\n# # approximate radius of earth in km\n# R = 6373.0\n\n# lat1 = radians(52.2296756)\n# lon1 = radians(21.0122287)\n# lat2 = radians(52.406374)\n# lon2 = radians(16.9251681)\n\n# dlon = lon2 - lon1\n# dlat = lat2 - lat1\n\n# a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n# c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n# distance = R * c\n\n# print(\"Result:\", distance)\n# print(\"Should be:\", 278.546, \"km\")\n\n\n\n","sub_path":"web_face/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":11477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
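Editor's note: the commented-out block at the end of routes.py above sketches the haversine great-circle distance. A minimal, self-contained version of the same formula follows; the function name and the Warsaw/Poznan test coordinates (taken from the commented code) are illustrative, not part of the dataset record.

from math import sin, cos, sqrt, atan2, radians

def haversine_km(lat1, lon1, lat2, lon2):
    # Approximate radius of Earth in kilometres.
    R = 6373.0
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    return R * c

# Expected: roughly 278.5 km
print(haversine_km(52.2296756, 21.0122287, 52.406374, 16.9251681))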
+{"seq_id":"167066082","text":"#coding:utf-8\nimport json\nimport random\n\nimport requests\n\nimport jiami\nimport time\n\n#公用参数\nappid = '10005'\nkey = 'EUZ9NunT9DQN+wg6p33vgw=='\nnum = random.randint(100,999)\n\ndef request(url,data):\n timestamp = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + str(num)\n print(timestamp)\n s = appid + '&' + json.dumps(data) + '&' + timestamp + '&' + key\n print('加密报文:' + s)\n\n sign = jiami.md5(s)\n\n d = {'appId':appid, 'data':json.dumps(data),'sign':sign,'timestamp':timestamp}\n print('入参data:'+d['data'])\n data = json.dumps(d)\n print('请求报文:'+data)\n headers ={\"Content-Type\":\"application/json\"}\n\n s = requests.post(url,data=data,headers=headers)\n print('+++++++++')\n print('接口状态:'+str(s.status_code))\n print('返回报文:'+s.text)\n print('+++++++++')\n return s.text\n\n\n","sub_path":"老支付系统/协议支付/协议支付3期(网银转账)/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"212549851","text":"import keras\nimport keras.backend as K\nimport keras.layers as L\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef main():\n window_size = 10\n input_dim = 3\n hidden_dim = 10\n kernel_size = 5\n output_dim = 3\n\n # Model\n transform = GatedDense(1, activation=\"tanh\")\n\n inputs = L.Input((window_size, input_dim))\n net = L.TimeDistributed(transform)(inputs) # XXX: This is the key\n\n res = net\n net = L.Conv1D(hidden_dim, kernel_size, padding=\"same\")(net)\n net = L.BatchNormalization()(net)\n net = L.Activation(\"relu\")(net)\n net = L.add([res, net])\n\n res = net\n net = L.Conv1D(hidden_dim, kernel_size, padding=\"same\")(net)\n net = L.BatchNormalization()(net)\n net = L.Activation(\"relu\")(net)\n net = L.add([res, net])\n\n net = L.Dense(output_dim)(net)\n net = L.Activation(\"softplus\")(net)\n outputs = net\n\n model = keras.Model(inputs, outputs)\n model.compile(loss=\"mae\", optimizer=\"adam\")\n\n # Training\n jet = plt.get_cmap(\"jet\")\n viridis = plt.get_cmap(\"viridis\")\n\n x = np.linspace(0, 1, num=1000)\n input_seq = jet(x)[:, :3]\n output_seq = viridis(x)[:, :3]\n train_data = Seq2seqGenerator(input_seq, output_seq, window_size)\n\n model.fit_generator(train_data, epochs=50, verbose=2)\n\n # Use the transform submodel. 
This works thanks to the TimeDistributed wrapper.\n inputs = L.Input((input_dim,))\n transform_model = keras.Model(inputs, transform(inputs))\n test_X = jet(np.linspace(0, 1, num=10))[:, :3]\n test_y = transform_model.predict(test_X)\n print(test_y)\n\n\nclass GatedDense(keras.Model):\n def __init__(self, dim, activation=\"relu\", **kwargs):\n super().__init__(**kwargs)\n\n dense_options = {\n \"use_bias\": False,\n \"kernel_initializer\": \"he_normal\"\n }\n self.feature_dense = L.Dense(dim, **dense_options)\n self.feature_norm = L.BatchNormalization()\n self.feature_activation = L.Activation(activation)\n self.gate_dense = L.Dense(dim, **dense_options)\n self.gate_norm = L.BatchNormalization()\n self.gate_activation = L.Activation(\"sigmoid\")\n\n def call(self, inputs):\n feature = self.feature_dense(inputs)\n feature = self.feature_norm(feature)\n feature = self.feature_activation(feature)\n gate = self.gate_dense(inputs)\n gate = self.gate_norm(gate)\n gate = self.gate_activation(gate)\n return feature * gate\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1] + (self.feature_dense.units,)\n\n\nclass Seq2seqGenerator(keras.utils.Sequence):\n def __init__(self, x_seq, y_seq, window_size, batch_size=32):\n self.x_seq = x_seq\n self.y_seq = y_seq\n self.window_size = window_size\n self.batch_size = batch_size\n\n def __len__(self):\n return (len(self.x_seq) - self.window_size + 1) // self.batch_size\n\n def __getitem__(self, idx):\n begs = np.random.randint(len(self.x_seq) - self.window_size, size=self.batch_size)\n ends = begs + self.window_size\n batch_xs = []\n batch_ys = []\n for beg, end in zip(begs, ends):\n batch_xs.append(self.x_seq[beg:end])\n batch_ys.append(self.y_seq[beg:end])\n batch_xs = np.array(batch_xs)\n batch_ys = np.array(batch_ys)\n return batch_xs, batch_ys\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"test735-keras_submodel_timeseries/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"419506001","text":"'''\nSpark job to aggregate to_address and value\nof transactions\n\nAuthor: Harvey Randall\nDate: 11/11/19\n'''\nimport pyspark\n\n####\n# Helper Functions\n####\n\ndef is_data(line):\n\ttry:\n\t\tfields = line.split(',')\n\t\tint(fields[3])\n\t\treturn True\n\texcept:\n\t\treturn False\n\ndef get_addr_vals(line):\n\tfields = line.split(',')\n\tto = fields[2]\n\twei = int(fields[3])\n\treturn (to, wei)\n\n\n# Get spark context\nsc = pyspark.SparkContext()\nsc.setLogLevel(\"TRACE\")\n\n# Inital Aggregation\ntransactions = sc.textFile('/data/ethereum/transactions')\ntransactions_data = transactions.filter(is_data)\naddress_data = transactions_data.map(get_addr_vals)\naddress_values = address_data.reduceByKey(lambda a,b: (a+b))\n\n# Joining Transactions/Contracts and Filtering\ncontracts = sc.textFile('/data/ethereum/contracts')\n\n# Top Ten\n\n","sub_path":"Coursework/Top_Ten_Popular/Spark/spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"460184335","text":"import fire\nimport pandas as pd\nfrom simpletransformers_addons.models.sim_text.sim_text_model import SimTextArgs, SimTextModel\nfrom simpletransformers_addons.model_wrappers import FGMWrapper, PGDWrapper\n\ntrain_df = pd.read_json('../user_data/data/train_data/train_enhanced.jsonl', lines=True)\ntest_df = 
pd.read_json('../user_data/data/train_data/kfold/0/dev.jsonl', lines=True)\ntest_df = test_df.iloc[0:8000]\n\ndef train_model(model_name, max_run_time=None):\n if 'large' in model_name:\n batch_size = 128\n swa_start_step = 6000\n swa_steps = 300\n learning_rate = 4.0e-5\n else:\n batch_size = 256\n swa_start_step = 3000\n swa_steps = 150\n learning_rate = 8.0e-5\n # Optional model configuration\n model_args = SimTextArgs()\n model_args.use_bimodel = False\n model_args.num_train_epochs = 3\n model_args.train_batch_size = batch_size\n model_args.eval_batch_size = 32\n model_args.evaluate_during_training = False\n model_args.evaluate_during_training_steps = 1500\n model_args.save_eval_checkpoints = False\n model_args.no_cache = False\n model_args.max_seq_length = 32\n model_args.learning_rate = learning_rate\n model_args.use_early_stopping = True\n model_args.early_stopping_metric = \"auroc\"\n model_args.early_stopping_metric_minimize = False\n model_args.early_stopping_consider_epochs = True\n model_args.early_stopping_patience = 2\n model_args.gradient_accumulation_steps = 1\n model_args.save_steps = 0\n model_args.save_model_every_epoch = False\n import os\n test_dir = f'../user_data/classification/{model_name}'\n model_args.tensorboard_dir = os.path.join(test_dir, 'runs')\n model_args.cache_dir = os.path.join(test_dir, 'cached')\n model_args.output_dir = os.path.join(test_dir, 'outputs')\n model_args.best_model_dir = os.path.join(test_dir, 'best')\n model_args.labels_list = [0, 1]\n\n #model_args.manual_seed = 124525601\n\n ## swa\n model_args.scheduler = \"constant_schedule_with_warmup\"\n model_args.use_swa = True\n # 300\n model_args.swa_steps = swa_steps\n model_args.swa_lr = learning_rate / 2.0\n model_args.config = {\n \"hidden_dropout_prob\": 0.1\n }\n # 5000\n model_args.swa_start_step = swa_start_step\n ##\n model_args.submodel_type = 'bert'\n # Create a ClassificationModel\n model = SimTextModel(\n \"bert\", f\"../user_data/mlm/{model_name}/outputs\", args=model_args\n )\n model = FGMWrapper(model, epsilon=0.3)\n\n # Train the model\n model.train_model(train_df, eval_df=test_df,\n max_run_time=max_run_time)\n\nif __name__ == '__main__':\n fire.Fire(train_model)\n","sub_path":"code/train/train_single_model.py","file_name":"train_single_model.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"14555145","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import simpledialog, messagebox\r\nimport os\r\n\r\ndef show_entry_fields():\r\n print(\"First Name: %s\\nLast Name: %s\" % (e1.get(), e2.get()))\r\n e1.delete(0,END)\r\n e2.delete(0,END)\r\n\r\nmaster = Tk()\r\nLabel(master, text=\"First Name\").grid(row=0)\r\nLabel(master, text=\"Last Name\").grid(row=1)\r\n\r\ne1 = ttk.Entry(master)\r\ne2 = ttk.Entry(master)\r\n\r\n\r\ne1.grid(row=0, column=1)\r\ne2.grid(row=1, column=1)\r\ne1.insert(5,\"Miller\")\r\ne2.insert(10,\"Jill\")\r\nButton(master, text='Quit', command=master.quit).grid(row=3, column=0, sticky=W, pady=4)\r\nButton(master, text='Show', command=show_entry_fields).grid(row=3, column=1, sticky=W, pady=4)\r\nclass append:\r\n def __init__(self):\r\n self.image=PhotoImage(file=\"add.png\")\r\n self.button=Button(master,image=self.image,command=self.append_f).grid(row=0,column=2,sticky=\"e\",padx=5)\r\n self.label=Label(master,text=\"append\",fg=\"red\").grid(row=2,column=2,sticky=\"e\")\r\n\r\n\r\n def append_f(self):\r\n try:\r\n answer = simpledialog.askstring(\"Input\", \"choose a name\",parent=master)\r\n 
os.mkdir(answer)\r\n s=scrollbar()\r\n s.creat_files()\r\n except:\r\n messagebox.showwarning(\"Warning\",\"this name was already chosen\")\r\n \r\nappend=append()\r\nentry=ttk.Entry(master)\r\nentry.grid(row=0,column=3,sticky=\"w\")\r\nentry.insert(10,'s')\r\nmainloop( )\r\n","sub_path":"New folder (2)/';lkjhgf.py","file_name":"';lkjhgf.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"24394705","text":"import xml.etree.ElementTree as ET\n\nwith open('catalog.xml', 'rb') as data:\n\txmlParsed = ET.parse(data)\n\nelemList = set()\n\nfor elem in xmlParsed.iter():\n\telemList.add(elem.tag)\n\nfor elem in xmlParsed.iter('price'):\n\tprice = float(elem.text)\n\tnew_price = round((price * 1.05), 2)\n\telem.text = str(new_price)\n\tprint(elem.text)","sub_path":"Completed_Exercises/12/my_xml_01.py","file_name":"my_xml_01.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"339865616","text":"\r\nimport sys\r\nimport time\r\nimport random as ran\r\nName=input('Hello, what is your Name? ')\r\nprint('Welcome to the ATM at the national bank of coders!!! (ps, your card number is 87568903, and pin 7895)')\r\nclass ATM(object):\r\n def cardNumber():\r\n num=input('card number? ')\r\n if num=='87568903':\r\n print('Hello, '+Name+'!!!')\r\n elif num=='20080416':\r\n print('Hello, Aarav!!!')\r\n else:\r\n print('Invalid')\r\n exit()\r\n\r\n def pin():\r\n pin=input('Pin Number')\r\n if pin == '9076' or pin == '7895':\r\n print(Name+'...')\r\n print('Validated!!!')\r\n else:\r\n print('invalid')\r\n exit()\r\n def withdraw():\r\n balance=6789\r\n howMuch=input('How much do you want to withdraw?')\r\n howMuchint=int(howMuch)\r\n if howMuchint < balance:\r\n balance=int(balance)-howMuchint\r\n print('your balance is now, '+str(balance)+', don\\'t forget to pick up your money!!!')\r\n print('''________________\r\n| |\r\n| |\r\n| $$$ |\r\n| |\r\n|________________|''')\r\n if howMuchint>balance:\r\n print('Not enough money')\r\n\r\n def deposit():\r\n balance=6789\r\n howMuch=input('How much do you want to deposit?')\r\n howMuchint=int(howMuch)\r\n balance = balance+int(howMuch)\r\n print('Your balance is '+str(balance))\r\n def enquiry():\r\n if Name in ('Aarav', 'aarav'):\r\n balance=str(6789)\r\n elif Name in ('Shubha', 'shubha'):\r\n balance=str(89678)\r\n print('your balance is '+balance)\r\n cardNumber()\r\n pin()\r\n what=str(input('what is your transaction? 
(withdraw is 1, deposit is 2, enquiry is 3)'))\r\n if what=='1':\r\n withdraw()\r\n elif what=='2':\r\n deposit()\r\n elif what=='3':\r\n enquiry()\r\n elif what != ('1' or '2' or '3'):\r\n print('there is no transaction for the number '+what)\r\ntime.sleep(5)\r\n","sub_path":"ATM.py","file_name":"ATM.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"34094131","text":"# -*- coding: utf-8 -*-\n\"\"\"\n=============================\nLoad and plot a model\n=============================\n\nHere we load the example model, and then plot it along with the locations.\n\n\"\"\"\n\n# Code source: Lucy Owen & Andrew Heusser\n# License: MIT\n\nimport supereeg as se\n\n# load example model\nmodel = se.load('example_model')\n\n# plot it\nmodel.plot_data(xticklabels=False, yticklabels=False)\n\n# plot locations\nmodel.plot_locs()\n","sub_path":"examples/plot_model.py","file_name":"plot_model.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"218762492","text":"# Given a string s that consists of only uppercase English letters,\n# you can perform at most k operations on that string.\n# In one operation, you can choose any character of the string and change it\n# to any other uppercase English character. Find the length of the longest\n# sub-string containing all repeating letters you can get after performing\n# the above operations.\n\n# Notes\n# in every window, get the character that is occuring the most\n\ndef characterReplacement(s, k):\n longest_substring = 0\n start_window = 0\n max_count = 0\n table = {}\n\n for end_window, char in enumerate(s):\n table[char] = table.get(char, 0) + 1\n max_count = max(max_count, table[char])\n # reduce the current window until we can replace all characters\n while (end_window - start_window + 1) - max_count > k:\n table[s[start_window]] -= 1\n start_window += 1\n\n longest_substring = max(longest_substring, end_window - start_window + 1)\n\n return longest_substring\n\n\ndef main():\n s = \"BAAAB\"\n k = 2\n print(characterReplacement(s, k)) # 5\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"DynamicProgramming/longest_repeating_character_replacement.py","file_name":"longest_repeating_character_replacement.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"625694136","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef detectCycle(head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n hash = {}\n i = 0\n while head:\n hash[head] = i\n i += 1\n\n if head.next is None:\n return None\n elif hash.get(head.next) is not None:\n return head.next\n else:\n head = head.next\n return None\n\ndef detectCycle_2(head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n normal = fast = head\n while normal:\n normal = normal.next\n fast = fast.next\n fast = fast.next\n if normal == fast:\n break\n\n if normal is None: return None\n while head != normal:\n head = head.next\n normal = normal.next\n return head\n\n\nif __name__ == \"__main__\":\n node_root = ListNode(3)\n node_2 = ListNode(2)\n node_0 = ListNode(0)\n node_4 = ListNode(-4)\n node_root.next = node_2\n node_2.next = node_0\n node_0.next = node_4\n node_4.next = node_2\n\n node = detectCycle(node_root)\n print(node.val if 
node else None)\n","sub_path":"source/142_linked_list_cycle_2.py","file_name":"142_linked_list_cycle_2.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"403283147","text":"# coding=utf-8\n# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom contextlib import contextmanager\n\nfrom pex.pex import PEX\nfrom pex.pex_builder import PEXBuilder\n\nfrom pants.backend.python.interpreter_cache import PythonInterpreterCache\nfrom pants.backend.python.python_requirement import PythonRequirement\nfrom pants.backend.python.subsystems.pex_build_util import PexBuilderWrapper\nfrom pants.base.workunit import WorkUnitLabel\nfrom pants.task.task import Task\nfrom pants.util.dirutil import safe_concurrent_creation\nfrom pants.util.process_handler import subprocess\nfrom pants.util.strutil import ensure_binary, safe_shlex_join\n\n\nclass PythonToolInstance(object):\n def __init__(self, pex_path, interpreter):\n self._pex = PEX(pex_path, interpreter=interpreter)\n\n @property\n def pex(self):\n return self._pex\n\n def _pretty_cmdline(self, args):\n return safe_shlex_join(self._pex.cmdline(args))\n\n def output(self, args, stdin_payload=None, binary_mode=False, **kwargs):\n process = self._pex.run(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n with_chroot=False,\n blocking=False,\n **kwargs)\n if stdin_payload is not None:\n stdin_payload = ensure_binary(stdin_payload)\n (stdout, stderr) = process.communicate(input=stdin_payload)\n if not binary_mode:\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n return (stdout, stderr, process.returncode, self._pretty_cmdline(args))\n\n @contextmanager\n def run_with(self, workunit_factory, args, **kwargs):\n cmdline = self._pretty_cmdline(args)\n with workunit_factory(cmd=cmdline) as workunit:\n exit_code = self._pex.run(args,\n stdout=workunit.output('stdout'),\n stderr=workunit.output('stderr'),\n with_chroot=False,\n blocking=True,\n **kwargs)\n yield cmdline, exit_code, workunit\n\n def run(self, *args, **kwargs):\n with self.run_with(*args, **kwargs) as (cmdline, exit_code, _):\n return cmdline, exit_code\n\n\nclass PythonToolPrepBase(Task):\n \"\"\"Base class for tasks that resolve a python tool to be invoked out-of-process.\"\"\"\n\n # Subclasses must set to a subclass of `pants.backend.python.subsystems.PythonToolBase`.\n tool_subsystem_cls = None\n\n # Subclasses must set to a subclass of `PythonToolInstance`. This is the type of the\n # product produced by this task. 
It is distinct from the subsystem type so that multiple\n # instances of the same tool, possibly at different versions, can be resolved by different\n # prep tasks, if necessary.\n tool_instance_cls = None\n\n @classmethod\n def subsystem_dependencies(cls):\n return super(PythonToolPrepBase, cls).subsystem_dependencies() + (\n cls.tool_subsystem_cls.scoped(cls),\n PexBuilderWrapper.Factory,\n PythonInterpreterCache,\n )\n\n @classmethod\n def product_types(cls):\n return [cls.tool_instance_cls]\n\n def _build_tool_pex(self, tool_subsystem, interpreter, pex_path):\n with safe_concurrent_creation(pex_path) as chroot:\n pex_builder = PexBuilderWrapper.Factory.create(\n builder=PEXBuilder(path=chroot, interpreter=interpreter),\n log=self.context.log)\n reqs = [PythonRequirement(r) for r in tool_subsystem.get_requirement_specs()]\n pex_builder.add_resolved_requirements(reqs=reqs, platforms=['current'])\n pex_builder.set_entry_point(tool_subsystem.get_entry_point())\n pex_builder.freeze()\n\n def execute(self):\n tool_subsystem = self.tool_subsystem_cls.scoped_instance(self)\n pex_name = tool_subsystem.options_scope\n pex_path = os.path.join(self.workdir, self.fingerprint, '{}.pex'.format(pex_name))\n\n interpreter_cache = PythonInterpreterCache.global_instance()\n interpreter = interpreter_cache.select_interpreter_for_targets([])\n\n if not os.path.exists(pex_path):\n with self.context.new_workunit(name='create-{}-pex'.format(pex_name),\n labels=[WorkUnitLabel.PREP]):\n self._build_tool_pex(tool_subsystem=tool_subsystem,\n interpreter=interpreter,\n pex_path=pex_path)\n\n tool_instance = self.tool_instance_cls(pex_path, interpreter)\n self.context.products.register_data(self.tool_instance_cls, tool_instance)\n","sub_path":"src/python/pants/backend/python/tasks/python_tool_prep_base.py","file_name":"python_tool_prep_base.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"51377873","text":"from flask import flash, request, render_template, session, redirect, url_for, Response, abort\nfrom libs.parsing_functions import upload_coordinates, create_machines_by_dir, create_new_user_by_dir\nfrom libs.request_functions import login_required\nfrom forms import *\nfrom models_user import *\nimport os\nfrom config import paths\nfrom libs.parsing_functions import load_new_data\nfrom views.data_views import *\nfrom views.upload_views import *\n\nimport shutil\n\n@app.route('/try_new')\ndef try_new():\n return render_template('asdasd.html')\n\n\n@app.route('/check_session')\ndef check_session():\n session.clear()\n session['user_id'] = 1\n\n return 'Тык'\n\n@app.route('/load_new_data')\n@login_required\ndef load_new_data_rule():\n user = Users.query.get(session['user_id'])\n if paths.get('dir_with_users'):\n empty = True\n try:\n result_data = load_new_data(user.id)\n except:\n app.logger.error(traceback.format_exc())\n return jsonify(**{'status': 'False'})\n if result_data:\n for machine_name, protocols in result_data.items():\n for protocol, file_name in protocols.items():\n if file_name:\n empty = False\n break\n if result_data and not empty:\n return jsonify(**{'status': 'True'})\n elif empty:\n return jsonify(**{'status': 'Empty'})\n else:\n return jsonify(**{'status': 'False'})\n else:\n return jsonify(**{'status': 'False'})\n return jsonify(**{'status': 'Disable'})\n\n\n@app.route('/', methods=['GET'])\n@login_required\ndef index():\n user = Users.query.get(session['user_id'])\n if 
request.cookies.get('selected_protocols') and request.cookies.get('selected_protocols') != 'False':\n selected_protocols = json.loads(request.cookies.get('selected_protocols'))\n else:\n selected_protocols = {}\n load_coordinates_form = LoadCoordinatesForm()\n try:\n farms = user.farms\n except AttributeError:\n farms = []\n if session.get('start_agregat'):\n default_machine_id = session['start_agregat']\n elif user.machines:\n default_machine_id = user.machines[0].id\n else:\n default_machine_id = 'False'\n if session.get('request_date_from') and session.get('request_date_to'):\n sess_from = session.get('request_date_from')\n sess_to = session.get('request_date_to')\n request_date_from = sess_from\n request_date_to = sess_to\n request_date_from_data = datetime.datetime.fromtimestamp(sess_from).strftime('%d-%m-%Y') # for the input field\n request_date_to_data = datetime.datetime.fromtimestamp(sess_to).strftime('%d-%m-%Y')\n else:\n request_date_from = round(datetime.datetime.now().replace(hour=0).timestamp()) # for the hidden field\n request_date_to = round(datetime.datetime.now().replace(hour=23).timestamp())\n session['request_date_from'] = request_date_from\n session['request_date_to'] = request_date_to\n\n request_date_from_data = datetime.datetime.fromtimestamp(request_date_from).strftime('%d-%m-%Y') # for the input field\n request_date_to_data = datetime.datetime.fromtimestamp(request_date_to).strftime('%d-%m-%Y')\n return render_template('index.html', user=user,\n load_coordinates_form=load_coordinates_form,\n farms=farms,\n selected_protocols=selected_protocols,\n default_machine_id=default_machine_id,\n request_date_from_data=request_date_from_data,\n request_date_to_data=request_date_to_data,\n request_date_from=request_date_from*1000,\n request_date_to=request_date_to*1000,\n )\n\n\n@app.route('/edit_farm', methods=['POST'])\ndef edit_farm():\n try:\n farm_name = request.form['farm_name']\n farm_id = request.form['id_farm']\n except KeyError:\n return 'False'\n else:\n with db.engine.begin() as connect:\n connect.execute(Farm.__table__.update(whereclause=(Farm.id == farm_id), values={'title': farm_name}))\n return 'True'\n\n\n@app.route('/log_out', methods=['GET', 'POST'])\ndef log_out():\n return \"asd\"\n\nimport sqlalchemy.exc\n@app.route('/example', methods=['GET', 'POST'])\n@login_required\ndef example():\n form_file = LoadCoordinatesForm(request.files)\n if request.method == 'POST' and form_file.validate():\n try:\n coordinates_insert = upload_coordinates(form_file)\n except UnicodeDecodeError:\n app.logger.error(traceback.format_exc())\n flash('The file must be encoded in UTF-8')\n return redirect(url_for('index'))\n except UnicodeEncodeError:\n app.logger.error(traceback.format_exc())\n flash('The file name must contain only Latin letters')\n return redirect(url_for('index'))\n except Exception as e:\n app.logger.error(traceback.format_exc())\n flash('Unknown error')\n return redirect(url_for('index'))\n if coordinates_insert:\n try:\n farm_id = request.form['farm_id']\n with db.engine.begin() as connection:\n connection.execute(select([Cornfield.name]).where(Cornfield.id_farm == farm_id))\n assert len(coordinates_insert[0]) == len(\n coordinates_insert[1]), 'Name list length != field list length'\n for coordinates in coordinates_insert:\n db.session.add(Cornfield(coordinates[1], coordinates[0], farm_id))\n db.session.commit()\n flash('Field added successfully')\n return redirect(url_for('index'))\n except sqlalchemy.exc.IntegrityError:\n pass\n except:\n db.session.rollback()\n 
app.logger.error(traceback.format_exc())\n flash('Database write error')\n return redirect(url_for('index'))\n flash('Not all fields are filled in')\n\n\n@app.route('/save_and_clear')\n@login_required\ndef save_and_clear():\n os.listdir(paths['path_with_databases'])\n user = Users.query.get(1)\n for x in db.Model._decl_class_registry.values():\n if x in db_objects.values():\n db.session.query(x).delete()\n try:\n db.session.commit()\n except:\n app.logger.error(traceback.format_exc())\n db.session.rollback()\n flash('An error occurred')\n return redirect(url_for('index'))\n try:\n if not os.path.isdir(paths['path_with_databases']):\n os.mkdir(paths['path_with_databases'])\n shutil.copy(paths['db_path'], os.path.join(paths['path_with_databases'], '{user_name}_{base_name}.db').format(\n base_name=datetime.datetime.fromtimestamp(datetime.datetime.now().timestamp()).strftime(\n '%Y-%m-%d_%H-%M-%S'),\n user_name=user.name\n ).encode()\n )\n except FileNotFoundError as e:\n app.logger.error(e)\n flash('An error occurred')\n return redirect(url_for('index'))\n except KeyError as e:\n app.logger.error(e)\n flash('An error occurred')\n return redirect(url_for('index'))\n\n return redirect(url_for('index'))\n","sub_path":"views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"58472068","text":"import enum\nimport functools\nimport os\nfrom typing import Any, List, Mapping, Optional, Tuple, Union\n\nimport attr\nimport cattr\nimport fontTools.misc.plistlib\n\n\nclass AxisValueFlag(enum.Flag):\n OlderSiblingFontAttribute = 0x0001\n ElidableAxisValueName = 0x0002\n\n\n@attr.s(auto_attribs=True, frozen=True, slots=True)\nclass FlagList:\n \"\"\"Represent a list of AxisValueFlags so I can implement a value\n property.\"\"\"\n\n flags: List[AxisValueFlag] = attr.ib(factory=list)\n\n @property\n def value(self) -> int:\n \"\"\"Return the value of all flags ORed together.\"\"\"\n if not self.flags:\n return 0\n return functools.reduce(lambda x, y: x | y, self.flags).value\n\n\n@attr.s(auto_attribs=True, frozen=True, slots=True)\nclass NameRecord:\n \"\"\"Represent an IETF BCP 47 language code to name string mapping for the\n `name` table.\"\"\"\n\n mapping: Mapping[str, str]\n\n def __getitem__(self, key):\n return self.mapping.__getitem__(key)\n\n @property\n def default(self):\n return self.mapping[\"en\"]\n\n @classmethod\n def from_string(cls, name: str):\n return cls(mapping={\"en\": name})\n\n @classmethod\n def from_dict(cls, dictionary: Mapping):\n return cls(mapping=dictionary)\n\n @classmethod\n def structure(cls, data):\n if isinstance(data, str):\n return cls.from_string(data)\n if isinstance(data, dict):\n return cls.from_dict(data)\n raise ValueError(f\"Don't know how to construct NameRecord from '{data}'.\")\n\n\n@attr.s(auto_attribs=True, frozen=True, slots=True)\nclass LocationFormat1:\n name: NameRecord\n value: float\n flags: FlagList = attr.ib(factory=FlagList)\n\n def fill_in_AxisValue(self, axis_value: Any, axis_index: int, name_id: int):\n \"\"\"Fill in a supplied fontTools AxisValue object.\"\"\"\n axis_value.Format = 1\n axis_value.AxisIndex = axis_index\n axis_value.ValueNameID = name_id\n axis_value.Value = self.value\n axis_value.Flags = self.flags.value\n return axis_value\n\n\n@attr.s(auto_attribs=True, frozen=True, slots=True)\nclass LocationFormat2:\n name: NameRecord\n value: float\n range: Tuple[float, float]\n flags: FlagList = 
attr.ib(factory=FlagList)\n\n def __attrs_post_init__(self):\n if len(self.range) != 2:\n raise ValueError(\"Range must be a value pair of (min, max).\")\n\n def fill_in_AxisValue(self, axis_value: Any, axis_index: int, name_id: int):\n \"\"\"Fill in a supplied fontTools AxisValue object.\"\"\"\n axis_value.Format = 2\n axis_value.AxisIndex = axis_index\n axis_value.ValueNameID = name_id\n axis_value.NominalValue = self.value\n axis_value.RangeMinValue, axis_value.RangeMaxValue = self.range\n axis_value.Flags = self.flags.value\n return axis_value\n\n\n@attr.s(auto_attribs=True, frozen=True, slots=True)\nclass LocationFormat3:\n name: NameRecord\n value: float\n linked_value: float\n flags: FlagList = attr.ib(factory=FlagList)\n\n def fill_in_AxisValue(self, axis_value: Any, axis_index: int, name_id: int):\n \"\"\"Fill in a supplied fontTools AxisValue object.\"\"\"\n axis_value.Format = 3\n axis_value.AxisIndex = axis_index\n axis_value.ValueNameID = name_id\n axis_value.Value = self.value\n axis_value.LinkedValue = self.linked_value\n axis_value.Flags = self.flags.value\n return axis_value\n\n\n@attr.s(auto_attribs=True, frozen=True, slots=True)\nclass LocationFormat4:\n name: NameRecord\n axis_values: Mapping[str, float]\n flags: FlagList = attr.ib(factory=FlagList)\n\n def fill_in_AxisValue(\n self,\n axis_value: Any,\n axis_name_to_index: Mapping[str, int],\n name_id: int,\n axis_value_record_type: Any,\n ):\n \"\"\"Fill in a supplied fontTools AxisValue object.\"\"\"\n axis_value.Format = 4\n axis_value.ValueNameID = name_id\n axis_value.Flags = self.flags.value\n axis_value.AxisValueRecord = []\n for name, value in self.axis_values.items():\n record = axis_value_record_type()\n record.AxisIndex = axis_name_to_index[name]\n record.Value = value\n axis_value.AxisValueRecord.append(record)\n return axis_value\n\n\n@attr.s(auto_attribs=True, frozen=True, slots=True)\nclass Axis:\n name: NameRecord\n tag: str\n locations: List[Union[LocationFormat1, LocationFormat2, LocationFormat3]] = attr.ib(\n factory=list\n )\n ordering: Optional[int] = None\n\n\n@attr.s(auto_attribs=True, frozen=True, slots=True)\nclass Stylespace:\n axes: List[Axis]\n locations: List[LocationFormat4] = attr.ib(factory=list)\n elided_fallback_name_id: int = 2\n\n def __attrs_post_init__(self):\n \"\"\"Fill in a default ordering unless the user specified at least one\n custom one.\n\n This works around the frozen state with `object.__setattr__`.\n \"\"\"\n if all(axis.ordering is None for axis in self.axes):\n for index, axis in enumerate(self.axes):\n object.__setattr__(axis, \"ordering\", index)\n elif not all(\n isinstance(axis.ordering, int) and axis.ordering >= 0 for axis in self.axes\n ):\n raise ValueError(\n \"If you specify the ordering for one axis, you must specify all of \"\n \"them and they must be >= 0.\"\n )\n\n @classmethod\n def from_bytes(cls, stylespace_content: bytes):\n stylespace_content_parsed = fontTools.misc.plistlib.loads(stylespace_content)\n converter = cattr.Converter()\n converter.register_structure_hook(\n FlagList,\n lambda list_of_str_flags, cls: cls(\n [getattr(AxisValueFlag, f) for f in list_of_str_flags]\n ),\n )\n converter.register_structure_hook(\n NameRecord, lambda data, cls: cls.structure(data)\n )\n stylespace = converter.structure(stylespace_content_parsed, cls)\n return stylespace\n\n @classmethod\n def from_file(cls, stylespace_path: os.PathLike):\n with open(stylespace_path, \"rb\") as fp:\n stylespace = cls.from_bytes(fp.read())\n return 
stylespace\n","sub_path":"statmake/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":6225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"76768424","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\n\ndef load_spiketrains(idx):\n spiketrains = []\n with open('../mnist/TEST_POISSON_' + str(idx) + '.txt', \"r\") as fl:\n for line in fl:\n spiketrains.append([int(e.strip()) for e in line.split(',') if e != '\\n'])\n return spiketrains\n\ndef plot_raster(spiketrains):\n lambdas = [len(spiketrain) for spiketrain in spiketrains]\n img = np.zeros((len(spiketrains), 1000))\n idx = 0\n for spiketrain in spiketrains:\n for spike in spiketrain:\n for k in range(2):\n for j in range(1):\n img[idx+k, spike] = np.log(lambdas[idx])\n if idx + k == len(spiketrains)-1:\n break \n\n for k in range(2):\n for j in range(1):\n img[idx-k, spike] = np.log(lambdas[idx])\n if idx - k == 0:\n break \n idx += 1\n\n plt.imshow(img, cmap = 'gray')\n plt.xticks([])\n plt.yticks([])\n plt.savefig(\"poisson_raster.png\")\n\ndef plot_2d_hist(spiketrains, bins = (20,20)):\n x_pos = []\n y_pos = []\n idx = 0\n for spiketrain in spiketrains:\n for spike in spiketrain:\n x_pos.append(idx % 28)\n y_pos.append(idx / 28)\n idx += 1\n\n plt.hist2d(x_pos, y_pos, bins = bins)\n plt.savefig(\"poisson_histogram.png\")\n\nif __name__ == \"__main__\":\n spiketrains = load_spiketrains(2)\n plot_raster(spiketrains)\n plot_2d_hist(spiketrains)\n","sub_path":"plotting/old/plot_poisson.py","file_name":"plot_poisson.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"81960355","text":"from typing import Callable, Optional\n\n\nclass ListNode:\n def __init__(self, x: int) -> None:\n self.val = x\n self.next: Optional[ListNode] = None\n\n\ndef create_node_list(values: list[int], val: int) -> tuple[ListNode, ListNode]:\n \"\"\"Creates a ListNode out of a list of values\"\"\"\n head = ListNode(values[0])\n return_node = head\n\n last_node = head\n for value in values[1:]:\n node = ListNode(value)\n if node.val == val:\n return_node = node\n\n last_node.next = node\n last_node = node\n\n return head, return_node\n\n\ndef get_values(node: ListNode) -> list[int]:\n \"\"\"Returns the values in linked list\"\"\"\n values = [node.val]\n curr = node.next\n while curr is not None:\n values.append(curr.val)\n curr = curr.next\n\n return values\n\n\nclass Solution:\n def deleteNode(self, node: ListNode) -> None:\n assert node.next is not None, (node.next, None)\n node.val = node.next.val\n node.next = node.next.next\n\n\ntests = [\n (\n ([4, 5, 1, 9], 5,),\n [4, 1, 9],\n ),\n (\n ([4, 5, 1, 9], 1,),\n [4, 5, 9],\n ),\n (\n ([1, 2, 3, 4], 3,),\n [1, 2, 4],\n ),\n (\n ([0, 1], 0,),\n [1],\n ),\n (\n ([-3, 5, -99], -3,),\n [5, -99],\n ),\n]\n\n\ndef validator(\n deleteNode: Callable[[ListNode], None],\n inputs: tuple[list[int], int],\n expected: list[int]\n) -> None:\n values, node_value = inputs\n node_list, node = create_node_list(values, node_value)\n deleteNode(node)\n\n list_values = get_values(node_list)\n assert list_values == expected, (list_values, expected)\n","sub_path":"delete_node_in_a_linked_list.py","file_name":"delete_node_in_a_linked_list.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"614782704","text":"import pygame as pg\n\npg.init()\n\ndisplayW = 1000\ndisplayH = 750\n\npointer = pg.image.load('Images/pointer.png')\npointer = pg.transform.scale(pointer, (100, 100))\n\nscreen = displaySurface = pg.display.set_mode((displayW, displayH))\npg.display.set_caption('Pygame Test')\nclock = pg.time.Clock()\n\nvalidMoves = ['ab', 'ac', 'ba', 'bc', 'ca', 'cb']\n\n# Colours\ncolours = {\"white\": (255, 255, 255),\n \"black\": (0, 0, 0),\n \"green\": (0, 255, 0),\n \"blue\": (0, 0, 255),\n \"red\": (255, 0, 0),\n \"keys\": (45, 71, 135),\n \"keysof\": (33, 52, 99)\n }\n\npColours = {\"1\": (186, 11, 11),\n \"2\": (78, 10, 186),\n \"3\": (9, 139, 186),\n \"4\": (104, 170, 5),\n \"5\": (255, 230, 7),\n \"6\": (239, 142, 7),\n \"7\": (226, 108, 152),\n \"8\": (186, 11, 11),\n \"9\": (186, 11, 11),\n \"10\": (78, 10, 186),\n \"11\": (9, 139, 186),\n \"12\": (104, 170, 5),\n \"13\": (255, 230, 7),\n \"14\": (239, 142, 7),\n \"15\": (226, 108, 152),\n \"16\": (186, 11, 11)\n }\n\n\ndef dark(colour, n):\n newColour = (colour[0] * 0.78431372549, colour[1] * 0.78431372549, colour[2] * 0.78431372549)\n n -= 1\n while n != 0:\n newColour = (newColour[0] * 0.5, newColour[1] * 0.5, newColour[2] * 0.5)\n n -= 1\n return newColour\n\n\ndef light(colour):\n newColour = (colour[0] * 1.275, colour[1] * 1.275, colour[2] * 1.275)\n return newColour\n\n\n# Text Generation\nfonts = {\"smallText\": pg.font.SysFont(\"lucidaconsole\", 20),\n \"largeText\": pg.font.SysFont('garamond', 70),\n \"words\": pg.font.SysFont(\"candara\", 20)\n }\n\n\ndef textObjects(text, font, colour):\n textSurface = fonts[font].render(text, True, colours[colour])\n return textSurface, textSurface.get_rect() # get_rect to get size of element\n\n\n# draw(surface, colour, pos(top x, top y, x width, y height)\ndef button(msg, colour, x, y, w, h, ic, ac, value=None, action=None):\n result = True\n mouse = pg.mouse.get_pos()\n click = pg.mouse.get_pressed()\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pg.draw.rect(displaySurface, ac, (x, y, w, h))\n if click[0] == 1 and action is not None:\n print('click' + str(action))\n if value is not None:\n action(value)\n else:\n action()\n else:\n pg.draw.rect(displaySurface, ic, (x, y, w, h))\n\n textSurf, textRect = textObjects(msg, 'smallText', colour)\n textRect.center = ((x + (w / 2)), (y + (h / 2)))\n displaySurface.blit(textSurf, textRect)\n\n # return result\n\n\ndef border(surf, colour, x, y, w, h, pt, l, r, u, d):\n if u == 1:\n pg.draw.rect(surf, colour, (x, y, w, pt))\n if l == 1:\n pg.draw.rect(surf, colour, (x, y, pt, h))\n if r == 1:\n pg.draw.rect(surf, colour, (x+w-pt, y, pt, h))\n if d == 1:\n pg.draw.rect(surf, colour, (x, y+h-pt, w, pt))\n\n\ndef screenGenerate(pieces, surf, bgC):\n # surf.fill(colours[\"white\"])\n pg.draw.rect(surf, bgC, (160, 690 - (pieces * 60), 40, 20 + ((pieces + 1) * 60)))\n pg.draw.rect(surf, bgC, (480, 690 - (pieces * 60), 40, 20 + ((pieces + 1) * 60)))\n pg.draw.rect(surf, bgC, (800, 690 - (pieces * 60), 40, 20 + ((pieces + 1) * 60)))\n\n\ndef shake(ap, surf):\n for i in range(20):\n pg.draw.rect(surf, (120, 120, 120), (ap[0] - 20, ap[1], 140, 100))\n surf.blit(pointer, (ap[0] + i, ap[1]))\n pg.display.update()\n for i in range(20):\n pg.draw.rect(surf, (120, 120, 120), (ap[0] - 20, ap[1], 140, 100))\n surf.blit(pointer, ap)\n pg.display.update()\n for i in range(20):\n pg.draw.rect(surf, (120, 120, 120), (ap[0] - 20, ap[1], 140, 100))\n surf.blit(pointer, (ap[0] - i, ap[1]))\n pg.display.update()\n for i in range(20):\n 
pg.draw.rect(surf, (120, 120, 120), (ap[0] - 20, ap[1], 140, 100))\n surf.blit(pointer, ap)\n pg.display.update()\n\n\ndef exitGame():\n pg.quit()\n quit()\n\n\ndef breakFunc():\n value = False\n return value\n\n\ndef colourSlider(val, x, y, w, h):\n for i in range(360):\n colour = pg.Color('#000000')\n colour.hsva = (i, 100, 100, 100)\n pg.draw.rect(screen, colour, (x + i*(w/360), y, w/360, h))\n pg.draw.circle(screen, (255, 0, 0), (int(x), int(y + h/2)), int(h/2))\n pg.draw.circle(screen, (255, 0, 0), (int(x + w), int(y + h / 2)), int(h / 2))\n\n mouse = pg.mouse.get_pos()\n click = pg.mouse.get_pressed()\n yc = y + h/2\n cCol = pg.Color('#000000')\n cCol.hsva = (val, 100, 100, 100)\n xc = (val * (w/360)) + x\n if click[0] == 1:\n if x < mouse[0] < x + w:\n xc = mouse[0]\n pg.draw.circle(screen, (127, 127, 127), (int(xc), int(yc)), h + 2)\n pg.draw.circle(screen, cCol, (int(xc), int(yc)), h)\n\n val = (xc - x) / (w / 360)\n return val\n\n\ndef keyboard():\n pg.draw.rect(displaySurface, dark(colours[\"blue\"], 3), (0, 440, 1000, 265))\n border(displaySurface, dark(colours[\"blue\"], 4), 0, 440, 1000, 265, 7, 1, 1, 1, 1)\n\n button('Q', 'white', 80, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'Q', letter)\n button('W', 'white', 165, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'W', letter)\n button('E', 'white', 250, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'E', letter)\n button('R', 'white', 335, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'R', letter)\n button('T', 'white', 420, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'T', letter)\n button('Y', 'white', 505, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'Y', letter)\n button('U', 'white', 590, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'U', letter)\n button('I', 'white', 675, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'I', letter)\n button('O', 'white', 760, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'O', letter)\n button('P', 'white', 845, 450, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'P', letter)\n\n button('A', 'white', 115, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'A', letter)\n button('S', 'white', 200, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'S', letter)\n button('D', 'white', 285, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'D', letter)\n button('F', 'white', 370, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'F', letter)\n button('G', 'white', 455, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'G', letter)\n button('H', 'white', 540, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'H', letter)\n button('J', 'white', 625, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'J', letter)\n button('K', 'white', 710, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'K', letter)\n button('L', 'white', 795, 535, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'L', letter)\n\n button('Z', 'white', 150, 620, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'Z', letter)\n button('X', 'white', 235, 620, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'X', letter)\n button('C', 'white', 320, 620, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'C', letter)\n button('V', 'white', 405, 620, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'V', letter)\n button('B', 'white', 490, 620, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'B', letter)\n button('N', 'white', 575, 620, 75, 75, colours[\"keys\"], colours[\"keysof\"], 'N', letter)\n button('M', 'white', 660, 620, 75, 75, colours[\"keys\"], 
colours[\"keysof\"], 'M', letter)\n\n\ndef letter(value):\n return value\n","sub_path":"MajorTasks/Task2/TowersOfHanoi/Setup.py","file_name":"Setup.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"310070726","text":"import time\n\nfrom appium import webdriver\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom appium.webdriver.webdriver import WebDriver\n\n\nclass Testxueqiu_swipe(object):\n driver = WebDriver\n @classmethod\n def setup_class(cls):\n cls.driver = cls.install_app()\n print(\"setup class\")\n # el1 = cls.driver.find_element_by_id(\"com.gongbangbang.www:id/navigation_mine\")\n # el1.click()\n\n def setup_method(self):\n print(\"setup method\")\n # 获取启动的appium的driver实例,用于后续每个case的driver\n self.driver = Testxueqiu_swipe.driver\n # el2 = self.driver.find_element_by_id(\"com.gongbangbang.www:id/loginRegister\")\n # el2.click()\n\n def test_swipe_loop(self):\n self.driver.find_element_by_xpath(\"//*[contains(@resource-id,'title')]//*[@text='热门']\")\n action = TouchAction(self.driver)\n rect = self.driver.get_window_rect()\n #怎么确认是否滑动到最后一个页面,没想到处理方案就把数据先写死了\n for i in range(3):\n # self.driver.swipe(1000,1000,200,200)\n action.press(x=rect[\"width\"]*0.9,y=rect[\"height\"]*0.8).move_to(x=rect[\"width\"]*0.1,y=rect[\"height\"]*0.8).release().perform()\n time.sleep(2)\n for j in range(5):\n action.press(x=rect[\"width\"]*0.1,y=rect[\"height\"]*0.9).move_to(x=rect[\"width\"]*0.1,y=rect[\"height\"]*0.1).release().perform()\n\n\n def teardown_method(self):\n # 不加也没关系,如果不quit,启动appium会自动quit之前的session\n self.driver.quit()\n\n @classmethod\n def install_app(cls) -> WebDriver:\n caps = {}\n # 如果有必要,进行第一次安装\n caps[\"platformName\"] = \"android\"\n caps[\"appPackage\"] = \"com.xueqiu.android\"\n caps[\"appActivity\"] = \".view.WelcomeActivityAlias\"\n # 解决首次启动问题(权限问题)\n caps[\"autoGrantPermissions\"] = \"true\"\n # 为了更快的启动,并保留之前的数据,从而可以保存上一个case执行后的状态\n caps[\"noReset\"] = \"true\"\n\n driver = webdriver.Remote(\"http://localhost:4723/wd/hub\", caps)\n driver.implicitly_wait(10)\n return driver\n","sub_path":"HGWZ/AppiumDemo/test/test_xueqiu_swipe.py","file_name":"test_xueqiu_swipe.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"237878274","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom numbers import Number\n\nimport numpy as np\nfrom lab import B\nfrom numpy.testing import assert_allclose\nfrom plum import Dispatcher\n\nfrom stheno.cache import Cache\nfrom stheno.input import Observed\nfrom stheno.kernel import EQ\nfrom stheno.matrix import matrix\nfrom stheno.mean import TensorProductMean, ZeroMean, Mean, OneMean, \\\n PosteriorMean\n# noinspection PyUnresolvedReferences\nfrom . 
import eq, neq, lt, le, ge, gt, raises, call, ok, eprint\n\n\ndef test_corner_cases():\n yield raises, NotImplementedError, lambda: Mean()(1.)\n\n\ndef test_construction():\n m = TensorProductMean(lambda x: x ** 2)\n\n x = np.random.randn(10, 1)\n c = Cache()\n\n yield m, x\n yield m, x, c\n\n yield m, Observed(x)\n yield m, Observed(x), c\n\n\ndef test_basic_arithmetic():\n dispatch = Dispatcher()\n\n @dispatch(Number)\n def f1(x): return np.array([[x ** 2]])\n\n @dispatch(object)\n def f1(x): return np.sum(x ** 2, axis=1)[:, None]\n\n @dispatch(Number)\n def f2(x): return np.array([[x ** 3]])\n\n @dispatch(object)\n def f2(x): return np.sum(x ** 3, axis=1)[:, None]\n\n m1 = TensorProductMean(f1)\n m2 = TensorProductMean(f2)\n m3 = ZeroMean()\n x1 = np.random.randn(10, 2)\n x2 = np.random.randn()\n\n yield ok, np.allclose((m1 * m2)(x1), m1(x1) * m2(x1)), 'prod'\n yield ok, np.allclose((m1 * m2)(x2), m1(x2) * m2(x2)), 'prod 2'\n yield ok, np.allclose((m1 + m3)(x1), m1(x1) + m3(x1)), 'sum'\n yield ok, np.allclose((m1 + m3)(x2), m1(x2) + m3(x2)), 'sum 2'\n yield ok, np.allclose((5. * m1)(x1), 5. * m1(x1)), 'prod 3'\n yield ok, np.allclose((5. * m1)(x2), 5. * m1(x2)), 'prod 4'\n yield ok, np.allclose((5. + m1)(x1), 5. + m1(x1)), 'sum 3'\n yield ok, np.allclose((5. + m1)(x2), 5. + m1(x2)), 'sum 4'\n\n\ndef test_posterior_mean():\n z = np.linspace(0, 1, 10)\n pcm = PosteriorMean(\n TensorProductMean(lambda x: x),\n TensorProductMean(lambda x: x ** 2),\n EQ(), z, matrix(2 * EQ()(z)), np.random.randn(10)\n )\n\n # Check name.\n yield eq, str(pcm), 'PosteriorMean()'\n\n # Check that the mean computes.\n yield lambda: pcm(z)\n\n\ndef test_function_mean():\n m1 = 5 * OneMean() + (lambda x: x ** 2)\n m2 = (lambda x: x ** 2) + 5 * OneMean()\n m3 = (lambda x: x ** 2) + ZeroMean()\n m4 = ZeroMean() + (lambda x: x ** 2)\n x = np.random.randn(10, 1)\n\n yield ok, np.allclose(m1(x), 5 + x ** 2)\n yield ok, np.allclose(m2(x), 5 + x ** 2)\n yield ok, np.allclose(m3(x), x ** 2)\n yield ok, np.allclose(m4(x), x ** 2)\n\n def my_function(x): pass\n\n yield eq, str(TensorProductMean(my_function)), 'my_function'\n\n\ndef test_derivative():\n B.backend_to_tf()\n s = B.Session()\n\n m = TensorProductMean(lambda x: x ** 2)\n m2 = TensorProductMean(lambda x: x ** 3)\n x = B.array(np.random.randn(10, 1))\n\n yield assert_allclose, s.run(m.diff(0)(x)), s.run(2 * x)\n yield assert_allclose, s.run(m2.diff(0)(x)), s.run(3 * x ** 2)\n\n s.close()\n B.backend_to_np()\n\n\ndef test_selected_mean():\n m = 5 * OneMean() + (lambda x: x ** 2)\n x = np.random.randn(10, 3)\n\n yield assert_allclose, m.select([1, 2])(x), m(x[:, [1, 2]])\n\n\ndef test_shifting():\n m = 5 * OneMean() + (lambda x: x ** 2)\n x = np.random.randn(10, 3)\n\n yield assert_allclose, m.shift(5)(x), m(x - 5)\n\n\ndef test_stretching():\n m = 5 * OneMean() + (lambda x: x ** 2)\n x = np.random.randn(10, 3)\n\n yield assert_allclose, m.stretch(5)(x), m(x / 5)\n\n\ndef test_input_transform():\n m = 5 * OneMean() + (lambda x: x ** 2)\n x = np.random.randn(10, 3)\n\n yield assert_allclose, m.transform(lambda x, c: x - 5)(x), m(x - 5)\n","sub_path":"tests/test_mean.py","file_name":"test_mean.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"525346918","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_one_parameter\n\n# url : 
https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/update-file-system.html\nif __name__ == '__main__':\n \"\"\"\n\tcreate-file-system : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/create-file-system.html\n\tdelete-file-system : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/delete-file-system.html\n\tdescribe-file-systems : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/describe-file-systems.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # file-system-id : The ID of the file system that you want to update.\n \"\"\"\n add_option_dict = {}\n\n #######################################################################\n # parameter display string\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_one_parameter(\"efs\", \"update-file-system\", \"file-system-id\", add_option_dict)\n\n\n\n\n\n","sub_path":"efs_write_1/file-system_update.py","file_name":"file-system_update.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"527526712","text":"import requests\nfrom config import Config\n\n\ndef first_request():\n payload = {\"name\": \"Maksim\"}\n method_url = Config.API_URL + '/hello'\n response = requests.get(url=method_url, params=payload)\n print(response.text)\n\n\ndef task_3():\n name = 'Maks'\n print(f\"hello from {name}!\")\n\n\ndef task_4():\n response = requests.get(Config.API_URL + '/get_text')\n print(response.text)\n\n\nif __name__ == '__main__':\n first_request()\n task_3()\n task_4()\n","sub_path":"lesson_1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"193632050","text":"#!/usr/bin/env python3\n\n# Copyright 2014 Brett Slatkin, Pearson Education Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Preparations to reproduce the environment used in the book.\nimport logging\nfrom pprint import pprint\nfrom sys import stdout as STDOUT\n\n\n# Example 1.\nclass FrequencyList(list):\n def __init__(self, members):\n super().__init__(members)\n\n def frequency(self):\n counts = {}\n for item in self:\n counts.setdefault(item, 0)\n counts[item] += 1\n return counts\n\n\n# Example 2.\nfoo = FrequencyList(['a', 'b', 'a', 'c', 'b', 'a', 'd'])\nprint('Length is', len(foo))\nfoo.pop()\nprint('After pop():', repr(foo))\nprint('Frequency:', foo.frequency())\n\n\n# Example 3.\nclass BinaryNode(object):\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\n# Example 4.\nbar = [1, 2, 3]\nbar[0]\n\n\n# Example 5.\nbar.__getitem__(0)\n\n\n# Example 6.\nclass IndexableNode(BinaryNode):\n def _search(self, count, index):\n found = None\n if self.left:\n found, count = self.left._search(count, index)\n if not found and count == index:\n found = self\n else:\n count += 1\n if not found and self.right:\n found, count = self.right._search(count, index)\n return found, count\n # The return value is (found, count).\n\n def __getitem__(self, index):\n found, _ = self._search(0, index)\n if not found:\n raise IndexError('Index out of range')\n return found.value\n\n\n# Example 7.\ntree = IndexableNode(\n 10,\n left=IndexableNode(\n 5,\n left=IndexableNode(2),\n right=IndexableNode(\n 6, right=IndexableNode(7))),\n right=IndexableNode(\n 15, left=IndexableNode(11)))\n\n\n# Example 8.\nprint('LRR =', tree.left.right.right.value)\nprint('Index 0 =', tree[0])\nprint('Index 1 =', tree[1])\nprint('Is 11 in the tree?', 11 in tree)\nprint('Is 17 in the tree?', 17 in tree)\nprint('The tree is', list(tree))\n\n\n# Example 9.\ntry:\n len(tree)\nexcept:\n logging.exception('Expected')\nelse:\n assert False\n\n\n# Example 10.\nclass SequenceNode(IndexableNode):\n def __len__(self):\n _, count = self._search(0, None)\n return count\n\n\n# Example 11.\ntree = SequenceNode(\n 10,\n left=SequenceNode(\n 5,\n left=SequenceNode(2),\n right=SequenceNode(\n 6, right=SequenceNode(7))),\n right=SequenceNode(\n 15, left=SequenceNode(11))\n)\n\nprint('The tree has %d nodes' % len(tree))\n\n\n# Example 12.\ntry:\n from collections.abc import Sequence\n \n class BadType(Sequence):\n pass\n \n foo = BadType()\nexcept:\n logging.exception('Expected')\nelse:\n assert False\n\n\n# Example 13.\nclass BetterNode(SequenceNode, Sequence):\n pass\n\ntree = BetterNode(\n 10,\n left=BetterNode(\n 5,\n left=BetterNode(2),\n right=BetterNode(\n 6, right=BetterNode(7))),\n right=BetterNode(\n 15, left=BetterNode(11))\n)\n\nprint('Index of element 7 is', tree.index(7))\nprint('Count of element 10 is', tree.count(10))\n","sub_path":"Python/Python - efektywny Python/item_28.py","file_name":"item_28.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"503637741","text":"# -*- coding: utf-8 -*-\n\"\"\"17 - DL - CNN.ipynb\nThis exercise is an introduction to CNNs for classification applied to the MNIST dataset\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.datasets import cifar10, mnist\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D\nimport numpy 
as np\nimport scipy.ndimage\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nnp.random.seed(0)\n\n# Load MNIST data\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\ns1 = x_train.shape\ns2 = x_test.shape\nprint(f\"The mnist data was loaded with {s1[0]} training samples and {s2[0]} testing samples. Each sample is a {s1[1]} x {s1[2]} pixel image.\")\n\nexample_img = x_train[0]\nexample_img\nplt.imshow(example_img, cmap=\"gray\", vmin=0, vmax=255)\n\ndef calculate_conv_shape(X, K, padding=0, stride=1):\n # Calculates the shape of the output of a convolution \n # Inputs:\n # X (np.array): The input matrix\n # K (np.array): The filter matrix\n # padding (int, optional): Defaults to 0. The padding dimension\n # stride (int, optional): Defaults to 1. The stride of the convolution\n # Returns:\n # tuple: The shape of the convolution output (height followed by width)\n \n Xw = X.shape[1]\n Xh = X.shape[0]\n Kw = K.shape[1]\n Kh = K.shape[0]\n\n yw = int((Xw-Kw+2*padding)/stride + 1)\n yh = int((Xh-Kh+2*padding)/stride + 1)\n \n return (yh,yw)\n\n# Manually create a very basic convolution for sharpening an image\nsharpen = np.array([\n [0, -1, 0],\n [-1, 5, -1],\n [0, -1, 0]])\n\ncalculate_conv_shape(example_img, sharpen, padding=1)\n\nans = calculate_conv_shape(example_img, sharpen, padding=1)\nans = calculate_conv_shape(example_img, sharpen, padding=0, stride=2)\n\n# Apply the sharpen filter to the example_img and save the output to sharpened_image\nprint(example_img.shape)\nprint(sharpen.shape)\nsharpened_image = scipy.ndimage.convolve(example_img, sharpen)\n\nplt.imshow(sharpened_image, cmap=\"gray\", vmin=0, vmax=255)\nplt.show()\n\n# Apply a filter of your choice (I chose a blur filter) and save the output image to filtered_image\nmy_filter = np.array([\n [1/16, 1/8, 1/16],\n [1/8, 1/4, 1/8],\n [1/16, 1/8, 1/16]\n])\nfiltered_image = scipy.ndimage.convolve(sharpened_image, my_filter)\n\nplt.imshow(filtered_image, cmap=\"gray\", vmin=0, vmax=255)\nplt.show()\n\n# Create a simple FFNN model for classifying the images\nsimple_layers = [ Flatten(),\n Dense(256, activation='relu'),\n Dropout(0.1),\n Dense(128, activation='relu'),\n Dropout(0.1),\n Dense(10, activation='softmax')]\nsimple_model = keras.Sequential(simple_layers)\nsimple_model.compile(optimizer=\"adam\", loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n# Fit the data and assess performance\nsimple_model.fit(x_train / 256., y_train, epochs=5)\nsimple_scores = simple_model.evaluate(x_test / 256., y_test)\nprint(simple_scores)\nassert simple_scores[1] > 0.3\n\nprint(f\"\\nThe simple model achieves an accuracy of {simple_scores[1]*100:.2f}% on the test data.\")\n\n# Create a CNN model to compare with the FFNN\n# padding='same' keeps the feature maps large enough to survive four rounds of 2x2 pooling on 28x28 inputs\ncnn_layers = [\n Conv2D(32, (3,3), activation='relu', padding='same', input_shape=(28,28,1)),\n MaxPool2D((2,2)),\n Conv2D(64, (3,3), activation='relu', padding='same'),\n MaxPool2D((2,2)),\n Conv2D(128, (3,3), activation='relu', padding='same'),\n MaxPool2D((2,2)),\n Conv2D(128, (3,3), activation='relu', padding='same'),\n MaxPool2D((2,2)),\n Flatten(),\n Dense(100, activation='relu'),\n Dense(100, activation='relu'),\n Dense(10, activation='softmax')]\ncnn_model = keras.Sequential(cnn_layers)\ncnn_model.compile(optimizer=\"adam\", loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n# Fit the CNN model and test its performance\ncnn_model.fit(x_train.reshape(-1, 28, 28 ,1), y_train, epochs=5)\ncnn_scores = cnn_model.evaluate(x_test.reshape(-1, 28, 28 ,1), y_test)\nassert cnn_scores[1] > 0.9\nprint(f\"\\nThe CNN model achieves an accuracy of 
{cnn_scores[1]*100:.2f}% on the test data.\")\n\n# Compare output of the two models\n# Change this value to test out some number images and see how the models perform\ni = 245\nnew_example_img = x_test[i].astype(np.float32)\n\nsimple_new_example_img = new_example_img.reshape(-1, 28, 28)\ncnn_new_example_img=new_example_img.reshape(-1, 28, 28, 1)\n\nsimple_predict = simple_model.predict(simple_new_example_img).argmax()\ncnn_predict = cnn_model.predict(cnn_new_example_img).argmax()\ntarget = y_test[i].astype(np.float32)\n\nplt.imshow(new_example_img, cmap=\"gray\", vmin=0, vmax=255)\nprint(f\"The simple model predicts this image is a {simple_predict} and the CNN predicts it is a {cnn_predict}.\")\nprint(f\"The thing is a {target}\")","sub_path":"Machine Learning - Modern/17_CNN Basic Classification.py","file_name":"17_CNN Basic Classification.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"407505004","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 25 12:04:27 2019\n\n@author: ananya\n\nKneser Ney Smoothing + Interpolation\n\"\"\"\nimport numpy as np\nimport pickle\nimport sys\neps = np.finfo(float).eps\nUNKProb = eps\nfilePath = sys.argv\n\nN = 4\nd = 0.75\n'''\nif(filePath[2] == 'corpus1'):\n TrainedModelPath = 'corpus1_Model/Ngrams_'\nelif (filePath[2] == 'corpus3'):\n TrainedModelPath = 'corpus3_Model/Ngrams_'\nelif (filePath[2] == 'corpus4'):\n TrainedModelPath = 'corpus4_Model/Ngrams_'\nelse:\n print(\"please enter correct trained corpus model\")\n exit(0)\n'''\nTrainedModelPath = 'corpus1_Model/Ngrams_' \n#Get the count of the ngrams words in the ngram model\ndef getCount(N,ngrams):\n with open(TrainedModelPath+str(N),\"rb\") as fp:\n ModelSentences = pickle.load(fp)\n key = tuple(ngrams)\n if key in ModelSentences.keys():\n count = ModelSentences[tuple(ngrams)]\n else:\n count = UNKProb\n return count\n\n#Get the count of the timeS startWords follow any other word in the ngrams model\ndef getCountofStartingWords(N,startWords): #startwords = w1w2w3\n count = 0\n with open(TrainedModelPath+str(N),\"rb\") as fp:\n ModelSentences = pickle.load(fp)\n for key in ModelSentences.keys(): #key = w1w2w3wk\n if(tuple(startWords) == key[:N-1]): #compare w1w2w3 == w1w2w3 by ignoring wk\n count += 1\n return count\n \ndef smoothing(numerator,denominator,total,N): \n if N!=2:\n #Calculate Continuation Probability \n PCN = smoothing(numerator,denominator[1:],total[1:],N-1)\n else:\n PCN = UNKProb\n \n #Calculate First Term\n firstTerm_numerator = (getCount(N,total))-d #Count(w1,w2,w3,w4)\n firstTerm_denominator = getCount(N-1,denominator) #Count(w1,w2,w3)\n if (firstTerm_denominator == 0):\n firstTerm_denominator = UNKProb \n \n if (firstTerm_numerator > 0): \n firstTerm = firstTerm_numerator / firstTerm_denominator\n else:\n firstTerm = 0\n \n #Calculate Lambda\n count_n_1gram_succeed_wk = getCountofStartingWords(N,denominator)\n lamda = (d * count_n_1gram_succeed_wk) / firstTerm_denominator\n \n \n PKN = firstTerm + (lamda * PCN)\n \n return PKN\n\n'''\nif (filePath[3] == 'corpus3'):\n TestCorpusPath = \"corpus3Pickle/corpus3.txt_Output.pkl\"\nelif (filePath[3] == 'corpus4'):\n TestCorpusPath = \"corpus4Pickle/corpus4.txt_Output.pkl\"\nelse:\n print(\"please enter correct trained corpus model\")\n exit(0)\n'''\nTestCorpusPath = \"corpus4Pickle/corpus4.txt_Output.pkl\"\n\noutputFile = 'LM_Output.txt'\nPerplexityFile = 'PexplexityFile'\nOp = open(outputFile, 
\"w\")\npp = open(PerplexityFile, \"w\")\nTotalPerplexity = 0\nwith open(TestCorpusPath,\"rb\") as A:\n corpusSentences = pickle.load(A) \n for sentence in corpusSentences:\n if not (sentence):\n continue\n \n if '' in sentence:\n sentence.remove('')\n Probability = 0 #Reassigning 0 as initial probability\n sentence_length = len(sentence) #Length of the sentence \n ngrams_of_sentence = zip(*[sentence[i:] for i in range(N)])\n for ngram in ngrams_of_sentence:\n line = list(ngram)\n P = smoothing(line[N-1],line[:N-1],line,N) \n if P == 0:\n P = UNKProb\n \n Probability = Probability + np.log(P) #log liklihood\n \n perplexity = np.exp(-1*Probability/float(sentence_length))\n print(\"Sentence:\",sentence)\n print(\"LogLikelihood:\", Probability)\n print(\"Perplexity:\", perplexity)\n \n TotalPerplexity+= perplexity\n txt = \" \".join(sentence) + ':Probability:'+str(Probability) + ' Perplexity:'+str(perplexity)\n Op.write(txt+'\\n')\n \n \navgPerplexity = TotalPerplexity/float(len(corpusSentences))\nprint(\"Average Perplexity of %s is %d:\" %(TestCorpusPath,avgPerplexity))\ntxt1 = TestCorpusPath + ':' + str(avgPerplexity)\npp.write(txt1+'\\n')","sub_path":"assignment/KneserNeySmoothing.py","file_name":"KneserNeySmoothing.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"633958390","text":"# Import created modules (need to be stored in the same directory as notebook)\nfrom common_files.sql_functions import read_table\nfrom common_files.sql_functions import create_table\n\n# Import libraries\nimport pandas as pd\nimport logging\n\n\ndef join_table():\n\n logging.info(\"... Starting script to create joined table\")\n\n # Read the raw admissions and discharge data into dataframes\n logging.info(\"... Fetching admissions and discharges data\")\n try:\n adm_query = '''\n select \n *\n from derived.admissions;\n '''\n\n dis_query = '''\n select \n *\n from derived.discharges;\n '''\n\n adm_df = read_table(adm_query)\n dis_df = read_table(dis_query)\n except Exception as e:\n logging.error(\"!!! An error occured fetching the data: \")\n raise e\n\n # Create join of admissions & discharges (left outter join)\n logging.info(\"... Creating joined admissions and discharge table\")\n try:\n # join admissions and discharges\n jn_adm_dis = adm_df.merge(dis_df, how='left', left_index=True,\n right_index=True, suffixes=('_admission', '_discharge'))\n # Extend join table with derived columns based on power bi logic - DEL\n #jn_adm_dis_ext = create_columns(jn_adm_dis) - DEL\n except Exception as e:\n logging.error(\"!!! An error occured creating joined dataframe: \")\n raise e\n\n # Now write the table back to the database\n logging.info(\"... Writing the output back to the database\")\n try:\n jn_adm_dis_tbl_n = 'joined_admissions_discharges'\n create_table(jn_adm_dis, jn_adm_dis_tbl_n)\n except Exception as e:\n logging.error(\n \"!!! An error occured writing join output back to the database: \")\n raise e\n\n logging.info(\"... 
Join script completed!\")\n","sub_path":"step_4_join_and_derived_files/create_joined_table_and_derived_columns.py","file_name":"create_joined_table_and_derived_columns.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"11815693","text":"import discord\nfrom discord.ext import commands\n\nfrom core import checks\nfrom core.models import PermissionLevel\n\nCog = getattr(commands, 'Cog', object)\n\n\nclass Autorole(Cog):\n \"\"\"Auto-assign a role to a user when they join your server.\n More info: [click here](https://github.com/papiersnipper/modmail-plugins/tree/master/autorole)\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.db = bot.plugin_db.get_partition(self)\n\n @Cog.listener()\n async def on_member_join(self, member):\n rolename = (await self.db.find_one({'_id': 'autorole-config'}))['rolename']\n\n if rolename is None:\n return\n else:\n role = discord.utils.get(member.guild.roles, name=rolename)\n await member.add_roles(role)\n\n @commands.group(name='autorole', invoke_without_command=True)\n @checks.has_permissions(PermissionLevel.ADMINISTRATOR)\n async def autorole(self, ctx):\n \"\"\"Auto-assign a role to a user when they join your server.\"\"\"\n\n await ctx.send_help(ctx.command)\n\n @autorole.command(name='set')\n @checks.has_permissions(PermissionLevel.ADMINISTRATOR)\n async def set_(self, ctx, role: discord.Role):\n \"\"\"Sets the default role a member gets when joining.\"\"\"\n\n await self.db.find_one_and_update({'_id': 'autorole-config'}, {'$set': {'rolename': role.name}})\n\n em = discord.Embed(\n title='Autorole',\n url='https://github.com/papiersnipper/modmail-plugins/blob/master/autorole',\n description=f'I will now give {role.mention} to all new members.',\n colour=self.bot.main_color\n )\n\n await ctx.send(embed=em)\n\n @autorole.command(name='give')\n @checks.has_permissions(PermissionLevel.ADMINISTRATOR)\n async def give(self, ctx, role: discord.Role):\n \"\"\"Gives this role to all members of your server.\"\"\"\n\n users = 0\n for member in ctx.guild.members:\n if role.id in [role.id for role in member.roles]:\n continue\n else:\n await member.add_roles(role)\n users = users + 1\n\n em = discord.Embed(\n title='Autorole',\n url='https://github.com/papiersnipper/modmail-plugins/blob/master/autorole',\n description=f'Added {role.mention} for {users} members.',\n colour=self.bot.main_color\n )\n\n await ctx.send(embed=em)\n\n\ndef setup(bot):\n bot.add_cog(Autorole(bot))\n","sub_path":"autorole/autorole.py","file_name":"autorole.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"497498695","text":"from mnSpecFit.Model import Model\nfrom numpy import exp, power\nfrom multiFit.priorGen import *\n\nimport numexpr as ne\n\n\n\n\n\n\n\nclass CPLSG(Model):\n\n def __init__(self):\n\n\n\n def cplsg(x,logA,eFolding):\n eFolding = 10**eFolding\n return ne.evaluate('10.** logA * (x/100.)**(-0.7) * exp(-x*(2+0.7)/eFolding)')\n #val = power(10.,logA)*power(x/300.,index)*exp(-x/eFolding)\n #return val\n\n \n \n self.paramsRanges = [[1.E-10,1.E2,\"J\"],[1.E1,1.E6,\"J\"]]\n \n\n\n def CPLPrior(params, ndim, nparams):\n \n for i in range(ndim):\n params[i] = priorLU[self.paramsRanges[i][-1]](params[i],self.paramsRanges[i][0],self.paramsRanges[i][1])\n \n\n\n \n\n\n self.modName = \"CPLSG\"\n self.model=cplsg\n self.prior=CPLPrior\n self.n_params = 2\n self.parameters = 
[r\"log(N$_{\\rm CPLsg}$)\",r\"$E_{\\rm p}$\"]\n self._modelDict = {\"params\":self.parameters,\\\n \"model\":cplsg\\\n }\n self._composite = False\n","sub_path":"mnSpecFit/models/CPLSG.py","file_name":"CPLSG.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"478351997","text":"from datetime import datetime\nimport pandas as pd\nimport os\nfilename = input(\"Enter filename to sort :\")\n# Sort & resave\ndf = pd.read_csv('../data/'+ filename,skiprows=5,skip_footer=1)\ndf['Day'] = pd.to_datetime(df['Day'])\ndf = df.sort_index(by=['Day'], ascending=[False])\ndf = df.sort_values(by=['Ad group', 'Day','Cost / conv.'], ascending=[0, 1, 1])\nprint(sum(df['Clicks']))\nfilename2 = filename[:-4].replace(' ', '-').lower()\nos.remove('../data/' + filename)\ndf.to_csv('../data/'+ filename2 + '-cleaned.csv', index=False)\n","sub_path":"develop/sort-date-save.py","file_name":"sort-date-save.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"87923167","text":"import datetime\nimport json\nfrom types import GeneratorType as Generator\nfrom collections.abc import Iterable\nfrom ast import literal_eval\nfrom torch import Tensor\nimport numpy as np\n\n\ndef default_json_handler(obj):\n if isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n if isinstance(obj, Tensor):\n return obj.tolist()\n for np_int_type in (np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64):\n if isinstance(obj, np_int_type):\n return int(obj)\n for np_float_type in (np.float32, np.float64):\n if isinstance(obj, np_float_type):\n return float(obj)\n if isinstance(obj, (Generator, Iterable)):\n return tuple(obj)\n raise Exception(f'Cannot dump data type: {type(obj)}')\n\n\ndef save(obj, path, indent=4):\n with open(path, 'w+') as f:\n json.dump(obj, f, indent=indent, default=default_json_handler)\n print(f'Saved {type(obj)} object to {path}')\n\n\ndef save_all(objs, paths, indent=4):\n for i in range(len(objs)):\n save(objs[i], paths[i], indent=indent)\n\n\ndef load(path):\n with open(path) as f:\n obj = json.load(f)\n print(f'Loaded {path} to {type(obj)} object')\n return obj\n\n\ndef load_all(paths):\n return [load(path) for path in paths]\n\n\ndef jsonify(obj):\n try:\n return json.loads(obj)\n except:\n return obj\n\n\ndef key2str(dictionary, original_types=(tuple,)):\n for key in list(dictionary.keys()):\n if isinstance(dictionary[key], dict):\n key2str(dictionary[key], original_types)\n if isinstance(key, original_types):\n dictionary[str(key)] = dictionary[key]\n del dictionary[key]\n\n\ndef str2key(dictionary):\n for key in list(dictionary.keys()):\n if isinstance(dictionary[key], dict):\n str2key(dictionary[key])\n if isinstance(key, str):\n try:\n dictionary[literal_eval(key)] = dictionary[key]\n except:\n continue\n del dictionary[key]\n","sub_path":"utils/json_utils.py","file_name":"json_utils.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"95282697","text":"\"\"\"People Counter.\"\"\"\n\"\"\"\n Copyright (c) 2018 Intel Corporation.\n Permission is hereby granted, free of charge, to any person obtaining\n a copy of this software and associated documentation files (the\n \"Software\"), to deal in the Software without restriction, including\n without limitation the rights to use, 
copy, modify, merge, publish,\n distribute, sublicense, and/or sell copies of the Software, and to\n permit person to whom the Software is furnished to do so, subject to\n the following conditions:\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\nimport os\nimport sys\nimport time\nimport socket\nimport json\nimport cv2\nimport numpy as np\n\nimport logging as log\nimport paho.mqtt.client as mqtt\n\nfrom argparse import ArgumentParser\nfrom inference import Network\n\n# MQTT server environment variables\nHOSTNAME = socket.gethostname()\nIPADDRESS = socket.gethostbyname(HOSTNAME)\nMQTT_HOST = IPADDRESS\n#MQTT_HOST = \"ws://localhost:3000\"\nMQTT_PORT = 3001\nMQTT_KEEPALIVE_INTERVAL = 60\n\ndef build_argparser():\n \"\"\"\n Parse command line arguments.\n\n :return: command line arguments\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser\n\n\ndef connect_mqtt():\n ### TODO: Connect to the MQTT client ###\n client = mqtt.Client()\n client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\n return client\n\n\ndef infer_on_stream(args, client):\n \"\"\"\n Initialize the inference network, stream video to network,\n and output stats and video.\n\n :param args: Command line arguments parsed by `build_argparser()`\n :param client: MQTT client\n :return: None\n \"\"\"\n count_current = 0\n count_last = 0\n count_last_last = 0\n total_count = 0\n duration = 0\n avg_duration = 0\n total_duration = 0\n start_time = 0\n active_person = 0\n net_input_shape = []\n frame_count = 0\n\n # Initialise the class\n infer_network = Network()\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n ### TODO: Load the model through `infer_network` ###\n infer_network.load_model(model=args.model, device=args.device, cpu_extension=args.cpu_extension)\n\n ### TODO: Handle the input stream ###\n cap = cv2.VideoCapture(args.input)\n cap.open(args.input)\n\n # get the required shape for the network\n net_input_shape = infer_network.get_input_shape()\n\n # get the shape of the input image\n width = int(cap.get(3))\n height = int(cap.get(4))\n\n if net_input_shape != [1, 3, 600, 600]:\n #net_input_shape = [1, 3, 600, 600]\n #sometimes gives [1,3] and causes an error, so hard coded shape to match model\n sys.exit(\"Input shape error, forced exit. Please run again until this error does not appear.\")\n\n ### TODO: Loop until stream is over ###\n while cap.isOpened():\n\n ### TODO: Read from the video capture ###\n flag, frame = cap.read()\n frame_count += 1\n\n if not flag:\n #video stream ended, go to end and close out\n break\n\n ### TODO: Start asynchronous inference for specified request ###\n if frame_count%2 == 0: #check every other frame\n ### TODO: Pre-process the image as needed ###\n vid_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))\n #save a copy of the input frame to use on output\n vid_frame_copy = vid_frame\n vid_frame = vid_frame.transpose((2, 0, 1))\n vid_frame = vid_frame.reshape(1, *vid_frame.shape)\n\n infer_network.exec_net(vid_frame)\n\n ### TODO: Wait for the result ###\n if infer_network.wait() == 0:\n\n ### TODO: Get the results of the inference request ###\n results = infer_network.get_output()\n\n # for this model, results should be shape [1, 1, N, 7]\n # N is number of hits, last is a 7 item list [image_id, label, conf, x_min,\n # y_min, x_max, y_max] where label is the predicted class\n\n ### TODO: Extract any desired stats from the results ###\n out_frame, count_current, box = draw_boxes(vid_frame_copy, results, args, net_input_shape[3], net_input_shape[2])\n #out_frame = cv2.putText(out_frame, \"Last Frame Analyzed = \"+str(frame_count), (10, 420), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n\n ### TODO: Calculate and send relevant information on ###\n ### count_current, total_count and duration to the MQTT server ###\n ### Topic \"person\": keys of \"count\" and \"total\" ###\n ### Topic \"person/duration\": key of \"duration\" ###\n\n # This block of code from Mentor Help question 129845, some modifications by me\n # If both last and last_last are equal, positive ID for two 
frames.\n if count_current > count_last and count_last_last == count_last:\n start_time = time.time()\n total_count = total_count + count_current - count_last\n\n #client.publish(\"person\", json.dumps({\"total\": total_count}))\n client.publish(\"person\", json.dumps({\"count\": count_current}))\n\n #out_frame = cv2.putText(out_frame, \"Current Time = \"+str('% 6.2f' % time.time()), (10, 450), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Person Entered Frame = \"+str(count_current), (10, 510), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Total Counted = \"+str(total_count), (10, 540), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n\n # Person duration in the video is calculated if two frames of no detect to account for skipped frame\n if count_current < count_last_last and count_last < count_last_last:\n duration = int(time.time() - start_time)\n total_duration += duration / 11 #frames per second and evaluating only every other frame\n avg_duration = int(total_duration / total_count)\n client.publish(\"person/duration\", json.dumps({\"duration\": avg_duration}))\n\n #out_frame = cv2.putText(out_frame, \"Duration = \"+str('% 6.2f' % duration), (10, 540), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1, cv2.LINE_AA)\n out_frame = cv2.putText(out_frame, \"Average Duration = \" + str('% 4.2f' % avg_duration) + \" seconds.\", (10, 570), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1, cv2.LINE_AA)\n\n # Set a double counter to review two frames at a time\n count_last_last = count_last\n count_last = count_current\n #End block of code from Mentor Help question 129845\n\n\n ### TODO: Send the frame to the FFMPEG server ###\n out_frame = out_frame.copy(order='C')\n out_frame = cv2.resize(out_frame, (width, height))\n np.ascontiguousarray(out_frame, dtype=np.float32)\n sys.stdout.buffer.write(out_frame)\n sys.stdout.flush()\n\n ### TODO: Write an output image if `single_image_mode` ###\n\n #Release the capture and destroy any OpenCV windows\n cap.release()\n cv2.destroyAllWindows()\n\n #Disconnect from MQTT\n client.disconnect()\n\n #Print final numbers for reference\n print(\"Video stream ended.\")\n print(\"Final count was \" + str(total_count))\n print(\"Average Duration was \" + str(avg_duration) + \" seconds.\")\n\ndef draw_boxes(vid_frame, results, args, width, height):\n count = 0\n box = [[0, 0], [0, 0]]\n for i in results[0][0]:\n confidence = i[2]\n is_person = i[1]\n if confidence >= args.prob_threshold and is_person == 1:\n #print(\"The confidence detected is \" + str(confidence))\n count += 1\n xmin = int(i[3] * width)\n ymin = int(i[4] * height)\n xmax = int(i[5] * width)\n ymax = int(i[6] * height)\n cv2.rectangle(vid_frame, (xmin,ymin), (xmax, ymax), (255, 0, 0), 1)\n box = [[xmax, ymax], [xmin, ymin]]\n #print(\"Result = \" + str(i))\n return vid_frame, count, box\n\ndef main():\n \"\"\"\n Load the network and parse the output.\n\n :return: None\n \"\"\"\n # Grab command line args\n args = build_argparser().parse_args()\n #print(\"finished argparser\")\n\n # Connect to the MQTT server\n client = connect_mqtt()\n #print(\"finished mqtt connect\")\n\n # Perform inference on the input stream\n infer_on_stream(args, client)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main_V1.py","file_name":"main_V1.py","file_ext":"py","file_size_in_byte":10325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} 
+{"seq_id":"393866025","text":"import io\nimport os\nimport json\nimport string\nfrom nltk.stem import WordNetLemmatizer\nimport math\n\nsplit_string = \"\"\nvocab_list = []\nDict1 = {}\nDict2 = {}\nDict3 = {}\nDict4 = {}\nfreqcounter = 1\n\ndef formindex(filenum, term):\n if filenum in Dict2:\n if term in Dict2[filenum]:\n Dict2[filenum][term] += 1\n else:\n if term in Dict1:\n Dict1[term] += 1\n else:\n Dict1[term] = 1\n Dict2[filenum][term] = 1\n else:\n Dict2[filenum] = {}\n Dict2[filenum][term] = 1\n if term in Dict1:\n Dict1[term] += 1\n else:\n Dict1[term] = 1\n \ndef clean(split_string):\n for x in range(len(split_string)):\n if ord(split_string[x]) < 97 or ord(split_string[x]) > 122:\n split_string = split_string.replace(split_string[x],' ')\n return split_string\n\ndef fileread(txtDir):\n lemmatizer = WordNetLemmatizer()\n stopword = []\n stopfile = \"Stopword-List.txt\"\n stopFile1 = open(stopfile, \"r+\", encoding=\"utf-8\")\n for line in stopFile1:\n if(len(line) >= 2):\n word = line\n word = clean(word)\n word = word.replace(' ', '')\n stopword.append(word)\n \n for txt in os.listdir(txtDir):\n filenum = txt.split('.')[0] \n filename = txtDir + txt\n textFile = open(filename, \"r+\", encoding=\"utf-8\")\n for my_line in textFile:\n if len(my_line) > 5:\n split_string = my_line.split(\" \")\n for i in range(len(split_string)):\n if len(split_string[i]) >= 1:\n split_string[i] = split_string[i].lower()\n split_string[i] = split_string[i].replace('\\n','')\n split_string[i] = clean(split_string[i])\n split_string[i] = split_string[i].replace(' ','')\n split_string[i] = lemmatizer.lemmatize(split_string[i], pos=\"v\")\n check = False\n for x in stopword:\n if split_string[i] == x:\n check = True\n break\n else:\n continue\n\n if check == False:\n formindex(filenum, split_string[i])\n\ndef FormIDF():\n for key in Dict1:\n Dict3[key] = math.log10(50/Dict1[key])\n \ndef FormTFIDF():\n for key in Dict3:\n for key1 in Dict2:\n for term in Dict2[key1].keys():\n if key == term:\n Dict2[key1][term] *= Dict3[term]\n\ndef EvaluateWeight():\n sum = 0\n temp = {}\n addlist = []\n Dict4 = Dict2.copy()\n for key in Dict4:\n for term in Dict4[key]:\n Dict4[key][term] = Dict4[key][term] ** 2\n \n for key in Dict4:\n for term in Dict4[key]:\n sum = sum + Dict4[key][term]\n sum = sum ** 0.5\n for term in Dict2[key]:\n Dict2[key][term] /= sum\n sum = 0\n \ndef main():\n Dir = \"./ShortStories\\\\\"\n fileread(Dir)\n FormIDF()\n vocab = list(Dict3.keys())\n FormTFIDF()\n EvaluateWeight()\n with open (\"Dictionary.json\" , \"w\") as f: \n f.write(json.dumps(Dict2, sort_keys=False, indent=4))\n with open(\"Vocabulary.json\", \"w\") as f:\n f.write(json.dumps(vocab, sort_keys=False, indent=4))\n with open(\"IDF_Dict.json\", \"w\") as f:\n f.write(json.dumps(Dict3, sort_keys=False, indent=4))\n \nmain()\n","sub_path":"Dict_Create.py","file_name":"Dict_Create.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"545407192","text":"\"\"\"\nModule for generating DIRBS country-level reports.\n\nCopyright (c) 2018 Qualcomm Technologies, Inc.\n\n All rights reserved.\n\n\n\n Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the\n limitations in the disclaimer below) provided that the following conditions are met:\n\n\n * Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n disclaimer.\n\n * 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided with the distribution.\n\n * Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote\n products derived from this software without specific prior written permission.\n\n NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY\n THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nfrom dirbs.config import OperatorConfig\nfrom .base_operator_country import BaseOperatorCountryReport\n\n\nclass CountryReport(BaseOperatorCountryReport):\n \"\"\"Class used to generation country-level reports.\"\"\"\n\n def __init__(self, conn, data_id, config, month, year, country_name, has_compliance_data=False):\n \"\"\"Constructor.\"\"\"\n super(CountryReport, self).__init__(conn, data_id, config, month, year, 'country_report.html',\n OperatorConfig.COUNTRY_OPERATOR_NAME,\n has_compliance_data=has_compliance_data)\n self.country_name = country_name\n\n def gen_report_data(self):\n \"\"\"Overrides BaseOperatorCountryReport.gen_report_data.\"\"\"\n data = self._gen_base_report_data()\n data['country_name'] = self.country_name\n data['mcc_mnc_pairs'] = {op.id: op.mcc_mnc_pairs for op in self.config.region_config.operators}\n return data\n\n @property\n def report_title(self):\n \"\"\"Overrides BaseOperatorCountryReport.report_title.\"\"\"\n return 'DIRBS country report for {0} ({1:02d}/{2:04d})'.format(self.country_name, self.month, self.year)\n","sub_path":"src/dirbs/reports/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"64466694","text":"import unittest # @UnresolvedImport\n\ndef swap(l, i, j):\n temp = l[i]\n l[i] = l[j]\n l[j] = temp\n\ndef insertion_sort(l1):\n for i in xrange(1,len(l1)):\n j,k = i-1, i\n while j >= 0 and l1[j] > l1[k]:\n swap(l1,j, k)\n j, k = j-1, k-1\n return l1\n# insertion_sort([2, 4, 6, 6, 6, 8,-1])\n\n\ndef merge(l1, l2):\n result = []\n add_to_result= result.append\n i,j = 0, 0\n while True:\n if i < len(l1) and j < len(l2):\n if l1[i] < l2[j]:\n add_to_result(l1[i])\n i +=1\n else:\n add_to_result(l2[j])\n j +=1\n elif i < len(l1):\n for k in xrange(i, len(l1)):\n add_to_result(l1[k])\n break\n else:\n for k in xrange(j, len(l2)):\n add_to_result(l2[k])\n break\n return result\n#print merge([1, 2 ,3, 4, 5, 6], [1, 2, 3, 4, 5, 6])\n\n\ndef merge_sort(l1):\n if len(l1) <= 1: return l1\n left = l1[0: len(l1)/2]\n right = l1[(len(l1)/2) : len(l1)]\n return merge(merge_sort(right), merge_sort(left))\n##print merge_sort([9,5, 4, 7 ,6, 2, 6, 1])\n##print 
merge_sort([])\n\n\nclass InsertionSortTests(unittest.TestCase):\n    def test_emptyList(self):\n        test_input = []\n        expected = []\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n\n    def test_lenListIs1(self):\n        test_input = [1]\n        expected = [1]\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n\n    def test_oddLenList(self):\n        test_input = [54,2,6]\n        expected = [2,6,54]\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n\n    def test_evenLenList(self):\n        test_input = [10,8,5,3,1,0]\n        expected = [0,1,3,5,8,10]\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n\n    def test_negativeElements(self):\n        test_input = [-1,-3,-6,-45,-2]\n        expected = [-45,-6,-3,-2,-1]\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n\n    def test_bothNegativeAndPositiveElements(self):\n        test_input = [-10000,-6,7,3,10]\n        expected = [-10000,-6,3,7,10]\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n\n    def test_floatingPointNumbers(self):\n        test_input = [-.1,-.3,-.6,-.45,-.2]\n        expected = [-0.6, -0.45, -0.3, -0.2, -0.1]\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n\nif __name__ == \"__main__\":\n    unittest.main() \n","sub_path":"pythonTest/src/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"553345052","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom config import *\nfrom telegram import *\nimport re\nfrom email.header import decode_header\nfrom parser import TelegramHTMLParser\nimport quopri\nimport hashlib\nimport io\nimport time\nimport os\nimport traceback\n\nbot = Bot(TOKEN)\n\ndef sanitize_fn(text):\n    try:\n        value = quopri.decodestring(text)\n    except Exception as e:\n        traceback.print_exc()\n        value = text\n\n    if \".\" in text:\n        ext = text.split(\".\")[-1]\n    else:\n        ext = \"q\"\n\n    if text.isalnum() and len(text) < 42:\n        return text\n    elif text[:30].isalnum():\n        return text[:30] + \".\" + ext\n    else:\n        return hashlib.md5(text.encode()).hexdigest() + \".\" + ext\n\n\ndef decode(text):\n    return ''.join(\n        t[0].decode() if isinstance(t[0], bytes) else t[0]\n        for t in decode_header(text))\n\ndef safe(text):\n    return decode(text).replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\n\ndef message_content(message):\n    if message.is_multipart():\n        return '\\n\\n'.join(list(map(message_content, message.get_payload())))\n    elif message.get_content_type() == \"text/html\" or message.get_content_type() == \"text/plain\":\n        parser = TelegramHTMLParser()\n        message = message.get_payload(decode=True).decode('utf-8')\n        message = re.sub(r\"(\\r\\n|\\r)\", \"\\n\", message)\n        message = re.sub(r\"\\s+\", \" \", message)\n        message = re.sub(r\"(\\n\\s)+\\n\", \"\\n\", message)\n        message = re.sub(r\"\\n\", \"\", message)\n        parser.feed(message)\n        return parser.output\n        # return safe(message.get_payload(decode=True).decode('utf-8'))\n    else:\n        fn = f\"{int(time.time())}-{sanitize_fn(message.get_filename(failobj='file'))}\"\n        with open(os.path.join(MEDIA_PATH, fn), \"wb\") as f:\n            f.write(message.get_payload(decode=True))\n        return f\"Attachment {message.get_content_type()}, {len(message.get_payload())} bytes, {MEDIA_ROOT_URL}/{fn}\"\n\ndef send_message(message, dkim=False, spf=False):\n    print(message.items())\n    text=f'''\ud83d\udce7 {safe(message.get(\"Subject\", \"\"))}\n{safe(message.get(\"from\", \"\"))} → {safe(message.get(\"To\", \"\"))}\n\n{message_content(message)}\n'''\n    if not dkim:\n        text += \"\\n\\u26a0\\ufe0f DKIM is not verified\"\n    if not spf:\n        text += \"\\n\\u26a0\\ufe0f SPF is not verified\"\n    if len(text)<4096:\n        try:\n            bot.send_message(\n                chat_id=CHAT_ID,\n                text=text,\n                parse_mode=\"HTML\"\n            )\n        except:\n            bot.send_message(\n                chat_id=CHAT_ID,\n                text=text\n            )\n    else:\n        for x in range(0, len(text), 4096):\n            try:\n                bot.send_message(\n                    chat_id=CHAT_ID,\n                    text=text[x:x+4096],\n                    parse_mode=\"HTML\"\n                )\n                time.sleep(0.3)\n            except:\n                bot.send_message(\n                    chat_id=CHAT_ID,\n                    text=text[x:x+4096]\n                )\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"460546636","text":"\"\"\"\ntext: fox quick fox fox ... quick ... quick the fox ... quick\n 0 1 2 3 17 29 30 31 56\nquery: \"quick fox\"\nk = 2\nquick: 1, 17, 29, 56\nfox: 0, 2, 3, 31 # indexes of where the word fox appears\nresult = [1, 29]\nq the number of times quick appears\nf the number of times fox appears\n\"\"\"\n\n\nclass Solution:\n    def __init__(self, text: str):\n\n        self.arr = text.split()\n\n        self._map = {}\n\n        for i, word in enumerate(self.arr):\n            if word not in self._map:\n                self._map[word] = []\n            self._map[word].append(i)\n\n    def process(self, query: str, k):  # assume multi word query\n        words = query.split()  # quick, fox\n        q = len(words)  # number of words in the query, e.g. 2\n\n        result = []\n\n        pointers = [0] * q  # index of the nth words self._map[word] array 0, 0\n\n        firstn = len(self._map[words[0]])  # 4\n        while pointers[0] < firstn:\n\n            for i in range(1, q):\n                last_index = self._map[words[i - 1]][pointers[i - 1]]\n\n                while self._map[words[i]][pointers[i]] < last_index:\n                    pointers[i] += 1\n                    if (\n                        pointers[i] >= len(self._map[words[i]])  # ran off the postings list\n                        or self._map[words[i]][pointers[i]] - last_index > k\n                    ):\n                        return result\n\n            result.append(self._map[words[0]][pointers[0]])\n\n            pointers[0] += 1\n\n        return result\n","sub_path":"company_questions/palantir/palantir.py","file_name":"palantir.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"41013536","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Plot radius vector in spherical coordinates\n\n# Define constants\na = 1.\nl = 0.7*a\n\n# Create a blank canvas\nfig = plt.figure(num='Spherical Coordinates',facecolor='w')\nfig.suptitle('Spherical Coordinates',fontsize=20)\n\nax = fig.add_subplot(111)\n\nax.plot([0,-0.7],[0,-0.7],'k-',lw=0.1)\nax.plot([0,1.5],[0,0],'k-',lw=0.1)\nax.plot([0,0],[0,1.3],'k-',lw=0.1)\n\n# x\nax.annotate('', xy=(-0.7,-0.7), xycoords='data',\\\n            xytext=(0.,0.),textcoords='data', \\\n            arrowprops=dict(arrowstyle=\"->\"))\nax.text(-0.79,-0.79,r'$\\\hat{x}$',fontsize=20)\n\n# y\nax.annotate('', xy=(1.5,0.), xycoords='data',\\\n            xytext=(0.,0.),textcoords='data', \\\n            arrowprops=dict(arrowstyle=\"->\"))\nax.text(1.5,-0.02,r'$\\\hat{y}$',fontsize=20)\n\n# z\nax.annotate('', xy=(0.,1.3), xycoords='data',\\\n            xytext=(0.,0.),textcoords='data', \\\n            arrowprops=dict(arrowstyle=\"->\"))\nax.text(-0.03,1.3,r'$\\\hat{z}$',fontsize=20)\n\n# Plot vector\nax.annotate('', xy=(0.5,0.7), xycoords='data',\\\n            xytext=(0.,0.), textcoords='data', \\\n            arrowprops=dict(arrowstyle=\"->\"))\n\n# Plot vertical and 
projections\nax.plot([0.5,0.5],[0.7,-0.4],'k--')\n\nax.plot([0,0.5],[0.,-0.4],'k--')\nax.plot([0,0.5],[1.1,0.7],'k--')\n\nax.plot([-0.4,0.5],[-0.4,-0.4],'k--')\n\nax.plot([0.5,0.9],[-0.4,0.01],'k--')\n\n# Plot angle theta and label it\nax.annotate('', xy=(0.2638,0.3817), xycoords='data',\\\n xytext=(0.,0.5), textcoords='data', \\\n arrowprops=dict(arrowstyle=\"->\",connectionstyle=\"arc3,rad=-0.3\"))\nax.text(0.11,0.53,r'$\\theta$',fontsize=20)\n\n# Plot angle phi and label it\nax.annotate('', xy=(0.18, -0.14), xycoords='data',\\\n xytext=(-0.15,-0.15), textcoords='data', \\\n arrowprops=dict(arrowstyle=\"->\",connectionstyle=\"arc3,rad=0.3\"))\nax.text(-0.12,-0.3,r'$\\varphi$',fontsize=20)\n\nax.annotate('', xy=(0.5,-0.8), xycoords='data',\\\n xytext=(0.,0.), textcoords='data', \\\n arrowprops=dict(arrowstyle=\"->\",alpha=0.3))\n#\n\n\n\n\n\nplt.axis('off')\nplt.axis('equal')\n\nplt.savefig('spherical_coordinates.png')\n","sub_path":"ClassicalMechanics/hw5/hw5_figures.py","file_name":"hw5_figures.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"256665422","text":"import flask\nimport zeeguu.core\nfrom flask import request\nfrom zeeguu.core.model import RSSFeedRegistration, RSSFeed\nfrom .utils.route_wrappers import cross_domain, with_session\nfrom .utils.json_result import json_result\nfrom . import api\n\nsession = zeeguu.core.db.session\n\nSTART_FOLLOWING_FEED = \"start_following_feed\"\nSTOP_FOLLOWING_FEED = \"stop_following_feed\"\nFOLLOWED_FEEDS = \"followed_feeds\"\nINTERESTING_FEEDS = \"interesting_feeds\"\nRECOMMENDED_FEEDS = \"recommended_feeds\"\n\n\n# ---------------------------------------------------------------------------\n@api.route(f\"/{START_FOLLOWING_FEED}\", methods=(\"POST\",))\n# ---------------------------------------------------------------------------\n@cross_domain\n@with_session\ndef start_following_feed_with_id():\n \"\"\"\n :param: feed_id -- the id of the feed to be followed.\n Start following the feed with the given id\n\n :return: \"OK\" in case of success\n \"\"\"\n if request.form.get(\"source_id\", \"\"):\n feed_id = int(request.form.get(\"source_id\", \"\"))\n else:\n feed_id = int(request.form.get(\"feed_id\", \"\"))\n\n feed_object = RSSFeed.find_by_id(feed_id)\n RSSFeedRegistration.find_or_create(session, flask.g.user, feed_object)\n\n return \"OK\"\n\n\n# ---------------------------------------------------------------------------\n@api.route(f\"/{STOP_FOLLOWING_FEED}\", methods=(\"POST\",))\n# ---------------------------------------------------------------------------\n@cross_domain\n@with_session\ndef stop_following_feed():\n \"\"\"\n A user can stop following the feed with a given ID\n :return: OK / ERROR\n \"\"\"\n\n feed_id = int(request.form.get(\"source_id\", \"\"))\n\n try:\n to_delete = RSSFeedRegistration.with_feed_id(feed_id, flask.g.user)\n session.delete(to_delete)\n session.commit()\n except Exception as e:\n from sentry_sdk import capture_exception\n\n capture_exception(e)\n return \"OOPS. 
FEED AIN'T THERE IT SEEMS (\" + str(e) + \")\"\n\n return \"OK\"\n\n\n# ---------------------------------------------------------------------------\n@api.route(f\"/{FOLLOWED_FEEDS}\", methods=(\"GET\",))\n# ---------------------------------------------------------------------------\n@cross_domain\n@with_session\ndef get_feeds_being_followed():\n \"\"\"\n A user might be following multiple feeds at once.\n This endpoint returns them as a list.\n\n :return: a json list with feeds for which the user is registered;\n every feed in this list is a dictionary with the following info:\n id = unique id of the feed; uniquely identifies feed in other endpoints\n title = \n url = ...\n language = ...\n image_url = ...\n \"\"\"\n registrations = RSSFeedRegistration.feeds_for_user(flask.g.user)\n feed_list = []\n for reg in registrations:\n try:\n feed_list.append(reg.rss_feed.as_dictionary())\n except Exception as e:\n from sentry_sdk import capture_exception\n\n capture_exception(e)\n zeeguu.core.log(str(e))\n\n return json_result(feed_list)\n\n\n# ---------------------------------------------------------------------------\n@api.route(f\"/{INTERESTING_FEEDS}/\", methods=(\"GET\",))\n# ---------------------------------------------------------------------------\n@cross_domain\n@with_session\ndef get_interesting_feeds_for_language_code(language_code):\n \"\"\"\n Get a list of feeds for the given language\n\n :return:\n \"\"\"\n feed_data = []\n for feed in RSSFeed.find_for_language_id(language_code):\n feed_data.append(feed.as_dictionary())\n return json_result(feed_data)\n\n\n# ---------------------------------------------------------------------------\n@api.route(f\"/{RECOMMENDED_FEEDS}/\", methods=(\"GET\",))\n# ---------------------------------------------------------------------------\n@cross_domain\n@with_session\ndef get_non_subscribed_feeds(language_code):\n \"\"\"\n Get a list of feeds for the given language\n\n :return:\n \"\"\"\n feeds = RSSFeedRegistration.non_subscribed_feeds(flask.g.user, language_code)\n feed_data = [feed.as_dictionary() for feed in feeds]\n return json_result(feed_data)\n","sub_path":"zeeguu/api/api/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"608174560","text":"from libs.graphics import *\nfrom time import sleep\nfrom sys import version_info\n\n\nclass GetThePic(GraphWin):\n \"\"\"\n Object inherited from GraphWin\n\n Any height / [var] means how up/down is the item placed, \n larger [var] means more upper displayed, else lower\n \"\"\"\n \n def __init__(self):\n \"\"\"Create window object from parent class with 800p width 700p height\n Then call functions to add elements to the displayed window\n \"\"\"\n self.width = 800\n self.height = 700\n self.mid_width = self.width / 2\n self.mid_height = self.height / 2\n if version_info[0] == 3: # If version is 3.0.0 or above, Use Super\n super(GetThePic, self).__init__(title=\"Hello world\", width=self.width, height=self.height) # gain access to parent class and call init with args. 
Only New-Style Class\n else: # Else refer to base class explicitly\n GraphWin.__init__(self, title=\"Hello world\", width=self.width, height=self.height) # Compatible with Old-Style Class\n # Put stuff on the window\n self.setBackground(\"black\")\n self.time_count_down_1999()\n self.title_text()\n self.time_info()\n self.box_title()\n self.line_separate_title()\n self.display_info_text()\n self.draw_comp_with_shapes()\n self.comp_terminal_text()\n self.hold_ui()\n\n\n def time_count_down_1999(self):\n \"\"\"Countdown timer displayed in the beginning with blinking text\n Starting from 1999/12/31 11:59:50 \n ends at 1900/01/01 00:00:00\"\"\"\n time = \"1999/12/31 11:59:50\"\n count_down = Text(Point(self.mid_width, self.height / 6), time)\n count_down.draw(self)\n for x in range(1, 10):\n time = \"1999/12/31 11:59:5{}\".format(x)\n count_down.setText(time)\n # Blink Effect\n sleep(0.8)\n count_down.setTextColor(\"white\")\n sleep(0.2)\n count_down.setTextColor(\"red\")\n time = \"1900/01/01 00:00:00\"\n count_down.setText(time)\n\n\n def title_text(self):\n \"\"\"Display Event Name (Title)\n \"\"\"\n t = Text(Point(self.mid_width, self.height / 9), \"Year 2000 Problem\")\n t.setSize(20); t.setStyle('bold italic'); t.setFace(\"helvetica\"); t.setFill(\"white\")\n t.draw(self)\n\n\n def time_info(self):\n \"\"\"Display Time Information text about the event\n \"\"\"\n t = Text(Point(self.mid_width, self.height / 3.5), \"\") # Display nothing at beginning. append text later\n t.setSize(16); t.setFill(\"white\");\n t.draw(self);\n self.show_typing_animation(\"Fri Dec 31 UTC 1999\", t)\n\n\n def box_title(self):\n \"\"\"Draw Box around the title\n \"\"\"\n r = Rectangle(Point(215, 40), Point(590, 140))\n r.setOutline(\"grey\")\n r.draw(self)\n\n\n def line_separate_title(self):\n \"\"\"Draw a line between the title and the body content\n \"\"\"\n l = Line(Point(self.width, self.height / 2.5), Point(self.width * 0, self.height / 2.5))\n l.setFill(\"white\")\n l.draw(self)\n\n \n def display_info_text(self):\n t = Text(Point(self.width / 1.4, self.mid_height), \"What is Year 2000 Problem (Y2K)?\\n\")\n t.setSize(16)\n self.set_termina_font_and_draw(t)\n t1 = Text(Point(self.width / 1.4, self.height / 1.6), \"\"\"Y2K is a class of computer bugs related\\n to the formatting and storage of calendar\\n data for dates beginning in the year 2000.\\nProblems were anticipated , and arose, \\nbecause many programs represented four-digit\\nyears with only the final two digits\\n making the year 2000 indistinguishable \\nfrom 1900.\"\"\")\n self.set_termina_font_and_draw(t1)\n\n \n def draw_comp_with_shapes(self):\n \"\"\"Draw a computer like thing on the left down corner\n \"\"\"\n monitor_inner = Rectangle(Point(55.0, 400.0), Point(315.0, 550.0))\n monitor_inner.setOutline(\"white\")\n monitor_inner.setFill(\"black\")\n\n monitor_outline = Rectangle(Point(40.0, 380.0), Point(325.0, 570.0))\n monitor_outline.setOutline(\"white\")\n monitor_outline.setFill(\"black\")\n\n monitor_stand = Polygon(Point(108.0, 571.0), Point(90.0, 600.0), Point(266.0, 600),Point(251.0, 571.0))\n monitor_stand.setOutline(\"white\")\n monitor_stand.setFill(\"black\")\n\n monitor_outline.draw(self)\n monitor_inner.draw(self)\n monitor_stand.draw(self)\n\n\n def comp_terminal_text(self):\n \"\"\"Simulate unix like terminal within the computer screen\n \"\"\"\n # str.ljust is used because i want the text to display left->right\n term_default_text = \"root@usr:~# \"\n t = Text(Point(182.0, 418.0), \"root@usr:~# \".ljust(21, \" \"))\n 
self.set_termina_font_and_draw(t)\n for i in \"date\":\n term_default_text += i\n t.setText(term_default_text.ljust(21, \" \"))\n sleep(0.2)\n\n t2 = Text(Point(182.0, 438.0), \"1999-12-31 11:59:56 UTC\".ljust(21, \" \"))\n self.set_termina_font_and_draw(t2)\n\n t3 = Text(Point(182, 458), \"root@usr:~# \".ljust(21, \" \"))\n self.set_termina_font_and_draw(t3)\n term_default_text = \"root@usr:~# \"\n for i in \"date\":\n term_default_text += i\n t3.setText(term_default_text.ljust(21, \" \"))\n sleep(0.2)\n\n sleep(3)\n\n t4 = Text(Point(182, 478), \"1900-01-01 00:00:00 UTC\".ljust(21, \" \"))\n self.set_termina_font_and_draw(t4)\n t5 = Text(Point(182, 498), \"root@usr:~# \".ljust(21, \" \"))\n self.set_termina_font_and_draw(t5)\n\n\n def set_termina_font_and_draw(self, elem):\n \"\"\"Set the text style & color displayed in the computer\n \n Args:\n elem (Text obj) Text object that contains the text\n \"\"\"\n elem.setFace(\"courier\")\n elem.setFill(\"white\")\n elem.draw(self)\n\n\n def show_typing_animation(self, sentense, elem, pause=0.1):\n \"\"\"Show text with typing animation (show up letter by letter)\n\n Args:\n sentense (str) the sentense you want to show with the animation\n elem (Text Obj) Text object for putting the sentense in\n pause (float) Time pause better letters\n \"\"\"\n for letter in sentense:\n elem.setText(elem.getText() + letter) # Append like function\n sleep(pause)\n\n\n def hold_ui(self):\n \"\"\"Wait for user to click on windows to exit the program\n \"\"\"\n try:\n self.getMouse()\n except:\n pass\n self.close() # Close Window (Exits program)\n\n\nGetThePic() # call GetThePic.__init__ (show poster)\n","sub_path":"Computer Sci I/get_the_pic.py","file_name":"get_the_pic.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"25836289","text":"#!/usr/bin/env python\n\nfrom constants import Task\nfrom dag import WaterPlantsDag, TakeMedicationDag, WalkDogDag\nfrom dag import WaterPlantsWithLocationDag, TakeMedicationWithLocationDag, WalkDogWithLocationDag\n\n# Decode item numbers are based on Estimote Pairings and Script 2.0\n# Experiment Date: 2018-02-13\n\nclass TaskToDag(object):\n\n mapping = {\n Task.WATER_PLANTS: (WaterPlantsDag, WaterPlantsWithLocationDag),\n Task.TAKE_MEDS: (TakeMedicationDag, TakeMedicationWithLocationDag),\n Task.WALK_DOG: (WalkDogDag, WalkDogWithLocationDag)\n }\n\nclass Items(object):\n\n encode = {\n 'W': (0, 'water_can', [Task.WATER_PLANTS]),\n 'S': (1, 'sink_tap', [Task.WATER_PLANTS, Task.TAKE_MEDS]),\n 'P1': (2, 'windowsill_plant', [Task.WATER_PLANTS]),\n 'P2': (3, 'coffee_table_plant', [Task.WATER_PLANTS]),\n 'P3': (4, 'side_table_plant', [Task.WATER_PLANTS]),\n 'U': (5, 'umbrella', [Task.WALK_DOG]),\n 'L': (6, 'leash', [Task.WALK_DOG]),\n 'K': (7, 'keys', [Task.WALK_DOG]),\n 'D': (8, 'dog', [Task.WALK_DOG]),\n 'DR': (9, 'door', [Task.WALK_DOG]),\n 'F': (10, 'food', [Task.TAKE_MEDS]),\n 'C': (11, 'cup', [Task.TAKE_MEDS]),\n 'M': (12, 'medication', [Task.TAKE_MEDS]),\n 'CH': (13, 'chair', [Task.TAKE_MEDS]),\n 'G': (14, 'garbage', [Task.TAKE_MEDS]),\n 'PL': (15, 'pills', [Task.TAKE_MEDS]),\n }\n\n decode = {\n #Estimotes\n 'EST323': 'U',\n 'EST324': 'U',\n 'EST321': 'L',\n 'EST322': 'L',\n 'EST319': 'K',\n 'EST320': 'K',\n 'EST327': 'D',\n 'EST328': 'D',\n 'EST325': 'DR',\n 'EST326': 'DR',\n 'D001': 'DR', #Smart Home sensor\n\n 'EST317': 'F',\n 'EST318': 'F',\n 'EST311': 'C',\n 'EST312': 'C',\n 'EST313': 'M',\n 'EST314': 'M',\n 'EST329': 
'G',\n 'EST330': 'G',\n 'D011': 'G', #Smart Home sensor\n 'EST301': 'CH',\n 'EST310': 'CH',\n\n 'EST308': 'W',\n 'EST309': 'W',\n 'EST302': 'P3',\n 'EST303': 'P3',\n 'EST306': 'P1',\n 'EST307': 'P1',\n 'EST304': 'P2',\n 'EST305': 'P2',\n 'EST315': 'S',\n 'EST316': 'S',\n\n #Old Sensor\n 'EST096': 'U',\n 'EST097': 'U',\n 'EST073': 'L',\n 'EST074': 'L',\n 'EST030': 'K',\n 'EST123': 'K',\n 'EST122': 'D',\n 'EST100': 'D',\n 'EST071': 'DR',\n 'EST023': 'DR',\n\n 'EST098': 'F',\n 'EST099': 'F',\n 'EST079': 'C',\n 'EST080': 'C',\n 'EST089': 'M',\n 'EST090': 'M',\n 'EST075': 'G',\n 'EST076': 'G',\n 'EST011': 'CH',\n 'EST078': 'CH',\n\n 'EST022': 'W',\n 'EST124': 'W',\n 'EST024': 'P3',\n 'EST086': 'P3',\n 'EST087': 'P1',\n 'EST088': 'P1',\n 'EST083': 'P2',\n 'EST084': 'P2',\n 'EST093': 'S',\n 'EST121': 'S',\n }\n\nclass Locations(object):\n\n encode = {\n 'L': (0, 'living_entertainment', [Task.WALK_DOG, Task.TAKE_MEDS, Task.WATER_PLANTS]),\n 'LD': (1, 'living_dining', [Task.WALK_DOG, Task.TAKE_MEDS, Task.WATER_PLANTS]),\n 'K': (2, 'kitchen', [Task.TAKE_MEDS, Task.WATER_PLANTS]),\n 'KH': (3, 'kitchen_hallway', [Task.WALK_DOG]),\n 'H': (4, 'hallway', [Task.WALK_DOG])\n }\n\n decode = {\n #Ambient sensors\n\n #Living room left side\n 'M002': 'L', 'M003': 'L',\n 'M006': 'LD', 'M007': 'LD',\n\n #Living room to Dining room\n 'M008': 'LD',\n\n #Dining room\n 'M009': 'LD', 'M010': 'LD',\n 'M013': 'LD', 'M014': 'LD',\n\n #Kitchen\n 'M016': 'K', 'M017': 'K', 'M018': 'K',\n\n #Kitchen window\n 'M051': 'K',\n\n #Kitchen hallway\n 'M019': 'KH',\n\n #Hallway\n 'M023': 'H', 'M021': 'H',\n 'M022': 'H', 'M024': 'H', 'M025': 'H',\n 'M060': 'H', 'M059': 'H', 'M058': 'H', 'M057': 'H', 'M056': 'H',\n 'M055': 'H', 'M054': 'H'\n }\n","sub_path":"src/adl_error_detection/src/adl/util/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"562857970","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport sys, os, signal\n\n# pkill signal values: https://linux.die.net/Bash-Beginners-Guide/sect_12_01.html\n\nbashCommand = \"sudo pkill -2 -f \"\nscriptToKill = \"clock.py\"\n\nif len(sys.argv) > 1:\n scriptToKill = sys.argv[1]\n\nfullCommand = bashCommand + scriptToKill\n\nprint(fullCommand)\n\n#print(str(subprocess.call(\"ps aux | grep 'clock.py'\", shell=True)))\n\nsubprocess.call(fullCommand, shell=True)\n\n# Catching KeyboardInterrupt in Python during program shutdown\n# https://stackoverflow.com/questions/21120947/catching-keyboardinterrupt-in-python-during-program-shutdown\n\n# https://stackoverflow.com/questions/13024532/simulate-ctrl-c-keyboard-interrupt-in-python-while-working-in-linux","sub_path":"utils/killAppProc.py","file_name":"killAppProc.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"613366333","text":"import numpy as np\nimport csv\nfrom scipy import stats\nfrom scipy import signal\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\n\n\n# Problem 1\n\n# Bits and number of bits\nN = input(\"Enter number of bits (min 50): \")\nN = int(N)\nbits = [0]*N\n\n# Get bits from csv\nbits = np.genfromtxt('bits10k.csv', delimiter=',')\nbits = bits.astype(int)\n\n# Operational frequency\nf = 5000 \n\n# Period\nT = 1/f \n\n# Number of samplings\np = 140\n\n# Points per period\ntp = np.linspace(0, T, p)\n\n# sin(2piftp)\nsin = np.sin(2*np.pi * f * tp)\n\n# sin waveform\nplt.plot(tp, 
sin)\nplt.xlabel('Time / s')\nplt.savefig(\"images/wave.png\")\n\n# Sampling frequency\nfs = p/T \n\n# Tx linspace\nt = np.linspace(0, N*T, N*p)\n\n# initiate signal\nsign = np.zeros(t.shape)\n\n# BPSK modulated signal\nfor k, b in enumerate(bits):\n    if b == 1:\n        sign[k*p:(k+1)*p] = sin\n    else:\n        sign[k*p:(k+1)*p] = -sin \n\n# Plot the first few modulated bits\npb = 10\nplt.figure()\nplt.plot(sign[0:pb*p]) \nplt.savefig(\"images/Tx.png\")\n\n\n\n\n# Problem 2 \n\n# Instantaneous power\nPinst = sign**2\n\n# Average power\nPs = integrate.trapz(Pinst, t) / (N * T)\nprint(\"Medium power: \", Ps, ' W')\n\n\n\n\n# Problem 3\n\n# get SNR lower and upper limit, then call function get_noise\nSNR_L = input(\"Enter SNR lower limit: \")\nSNR_U = input(\"Enter SNR upper limit: \")\niterate = int(SNR_U) - int(SNR_L)\nRx_list = [[]*N for i in range(iterate + 1)]\n\n# get_noise: Function that plots first 10 bits of sign + noise for each SNR \ndef get_noise(iterate, SNR_L, Rx_list):\n    SNR = int(SNR_L)\n    for i in range(iterate + 1):\n        Pn = Ps / (10**(SNR / 10)) # Noise and given sign's power\n        sigma = np.sqrt(Pn) # Noise's standard dev\n        ruido = np.random.normal(0, sigma, sign.shape)\n        Rx_list[i] = sign + ruido # Channel; index by loop counter so SNR_L != 0 works\n        pb = 10\n        plt.figure()\n        plt.title('Signal with white noise, SNR: '+ str(SNR))\n        plt.xlabel('Time (s)')\n        plt.ylabel('Amplitude')\n        plt.plot(Rx_list[i][0:pb*p])\n        plt.savefig(\"images/Rx\" + str(SNR) + '.png') # plots first 10 bits of signal + noise\n        SNR += 1\n\nget_noise(iterate, SNR_L, Rx_list)\n\n\n# Problem 4\n\n# Before noise\nfw, PSD = signal.welch(sign, fs, nperseg=1024)\nplt.figure()\nplt.semilogy(fw, PSD)\nplt.title('Modulated signal without noise')\nplt.xlabel('Frequency / Hz')\nplt.ylabel('Power spectral density / V**2/Hz')\nplt.savefig(\"images/welch_pre_noise.png\")\n\n\n# Post noise\nSNR_W = int(SNR_L)\nfor i in range(iterate + 1): \n    fw, PSD = signal.welch(Rx_list[i], fs, nperseg=1024)\n    plt.figure()\n    plt.semilogy(fw, PSD)\n    plt.title('Signal after the noisy channel, SNR: '+ str(SNR_W))\n    plt.xlabel('Frequency / Hz')\n    plt.ylabel('Power spectral density / V**2/Hz')\n    plt.savefig(\"images/welch\" + str(SNR_W) + '.png')\n    SNR_W += 1\n\n\n\n# Problem 5\n\nBER_V = []\n# Pseudo-energy from the original wave\nEs = np.sum(sin**2)\n\n# Initiate bit vector with zeros\nbitsRx = np.zeros(bits.shape)\n\n# Signal decodification by energy detection\nSNR_F = int(SNR_L)\nfor i in range(iterate + 1):\n    for k, b in enumerate(bits):\n        # Inner product of the two waveforms\n        Ep = np.sum(Rx_list[i][k*p:(k+1)*p] * sin) \n        if Ep > Es/2:\n            bitsRx[k] = 1\n        else:\n            bitsRx[k] = 0\n    err = np.sum(np.abs(bits - bitsRx))\n    BER = err/N\n    BER_V.append(BER)\n    print('There are {} errors within {} bits at an SNR of {}dB, for a bit error rate of {}'.format(err, N, SNR_F, BER))\n    SNR_F += 1\n\n\n\n# Problem 6\n\n# SNR_V list contains SNR values\nSNR_V = [*range(int(SNR_L), int(SNR_U) + 1, 1)]\nplt.figure()\nplt.scatter(SNR_V, BER_V)\nplt.xlabel('SNR (dB)')\nplt.ylabel('BER') \nplt.grid(axis='y', alpha=0.75) \nplt.title('Error rate')\nplt.savefig(\"images/BERvSRN.png\")\n","sub_path":"src/tarea4.py","file_name":"tarea4.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"358203980","text":"from django.urls import path, include\n\nfrom . 
import views\nfrom .views import *\n\nurlpatterns = [\n    path('', MainPageView.as_view(), name='home'),\n    path('category/posts/', PostsListView.as_view(), name='category-posts'),\n    path('categories/', CategoryListView.as_view(), name='categories'),\n    path('/', MainPageView.as_view(), name='list'),\n    path('search', SearchListView.as_view(), name='search'),\n    path('post-detail/<int:pk>/', PostDetailView.as_view(), name='detail'),\n    path('product/create/', PostCreateView.as_view(), name='create-post'),\n    path('product/update/<int:pk>/', PostUpdateView.as_view(), name='update-post'),\n    path('product/delete/<int:pk>/', PostDeleteView.as_view(), name='delete-post'),\n    path('post-detail/<int:pk>/comment/', AddCommentView.as_view(), name='add_comment')\n\n]","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"180786552","text":"# -*- coding: utf-8 -*-\nfrom sys import argv, version\nif version < \"3\":\n    from io import open\n    from commands import getstatusoutput as runcommand\nelse:\n    from subprocess import getoutput as runcommand\nimport os\nimport re\nimport pypandoc\nimport datetime\n\n\n\ndef getRelativePathOfPermittedFilesOrDirs(path):\n    # print('current work path:', path)\n    length = len(path)\n    filesordirs = list()\n    for parent, dirnames, filenames in os.walk(path):\n        for dirname in dirnames:\n            # print(os.path.join(parent[length+1:],dirname))\n            # print(dirname)\n            # filesordirs.append(os.path.join(parent[length+1:],dirname) + '/')\n            pass\n        for filename in filenames:\n            # print(os.path.join(parent[length+1:],filename))\n            filesordirs.append(os.path.join(parent[length+1:],filename))\n    # print(filesordirs)\n    return filesordirs\n\n\ndef readIgnorelist(file=\".gitlogignore\"):\n    ignoredFileOrDirs = list()\n    try:\n        with open(file,\"rb+\") as f:\n            while True:\n                line = f.readline()\n                if line == '':\n                    # print(\"end of file\")\n                    break\n                if line == '\\n':\n                    # print(\"blank line\")\n                    continue\n                if line.startswith('#'):\n                    # print(\"comment line\")\n                    continue\n                line = line.replace('*','[^./]*').replace('\\\\','\\\\\\\\')[0:-1]\n                # print(line)\n                ignoredFileOrDirs.append(line)\n                # yield line\n        # print(ignoredFileOrDirs)\n        return ignoredFileOrDirs\n    except IOError as err:\n        print(\"未找到.gitlogignore文件\")\n\n\ndef toWord():\n    pass\n\n\ndef countGitCode(since=0,until=0):\n    paths = list()\n    unpermittedPath = list()\n    RelativePathOfPermittedFilesOrDirs = getRelativePathOfPermittedFilesOrDirs(os.getcwd())\n    ignoredFileOrDirs = readIgnorelist()\n    for fd in RelativePathOfPermittedFilesOrDirs:\n        flag = True\n        for ig in ignoredFileOrDirs:\n            # print(\"filedir:\",fd)\n            # print(\"reexp:\",ig)\n            # print(\"re.search():\",re.search(ig,fd))\n            if re.search(ig,fd):\n                unpermittedPath.append(fd)\n                flag = False\n                break\n        if flag:\n            paths.append(fd)\n    # print(\"unpermittedPath:\", unpermittedPath)\n    paths = \" \".join(paths)\n    # paths = \" \".join(getRelativePathOfPermittedFilesOrDirs(os.getcwd()))\n    command_getauthors = \"git log --pretty='%aN' | sort | uniq\"\n    authors = runcommand(command_getauthors)\n    if isinstance(authors,tuple):\n        authors = authors[1]\n    authors = authors.split('\\n')\n    template_command_since_until = \"git log --author='{0}' --pretty=tformat: --since={1} --until={2}.0am --shortstat -- {3}\"\n    template_command_since = \"git log --author='{0}' --pretty=tformat: --since={1} --shortstat -- {2}\"\n    template_command = \"git log --author='{0}' --shortstat -- {1}\"\n    template_output = 
\"用户:{0},插入{1}行,删除{2}行,共计新增{3}行\"\n template_markdown = \"\"\"\n | 开始时间{0} | 结束时间{1} | Cool |\n | ------------- |:-------------:| -----:|\n | col 3 is | right-aligned | $1600 |\n | col 2 is | centered | $12 |\n | zebra stripes | are neat | $1 |\n \"\"\"\n template_html=\"\"\"{0}\"\"\"\n template_html_table = \"\"\"\n \n \"\"\"\n template_html_table_singleline = \"\"\"\"\"\"\n for author in authors:\n if since == 0 and until == 0:\n command_getcounts = template_command.format(author, paths)\n elif until == 0:\n command_getcounts = template_command_since.format(author, since, paths)\n else:\n command_getcounts = template_command_since_until.format(author, since, until, paths)\n print(command_getcounts)\n insertionsAndDeletions = runcommand(command_getcounts)\n if isinstance(insertionsAndDeletions,tuple):\n insertionsAndDeletions = insertionsAndDeletions[1]\n print(insertionsAndDeletions)\n insertions = re.findall(r\", (\\d*) insertion[s]?\\(\\+\\)\",insertionsAndDeletions)\n total_insertation = sum([int(i) for i in insertions])\n deletions = re.findall(r\", (\\d*) deletion[s]?\\(\\-\\)\",insertionsAndDeletions)\n # print(deletions)\n total_deletion = sum([int(i) for i in deletions])\n different = total_insertation - total_deletion\n # print(template_output.format(author, total_insertation, total_deletion, different))\n html_table_singleline = template_html_table_singleline.format(author, total_insertation, total_deletion, different)\n # print(html_table_singleline)\n template_html_table += html_table_singleline\n table = template_html_table.format(since, until) + \"
开始时间{0}结束时间{1}
用户插入删除新增
{0}{1}{2}{3}
\"\n html = template_html.format(table)\n # print(html)\n project_name = re.split(r\"[\\\\/]\",os.getcwd())[-1]\n print(\"project_name:\",project_name)\n pypandoc.convert(html, \"docx\",\n outputfile= project_name + \"-linesCount-\" + str(datetime.datetime.now().strftime(\"%y%m%d%H%M%S\")) + \".docx\", format=\"html\")\n\nif __name__ == \"__main__\":\n if len(argv)>3:\n print(\"too many args.\")\n exit()\n elif len(argv) == 1:\n countGitCode()\n elif len(argv) == 2:\n countGitCode(argv[1])\n elif len(argv) == 3:\n countGitCode(argv[1], argv[2])\n","sub_path":"countgitcode27.py","file_name":"countgitcode27.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"174041133","text":"def mix_index_tuple(row, column_ats):\n\treturn tuple(row[at] for at in column_ats)\n\ndef update_table_indices(table_name):\n\ttable = Context.get_table(table_name)\n\tindices = Context.get_indices()\n\tkeys = filter(lambda idx: indices[idx]['table'] == table_name, indices)\n\tfor key in keys:\n\t\tupdate_index(key)\n\ndef update_index(index_name):\n\tindex = Context.get_index(index_name)\n\ttable = Context.get_table(index['table'])\n\tcontent = table['content']\n\tcolumn_ats = [table['column_list'].index(column) for column in index['columns']]\n\tsort = sorted(range(len(content)), key = lambda k: mix_index_tuple(content[k], column_ats))\n\tindex['content'] = sort\n","sub_path":"ABKR/IndexHandler.py","file_name":"IndexHandler.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"464001630","text":"# coding=utf-8\nfrom __future__ import unicode_literals\nimport itertools\nimport operator\nimport re\nimport requests\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.template.loader import render_to_string\nfrom django.views.generic import TemplateView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView\nfrom django.http import \\\n Http404, HttpResponseRedirect, HttpResponse, HttpResponseBadRequest\nfrom django.urls import reverse\n\nfrom .models import Grade, Student, FieldValue, AuthCode, Vote\nfrom .forms import StudentCreateForm, FieldValueForm, SendMailForm\n\n\nre_search = re.compile(r'\\w{2,}', re.U)\n\n\ndef get_data(auth_code):\n \"\"\" Метод получающий данные из сервиса авторизации\"\"\"\n r = requests.post(\"http://auth.alumni57.ru/api/v1/check_code\", data={'code': auth_code})\n data = r.json()\n if data['status'] == 'ok':\n data['status'] = 'valid'\n if data['status'] == 'disabled':\n data['status'] = 'revoked'\n return data\n #return { # Заглушка\n # 'full_name': 'Заглушкова Заглушка',\n # 'cross_name': 'Заглушкова Заглушка 1890A',\n # 'year': 1890,\n # 'letter': 'А',\n # 'status': 'valid'\n #}\n\ndef escape_code(code):\n if code:\n fragments = code.split('-')\n fragments[-1] = 'x' * (len(fragments[-1]) - 4) + fragments[-1][-4:]\n return '-'.join(fragments)\n return code\n\n\ndef auth_code_login(request):\n if request.method == 'POST':\n auth_code = request.POST.get('auth_code', '')\n request.session['auth_code'] = auth_code\n request.session['display_code'] = escape_code(auth_code)\n if auth_code: # иначе анонимус\n data = get_data(auth_code)\n if data['status'] != 'valid':\n del request.session['auth_code']\n del 
request.session['display_code']\n if data['status'] == 'not_found':\n return HttpResponse(status=404)\n return HttpResponse(status=403)\n g = Grade.objects.filter(\n graduation_year=data['year'],\n letter=data['letter'],\n ).first()\n if g:\n s = Student.objects.filter(\n name=data['full_name'],\n main_grade_id=g.pk,\n ).first()\n else:\n s = None\n defaults = {\n 'owner': s,\n 'cross_name': data['cross_name'],\n 'status': data['status'],\n 'revoked_at': data['disabled_at'],\n }\n if s:\n defaults['owner_id'] = s.pk\n a, created = AuthCode.objects.get_or_create(\n code=auth_code, defaults=defaults)\n if not created:\n a.status = data['status']\n a.cross_name = data['cross_name']\n a.revoked_at = data['disabled_at']\n if s:\n a.owner_id = s.pk\n a.save()\n if s:\n request.session['student_id'] = s.pk\n elif 'student_id' in request.session:\n del request.session['student_id']\n return HttpResponse(request.session['display_code'])\n if 'auth_code' in request.session:\n return HttpResponse()\n return HttpResponse(status=403)\n\n\nclass AlphabetView(TemplateView):\n template_name = 'core/alphabet.jade'\n\n def get_context_data(self, **kwargs):\n data = super(AlphabetView, self).get_context_data(**kwargs)\n data['characters'] = 'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЫЭЮЯ'\n return data\n\n\nclass GradeListView(ListView):\n model = Grade\n template_name = 'core/grade_list.jade'\n\n def get_context_data(self, **kwargs):\n data = super(GradeListView, self).get_context_data(**kwargs)\n qs = data['object_list']\n result = []\n for g, i in itertools.groupby(qs, key=lambda x: x.graduation_year):\n result.append((g, list(i)))\n data['grades'] = result\n return data\n\n\nclass BaseStudentListView(ListView):\n model = Student\n\n def get_queryset(self):\n qs = super(BaseStudentListView, self).get_queryset()\n qs = qs.prefetch_related('modifications')\n return qs\n\n\nclass StudentListView(BaseStudentListView):\n template_name = 'core/student_list.jade'\n paginate_by = 100\n\n def get_paginate_by(self, queryset):\n if self.year:\n return None\n elif self.grade_id:\n return None\n elif self.char:\n return None\n return super(StudentListView, self).get_paginate_by(queryset)\n\n def get(self, request, *args, **kwargs):\n self.grade_id = self.request.GET.get('grade_id')\n if self.grade_id and not Grade.objects.filter(id=self.grade_id).exists():\n raise Http404()\n self.year = self.request.GET.get('year')\n self.char = self.request.GET.get('char')\n return super(StudentListView, self).get(request, *args, **kwargs)\n\n def get_queryset(self):\n qs = super(StudentListView, self).get_queryset()\n query = self.request.GET.get('query')\n if query:\n qfv = []\n qname = []\n for i in re_search.findall(query):\n qfv.append(\n Q(modifications__field_value__icontains=i) &\n ~Q(modifications__status=FieldValue.STATUS_DELETED)\n )\n qname.append(Q(name__icontains=i))\n if qfv and qname:\n qs = qs.filter(\n reduce(operator.and_, qfv) | reduce(operator.and_, qname)\n )\n qs = qs.distinct()\n elif self.char:\n qs = qs.filter(name__startswith=self.char)\n elif self.grade_id:\n qs = qs.filter(main_grade_id=self.grade_id)\n elif self.year:\n qs = qs.filter(main_grade__graduation_year=self.year)\n if query or self.year:\n qs = qs.order_by('-main_grade__graduation_year', 'main_grade__letter', 'name')\n else:\n qs = qs.order_by('name')\n return qs.prefetch_related('main_grade', 'main_grade__teachers')\n\n def get_context_data(self, **kwargs):\n context_data = super(StudentListView, self).get_context_data(**kwargs)\n if self.grade_id:\n 
context_data['grade'] = Grade.objects.get(id=self.grade_id)\n            if not self.request.GET.get('query'):\n                context_data['show_teachers'] = True\n        context_data['year'] = self.year\n        context_data['char'] = self.char\n        res = []\n        qs = context_data['object_list']\n        if self.char:\n            for g, i in itertools.groupby(qs, key=lambda x: x.name[0]):\n                l = list(i)\n                res.append(('Буква ' + g.upper(), l))\n            context_data['show_grade'] = True\n        else:\n            for g, i in itertools.groupby(qs, key=lambda s: s.main_grade.pk):\n                l = list(i)\n                g = l[0].main_grade\n                res.append((g, l))\n        context_data['object_list'] = res\n        return context_data\n\n\nclass SuggestListView(ListView):\n    model = FieldValue\n\n    def get_queryset(self):\n        query = self.request.GET.get('query', '')\n        self.query = re_search.findall(query)\n\n        qs = super(SuggestListView, self).get_queryset()\n\n        if self.query:\n            qu = [\n                ~Q(status=FieldValue.STATUS_DELETED),\n                ~Q(field_name=FieldValue.FIELD_EMAIL),\n                ~Q(field_name=FieldValue.FIELD_SOCIAL_FB),\n                ~Q(field_name=FieldValue.FIELD_SOCIAL_VK),\n            ]\n\n            for q in self.query:\n                qu.append(Q(field_value__icontains=q))\n\n            qu = reduce(operator.and_, qu)\n\n            qs = qs.filter(qu) \\\n                .values_list('field_value', flat=True) \\\n                .distinct()\n        else:\n            qs = qs.none()\n\n        return qs\n\n    def render_to_response(self, context, **response_kwargs):\n        if self.query:\n            data = list(context['object_list'])\n        else:\n            data = []\n\n        if self.query and self.request.GET.get('students') in ('1', 'true'):\n            qu = []\n\n            for q in self.query:\n                qu.append(Q(name__icontains=q))\n\n            qu = reduce(operator.and_, qu)\n\n            qs = Student.objects.filter(qu) \\\n                .values_list('name', flat=True) \\\n                .distinct()[:30]\n\n            data = list(qs) + data\n        return JsonResponse({\n            'data': data,\n        })\n\n\nclass StudentDetailView(DetailView):\n    template_name = 'core/student_detail.jade'\n    model = Student\n\n    def get_context_data(self, **kwargs):\n        context_data = super(StudentDetailView, self).get_context_data(**kwargs)\n\n        grouped_modifications_iterator = itertools.groupby(\n            self.object.modifications.order_by('field_name'),\n            lambda modification: modification.field_name\n        )\n        order = [i[0] for i in FieldValue.STATUS_CHOICES]\n        key = lambda x: (order.index(x.status), -x.votes)\n        modifications = dict(\n            (field_name, sorted(field_values, key=key))\n            for field_name, field_values\n            in grouped_modifications_iterator\n        )\n        context_data['grouped_modifications'] = modifications\n        context_data['grade'] = self.object.main_grade\n        context_data['field_types'] = FieldValue.EDITABLE_FIELDS\n        return context_data\n\n\nclass StudentCreateView(CreateView):\n    template_name = 'core/student_form.jade'\n    model = Student\n    form_class = StudentCreateForm\n\n    def get_initial(self):\n        \"\"\"\n        If the grade id is present in the url, add its year and letter to the initial data\n        \"\"\"\n        initial = super(StudentCreateView, self).get_initial()\n        if 'grade_id' in self.kwargs:\n            try:\n                grade = Grade.objects.get(id=self.kwargs.get('grade_id'))\n            except Grade.DoesNotExist:\n                pass\n            else:\n                initial['graduation_year'] = grade.graduation_year\n                initial['grade_letter'] = grade.letter\n        return initial\n\n    def form_valid(self, form):\n        self.object = form.save(commit=False)\n\n        # Attach the auth code, creating a record for it in the codes table\n        auth_code = self.request.session.get('auth_code')\n        if auth_code:\n            author_code = AuthCode.objects.get_by_code(auth_code)\n            self.object.author_code = author_code\n\n        self.object.save()\n        return HttpResponseRedirect(self.get_success_url())
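\n\n# Side note (added): itertools.groupby, as used by the list views above, only\n# groups *consecutive* items with equal keys, hence the explicit ordering of the\n# querysets, e.g. [(k, ''.join(g)) for k, g in itertools.groupby('AABBA')]\n# yields [('A', 'AA'), ('B', 'BB'), ('A', 'A')].\n\n\nclass FieldValueCreateView(CreateView):\n    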
template_name = 'core/fieldvalue_form.jade'\n    model = FieldValue\n    form_class = FieldValueForm\n\n    def form_valid(self, form):\n        self.object = form.save(commit=False)\n\n        # Attach the edit to the student using the id from the url\n        student_id = self.kwargs.get('pk')\n        exists = FieldValue.objects.filter(\n            target_id=student_id,\n            field_name=self.object.field_name,\n            field_value__iexact=self.object.field_value,\n        ).exists()\n        if exists:\n            return HttpResponseRedirect(self.get_success_url())\n        try:\n            self.object.target = Student.objects.get(id=student_id)\n        except Student.DoesNotExist:\n            raise Http404()\n\n        # Attach the auth code, creating a record for it in the codes table\n        auth_code = self.request.session.get('auth_code')\n        if auth_code:\n            auth_code = AuthCode.objects.get_by_code(auth_code)\n            self.object.author_code = auth_code\n\n        self.object.save()\n        vote = Vote(field_value=self.object,\n                    value=Vote.VOTE_ADDED)\n        if auth_code:\n            vote.author_code = auth_code\n        vote.save()\n        return HttpResponseRedirect(self.get_success_url())\n\n    def form_invalid(self, form):\n        return HttpResponseBadRequest(form.errors.as_json(), content_type='application/json')\n\n    def get_success_url(self):\n        return reverse('student-detail', args=[self.kwargs['pk']])\n\n\ndef handle_vote(request, pk, vote_type):\n    if request.method != 'POST':\n        return HttpResponseBadRequest()\n    obj = Vote()\n\n    # Id of the FieldValue being voted on\n    try:\n        obj.field_value = FieldValue.objects.get(id=pk)\n    except FieldValue.DoesNotExist:\n        raise Http404()\n\n    # The vote type for the FieldValue\n    if vote_type in (Vote.VOTE_UP, Vote.VOTE_DOWN, Vote.VOTE_TO_DEL):\n        obj.value = vote_type\n    else:\n        return HttpResponseBadRequest()\n\n    # Attach the auth code, creating a record for it in the codes table\n    auth_code = request.session.get('auth_code')\n    if auth_code:\n        author_code = AuthCode.objects.get_by_code(auth_code)\n        obj.author_code = author_code\n\n    if Vote.objects.filter(\n            field_value_id=obj.field_value.pk,\n            value=obj.value,\n            author_code_id=obj.author_code.pk).exists():\n        return HttpResponse(status=406)\n\n    obj.save()\n    return HttpResponseRedirect(\n        reverse('student-detail', kwargs={\n            'pk': str(obj.field_value.target_id)}))\n\n\nclass SendMailView(CreateView):\n    template_name = 'core/sendmail_form.jade'\n    model = FieldValue\n    form_class = SendMailForm\n\n    def form_valid(self, form):\n        # The FieldValue holding the email address\n        try:\n            email = self.get_object().field_value\n        except FieldValue.DoesNotExist:\n            raise Http404()\n\n        message = render_to_string(\n            'mail.txt', {\n                'message': form.cleaned_data['message'],\n                'author_code': self.get_object().author_code,\n            })\n\n        send_mail(\n            form.cleaned_data['subject'],\n            message,\n            settings.EMAIL_FROM,\n            [email],\n        )\n\n        return HttpResponseRedirect(self.get_success_url())\n\n    def get_success_url(self):\n        return reverse('student-detail', kwargs={\n            'pk': str(self.get_object().target_id)\n        })\n\n\nclass QAView(TemplateView):\n    template_name = 'core/qa.jade'\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}{"seq_id":"159509950","text":"from scipy.integrate import quad as integral\nimport numpy as np \nimport math\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nimport query_argonaut_gregreen as query\n\n\ndef getGalactic(ra_dec_str, d):\n    \"\"\"\n    Return the galactic coordinates of the star, \n    given ra and dec in a string, and d 
in kpc.\n    \"\"\"\n    c = SkyCoord(ra_dec_str, unit=(u.hourangle, u.deg), distance = d*u.kpc)\n    return c\n\n\ndef extCoeffIntegral(c, a = 0.4, r_0 = 3.5, z_0 = 0.130): \n    \"\"\"\n    Return the extinction coefficient using the integral.\n    c = galactic coordinates\n    a = local extinction coefficient [mag/kpc]\n    s_0 = distance [kpc]\n    r_0 = length scale [kpc]\n    z_0 = scale height [kpc]\n    \"\"\"\n    l = c.galactic.l.radian\n    b = c.galactic.b.radian\n    x = c.cartesian.x.value\n    y = c.cartesian.y.value\n    z = c.cartesian.z.value\n    d = float(np.sqrt(x**2 + y**2 + z**2))\n\n    def integrand(s, l, b, r_0, z_0):\n        x_s = s * math.cos(b) * math.sin(l)\n        y_s = (-1) * s * math.cos(b) * math.cos(l)\n        z_s = math.sin(b)\n        r_sun = 8 #kpc\n        r_g = np.sqrt((x_s**2) + ((y_s + r_sun)**2))\n        z_g = s * z_s\n        exp1 = math.exp((-1) * r_g / r_0)\n        exp2 = math.exp((-1) * abs(z_g) / z_0)\n        integrand = exp1 * exp2\n        return integrand\n\n    coeff = integral(integrand, 0, d, args = (l, b, r_0, z_0))\n    return a * coeff[0]\n\n\ndef dist2DistMod(d):\n    \"\"\"\n    Return the distance modulus given a distance.\n    d = distance [kpc]\n    \"\"\"\n    log_arg = (d * 1000) / 10\n    mod = 5 * math.log10(log_arg)\n    return mod\n\n\ndef extCoeffArgo(c):\n    \"\"\"\n    Return the extinction coefficient using the Argonaut Skymaps\n    http://argonaut.skymaps.info/gal-lb-query-light\n    c = galactic coordinates\n    \"\"\"\n    x = c.cartesian.x.value\n    y = c.cartesian.y.value\n    z = c.cartesian.z.value\n    d = float(np.sqrt(x**2 + y**2 + z**2))\n    c_mod = dist2DistMod(d)\n    l = c.galactic.l.value\n    b = c.galactic.b.value\n    qresults = query.query(l, b)\n    mod_list = qresults['distmod']\n    nearest_mod = min(mod_list, key=lambda x:abs(x-c_mod))\n    index = mod_list.index(nearest_mod)\n    E_BV = qresults['best'][index]\n    R_V = 3.1 #Taken from Green 2015\n    A_V = R_V * E_BV\n    conversion_factor = 0.302 / 2.742 #Taken from Schlafly 2011\n    A_K = A_V * conversion_factor\n    return A_K\n\n\ndef getCoeff(target, d):\n    \"\"\"\n    Return the extinction coefficient.\n    target = candidate name ('MB960005' or 'MB980006')\n    d = distance [pc]\n    \"\"\"\n    d = d / 1000 #Get d in kpc\n    if target == 'MB960005':\n        coords = '18:05:02.5 -27:42:17' #RA and Dec of MB96-5\n    elif target == 'MB980006':\n        coords = '17:57:32.8 -28:42:45' #RA and Dec of MB98-6\n    else:\n        raise ValueError(\"Wrong Input for Target\")\n    c = getGalactic(coords, d)\n    return extCoeffArgo(c)\n\n\ndef distmod2Dist(distmod):\n    \"\"\"\n    Return the distance [kpc] given a distance modulus.\n    distmod = distance modulus\n    \"\"\"\n    exponent = (distmod / 5) + 1\n    d = 10**(exponent)\n    return d / 1000\n\n\ndef distModArgo(c, E_BV):\n    \"\"\"\n    Return the distance modulus of the source using the Argonaut Skymaps\n    to get distmod from an E_BV value\n    http://argonaut.skymaps.info/gal-lb-query-light\n    c = galactic coordinates\n    \"\"\"\n    x = c.cartesian.x.value\n    y = c.cartesian.y.value\n    z = c.cartesian.z.value\n    d = float(np.sqrt(x**2 + y**2 + z**2))\n    l = c.galactic.l.value\n    b = c.galactic.b.value\n    qresults = query.query(l, b)\n    e_list = qresults['best']\n    nearest_e = min(e_list, key=lambda x:abs(x - E_BV))\n    index = e_list.index(nearest_e)\n    distmod = qresults['distmod'][index]\n    return distmod\n
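\n# Quick check (added): dist2DistMod and distmod2Dist are inverse operations,\n# e.g. dist2DistMod(8.0) = 5 * log10(8000 / 10) = ~14.52, and\n# distmod2Dist(14.52) = ~8.0 kpc.\n\n\ndef getDist(target, d, E_BV):\n    \"\"\"\n    Return the distance [kpc] inferred from an E_BV value via the Argonaut map.\n    target = candidate name ('MB960005' or 'MB980006')\n    d = distance [pc]\n    \"\"\"\n    d = d / 1000 #Get d in kpc\n    if target == 'MB960005':\n        coords = '18:05:02.5 -27:42:17' #RA and Dec of MB96-5\n    elif target == 'MB980006':\n        coords = '17:57:32.8 -28:42:45' #RA and Dec of MB98-6\n    else:\n        raise ValueError(\"Wrong Input for Target\")\n    c = 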
getGalactic(coords, d)\n distmod = distModArgo(c, E_BV)\n distance = distmod2Dist(distmod)\n print(\"Distance: {0} kpc\".format(distance))\n return distance\n\n\n","sub_path":"ml/isochrone/extinction.py","file_name":"extinction.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"263258693","text":"#!/usr/bin/python3.4\n\nfrom pyshoutbox import __version__\n\ntry:\n\tfrom setuptools import setup\n\nexcept ImportError as err:\n\tfrom distutils.core import setup\n\npackages = [\n\t\"pyshoutbox\"\n]\n\nrequires = [\"requests>=2.2.1\"]\n\nsetup(\n\tname=\"pyshoutbox\",\n\tversion = __version__,\n\tdescription = \"Python iShoutbox Parser\",\n\tauthor = \"Benjamin Sparr\",\n\turl = \"\",\n\tpackages = packages,\n\tpackage_dir = {\"pyshoutbox\": \"pyshoutbox\"},\n\tinstall_requires = requires,\n\tlicense = \"Public domain\",\n\tzip_safe = False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"52415272","text":"import os\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport pytz\n\nimport timely_beliefs as tb\nfrom timely_beliefs.examples import get_example_df, get_examples_path\nfrom timely_beliefs.tests.utils import assert_metadata_is_retained\n\n\n@pytest.fixture(scope=\"module\")\ndef csv_file(tmpdir_factory):\n \"\"\"Save BeliefsDataFrame to csv.\"\"\"\n\n get_example_df().to_csv(\"test.csv\")\n yield\n os.remove(\"test.csv\")\n\n\n@pytest.fixture(scope=\"module\")\ndef csv_nat_file(tmpdir_factory):\n \"\"\"Save BeliefsDataFrame to csv, after including a NaT value.\"\"\"\n\n df = get_example_df().reset_index()\n df.iloc[0, 1] = pd.NaT\n df = df.set_index(\n [\"event_start\", \"belief_time\", \"source\", \"cumulative_probability\"]\n )\n df.to_csv(\"test_NaT.csv\")\n yield\n os.remove(\"test_NaT.csv\")\n\n\ndef test_load_beliefs(csv_file):\n \"\"\"Test loading BeliefsDataFrame from csv.\n The saved file does not contain the sensor information, and the sources are saved by their name.\n Therefore, we test the following functionality:\n - The user should specify the sensor upon loading\n - The user should be warned that the loaded sources are not of type BeliefSource.\n - The user should have the possibility to look up the saved source names by passing a list of sources.\n \"\"\"\n\n # Load beliefs with pd.read_csv\n df = pd.read_csv(\"test.csv\")\n df_copy = df.copy()\n with pytest.warns(UserWarning, match=\"created\"):\n bdf = tb.BeliefsDataFrame(df, sensor=tb.Sensor(\"Sensor Y\"))\n assert bdf.sensor.name == \"Sensor Y\"\n\n # Check that input frame was not altered\n # GH 34\n pd.testing.assert_frame_equal(df, df_copy)\n\n # Now load beliefs with tb.read_csv\n # No lookup should issue warning\n with pytest.warns(UserWarning, match=\"looking them up\"):\n bdf = tb.read_csv(\"test.csv\", sensor=tb.Sensor(\"Sensor Y\"))\n for s in bdf.index.get_level_values(\"source\"):\n assert isinstance(\n s, tb.BeliefSource\n ) # Source names automatically get converted to sources\n assert all(c == tb.BeliefSource for c in bdf.sources.map(type))\n\n # This lookup should fail\n with pytest.raises(ValueError, match=\"not in list\"):\n tb.read_csv(\n \"test.csv\",\n sensor=tb.Sensor(\"Sensor Y\"),\n look_up_sources=[tb.BeliefSource(name=\"Source X\")],\n )\n\n # This lookup should succeed\n source_a, source_b = 
tb.BeliefSource(\"Source A\"), tb.BeliefSource(\"Source B\")\n bdf = tb.read_csv(\n \"test.csv\", sensor=tb.Sensor(\"Sensor Y\"), look_up_sources=[source_a, source_b]\n )\n assert bdf.sensor.name == \"Sensor Y\"\n assert source_a in bdf.index.get_level_values(\"source\")\n assert source_b in bdf.index.get_level_values(\"source\")\n assert isinstance(bdf.index.get_level_values(\"event_start\")[0], datetime)\n assert isinstance(bdf.index.get_level_values(\"belief_time\")[0], datetime)\n\n\ndef test_load_beliefs_with_nat_values(csv_nat_file):\n \"\"\"Test loading BeliefsDataFrame from csv containing one NaT belief_time value.\n\n The NaT value is an empty cell in the belief time column.\n The relevant row should be skipped, as requested using the combination of na_values and keep_default_na.\n \"\"\"\n\n # Load beliefs with tb.read_csv\n df = tb.read_csv(\n \"test_NaT.csv\",\n sensor=tb.Sensor(\"Sensor Y\"),\n source=tb.BeliefSource(\"Source A\"),\n timezone=\"Europe/Amsterdam\",\n na_values=[\"\"], # tells pd.read_csv to treat empty cells as NaN values\n keep_default_na=False,\n )\n assert len(df) == len(get_example_df()) - 1\n\n\ndef test_load_timezone_naive_data():\n \"\"\"Test loading timezone naive time series data from csv.\n\n The test data is around a DST transition that lead to duplicate indices.\n \"\"\"\n\n # Load only datetime and value columns with tb.read_csv\n sensor = tb.Sensor(\"Sensor X\")\n source = tb.BeliefSource(\"Source A\")\n path = os.path.join(get_examples_path(), \"timezone_naive_sample.csv\")\n timezone = \"Europe/Amsterdam\"\n df = tb.read_csv(\n path=path,\n timezone=timezone,\n sensor=sensor,\n source=source,\n belief_horizon=timedelta(0),\n usecols=[\"datetime\", \"value\"],\n )\n assert len(df.event_starts.unique()) == 6\n\n # Load also the column describing when the data was recorded\n df = tb.read_csv(\n path=path,\n timezone=timezone,\n sensor=sensor,\n source=source,\n usecols=[\"datetime\", \"recorded\", \"value\"],\n )\n assert len(df.event_starts.unique()) == 6\n\n # Reload while filtering by a column that ends up in the BeliefsDataFrame\n df = tb.read_csv(\n path=path,\n timezone=timezone,\n sensor=sensor,\n source=source,\n usecols=[\"datetime\", \"recorded\", \"value\"],\n filter_by_column={\"recorded\": \"2022-10-09 00:00\"},\n )\n assert len(df.event_starts.unique()) == 5\n\n # Reload while filtering by a column that does not end up in the BeliefsDataFrame\n df = tb.read_csv(\n path=path,\n timezone=timezone,\n sensor=sensor,\n source=source,\n usecols=[\"datetime\", \"recorded\", \"value\"],\n filter_by_column={\"sensor\": \"X\"},\n )\n assert len(df.event_starts.unique()) == 5\n assert \"sensor\" not in df.columns.names\n\n\n@pytest.mark.parametrize(\n \"args, kwargs\",\n [\n ([], {}),\n ([pd.DataFrame()], {}),\n ([pd.Series()], {}),\n ([], {\"sensor\": tb.Sensor(\"test\")}),\n ],\n)\ndef test_empty_beliefs(args, kwargs):\n \"\"\"Test construction of empty BeliefsDataFrame.\"\"\"\n\n bdf = tb.BeliefsDataFrame(*args, **kwargs)\n assert bdf.empty\n if bdf.sensor:\n assert bdf.sensor.name == \"test\"\n else:\n assert bdf.event_resolution is None\n assert \"event_value\" in bdf\n for name in [\"event_start\", \"belief_time\", \"source\", \"cumulative_probability\"]:\n assert name in bdf.index.names\n\n # Check that initializing with self returns a copy of self\n # GH 34\n bdf_copy = bdf.copy()\n bdf = tb.BeliefsDataFrame(bdf)\n pd.testing.assert_frame_equal(bdf_copy, bdf)\n\n\n@pytest.mark.parametrize(\n \"missing_column_name, data, 
present_column_names\",\n [\n (\n \"event_start\",\n [\n [1, datetime(2000, 1, 1, tzinfo=pytz.utc), 3, 4],\n [5, datetime(2000, 1, 1, tzinfo=pytz.utc), 7, 8],\n ],\n [\"source\", \"belief_time\", \"cumulative_probability\", \"event_value\"],\n ),\n (\n \"belief_time\",\n [\n [datetime(2000, 1, 1, tzinfo=pytz.utc), 2, 3, 4],\n [datetime(2000, 1, 1, tzinfo=pytz.utc), 6, 7, 8],\n ],\n [\"event_start\", \"source\", \"cumulative_probability\", \"event_value\"],\n ),\n (\n \"source\",\n [\n [\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n 3,\n 4,\n ],\n [\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n 7,\n 8,\n ],\n ],\n [\"event_start\", \"belief_time\", \"cumulative_probability\", \"event_value\"],\n ),\n (\n \"event_value\",\n [\n [\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n 3,\n 4,\n ],\n [\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n 7,\n 8,\n ],\n ],\n [\"event_start\", \"belief_time\", \"cumulative_probability\", \"source\"],\n ),\n ],\n)\ndef test_incomplete_beliefs(missing_column_name, data, present_column_names):\n \"\"\"Test exceptions are thrown when input data is missing required column headers.\n Only cumulative_probability can be missed, since it is has a default value.\"\"\"\n df = pd.DataFrame(data, columns=present_column_names)\n\n with pytest.raises(KeyError, match=missing_column_name):\n with pytest.warns(UserWarning, match=\"created\"):\n tb.BeliefsDataFrame(df)\n\n\n@pytest.mark.parametrize(\n \"invalid_column, data, column_names\",\n [\n (\n \"event_start\",\n [[1, timedelta(), 3, 4]],\n [\"event_start\", \"belief_horizon\", \"event_value\", \"source\"],\n ), # event_start is not a datetime\n (\n \"event_start\",\n [[datetime(2000, 1, 1), timedelta(), 3, 4]],\n [\"event_start\", \"belief_horizon\", \"event_value\", \"source\"],\n ), # event_start is missing timezone\n (\n \"belief_horizon\",\n [[datetime(2000, 1, 1, tzinfo=pytz.utc), 2, 3, 4]],\n [\"event_start\", \"belief_horizon\", \"event_value\", \"source\"],\n ), # belief_horizon is not a timedelta\n (\n \"belief_time\",\n [[datetime(2000, 1, 1, tzinfo=pytz.utc), 2, 3, 4]],\n [\"event_start\", \"belief_time\", \"event_value\", \"source\"],\n ), # belief_time is not a datetime\n (\n \"source\",\n [[datetime(2000, 1, 1, tzinfo=pytz.utc), timedelta(), 3, None]],\n [\"event_start\", \"belief_horizon\", \"event_value\", \"source\"],\n ), # source is None\n ],\n)\ndef test_invalid_beliefs(invalid_column, data, column_names):\n \"\"\"Test exceptions are thrown when input data is of the wrong type.\"\"\"\n df = pd.DataFrame(data, columns=column_names)\n with pytest.raises(TypeError, match=invalid_column):\n with pytest.warns(UserWarning, match=\"created\"):\n tb.BeliefsDataFrame(df)\n\n\n@pytest.mark.parametrize(\n \"df_or_s, kwargs\",\n [\n (\n pd.DataFrame(\n [[datetime(2000, 1, 1, tzinfo=pytz.utc), timedelta(), 3, 4]],\n columns=[\"event_start\", \"belief_horizon\", \"source\", \"event_value\"],\n ),\n {\"sensor\": tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1))},\n ),\n (\n pd.DataFrame(\n [\n [\n datetime(2000, 1, 1, tzinfo=pytz.utc),\n datetime(2000, 1, 1, hour=1, tzinfo=pytz.utc),\n 3,\n 4,\n ]\n ],\n columns=[\"event_start\", \"belief_time\", \"source\", \"event_value\"],\n ),\n {\"sensor\": tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1))},\n ),\n (\n pd.DataFrame(\n [[datetime(2000, 1, 1, tzinfo=pytz.utc), timedelta(), 4]],\n 
columns=[\"event_start\", \"belief_horizon\", \"event_value\"],\n ),\n {\n \"sensor\": tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1)),\n \"source\": 3,\n },\n ), # move source to keyword argument\n (\n pd.DataFrame(\n [[datetime(2000, 1, 1, tzinfo=pytz.utc), 4]],\n columns=[\"event_start\", \"event_value\"],\n ),\n {\n \"sensor\": tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1)),\n \"source\": 3,\n \"belief_horizon\": timedelta(),\n },\n ), # move source and belief_horizon to keyword argument\n (\n pd.DataFrame([[4]], columns=[\"event_value\"]),\n {\n \"sensor\": tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1)),\n \"source\": 3,\n \"belief_horizon\": timedelta(),\n \"event_start\": datetime(2000, 1, 1, tzinfo=pytz.utc),\n },\n ), # move source, belief_horizon and event_start to keyword argument\n (\n pd.Series([4]),\n {\n \"sensor\": tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1)),\n \"source\": 3,\n \"belief_horizon\": timedelta(),\n \"event_start\": datetime(2000, 1, 1, tzinfo=pytz.utc),\n },\n ), # move source, belief_horizon and event_start to keyword argument and use Series instead of DataFrame\n (\n pd.Series(\n [4], index=pd.DatetimeIndex([datetime(2000, 1, 1, tzinfo=pytz.utc)])\n ),\n {\n \"sensor\": tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1)),\n \"source\": 3,\n \"belief_horizon\": timedelta(),\n },\n ), # move source and belief_horizon keyword argument and use Series instead of DataFrame\n ],\n)\ndef test_belief_setup_with_data_frame(df_or_s, kwargs):\n \"\"\"Test different ways of setting up the same BeliefsDataFrame.\"\"\"\n df_or_s_copy = df_or_s.copy()\n\n with pytest.warns(UserWarning, match=\"created\"):\n bdf = tb.BeliefsDataFrame(df_or_s, **kwargs)\n assert bdf.event_starts[0] == datetime(2000, 1, 1, tzinfo=pytz.utc)\n assert (\n bdf.belief_times[0]\n == datetime(2000, 1, 1, tzinfo=pytz.utc) + bdf.event_resolution\n )\n assert bdf.belief_horizons[0] == timedelta()\n assert bdf.sources[0].name == \"3\"\n assert bdf.values[0] == 4\n\n # Check that input data frame or series was not altered\n # GH 34\n if isinstance(df_or_s, pd.DataFrame):\n pd.testing.assert_frame_equal(df_or_s, df_or_s_copy)\n elif isinstance(df_or_s, pd.Series):\n pd.testing.assert_series_equal(df_or_s, df_or_s_copy)\n\n # Check that initializing with self returns a copy of self\n # GH 34\n bdf_copy = bdf.copy()\n bdf = tb.BeliefsDataFrame(bdf)\n pd.testing.assert_frame_equal(bdf_copy, bdf)\n\n\n@pytest.mark.parametrize(\n \"args, kwargs\",\n [\n (\n [],\n {\n \"beliefs\": [\n tb.TimedBelief(\n tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1)),\n tb.BeliefSource(3),\n 4,\n event_start=datetime(2000, 1, 1, tzinfo=pytz.utc),\n belief_horizon=timedelta(),\n )\n ]\n },\n ),\n (\n [\n [\n tb.TimedBelief(\n tb.Sensor(name=\"temp\", event_resolution=timedelta(hours=1)),\n tb.BeliefSource(3),\n 4,\n event_start=datetime(2000, 1, 1, tzinfo=pytz.utc),\n belief_horizon=timedelta(),\n )\n ]\n ],\n {},\n ),\n ],\n)\ndef test_belief_setup_with_timed_beliefs(args, kwargs):\n \"\"\"Test different ways of setting up the same BeliefsDataFrame.\"\"\"\n bdf = tb.BeliefsDataFrame(*args, **kwargs)\n assert bdf.event_starts[0] == datetime(2000, 1, 1, tzinfo=pytz.utc)\n assert (\n bdf.belief_times[0]\n == datetime(2000, 1, 1, tzinfo=pytz.utc) + bdf.event_resolution\n )\n assert bdf.belief_horizons[0] == timedelta()\n assert bdf.sources[0].name == \"3\"\n assert bdf.values[0] == 4\n tb.BeliefsDataFrame()\n\n\ndef 
test_converting_between_data_frame_and_series_retains_metadata():\n \"\"\"\n Test whether slicing of a BeliefsDataFrame into a BeliefsSeries retains the metadata.\n Test whether expanding dimensions of a BeliefsSeries into a BeliefsDataFrame retains the metadata.\n \"\"\"\n example_df = get_example_df()\n df = example_df\n series = df[\"event_value\"]\n assert_metadata_is_retained(series, original_df=example_df, is_series=True)\n df = series.to_frame()\n assert_metadata_is_retained(df, original_df=example_df)\n\n\ndef test_dropping_index_levels_retains_metadata():\n example_df = get_example_df()\n df = example_df.copy()\n df.index = df.index.get_level_values(\"event_start\") # drop all other index levels\n assert_metadata_is_retained(df, original_df=example_df)\n\n\n@pytest.mark.parametrize(\"drop_level\", [True, False])\ndef test_slicing_retains_metadata(drop_level):\n \"\"\"\n Test whether slicing the index of a BeliefsDataFrame retains the metadata.\n \"\"\"\n example_df = get_example_df()\n df = example_df\n df = df.xs(\"2000-01-03 10:00:00+00:00\", level=\"event_start\", drop_level=drop_level)\n print(df)\n assert_metadata_is_retained(df, original_df=example_df)\n\n\n@pytest.mark.parametrize(\"resolution\", [timedelta(minutes=30), timedelta(hours=2)])\ndef test_mean_resampling_retains_metadata(resolution):\n \"\"\"\n Test whether mean resampling retains the metadata.\n\n Fails with pandas==1.0.0\n Succeeds with pandas==1.1.0\n \"\"\"\n example_df = get_example_df()\n df = example_df\n df = df.resample(resolution, level=\"event_start\").mean()\n print(df)\n assert_metadata_is_retained(\n df,\n original_df=example_df,\n event_resolution=example_df.event_resolution,\n ) # todo: the event_resolution metadata is only updated when resampling using df.resample_events(). A reason to override the original resample method, or otherwise something to document.\n\n\n@pytest.mark.parametrize(\"resolution\", [timedelta(minutes=30), timedelta(hours=2)])\ndef _test_agg_resampling_retains_metadata(resolution):\n \"\"\"\n Test whether aggregate resampling retains the metadata.\n\n Fails with pandas==1.1.5\n \"\"\"\n example_df = get_example_df()\n df = example_df\n df = df.reset_index(level=[\"belief_time\", \"source\", \"cumulative_probability\"])\n df = df.resample(resolution).agg(\n {\n \"event_value\": np.nanmean,\n \"source\": \"first\", # keep the only source\n \"belief_time\": \"max\", # keep the latest belief\n \"cumulative_probability\": \"prod\", # assume independent variables\n }\n )\n df = df.set_index([\"belief_time\", \"source\", \"cumulative_probability\"], append=True)\n print(df)\n assert_metadata_is_retained(\n df,\n original_df=example_df,\n event_resolution=example_df.event_resolution,\n ) # todo: the event_resolution metadata is only updated when resampling using df.resample_events(). 
A reason to override the original resample method, or otherwise something to document.\n\n\n@pytest.mark.parametrize(\n \"test_df\",\n [\n \"example_df\",\n \"empty_df\",\n ],\n)\ndef test_groupby_retains_metadata(test_df):\n \"\"\"Test whether grouping by index level retains the metadata.\n\n Succeeds with pandas==1.0.0\n Fails with pandas==1.1.0\n Fixed with pandas==1.1.5\n Fails with pandas==1.3.0\n \"\"\"\n if test_df == \"example_df\":\n original_df = get_example_df()\n elif test_df == \"empty_df\":\n original_df = tb.BeliefsDataFrame(sensor=tb.Sensor(name=\"test\", unit=\"W\"))\n else:\n raise NotImplementedError\n df = original_df.copy()\n\n def assert_function(x):\n print(x)\n assert_metadata_is_retained(x, original_df=original_df)\n return x\n\n df = df.groupby(level=\"event_start\", group_keys=False).apply(\n lambda x: assert_function(x)\n )\n assert_metadata_is_retained(df, original_df=original_df)\n\n\ndef test_copy_series_retains_name_and_metadata():\n # GH 41\n df = get_example_df()\n sensor = df.sensor\n s = df[\"event_value\"]\n assert s.sensor == sensor\n name = s.name\n s_copy = s.copy()\n assert s_copy.name == name\n assert s_copy.sensor == sensor\n\n\ndef test_init_from_beliefs_data_frame():\n \"\"\"Check that input BeliefsDataFrame was not altered.\"\"\"\n # GH 34\n df = get_example_df().rename(columns={\"event_value\": \"reference_value\"})\n df_copy = df.copy()\n tb.BeliefsDataFrame(df)\n pd.testing.assert_frame_equal(df, df_copy)\n\n\ndef test_init_from_beliefs_series():\n \"\"\"Check that input BeliefsSeries was not altered.\"\"\"\n # GH 34\n df = get_example_df().rename(columns={\"event_value\": \"reference_value\"})\n s = df[\"reference_value\"]\n df_copy = df.copy()\n s_copy = s.copy()\n\n # check method using to_frame\n bdf = s.to_frame()\n pd.testing.assert_frame_equal(df, df_copy) # original bdf was not altered\n pd.testing.assert_frame_equal(\n bdf, df_copy\n ) # new bdf retains altered column of original bdf\n pd.testing.assert_series_equal(s, s_copy) # input BeliefsSeries was not altered\n\n # check method using class init\n bdf = tb.BeliefsDataFrame(s)\n pd.testing.assert_frame_equal(df, df_copy) # original bdf was not altered\n pd.testing.assert_frame_equal(\n bdf, df_copy\n ) # new bdf retains altered column of original bdf\n pd.testing.assert_series_equal(s, s_copy) # input BeliefsSeries was not altered\n\n\ndef test_groupby_does_not_retain_temporary_attribute():\n df = pd.DataFrame([[1, 2], [3, 4]], columns=[\"x\", \"y\"])\n df.a = \"b\"\n assert df.a == \"b\"\n df2 = df.groupby(\"x\", group_keys=False).apply(lambda x: x)\n assert not hasattr(df2, \"a\")\n df3 = df.groupby(\"x\", group_keys=False).sum()\n assert not hasattr(df3, \"a\")\n\n\n@pytest.mark.parametrize(\n \"att, args\",\n [\n # (\"all\", []),\n # (\"any\", []),\n # (\"count\", []),\n (\"first\", []),\n (\"last\", []),\n (\"max\", []),\n # (\"mean\", []),\n # (\"median\", []),\n (\"min\", []),\n (\"prod\", []),\n # (\"sem\", []),\n # (\"size\", []),\n # (\"std\", []),\n (\"sum\", []),\n # (\"var\", []),\n # (\"apply\", [lambda x: x]),\n # (\"apply\", [np.max])\n # (\"apply\", [np.min])\n # (\"apply\", [np.nanmean]),\n (\"agg\", [\"first\"]),\n (\"agg\", [\"max\"]),\n # (\"agg\", [\"mean\"]),\n (\"agg\", [\"min\"]),\n (\"agg\", [\"sum\"]),\n # (\"agg\", [{\"y\": \"min\"}]),\n # (\"agg\", [{\"x\": \"min\", \"y\": \"max\"}]),\n ],\n)\ndef test_groupby_retains_subclass_attribute(att, args):\n \"\"\"Checks on metadata propagation for subclassed DataFrames under groupby operations.\n\n 
Commented-out parameter combinations fail with pandas==1.1.5\n    The relevant issue has to do with calling finalize after operations:\n    see https://github.com/pandas-dev/pandas/issues/28283\n    \"\"\"\n\n    METADATA = [\"a\"]\n\n    class SubclassedSeries(pd.Series):\n\n        _metadata = METADATA\n\n        @property\n        def _constructor(self):\n            return SubclassedSeries\n\n        @property\n        def _constructor_expanddim(self):\n            return SubclassedDataFrame\n\n    class SubclassedDataFrame(pd.DataFrame):\n\n        _metadata = METADATA\n\n        @property\n        def _constructor(self):\n            return SubclassedDataFrame\n\n        @property\n        def _constructor_sliced(self):\n            return SubclassedSeries\n\n    df = SubclassedDataFrame([[1, 2], [3, 4]], columns=[\"x\", \"y\"])\n    df.a = \"b\"\n    assert df.a == \"b\"\n    df2 = getattr(df.groupby(\"x\", group_keys=False), att)(*args)\n    print(df2)\n    assert df2.a == \"b\"\n\n\n@pytest.mark.parametrize(\"constant\", [1, -1, 3.14, timedelta(hours=1), [\"TiledString\"]])\ndef test_multiplication_with_constant_retains_metadata(constant):\n    \"\"\"Check whether the metadata is still there after multiplication.\"\"\"\n    # GH 35\n    example_df = get_example_df()\n    df = example_df * constant\n    assert_metadata_is_retained(df, original_df=example_df)\n\n    # Also check suggested workarounds from GH 35\n    if constant == -1:\n        df = -example_df\n        assert_metadata_is_retained(df, original_df=example_df)\n\n        df = example_df.abs()\n        assert_metadata_is_retained(df, original_df=example_df)\n","sub_path":"timely_beliefs/tests/test_belief_io.py","file_name":"test_belief_io.py","file_ext":"py","file_size_in_byte":23558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}{"seq_id":"548663878","text":"import socket\n\n# Simple TCP calculator server: the client first sends an operation index\n# (0 = Salir/quit, 1 = Sumar/add, 2 = Multiplicar/multiply), then two integer\n# operands; the server replies with the result.\nip = \"10.108.33.37\"\npuerto = 8080\nrespuestas = 2\ndef lee_mensaje(cliente):\n    condicion = True\n    while condicion:\n        operacion = cliente.recv(2048).decode(\"utf-8\")\n        op=[\"Salir\",\"Sumar\",\"Multiplicar\"]\n\n        try:\n            print(\"Usted ha elegido la operacion: \",op[int(operacion)])\n\n            if int(operacion) == 0:\n                print(\"Ha seleccionado Salir\")\n                cliente.close()\n                condicion = False  # stop the loop instead of recv'ing on a closed socket\n\n            if int(operacion) == 1:\n                num_1 = cliente.recv(2048).decode(\"utf-8\")\n                num_2 = cliente.recv(2048).decode(\"utf-8\")\n                resultado = int(num_1) + int(num_2)\n                resultado = str.encode(str(resultado))\n                cliente.send(resultado)\n\n            if int(operacion) == 2:\n                num_1 = cliente.recv(2048).decode(\"utf-8\")\n                num_2 = cliente.recv(2048).decode(\"utf-8\")\n                resultado = int(num_1) * int(num_2)\n                resultado = str.encode(str(resultado))\n                cliente.send(resultado)\n            \n        except KeyboardInterrupt:\n            cliente.close()\n            serversocket.close()\n            print(\"La calculadora se cierra...\")\n\n        except Exception:\n            print(\"Introduce un valor válido!\")\n\n\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n    serversocket.bind((ip, puerto))\n    serversocket.listen(respuestas)\n    print(\"Esperando conexion en el puerto:\",puerto,\"y en la ip:\",ip)\n    (cliente, address) = serversocket.accept()\n    lee_mensaje(cliente)\n\nexcept OSError:\n    print(\"Fallo\")\n\nexcept KeyboardInterrupt:\n    cliente.close()\n    serversocket.close()\n    print(\"La calculadora se cierra...\")\n
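\n# Illustrative client sketch (added; commented out so the module still only runs\n# the server). It mirrors the wire protocol handled by lee_mensaje above:\n#\n#   import socket\n#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#   s.connect((ip, puerto))\n#   s.send(b\"1\")   # choose Sumar (add)\n#   s.send(b\"2\")\n#   s.send(b\"3\")\n#   print(s.recv(2048).decode(\"utf-8\"))   # expected: \"5\"\n#   s.close()\n","sub_path":"servidor_calculadora.py","file_name":"servidor_calculadora.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}{"seq_id":"412084918","text":"#!/usr/bin/env python3\n\nimport csv\nimport pandas as pd\nimport numpy as np\n\nclass DataManipulation:\n\n    def __init__(self, filename):\n        self.__data = 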
pd.read_csv(filename)\n        self.__NUM_OF_BINS = 20\n        self.__total = 0\n        self.__total_motor = None\n        self.__total_sensor = None\n        self.__total_us = None\n\n    \"\"\"Normalizing motor values\n\n    Normalized the left and right motor values ranging from -1000 to 1000, to be from -1 to 1\n    \"\"\"\n    def normalized_motor(self):\n        self.__data['right motor'] = self.__data['right motor'].values/1000 #min -1 max 1\n        self.__data['left motor'] = self.__data['left motor'].values/1000 #min -1 max 1\n\n    \"\"\"Normalizing values ranging from 0 to 1\n    \n    Normalized all sensor, motor and ultrasound values to be in range from 0 to 1.\n    \"\"\"\n    def normalized_0_to_1(self):\n        self.__total_motor = (0.5*((self.__data['left motor'].values + self.__data['right motor'].values)/2) + 0.5) * self.__NUM_OF_BINS\n        self.__total_sensor = ((self.__data['left sensor'].values + self.__data['right sensor'].values) / 2) * self.__NUM_OF_BINS\n        self.__total_us = ((self.__data['left ultrasound sensor'].values + self.__data['right ultrasound sensor'].values) / 2) * self.__NUM_OF_BINS\n        \n        # clamp values that land exactly on the upper edge into the last bin\n        for i in range(len(self.__total_motor)):\n            if self.__total_motor[i] == self.__NUM_OF_BINS:\n                self.__total_motor[i] = self.__NUM_OF_BINS-1\n\n            if self.__total_sensor[i] == self.__NUM_OF_BINS:\n                self.__total_sensor[i] = self.__NUM_OF_BINS-1\n\n            if self.__total_us[i] == self.__NUM_OF_BINS:\n                self.__total_us[i] = self.__NUM_OF_BINS-1\n\n        self.__total_motor = np.floor(self.__total_motor).astype(int)\n        self.__total_sensor = np.floor(self.__total_sensor).astype(int)\n        self.__total_us = np.floor(self.__total_us).astype(int)\n\n        return self.__total_motor,self.__total_sensor,self.__total_us\n\n    def get_NUM_OF_BINS(self):\n        return self.__NUM_OF_BINS\n\n    def set_NUM_OF_BINS(self, NUM_OF_BINS):\n        self.__NUM_OF_BINS = NUM_OF_BINS\n\n    def get_data(self):\n        return self.__data\n\n    def get_total_motor(self):\n        return self.__total_motor\n\n    def get_total_sensor(self):\n        return self.__total_sensor\n\n    def get_total_us(self):\n        return self.__total_us\n\n    def normalized_minus1_to_1(self):\n        pass\n\n    \"\"\"Converting the total values to bin number\n    \n    Count the total with the formula\n    DIM1 + (NOB * DIM2) + ((NOB**2) * DIM3)\n\n    If there is no third dimension, it simply sums up to the second dimension.\n    \"\"\"\n    def convert_values_to_bins(self, firstDimension=None, secondDimension=None, thirdDimension=None,\n                                fourthDimension=None, fifthDimension=None, sixthDimension=None):\n        if self.__total != 0:\n            self.__total = 0\n\n        if firstDimension is not None: \n            self.__total += firstDimension \n        if secondDimension is not None:\n            self.__total += self.__NUM_OF_BINS * secondDimension\n        if thirdDimension is not None:\n            self.__total += (self.__NUM_OF_BINS**2) * thirdDimension\n        if fourthDimension is not None:\n            self.__total += (self.__NUM_OF_BINS**3) * fourthDimension\n        if fifthDimension is not None:\n            self.__total += (self.__NUM_OF_BINS**4) * fifthDimension\n        if sixthDimension is not None:\n            self.__total += (self.__NUM_OF_BINS**5) * sixthDimension\n\n        self.__total = np.floor(self.__total).astype(int)\n        return self.__total\n\n    \"\"\"Removing transition where state goes to itself.\n    \n    To remove where state goes to itself (e.g A A A A B will be reduced to A B)\n    \"\"\"\n    def remove_continous_state(self):\n        temp = [self.__total[i] for i in range(len(self.__total)-1) if self.__total[i] != self.__total[i+1]]\n        if temp[-1] != self.__total[-1]:\n            temp.append(self.__total[-1])\n        return np.array(temp)\n
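\n    # Worked example (added): with NUM_OF_BINS = 20, convert_values_to_bins packs\n    # per-dimension bin indices into a single state id (mixed-radix, base 20):\n    #   convert_values_to_bins(3, 5, 7)  ->  3 + 20*5 + 400*7 = 2903\n    # and digitize_total_values(2903, '3d') recovers the tuple (3, 5, 7).\n\n    \"\"\"Make a dictionary inside a dictionary from transition array without 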
frequency\n \n This detects how many times a state goes to another state. \n Represented with a dictionary in a dictionary. (e.g {A : {B: 2}} means bin A moves to bin B 2 times.)\n \"\"\"\n def transition_frequency(self, transition_array):\n transition_with_frequency = {}\n for i in range(len(transition_array)-1):\n if transition_array[i] not in transition_with_frequency:\n transition_with_frequency[transition_array[i]] = {}\n transition_with_frequency[transition_array[i]][transition_array[i+1]] = 1\n\n elif transition_array[i+1] in transition_with_frequency[transition_array[i]]:\n transition_with_frequency[transition_array[i]][transition_array[i+1]] += 1\n\n elif transition_array[i+1] not in transition_with_frequency[transition_array[i]]:\n transition_with_frequency[transition_array[i]][transition_array[i+1]] = 1\n\n return transition_with_frequency\n\n \"\"\"Translate a transition with frequency dictionary to transition array with frequency\n \n This is to make a new list where every 3 values indicate source, destination, and frequency visited.\n \"\"\"\n def most_visited_state_transition(self, transition_with_frequency):\n most_visited_state = []\n for i,j in transition_with_frequency.items():\n most_visited_state.append(i)\n most_visited_state.append(sorted(j.items(),key=lambda t: t[1], reverse=True)[0][0])\n most_visited_state.append(sorted(j.items(),key=lambda t: t[1], reverse=True)[0][1])\n\n return np.array(most_visited_state)\n\n \"\"\"Separate values into arrays of bins\n \n Take the array values and digitize it to bins to determine which bin each value belongs to.\n \"\"\"\n def digitize_total_values(self, to_be_digitized, type):\n first_array_bin = np.arange(0, self.__NUM_OF_BINS)\n second_array_bin = np.arange(0, (self.__NUM_OF_BINS**2), self.__NUM_OF_BINS)\n third_array_bin = np.arange(0, (self.__NUM_OF_BINS**3), self.__NUM_OF_BINS**2)\n\n if type == '2d':\n #numpy digitize assigns bin from 1 to 10. but we want to use 0 to 9 \n xArray = np.digitize(to_be_digitized % self.__NUM_OF_BINS, first_array_bin) - 1\n yArray = np.digitize(to_be_digitized, second_array_bin) - 1\n\n return xArray, yArray\n elif type == '3d':\n #numpy digitize assigns bin from 1 to 10. 
but we want to use 0 to 9\n xArray = np.digitize(to_be_digitized % self.__NUM_OF_BINS, first_array_bin) - 1\n yArray = np.digitize(to_be_digitized % (self.__NUM_OF_BINS**2), second_array_bin) - 1\n zArray = np.digitize(to_be_digitized, third_array_bin) - 1\n\n return xArray, yArray, zArray\n","sub_path":"experiment/DataManipulation.py","file_name":"DataManipulation.py","file_ext":"py","file_size_in_byte":6902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"455563644","text":"from keras import backend as K\nfrom keras.engine.topology import Layer\nimport numpy as np \nfrom keras import activations\nfrom keras import initializers\nfrom keras import regularizers\nfrom keras import constraints\n\n\nclass Smooth(Layer):\n\n def __init__(self, \n units,\n activation=None,\n use_bias=False,\n kernel_initializer='random_normal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.is_placeholder = False\n\n super(Smooth, self).__init__(**kwargs)\n\n def build(self, input_shape):\n if input_shape is None:\n raise RuntimeError('specify input shape')\n \n # Create trainable weight variable for this layer.\n self.W = self.add_weight(name='weights',\n shape=(9, 9),\n # initializer='random_normal',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n trainable=True)\n\n if self.use_bias:\n self.bias = self.add_weight(shape=(9,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n\n super(Smooth, self).build(input_shape) # Be sure to call this somewhere!\n\n def call(self, input):\n output = K.dot(input, self.W) \n\n if self.use_bias:\n output = K.bias_add(output, self.bias)\n \n if self.activation is not None:\n return self.activation(output)\n else:\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape # not changing the dimensions\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'is_placeholder': False\n }\n\n base_config = super(Smooth, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n# a pool layer to pool values based on a dense weight matrix\nclass Densepool(Layer):\n\n def __init__(self, \n mtx,\n mtx_1,\n 
activation=None,\n                 **kwargs):\n        self.mtx = mtx\n        self.mtx_1 = mtx_1\n        self.activation = activations.get(activation)\n        self.trainable = False\n        super(Densepool, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        if input_shape is None:\n            raise RuntimeError('specify input shape')\n\n        super(Densepool, self).build(input_shape) # Be sure to call this somewhere!\n\n    def call(self, input):\n        dim = K.int_shape(input)\n        flat_input = K.reshape(input, (-1, dim[1] * 3, 3))\n        # print \"flat shape: >>>\"\n        # print flat_input.shape\n        \n        mtx_tensor = K.constant(self.mtx, dtype='float32', name='mtx_tensor') # v_num x (tri*3)\n        mtx_1_tensor = K.transpose(K.constant(self.mtx_1, dtype='float32', name='mtx_1_tensor')) \n\n        pos = K.dot(mtx_tensor, flat_input)  # (v_num, batch, 3): corner coords pooled per vertex\n        new_pos = K.permute_dimensions(pos, (1, 0, 2))\n        # print new_pos.shape # (?, 700, 3)\n        \n        tri = K.dot(mtx_1_tensor, new_pos)  # (tri*3, batch, 3): vertex coords scattered back to corners\n        output = K.permute_dimensions(tri, (1, 0, 2)) \n        s = K.int_shape(output)\n        output = K.reshape(output, (-1, s[1] // 3, 9))\n\n        if self.activation is not None: \n            return self.activation(output)\n        else:\n            return output\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n
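\n\n# Usage sketch (added; shapes are hypothetical and mtx/mtx_1 come from the mesh\n# preprocessing, both sized v_num x (tri*3)):\n#\n#   from keras.layers import Input\n#   from keras.models import Model\n#   inp = Input(shape=(700, 9))                 # 700 triangles, 3 corners x 3 coords\n#   out = Densepool(mtx=mtx, mtx_1=mtx_1)(inp)  # same shape out: (batch, 700, 9)\n#   model = Model(inp, out)\n\n\n","sub_path":"m_layers.py","file_name":"m_layers.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}{"seq_id":"574161234","text":"from django.db import models\nfrom django.db.models import ObjectDoesNotExist\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.mctc.models.general import Case, Provider\nfrom apps.mctc.models.logs import MessageLog\n\nfrom datetime import datetime, date, timedelta\nimport md5\n\nclass Report:\n    def get_alert_recipients(self):\n        \"\"\" Each report will send an alert, how it will choose when to send an alert\n        is up to the model, however. 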
\"\"\"\n # this is the reporter, the provider or the CHW depending what you call it\n provider = self.provider\n facility = provider.clinic\n assert facility, \"This provider does not have a clinic.\"\n\n recipients = []\n\n # find all the people assigned to alerts from this facility\n for user in facility.following_clinics.all():\n # only send if they want\n if user.alerts:\n if user not in recipients:\n recipients.append(user)\n \n \n # find all the users monitoring this user\n for user in provider.following_users.all():\n if user.alerts:\n if user not in recipients:\n recipients.append(user)\n\n return recipients\n\nclass Observation(models.Model):\n uid = models.CharField(max_length=15)\n name = models.CharField(max_length=255)\n letter = models.CharField(max_length=2, unique=True)\n\n class Meta:\n app_label = \"mctc\"\n ordering = (\"name\",)\n\n def __unicode__(self):\n return self.name\n\nclass DiarrheaObservation(models.Model):\n uid = models.CharField(max_length=15, primary_key=True)\n name = models.CharField(max_length=255)\n letter = models.CharField(max_length=2, unique=True)\n\n class Meta:\n app_label = \"mctc\"\n ordering = (\"name\",)\n\n def __unicode__(self):\n return self.name\n \nclass ReportMalaria(Report, models.Model):\n class Meta:\n get_latest_by = 'entered_at'\n ordering = (\"-entered_at\",)\n app_label = \"mctc\"\n verbose_name = \"Malaria Report\"\n verbose_name_plural = \"Malaria Reports\"\n \n case = models.ForeignKey(Case, db_index=True)\n provider = models.ForeignKey(Provider, db_index=True)\n entered_at = models.DateTimeField(db_index=True)\n bednet = models.BooleanField(db_index=True)\n result = models.BooleanField(db_index=True) \n observed = models.ManyToManyField(Observation, blank=True) \n\n def get_dictionary(self):\n return {\n 'result': self.result,\n 'result_text': self.result and \"Y\" or \"N\",\n 'bednet': self.bednet,\n 'bednet_text': self.bednet and \"Y\" or \"N\",\n 'observed': \", \".join([k.name for k in self.observed.all()]), \n }\n \n def zone(self):\n return self.case.zone.name\n \n def results_for_malaria_bednet(self):\n \tbednet = \"N\"\n \tif self.bednet is True:\n \t bednet = \"Y\"\t\n \treturn \"%s\"%(bednet)\n\n def results_for_malaria_result(self):\n \tresult = \"-\"\n \tif self.bednet is True:\n \t result = \"+\"\t\n \treturn \"%s\"%(result)\n\n def name(self):\n return \"%s %s\" % (self.case.first_name, self.case.last_name)\n \n def provider_number(self):\n return self.provider.mobile\n \n def save(self, *args):\n if not self.id:\n self.entered_at = datetime.now()\n super(ReportMalaria, self).save(*args)\n \n @classmethod\n def count_by_provider(cls,provider, duration_end=None,duration_start=None):\n if provider is None:\n return None\n try:\n if duration_start is None or duration_end is None:\n return cls.objects.filter(provider=provider).count()\n return cls.objects.filter(entered_at__lte=duration_end, entered_at__gte=duration_start).filter(provider=provider).count()\n except models.ObjectDoesNotExist:\n return None\n \nclass ReportMalnutrition(Report, models.Model):\n \n MODERATE_STATUS = 1\n SEVERE_STATUS = 2\n SEVERE_COMP_STATUS = 3\n HEALTHY_STATUS = 4\n STATUS_CHOICES = (\n (MODERATE_STATUS, _('MAM')),\n (SEVERE_STATUS, _('SAM')),\n (SEVERE_COMP_STATUS, _('SAM+')),\n (HEALTHY_STATUS, _(\"Healthy\")),\n )\n\n case = models.ForeignKey(Case, db_index=True)\n provider = models.ForeignKey(Provider, db_index=True)\n entered_at = models.DateTimeField(db_index=True)\n muac = models.IntegerField(_(\"MUAC (mm)\"), null=True, 
blank=True)\n height = models.IntegerField(_(\"Height (cm)\"), null=True, blank=True)\n weight = models.FloatField(_(\"Weight (kg)\"), null=True, blank=True)\n observed = models.ManyToManyField(Observation, blank=True)\n status = models.IntegerField(choices=STATUS_CHOICES, db_index=True, blank=True, null=True)\n \n class Meta:\n app_label = \"mctc\"\n verbose_name = \"Malnutrition Report\"\n verbose_name_plural = \"Malnutrition Reports\"\n get_latest_by = 'entered_at'\n ordering = (\"-entered_at\",)\n\n def get_dictionary(self):\n return {\n 'muac' : \"%d mm\" % self.muac,\n 'observed' : \", \".join([k.name for k in self.observed.all()]),\n 'diagnosis' : self.get_status_display(),\n 'diagnosis_msg' : self.diagnosis_msg(),\n }\n \n \n def __unicode__ (self):\n return \"#%d\" % self.id\n \n def symptoms(self):\n \treturn \", \".join([k.name for k in self.observed.all()])\n \n def zone(self):\n return self.case.zone.name\n \n def name(self):\n return \"%s %s\" % (self.case.first_name, self.case.last_name) \n \n def provider_number(self):\n return self.provider.mobile\n \n def diagnose (self):\n complications = [c for c in self.observed.all() if c.uid != \"edema\"]\n edema = \"edema\" in [ c.uid for c in self.observed.all() ]\n self.status = ReportMalnutrition.HEALTHY_STATUS\n if edema or self.muac < 110:\n if complications:\n self.status = ReportMalnutrition.SEVERE_COMP_STATUS\n else:\n self.status = ReportMalnutrition.SEVERE_STATUS\n elif self.muac < 125:\n self.status = ReportMalnutrition.MODERATE_STATUS\n\n def diagnosis_msg(self):\n if self.status == ReportMalnutrition.MODERATE_STATUS:\n msg = \"MAM Child requires supplemental feeding.\"\n elif self.status == ReportMalnutrition.SEVERE_STATUS:\n msg = \"SAM Patient requires OTP care\"\n elif self.status == ReportMalnutrition.SEVERE_COMP_STATUS:\n msg = \"SAM+ Patient requires IMMEDIATE inpatient care\"\n else:\n msg = \"Child is not malnourished\"\n \n return msg\n\n def save(self, *args):\n if not self.id:\n self.entered_at = datetime.now()\n super(ReportMalnutrition, self).save(*args)\n \n @classmethod\n def count_by_provider(cls,provider, duration_end=None,duration_start=None):\n if provider is None:\n return None\n try:\n if duration_start is None or duration_end is None:\n return cls.objects.filter(provider=provider).count()\n return cls.objects.filter(entered_at__lte=duration_end, entered_at__gte=duration_start).filter(provider=provider).count()\n except models.ObjectDoesNotExist:\n return None \n\nclass Lab(models.Model):\n name = models.CharField(max_length=255)\n code = models.CharField(max_length=10)\n \n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = \"mctc\"\n ordering = (\"code\",) \n\nclass LabDiagnosis(models.Model):\n lab = models.ForeignKey(Lab)\n diagnosis = models.ForeignKey(\"ReportDiagnosis\")\n amount = models.IntegerField(blank=True, null=True)\n result = models.BooleanField(blank=True)\n\n def __unicode__(self):\n return \"%s, %s - %s\" % (self.lab, self.diagnosis, self.amount)\n\n class Meta:\n app_label = \"mctc\"\n\nclass DiagnosisCategory(models.Model):\n name = models.CharField(max_length=255)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = \"mctc\"\n ordering = (\"name\",)\n \nclass Diagnosis(models.Model):\n name = models.CharField(max_length=255)\n code = models.CharField(max_length=10)\n category = models.ForeignKey(DiagnosisCategory)\n mvp_code = models.CharField(max_length=255)\n instructions = models.TextField(blank=True)\n \n def __unicode__(self):\n 
return self.mvp_code\n\n    class Meta:\n        app_label = \"mctc\"\n        ordering = (\"code\",)\n        verbose_name = \"Diagnosis Code\"\n        verbose_name_plural = \"Diagnosis Codes\"\n    \nclass ReportDiagnosis(Report, models.Model):\n    case = models.ForeignKey(Case, db_index=True)\n    provider = models.ForeignKey(Provider, db_index=True)\n    diagnosis = models.ManyToManyField(Diagnosis)\n    lab = models.ManyToManyField(Lab, through=LabDiagnosis)\n    text = models.TextField()\n    entered_at = models.DateTimeField(db_index=True)\n    \n    def __unicode__(self):\n        return \"%s\" % self.case\n\n    class Meta:\n        verbose_name = \"Diagnosis Report\"\n        app_label = \"mctc\"\n\n    def save(self, *args):\n        if not self.id:\n            self.entered_at = datetime.now()\n        super(ReportDiagnosis, self).save(*args)\n\n    def get_dictionary(self):\n        extra = []\n        for ld in LabDiagnosis.objects.filter(diagnosis=self):\n            if ld.amount:\n                extra.append(\"%s %s\" % (ld.lab.code, ld.amount))\n            else:\n                extra.append(\"%s%s\" % (ld.lab.code, ld.result and \"+\" or \"-\"))\n        \n        return {\n            \"diagnosis\": \", \".join([str(d) for d in self.diagnosis.all()]),\n            \"labs\": \", \".join([str(d) for d in self.lab.all()]),\n            \"labs_text\": \", \".join(extra)\n        }\n\nclass ReportDiarrhea(Report, models.Model):\n    \n    MODERATE_STATUS = 1\n    DANGER_STATUS = 2\n    SEVERE_STATUS = 3\n    HEALTHY_STATUS = 4\n    STATUS_CHOICES = (\n        (MODERATE_STATUS, _('Moderate')),\n        (DANGER_STATUS, _('Danger')),\n        (SEVERE_STATUS, _('Severe')),\n        (HEALTHY_STATUS, _(\"Healthy\")),\n    )\n\n    case = models.ForeignKey(Case, db_index=True)\n    provider = models.ForeignKey(Provider, db_index=True)\n    entered_at = models.DateTimeField(db_index=True)\n    ors = models.BooleanField()\n    days = models.IntegerField(_(\"Number of days\")) \n    observed = models.ManyToManyField(DiarrheaObservation, blank=True)\n    status = models.IntegerField(choices=STATUS_CHOICES, db_index=True, blank=True, null=True)\n    \n    class Meta:\n        app_label = \"mctc\"\n        verbose_name = \"Diarrhea Report\"\n        verbose_name_plural = \"Diarrhea Reports\"\n        get_latest_by = 'entered_at'\n        ordering = (\"-entered_at\",)\n\n    def get_dictionary(self):\n        return {\n            'ors' : \"ORS: %s\" % (\"yes\" if self.ors else \"no\"),\n            'days' : \"Days: %d\" % self.days,\n            'observed' : \", \".join([k.name for k in self.observed.all()]),\n            'diagnosis' : self.get_status_display(),\n            'diagnosis_msg' : self.diagnosis_msg(),\n        }\n    \n    def __unicode__ (self):\n        return \"#%d\" % self.id\n\n    def diagnose (self):\n        if self.days >= 3 or self.observed.all().count() > 0:\n            self.status = ReportDiarrhea.DANGER_STATUS\n        else:\n            self.status = ReportDiarrhea.MODERATE_STATUS\n\n    def diagnosis_msg(self):\n        if self.status == ReportDiarrhea.MODERATE_STATUS:\n            msg = \"MOD Patient should take ORS.\"\n        elif self.status == ReportDiarrhea.SEVERE_STATUS:\n            msg = \"SEV Patient must be referred to a clinic.\"\n        elif self.status == ReportDiarrhea.DANGER_STATUS:\n            msg = \"DANG Patient must go to Clinic.\"\n        else:\n            msg = \"HEAL Patient not in danger.\"\n        \n        return msg\n\n    def save(self, *args):\n        if not self.id:\n            self.entered_at = datetime.now()\n        super(ReportDiarrhea, self).save(*args)\n
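\n# Decision-rule sketch for diagnose() above (added; values illustrative only):\n#   days >= 3, or any recorded observation  ->  DANGER_STATUS\n#   otherwise                               ->  MODERATE_STATUS\n# SEVERE_STATUS and HEALTHY_STATUS are defined but never assigned by diagnose().\n        \nclass ReportCHWStatus(Report, models.Model):\n    class Meta:\n        verbose_name = \"CHW Performance Report\"\n        app_label = \"mctc\"\n    @classmethod\n    def get_providers_by_clinic(cls, duration_start, duration_end, muac_duration_start, clinic_id=None):\n        \n        \n        ps = []\n        fields = []\n        counter = 0\n        clinic_cases = 0\n        clinic_mrdt = 0\n        clinic_muac = 0\n        clinic_sent = 0\n        clinic_processed = 0\n        clinic_refused = 0\n        \n        if clinic_id is not None:\n            providers = 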
Provider.list_by_clinic(clinic_id)\n for provider in providers:\n p = {}\n counter = counter + 1\n p['counter'] = \"%d\"%counter\n p['provider'] = provider\n p['num_cases'] = Case.count_by_provider(provider)\n p_muac = ReportMalaria.count_by_provider(provider, duration_end, duration_start)\n p['num_malaria_reports'] = p_muac\n clinic_mrdt = clinic_mrdt + p_muac \n num_cases = Case.count_by_provider(provider)\n clinic_cases = clinic_cases + num_cases\n num_muac = ReportMalnutrition.count_by_provider(provider, duration_end, muac_duration_start)\n clinic_muac = clinic_muac + num_muac\n if num_cases == 0:\n muac_percentage = 0\n else:\n muac_percentage = round(float(float(num_muac)/float(num_cases))*100, 0)\n p['num_muac_reports'] = \"%d %% (%s/%s)\"%(muac_percentage, num_muac, num_cases)\n sms_sent = MessageLog.count_by_provider(provider, duration_end, duration_start)\n clinic_sent = clinic_sent + sms_sent\n p['sms_sent'] = sms_sent\n sms_processed = MessageLog.count_processed_by_provider(provider, duration_end, duration_start)\n clinic_processed = clinic_processed + sms_processed\n p['sms_processed'] = sms_processed\n sms_refused = MessageLog.count_refused_by_provider(provider, duration_end, duration_start)\n clinic_refused = clinic_refused + sms_refused\n p['sms_refused'] = sms_refused\n if p['sms_sent'] != 0:\n p['sms_rate'] = int(float(float(p['sms_processed'])/float(p['sms_sent'])*100))\n else:\n p['sms_rate'] = 0\n #p['sms_rate'] = \"%s%%\"%p['sms_rate']\n last_activity = MessageLog.days_since_last_activity(provider)\n if last_activity == \"\":\n p['days_since_last_activity'] = \"No Activity\"\n else:\n p['days_since_last_activity'] = \"%s days ago\"%last_activity\n \n ps.append(p)\n \n #ps = sorted(ps)\n # Summary \n p = {}\n p['counter'] = \"\"\n p['provider'] = \"Summary\"\n p['num_cases'] = clinic_cases\n p['num_malaria_reports'] = clinic_mrdt\n num_cases = clinic_cases\n num_muac = clinic_muac\n if num_cases == 0:\n muac_percentage = 0\n else:\n muac_percentage = round(float(float(num_muac)/float(num_cases))*100, 0)\n p['num_muac_reports'] = \"%d %% (%s/%s)\"%(muac_percentage, num_muac, num_cases)\n p['sms_sent'] = clinic_sent\n p['sms_processed'] = clinic_processed\n p['sms_refused'] = clinic_refused\n if p['sms_sent'] != 0:\n p['sms_rate'] = int(float(float(p['sms_processed'])/float(p['sms_sent'])*100))\n else:\n p['sms_rate'] = 0\n #p['sms_rate'] = \"%s%%\"%p['sms_rate']\n p['days_since_last_activity'] = \"\" \n \n ps.append(p)\n # caseid +|Y lastname firstname | sex | dob/age | guardian | provider | date\n fields.append({\"name\": '#', \"column\": None, \"bit\": \"{{ object.counter }}\" })\n fields.append({\"name\": 'PROVIDER', \"column\": None, \"bit\": \"{{ object.provider }}\" })\n fields.append({\"name\": 'NUMBER OF CASES', \"column\": None, \"bit\": \"{{ object.num_cases}}\" })\n fields.append({\"name\": 'MRDT', \"column\": None, \"bit\": \"{{ object.num_malaria_reports }}\" })\n fields.append({\"name\": 'MUAC', \"column\": None, \"bit\": \"{{ object.num_muac_reports }}\" })\n fields.append({\"name\": 'RATE', \"column\": None, \"bit\": \"{{ object.sms_rate }}% ({{ object.sms_processed }}/{{ object.sms_sent }})\" })\n fields.append({\"name\": 'LAST ACTVITY', \"column\": None, \"bit\": \"{{ object.days_since_last_activity }}\" })\n return ps, fields \n \nclass ReportAllPatients(Report, models.Model):\n class Meta:\n verbose_name = \"CHW Perfomance Report\"\n app_label = \"mctc\"\n @classmethod\n def by_provider(cls, provider_id=None): \n qs = []\n fields = []\n counter = 0\n 
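# gather the latest malnutrition, diarrhea, malaria and diagnosis reports for each of this provider's cases\n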
if provider_id is not None:\n cases = Case.objects.order_by(\"last_name\").filter(provider=provider_id)\n \n for case in cases:\n q = {}\n q['case'] = case\n counter = counter + 1\n q['counter'] = \"%d\"%counter\n try:\n muacc = ReportMalnutrition.objects.filter(case=case).latest()\n #q['malnut'] = u\"%(diag)s on %(date)s\" % {'diag': muacc.diagnosis_msg(), 'date': muacc.entered_at.strftime(\"%Y-%m-%d\")}\n q['malnut_muac'] = \"%s (%smm)\"%(muacc.get_status_display(), muacc.muac)\n q['malnut_symptoms'] = muacc.symptoms()\n except ObjectDoesNotExist:\n q['malnut_muac'] = \"\"\n q['malnut_symptoms'] = \"\"\n \n try:\n orsc = ReportDiarrhea.objects.filter(case=case).latest()\n q['diarrhea'] = u\"%(diag)s on %(date)s\" % {'diag': orsc.diagnosis_msg(), 'date': orsc.entered_at.strftime(\"%Y-%m-%d\")}\n except ObjectDoesNotExist:\n q['diarrhea'] = None\n \n try:\n mrdtc = ReportMalaria.objects.filter(case=case).latest()\n mrdtcd = mrdtc.get_dictionary()\n #q['malaria'] = u\"result:%(res)s bednet:%(bed)s obs:%(obs)s on %(date)s\" % {'res': mrdtcd['result_text'], 'bed': mrdtcd['bednet_text'], 'obs': mrdtcd['observed'], 'date': mrdtc.entered_at.strftime(\"%Y-%m-%d\")}\n q['malaria_result'] = mrdtc.results_for_malaria_result()\n q['malaria_bednet'] = mrdtc.results_for_malaria_bednet()\n except ObjectDoesNotExist:\n q['malaria_result'] = \"\"\n q['malaria_bednet'] = \"\"\n \n try:\n dc = ReportDiagnosis.objects.filter(case=case).latest('entered_at')\n dcd = dc.get_dictionary()\n q['diagnosis'] = u\"diag:%(diag)s labs:%(lab)s on %(date)s\" % {'diag': dcd['diagnosis'], 'lab': dcd['labs_text'], 'date': dc.entered_at.strftime(\"%Y-%m-%d\")}\n except ObjectDoesNotExist:\n q['diagnosis'] = None\n \n qs.append(q)\n # caseid +|Y lastname firstname | sex | dob/age | guardian | provider | date\n fields.append({\"name\": '#', \"column\": None, \"bit\": \"{{ object.counter }}\" })\n fields.append({\"name\": 'PID#', \"column\": None, \"bit\": \"{{ object.case.ref_id }}\" })\n fields.append({\"name\": 'NAME', \"column\": None, \"bit\": \"{{ object.case.last_name }} {{ object.case.first_name }}\" })\n fields.append({\"name\": 'SEX', \"column\": None, \"bit\": \"{{ object.case.gender }}\" })\n fields.append({\"name\": 'AGE', \"column\": None, \"bit\": \"{{ object.case.age }}\" })\n fields.append({\"name\": 'REGISTERED', \"column\": None, \"bit\": \"{{ object.case.date_registered }}\" })\n fields.append({\"name\": 'MRDT', \"column\": None, \"bit\": \"{{ object.malaria_result }}\" })\n fields.append({\"name\": 'BEDNET', \"column\": None, \"bit\": \"{{ object.malaria_bednet }}\" })\n fields.append({\"name\": 'CMAM', \"column\": None, \"bit\": \"{{ object.malnut_muac }}\" })\n fields.append({\"name\": 'SYMPTOMS', \"column\": None, \"bit\": \"{{ object.malnut_symptoms}}\" })\n return qs, fields\n","sub_path":"apps/mctc/models/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":20935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"507822608","text":"# -*- coding: utf-8 -*-\n\"\"\"\n core.middleware\n ~~~~~~~~~~~~~~~\n\n File with core middlewares.\n\n :copyright: (c) 2015 by Rambler&Co.\n\"\"\"\n\nfrom admin_app.core.utils.options import SOURCES\nfrom admin_app.core.models import City\nfrom admin_app.fetchers.management.commands.geonames import GeoNameImport\n\n\nclass XGeoIdMiddleware(object):\n\n def process_request(self, request):\n self.helper = GeoNameImport(\n source_url=SOURCES['rambler_geonames']['url'],\n 
source_title=SOURCES['rambler_geonames']['title'],\n )\n ext_id = request.META.get('HTTP_X_GEO_ID')\n\n try:\n city = self.helper.get_obj_by_source(City, ext_id, True)\n except Exception as e:\n city = None\n request.city = city\n","sub_path":"src/admin_app/core/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"86694354","text":"from collections import deque\n\n\ndef bfs(root):\n global time\n Q = deque()\n distance[root] = 0\n Q.append(root)\n i = root\n while len(Q) > 0:\n i = Q.popleft()\n for v in range(n):\n if v in adjDict[i] and color[v] == 'white':\n Q.append(v)\n color[v] = 'gray'\n distance[v] = distance[i] + 1\n color[i] = 'black'\n label[i] = time\n\n\nfirstline = list(map(int, input().split()))\n\nn = firstline[0]\nadjDict = {i: [] for i in range(n)}\ncolor = ['white'] * n\ndistance = [-1] * n\nlabel = [-1] * n\n\nm = firstline[1]\nfor _ in range(m):\n adj = list(map(int, input().split()))\n adjDict[adj[0]].append(adj[1])\n adjDict[adj[1]].append(adj[0])\n\ntime = 0\nfor i in range(n):\n if color[i] == 'white':\n bfs(i)\n time += 1\n\nq = int(input())\nfor _ in range(q):\n question = list(map(int, input().split()))\n if label[question[0]] == label[question[1]]:\n print('yes')\n else:\n print('no')\n","sub_path":"111D.py","file_name":"111D.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"582918937","text":"\"\"\"\n.. module:: sip_batchoutput\n :synopsis: A useful module indeed.\n\"\"\"\n\nfrom django.views.decorators.http import require_POST\n\nimport csv\nimport sip_model,sip_tables\n\nimport logging\nfrom threading import Thread\nimport Queue\nfrom collections import OrderedDict\n\nlogger = logging.getLogger(\"SIPBatchOutput\")\n\nchemical_name=[]\nspecies_tested_bird=[]\nspecies_tested_mammal=[]\nbodyweight_quail=[]\nbodyweight_duck=[]\nbodyweight_bird_other=[]\nbodyweight_rat=[]\nbodyweight_tested_mammal_other=[]\navian_ld50=[]\nmammalian_ld50=[]\nsolubility=[]\nbodyweight_assessed_bird=[]\nmineau_scaling_factor=[]\nbodyweight_assessed_mammal=[]\nnoaec_d=[]\nnoaec_q=[]\nnoaec_o=[]\nnoael_mammal_water=[]\nSpecies_of_the_bird_NOAEC_CHOICES=[]\nnoael_avian_water=[]\n\n######Pre-defined outputs########\ndose_bird_out = []\ndose_mamm_out = []\nat_bird_out = []\nat_mamm_out = []\ndet_out = []\nact_out = []\nacute_bird_out = []\nacuconb_out = []\nacute_mamm_out = []\nacuconm_out = []\nchron_bird_out = []\nchronconb_out = []\nchron_mamm_out = []\nchronconm_out = []\n\njid_all = []\njid_batch = []\nsip_all = []\nall_threads = []\nout_html_all = {}\njob_q = Queue.Queue()\nthread_count = 10\n\n\ndef html_table(row_inp_all):\n while True:\n row_inp_temp_all = row_inp_all.get()\n if row_inp_temp_all is None:\n break\n else:\n row_inp = row_inp_temp_all[0]\n iter = row_inp_temp_all[1]\n\n logger.info(\"iteration: \" + str(iter))\n chemical_name.append(str(row_inp[0]))\n species_tested_bird.append(float(row_inp[1]))\n species_tested_mammal.append(float(row_inp[2]))\n bodyweight_quail.append(float(row_inp[3]))\n bodyweight_duck.append(float(row_inp[4]))\n bodyweight_bird_other.append(float(row_inp[5])) \n bodyweight_rat.append(float(row_inp[6]))\n bodyweight_tested_mammal_other.append(float(row_inp[7]))\n solubility.append(float(row_inp[8]))\n avian_ld50.append(float(row_inp[9])) \n mammalian_ld50.append(float(row_inp[10]))\n 
bodyweight_assessed_bird.append(float(row_inp[11]))\n mineau_scaling_factor.append(float(row_inp[12]))\n bodyweight_assessed_mammal.append(float(row_inp[13]))\n noaec_d.append(float(row_inp[14]))\n noaec_q.append(float(row_inp[15]))\n noaec_o.append(float(row_inp[16]))\n noael_mammal_water.append(float(row_inp[17]))\n Species_of_the_bird_NOAEC_CHOICES.append(str(row_inp[18]))\n if Species_of_the_bird_NOAEC_CHOICES[iter] == '1':\n noael_avian_water.append(float(row_inp[14]))\n elif Species_of_the_bird_NOAEC_CHOICES[iter] == '2':\n noael_avian_water.append(float(row_inp[15]))\n elif Species_of_the_bird_NOAEC_CHOICES[iter] == '3':\n noael_avian_water.append(float(row_inp[16]))\n\n logger.info(chemical_name)\n logger.info(species_tested_bird)\n logger.info(species_tested_mammal)\n logger.info(bodyweight_quail)\n logger.info(bodyweight_duck)\n logger.info(bodyweight_bird_other)\n logger.info(bodyweight_rat)\n logger.info(bodyweight_tested_mammal_other)\n logger.info(solubility)\n logger.info(avian_ld50)\n logger.info(mammalian_ld50)\n logger.info(bodyweight_assessed_bird)\n logger.info(mineau_scaling_factor)\n logger.info(bodyweight_assessed_mammal)\n logger.info(noaec_d)\n logger.info(noaec_q)\n logger.info(noaec_o)\n logger.info(noael_mammal_water)\n logger.info(Species_of_the_bird_NOAEC_CHOICES)\n\n sip_obj = sip_model.sip(True,True,'batch',chemical_name[iter], species_tested_bird[iter], species_tested_mammal[iter], bodyweight_quail[iter],\n bodyweight_duck[iter], bodyweight_bird_other[iter], bodyweight_rat[iter], bodyweight_tested_mammal_other[iter], solubility[iter], avian_ld50[iter],\n mammalian_ld50[iter], bodyweight_assessed_bird[iter], mineau_scaling_factor[iter], bodyweight_assessed_mammal[iter], noaec_d[iter], noaec_q[iter], noaec_o[iter], Species_of_the_bird_NOAEC_CHOICES[iter], noael_mammal_water[iter])\n\n dose_bird_out.append(sip_obj.dose_bird_out)\n dose_mamm_out.append(sip_obj.dose_mamm_out)\n at_bird_out.append(sip_obj.at_bird_out)\n at_mamm_out.append(sip_obj.at_mamm_out)\n det_out.append(sip_obj.det_out)\n act_out.append(sip_obj.act_out)\n acute_bird_out.append(sip_obj.acute_bird_out)\n acuconb_out.append(sip_obj.acuconb_out)\n acute_mamm_out.append(sip_obj.acute_mamm_out)\n acuconm_out.append(sip_obj.acuconm_out)\n chron_bird_out.append(sip_obj.chron_bird_out)\n chronconb_out.append(sip_obj.chronconb_out)\n chron_mamm_out.append(sip_obj.chron_mamm_out)\n chronconm_out.append(sip_obj.chronconm_out)\n\n jid_all.append(sip_obj.jid)\n sip_all.append(sip_obj) \n if iter == 0:\n jid_batch.append(sip_obj.jid)\n\n batch_header = \"\"\"\n
Batch Calculation of Iteration %s:
\n \"\"\"%(iter + 1)\n\n out_html_temp = batch_header + sip_tables.table_all(sip_obj)\n out_html_all[iter]=out_html_temp\n\ndef loop_html(thefile):\n reader = csv.reader(thefile.file.read().splitlines())\n header = reader.next()\n # logger.info(header)\n i=0\n iter_html=\"\"\n for row in reader:\n job_q.put([row, i])\n i=i+1\n\n all_threads = [Thread(target=html_table, args=(job_q, )) for j in range(thread_count)]\n for x in all_threads:\n x.start()\n for x in all_threads:\n job_q.put(None)\n for x in all_threads:\n x.join()\n\n html_timestamp = sip_tables.timestamp(\"\", jid_batch[0])\n out_html_all_sort = OrderedDict(sorted(out_html_all.items()))\n sum_html = sip_tables.table_all_sum(sip_tables.sumheadings, sip_tables.tmpl, bodyweight_quail,bodyweight_duck,bodyweight_bird_other,bodyweight_rat,bodyweight_tested_mammal_other,solubility,\n avian_ld50,mammalian_ld50,bodyweight_assessed_bird,mineau_scaling_factor,bodyweight_assessed_mammal,noael_avian_water,noael_mammal_water,\n dose_bird_out, dose_mamm_out, at_bird_out, \n at_mamm_out, det_out, act_out, acute_bird_out, acute_mamm_out, \n chron_bird_out, chron_mamm_out)\n\n return html_timestamp + sum_html + \"\".join(out_html_all_sort.values())\n\n\n@require_POST\ndef sipBatchOutputPage(request):\n thefile = request.FILES['upfile']\n iter_html=loop_html(thefile)\n\n return iter_html, sip_all, jid_batch","sub_path":"models/sip/sip_batchoutput.py","file_name":"sip_batchoutput.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"609051741","text":"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.utils.spectral_norm as SpectralNorm\n\nclass ConvBlock(nn.Module):\n\tdef __init__(self, ni, no, ks, stride, pad = None, use_bn = True, use_pixelshuffle = False, norm_type = 'batchnorm'):\n\t\tsuper(ConvBlock, self).__init__()\n\t\tself.use_bn = use_bn\n\t\tself.use_pixelshuffle = use_pixelshuffle\n\t\tif(pad == None):\n\t\t\tpad = ks // 2 // stride\n\t\tif(use_pixelshuffle):\n\t\t\tself.conv = nn.Conv2d(ni, no * 2 * 2, ks, stride, pad, bias = False)\n\t\t\tself.pixelshuffle = nn.PixelShuffle(2)\n\t\telse:\n\t\t\tself.conv = nn.Conv2d(ni, no, ks, stride, pad, bias = False)\n\n\t\tif(self.use_bn == True):\n\t\t\tif(norm_type == 'batchnorm'):\n\t\t\t\tself.bn = nn.BatchNorm2d(no)\n\t\t\telif(norm_type == 'instancenorm'):\n\t\t\t\tself.bn = nn.InstanceNorm2d(no)\n\t\tself.relu = nn.LeakyReLU(0.2, inplace = True)\n\n\tdef forward(self, x):\n\t\tout = self.conv(x)\n\t\tif(self.use_pixelshuffle == True):\n\t\t\tout = self.pixelshuffle(out)\n\t\tif(self.use_bn == True):\n\t\t\tout = self.bn(out)\n\t\tout = self.relu(out)\n\t\treturn out\n\nclass DeConvBlock(nn.Module):\n\tdef __init__(self, ni, no, ks, stride, pad = None, output_pad = 0, use_bn = True, norm_type = 'batchnorm'):\n\t\tsuper(DeConvBlock, self).__init__()\n\t\tself.use_bn = use_bn\n\t\tif(pad is None):\n\t\t\tpad = ks // 2 // stride\n\t\tself.deconv = nn.ConvTranspose2d(ni, no, ks, stride, pad, output_padding = output_pad, bias = False)\n\t\tif(self.use_bn == True):\n\t\t\tif(norm_type == 'batchnorm'):\n\t\t\t\tself.bn = nn.BatchNorm2d(no)\n\t\t\telif(norm_type == 'instancenorm'):\n\t\t\t\tself.bn = nn.InstanceNorm2d(no)\n\t\tself.relu = nn.ReLU(inplace = True)\n\n\tdef forward(self, x):\n\t\tout = self.deconv(x)\n\t\tif(self.use_bn == True):\n\t\t\tout = self.bn(out)\n\t\tout = self.relu(out)\n\t\treturn out\n\nclass 
UpSample(nn.Module):\n\tdef __init__(self):\n\t\tsuper(UpSample, self).__init__()\n\t\tself.scale_factor = 2\n\n\tdef forward(self, x):\n\t\treturn F.interpolate(x, None, self.scale_factor, 'bilinear', align_corners = True)\n\nclass DownSample(nn.Module):\n\tdef __init__(self):\n\t\tsuper(DownSample, self).__init__()\n\n\tdef forward(self, x):\n\t\treturn F.avg_pool2d(x, 2)\n\nclass SelfAttention(nn.Module):\n\tdef __init__(self, ni):\n\t\tsuper(SelfAttention, self).__init__()\n\t\tself.ni = ni\n\t\tself.f = nn.Conv2d(self.ni, self.ni//8, 1, 1, 0)\n\t\tself.g = nn.Conv2d(self.ni, self.ni//8, 1, 1, 0)\n\t\tself.h = nn.Conv2d(self.ni, self.ni, 1, 1, 0)\n\t\tself.softmax = nn.Softmax(dim = -1)\n\t\tself.alpha = nn.Parameter(torch.tensor(0.0))\n\n\tdef forward(self, x):\n\t\t# x : (bs, ni, sz, sz)\n\t\tf_out = self.f(x)\n\t\t# (bs, ni // 8, sz, sz)\n\t\tf_out = f_out.view(f_out.size(0), self.ni//8, -1)\n\t\t# (bs, ni // 8, sz * sz)\n\t\tf_out = f_out.permute(0, 2, 1)\n\t\t# (bs, sz * sz, ni // 8)\n\n\t\t# x : (bs, ni, sz, sz)\n\t\tg_out = self.g(x)\n\t\t# (bs, ni // 8, sz, sz)\n\t\tg_out = g_out.view(g_out.size(0), self.ni//8, -1)\n\t\t# (bs, ni // 8, sz * sz)\n\n\t\t# x : (bs, ni, sz, sz)\n\t\th_out = self.h(x)\n\t\t# (bs, ni, sz, sz)\n\t\th_out = h_out.view(h_out.size(0), self.ni, -1)\n\t\t# (bs, ni, sz * sz)\n\n\t\tf_g_mult = torch.bmm(f_out, g_out)\n\t\t# (bs, sz * sz, sz * sz)\n\t\tf_g_mult = self.softmax(f_g_mult)\n\t\t# (bs, sz * sz, sz * sz)\n\t\tf_g_h_mult = torch.bmm(h_out, f_g_mult)\n\t\t# (bs, ni, sz * sz)\n\t\tf_g_h_mult = f_g_h_mult.view(*x.shape)\n\t\t# (bs, ni, sz, sz)\n\n\t\tout = self.alpha * f_g_h_mult + x\n\t\t# (bs, ni, sz, sz)\n\n\t\treturn out\n\n# Residual Architectures with Spectral Norm and Self Attention\n\nclass Generative_ResBlock(nn.Module):\n\tdef __init__(self, ic, oc, upsample = True, use_spectral_norm = False):\n\t\tsuper(Generative_ResBlock, self).__init__()\n\t\tself.conv1 = nn.Conv2d(ic, oc, 3, 1, 1)\n\t\tself.conv2 = nn.Conv2d(oc, oc, 3, 1, 1)\n\t\tself.conv3 = nn.Conv2d(ic, oc, 1, 1, 0)\n\t\tnn.init.xavier_uniform(self.conv1.weight.data, 1.)\n\t\tnn.init.xavier_uniform(self.conv2.weight.data, 1.)\n\t\tnn.init.xavier_uniform(self.conv3.weight.data, np.sqrt(2))\n\n\t\tif(use_spectral_norm == False):\n\t\t\tmodel_list = [nn.BatchNorm2d(ic), nn.ReLU(inplace = True), self.conv1]\n\t\telif(use_spectral_norm == True):\n\t\t\tmodel_list = [nn.BatchNorm2d(ic), nn.ReLU(inplace = True), SpectralNorm(self.conv1)]\n\t\tif(upsample == True):\n\t\t\tmodel_list.append(UpSample())\n\t\tmodel_list.extend([\n\t\t\tnn.BatchNorm2d(oc),\n\t\t\tnn.ReLU(inplace = True),\n\t\t])\n\t\tif(use_spectral_norm == False):\n\t\t\tmodel_list.append(self.conv2)\n\t\telif(use_spectral_norm == True):\n\t\t\tmodel_list.append(SpectralNorm(self.conv2))\n\t\tself.model = nn.Sequential(*model_list)\n\n\t\tbypass_list = [self.conv3]\n\t\tif(upsample == True):\n\t\t\tbypass_list.append(UpSample())\n\t\tself.bypass = nn.Sequential(*bypass_list)\n\n\tdef forward(self, x):\n\t\tout = self.model(x) + self.bypass(x)\n\t\treturn out\n\nclass Discriminative_ResBlock_First(nn.Module):\n\tdef __init__(self, ic, oc, downsample = True):\n\t\tsuper(Discriminative_ResBlock_First, self).__init__()\n\t\tself.conv1 = nn.Conv2d(ic, oc, 3, 1, 1)\n\t\tself.conv2 = nn.Conv2d(oc, oc, 3, 1, 1)\n\t\tself.conv3 = nn.Conv2d(ic, oc, 1, 1, 0)\n\t\tnn.init.xavier_uniform(self.conv1.weight.data, 1.)\n\t\tnn.init.xavier_uniform(self.conv2.weight.data, 1.)\n\t\tnn.init.xavier_uniform(self.conv3.weight.data, 
np.sqrt(2))\n\n\t\tmodel_list = [\n\t\t\tSpectralNorm(self.conv1),\n\t\t\tnn.ReLU(inplace = True),\n\t\t\tSpectralNorm(self.conv2)\n\t\t]\n\t\tif(downsample == True):\n\t\t\tmodel_list.append(nn.AvgPool2d(2))\n\t\tself.model = nn.Sequential(*model_list)\n\n\t\tbypass_list = [SpectralNorm(self.conv3)]\n\t\tif(downsample == True):\n\t\t\tbypass_list.append(nn.AvgPool2d(2))\n\t\tself.bypass = nn.Sequential(*bypass_list)\n\n\tdef forward(self, x):\n\t\tout = self.model(x) + self.bypass(x)\n\t\treturn out\n\nclass Discriminative_ResBlock(nn.Module):\n\tdef __init__(self, ic, oc, downsample = True):\n\t\tsuper(Discriminative_ResBlock, self).__init__()\n\t\tself.conv1 = nn.Conv2d(ic, oc, 3, 1, 1)\n\t\tself.conv2 = nn.Conv2d(oc, oc, 3, 1, 1)\n\t\tself.conv3 = nn.Conv2d(ic, oc, 1, 1, 0)\n\t\tnn.init.xavier_uniform(self.conv1.weight.data, 1.)\n\t\tnn.init.xavier_uniform(self.conv2.weight.data, 1.)\n\t\tnn.init.xavier_uniform(self.conv3.weight.data, np.sqrt(2))\n\n\t\tmodel_list = [\n\t\t\tnn.ReLU(inplace = True),\n\t\t\tSpectralNorm(self.conv1),\n\t\t\tnn.ReLU(inplace = True),\n\t\t\tSpectralNorm(self.conv2)\n\t\t]\n\t\tif(downsample == True):\n\t\t\tmodel_list.append(nn.AvgPool2d(2))\n\t\tself.model = nn.Sequential(*model_list)\n\n\t\tbypass_list = [SpectralNorm(self.conv3)]\n\t\tif(downsample == True):\n\t\t\tbypass_list.append(nn.AvgPool2d(2))\n\t\tself.bypass = nn.Sequential(*bypass_list)\n\n\tdef forward(self, x):\n\t\tout = self.model(x) + self.bypass(x)\n\t\treturn out\n\nclass ResNetGan_D(nn.Module):\n\tdef __init__(self, sz, nc, ndf, use_sigmoid = True, self_attention_layer = None):\n\t\tsuper(ResNetGan_D, self).__init__()\n\t\tself.sz = sz\n\t\tself.nc = nc\n\t\tself.ndf = ndf\n\t\tself.self_attention_layer = self_attention_layer\n\t\tcur_ndf = self.ndf\n\n\t\tself.blocks = [Discriminative_ResBlock_First(self.nc, cur_ndf, True)]\n\t\tfor i in range(int(math.log2(self.sz)) - 3):\n\t\t\tif(cur_ndf == self_attention_layer):\n\t\t\t\tself.blocks.append(SelfAttention(cur_ndf))\n\t\t\tself.blocks.append(Discriminative_ResBlock(cur_ndf, cur_ndf*2, True))\n\t\t\tcur_ndf = cur_ndf * 2\n\t\tself.blocks.append(Discriminative_ResBlock(cur_ndf, cur_ndf, False))\n\t\tself.blocks = nn.Sequential(*self.blocks)\n\n\t\tself.relu = nn.ReLU(inplace = True)\n\t\tself.avgpool = nn.AdaptiveAvgPool2d(1)\n\t\tself.dense = nn.Linear(cur_ndf, 1)\n\t\tself.sigmoid = nn.Sigmoid()\n\t\tself.use_sigmoid = use_sigmoid\n\t\t\n\t\tnn.init.xavier_uniform(self.dense.weight.data, 1.)\n\n\tdef forward(self, x):\n\t\tout = self.blocks(x)\n\t\tout = self.relu(out)\n\t\tout = self.avgpool(out)\n\t\tout = out.view(out.size(0), -1)\n\t\tout = self.dense(out)\n\t\tif(self.use_sigmoid == True):\n\t\t\tout = self.sigmoid(out)\n\t\treturn out\n\nclass ResNetGan_G(nn.Module):\n\tdef __init__(self, sz, nz, nc, ngf, use_spectral_norm = False, self_attention_layer = None):\n\t\tsuper(ResNetGan_G, self).__init__()\n\t\tself.sz = sz\n\t\tself.nz = nz\n\t\tself.nc = nc\n\t\tself.ngf = ngf\n\t\tself.self_attention_layer = self_attention_layer\n\t\tcur_ngf = self.ngf*self.sz//8\n\t\tself.dense = nn.Linear(self.nz, 4*4*cur_ngf)\n\t\tself.use_spectral_norm = use_spectral_norm\n\n\t\tself.blocks = [Generative_ResBlock(cur_ngf, cur_ngf, True, self.use_spectral_norm)]\n\t\tfor i in range(int(math.log2(self.sz)) - 3):\n\t\t\tif(cur_ngf == self_attention_layer):\n\t\t\t\tself.blocks.append(SelfAttention(cur_ngf))\n\t\t\tself.blocks.append(Generative_ResBlock(cur_ngf, cur_ngf // 2, True, self.use_spectral_norm))\n\t\t\tcur_ngf = cur_ngf // 
2\n\t\tself.blocks = nn.Sequential(*self.blocks)\n\n\t\tself.bn = nn.BatchNorm2d(cur_ngf)\n\t\tself.relu = nn.ReLU(inplace = True)\n\t\tself.conv = nn.Conv2d(cur_ngf, self.nc, 1, 1, 0)\n\t\tself.tanh = nn.Tanh()\n\n\t\tnn.init.xavier_uniform(self.dense.weight.data, 1.)\n\t\tnn.init.xavier_uniform(self.conv.weight.data, 1.)\n\n\tdef forward(self, x):\n\t\tout = x.view(x.size(0), -1)\n\t\tout = self.dense(out)\n\t\tout = out.view(out.size(0), -1, 4, 4)\n\t\tout = self.blocks(out)\n\t\tout = self.conv(self.relu(self.bn(out)))\n\t\tout = self.tanh(out)\n\t\treturn out\n\n# Custom Layers and Models\nclass Custom128x128_G(nn.Module):\n\tdef __init__(self, nz, oc):\n\t\tsuper(Custom128x128_G, self).__init__()\n\t\tself.nz = nz\n\t\tself.oc = oc\n\n\t\tself.init_block = nn.Sequential(\n\t\t\tnn.Linear(nz, 64 * 16 * 16),\n\t\t\tReshape((-1, 64, 16, 16)),\n\t\t\tnn.BatchNorm2d(64),\n\t\t\tnn.ReLU(inplace = True)\n\t\t)\n\t\tself.blocks = nn.Sequential(\n\t\t\t*([Custom_G_ResBlock(64, 64)] * 16)\n\t\t)\n\t\tself.blocks2 = nn.Sequential(\n\t\t\tCustom_G_UpBlock(64),\n\t\t\tCustom_G_UpBlock(64),\n\t\t\tCustom_G_UpBlock(64)\n\t\t)\n\n\t\tself.bn = nn.BatchNorm2d(64)\n\t\tself.relu = nn.ReLU(inplace = True)\n\t\tself.conv = nn.Conv2d(64, self.oc, 9, 1, 4, bias = True)\n\t\tself.tanh = nn.Tanh()\n\n\t\tfor m in self.modules():\n\t\t\tif(isinstance(m, nn.Conv2d)):\n\t\t\t\tm.weight.data.normal_(0.0, 0.02)\n\t\t\t\tif(m.bias is not None):\n\t\t\t\t\tm.bias.data.zero_()\n\n\tdef forward(self, z):\n\t\tout = self.init_block(z.view(-1, self.nz))\n\t\tout = self.relu(self.bn(self.blocks(out))) + out\n\t\tout = self.blocks2(out)\n\t\tout = self.tanh(self.conv(out))\n\n\t\treturn out\n\nclass Custom128x128_D(nn.Module):\n\tdef __init__(self, nc, use_sigmoid):\n\t\tsuper(Custom128x128_D, self).__init__()\n\t\tself.nc = nc\n\t\tself.use_sigmoid = use_sigmoid\n\n\t\tself.conv = nn.Conv2d(nc, 32, 4, 2, 1)\n\t\tself.relu = nn.LeakyReLU(0.2, inplace = True)\n\n\t\tself.blocks = nn.Sequential(\n\t\t\tCustom_D_GlobalBlock(32, 64),\n\t\t\tCustom_D_GlobalBlock(64, 128),\n\t\t\tCustom_D_GlobalBlock(128, 256),\n\t\t\tCustom_D_GlobalBlock(256, 512),\n\t\t\tCustom_D_GlobalBlock(512, 1024)\n\t\t)\n\t\tself.dense = nn.Linear(1024 * 2 * 2, 1)\n\t\tself.sigmoid = nn.Sigmoid()\n\n\t\tfor m in self.modules():\n\t\t\tif(isinstance(m, nn.Conv2d)):\n\t\t\t\tm.weight.data.normal_(0.0, 0.02)\n\t\t\t\tif(m.bias is not None):\n\t\t\t\t\tm.bias.data.zero_()\n\n\tdef forward(self, x):\n\t\tout = self.relu(self.conv(x))\n\t\tout = self.blocks(out)\n\t\tout = out.view(-1, 1024 * 2 * 2)\n\t\tout = self.dense(out)\n\t\tout = out.view(-1, 1, 1, 1)\n\t\tif(self.use_sigmoid):\n\t\t\tout = self.sigmoid(out)\n\n\t\treturn out\n\nclass Custom_D_ResBlock(nn.Module):\n\tdef __init__(self, nc, oc):\n\t\tsuper(Custom_D_ResBlock, self).__init__()\n\t\tself.nc = nc\n\t\tself.oc = oc\n\t\tself.conv1 = nn.Conv2d(nc, oc, 3, 1, 1)\n\t\tself.conv2 = nn.Conv2d(oc, oc, 3, 1, 1)\n\t\tself.relu = nn.LeakyReLU(0.2, inplace = True)\n\n\tdef forward(self, x):\n\t\tout = self.conv1(x)\n\t\tout = self.relu(out)\n\t\tout = self.conv2(out)\n\t\tout = out + x\n\t\tout = self.relu(out)\n\n\t\treturn out\n\nclass Custom_D_GlobalBlock(nn.Module):\n\tdef __init__(self, nc, oc):\n\t\tsuper(Custom_D_GlobalBlock, self).__init__()\n\t\tself.nc = nc\n\t\tself.oc = oc\n\t\tself.block1 = Custom_D_ResBlock(nc, nc)\n\t\tself.block2 = Custom_D_ResBlock(nc, nc)\n\t\tself.conv = nn.Conv2d(nc, oc, 4, 2, 1)\n\t\tself.relu = nn.LeakyReLU(0.2, inplace = True)\n\n\tdef forward(self, 
x):\n\t\tout = self.block1(x)\n\t\tout = self.block2(out)\n\t\tout = self.conv(out)\n\t\tout = self.relu(out)\n\n\t\treturn out\n\nclass Custom_G_ResBlock(nn.Module):\n\tdef __init__(self, nc, oc):\n\t\tsuper(Custom_G_ResBlock, self).__init__()\n\t\tself.nc = nc\n\t\tself.oc = oc\n\t\tself.conv1 = nn.Conv2d(nc, oc, 3, 1, 1, bias = False)\n\t\tself.conv2 = nn.Conv2d(oc, oc, 3, 1, 1, bias = False)\n\t\tself.bn1 = nn.BatchNorm2d(oc)\n\t\tself.bn2 = nn.BatchNorm2d(oc)\n\t\tself.relu = nn.ReLU(inplace = True)\n\n\tdef forward(self, x):\n\t\tout = self.conv1(x)\n\t\tout = self.bn1(out)\n\t\tout = self.relu(out)\n\n\t\tout = self.conv2(out)\n\t\tout = self.bn2(out)\n\t\tout = out + x\n\n\t\treturn out\n\nclass Custom_G_UpBlock(nn.Module):\n\tdef __init__(self, nc):\n\t\tsuper(Custom_G_UpBlock, self).__init__()\n\t\tself.nc = nc\n\t\tself.conv = nn.Conv2d(nc, nc * 4, 3, 1, 1, bias = False)\n\t\tself.pixelshuffle = nn.PixelShuffle(2)\n\t\tself.bn = nn.BatchNorm2d(nc)\n\t\tself.relu = nn.ReLU(inplace = True)\n\n\tdef forward(self, x):\n\t\t# (bs, nc, n, n)\n\t\tout = self.conv(x)\n\t\t# (bs, nc * 4, n, n)\n\t\tout = self.pixelshuffle(out)\n\t\t# (bs, nc, n * 2, n * 2)\n\t\tout = self.bn(out)\n\t\t# (bs, nc, n * 2, n * 2)\n\t\tout = self.relu(out)\n\t\t# (bs, nc, n * 2, n * 2)\n\n\t\treturn out\n\nclass Reshape(nn.Module):\n\tdef __init__(self, shape):\n\t\tsuper(Reshape, self).__init__()\n\t\tself.shape = shape\n\n\tdef forward(self, x):\n\t\tout = x.view(*self.shape)\n\t\treturn out\n\nclass Wave_D(nn.Module):\n\tdef __init__(self, nz):\n\t\tsuper(Wave_D, self).__init__()\n\t\tself.nz = nz\n\t\tself.linear = nn.Linear(self.nz, 512)\n\t\t#self.conv1 = nn.ConvTransposed1d(512, 256, 4)\n\n\nclass Wave_D(nn.Module):\n\tdef __init__(self, sz):\n\t\tsuper(Wave_D, self).__init__()\n\t\tself.sz = sz\n\t\t# sz should be divided by 4^5\n\t\tself.conv1 = nn.Conv1d(1, 64, 25, 4, 12)\n\t\tself.conv2 = nn.Conv1d(64, 128, 25, 4, 12)\n\t\tself.conv3 = nn.Conv1d(128, 256, 25, 4, 12)\n\t\tself.conv4 = nn.Conv1d(256, 512, 25, 4, 12)\n\t\tself.conv5 = nn.Conv1d(512, 512, 25, 4, 12)\n\t\tself.reshape = Reshape((-1, sz // (4**5) * 512))\n\t\tself.linear = nn.Linear(sz // (4**5) * 512, 1)\n\n\tdef forward(self, x):\n\t\t# (bs, nc, sz)\n\t\tout = self.conv1(x)\n\t\tout = self.conv2(out)\n\t\tout = self.conv3(out)\n\t\tout = self.conv4(out)\n\t\tout = self.conv5(out)\n\t\tout = self.reshape(out)\n\t\tout = self.linear(out)\n\t\t# (bs, 1)\n\t\treturn out\n\nclass Wave_G(nn.Module):\n\tdef __init__(self, nz, sz):\n\t\tsuper(Wave_G, self).__init__()\n\t\tself.nz = nz\n\t\tself.sz = sz\n\t\t# sz should be divided by 4^5\n\t\tself.conv5 = nn.ConvTranspose1d(64, 1, 25, 4, 12, output_padding = 3)\n\t\tself.conv4 = nn.ConvTranspose1d(128, 64, 25, 4, 12, output_padding = 3)\n\t\tself.conv3 = nn.ConvTranspose1d(256, 128, 25, 4, 12, output_padding = 3)\n\t\tself.conv2 = nn.ConvTranspose1d(512, 256, 25, 4, 12, output_padding = 3)\n\t\tself.conv1 = nn.ConvTranspose1d(512, 512, 25, 4, 12, output_padding = 3)\n\t\tself.reshape = Reshape((-1, 512, sz // (4**5)))\n\t\tself.linear = nn.Linear(nz, sz // (4**5) * 512)\n\n\tdef forward(self, x):\n\t\t# (bs, nz, 1, 1)\n\t\tout = x.view(x.shape[0], x.shape[1])\n\t\tout = self.linear(out)\n\t\tout = self.reshape(out)\n\t\tout = self.conv1(out)\n\t\tout = self.conv2(out)\n\t\tout = self.conv3(out)\n\t\tout = self.conv4(out)\n\t\tout = self.conv5(out)\n\t\t# (bs, 1, sz)\n\n\t\treturn 
out\n","sub_path":"architectures_experimental/unconditional.py","file_name":"unconditional.py","file_ext":"py","file_size_in_byte":14615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"356306067","text":"a = 1\nb = 2\nc = 3.5\nhi = \"Hello, World!\"\n\n#print(type(hi))\n\n#name = input(\"Hi there, what is your name?: \")\n\n#print(\"Hello, it's nice to meet you, \" + name)\n\n#age = input(\"how old are you?: \")\n\n#print(\"I'm \"+age+ \" too, \"+ name+\"!\")\n\n#dob = input(\"When were you born?: \")\n\n#print(\"Ahhh, an Aries I see!\")\n\nflat = 1\nhigh_street = 3\nTown = \"Tywyn\"\nCounty = \"Gwynedd\"\n\nprint(f\"My address is Flat {flat}, {high_street} High Street, {Town}, {County}\")\n#print(a==b)","sub_path":"Variables/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"210726033","text":"# coding=utf-8\nfrom OrcLib.LibApi import OrcBus\nfrom OrcLib.LibException import OrcApiModelFailException\n\nfrom WidgetDefMod import WidgetDefMod\nfrom WidgetDetMod import WidgetDetMod\n\n\nclass WidgetDefBus(OrcBus):\n\n def __init__(self):\n\n OrcBus.__init__(self, \"widget_def\", WidgetDefMod)\n\n self.__bus_widget_det = WidgetDetBus()\n\n def bus_list_search(self, p_cond):\n \"\"\"\n 查询\n :param p_cond:\n :return:\n \"\"\"\n result = []\n\n mode = p_cond[\"type\"] if \"type\" in p_cond else None\n widget_id = p_cond[\"id\"] if \"id\" in p_cond else None\n\n try:\n # 查询符合条件的整用例树,至根节点\n if \"all\" == mode:\n result = self._model.usr_search_all(p_cond)\n\n # 查询节点及子节点\n elif \"tree\" == mode:\n if widget_id is not None:\n result = self._model.usr_search_tree(widget_id)\n\n # 查询节点路径\n elif \"path\" == mode:\n if widget_id is not None:\n result = self._model.usr_search_path(widget_id)\n\n # 其他情况只查询符合条件的数据\n else:\n result = self._model.usr_search(p_cond)\n\n except Exception:\n self._logger.error(\"Search widget_def error, input: %s\" % p_cond)\n raise OrcApiModelFailException\n\n return result\n\n def bus_delete(self, p_id):\n \"\"\"\n 删除单一步骤\n :param p_id:\n :return:\n \"\"\"\n try:\n # 查找 widget_det_list\n widget_det_list = self.__bus_widget_det.bus_list_search(dict(widget_id=p_id))\n\n # 获取 widget_det_id_list\n widget_det_id_list = [widget_det.id for widget_det in widget_det_list]\n\n # 删除 widget_dets\n self.__bus_widget_det.bus_list_delete(widget_det_id_list)\n\n # 删除 widget_def\n self._model.usr_delete(p_id)\n\n except Exception:\n self._logger.error(\"Delete widget_def error, input: %s\" % p_id)\n raise OrcApiModelFailException\n\n return True\n\n\nclass WidgetDetBus(OrcBus):\n\n def __init__(self):\n\n OrcBus.__init__(self, \"widget_det\", WidgetDetMod)\n","sub_path":"OrcApi/Driver/Web/WidgetBus.py","file_name":"WidgetBus.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"101445493","text":"\"\"\"\nA filesystem foreign data wrapper.\n\nThis foreign data wrapper is based on StructuredDirectory, see\nhttps://github.com/Kozea/StructuredFS.\n\n\"\"\"\n\nfrom multicorn import ForeignDataWrapper\nfrom multicorn.fsfdw.structuredfs import StructuredDirectory\nfrom multicorn.utils import log_to_postgres\nfrom logging import ERROR, WARNING\nimport os\n\n\nclass FilesystemFdw(ForeignDataWrapper):\n \"\"\"A filesystem foreign data wrapper.\n\n The foreign data wrapper accepts the following options:\n\n root_dir -- The base dir for 
searching for files\n pattern -- The pattern for looking for files, starting from the\n root_dir. See :class:`StructuredDirectory`.\n content_column -- The column's name which contains the file content.\n (defaults to None)\n filename_column -- The column's name which contains the full filename.\n\n \"\"\"\n\n def __init__(self, options, columns):\n super(FilesystemFdw, self).__init__(options, columns)\n root_dir = options.get('root_dir')\n pattern = options.get('pattern')\n self.content_column = options.get('content_column', None)\n self.filename_column = options.get('filename_column', None)\n self.structured_directory = StructuredDirectory(root_dir, pattern)\n if self.filename_column:\n if self.filename_column not in columns:\n log_to_postgres(\"The filename column (%s) does not exist \"\n \"in the column list\" % self.filename_column,\n ERROR,\n \"You should try to create your table with an \"\n \"additional column: \\n\"\n \"%s character varying\" % self.filename_column)\n else:\n columns.remove(self.filename_column)\n if self.content_column:\n if self.content_column not in columns:\n log_to_postgres(\"The content column (%s) does not exist \"\n \"in the column list\" % self.content_column,\n ERROR,\n \"You should try to create your table with an \"\n \"additional column: \\n\"\n \"%s bytea\" % self.content_column)\n else:\n columns.remove(self.content_column)\n if len(self.structured_directory.properties) < len(columns):\n missing_columns = set(columns).difference(\n self.structured_directory.properties)\n log_to_postgres(\"Some columns are not mapped in the structured fs\",\n WARNING, \"Remove the following columns: %s \"\n % missing_columns)\n\n def execute(self, quals, columns):\n \"\"\"Execute method.\n\n The FilesystemFdw performs some optimizations based on the filesystem\n structure.\n\n \"\"\"\n cond = dict((qual.field_name, unicode(qual.value)) for\n qual in quals if qual.operator == '=')\n if self.filename_column in cond:\n item = self.structured_directory.from_filename(\n cond[self.filename_column])\n if item is not None and os.path.exists(item.full_filename):\n new_item = dict(item)\n if self.content_column:\n new_item[self.content_column] = item.read()\n if self.filename_column:\n new_item[self.filename_column] = item.filename\n yield new_item\n return\n else:\n cond.pop(self.content_column, None)\n for item in self.structured_directory.get_items(**cond):\n new_item = dict(item)\n if self.content_column and self.content_column in columns:\n new_item[self.content_column] = item.read()\n if self.filename_column and self.filename_column in columns:\n new_item[self.filename_column] = item.filename\n yield new_item\n","sub_path":"python/multicorn/fsfdw/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"101685855","text":"from conans import ConanFile, CMake\n\n\nclass LmglConan(ConanFile):\n name = \"lmgl\"\n version = \"0.0.1\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n generators = \"cmake_find_package\"\n requires = (\n \"lmlib/0.0.1\",\n (\"lmpl/0.0.1\", 'private'),\n 'Vulkan/1.1.92.1@lawrencem/stable',\n )\n build_requires = (\n 'Catch2/2.5.0@catchorg/stable',\n )\n\n def source(self):\n self.run(\"git clone https://github.com/Lawrencemm/lmgl.git\")\n\n def build(self):\n cmake = CMake(self)\n cmake.configure(source_folder=\"lmgl\", )\n cmake.build()\n cmake.install()\n\n def package(self):\n self.copy(\"*.h\", 
dst=\"include\", src=\"lmgl/include\")\n\n def package_info(self):\n self.cpp_info.libs = [\"lmgl\"]\n","sub_path":"lmgl/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"458232020","text":"from flask_restful import Resource\nimport requests\nimport time\nfrom flask import request\nfrom ..misc import config as _config\nconfig = _config.get().rcp\n\nclass Radio(Resource):\n\n request_id = 1\n\n def make_request(self, method, params={}, debug_title=\"unkn req\"):\n r = requests.post(config.url, json={\n \"method\": method,\n \"jsonrpc\": \"2.0\",\n \"params\": params,\n \"id\": Radio.request_id\n })\n Radio.request_id += 1\n print(\"%s: %s\" % (debug_title, r.status_code))\n return r\n\n def clear_queue(self):\n self.make_request(\"core.tracklist.clear\", debug_title=\"clear queue\")\n\n def queue_radio(self, radio_uri):\n r = self.make_request(\"core.library.browse\", {\"uri\": radio_uri}, \"get tracks from %s\" % radio_uri)\n tracks = list(map(lambda x: x['uri'], r.json()['result']))\n self.make_request(\"core.tracklist.add\", {\"uris\": tracks}, debug_title=\"queue tracks\")\n self.make_request(\"core.playback.play\", debug_title=\"play\")\n\n def play(self):\n self.make_request(\"core.playback.play\", debug_title=\"play\")\n\n def pause(self):\n self.make_request(\"core.playback.pause\", debug_title=\"pause\")\n\n def next(self):\n self.make_request(\"core.playback.next\", debug_title=\"next\")\n\n def previous(self):\n self.make_request(\"core.playback.previous\", debug_title=\"previous\")\n\n def set_volume(self, volume):\n self.make_request(\"core.mixer.set_volume\", {\"volume\": volume}, debug_title=\"volume = %d\" % volume)\n\n def fade_volume(self, start_volume=1, end_volume=30, duration=300, fade_interval=3):\n for i in range(0, duration + 1, fade_interval):\n volume = int(start_volume + i * float(end_volume - start_volume) / duration)\n self.make_request(\"core.mixer.set_volume\", {\"volume\": volume}, debug_title=\"volume = %d\" % volume)\n time.sleep(fade_interval)\n\n def put(self, action, data=None):\n if data is None:\n data = request.json\n if data is None:\n data = {}\n\n if action == \"clearQueue\":\n self.clear_queue()\n if action == \"queueRadio\":\n radio = data.get(\"radio\", \"gmusic:radio:7b807d80-5a6a-36d1-9017-9b6ac79371d3\")\n self.queue_radio(radio)\n\n if action == \"play\":\n self.play()\n if action == \"pause\":\n self.pause()\n if action == \"next\":\n self.next()\n if action == \"previous\":\n self.previous()\n\n if action == \"setVolume\":\n volume = data.get(\"volume\", 1)\n self.set_volume(volume)\n if action == \"fadeVolume\":\n start_volume = data.get(\"startVolume\", 1)\n end_volume = data.get(\"endVolume\", 30)\n duration = data.get(\"duration\", 300)\n fade_interval = data.get(\"fadeInterval\", 3)\n self.fade_volume(start_volume, end_volume, duration, fade_interval)\n\n if action == \"wakeSequence\":\n radio = data.get(\"radio\", \"gmusic:radio:7b807d80-5a6a-36d1-9017-9b6ac79371d3\")\n start_volume = data.get(\"startVolume\", 1)\n end_volume = data.get(\"endVolume\", 30)\n duration = data.get(\"duration\", 300)\n fade_interval = data.get(\"fadeInterval\", 3)\n self.set_volume(1)\n self.clear_queue()\n self.queue_radio(radio)\n self.fade_volume(start_volume, end_volume, duration, 
fade_interval)\n\n\n","sub_path":"smarthome/devices/_Radio.py","file_name":"_Radio.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"434102097","text":"import numpy as np\nfrom numpy import random as rnd\nfrom scipy.special import comb as choose\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n# from matplotlib import rc\n# rc('text', usetex=True)\n# rc('font', family='serif')\n# rcParams commented out, conflicts with pymc plot\n\n\n# Start by writing a function to calculate the likelihood function\ndef likelihood(n, h, p):\n '''Returns the probability of h heads in n trials, with probability of heads being p'''\n return choose(n, h, exact=True)*(p**h)*((1-p)**(n-h))\n\n\n# Write function to find posterior, assuming constant prior\ndef constant_posterior(n, h, p, c):\n '''Returns the posterior for a constant prior c'''\n return c*likelihood(n, h, p)\n\n\n# Write function to find posterior, assuming Gaussian prior\ndef gaussian_posterior(n, h, p, mean, var):\n '''Returns the posterior for a Gaussian prior'''\n return norm(mean, var).pdf(p)*likelihood(n, h, p)\n\n\n# Write function to provide coin toss data\ndef toss(p, n):\n '''Returns the number of heads in n tosses, with probability of heads p'''\n return sum(rnd.binomial(1, p, n))\n\n\ndef plot_constant(n, p, c, plot_label=None):\n '''Saves a plot of the posterior function for a coin toss, assuming contant prior'''\n h_array = np.linspace(0, 1, 200)\n post = constant_posterior(n, toss(p, n), h_array, c) \n log_post_arr = [np.log(k) for k in post]\n log_post_arr -= np.max(log_post_arr) # Normalization\n post_arr = np.exp(log_post_arr)\n plt.plot(h_array, post_arr, label = plot_label)\n plt.legend(prop={\"size\" : 11})\n\n\ndef plot_gaussian(n, p, mean, var, plot_label=None):\n '''Saves a plot of the posterior function for a coin toss, assuming Gaussian prior'''\n h_array = np.linspace(0, 1, 200)\n post = gaussian_posterior(n, toss(p, n), h_array, mean, var)\n log_post_arr = [np.log(k) for k in post]\n log_post_arr -= np.max(log_post_arr)\n post_arr = np.exp(log_post_arr)\n plt.plot(h_array, post_arr, label = plot_label)\n plt.legend(prop={\"size\" : 11})\n\nif __name__ == \"__main__\":\n for p in [0.3, 0.5, 0.9]:\n for n in [10, 50, 200]:\n plot_constant(n, p, 0.1, plot_label = \"n = %i\" % n)\n plt.xlabel('H')\n plt.ylabel('P(X|D)')\n plt.title('Actual H = %s, Constant Prior' % p)\n plt.tight_layout()\n plt.savefig('const_%s.png' % p)\n plt.close()\n\n for p in [0.3, 0.5, 0.9]:\n for offset in [0.01, 0.1, 0.3]:\n for var in [0.1, 0.3]:\n for n in [10, 50, 200]:\n mean = p - offset\n plot_gaussian(n, p, mean, var, plot_label = \"n = %i\" % n)\n plt.xlabel('H')\n plt.ylabel('P(X|D)')\n plt.title(r'Actual H = %s, $\\mu = %s$, $\\sigma = %s$, Gaussian Prior' % (p, mean, var))\n plt.tight_layout()\n plt.savefig('gaussian_%s_%s_%s.png' % (p, mean, var))\n plt.close()\n","sub_path":"Assignment5/bayesian.py","file_name":"bayesian.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"507022685","text":"#!/usr/bin/python\n# -*-coding: utf-8 -*-\n\nfrom math import sqrt\n\n\ndef isprime(number):\n if number == 1:\n return False\n for i in range(2, int(sqrt(number))+1):\n if number % i == 0:\n return False\n return True\n\n\ndef get_sequence(number):\n str_number = str(number)\n result = []\n for i in range(1, len(str_number)):\n 
result.append(int(str_number[i::]))\n result.append(int(str_number[0:i]))\n return result\n\n\n# Start search from 11 since one-digit prime numbers are not truncatable primes\nnumber = 11\nresult = []\n\nwhile True:\n if isprime(number):\n if all(map(isprime, get_sequence(number))):\n result.append(number)\n number += 1\n # there are 11 truncatable primes in total\n if len(result) == 11:\n print(sum(result))\n break\n","sub_path":"Project-Euler/src/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"537874465","text":"# GERALO AMELIO DE LIMA JUNIOR\n# UNIFIP - Patos\n# March 5, 2020\n# Question 07 - Write a program that computes the total area (m2) of a house\n# with 4 rooms. The user must enter the width and length of each room,\n# compute the individual area of each one and finally display the total area of the house.\n\ncomodo1_larg = int(input(\"Enter the width of room 1: \"))\ncomodo1_comp = int(input(\"Enter the length of room 1: \"))\ncomodo2_larg = int(input(\"Enter the width of room 2: \"))\ncomodo2_comp = int(input(\"Enter the length of room 2: \"))\ncomodo3_larg = int(input(\"Enter the width of room 3: \"))\ncomodo3_comp = int(input(\"Enter the length of room 3: \"))\ncomodo4_larg = int(input(\"Enter the width of room 4: \"))\ncomodo4_comp = int(input(\"Enter the length of room 4: \"))\n\n# area of each room = length * width\narea_comodo1 = comodo1_comp * comodo1_larg\narea_comodo2 = comodo2_comp * comodo2_larg\narea_comodo3 = comodo3_comp * comodo3_larg\narea_comodo4 = comodo4_comp * comodo4_larg\n\narea_total = area_comodo1 + area_comodo2 + area_comodo3 + area_comodo4\nprint(area_total)\n","sub_path":"Q7.py","file_name":"Q7.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"606853852","text":"from pico2d import *\nimport random\nimport game_framework\nimport main_state\nimport game_world\n\nfrom BehaviorTree import BehaviorTree, SelectorNode, SequenceNode, LeafNode\n\nTIME_PER_ACTION = 0.4\nACTION_PER_TIME = 1.0 / TIME_PER_ACTION\nFRAMES_PER_ACTION = 4\n\nclass Meteo:\n def __init__(self):\n self.x, self.y = 930, 800\n self.time = 0\n self.Image = load_image('./resource/object/meteo.png')\n self.bombImage = load_image('./resource/object/meteo_bomb.png')\n self.dir = -90\n self.moveFrame = 0\n self.bombFrame = 0\n self.damage = 100\n self.bomb = 1\n\n\n def update(self):\n self.time += game_framework.frame_time\n if self.time < 4:\n self.moveFrame = (self.moveFrame + ACTION_PER_TIME * FRAMES_PER_ACTION * game_framework.frame_time) % 2\n if self.time < 2:\n self.y += 200 * math.sin(math.radians(180 * self.time / 2)) * game_framework.frame_time\n else:\n self.y -= 230 * game_framework.frame_time\n\n self.dir = 180 * self.time / 5 - 90\n if self.time < 3:\n self.x -= 1200 * math.sin(math.radians(360 * self.time / 4)) * game_framework.frame_time\n else:\n self.x += 700 * game_framework.frame_time\n\n elif self.time < 8:\n if self.bombFrame < 3:\n self.bombFrame = (self.bombFrame + ACTION_PER_TIME * FRAMES_PER_ACTION * game_framework.frame_time / 4)\n else:\n game_world.remove_object(self)\n\n if self.time > 4 and self.bomb == 1:\n for i in range(5):\n fighters = main_state.get_fighters()\n self.bomb = 0\n for fighter in fighters:\n fighter.hp -= self.damage\n if fighter.hp <= 0:\n fighters.remove(fighter)\n\n\n\n\n\n def draw(self):\n if self.time < 2:\n 
self.Image.clip_composite_draw(700 * int(self.moveFrame), 0, 700, 700, math.radians(self.dir), ' ', self.x, self.y, 700 * pow(self.time, 3) / pow(4, 3), 700 * pow(self.time, 3) / pow(4, 3))\n elif self.time < 4:\n self.Image.clip_composite_draw(700 * int(self.moveFrame), 0, 700, 700, 0, 'h', self.x, self.y, 700 * pow(self.time, 3) / pow(4, 3), 700 * pow(self.time, 3) / pow(4, 3))\n elif self.time < 8:\n if self.time > 6:\n self.bombImage.opacify(1 - (self.time - 6) / 2)\n self.bombImage.clip_draw(int(self.bombFrame) * 700, 0, 700, 700, 850, 550)\n\n #draw_rectangle(*self.get_bb())\n\n def get_bb(self):\n return self.x - 350, self.y - 350, self.x + 350, self.y+350\n\n\n\n","sub_path":"meteo.py","file_name":"meteo.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"312775461","text":"import os\nimport csv\n\nbudget_data_csv = os.path.join('..' , 'python-challenge' , 'budget_data.csv')\n\nNumber_of_Months = 0\nNet_Total = 0\nAverage_Change = 0\nPrevious = 0\nNet_Change = 0\nTotal_Net_Change = 0\nGreatest_Change = 0\nGreatest_Decrease = 0\n\nwith open(budget_data_csv,'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter =\",\")\n header = next(csvfile)\n\n for row in csvreader:\n Net_Total = Net_Total +int(row[1])\n Number_of_Months = Number_of_Months + 1 \n if Number_of_Months > 1:\n Net_Change = int(row[1]) - Previous\n Previous = int(row[1])\n Total_Net_Change = Total_Net_Change + Net_Change \n \n if Net_Change > Greatest_Change:\n Greatest_Change = Net_Change\n Greatest_Date = row[0]\n if Net_Change < Greatest_Decrease:\n Greatest_Decrease = Net_Change\n Worst_Date = row[0]\n\nAverage_Change = Total_Net_Change / (Number_of_Months - 1)\n\nprint(\"Total Months: \" , Number_of_Months)\nprint(\"Total: $\",Net_Total)\nprint(\"Average Change: $\",round(Average_Change))\nprint (\"Greatest Increase in Profits: \", Greatest_Date , \"$\",Greatest_Change)\nprint (\"Greatest Decrease in Profits: \", Worst_Date , \"$\",Greatest_Decrease)\n\noutputPath = os.path.join('..','python-challenge', 'PyBank.txt')\nfile=open(outputPath, 'w')\nfile.write(\"Total Months: \" + str(Number_of_Months))\nfile.write(\"Total: $\" + str(Net_Total))\nfile.write(\"Average Change: $\" + str(round(Average_Change)))\nfile.write(\"Greatest Increase in Profits\" + str(Greatest_Date) + \"$\" + str(Greatest_Change))\nfile.write(\"Greatest Decrease in Profits\" + str(Worst_Date) + \"$\" + str(Greatest_Decrease))\nfile.close()","sub_path":"PyBank.py","file_name":"PyBank.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"614678183","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport os\nimport random\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib as tc\n\nimport main as graph\nfrom vocab import Vocab\nfrom recorder import Recorder\n\n# define global initial parameters\nglobal_params = tc.training.HParams(\n # whether share source and target word embedding\n shared_source_target_embedding=False,\n # whether share target and softmax word embedding\n shared_target_softmax_embedding=True,\n\n # decoding maximum length: source length + decode_length\n decode_length=50,\n # beam size\n beam_size=4,\n # length penalty during beam search\n decode_alpha=0.6,\n\n # parameters for rnnsearch\n # encoder and 
decoder hidden size\n hidden_size=1000,\n # source and target embedding size\n embed_size=620,\n # dropout value\n dropout=0.1,\n # label smoothing value\n label_smooth=0.1,\n # model name\n model_name=\"rnnsearch\",\n # gru, lstm, sru or atr\n cell=\"atr\",\n # whether use caencoder\n caencoder=True,\n # whether use layer normalization, it will be slow\n layer_norm=False,\n # notice that when opening the swap memory switch\n # you can train reasonably larger batch on condition\n # that your system will use much more cpu memory\n swap_memory=True,\n\n # allowed maximum sentence length\n max_len=100,\n # constant batch size at 'batch' mode for batch-based batching\n batch_size=80,\n # constant token size at 'token' mode for token-based batching\n token_size=3000,\n # token or batch-based data iterator\n batch_or_token='token',\n # batch size for decoding, i.e. number of source sentences decoded at the same time\n eval_batch_size=32,\n # whether shuffle batches during training\n shuffle_batch=True,\n\n # source vocabulary\n src_vocab_file=\"\",\n # target vocabulary\n tgt_vocab_file=\"\",\n # source train file\n src_train_file=\"\",\n # target train file\n tgt_train_file=\"\",\n # source development file\n src_dev_file=\"\",\n # target development file\n tgt_dev_file=\"\",\n # source test file\n src_test_file=\"\",\n # target test file\n tgt_test_file=\"\",\n # output directory\n output_dir=\"\",\n # output during testing\n test_output=\"\",\n\n # adam optimizer hyperparameters\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n # gradient clipping value\n clip_grad_norm=5.0,\n # initial learning rate\n lrate=1e-5,\n\n # maximum epochs\n epoches=10,\n # the effective batch size is: batch/token size * update_cycle\n # sequential update cycle\n update_cycle=1,\n # the number of gpus\n gpus=[0],\n\n # print information every disp_freq training steps\n disp_freq=100,\n # evaluate on the development file every eval_freq steps\n eval_freq=10000,\n # print sample translations every sample_freq steps\n sample_freq=1000,\n # saved checkpoint number\n checkpoints=5,\n # the maximum training steps, program with stop if epoches or max_training_steps is metted\n max_training_steps=1000,\n\n # number of threads for threaded reading, seems useless\n nthreads=6,\n # buffer size controls the number of sentences readed in one time,\n buffer_size=100,\n # a unique queue in multi-thread reading process\n max_queue_size=100,\n # random control, not so well for tensorflow.\n random_seed=1234,\n)\n\nflags = tf.flags\nflags.DEFINE_string(\"parameters\", \"\", \"Additional Mergable Parameters\")\nflags.DEFINE_string(\"mode\", \"train\", \"train or test\")\n\n\ndef save_parameters(params, output_dir):\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MkDir(output_dir)\n\n param_name = os.path.join(output_dir, \"param.json\")\n with tf.gfile.Open(param_name, \"w\") as writer:\n tf.logging.info(\"Saving parameters into {}\"\n .format(param_name))\n writer.write(params.to_json())\n\n\ndef load_parameters(params, output_dir):\n param_name = os.path.join(output_dir, \"param.json\")\n param_name = os.path.abspath(param_name)\n\n if tf.gfile.Exists(param_name):\n tf.logging.info(\"Loading parameters from {}\"\n .format(param_name))\n with tf.gfile.Open(param_name, 'r') as reader:\n json_str = reader.readline()\n params.parse_json(json_str)\n return params\n\n\ndef setup_recorder(params):\n recorder = Recorder()\n # This is for early stopping, currectly I did not use it\n recorder.bad_counter = 0\n recorder.estop = False\n\n 
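# training-progress bookkeeping; restored below from record.json when a saved recorder exists\n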
recorder.lidx = -1 # local data index\n recorder.step = 0 # global step\n recorder.epoch = 0 # epoch number\n recorder.history_scores = []\n recorder.valid_script_scores = []\n\n # trying to load saved recorder\n record_path = os.path.join(params.output_dir, \"record.json\")\n record_path = os.path.abspath(record_path)\n if tf.gfile.Exists(record_path):\n recorder.load_from_json(record_path)\n\n params.add_hparam('recorder', recorder)\n return params\n\n\ndef main(_):\n # set up logger\n tf.logging.set_verbosity(tf.logging.INFO)\n\n params = global_params\n\n # try loading parameters\n # priority: command line > saver > default\n # 1. load latest path to load parameters\n params.parse(flags.FLAGS.parameters)\n params = load_parameters(params, params.output_dir)\n # 2. refine with command line parameters\n params.parse(flags.FLAGS.parameters)\n\n # set up random seed\n random.seed(params.random_seed)\n np.random.seed(params.random_seed)\n tf.set_random_seed(params.random_seed)\n\n # loading vocabulary\n tf.logging.info(\"Begin Loading Vocabulary\")\n start_time = time.time()\n params.src_vocab = Vocab(params.src_vocab_file)\n params.tgt_vocab = Vocab(params.tgt_vocab_file)\n tf.logging.info(\"End Loading Vocabulary, Source Vocab Size {}, \"\n \"Target Vocab Size {}, within {} seconds\"\n .format(params.src_vocab.size(), params.tgt_vocab.size(),\n time.time() - start_time))\n\n mode = flags.FLAGS.mode\n if mode == \"train\":\n # save parameters\n save_parameters(params, params.output_dir)\n\n # load the recorder\n params = setup_recorder(params)\n\n graph.train(params)\n elif mode == \"test\":\n graph.evaluate(params)\n else:\n tf.logging.error(\"Invalid mode: {}\".format(mode))\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"649191599","text":"import numpy as np\nimport numpy.random as randy\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n# make fake data -------------------\nndata = 25\nnoise = randy.randn(ndata)\nfake_x = np.linspace(0, 10, ndata)\nfake_y = np.linspace(0, 10, ndata) + (noise)\nfake_x = fake_x.reshape((ndata, 1))\nfake_y = fake_y.reshape((ndata, 1))\n\n# make tensorflow model ------------\nx_in = tf.placeholder(tf.float32) \ny_in = tf.placeholder(tf.float32)\n\n# linear model\nrand_init = tf.contrib.layers.xavier_initializer_conv2d(\n uniform=True, seed=None, dtype=tf.float32)\n\n# use 'get_variable' to make tensorflow variables\nW = tf.get_variable(name=\"w\", dtype=tf.float32, initializer=rand_init, \n shape=(1))\nb = tf.get_variable(name=\"b\", dtype=tf.float32, initializer=rand_init, \n shape=(1))\n\n# define relationship\nout = x_in*W + b\n\n# loss\nloss = tf.reduce_mean(tf.square(out - y_in))\n\n# minimization operation\nlearn_rate = 0.001\nfit_line = tf.train.GradientDescentOptimizer(learn_rate).minimize(loss)\n\n# run\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\nsess.run(out, {x_in: fake_x, y_in: fake_y})\n\n# fit\nfor i in range(1000):\n sess.run(fit_line, {x_in: fake_x, y_in: fake_y})\n\n# print results\npred = sess.run(out,{x_in: fake_x, y_in: fake_y})\nplt.plot(fake_x,pred,fake_x,fake_y)\nplt.show()\n\n\n\n\n\n","sub_path":"regress.py","file_name":"regress.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"277358452","text":"import torch\nimport 
numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython.display import display, clear_output\n\nlearning_rate=0.01\ndim_s=2\nEMAX=100#1000\n\nclass GP_MPC:\n def __init__(self,gp_propagator,evaluate,len_horizon,waypoints):\n self.waypoints = waypoints\n self.len_horizon = len_horizon\n self.gp_propagator = gp_propagator\n self.evaluate = evaluate\n \n def run(self,state_init,controls_init,vs,dt,start,_vars):\n start = torch.LongTensor([start])\n state_init = state_init.data.clone()\n controls = torch.nn.Parameter(controls_init.data.clone())\n vs = torch.LongTensor(vs.data.clone())\n if len(dt)==1:\n dt = torch.ones(self.len_horizon).view(-1,1)*dt\n opt = torch.optim.Adam([controls],lr=learning_rate)\n\n # refs are the reference points: one per horizon step, spaced at the speed interval, shifted along by the start index\n # refs = self.waypoints[vs[1:]+start]\n if (start+self.len_horizon) <= len(self.waypoints[:,0])-1:\n refs = self.waypoints[vs[1:]+start]\n else:\n new_waypoints = torch.cat((self.waypoints, self.waypoints))\n refs = new_waypoints[vs[1:]+start]\n print(start)\n\n _,sigma_init = self.gp_propagator.initialize(state_init,controls[0])\n\n idx = np.arange(EMAX)\n ans = np.zeros(len(idx))\n for epoch in range(EMAX):\n controls_dt = torch.cat([controls,dt],dim=1)\n state = state_init\n \n path = []\n if epoch == EMAX-1:\n vars_ = []\n sigma = sigma_init\n for t in range(self.len_horizon):\n state,sigma = self.gp_propagator.forward(state,controls_dt[t],sigma,True)\n sigma_xy = sigma[:dim_s,:dim_s]\n _,eigs,_ = sigma_xy.svd() # svd: singular value decomposition\n var = eigs[0] # the largest eigenvalue of sigma_xy \n path.append(state.view(1,-1))\n vars_.append(var.view(1))\n path = torch.cat(path,dim=0)\n vars_ = torch.cat(vars_,dim=0)\n else:\n # propagate the dynamics model for each step of the horizon\n for t in range(self.len_horizon):\n state,_ = self.gp_propagator.forward(state,controls_dt[t],None,False)\n path.append(state.view(1,-1))\n path = torch.cat(path,dim=0)\n\n # MPC optimization step\n opt.zero_grad()\n loss = self.evaluate(path,refs,vs,_vars) \n ans[epoch] = loss.data.numpy()\n loss.backward(retain_graph=True)\n opt.step()\n opt.zero_grad()\n\n controls_dt = torch.cat([controls,dt],dim=1)\n return controls_dt,path,vars_.data.clone()\n \n \n \n \nclass MPC:\n def __init__(self,model,evaluate,len_horizon,waypoints):\n\n self.waypoints = waypoints\n self.len_horizon = len_horizon\n self.model = model\n self.evaluate = evaluate\n \n def run(self,state_init,controls_init,vs,dt,start):\n start = torch.LongTensor([start])\n state_init = state_init.data.clone()\n controls = torch.nn.Parameter(controls_init.data.clone())\n vs = torch.LongTensor(vs.data.clone())\n if len(dt)==1:\n dt = torch.ones(self.len_horizon).view(-1,1)*dt\n opt = torch.optim.Adam([controls],lr=learning_rate)\n\n # refs = self.waypoints[vs[1:]+start]\n if (start+self.len_horizon) <= len(self.waypoints[:,0])-1:\n refs = self.waypoints[vs[1:]+start]\n else:\n new_waypoints = torch.cat((self.waypoints, self.waypoints))\n refs = new_waypoints[vs[1:]+start]\n\n idx = np.arange(EMAX)\n ans = np.zeros(len(idx))\n\n # fig2, ax2 = plt.subplots(figsize=(20,8))\n # x_ = np.arange(0,self.len_horizon)\n # y_ = np.arange(0,self.len_horizon)\n # l1, = ax2.plot(x_, y_, 'o', color='green')\n # l2, = ax2.plot(self.waypoints[:,0], self.waypoints[:,1], color='k')\n # ax2.set_aspect('equal', 'box')\n\n for epoch in range(EMAX):\n controls_dt = torch.cat([controls,dt],dim=1)\n state = state_init\n path = []\n for t in range(self.len_horizon):\n # state = self.model(state,controls_dt[t])\n state = self.model.forward(state,controls_dt[t])\n 
path.append(state.view(1,-1))\n path = torch.cat(path,dim=0)\n \n opt.zero_grad()\n \n loss = self.evaluate(path,refs,vs) \n loss.backward()\n opt.step()\n ans[epoch] = loss.data.numpy()\n\n # l1.set_data(path[:,0].detach().numpy(), path[:,1].detach().numpy())\n # clear_output(wait=True)\n # display(fig2)\n # plt.pause(0.01)\n controls_dt = torch.cat([controls,dt],dim=1)\n # plt.close()\n return controls_dt,path","sub_path":"src/mpc.py","file_name":"mpc.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"312972671","text":"'''\nGiven an integer n, return the number of trailing zeroes in n!.\n\nExample 1:\n\nInput: 3\nOutput: 0\nExplanation: 3! = 6, no trailing zero.\n'''\n\nclass Solution:\n def trailingZeroes(self, n: int) -> int:\n a=5\n s=0\n flag=0\n while (n//a)!=0:\n s+=n//a\n a*=5\n return s\n","sub_path":"61_TrailingZeroes_MATH.py","file_name":"61_TrailingZeroes_MATH.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"453751107","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport cv2\n\nfrom matplotlib.image import imread\n\nroot_data_dir = os.path.abspath('./chest_xray')\ntest_path = root_data_dir + '/test'\ntrain_path = root_data_dir + '/train'\nvalidation_path = root_data_dir + '/val'\nnormal_path = '/NORMAL'\npneumonia_path = '/PNEUMONIA'\n\nflip_transformation = 'flip'\nrotation_transformation = 'rotation'\nequalize_data_transformation = 'rand_flip_transform'\nscale_transformation = 'scale'\ntranslation_transformation = 'translate'\nnoise_transformation = 'noise'\n\n\ndef cls():\n # Simple helper function to clear the console\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef update_progress(progress, description):\n # A simple progress bar to indicate how much work has been completed during long run-times\n bar_length = 20\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n if progress < 0:\n progress = 0\n if progress >= 1:\n progress = 1\n block = int(round(bar_length * progress))\n cls()\n text = description + \\\n \" [{0}] {1:.1f}%\".format(\n \"#\" * block + \"-\" * (bar_length - block), progress * 100)\n print(text, flush=True)\n\n\ndef perform_pre_processing_tasks(tasks, description, total_augmentations):\n for task in tasks:\n apply_transformation_to_folder(task, description, total_augmentations)\n\n\ndef equalize_data_with_rand_flips(root_path):\n normal_data_path = root_path + normal_path\n normal_files = get_num_files(normal_data_path)\n pneumonia_data_path = root_path + pneumonia_path\n pneumonia_files = get_num_files(pneumonia_data_path)\n\n diff = abs(normal_files - pneumonia_files)\n if normal_files < pneumonia_files:\n task = [root_path, normal_path,\n equalize_data_transformation]\n else:\n task = [root_path, pneumonia_path,\n equalize_data_transformation]\n while(diff > 0):\n diff = balance_dataset(task, diff)\n\n\ndef cache_balanced_dataset(folder_path):\n # File-paths of un-augmented data saved on filesystem and dynamically loaded into arrays to increase performance\n balance_dataset = np.array([])\n\n for image_filename in os.listdir(folder_path):\n if ('.jpeg' in image_filename):\n file_location = os.path.join(folder_path + '/' + image_filename)\n balance_dataset = np.append(balance_dataset, file_location)\n if (normal_path in folder_path):\n file_name = 'normal_cache.txt'\n 
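The trailingZeroes solution just above counts factors of 5 in n! (Legendre's formula: floor(n/5) + floor(n/25) + ...). A standalone cross-check against math.factorial, as a sketch (the helper name here is made up):

import math

def trailing_zeroes(n):
    # sum the multiples of 5, 25, 125, ... that divide n!
    count, p = 0, 5
    while p <= n:
        count += n // p
        p *= 5
    return count

for n in (3, 5, 25, 100):
    s = str(math.factorial(n))
    assert trailing_zeroes(n) == len(s) - len(s.rstrip("0"))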
else:\n file_name = 'pneumonia_cache.txt'\n np.savetxt(file_name, balance_dataset, delimiter=' ', fmt='%s')\n\n\ndef augment_training_data():\n root_data_path = train_path\n # Defining the data paths for the given root dir\n normal_data_path = root_data_path + normal_path\n pneumonia_data_path = root_data_path + pneumonia_path\n\n original_normal_files_num = get_num_files(normal_data_path)\n original_pneumonia_files_num = get_num_files(pneumonia_data_path)\n\n num_augmentations = round(original_pneumonia_files_num * (1/5))\n if original_normal_files_num != original_pneumonia_files_num:\n # Equalizing the data before applying transformations\n equalize_data_with_rand_flips(root_data_path)\n # Applying transformations\n cache_balanced_dataset(normal_data_path)\n cache_balanced_dataset(pneumonia_data_path)\n perform_pre_processing_tasks(get_flip_images_tasks(\n root_data_path), 'Flipping Images', num_augmentations)\n perform_pre_processing_tasks(get_rotate_images_tasks(\n root_data_path), 'Rotating Images', num_augmentations)\n perform_pre_processing_tasks(get_scale_images_tasks(\n root_data_path), 'Scaling Images', num_augmentations)\n perform_pre_processing_tasks(get_translation_image_tasks(\n root_data_path), 'Translating Images', num_augmentations)\n perform_pre_processing_tasks(get_noise_image_tasks(\n root_data_path), 'Adding Noise to Images', num_augmentations)\n\n\ndef get_num_files(folder):\n numFiles = 0\n for image_filename in os.listdir(folder):\n if image_filename.endswith('.jpeg'):\n numFiles += 1\n return numFiles\n\n\ndef check_data_exists():\n if not os.path.exists(root_data_dir) or \\\n not os.path.exists(test_path) or \\\n not os.path.exists(train_path) or \\\n not os.path.exists(validation_path):\n print(\n 'dataset is not within this directory -- please resolve before continuing....')\n print('dataset should be included at his notebook\\'s level, with the following structure:')\n print('model.ipynb\\squeezeNet.ipynb\\ncore/\\nchest_xray/\\n\\ttest/\\n\\\\t\\tNORMAL/\\n\\t\\tPNEUMONIA/\\n\\ttrain/\\n\\t\\tNORMAL/\\n\\t\\tPNEUMONIA/\\n\\tval/\\n\\t\\tNORMAL/\\n\\t\\tPNEUMONIA/')\n raise AttributeError('Data not found')\n\n print('test data location = ' + test_path + \"\\ntraining data location = \" +\n train_path + \"\\nvalidation data location = \" + validation_path)\n\n# Wrapper functions to generate 'tasks' which describe the directory needed to be augmented, and the desired augmentation\n\n\ndef get_flip_images_tasks(root_dir):\n tasks = [\n [root_dir, normal_path, flip_transformation],\n [root_dir, pneumonia_path, flip_transformation]\n ]\n return tasks\n\n\ndef get_rotate_images_tasks(root_dir):\n tasks = [\n [root_dir, normal_path, rotation_transformation],\n [root_dir, pneumonia_path, rotation_transformation],\n ]\n return tasks\n\n\ndef get_scale_images_tasks(root_dir):\n tasks = [\n [root_dir, normal_path, scale_transformation],\n [root_dir, pneumonia_path, scale_transformation]\n ]\n return tasks\n\n\ndef get_translation_image_tasks(root_dir):\n tasks = [\n [root_dir, normal_path, translation_transformation],\n [root_dir, pneumonia_path, translation_transformation]\n ]\n return tasks\n\n\ndef get_noise_image_tasks(root_dir):\n tasks = [\n [root_dir, normal_path, noise_transformation],\n [root_dir, pneumonia_path, noise_transformation]\n ]\n return tasks\n\n\ndef balance_dataset(task, num_transformations):\n total_transformations = num_transformations\n image_folder = task[0]\n path = task[1]\n transformation = task[2]\n\n for image_filename in 
os.listdir(image_folder + path):\n transformed_images = []\n if (image_filename.endswith('.jpeg') and num_transformations > 0):\n img = imread(image_folder + path + '/' + image_filename)\n # Converting grey-scale to RGB -- needed for tf.convert_to_tensor()\n if (len(img.shape) < 3):\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n\n tf_img = tf.convert_to_tensor(img)\n image_filename = image_filename.split('.')[0]\n # Adding the images to the dataset\n if (transformation == equalize_data_transformation and not is_added_image(image_filename)):\n transformation_type = np.random.randint(low=0, high=2) # high is exclusive, so this draws 0 or 1 and both flip branches are reachable\n if (transformation_type == 0):\n transformed_images += [[tf.image.random_flip_up_down(\n tf_img), image_filename + '-equalize' + str(num_transformations)]]\n else:\n transformed_images += [[tf.image.random_flip_left_right(\n tf_img), image_filename + '-equalize' + str(num_transformations)]]\n num_transformations -= 1\n\n # Saving the specified transform to the file-system and caching their locations\n for transformed_image in transformed_images:\n img, imgName = transformed_image\n img_to_save = tf.io.encode_jpeg(img)\n file_path = os.path.join(\n image_folder+path + '/'+imgName+'.jpeg')\n tf.io.write_file(file_path, img_to_save)\n update_progress((total_transformations - num_transformations) /\n total_transformations, 'Balancing Data: ')\n if (num_transformations > 0):\n return num_transformations\n else:\n return 0\n\n\ndef apply_transformation_to_folder(task, description, num_transformations=-1):\n # Extracting the info from the task\n total_transformations = num_transformations\n image_folder = task[0]\n path = task[1]\n transformation = task[2]\n\n update_progress(0, description)\n\n # Loading the un-augmented file-paths\n if normal_path in path:\n images_locations = np.loadtxt(fname='normal_cache.txt', dtype='str')\n description += ' (Normal): '\n else:\n images_locations = np.loadtxt(fname='pneumonia_cache.txt', dtype='str')\n description += ' (Pneumonia): '\n\n # Applying specified transform\n for file_path in images_locations:\n # Getting the image name from the full filepath saved\n if ('.jpeg' in file_path):\n image_filename = file_path.split('/')[-1]\n transformed_images = []\n # Sanity check to determine if we are dealing with the correct data\n if (image_filename.endswith('.jpeg') and not is_transformation(image_filename) and (num_transformations == -1 or num_transformations > 0)):\n img = imread(file_path)\n\n # Converting grey-scale to RGB -- needed for tf.convert_to_tensor()\n if (len(img.shape) < 3):\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n\n tf_img = tf.convert_to_tensor(img)\n image_filename = image_filename.split('.')[0]\n\n # Flip Transform\n if (transformation == flip_transformation):\n transformed_images += [[tf.image.flip_up_down(tf_img), image_filename + '-FlipUD'], [\n tf.image.flip_left_right(tf_img), image_filename + '-FlipLR']]\n # 180DEG Transformation\n if (transformation == rotation_transformation):\n # k = number of anti-clockwise 90 degree rotations\n transformed_images += [\n [tf.image.rot90(tf_img, k=2), image_filename + '-Rotation180']]\n # Scale Transformation\n if (transformation == scale_transformation):\n image_height = img.shape[0]\n image_width = img.shape[1]\n scale = 0.1\n # Tensor for up-scaled image dimensions\n scaled_dims_up_arr = np.array(\n [image_height*scale + image_height, image_width*scale + image_width])\n scale_dims_up_tensor = tf.convert_to_tensor(\n scaled_dims_up_arr, dtype=tf.int32)\n # Tensor for down-scaled image 
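On the randint fix above: numpy draws from the half-open interval [low, high), so high=1 can only ever return 0. A tiny standalone illustration (not from the source file):

import numpy as np

np.random.seed(0)
always_zero = [np.random.randint(low=0, high=1) for _ in range(100)]
assert set(always_zero) == {0}            # high is exclusive: only 0 is possible
coin = np.random.randint(low=0, high=2, size=100)
assert set(coin.tolist()) <= {0, 1}       # with high=2 both flip branches can occur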
dimensions\n scaled_dims_down_arr = np.array(\n [abs(image_height*scale - image_height), abs(image_width*scale - image_width)])\n scale_dims_down_tensor = tf.convert_to_tensor(\n scaled_dims_down_arr, dtype=tf.int32)\n\n # Resizing the image by +10% -- tensor is of type float\n resized_image_up = tf.image.resize(\n tf_img, size=scale_dims_up_tensor,\n method=tf.image.ResizeMethod.BILINEAR, preserve_aspect_ratio=True)\n resized_image_up_cropped = tf.image.crop_to_bounding_box(\n resized_image_up, 0, 0, image_height, image_width)\n # Resizing the image by -10% -- tenfor is also of type float\n resized_image_down = tf.image.resize(\n tf_img, size=scale_dims_down_tensor,\n method=tf.image.ResizeMethod.BILINEAR, preserve_aspect_ratio=True)\n # Casting Tensor to type int so it can be saved to the file system\n resized_image_up_int = tf.cast(\n resized_image_up_cropped, tf.uint8)\n resized_image_down_int = tf.cast(\n resized_image_down, tf.uint8)\n\n # Cropping the image back to its original shape\n transformed_images += [[resized_image_up_int, image_filename+'-scaledUp'], [\n resized_image_down_int, image_filename+'-scaledDown']]\n if transformation == translation_transformation:\n # Arrays and tensors defined for resizing later\n original_image_size = np.array(\n [img.shape[0], img.shape[1]])\n original_image_size_tensor = tf.convert_to_tensor(\n original_image_size, dtype=tf.int32)\n # Performing a random translation with 10% margin for width and height\n translated_image = tf.keras.preprocessing.image.random_shift(\n tf_img, 0.1, 0.1)\n # Resize image to original size\n translated_image_resized = tf.image.resize(\n translated_image, size=original_image_size_tensor, method=tf.image.ResizeMethod.BILINEAR, preserve_aspect_ratio=True)\n translated_image_cropped_int = tf.cast(\n translated_image_resized, tf.uint8)\n\n transformed_images += [[translated_image_cropped_int,\n image_filename+'-randomTranslate']]\n if transformation == noise_transformation:\n # Creating gaussian noise\n noise = tf.random.normal(shape=tf.shape(\n tf_img), mean=0.0, stddev=1.0, dtype=tf.float32)\n noise_int = tf.cast(noise, tf.uint8)\n # Adding noise to image\n noisy_tensor = tf.add(tf_img, noise_int)\n transformed_images += [[noisy_tensor,\n image_filename+'-noise']]\n # Saving the specified transform to the file-system\n for transformed_image in transformed_images:\n img, imgName = transformed_image\n img_to_save = tf.io.encode_jpeg(img)\n tf.io.write_file(os.path.join(\n image_folder+path + '/'+imgName+'.jpeg'), img_to_save)\n update_progress(\n (total_transformations - num_transformations) / total_transformations, description)\n if (num_transformations == 0):\n update_progress(1, description)\n break\n elif (num_transformations != -1):\n num_transformations -= 1\n\n\ndef is_transformation(image_filename):\n return is_transformed_image(image_filename, flip_transformation) or \\\n is_transformed_image(image_filename, rotation_transformation) or \\\n is_transformed_image(image_filename, scale_transformation) or \\\n is_transformed_image(image_filename, translation_transformation) or \\\n is_transformed_image(image_filename, noise_transformation)\n\n\ndef is_transformed_image(image_filename, transformation):\n name_with_extension = image_filename.split('/')[-1]\n image_name_lowercase = name_with_extension.split('.')[0].lower()\n return transformation in image_name_lowercase\n\n\ndef is_added_image(image_filename):\n name_with_extension = image_filename.split('/')[-1]\n image_name_lowercase = 
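A caveat on the noise branch just above: casting standard-normal floats straight to uint8 truncates almost all of the noise to 0. A common alternative, sketched here under the assumption of 8-bit images (the stddev of 8.0 and the function name are arbitrary), adds the noise in float space and clips before casting back:

import tensorflow as tf

def add_gaussian_noise(image_u8, stddev=8.0):
    # add noise in float space, clip to the valid 8-bit range, then cast back
    img_f = tf.cast(image_u8, tf.float32)
    noisy = img_f + tf.random.normal(tf.shape(img_f), mean=0.0, stddev=stddev)
    return tf.cast(tf.clip_by_value(noisy, 0.0, 255.0), tf.uint8)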
name_with_extension.split('.')[0].lower()\n return 'equalize' in image_name_lowercase\n\n\ncheck_data_exists()\naugment_training_data()\n","sub_path":"core/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":15663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"330359373","text":"import easygui as ui\nfieldnames=['*用户名','*真实姓名','固话','QQ','*E-mail','*手机号码'] # labels: username, real name, landline, QQ, e-mail, mobile; '*' marks a required field\nfieldvalues=[]\nwhile 1:\n error=''\n temp=ui.multenterbox(fields=fieldnames,values=fieldvalues)\n if temp==None:\n break\n \n fieldvalues=[]\n for each in temp:\n fieldvalues.append(''.join(each.split()))\n \n for i in range(len(fieldnames)):\n if fieldnames[i][0]=='*' and fieldvalues[i]=='':\n error+=fieldnames[i]+'为必填项\n' # '为必填项' = 'is a required field'\n if error!='':\n ui.msgbox(error)\n else:\n with open('用户信息.txt','w') as f: # '用户信息' = 'user information'\n for i in range(len(fieldnames)):\n f.writelines(fieldnames[i]+':'+fieldvalues[i]+'\\n')\n break\n \n \n \n\n\n\n\n","sub_path":"35/输入用户信息.py","file_name":"输入用户信息.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"353317882","text":"from PyQt5.QtWidgets import QWidget, QStackedWidget\nfrom PyQt5.QtWidgets import QHBoxLayout, QGridLayout, QVBoxLayout, QListWidget, QListWidgetItem, QLayout\nfrom PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtWidgets import QMainWindow\nfrom PyQt5.QtWidgets import QPushButton\n#from PyQt5.QtGui import QColor\nfrom PyQt5.QtCore import Qt, QSize\nfrom gui.svg.gui_element_builder import *\nfrom subscreens.clock import Clock\nfrom gui.button_full import Button_full\nfrom gui.header_right import Header_right\n\nfrom subscreens.placeholder import Placeholder\n\nclass MainWindow(QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n self.setWindowTitle(\"\")\n self.screens_config = []\n self.current_screen = 0\n self.sub_screens = {}\n self.button = {}\n self.central_widget = QStackedWidget()\n self.number_of_subs = 0\n self.main_layout = QGridLayout()\n self.gui_element_builder = GuiElementsBuilder()\n\n def say_hello(self):\n print(\"Button clicked, Hello!\")\n\n def set_central_widget2(self, widget: QWidget):\n source_button = self.sender()\n print(\"set_central_widget2 WidgetName: \" + widget.getName())\n index = self.central_widget.indexOf(widget) # indexOf maps a widget back to its stack index (widget() goes the other way)\n self.central_widget.setCurrentIndex(index)\n\n def set_central_widget(self):\n source_button = self.sender()\n for i in range(0, self.number_of_subs):\n if self.central_widget.widget(i).getName() == source_button.text():\n self.central_widget.setCurrentIndex(i)\n\n def change_widget(self, widget: QWidget, direction: int):\n #print(\"len: \" + str(len(self.screens_config[\"sub\"])))\n max_screen = len(self.screens_config[\"sub\"])\n if direction == 0:\n self.central_widget.setCurrentIndex((self.central_widget.currentIndex() - 1) % max_screen)\n #self.current_screen = (self.current_screen + 1) % max_screen\n elif direction == 1:\n self.central_widget.setCurrentIndex((self.central_widget.currentIndex() + 1) % max_screen)\n #self.current_screen = (self.current_screen - 1) % max_screen\n else:\n print(\"that is not a valid direction\")\n\n #Color should be set in the widget itself, or already at creation time
\n #for items in self.screens_config['sub']:\n #b = QColor(self.screens_config['sub'][self.current_screen][\"Background\"])\n #b = QColor(self.screens_config['sub'][self.central_widget.currentIndex()][\"Background\"])\n #p = self.central_widget.palette()\n #p.setColor(self.central_widget.backgroundRole(), b)\n #self.central_widget.setPalette(p)\n #self.central_widget.setAutoFillBackground(True)\n\n\n def init_with_config(self, config: dict):\n self.screens_config = config\n\n # TODO: put in delegation or inheritance\n #Set Title\n if 'name' not in config:\n title = str(config['main']['name'])\n self.setWindowTitle(title)\n # Set Resolution\n window_width = config['main'][\"resolution\"][0]\n window_height = config['main'][\"resolution\"][1]\n button_width = config['main'][\"button-size\"][0]\n button_height = config['main'][\"button-size\"][1]\n self.number_of_subs = len(config['sub'])\n else:\n title = str(config['name'])\n self.setWindowTitle(title)\n # Set Resolution\n window_width = config[\"resolution\"][0]\n window_height = config[\"resolution\"][1]\n button_width = config[\"button-size\"][0]\n button_height = config[\"button-size\"][1]\n self.number_of_subs = len(self.screens_config[\"sub\"])\n self.setFixedSize(window_width, window_height)\n\n main_widget = QWidget()\n main_widget.setLayout(self.main_layout)\n vbox_menu = QVBoxLayout()\n vbox_menu.setSizeConstraint(QLayout.SetFixedSize)\n\n system_color = \"#ffd966\"\n\n # Header\n self.main_layout.addWidget(self.gui_element_builder.get_svg_widget(Gui_Element.TOP_LEFT, 0, 0), 0, 0)\n self.main_layout.addWidget(self.gui_element_builder.get_svg_widget(Gui_Element.BUTTON, 30, 650), 0, 1, Qt.AlignTop)\n self.main_layout.addWidget(self.gui_element_builder.get_svg_widget(Gui_Element.END_RIGHT, 0, 0), 0, 3, Qt.AlignTop)\n\n # Menu\n self.main_layout.addLayout(vbox_menu, 1, 0)\n\n # Footer\n self.main_layout.addWidget(self.gui_element_builder.get_svg_widget(Gui_Element.BOTTOM_LEFT, 0, 0), 2, 0)\n self.main_layout.addWidget(self.gui_element_builder.get_svg_widget(Gui_Element.BUTTON, 30, 650), 2, 1, Qt.AlignBottom)\n self.main_layout.addWidget(self.gui_element_builder.get_svg_widget(Gui_Element.END_RIGHT, 0, 0), 2, 3, Qt.AlignBottom)\n\n # button_stylesheet = \"background-color:\" + system_color +\"; border-width: 2px; border-radius: 36px; border-color: black; font: bold 14px; min-width: 10em; padding: 6px;\"\n # button_up = QPushButton(\"\\u1403\")\n # button_up.setFixedSize(button_width * window_width/100, button_height * window_height/100)\n # button_up.setStyleSheet(button_stylesheet)\n #\n # button_down = QPushButton(\"\\u1401\")\n # button_down.setFixedSize(button_width * window_width / 100, button_height * window_height / 100)\n # button_down.setStyleSheet(button_stylesheet)\n\n button_ListWidget = QListWidget()\n button_ListWidget.setStyleSheet( \"\"\"QListWidget{background: #f2eeed; border: 0px solid #f2eeed;}\"\"\")\n # Build the right-hand button bar ##############\n #vbox_menu.addWidget(button_up)\n button_width = button_width * window_width / 100\n button_height = button_height * window_height / 100\n background_color = \"#f2eeed\"\n button_size = QSize(button_width, button_height)\n for i in range(0, self.number_of_subs):\n subbutton_list_item = QListWidgetItem(button_ListWidget)\n placeholder_listItem = QListWidgetItem(button_ListWidget)\n placeholder_listItem.setSizeHint(QSize(button_width, 4))\n\n flag = placeholder_listItem.flags() & Qt.ItemIsUserCheckable\n placeholder_listItem.setFlags(flag)\n 
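On the indexOf change above: QStackedWidget.widget() maps an index to a widget, indexOf() goes the other way, and setCurrentWidget() skips the index entirely. A minimal sketch (assumes a QApplication can be created; the labels are placeholders):

from PyQt5.QtWidgets import QApplication, QLabel, QStackedWidget

app = QApplication([])
stack = QStackedWidget()
first, second = QLabel("first"), QLabel("second")
stack.addWidget(first)
stack.addWidget(second)
stack.setCurrentIndex(stack.indexOf(second))  # widget -> index lookup
assert stack.currentWidget() is second
stack.setCurrentWidget(first)                 # or select by widget directly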
#subbutton_listItem.setBackground(QColor(\"#f2eeed\"))\n # Widgets ##################################################################################################\n if i == 0:\n self.central_widget.insertWidget(i, Clock())\n else:\n self.central_widget.insertWidget(i, Placeholder(self.screens_config[\"sub\"][i][\"name\"]))\n # Buttons ##################################################################################################\n button_color = self.screens_config['sub'][i][\"Background\"]\n self.button[i] = QPushButton(self.screens_config[\"sub\"][i][\"name\"], self)\n self.button[i].setFixedSize(button_size)\n\n #Button with stylsheet\n #self.button[i].setStyleSheet(\"background-color:\" +button_color +\"; border-width: 2px; border-radius: 10px; bordericolor: black; font: bold 14px; min-width: 10em; padding: 6px;\")\n\n path_button = Button_full.build_svg(\n Button_full(button_width, button_height, background_color,\n self.screens_config[\"sub\"][i][\"Background\"] + \"_button\"))\n self.button[i].setStyleSheet(\"background-image: url(\" + path_button + \");\"\n \"border:1px; background-color:\" + button_color + \";\")\n #print(\"Button: \" + self.button[i].text() + \"Screen: \" + self.central_widget.widget(i).getName())\n # signals ##################################################################################################\n #self.button[i].clicked.connect(lambda widget=self.central_widget.widget(i): self.set_central_widget2(self, widget))\n self.button[i].clicked.connect(lambda widget=self.central_widget.widget(i): self.set_central_widget())\n subbutton_list_item.setSizeHint(button_size)\n #print(self.button[i].size())\n #\n button_ListWidget.addItem(placeholder_listItem)\n button_ListWidget.addItem(subbutton_list_item)\n button_ListWidget.setItemWidget(subbutton_list_item, self.button[i])\n button_ListWidget.setMaximumWidth(1000)\n #vbox_menu.addWidget(self.button[i])\n\n vbox_menu.addWidget(button_ListWidget)\n #vbox_menu.addWidget(button_down)\n #downbutton_listItem = QListWidgetItem(button_ListWidget)\n #downbutton_listItem.setSizeHint(button_down.size())\n #button_ListWidget.addItem(downbutton_listItem)\n #button_ListWidget.setItemWidget(downbutton_listItem, button_down)\n button_ListWidget.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n button_ListWidget.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n button_ListWidget.setMaximumWidth(button_ListWidget.sizeHintForColumn(0))\n\n #############################################\n self.central_widget.setCurrentIndex(1)\n self.main_layout.addWidget(self.central_widget, 1, 1)\n\n self.setCentralWidget(main_widget)\n\n # signals\n #button_up.clicked.connect(lambda: self.change_widget(self.central_widget, 0))\n #button_down.clicked.connect(lambda: self.change_widget(self.central_widget, 1))\n\n","sub_path":"main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":9499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"242097971","text":"#!/usr/bin/python\r\n#\r\n# Python script to convert serial traffic to MQTT and vice versa.\r\n# Stephen Gray 2017/04/15\r\n# David deMarco 2017/07/9 - Updated to Python 3.5 and added logging\r\n#\r\n# Serial data arrives as comma delimited in the following form (comma delimited, colons separate MQTT topic and message):\r\n# ,:,:\r\n# using the topic and message from the serial read, the MQTT topics are updated:\r\n# sensors/ \r\n#\r\n# MQTT topic sensors/>/# is subscribed to. 
MQTT Topics should be:\r\n# sensors/>/ \r\n# Any new data is sent to serial as:\r\n# :\r\n# Which the attached arduino broadcasts over the wireless network.\r\n# Anything destined for node 1 (the gateway arduino) is written to serial without sending mode. \r\n#\r\n# Developed for use with Moteinos https://lowpowerlab.com/guide/moteino/ using a slightly modified \r\n# gateway sketch (to format serial data as comma delimited).\r\n#\r\n# Primary functionality from Python MQTT client from Eclipse Paho http://www.eclipse.org/paho/\r\n# Paho documentation at https://pypi.python.org/pypi/paho-mqtt/1.1\r\n\r\nimport paho.mqtt.client as mqtt\r\nimport serial\r\nimport sys\r\nimport settings as s\r\nimport logging\r\nimport logging.handlers\r\n\r\n##############Configuration##############\r\n#Settings have been moved to the settings.py file so that different settings can exist between the development platform\r\n#and the Pi Gateway.\r\n#########################################\r\n\r\n\r\n##############Logging Settings##############\r\n#Added a logging routine so when the gateway is run as a process on the PI you can see what is going on. The main change you\r\n#might want to make is the logging level.\r\n\r\n# Change the level below to .DEBUG for message information and .INFO for runtime minimal messages\r\nlogging.basicConfig(level=logging.DEBUG)\r\nLOGGER = logging.getLogger(__name__)\r\n\r\n# Change this one as well. Not entirely sure which one is controlling this. Sorry!\r\nLOG_LEVEL = logging.DEBUG # Could be e.g. \"DEBUG\" or \"WARNING\"\r\n\r\n\r\n# Configure logging to log to a file, making a new file at midnight and keeping the last 3 days' data\r\n# Give the logger a unique name (good practice)\r\nlogger = logging.getLogger(__name__)\r\n# Set the log level to LOG_LEVEL\r\nlogger.setLevel(LOG_LEVEL)\r\n# Make a handler that writes to a file, making a new file at midnight and keeping 3 backups\r\nhandler = logging.handlers.TimedRotatingFileHandler(s.LOG_FILENAME, when=\"midnight\", backupCount=3)\r\n# Format each log message like this\r\nformatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\r\n# Attach the formatter to the handler\r\nhandler.setFormatter(formatter)\r\n# Attach the handler to the logger\r\nlogger.addHandler(handler)\r\n#########################################\r\n\r\nLOGGER.info('Starting up Pi/Moteino Gateway')\r\n\r\n#MQTT callbacks\r\n#the callback for when the client receives a CONNACK response from the server.\r\ndef on_connect(client, userdata, flags, rc):\r\n LOGGER.info(\"Connected to MQTT broker with result code \"+str(rc))\r\n # Subscribing in on_connect() means that if we lose the connection and\r\n # reconnect then subscriptions will be renewed.\r\n client.subscribe(\"sensors/>/#\")\r\n\r\n#the callback for when a PUBLISH message is received from the server.\r\ndef on_message(client, userdata, msg):\r\n #A message has been posted to sensors/>/#.\r\n\t#If node number is 1, pass through msg.payload\r\n #msg.payload arrives as bytes: decode it to build the outgoing string, encode before writing to serial\r\n list1=msg.topic.split(\"/\")\r\n if len(list1)==3 and list1[2]==\"1\":\r\n ser.write(msg.payload)\r\n if len(list1)==3 and list1[2]!=\"1\":\r\n sendstr=(list1[2]+\":\"+msg.payload.decode())\r\n LOGGER.info(\"Send string: \"+ sendstr)\r\n ser.write(sendstr.encode())\r\n\r\ndef on_publish(client, userdata, mid):\r\n# print \"Published \", mid\r\n pass\r\n\r\n#called on program exit\r\ndef cleanup():\r\n LOGGER.info(\"Ending and cleaning up\")\r\n #Close Serial\r\n ser.close()\r\n #Disconnect from MQTT\r\n mqttc.loop_stop()\r\n mqttc.disconnect()\r\n sys.exit(0)\r\n\r\n#connect 
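The decode/encode fix in on_message above follows from paho-mqtt handing the payload to the callback as bytes while pyserial's write() expects bytes back; the round trip can be checked without a broker or radio (the values here are made up):

payload = b"21.5"                        # what paho-mqtt delivers in msg.payload
sendstr = "3" + ":" + payload.decode()   # node 3, assembled as a readable string
frame = sendstr.encode()                 # what ser.write() actually needs
assert frame == b"3:21.5"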
to serial port\r\ntry:\r\n LOGGER.info('Connecting to serial device %s', s.SERIALDEV)\r\n ser = serial.Serial(\r\n port=s.SERIALDEV,\r\n baudrate=s.BAUD,\r\n parity=serial.PARITY_NONE,\r\n# stopbits=serial.STOPBITS_ONE,\r\n# timeout=0.5\r\n)\r\nexcept:\r\n LOGGER.info(\"Failed to open serial port\")\r\n sys.exit(-1)\r\n\r\n#connect to MQTT and main program loop\r\ntry:\r\n ser.flushInput()\r\n #create MQTT client\r\n mqttc=mqtt.Client()\r\n\r\n #attach MQTT callbacks\r\n mqttc.on_connect = on_connect\r\n mqttc.on_message = on_message\r\n mqttc.on_publish = on_publish\r\n #connect to broker (blocking function, callback when successful, callback subscribes to topics)\r\n mqttc.connect(s.BROKER, s.PORT, 60)\r\n #start background thread to monitor MQTT. Recieved messages will be handled by on_message function\r\n mqttc.loop_start()\r\n\r\n #main program loop: read serial and publish\r\n while 1:\r\n serialreadline=ser.readline()\r\n logger.debug(serialreadline)\r\n items=serialreadline.decode().split(\",\")\r\n\r\n #Format of serial line is:\r\n #,:,:, etc, :,ACK:\r\n try:\r\n nodenum=items[0]\r\n #crash out of the try if the first item is not a number\r\n int(nodenum)\r\n logger.debug(\"From node number: %s\", nodenum)\r\n for item in items[1:]:\r\n if \":\" in item:\r\n data=item.split(\":\")\r\n #first element is topic, second is payload\r\n topic=\"sensors/ 32:\n print(\"It is warm out\")\nelse:\n print(\"It is cold out\")\n# note: use elif for else if\n\n# loops\n# while and for loops\n# for (int i = 0; i < 5; i++) { }\n# for item in sequence:\n# body\nfor i in range(5): # [0, 5) incrementing by 1\n print(i, end=\" \") # default end is newline\nprint()\n\n# range(start, stop): [start, stop)\n# range(start, stop, step): [start, stop) incrementing by step\n\n# warm up\n# 2, 4, 6, ..., 40\ndef print_even_numbers(stop=40):\n for i in range(2, stop, 2):\n print(i, end=\", \")\n print(i + 2)\n\n# functions \n# they start with def\n# and they can accept keyword arguments\nprint_even_numbers(stop=20) # call\n\n# random numbers (import random)\nrandom.seed(0) # for reproducibility\ndie_roll = random.randint(1, 6) # [1, 6]\nprint(die_roll)\n\n# decimal printing\n# we can use round(value, 2)\nprint(math.pi, round(math.pi, 2))\n# C style\nprint(\"%.2f\" %(math.pi))\n# Pythonic style\nprint(\"{:.2f}\".format(math.pi))\n\n# lists\n# like are like arrays...\n# but they can grow/shrink in size\n# they are objects (methods)\n# recall: .()\n# they can have mixed types\nfibs = [1, 1, 2, 3, 5, 8]\nprint(fibs)\nfor value in fibs:\n print(value)\n# indices\nfor i in range(len(fibs)):\n print(i, \":\", fibs[i])\n\n# built in list functions\n# len()\nprint(sum(fibs))\nprint(max(fibs))\n\n# list methods\nfibs.append(13)\nprint(fibs)\nprint(fibs[-1])\n\ndef add_one(table):\n for i in range(len(table)):\n for j in range(len(table[i])):\n table[i][j] += 1\n\ndef clear_out(table):\n table = []\n\n# python is pass by OBJECT REFERENCE\n# object references are passed by value\n# for more info: https://robertheaton.com/2014/02/09/pythons-pass-by-object-reference-as-explained-by-philip-k-dick/\n# nested list (2D list, table)\nmatrix = [[0, 1, 2], [3, 4, 5]]\nprint(matrix)\nprint(\"matrix before:\", matrix)\nadd_one(matrix) # the reference to matrix is passed by value\n# (it is copied) into the parameter table\nprint(\"matrix after:\", matrix)\n\nprint(\"matrix before:\", matrix)\nclear_out(matrix)\nprint(\"matrix after:\", matrix)\n\n# task: define/call a pretty_print(table)\n# accept a 2D list\n# print in a grid 
structure\n# 0 1 2\n# 3 4 5\n\ndef pretty_print(table):\n for i in range(len(table)):\n for j in range(len(table[i])):\n print(table[i][j], end=\" \")\n print()\n\npretty_print(matrix)\n\n# shallow vs deep copy\nmatrix_copy = matrix.copy() # shallow copy\n# shallow copy: object references are copied, not the objects themselves\nmatrix_deep_copy = copy.deepcopy(matrix)\n# deep copy: objects are copied\nprint(\"matrix before:\", matrix)\nprint(\"matrix copy before:\", matrix_copy)\nprint(\"matrix deep copy before:\", matrix_deep_copy)\nadd_one(matrix)\nprint(\"matrix after:\", matrix)\nprint(\"matrix copy after:\", matrix_copy)\nprint(\"matrix deep copy after:\", matrix_deep_copy)\n# moral of the story: you probably want a deep copy\n\n# file IO\n# we want to open a csv file and store its contents\n# in a nested list (e.g. table)\n# csv file: comma separated value file\n\ndef convert_to_numeric(values):\n # attempt to convert each value in the 1D list values to a numeric type\n # if a value fails to convert, leave it as a string\n for i in range(len(values)):\n try:\n numeric_value = int(values[i])\n # success!!\n values[i] = numeric_value\n except ValueError:\n # failure\n print(values[i], \"could not be converted to a numeric type\")\n\ndef read_table(filename):\n table = []\n # 1. open\n # 2. process\n # 3. close \n infile = open(filename, \"r\") # \"r\" is for reading\n lines = infile.readlines()\n print(lines)\n for line in lines:\n # get rid of the newline character at the end of the line\n line = line.strip() # remove leading and trailing whitespaces\n # split the line into its individual values\n values = line.split(\",\")\n convert_to_numeric(values)\n table.append(values)\n # TODO: write a loop to iterate through each line\n # convert the numeric values to numeric types\n infile.close()\n\n return table\n\ndef write_table(filename, table):\n # TODO: challenge: get rid of the extra\n # newline that is written out\n outfile = open(filename, \"w\")\n for row in table:\n # join works for rows of any width (an index-based loop breaks on single-column rows)\n outfile.write(\",\".join([str(value) for value in row]) + \"\\n\")\n outfile.close()\n\ntable = read_table(\"msrp.csv\")\nprint(table)\nwrite_table(\"msrp_copy.csv\", table)\n\n# classes \n# class: a collection of state (attributes) and behavior (methods)\n# that completely describes something\n# object: an instance of a class\n\nclass Subject:\n \"\"\"Represents a subject in a research study.\n\n Attributes:\n sid(int): a unique value that identifies each subject\n name(str): name of the subject\n measurements(dict of string:float): records study measurements for each subject\n\n num_subjects(int): class-level attribute that keeps track of the total number of subjects\n\n \"\"\"\n num_subjects = 0 # this is a class-level attribute!!\n # means one num_subjects variable is shared amongst all subject objects\n # DO NOT put your instance-level variable declarations here!!\n\n # special method __init__()\n # initializer for new Subject objects (e.g. 
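The join-based write_table above handles rows of any width. For CSV output the standard-library csv module is another option and also quotes values that themselves contain commas; a sketch, not part of the lecture code:

import csv

def write_table_csv(filename, table):
    # newline="" lets the csv module manage line endings itself
    with open(filename, "w", newline="") as outfile:
        csv.writer(outfile).writerows(table)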
constructor)\n # self is like this\n # and refers to the \"current\" or \"invoking\" object\n def __init__(self, name, measurements=None):\n # declare and initialize instance-level attributes\n self.sid = Subject.num_subjects\n Subject.num_subjects += 1\n self.name = name \n if measurements is None:\n measurements = {}\n self.measurements = measurements\n\n # special method __str__() that is invoked anytime\n # a string representation of an object is needed\n def __str__(self):\n return \"SID: \" + str(self.sid) + \" NAME: \" + self.name + \\\n \" MEASUREMENTS: \" + str(self.measurements)\n\n def record_measurement(self, timestamp, value):\n # probably should some error checking...\n self.measurements[timestamp] = value \n\ngooeyduck = Subject(\"gooeyduck\")\nprint(gooeyduck)\n\ngooeyduck.record_measurement(\"2-2-21 8:50AM\", 1.55)\nprint(gooeyduck)\nprint(gooeyduck.measurements)\n\n# GS: note after class changing order to make sure spike's measurements are empty\nspike = Subject(\"spike\")\nprint(spike)\n","sub_path":"PythonBasicsFunS1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"353468027","text":"##\n# \\file plotline2d.py\n# \\brief plot 2d results, used by plotrecover.py\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\nfrom matplotlib.ticker import FuncFormatter\n\ndef set_cov_Pmat(sigma, tau, alpha, Tcon):\n Pmat = np.zeros((Tcon.shape[0], Tcon.shape[0]))\n xv, yv = np.meshgrid(Tcon, Tcon)\n Pmat = sigma*sigma*np.exp( - pow( np.fabs( xv - yv )/tau, alpha) )\n return Pmat\n\ndef plotline2d():\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif', size=15)\n \n fp=open(\"../data/mrk486_hb2d.txt\", \"r\")\n line=fp.readline()\n text=line.split()\n nt=int(text[1])\n nv=int(text[2])\n \n print(nv, nt)\n \n date_hb=np.zeros(nt)\n grid_vel=np.zeros(nv)\n prof = np.zeros((nt, nv))\n prof_err = np.zeros((nt, nv))\n \n for j in range(0, nt):\n for i in range(0, nv):\n line=fp.readline()\n grid_vel[i], date_hb[j], prof[j, i], prof_err[j, i] = line.split()\n \n line=fp.readline()\n \n fp.close()\n grid_wave = grid_vel / 3e5 * 4861.0 + 4861.0\n\n\n fp=open(\"../data/pline2d_data.txt\", \"r\")\n date_hb_sim=np.zeros(nt)\n grid_vel_sim=np.zeros(nv)\n prof_sim = np.zeros((nt, nv))\n prof_err_sim = np.zeros((nt, nv))\n \n #fp.readline()\n for j in range(0, nt):\n for i in range(0, nv):\n line=fp.readline()\n grid_vel_sim[i], date_hb_sim[j], prof_sim[j, i] = line.split()\n \n line=fp.readline()\n \n # read light curves\n conlc=np.loadtxt(\"../data/mrk486_con.txt\")\n conlc_sim=np.loadtxt(\"../data/pcon.txt\")\n hblc=np.zeros((nt, 3))\n hblc[:, 1]=np.sum(prof, axis=1) * (grid_vel[1]-grid_vel[0]) * 4861/3e5\n hblc[:, 2]=np.sqrt(np.sum(prof_err**2, axis=1)) * (grid_vel[1]-grid_vel[0]) * 4861/3e5\n hblc_sim=np.sum(prof_sim, axis=1)*(grid_vel_sim[1]-grid_vel_sim[0]) * 4861/3e5\n \n date0 = conlc[0, 0]\n conlc[:, 0]=conlc[:, 0]-date0\n conlc_sim[:, 0]=conlc_sim[:, 0]-date0\n date_hb_sim = date_hb_sim-date0\n date_hb = date_hb-date0 \n hblc[:, 0] = date_hb\n \n grid_vel /= 1.0e3\n grid_vel_sim /= 1.0e3\n \n # read pt\n con_scale = np.mean(conlc[:, 1])\n line_scale = np.mean(hblc[:, 0])\n syserr_con = np.exp(-25.049281) * con_scale\n syserr = np.exp(-6.421502) * line_scale\n hd=np.loadtxt(\"../data/sample2d.txt\", skiprows=1)\n phd = np.loadtxt(\"../data/posterior_sample2d.txt\")\n hd_info = 
np.loadtxt(\"../data/sample_info2d.txt\", skiprows=1)\n level = np.loadtxt(\"../data/levels2d.txt\", skiprows=1)\n idx = np.where(hd_info[:, 0]>level.shape[0] - 30)\n hd_sort=np.sort(hd[idx[0], 8]/np.log(10.0)+6.0)\n mbh1=hd_sort[int(len(hd_sort)*0.1585)]\n mbh2=hd_sort[int(len(hd_sort)*(1.0-0.1585))]\n print(mbh1, mbh2)\n \n \n fig=plt.figure(1, figsize=(9, 8))\n cmap=plt.get_cmap('jet')\n \n ax1 = fig.add_axes([0.1, 0.6, 0.25, 0.3])\n \n plt.imshow(prof, cmap=cmap, interpolation=None, aspect='auto', extent=[grid_vel[0], grid_vel[nv-1], date_hb[-1], date_hb[0]], vmax = np.amax(prof), vmin=np.amin(prof))\n ax1.set_xlabel(r'$\\rm Velocity\\ (10^3km\\ s^{-1})$')\n ax1.set_ylabel(r'$\\rm Time\\ (+2\\ 450\\ 500)$')\n \n xlim=ax1.get_xlim()\n ylim=ax1.get_ylim()\n plt.text(xlim[0]+0.08*(xlim[1]-xlim[0]), ylim[1]-0.1*(ylim[1]-ylim[0]), r'$\\rm Data$', color='white')\n \n #ax1.xaxis.set_major_locator(MultipleLocator(2000))\n #ax1.xaxis.set_minor_locator(MultipleLocator(400))\n \n \n ax2=fig.add_axes([0.37, 0.6, 0.25, 0.3])\n \n plt.imshow(prof_sim, cmap=cmap, interpolation=None, aspect='auto', extent=[grid_vel[0], grid_vel[nv-1], date_hb[-1], date_hb[0]], vmax = np.amax(prof_sim), vmin=np.amin(prof_sim))\n ax2.set_xlabel(r'$\\rm Velocity\\ (10^3km\\ s^{-1})$')\n #ax2.set_ylabel('Time (+2 450 000)')\n xlim=ax2.get_xlim()\n ylim=ax2.get_ylim()\n plt.text(xlim[0]+0.08*(xlim[1]-xlim[0]), ylim[1]-0.1*(ylim[1]-ylim[0]), r'$\\rm Model$', color='white')\n \n \n [i.set_visible(False) for i in ax2.get_yticklabels()]\n #ax2.xaxis.set_major_locator(MultipleLocator(2000))\n #ax2.xaxis.set_minor_locator(MultipleLocator(400))\n \n \n ax3=fig.add_axes([0.7, 0.6, 0.25, 0.3])\n \n offset = np.max(prof.flatten()) * 0.3\n for j in range(0, 2):\n i = np.random.randint(nt)\n plt.errorbar(grid_vel, prof[i, :]+j*offset, yerr=np.sqrt(prof_err[i, :]*prof_err[i, :] + syserr*syserr/10000), ls='none', ecolor='k', capsize=1, markeredgewidth=1)\n plt.plot(grid_vel_sim, prof_sim[i, :]+j*offset, color='b', lw=2)\n \n ax3.set_xlabel(r'$\\rm Velocity\\ (10^3km\\ s^{-1})$')\n ax3.set_ylabel(r'$\\rm Flux$')\n ax3.set_xlim([grid_vel[0], grid_vel[-1]])\n xlim=ax3.get_xlim()\n ylim=ax3.get_ylim()\n plt.text(xlim[0]+0.08*(xlim[1]-xlim[0]), ylim[1]-0.1*(ylim[1]-ylim[0]), r'$\\rm Profile$', color='black')\n \n #ax3.xaxis.set_major_locator(MultipleLocator(2000))\n #ax3.xaxis.set_minor_locator(MultipleLocator(400))\n \n \n ax4=fig.add_axes([0.1, 0.35, 0.52, 0.15])\n \n offset = 12\n con_scale = np.mean(conlc[:, 1])\n con = np.zeros(conlc_sim.shape[0])\n for i in range(0, phd.shape[0], int(phd.shape[0]/10.0+1.0)):\n if(phd.ndim == 1):\n Pmat = set_cov_Pmat(np.exp(phd[1+offset]), np.exp(phd[2+offset]), 1.0, conlc_sim[:, 0])\n break\n else:\n Pmat = set_cov_Pmat(np.exp(phd[i, 1+offset]), np.exp(phd[i, 2+offset]), 1.0, conlc_sim[:, 0])\n Mmat = np.linalg.cholesky(Pmat)\n #Mmat = Mmat.T\n \n con = np.matmul(Mmat, phd[i, 4+offset:]) + phd[i, 3+offset]\n ax4.plot(conlc_sim[:, 0], con*con_scale, color='grey', linewidth=0.1)\n \n plt.errorbar(conlc[:, 0], conlc[:, 1], yerr=np.sqrt(conlc[:, 2]*conlc[:, 2] + syserr_con*syserr_con), marker='o', markersize=3, ls='none', lw=1, capsize=1, markeredgewidth=0.5)\n plt.plot(conlc_sim[:, 0], conlc_sim[:, 1], color='red')\n #plt.plot(conlc_sim[:, 0], conlc_sim[:, 1]+conlc_sim[:, 2], ls='dotted', color='red')\n #plt.plot(conlc_sim[:, 0], conlc_sim[:, 1]-conlc_sim[:, 2], ls='dotted', color='red')\n ax4.set_xlim(conlc_sim[0, 0], conlc_sim[-1, 0])\n ymax = np.max(conlc[:, 1])\n ymin = np.min(conlc[:, 1])\n 
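set_cov_Pmat at the top of this plotting script builds a stretched-exponential covariance, P_ij = sigma^2 * exp(-(|t_i - t_j|/tau)^alpha). Restated standalone, it can be spot-checked on a small time grid (the grid and parameter values below are arbitrary):

import numpy as np

def cov_pmat(sigma, tau, alpha, t):
    xv, yv = np.meshgrid(t, t)
    return sigma * sigma * np.exp(-np.power(np.fabs(xv - yv) / tau, alpha))

P = cov_pmat(1.0, 10.0, 1.0, np.linspace(0.0, 50.0, 6))
assert np.allclose(np.diag(P), 1.0)        # sigma^2 on the diagonal
assert np.all(np.linalg.eigvalsh(P) > 0)   # the alpha=1 (exponential) kernel is positive definite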
ax4.set_ylim(ymin - 0.2*(ymax-ymin), ymax + 0.2*(ymax- ymin))\n ax4.set_ylabel(r'$F_{\\rm 5100}$')\n #ax4.set_ylim([30, 95])\n \n #ax4.xaxis.set_minor_locator(MultipleLocator(4.0))\n #ax4.yaxis.set_major_locator(MultipleLocator(40.0))\n #ax4.yaxis.set_minor_locator(MultipleLocator(8.0))\n \n \n [i.set_visible(False) for i in ax4.get_xticklabels()]\n \n ax5=fig.add_axes([0.1, 0.2, 0.52, 0.15])\n \n plt.errorbar(hblc[:, 0], hblc[:, 1], yerr=np.sqrt(hblc[:, 2]*hblc[:, 2] + syserr*syserr/10000), marker='o', markersize=3, ls='none', lw=1, capsize=1, markeredgewidth=0.5)\n plt.plot(date_hb_sim, hblc_sim, color='red')\n #plt.plot(hblc_sim[:, 0], hblc_sim[:, 1]+hblc_sim[:, 2], ls='dotted', color='red')\n #plt.plot(hblc_sim[:, 0], hblc_sim[:, 1]-hblc_sim[:, 2], ls='dotted', color='red')\n \n ax5.set_xlabel(r'$\\rm HJD\\ (+2\\ 450\\ 500)$')\n ax5.set_ylabel(r'$F_{\\rm H\\beta}$')\n xlim=ax4.get_xlim()\n ax5.set_xlim([xlim[0], xlim[1]])\n #ax4.set_xlim([0, 110])\n \n #ax5.xaxis.set_minor_locator(MultipleLocator(4.0))\n #ax5.yaxis.set_major_locator(MultipleLocator(0.5))\n #ax5.yaxis.set_minor_locator(MultipleLocator(0.1))\n \n \n ax6=fig.add_axes([0.7, 0.2, 0.25, 0.3])\n \n hp, bins, patches=plt.hist(hd[idx[0], 8]/np.log(10.0)+6.0, bins=10, normed=1)\n ax6.set_xlabel(r'$\\log(M_\\bullet/M_\\odot)$')\n ax6.set_ylabel(r'$\\rm Hist$')\n ylim=ax6.get_ylim()\n \n ax6.set_ylim(ylim)\n #ax6.set_xlim((6.0, 8.5))\n ax6.xaxis.set_major_locator(MultipleLocator(1.0))\n \n \n plt.savefig('lc.pdf', format='pdf', bbox_inches='tight')\n","sub_path":"analysis/plotline2d.py","file_name":"plotline2d.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"22378824","text":"# Functions used in Proposal stage of the spell checker.\n# Based on Nick Sweeting's Repo\n# Modified by Hanyu Wang, Hengduo Li\n\nfrom itertools import product\n\n\ndef numberofdupes(string, idx):\n \"\"\"return the number of times in a row the letter at index idx is duplicated\"\"\"\n # \"abccdefgh\", 2 returns 1\n initial_idx = idx\n last = string[idx]\n while idx+1 < len(string) and string[idx+1] == last:\n idx += 1\n return idx-initial_idx\n\n\ndef variants(word):\n \"\"\"get all possible variants for a word\"\"\"\n alphabet = set('abcdefghijklmnopqrstuvwxyz')\n splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n deletes = [a + b[1:] for a, b in splits if b]\n transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]\n replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]\n inserts = [a + c + b for a, b in splits for c in alphabet]\n return set(deletes + transposes + replaces + inserts)\n\ndef double_variants(word):\n \"\"\"get variants for the variants for a word\"\"\"\n return set(s for w in variants(word) for s in variants(w))\n\ndef reductions(word):\n \"\"\"return flat option list of all possible variations of the word by removing duplicate letters\"\"\"\n word = list(word)\n # ['h','i', 'i', 'i'] becomes ['h', ['i', 'ii', 'iii']]\n for idx, l in enumerate(word):\n n = numberofdupes(word, idx)\n # if letter appears more than once in a row\n if n:\n # generate a flat list of options ('hhh' becomes ['h','hh','hhh'])\n flat_dupes = [l*(r+1) for r in range(n+1)][:3] # only take up to 3, there are no 4 letter repetitions in english\n # remove duplicate letters in original word\n for _ in range(n):\n word.pop(idx+1)\n # replace original letter with flat list\n word[idx] = flat_dupes\n\n # ['h',['i','ii','iii']] becomes 
'hi','hii','hiii'\n for p in product(*word):\n yield ''.join(p)\n\ndef vowelswaps(word):\n \"\"\"return flat option list of all possible variations of the word by swapping vowels\"\"\"\n vowels = set('aeiouy')\n word = list(word)\n # ['h','i'] becomes ['h', ['a', 'e', 'i', 'o', 'u', 'y']]\n for idx, l in enumerate(word):\n if type(l) == list:\n pass # dont mess with the reductions\n elif l in vowels:\n word[idx] = list(vowels) # if l is a vowel, replace with all possible vowels\n\n # ['h',['i','ii','iii']] becomes 'hi','hii','hiii'\n for p in product(*word):\n yield ''.join(p)\n\ndef both(word):\n \"\"\"permute all combinations of reductions and vowelswaps\"\"\"\n for reduction in reductions(word):\n for variant in vowelswaps(reduction):\n yield variant\n","sub_path":"functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"312723399","text":"from string import ascii_lowercase\r\nfrom collections import defaultdict\r\n\r\n\r\nletters = {x: [x] for x in ascii_lowercase}\r\n\r\nstring = input().strip()\r\n\r\nfor a, b in zip(string, string[::-1]):\r\n if a == b: continue\r\n if letters[a] == None or letters[b] == None: continue\r\n \r\n letters[a].extend(letters[b])\r\n letters[b] = None\r\n\r\nadj = {x: [] for x in ascii_lowercase}\r\nfor connection in range(int(input())):\r\n u, v, cost = input().split()\r\n \r\n adj[u].append((v, int(cost)))\r\n\r\n\r\n\r\n \r\ndef _reverse(graph):\r\n r = {}\r\n for src in graph:\r\n for (dst,c) in graph[src].items():\r\n if dst in r:\r\n r[dst][src] = c\r\n else:\r\n r[dst] = { src : c }\r\n return r\r\n\r\ndef _getCycle(n, g, visited=None, cycle=None):\r\n if visited is None:\r\n visited = set()\r\n if cycle is None:\r\n cycle = []\r\n visited.add(n)\r\n cycle += [n]\r\n if n not in g:\r\n return cycle\r\n for e in g[n]:\r\n if e not in visited:\r\n cycle = _getCycle(e,g,visited,cycle)\r\n return cycle\r\n\r\ndef _mergeCycles(cycle,G,RG,g,rg):\r\n allInEdges = []\r\n minInternal = None\r\n minInternalWeight = float('inf') # sys.maxint is Python 2 only, and sys was never imported\r\n\r\n # find minimal internal edge weight\r\n for n in cycle:\r\n for e in RG[n]:\r\n if e in cycle:\r\n if minInternal is None or RG[n][e] < minInternalWeight:\r\n minInternal = (n,e)\r\n minInternalWeight = RG[n][e]\r\n continue\r\n else:\r\n allInEdges.append((n,e)) \r\n\r\n # find the incoming edge with minimum modified cost\r\n minExternal = None\r\n minModifiedWeight = 0\r\n for s,t in allInEdges:\r\n u,v = rg[s].popitem()\r\n rg[s][u] = v\r\n w = RG[s][t] - (v - minInternalWeight)\r\n if minExternal is None or minModifiedWeight > w:\r\n minExternal = (s,t)\r\n minModifiedWeight = w\r\n\r\n u,w = rg[minExternal[0]].popitem()\r\n rem = (minExternal[0],u)\r\n rg[minExternal[0]].clear()\r\n if minExternal[1] in rg:\r\n rg[minExternal[1]][minExternal[0]] = w\r\n else:\r\n rg[minExternal[1]] = { minExternal[0] : w }\r\n if rem[1] in g:\r\n if rem[0] in g[rem[1]]:\r\n del g[rem[1]][rem[0]]\r\n if minExternal[1] in g:\r\n g[minExternal[1]][minExternal[0]] = w\r\n else:\r\n g[minExternal[1]] = { minExternal[0] : w }\r\n\r\ndef mst(root,G):\r\n\r\n\r\n RG = _reverse(G)\r\n if root in RG:\r\n RG[root] = {}\r\n g = {}\r\n for n in RG:\r\n if len(RG[n]) == 0:\r\n continue\r\n minimum = float('inf')\r\n s,d = None,None\r\n for e in RG[n]:\r\n if RG[n][e] < minimum:\r\n minimum = RG[n][e]\r\n s,d = n,e\r\n if d in g:\r\n g[d][s] = RG[s][d]\r\n else:\r\n g[d] = { s : RG[s][d] }\r\n \r\n cycles = []\r\n visited = 
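Back in functional.py above, variants() enumerates every candidate one edit away (deletes, transposes, replaces, inserts). A couple of concrete spot checks, assuming variants from that file is in scope:

cands = variants("speling")
assert "spelling" in cands   # one insertion away
assert "sepling" in cands    # one transposition away
assert "speling" in cands    # replacing a letter with itself keeps the original word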
set()\r\n for n in g:\r\n if n not in visited:\r\n cycle = _getCycle(n,g,visited)\r\n cycles.append(cycle)\r\n\r\n rg = _reverse(g)\r\n for cycle in cycles:\r\n if root in cycle:\r\n continue\r\n _mergeCycles(cycle, G, RG, g, rg)\r\n\r\n return g\r\n\r\n\r\n\r\ncost = 0\r\nfor group in letters.values():\r\n if group == None or len(group) == 1: continue\r\n \r\n g = defaultdict(dict)\r\n \r\n for letter in group:\r\n for edge in adj[letter]:\r\n if edge[0] in group:\r\n g[letter][edge[0]] = edge[1]\r\n \r\n best = float('inf')\r\n for letter in group:\r\n graph = mst(letter, g)\r\n curr = 0\r\n if len(graph):\r\n for x in graph.values():\r\n for y in x.values():\r\n curr += y\r\n best = min(curr, best) \r\n cost += best\r\nprint(cost)\r\n","sub_path":"august easy/palidromic.py","file_name":"palidromic.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"232350038","text":"# -*- coding: utf-8 -*-\n# flake8: noqa\n# pylint: skip-file\n\nimport locale\nimport os\nfrom nose.tools import assert_equals, assert_raises\nfrom .._ext import ren\nfrom ..dev._bouth23 import b2u3, u\nfrom ..dev.helpers import File, cwdfiles\n\"\"\"\nnose tests\n\"\"\"\n\nWINDOWS = os.name == 'nt'\nENCODING = locale.getpreferredencoding()\nif ENCODING != 'UTF-8':\n print(\"Your default locale encoding (%s) doesn't allow unicode filenames!\"\n % ENCODING)\n print(\"=> Some tests could fail.\")\n\nTESTFILE_1 = './ç-deleteme.pdf' if WINDOWS else '/tmp/ç-deleteme.pdf'\nTESTFILE_2 = './ç-deleteme-PLEASE.pdf' if WINDOWS else '/tmp/ç-deleteme-PLEASE.pdf'\n\n#F1 = '9780321534965.pdf'\nF1 = '9780872203495.pdf'\nF2 = '9781597499644.pdf'\nF3 = '9781852330729.pdf'\nF4 = '9787500117018.pdf'\nF5 = '9789727576807.pdf'\n\nF6 = 'Campos2011_Emergências obstétricas_9789727576807.pdf'\n#F7 = 'Knuth2008_The Art Of Computer Programming_9780321534965.pdf'\n#F7a = 'Knuth2008_Introduction To Combinatorial Algorithms And Boolean Functions_9780321534965.pdf'\nF7 = 'Plato1997_Complete Works_9780872203495.pdf'\nF8 = 'Man2001_Genetic Algorithms Concepts And Designs_9781852330729.pdf'\nF9 = \"O'Connor2012_Violent Python A Cookbook for Hackers, Forensic Analysts, Penetra_9781597499644.pdf\"\nF10 = '海明威2007_Lao ren yu hai_9787500117018.pdf'\n\nF11 = 'myfile.pdf'\n\nFISBN = [F1, F2, F3, F4, F5]\nFFT = [F6, F7, F8, F9, F10]\nFILES = FISBN + FFT + [F11]\n\n\ndef create_files(files):\n os.chdir(os.path.dirname(TESTFILE_1))\n for fn in files:\n try:\n with open(fn, 'w') as f:\n f.write(b2u3('ooo') + b2u3(fn))\n except UnicodeEncodeError:\n print(\"Your default locale (%s) doesn't allow non-ascii filenames!\"\n % locale.CODESET)\n\n\ndef delete_files(fnpatt):\n os.chdir(os.path.dirname(TESTFILE_1))\n for fn in cwdfiles(fnpatt):\n os.remove(fn)\n\n\ndef setup_module():\n # create_files([u(TESTFILE_1), u(TESTFILE_2)])\n os.chdir(os.path.dirname(TESTFILE_1))\n #create_files(FISBN + [F11])\n create_files([F1])\n\n\ndef teardown_module():\n delete_files(\"*.pdf\")\n\n\ndef test_ren():\n \"\"\"Test 'high level' ren function.\"\"\"\n ren(F1)\n assert_equals(F7 in cwdfiles(\"*.pdf\"), True)\n #assert_equals(F7 in cwdfiles(\"*.pdf\") or F7a in cwdfiles(\"*.pdf\"), True)\n # create_files([F5])\n # ren(F5)\n # assert_equals('Campos2011_Emergências obstétricas_9789727576807.pdf' in cwdfiles(\"*.pdf\"), 
True)\n","sub_path":"env/lib/python3.8/site-packages/isbnlib/test/test_rename.py","file_name":"test_rename.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"139637064","text":"# Local packages\nfrom data_utilities.utils import export_data_chunk, backup_sql_database, get_query\n\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport datetime\n\n# Web scraping\nimport urllib3\n\n# Logging\nimport traceback\n\n# Progress bar\nfrom tqdm import trange\n\nhttp = urllib3.PoolManager()\n\nimport time\n\n\ndef display_info(*args, **kwargs):\n print('{} failed on chunk_start: {}, chunk_end: {} and exception: {}'.format(\n args[0], args[1], args[2], args[3:]))\n\n\ndef get_info(start=0):\n \"\"\"\n Loads fight info and returns fight urls, titles, locations and dates\n \"\"\"\n\n # Load Fight Info from database (created in fight url scraper)\n fight_info = get_query('raw_ufc_fight_urls')\n fight_info.drop_duplicates(subset=\"url\", inplace=True)\n fight_info.reset_index(drop=True, inplace=True) \n\n # return fight_urls[start:], fight_events[start:], fight_locations[start:], fight_dates[start:]\n \n return fight_info.loc[start:, :]\n\n\ndef nice_text(tag):\n \"\"\"\n Helper function to get the tag text\n \"\"\"\n\n return \" \".join(str(tag.get_text()).split())\n\n\ndef scrape_chunk(fight_urls, fight_events, fight_locations, fight_dates,\n fighter_1_names, fighter_1_urls, fighter_2_names,\n fighter_2_urls, chunk_start, chunk_end, chunk, \n path='../data/raw/fight_chunks/'):\n \"\"\"\n Retrieves the next chunk of fights\n \"\"\"\n\n # Initialize an empty dataframe\n fight_columns = [\n 'fighter_url', 'name', 'kd', 'sig_strikes', 'sig_attempts', 'strikes',\n 'strike_attempts', 'takedowns', 'td_attempts', 'sub_attempts', 'pass',\n 'reversals', 'head', 'head_attempts', 'body', 'body_attempts', 'leg',\n 'leg_attempts', 'distance', 'distance_attempts', 'clinch', 'fight_id',\n 'clinch_attempts', 'ground', 'ground_attempts', 'outcome', 'referee',\n 'weight_class', 'round', 'round_time', 'time_format', 'method',\n 'details', 'date', 'location', 'event', 'event_url'\n ]\n fight_df = pd.DataFrame(columns=fight_columns)\n\n # Iterate through the fight urls, and pull relevant variables/fields\n for i in range(chunk_start, chunk_end):\n\n # Store fight event, location and date\n event = fight_events[i]\n event_url = fight_urls[i]\n location = fight_locations[i]\n date = fight_dates[i]\n\n # Specific URL for a fight\n response = http.request('GET', fight_urls[i])\n fight_soup = bs(response.data, \"lxml\")\n\n # If fight has not occurred yet, store minimal information\n if datetime.datetime.strptime(date, '%Y-%m-%d').date() >= datetime.date.today():\n\n headers = fight_soup.find_all('h3',\n class_='b-fight-details__person-name')\n person_divs = fight_soup.find_all('div',\n class_=\"b-fight-details__person\")\n details = fight_soup.find_all('i',\n class_='b-fight-details__fight-title')\n fighter_urls = []\n for person_div in person_divs:\n h3 = person_div.find('h3')\n try:\n fighter_url = h3.find('a')\n fighter_urls.append(fighter_url.get('href'))\n except:\n fighter_urls.append(None)\n\n name_1 = nice_text(headers[0])\n name_2 = nice_text(headers[1])\n\n if fighter_urls[0] is not None and fighter_urls[1] is not None:\n fighter_url_1, fighter_url_2 = fighter_urls\n else:\n if name_1 == fighter_1_names[i]:\n fighter_url_1 = fighter_1_urls[i]\n fighter_url_2 = 
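nice_text above collapses runs of whitespace in scraped tag text via the split/join idiom; it is easy to verify on a throwaway soup (this check assumes the lxml parser is installed, as the script itself does):

from bs4 import BeautifulSoup as bs

soup = bs("<h3>  Jon \n  Jones </h3>", "lxml")
tag = soup.find("h3")
assert " ".join(str(tag.get_text()).split()) == "Jon Jones"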
fighter_2_urls[i]\n else:\n fighter_url_1 = fighter_2_urls[i]\n fighter_url_2 = fighter_1_urls[i]\n \n\n\n\n weight_class = nice_text(details[0])\n\n data_odd = {\n 'name': [name_1], 'weight_class': [weight_class],\n 'date': [date], 'location': [location], 'event': [event],\n 'event_url': [event_url],\n 'fight_id': [i], 'fighter_url': [fighter_url_1]\n }\n data_even = {\n 'name': [name_2], 'weight_class': [weight_class],\n 'date': [date], 'location': [location], 'event': [event],\n 'event_url': [event_url],\n 'fight_id': [i], 'fighter_url': [fighter_url_2]\n }\n fight_odd = pd.DataFrame(data=data_odd)\n fight_even = pd.DataFrame(data=data_even)\n fight_df = pd.concat(\n [fight_df, fight_odd, fight_even],\n axis=0,\n ignore_index=True,\n sort=True\n )\n continue\n\n trs = fight_soup.find_all('tr') # all the tables in each fight URL\n headers = fight_soup.find_all('i')\n bad_call = 0\n \n # Get the name and outcome for each fighter\n person_divs = fight_soup.find_all('div',\n class_=\"b-fight-details__person\")\n if not person_divs:\n response = http.request('GET', fight_urls[i])\n time.sleep(5)\n fight_soup = bs(response.data, \"lxml\")\n time.sleep(5)\n person_divs = fight_soup.find_all('div',\n class_=\"b-fight-details__person\")\n if not person_divs:\n print(\"Person divs missing after waiting 5 seconds for the following URL:\", fight_urls[i])\n raise Exception\n\n # Get bout details\n bout_details = fight_soup.find_all('p',\n class_=\"b-fight-details__text\")\n\n # Get weight class\n details_title = fight_soup.find_all('i',\n class_=\"b-fight-details__fight-title\")\n names = []\n fighter_urls = []\n winloss = []\n\n # Extract name, outcome and fighter url\n for person_div in person_divs:\n i_tag = person_div.find('i')\n try:\n winloss.append(nice_text(i_tag))\n except:\n winloss.append(None)\n h3 = person_div.find('h3')\n try:\n names.append(nice_text(h3))\n except:\n names.append(None)\n try:\n fighter_url = h3.find('a')\n fighter_urls.append(fighter_url.get('href'))\n except:\n fighter_urls.append(None)\n\n name_1, name_2 = names\n\n if fighter_urls[0] is not None and fighter_urls[1] is not None:\n fighter_url_1, fighter_url_2 = fighter_urls \n else:\n if name_1 == fighter_1_names[i]:\n fighter_url_1 = fighter_1_urls[i]\n fighter_url_2 = fighter_2_urls[i]\n else:\n fighter_url_1 = fighter_2_urls[i]\n fighter_url_2 = fighter_1_urls[i]\n\n winloss_1, winloss_2 = winloss\n\n try:\n weight_class = \" \".join(str(details_title[0].get_text()).split())\n except:\n weight_class = None\n try:\n method = \" \".join(str(headers[5].get_text()).split())\n except:\n method = None\n try:\n rounds = str(headers[6].get_text()).split()[1]\n except:\n rounds = None\n try:\n round_time = \" \".join(str(headers[8].get_text()).split()[1:])\n except:\n round_time = None\n try:\n time_format = \" \".join(str(headers[10].get_text()).split()[2:])\n except:\n time_format = None\n try:\n referee = \" \".join(str(headers[12].get_text()).split()[1:])\n except:\n referee = None\n\n try:\n details = \" \".join(str(bout_details[1].get_text()).split()[1:])\n except:\n details = None\n try:\n tr1 = str(trs[1].get_text()).split()\n # Find the location of the 2nd table tr2 (it varies)\n j = 0\n while j < 10:\n if str(trs[j].get_text()).split()[6] == 'Head':\n # print j+1\n tr2 = str(trs[j + 1].get_text()).split()\n break\n j += 1\n # print tr1; #print tr2\n\n # Test for the end of names\n k = 0\n while k < len(tr1):\n try:\n int(tr1[k])\n break\n except:\n k += 1\n continue\n # print k\n except:\n\n # Check whether 
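# --- illustrative aside (editorial addition; not part of the original record) ---
# The block above re-requests the page, sleeps, and re-parses when the expected
# person divs are missing. A reusable sketch of that retry idea
# (fetch_with_retry, fetch and check are hypothetical; the counts are illustrative):
import time

def fetch_with_retry(fetch, check, retries=2, delay=5):
    for _ in range(retries):
        result = fetch()
        if check(result):
            return result
        time.sleep(delay)  # give the server a moment before retrying
    raise RuntimeError("expected content still missing after retries")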
fight details are avaiable\n tables = fight_soup.find_all('table')\n\n # If no fight details, add minimal information\n if not tables:\n try:\n data_odd = {\n 'name': [name_1], 'weight_class': [weight_class],\n 'round': [rounds], 'date': [date], 'round_time': [round_time],\n 'time_format': [time_format], 'outcome': [winloss_1],\n 'referee': [referee], 'method': [method], 'details': [details],\n 'location': [location], 'event': [event],\n 'event_url': [event_url],\n 'fight_id': [i], 'fighter_url': [fighter_url_1]\n }\n data_even = {\n 'name': [name_2], 'weight_class': [weight_class],\n 'round': [rounds], 'date': [date], 'round_time': [round_time],\n 'time_format': [time_format], 'outcome': [winloss_2],\n 'referee': [referee], 'method': [method], 'details': [details],\n 'location': [location], 'event': [event],\n 'event_url': [event_url],\n 'fight_id': [i], 'fighter_url': [fighter_url_2]\n }\n fight_odd = pd.DataFrame(data=data_odd)\n fight_even = pd.DataFrame(data=data_even)\n fight_df = pd.concat(\n [fight_df, fight_odd, fight_even],\n axis=0,\n ignore_index=True,\n sort=True\n )\n continue\n except:\n print('{}: {} vs {} unable to add fight details.'.format(fight_dates[i], name_1, name_2))\n bad_call += 1\n continue\n\n # Add each fighter's information to the dataframe\n data1 = {\n 'fighter_url': [fighter_url_1], 'name': [name_1], 'kd': tr1[k], 'sig_strikes': tr1[k + 2],\n 'sig_attempts': tr1[k + 4], 'strikes': tr1[k + 10],\n 'strike_attempts': tr1[k + 12], 'takedowns': tr1[k + 16],\n 'td_attempts': tr1[k + 18], 'sub_attempts': tr1[k + 24],\n 'pass': tr1[k + 26], 'reversals': tr1[k + 28], 'head': tr2[k + 8],\n 'head_attempts': tr2[k + 10], 'body': tr2[k + 14],\n 'body_attempts': tr2[k + 16], 'leg': tr2[k + 20],\n 'leg_attempts': tr2[k + 22], 'distance': tr2[k + 26],\n 'distance_attempts': tr2[k + 28], 'clinch': tr2[k + 32],\n 'clinch_attempts': tr2[k + 34], 'ground': tr2[k + 38],\n 'ground_attempts': tr2[k + 40], 'outcome': winloss_1,\n 'referee': referee, 'weight_class': weight_class, 'round': rounds,\n 'round_time': round_time, 'time_format': time_format,\n 'method': method, 'details': details, 'fight_id': i, 'date': date,\n 'location': location, 'event': event, 'event_url': [event_url]\n }\n fight_odd = pd.DataFrame(data1)\n\n data2 = {\n 'fighter_url': [fighter_url_2], 'name': [name_2], 'kd': tr1[k + 1], 'sig_strikes': tr1[k + 5],\n 'sig_attempts': tr1[k + 7], 'strikes': tr1[k + 13],\n 'strike_attempts': tr1[k + 15], 'takedowns': tr1[k + 19],\n 'td_attempts': tr1[k + 21], 'sub_attempts': tr1[k + 25],\n 'pass': tr1[k + 27], 'reversals': tr1[k + 29], 'head': tr2[k + 11],\n 'head_attempts': tr2[k + 13], 'body': tr2[k + 17],\n 'body_attempts': tr2[k + 19], 'leg': tr2[k + 23],\n 'leg_attempts': tr2[k + 25], 'distance': tr2[k + 29],\n 'distance_attempts': tr2[k + 31], 'clinch': tr2[k + 35],\n 'clinch_attempts': tr2[k + 37], 'ground': tr2[k + 41],\n 'ground_attempts': tr2[k + 43], 'outcome': winloss_2,\n 'referee': referee, 'weight_class': weight_class, 'round': rounds,\n 'round_time': round_time, 'time_format': time_format,\n 'method': method, 'details': details, 'fight_id': i, 'date': date,\n 'location': location, 'event': event, 'event_url': [event_url]\n }\n fight_even = pd.DataFrame(data2)\n\n fight_df = pd.concat(\n [fight_df, fight_odd, fight_even],\n axis=0,\n ignore_index=True,\n sort=True\n )\n\n # Export to database\n export_data_chunk(fight_df, 'raw_ufc_fights', chunk)\n\n\ndef scrape_ufc_fights(chunk_size=100, start=0, num_fights=None):\n \"\"\"\n Retrieves all fights in 
chunks\n \"\"\"\n\n # fight_urls, fight_events, fight_locations, fight_dates = get_info()\n fight_info = get_info()\n\n # Extract fight data\n fight_urls = fight_info['url'].values.tolist()\n fight_events = fight_info['event'].values.tolist()\n fight_locations = fight_info['location'].values.tolist()\n fight_dates = fight_info['date'].dt.tz_localize(None).dt.strftime('%Y-%m-%d').values.tolist()\n\n # Extract fighter data\n fighter_1_names = fight_info['fighter_a'].values.tolist()\n fighter_1_urls = fight_info['fighter_a_url'].values.tolist()\n fighter_2_names = fight_info['fighter_b'].values.tolist()\n fighter_2_urls = fight_info['fighter_b_url'].values.tolist()\n\n if not num_fights:\n num_fights = len(fight_urls)\n chunk_starts = range(start, num_fights, chunk_size)\n\n with trange(len(chunk_starts)) as t:\n for chunk in t:\n try:\n chunk_end = min((len(fight_urls), chunk_starts[chunk] + chunk_size))\n scrape_chunk(fight_urls, fight_events, fight_locations, fight_dates,\n fighter_1_names, fighter_1_urls, fighter_2_names,\n fighter_2_urls, chunk_starts[chunk], chunk_end, chunk)\n except Exception as e:\n # If an error occurs, log the bad chunk for further processing\n # later\n display_info(scrape_chunk.__name__, chunk_starts[chunk],\n chunk_end, e, traceback.format_exc())\n raise Exception\n\n\nif __name__ == '__main__':\n scrape_ufc_fights(chunk_size=20)\n backup_sql_database()\n","sub_path":"data_pipeline/ufc_fight_scraper.py","file_name":"ufc_fight_scraper.py","file_ext":"py","file_size_in_byte":15024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"250737371","text":"from .core import *\n\n\n##-----------------------------------------------------------------------------\n## pre- and post- conditions\n##-----------------------------------------------------------------------------\ndef instrument_is_MOSFIRE():\n '''Verifies that MOSFIRE is the currently selected instrument.\n '''\n INSTRUMEkw = ktl.cache(service='dcs', keyword='INSTRUME')\n if INSTRUMEkw.read() != 'MOSFIRE':\n raise FailedCondition('MOSFIRE is not the selected instrument')\n\n\n##-----------------------------------------------------------------------------\n## mxy offset\n##-----------------------------------------------------------------------------\ndef mxy(dx, dy, skipprecond=False, skippostcond=False):\n '''Moves dx dy arcseconds in the instrument pixel coordinates.\n \n Calls shell scripts:\n - mosfireScriptMsg\n - wftel\n '''\n this_function_name = inspect.currentframe().f_code.co_name\n log.debug(f\"Executing: {this_function_name}\")\n\n ##-------------------------------------------------------------------------\n ## Pre-Condition Checks\n if skipprecond is True:\n log.debug('Skipping pre condition checks')\n else:\n instrument_is_MOSFIRE()\n # tracking is True?\n \n ##-------------------------------------------------------------------------\n ## Script Contents\n\n dcs = ktl.cache(service='dcs')\n\n autresum = dcs['autresum'].read()\n angle = 0.136 * np.pi/180 # offset between CSU and detector [rad]\n u = dx*np.cos(angle) + dy*np.sin(angle)\n v = dy*np.cos(angle) - dx*np.sin(angle)\n\n now = datetime.utcnow()\n exec_date = now.strftime('%Y/%m/%d,%H:%M:%S')\n\n dcs['instxoff'].write(u)\n dcs['instyoff'].write(v)\n dcs['rel2curr'].write(True)\n\n # log the move. 
This should be temporary because it adds 0.05 sec to\n # the execution of the mxy command.\n nightpath = f'/s/nightly1/{now.year:4d}/{now.month:02d}/{now.day:02d}/'\n# offset_str = f'modify -s dcs instxoff={u:.3f} instyoff={v:.3f} rel2base=t'\n offset_str = f\"dcs['instxoff'].write({u}) dcs['instyoff'].write({v}) dcs['rel2curr'].write(True)\"\n mosfireScriptMsg = ['mosfireScriptMsg',\n '-f', f'{nightpath}instrumentOffsets',\n '-m', f'{exec_date} {offset_str}']\n subprocess.call(mosfireScriptMsg)\n\n tick = datetime.utcnow()\n subprocess.call(['wftel', autresum])\n tock = datetime.utcnow()\n duration = (tock-tick).total_seconds()\n print(f'mxy wftel completed in {duration:.2f} sec')\n \n ##-------------------------------------------------------------------------\n ## Post-Condition Checks\n if skippostcond is True:\n log.debug('Skipping post condition checks')\n else:\n pass\n\n return None\n\n\ndef mxy_with_args():\n p = argparse.ArgumentParser(description=description)\n p.add_argument('dx', type=float, help=\"X distance in arcsec\")\n p.add_argument('dy', type=float, help=\"Y distance in arcsec\")\n args = p.parse_args()\n mxy(args.dx, args.dy)\n\n\n##-----------------------------------------------------------------------------\n## sltmov offset\n##-----------------------------------------------------------------------------\ndef sltmov(distance, skipprecond=False, skippostcond=False):\n '''Move along slit\n '''\n this_function_name = inspect.currentframe().f_code.co_name\n log.debug(f"Executing: {this_function_name}\")\n\n ##-------------------------------------------------------------------------\n ## Pre-Condition Checks\n if skipprecond is True:\n log.debug('Skipping pre condition checks')\n else:\n instrument_is_MOSFIRE()\n # tracking is True?\n\n ##-------------------------------------------------------------------------\n ## Script Contents\n\n dcs = ktl.cache(service='dcs')\n angle = -3.74 * np.pi/180 # slit angle with respect to detector y pixels\n dx = distance * np.sin(angle)\n dy = distance * np.cos(angle)\n log.info(f'Making sltmov {distance}')\n mxy(dx, dy)\n \n ##-------------------------------------------------------------------------\n ## Post-Condition Checks\n if skippostcond is True:\n log.debug('Skipping post condition checks')\n else:\n pass\n\n return None\n\n\n##-----------------------------------------------------------------------------\n## gotobase\n##-----------------------------------------------------------------------------\ndef gotobase(skipprecond=False, skippostcond=False):\n '''gotobase\n '''\n this_function_name = inspect.currentframe().f_code.co_name\n log.debug(f\"Executing: {this_function_name}\")\n\n ##-------------------------------------------------------------------------\n ## Pre-Condition Checks\n if skipprecond is True:\n log.debug('Skipping pre condition checks')\n else:\n instrument_is_MOSFIRE()\n \n ##-------------------------------------------------------------------------\n ## Script Contents\n\n dcs = ktl.cache(service='dcs')\n # modify -s dcs raoff=0 decoff=0 rel2base=true\n dcs['RAOFF'].write(0)\n dcs['DECOFF'].write(0)\n dcs['REL2BASE'].write(True)\n \n ##-------------------------------------------------------------------------\n ## Post-Condition Checks\n if skippostcond is True:\n log.debug('Skipping post condition checks')\n else:\n pass\n\n return None\n\n\n##-----------------------------------------------------------------------------\n## 
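# --- illustrative aside (editorial addition; not part of the original record) ---
# mxy() above rotates the requested (dx, dy) offset by the fixed CSU/detector
# angle before writing instxoff/instyoff. A standalone check that the transform
# is a pure rotation (the angle value is copied from mxy):
import numpy as np

angle = 0.136 * np.pi / 180
dx, dy = 1.0, 0.0
u = dx * np.cos(angle) + dy * np.sin(angle)
v = dy * np.cos(angle) - dx * np.sin(angle)
assert abs(np.hypot(u, v) - np.hypot(dx, dy)) < 1e-12  # a rotation preserves length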
markbase\n##-----------------------------------------------------------------------------\ndef markbase(skipprecond=False, skippostcond=False):\n '''markbase\n '''\n this_function_name = inspect.currentframe().f_code.co_name\n log.debug(f\"Executing: {this_function_name}\")\n\n ##-------------------------------------------------------------------------\n ## Pre-Condition Checks\n if skipprecond is True:\n log.debug('Skipping pre condition checks')\n else:\n instrument_is_MOSFIRE()\n \n ##-------------------------------------------------------------------------\n ## Script Contents\n\n dcs = ktl.cache(service='dcs')\n dcs['MARK'].write(True)\n\n ##-------------------------------------------------------------------------\n ## Post-Condition Checks\n if skippostcond is True:\n log.debug('Skipping post condition checks')\n else:\n pass\n\n return None\n\n","sub_path":"mosfire/dcs.py","file_name":"dcs.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"629126761","text":"import csv \nimport json \nimport re\n\ndef deEmojify(text):\n regrex_pattern = re.compile(pattern = \"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\xa0\"\n u\"\\U0001f97a\"\n \"]+\", flags = re.UNICODE)\n return regrex_pattern.sub(r'_REMOVE_',text)\n\ncsvfile = \"ccClub2021_FP.csv\"\njsonfile = \"db.json\"\n\njsonArray = []\nwith open(csvfile, encoding='utf-8') as csvf: \n csvReader = csv.DictReader(csvf) \n for idx, row in enumerate(csvReader):\n content = [ x for x in deEmojify( row[\"content\"] ).split(\"#\")[1:] if \"_REMOVE_\" not in x ]\n row[\"content\"] = \"\".join( \" #\"+x.rstrip() for x in content ) \n \n temp = {\"model\": \"Gallery.lottery\", \"pk\": idx, \"fields\":row}\n jsonArray.append(temp)\n\nwith open(jsonfile, 'w', encoding='utf-8') as jsonf: \n jsonString = json.dumps(jsonArray, indent=4, ensure_ascii=False)\n jsonf.write(jsonString)\n","sub_path":"ccClub2021_FP/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"385238023","text":"import leht\n# from datetime import datetime\ntest=leht.result('3pmramnaym','ten')\nquery = test.queries()\n\n# 30/12/2003\n# 13/03/2005\n# 13/03/2005\n\n# datestring = \"2008-12-12 19:21:10\" %Y-%m-%d %H:%M:%S\n# alb = datetime.strptime('13/03/2005', '%d/%m/%Y')\n# alb = datetime.strptime('13/03/2005', '%d/%m/%Y')\n# # print alb.year, alb.month, alb.day\n# print(alb.year)\n\n\ntest.album_location_json = '../dist/mp3/{}/info.json'\ntest.album_location_cover = '../dist/mp3/{}/cover{}'\ntest.album_location_audio = '../dist/mp3/{}/{}.mp3'\n\n# from=1 to=last type=scv\n# audio=yes album=3\n# audio=yes from=1 to=last type=scv artist=3\n# audio=yes from=1 to=last type=scv artist=29\n\ntest.album_list_scan_audio_save=query['audio'] and True\nquery_from = query['from'].isnumeric() and int(query['from']) > 0 and query['from']\nquery_to = query['to']\n\nif 'album' in query and query['album'].isnumeric() and int(query['album']) > 0:\n test.album_info('mm',query['album'],True)\nelse:\n if 'artist' in query and query['artist'].isnumeric() and int(query['artist']) > 0:\n test.album_list_scan_by_artist=query['artist']\n\n if query['log-id']:\n try:\n test.album_ids = test.json_load(query['log-id'])\n except 
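# --- illustrative aside (editorial addition; not part of the original record) ---
# deEmojify() above substitutes the sentinel _REMOVE_ for emoji so that any
# hashtag containing one can be filtered out afterwards. A minimal sketch of
# that sentinel-then-filter idea (the single range used here is a simplification):
import re

pattern = re.compile("[\U0001F600-\U0001F64F]+", flags=re.UNICODE)
text = pattern.sub("_REMOVE_", "#fun #party\U0001F600")
tags = [t for t in text.split("#")[1:] if "_REMOVE_" not in t]
assert tags == ["fun "]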
Exception as e:\n test.album_ids = {}\n\n for item_type in set(query['type']):\n if query_from and item_type in 'scv':\n test.album_lists={}\n test.album_list_scan('mm',item_type.upper(),query_from,query_to)\n if query['log-list']:\n if test.album_lists:\n test.json_save(query['log-list'].format(item_type),test.album_lists)\n # if test.artist_lists:\n # test.json_save(query['log-list'].format(item_type),test.artist_lists)\n\n if query['log-id']:\n test.json_save(query['log-id'],test.album_ids)","sub_path":"m3s/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"336994903","text":"\nfrom itertools import combinations\nfrom operator import itemgetter\nimport statistics\nimport math\nimport xlrd\n\n\ndef read_excel(filename):\n\tfile_data = xlrd.open_workbook(filename)\n\ttable = file_data.sheets()[0]\n\treturn table\n\ndef remove_0(row):\n\treturn [data for data in row if data != 0]\n\n#generating candidate list\ndef sea_variance():\n\tsea_data = read_excel(\"Complete data set.xlsx\");\n\tblock_var = []\n\tfor block in range(1,21):\n\t\trow_data = sea_data.row_values(block)\n\t\tblock_var.append((block, statistics.variance(row_data)))\n\n\tmax_var = max([y for (x,y) in block_var])\n\tblock_var_score = [(x, y / max_var ) for (x,y) in block_var]\n\n\t#for tide power station, sea variance as high as possible\n\tpower_score = sorted(block_var_score, key = itemgetter(1), reverse = True)\n\t#for harbour, sea variance as low as possible\n\tharbour_score = [(x, 1-y) \n\t\t\t\t\tfor (x,y) in sorted(block_var_score, key = itemgetter(1))]\n\n\tprint(power_score)\n\tprint(harbour_score)\n\n\tpower_cand = [x for (x,y) in power_score]\n\tharbour_cand = [x for (x,y) in harbour_score]\n\n\treturn power_cand,harbour_cand, block_var_score\n\ndef shorten_candidate(score_list):\n\tscore_list = sorted(score_list, key = itemgetter(1), reverse = True)\n\tresult = [int(x) for x,y in score_list][:first_k_element]\n\treturn result\n\ndef candidate_list():\n\n\t#define the data set for \n\tagriculture_score = []\n\tfishing_score = []\n\tprivate_housing_score = []\n\trecreation_score = []\n\tconservation_score = []\n\tother_score = []\n\n\t#from row_index 57 to 77 are the score data in the excel file\n\t#read the score from excel\n\tscore_rowindex_range = [x for x in range(57, 77)]\n\tfor row_index in score_rowindex_range:\n\t\trow_data = score_table.row_values(row_index)\n\t\t#row_data is in the form:\n\t\t#block_number, agriculture, fishing , housing, recreation, conservation\n\t\t#0\t\t\t\t1 \t\t\t2 \t\t\t3\t,\t4\t\t, 5\n\t\tagriculture_score.append((row_data[0], row_data[1]))\n\t\tfishing_score.append((row_data[0], row_data[2]))\n\t\tprivate_housing_score.append((row_data[0], row_data[3]))\n\t\trecreation_score.append((row_data[0], row_data[4]))\n\t\tconservation_score.append((row_data[0], row_data[5]))\n\n\t#sorting the scores and finding the top-k places for this function of land\n\tagriculture = shorten_candidate(agriculture_score)\n\tfishing = shorten_candidate(fishing_score)\n\tprivate_housing = shorten_candidate(private_housing_score)\n\trecreation = shorten_candidate(recreation_score)\n\tconservation = shorten_candidate(conservation_score)\n\ttemp_power, temp_harbour, block_var_score= sea_variance()\n\telectricity = temp_power[:first_k_element]\n\tharbour = temp_harbour[:first_k_element]\n\n\t##########################ADDING RESTRICTION HERE################\n\t#reducing the candidate lists 
by sea_level, connectivity and duck numbers\n\t#this forbidden place is generated by a calculation from matlab\n\tsea_level_danger = [4, 5, 7, 14, 15, 16, 18] \n\n\t#this place is place with 0 ducks\n\tzero_duck_zone = [1,2,3,4,5,6] \n\n\t#Every public area should connected to two area\t\n\t#read from adjacent matrix\n\tpublic_area_list = [5, 6, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19 ,20] \n\n\tfor i in range(first_k_element):\n\n\t\tif private_housing[i] in sea_level_danger:\n\t\t\tprivate_housing[i] = 0\n\n\t\tif (recreation[i] in sea_level_danger or \n\t\t\trecreation[i] not in public_area_list):\n\t\t\trecreation[i] = 0\n\n\t\t#remoce the case there is no duck\n\t\tif (conservation[i] in sea_level_danger or \n\t\t\tconservation[i] in zero_duck_zone or\n\t\t\tconservation[i] not in public_area_list):\n\t\t\tconservation[i] = 0\n\n\n\n\tprivate_housing = remove_0(private_housing)\n\trecreation = remove_0(recreation)\n\tconservation = remove_0(conservation)\n\n\n\t#print out the result\n\tprint(\"Candidate list as followed\")\n\tprint (\"agriculture:\",agriculture)\n\tprint (\"fishing\", fishing)\n\tprint (\"private_housing:\", private_housing)\n\tprint (\"recreation:\", recreation)\n\tprint (\"conservation:\", conservation)\n\n\treturn agriculture, fishing, private_housing, conservation, recreation, electricity, harbour, block_var_score\n\n#simulation part\ndef block_max_score_function(block):\n\tblock_data = score_table.row_values(block + 56)\n\tmax_score, function_num = 0, 0\n\tfor i in range(1,6):\n\t\tif block_data[i] > max_score:\n\t\t\tmax_score = block_data[i]\n\t\t\tfunction_num = i\n\n\tfunction = score_table.row_values(56)[function_num]\n\n\treturn ((function, block, max_score))\n\ndef remaining_candidate(waiting_list, temp_result):\n\tresult = set(waiting_list)\n\tfor temp in temp_result:\n\t\tresult -= set(temp)\n\n\treturn result\n\ndef calculate_score(current_plan):\n\t#take a list with length 6\n\t#data in the file in the form:\n\t#agriculture, fishing, housing, recreation, convervation, other\n\tscore = 0\n\t#where row index 57 means the score in block1\n\tscore_rowindex_range = [x for x in range(57, 77)]\n\n\tmode = 1\n\tfor function in current_plan:\n\t\t#function 1 = conservation, function = 2 recreation, function = 3 fishing\n\t\t#function 4 = agriculture, function = 5 private housing function =6 other area score\n\t\tfor block in function:\n\t\t\t#when block = 1, 1+56 = 57 which is blcok 1 data in score_table\n\t\t\tblock_data = score_table.row_values(block + 56) \n\t\t\tif mode == 1:\n\t\t\t\tscore += block_data[5]\n\t\t\tif mode ==2:\n\t\t\t\tscore += block_data[4]\n\t\t\tif mode ==3:\n\t\t\t\tscore += block_data[2]\n\t\t\tif mode ==4:\n\t\t\t\tscore += block_data[1]\n\t\t\tif mode ==5:\n\t\t\t\tscore += block_data[3]\n\t\t\tif mode ==6:\n\t\t\t\treturn score\n\t\t\t\t#score += block_data[6]\n\t\tmode += 1\n\n\treturn score\n\ndef simulation(agriculture, fishing, private_housing, conservation, recreation, electricity, harbour, block_var_score):\n\t\n\tprint (\"\\nstart simulation\\n\")\n\n\t#1 for max block, 2 for block with 16\n\ttotal_result, temp_result, best_comb, best_comb2 = [], [], [], []\n\n\tmax_score, max_score2 = 0, 0\n\n\tcount = 0\n\t#determin the 12 area that is necessary by the council rule\n\tfor conserve in combinations(conservation, 3):\n\t\ttemp_result = []\n\t\ttemp_result.append(conserve)\n\t\tfor recreate in combinations(remaining_candidate(recreation, temp_result),3):\n\t\t\ttemp_result.append(recreate)\n\t\t\tfor fish in 
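# --- illustrative aside (editorial addition; not part of the original record) ---
# candidate_list() above marks blocked candidates as 0 and strips them with
# remove_0(). The same restriction step as a single comprehension (the
# candidate values are made up; blocked mirrors sea_level_danger above):
candidates = [4, 9, 14, 17]
blocked = {4, 5, 7, 14, 15, 16, 18}
filtered = [c for c in candidates if c not in blocked]
assert filtered == [9, 17]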
combinations(remaining_candidate(fishing, temp_result), 2):\n\t\t\t\ttemp_result.append(fish)\n\t\t\t\tfor agri in combinations(remaining_candidate(agriculture, temp_result),2):\n\t\t\t\t\ttemp_result.append(agri)\n\t\t\t\t\tfor housing in combinations(remaining_candidate(private_housing, temp_result),2):\n\t\t\t\t\t\ttemp_result.append(housing)\n\t\t\t\t\t\tfor elec in combinations(remaining_candidate(electricity, temp_result),1):\n\t\t\t\t\t\t\ttemp_result.append(elec)\n\t\t\t\t\t\t\tfor har in combinations(remaining_candidate(harbour, temp_result),1):\n\t\t\t\t\t\t\t\ttemp_result.append(har)\n\n\t\t\t\t\t\t\t\tcurrent_plan_score = calculate_score(temp_result)\n\t\t\t\t\t\t\t\tcurrent_plan_score += block_var_score[elec[0] - 1][1]\n\t\t\t\t\t\t\t\tcurrent_plan_score2 = current_plan_score\n\n\t\t\t\t\t\t\t\t#excluding block 16 because it is an isolated island\n\t\t\t\t\t\t\t\tif har[0] != 16:\n\t\t\t\t\t\t\t\t\tcurrent_plan_score += (1-block_var_score[har[0] - 1][1])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcurrent_plan_score2 += (1-block_var_score[har[0] - 1][1])\n\t\t\t\t\t\t\t\t\tif current_plan_score2 > max_score2:\n\t\t\t\t\t\t\t\t\t\tmax_score2 = current_plan_score2\n\t\t\t\t\t\t\t\t\t\tbest_comb2 = temp_result.copy()\n\t\t\t\t\t\t\t\t\t\tprint(\"\\n\")\n\t\t\t\t\t\t\t\t\t\tprint (best_comb2, max_score2)\n\n\n\t\t\t\t\t\t\t\tif current_plan_score > max_score:\n\t\t\t\t\t\t\t\t\tmax_score = current_plan_score\n\t\t\t\t\t\t\t\t\t#list = old_list operation will not generate two lists, \n\t\t\t\t\t\t\t\t\t#this is Python's feature\n\t\t\t\t\t\t\t\t\tbest_comb = temp_result.copy()\n\t\t\t\t\t\t\t\t\tprint(\"\\n\")\n\t\t\t\t\t\t\t\t\tprint (best_comb, max_score)\n\n\t\t\t\t\t\t\t\t#a small line for fun\n\t\t\t\t\t\t\t\tprint(\"number of combinations tried:\", count, \"\\r\", end = \"\")\n\t\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t#go into the next combination\n\t\t\t\t\t\t\t\ttemp_result.remove(temp_result[-1])\n\n\t\t\t\t\t\t\ttemp_result.remove(temp_result[-1])\n\n\t\t\t\t\t\ttemp_result.remove(temp_result[-1])\n\n\t\t\t\t\ttemp_result.remove(temp_result[-1])\n\t\t\n\t\t\t\ttemp_result.remove(temp_result[-1])\n\t\n\t\t\ttemp_result.remove(temp_result[-1])\n\n\n\t#the case excluding block 16 as harbor\n\t#result for first 13 elements\n\tprint (\"This is the second best combination\")\n\tprint (\"conservation\", \" recreation\", \" fishing\", \"agriculture\", \"housing\",\n\t\t\t\"elec\", \"harbour\")\n\tprint(best_comb, max_score)\n\n\t#calculate the rest land with their highest score\n\tdetermined_block = [block for function in best_comb for block in function]\n\tremaining_block = set([x for x in range(1,21)]) - set(determined_block)\n\tfor block in remaining_block:\n\t\t(function, block_num, score) = block_max_score_function(block)\n\t\tmax_score += score\n\t\tprint (block_num, \":\", function, score)\n\tprint (\"the total score for the whole town is\", max_score)\n\n\t#the case using block 16 as harbor\n\t#result for first 13 elements\n\tprint (\"This is the best combination\")\n\tprint (\"conservation\", \" recreation\", \" fishing\", \"agriculture\", \"housing\",\n\t\t\t\"elec\", \"harbour\")\n\tprint(best_comb2, max_score2)\n\n\t#calculate the rest land with their highest score\n\tdetermined_block = [block for function in best_comb2 for block in function]\n\tremaining_block = set([x for x in range(1,21)]) - set(determined_block)\n\tfor block in remaining_block:\n\t\t(function, block_num, score) = block_max_score_function(block)\n\t\tmax_score2 += score\n\t\tprint (block_num, \":\", 
function, score)\n\tprint (\"the total score for the whole town is\", max_score2)\n\n\n\n\nglobal first_k_element\nfirst_k_element = 20\t#find the first k element in score list\n\nglobal score_table\nscore_table = read_excel(\"data_conver_seadata included.xlsm\")\n\n\nagriculture, fishing, private_housing, conservation, recreation, electricity, harbour, block_var_score= candidate_list()\n\nsimulation(agriculture, fishing, private_housing, conservation, recreation, \n\t\t\telectricity, harbour, block_var_score)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Simulation/SimulationV5.py","file_name":"SimulationV5.py","file_ext":"py","file_size_in_byte":9521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"484688343","text":"\"\"\"Multiplication class\"\"\"\nfrom calc.operations.calculation import Calculation\n\n\n# pylint: disable=too-few-public-methods\n\n\nclass Multiplication(Calculation):\n \"\"\"multiplication calculation object\"\"\"\n\n def get_result(self):\n \"\"\"get the multiplication results\"\"\"\n result = 1.0\n for value in self.values:\n result = result * value\n print(result)\n return result\n","sub_path":"calculations/operations/multiplication.py","file_name":"multiplication.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"385012338","text":"from turtle import Turtle\n\nMOVE_SPEED = 20\nUP = 90\nDOWN = 270\nLEFT = 180\nRIGHT = 0\n\nclass Snake:\n\n def __init__(self):\n self.segments = []\n self.create_snake()\n\n def create_snake(self):\n for _ in range(3):\n self.create_segment()\n self.head = self.segments[0]\n\n\n def create_segment(self):\n segment = Turtle(shape=\"square\")\n segment.pu()\n if len(self.segments) > 0:\n last_seg_pos = self.segments[-1].pos()\n segment.setpos(last_seg_pos)\n segment.color(\"white\")\n self.segments.append(segment)\n\n\n def move(self):\n for seg in range(len(self.segments) - 1, 0, -1):\n if seg > 0:\n next_seg = self.segments[seg - 1]\n self.segments[seg].goto(next_seg.pos())\n self.head.forward(MOVE_SPEED)\n\n\n def up(self):\n if self.head.heading() != DOWN:\n self.head.seth(UP)\n\n\n def down(self):\n if self.head.heading() != UP:\n self.head.seth(DOWN)\n\n\n def left(self):\n if self.head.heading() != RIGHT:\n self.head.seth(LEFT)\n\n\n def right(self):\n if self.head.heading() != LEFT:\n self.head.seth(RIGHT)\n\n \n def reset(self):\n for seg in self.segments:\n seg.goto(1000, 1000)\n self.segments.clear()\n self.create_snake()\n self.head = self.segments[0]\n\n","sub_path":"day-24/snek/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"204494666","text":"import pysftp\nimport os\nimport paramiko\nimport config\nimport func\nimport time\nimport uuid\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\n# from azure.storage.blob import BlockBlobService\n\ncnopts = pysftp.CnOpts()\ncnopts.hostkeys = None\n\n# This is used during development phase for testing purpose\nif config.send_email_on == 'Yes':\n try:\n func.send_email(config.fromaddr,config.toaddr,'SFTP To Blob Python Job Started','Started')\n except:\n print(\"Email Sending Failed\")\n func.error_log('Email Sending Failed')\n\n# Making sure the solution does not fail even if there are large files in the sftp location\n\n# paramiko.sftp_file.SFTPFile.MAX_REQUEST_SIZE 
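# --- illustrative aside (editorial addition; not part of the original record) ---
# Snake.move() above walks the segments from tail to head so each one takes its
# predecessor's position before the head advances. The same idea on plain
# coordinate pairs (20 matches the MOVE_SPEED constant above):
body = [(0, 0), (-20, 0), (-40, 0)]       # head first
for i in range(len(body) - 1, 0, -1):
    body[i] = body[i - 1]                 # each segment follows the one ahead
body[0] = (body[0][0] + 20, body[0][1])   # the head steps forward
assert body == [(20, 0), (0, 0), (-20, 0)]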
= pow(2, 22) # 4MB per chunk # Slow but reliable with large files >30GB\nparamiko.sftp_file.SFTPFile.MAX_REQUEST_SIZE = pow(2, 40)\n\n\n\n# Current working directory\ndir = config.local_path\n\nmyHostname = config.myHostname\nmyUsername = config.myUsername\nmyPassword = config.myPassword\n\nIterator = 0\n\nfor Iterator in range(20):\n Iterator = Iterator + 1\n\n try:\n\n print(\"Attempting to Connect to SFTP\")\n\n sftp = pysftp.Connection(host=myHostname, username = myUsername, password = myPassword, cnopts=cnopts)\n\n print(\"Connection to SFTP successfully established\")\n\n\n # This is used during development phase for testing purpose\n func.send_email(config.fromaddr, config.toaddr, 'SFTP To Blob Python SFTP Connection Established', 'Connection to SFTP successfully established')\n\n sftp.cwd('/Optimum/Australia/')\n\n remote_file_list = sftp.listdir_attr(config.remote_path)\n\n\n for attr in remote_file_list:\n\n print(\"Downloading File: %s\" % attr.filename)\n\n try:\n if attr.st_mtime == os.path.getmtime(os.path.join(config.local_path, attr.filename)):\n print(\"Latest file already exists\")\n else:\n remote_file_name_with_path = [config.remote_path, attr.filename]\n remote_file_name_with_path = '/'.join(remote_file_name_with_path)\n\n local_file_name_with_path = [dir, attr.filename]\n local_file_name_with_path = '\\\\'.join(local_file_name_with_path)\n\n sftp.get(remote_file_name_with_path, local_file_name_with_path, preserve_mtime=True)\n\n print(\"Downloaded File: %s\" % attr.filename)\n\n except:\n remote_file_name_with_path = [config.remote_path, attr.filename]\n remote_file_name_with_path = '/'.join(remote_file_name_with_path)\n\n local_file_name_with_path = [dir, attr.filename]\n local_file_name_with_path = '\\\\'.join(local_file_name_with_path)\n\n sftp.get(remote_file_name_with_path, local_file_name_with_path, preserve_mtime=True)\n\n print(\"Downloaded File: %s\" % attr.filename)\n\n # The following code is for downloading all items in a directory in the sftp location\n # sftp.get_d('FP_PROD',dir,preserve_mtime=True)\n\n # Get ouf of the loop as the connection worked and files downloaded\n\n break\n\n except:\n\n print(\"The attempt to connect to SFTP failed for %s time(s)\" % Iterator)\n func.error_log('The attempt to connect to SFTP failed ')\n Iterator = Iterator + 1\n time.sleep(10)\n\n # This is used during development phase for testing purpose\n if config.send_email_on == 'Yes':\n try:\n func.send_email(config.fromaddr, config.toaddr, 'SFTP To Blob Python Job Failed', 'The attempt to connect to SFTP failed')\n except:\n print(\"Email Sending Failed\")\n func.error_log('Email Sending Failed')\n\n\n\n # transport.close()\n\n #import os\n #os.system(\". 
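# --- illustrative aside (editorial addition; not part of the original record) ---
# The loop above is a "retry until success, then break" pattern; Python's
# for/else makes the give-up branch explicit (connect_and_download is a
# hypothetical stand-in for the pysftp work above):
import time

def connect_and_download():
    pass  # placeholder for the real transfer

for attempt in range(20):
    try:
        connect_and_download()
        break
    except Exception:
        time.sleep(10)
else:
    raise RuntimeError("all 20 connection attempts failed")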
C:\\Anaconda\\evvs\\sftptoblob2\\Scripts\\activate && python && sftptoblob.py\")\n md = r'c:\\Anaconda\\envs\\sftptoblob3\\Scripts\\python c:\\sftptoblob\\sftptoblob.py'\n # md = 'c:\\\\Anaconda\\\\envs\\\\sftptoblob2\\\\Scripts\\\\python c:\\\\Users\\\\Donovan.Gregory\\\\PycharmProjects\\\\untitled\\\\sftptoblob.py'\n\n # The following code makes the iterator invalid, however, this is the only way the connection is 100% reliably established\n # as with iterator even after 20 trial the connection failed\n # it was noted that the python code had to break and run again to establish connection through repeated trial\n # therefore, this technique was used.\n\n os.system(md)\n\n\n\n\nsftp.close()\n\n\n\n\n## Pushing to Blob Storage\n\n# Getting list of files to upload\n\nfile_list = os.listdir(config.local_path)\n\nprint(\"The following files will be uploaded to Blob Storage\")\nprint(file_list)\n\n\n\n\n# Blob Storage Connection String Retrieval from environment\n\ntry:\n connect_str = os.getenv('CONNECT_STR')\n\n # List the blobs in the container\n print(\"\\nListing blobs...\")\n\n service = BlobServiceClient.from_connection_string(connect_str)\n blob_service_client = service\n\n # Getting List of Blobs in the Container\n\n container_client = ContainerClient(account_url=config.account_name, container_name=config.container_name,\n credential=config.account_key)\n\n blobs_list = container_client.list_blobs()\n\n blob_file_list = []\n\n for blob in blobs_list:\n print(blob.name + '\\n')\n # print(blob.last_modified)\n\n blob_file_list.append(blob.name)\n\nexcept:\n print(\"Blob Connection could not be established\")\n func.error_log('Blob Connection could not be established')\n if config.send_email_on == 'Yes':\n try:\n func.send_email(config.fromaddr, config.toaddr, 'Blob Connection could not be establishedd','Blob Connection could not be established')\n except:\n print(\"Email Sending Failed\")\n func.error_log('Email Sending Failed')\n\n\n# Upload Blobs in the container\n\ntry:\n for i in range(len(file_list)):\n\n print(i)\n\n # Establish file connection details\n blob_client = blob_service_client.get_blob_client(container='blobparking', blob=file_list[i])\n\n print(\"\\nUploading to Azure Storage as blob:\\n\\t\" + file_list[i])\n\n # Create a file in local Documents directory to upload and download\n local_path = config.local_path\n local_file_name = file_list[i]\n local_file_name_with_path = os.path.join(local_path, local_file_name)\n upload_file_path = os.path.join(local_path, local_file_name)\n\n # Upload the file\n with open(upload_file_path, \"rb\") as data:\n\n if file_list[i] in blob_file_list:\n blob_client.delete_blob()\n\n blob_client.upload_blob(data)\n\nexcept:\n print(\"Blob Upload Failed\")\n func.error_log('Blob Upload Failed')\n\n if config.send_email_on == 'Yes':\n try:\n func.send_email(config.fromaddr, config.toaddr, 'Blob Upload Failed','Blob Upload Failed')\n except:\n print(\"Email Sending Failed\")\n func.error_log('Email Sending Failed')\n\n\n# This is used during development phase for testing purpose\nif config.send_email_on == 'Yes':\n try:\n func.send_email(config.fromaddr, config.toaddr, 'SFTP To Blob Python Job Completed', 'The Job is now complete')\n except:\n print(\"Email Sending Failed\")\n func.error_log('Email Sending Failed')","sub_path":"sftptoblob.py","file_name":"sftptoblob.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"645474030","text":"def 
difference_in_scores2(in_file,blout_queries_file,organism):\n import csv\n import statistics\n import matplotlib.pyplot as plt\n import numpy as np\n import matplotlib.mlab as mlab\n import scipy.stats\n import math\n with open(in_file) as csv_in:\n reader = csv.reader(csv_in)\n\n my_dict = determine_subgroups(blout_queries_file,organism)\n correct_diff = []\n incorrect_diff = []\n \n for x in my_dict: # this comes from the determine_subgroups function\n \n TP = 0 #confusion matrix variables\n FP = 0 \n FN = 0 \n TN = 0 \n misc = 0\n training = 0 \n \n for row in reader:\n score1 = float(row[2])\n score2 = float(row[4])\n \n prediction = str(row[1]) #my assignment is in the 2nd col\n actual = str(row[6]) #blast results are in the 7th col \n \n if phrases(x, prediction) == True and \\\n phrases(my_dict[x],actual) == True:\n TP +=1 \n conf_matrix_type = 'TP'\n \n elif phrases(x, prediction) == False and \\\n phrases(my_dict[x],actual) == True:\n FN +=1\n conf_matrix_type = 'FN'\n \n elif phrases(x,prediction) == True and \\\n phrases(my_dict[x],actual) == False:\n FP +=1\n conf_matrix_type = 'FP'\n \n elif phrases(x,prediction) == False and \\\n phrases(my_dict[x],actual) == False:\n TN +=1\n conf_matrix_type = 'TN'\n \n else:\n misc +=1\n \n \n if conf_matrix_type == 'TP':\n diff = score1 - score2\n correct_diff.append(diff)\n elif conf_matrix_type == 'FP':\n diff = score1-score2\n incorrect_diff.append(diff)\n elif conf_matrix_type == 'FN':\n diff = score1 - score2\n incorrect_diff.append(diff) \n \n \n incorrect_mu = sum(incorrect_diff)/len(incorrect_diff)\n incorrect_sigma = statistics.stdev(incorrect_diff)\n \n correct_mu = sum(correct_diff)/len(correct_diff)\n correct_sigma = statistics.stdev(correct_diff)\n \n print(incorrect_mu,incorrect_sigma)\n print(correct_mu,correct_sigma)\n \n #norm dist for correct\n x = np.linspace(correct_mu - 3*correct_sigma, correct_mu +3*correct_sigma,100)\n plt.plot(x,scipy.stats.norm.pdf(x,correct_mu,correct_sigma),linewidth=3.5, label = 'Correct',color = 'Red')\n #norm dist for incorrect\n z = np.linspace(incorrect_mu - 3*incorrect_sigma, incorrect_mu + 3*incorrect_sigma,100)\n plt.plot(z,scipy.stats.norm.pdf(z,incorrect_mu,incorrect_sigma),linewidth = 3.5,label = 'Incorrect',color = 'Blue')\n \n #formatting\n plt.xticks(fontsize = 24)\n plt.yticks(fontsize = 24)\n plt.show\n \n#%%\n#works as of 22/02 \ndef difference_in_scores(in_file,blout_queries_file,organism): \n #e.g. h_sapiens_full_results.csv,h_sapiens_blout_queries, Homo sapiens \n #only works on an individual organism basis. Doesnt check by subgroup\n #simply checks if assignment was correct or not and averages it out\n import csv\n import sys\n #sys.stdout = open(out_file,'a+')\n with open(in_file) as csv_in:\n reader = csv.reader(csv_in)\n count = 0 \n sum_diff = 0 \n count_diff = 0 \n wrong_sum_diff = 0 \n wrong_diff_count = 0 \n correct_assignment = [] #can use these to calculate the s.d.\n incorrect_assignment = [] #which can then be used in a t-test for signific\n \n my_dict = determine_subgroups(blout_queries_file,organism)\n \n for row in reader:\n \n assigned_subg = str(row[1])\n assigned_subg = assigned_subg[1:] #removes space before e.g. 
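# --- illustrative aside (editorial addition; not part of the original record) ---
# The plotting block above fits a normal curve to each group's score
# differences from its sample mean and stdev. The fitting step in isolation
# (diffs is made-up data; xs/ys are ready for plt.plot):
import statistics
import numpy as np
import scipy.stats

diffs = [1.2, 0.8, 1.5, 1.1]
mu, sigma = sum(diffs) / len(diffs), statistics.stdev(diffs)
xs = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 100)
ys = scipy.stats.norm.pdf(xs, mu, sigma)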
' Mouse Heavy Chain 1'\n actual_subg = str(row[6])\n score1 = float(row[2])\n score2 = float(row[4])\n x = my_dict[assigned_subg]\n \n \n \n if phrases(x,actual_subg)==True: #was the assignment correct?\n diff = score1 - score2 #if so calc diff between scores\n sum_diff +=diff\n count_diff+=1\n elif phrases(x,actual_subg) == False: #was assignment incorrect?\n diff = score1-score2\n wrong_sum_diff += diff\n wrong_diff_count+=1\n \n \n av_diff_correct = sum_diff/count_diff\n av_diff_wrong = wrong_sum_diff/wrong_diff_count\n \n print('Organism:',organism)\n print('Average difference of correct assignment',av_diff_correct,'\\n')\n print('Average difference of wrong assignment',av_diff_wrong,'\\n')","sub_path":"difference_between_scores.py","file_name":"difference_between_scores.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"201775950","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\nN = 1000\nsteps = np.random.gamma(8, 1, N) * 1000\nx = [int(step) for step in steps]\ny = [(np.random.gamma(180, 1, 1)[0] - step / 250) for step in x]\n\nm, b, r, p, err = stats.linregress(x, y)\n\nrSq = r**2\n\nfig, ax = plt.subplots()\n\nax.annotate('$y=%(m).3fx+%(b).2f$\\n$r^2=%(rSq).4f$' % {\"m\": m, \"b\": b, \"rSq\": rSq},\n xy=(0, 0), xycoords='axes fraction', fontsize=18, ha='left', va='bottom')\n\ncolors = np.random.rand(N)\nplt.scatter(x, y, 100, c=colors, alpha=0.5)\n\nlineY = [m * val + b for val in x]\n\nplt.plot(x, lineY, linewidth=4)\n\nplt.suptitle(\"Impact of Physical Activity on Blood Sugar\", fontsize=20)\nplt.xlabel(\"Steps/Day (src: fitbit)\", fontsize=18)\nplt.ylabel(\"Average Blood Sugar (src: dexcom)\", fontsize=18)\n\nplt.show()\n","sub_path":"scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
+{"seq_id":"204248861","text":"#!/usr/local/bin/python3\n\nimport sys\nimport os\nimport inspect\n\ndef evenOddMap(array, func):\n if not all(isinstance(item, int) for item in array):\n print(\"List must be all numbers. Exiting.\")\n exit(1)\n if not inspect.isfunction(func):\n print(\"Second parameter must be a function. 
Exiting.\")\n exit(1)\n\n\n\n\n#Execution\n\ntestData = [1,2,3,4]\ntestBools = evenOddMap(testData, evenOddFunction)","sub_path":"mapfunction.py","file_name":"mapfunction.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"75373396","text":"#importing libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport seaborn as sns\r\n#%%\r\n#importing dataset\r\ntrain=pd.read_csv('train.csv')\r\ntest=pd.read_csv('test.csv')\r\n#%%\r\nprint(train.columns)\r\n#%%\r\n#missing values\r\nprint(train.isnull().sum())\r\n#%%\r\nprint(test.isnull().sum())\r\n#%%\r\n#visualizing the distribution for positive and negative examples\r\ndef see_chart(feature):\r\n survived = train[train['Survived']==1][feature].value_counts()\r\n dead = train[train['Survived']==0][feature].value_counts()\r\n df = pd.DataFrame([survived,dead])\r\n df.index = ['Survived','Dead']\r\n df.plot(kind='bar',stacked=False, figsize=(10,8))\r\n#return dictionary containing probability of survival for different classes within indivisual feature \r\ndef probability_of_survival(feature):\r\n t1=train[train['Survived']==0][feature].value_counts()\r\n t2=train[train['Survived']==1][feature].value_counts()\r\n t3=t2.loc[t1.index]\r\n dictn={}\r\n for val in t1.index:\r\n dictn[val]=round(t3.loc[val]/(t1.loc[val]+t3.loc[val]),2)\r\n return dictn\r\n#%%\r\nsee_chart('Pclass')\r\nprint(probability_of_survival('Pclass'))\r\n#data shows there are 63% chance of survival if person belongs to 1st class , 47% for 2nd class , 24% for 3rd class\r\n#therfore feature \"Pclass\" is strong indicator for prediction\r\n#%%\r\nsee_chart('Sex')\r\nprint(probability_of_survival('Sex'))\r\n#data shows there are 74% chance of survival if person is female and 19% if person is male\r\n#therfore feature \"Sex\" is strong indicator for prediction\r\n#%%\r\nsee_chart('SibSp')\r\n#data show less chance of survival if person is single with 0 sibling\r\n#%%\r\nsee_chart('Parch')\r\n#data show less chance of survival if person have 0 parch\r\n#%%\r\nsee_chart('Embarked')\r\nprint(probability_of_survival('Embarked'))\r\n#data show 34% chance of survival if person belong to S embarked , 39 % for Q embarked , 55% for C embarked\r\n#%%\r\n#Age distribution \r\nfacet = sns.FacetGrid(train, hue=\"Survived\",aspect=4)\r\nfacet.map(sns.kdeplot,'Age',shade= True)\r\nfacet.set(xlim=(0, train['Age'].max()))\r\nfacet.add_legend()\r\nplt.show()\r\n#people with age group of less than 18 is more likely to survive\r\n#%%\r\n#fare distribution \r\nfacet = sns.FacetGrid(train, hue=\"Survived\",aspect=4)\r\nfacet.map(sns.kdeplot,'Fare',shade= True)\r\nfacet.set(xlim=(0, train['Fare'].max()))\r\nfacet.add_legend()\r\nplt.show()\r\n\r\n#%%\r\n#feature Enginnering \r\n#extracting more information from data \r\n#Name Title of person('Mr','Miss',Ms.,etc.) 
may be good indicator\r\ntrain_test_data = [train, test] # combining train and test dataset\r\nfor dataset in train_test_data:\r\n dataset['Title'] = dataset['Name'].str.extract(' ([A-Za-z]+)\\.', expand=False)\r\n#%%\r\n#extracting first character of feature Cabin \r\nfor dataset in train_test_data:\r\n dataset['Cabin'] = dataset['Cabin'].str[:1]\r\n#%%\r\nprint(train['Title'].value_counts()) \r\n#%%\r\nprint(test['Title'].value_counts())\r\n#%%\r\n#mapping titles\r\ntitle_map = {\"Mr\": 0, \"Miss\": 1, \"Mrs\": 2, \r\n \"Master\": 3, \"Dr\": 3, \"Rev\": 3, \"Col\": 3, \"Major\": 3, \"Mlle\": 3,\"Countess\": 3,\r\n \"Ms\": 3, \"Lady\": 3, \"Jonkheer\": 3, \"Don\": 3, \"Dona\" : 3, \"Mme\": 3,\"Capt\": 3,\"Sir\": 3 }\r\nfor dataset in train_test_data:\r\n dataset['Title'] = dataset['Title'].map(title_map)\r\n#%%\r\n#mapping Sex \r\nsex_mapping = {\"male\": 0, \"female\": 1}\r\nfor dataset in train_test_data:\r\n dataset['Sex'] = dataset['Sex'].map(sex_mapping)\r\n#%%\r\n#imputing missing values\r\n#filling null values in Age columns by median of age gropued by Titles \r\ntrain[\"Age\"].fillna(train.groupby(\"Title\")[\"Age\"].transform(\"median\"), inplace=True)\r\ntest[\"Age\"].fillna(test.groupby(\"Title\")[\"Age\"].transform(\"median\"), inplace=True)\r\n#%%\r\n#null values in Embarked column is only 2 in train data we can fill by most frequent category \r\ntrain['Embarked'].fillna(train['Embarked'].value_counts().index[0],inplace=True)\r\n#%%\r\n#mapping embarked columns\r\nembarked_mapping = {\"S\": 0, \"C\": 1, \"Q\": 2}\r\nfor dataset in train_test_data:\r\n dataset['Embarked'] = dataset['Embarked'].map(embarked_mapping)\r\n#%%\r\n#imputing Fare columns by mean of fare grouped by Classes\r\ntest[\"Fare\"].fillna(test.groupby(\"Pclass\")[\"Fare\"].transform(\"mean\"), inplace=True)\r\n#%%\r\n#most of data is empty in Cabin is empty 687 in train data and 327 in test data\r\n#distribution of cabin among different classes\r\nPclass1 = train[train['Pclass']==1]['Cabin'].value_counts()\r\nPclass2 = train[train['Pclass']==2]['Cabin'].value_counts()\r\nPclass3 = train[train['Pclass']==3]['Cabin'].value_counts()\r\ndf = pd.DataFrame([Pclass1, Pclass2, Pclass3])\r\ndf.index = ['1st class','2nd class', '3rd class']\r\ndf.plot(kind='bar',stacked=True, figsize=(10,5))\r\n#%%\r\n#from the plot we can see 1st class persons were in cabin C \r\n#therefore we can fill null value of cabin by most frequent cabin grouped by pclass\r\ntrain[\"Cabin\"].fillna(train.groupby(\"Pclass\")[\"Cabin\"].transform(lambda x: x.value_counts().index[0]), inplace=True)\r\ntest[\"Cabin\"].fillna(test.groupby(\"Pclass\")[\"Cabin\"].transform(lambda x: x.value_counts().index[0]), inplace=True)\r\n#%%\r\n#cabin mapping\r\n#label encoding Cabin \r\nimport sklearn.preprocessing as skp\r\nlable_cabin=skp.LabelEncoder()\r\ntrain['Cabin']=lable_cabin.fit_transform(train['Cabin'])\r\ntest['Cabin']=lable_cabin.transform(test['Cabin'])\r\n#%%\r\n#creating new column containing family size of person sibling+parch \r\ntrain[\"Family\"] = train[\"SibSp\"] + train[\"Parch\"] + 1\r\ntest[\"Family\"] = test[\"SibSp\"] + test[\"Parch\"] + 1\r\n#%%\r\n#final features for train and test\r\nfeature=['Pclass', 'Sex', 'Age', 'Fare', 'Cabin', 'Embarked', 'Title', 'Family']\r\n#%%\r\nftrain=train[feature]\r\nftest=test[feature]\r\n#%%\r\n#feature scalling \r\nfs=skp.StandardScaler()\r\nftrain=pd.DataFrame(fs.fit_transform(ftrain))\r\nftest=pd.DataFrame(fs.transform(ftest))\r\nftrain.columns=feature\r\nftest.columns=feature\r\n#%%\r\n#target 
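# --- illustrative aside (editorial addition; not part of the original record) ---
# The imputation above fills each missing Age with the median of its Title
# group via groupby().transform(). A tiny self-contained demo of that pattern:
import numpy as np
import pandas as pd

df = pd.DataFrame({"Title": [0, 0, 1, 1], "Age": [20.0, np.nan, 30.0, 34.0]})
df["Age"].fillna(df.groupby("Title")["Age"].transform("median"), inplace=True)
assert df["Age"].tolist() == [20.0, 20.0, 30.0, 34.0]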
value\r\ny=train['Survived']\r\n#%%\r\n#training from different models and evaluting performance\r\n#no hyperparameter tuning \r\n#choosing model which gives highest accuracy score\r\nimport sklearn.metrics as skmet\r\nimport sklearn.model_selection as skms\r\n#LOGISTIC REGRESSION\r\nimport sklearn.linear_model as sklm\r\nmodel_logistic_regression=sklm.LogisticRegression()\r\n#K foldcross validation cv=5 \r\naccuracy=skms.cross_val_score(estimator=model_logistic_regression,X=ftrain,y=y,cv=5)\r\n#mean of accuracy score\r\naccuracy_logistic_regression=accuracy.mean()\r\nprint('logistic regreesion model accuracy',accuracy_logistic_regression)\r\n#%%\r\n#SUPPORT VECTOR MACHINE\r\n#SVC LINEAR \r\nimport sklearn.svm as skvm\r\nmodel_svc_linear=skvm.SVC(kernel='linear')\r\naccuracy=skms.cross_val_score(estimator=model_svc_linear,X=ftrain,y=y,cv=5)\r\naccuracy_svc_linear=accuracy.mean()\r\nprint('SVC linear model accuracy',accuracy_svc_linear)\r\n#%%\r\n#SVC NON LINEAR (kernel RBF)\r\nimport sklearn.svm as skvm\r\nmodel_svc_rbf=skvm.SVC(kernel='rbf')\r\naccuracy=skms.cross_val_score(estimator=model_svc_rbf,X=ftrain,y=y,cv=5)\r\naccuracy_svc_rbf=accuracy.mean()\r\nprint('SVC kernel rbf model accuracy',accuracy_svc_rbf)\r\n#%%\r\n#DECISION TREE CLASSIFIER\r\nimport sklearn.tree as skt\r\nmodel_decision_tree=skt.DecisionTreeClassifier()\r\naccuracy=skms.cross_val_score(estimator=model_decision_tree,X=ftrain,y=y,cv=5)\r\naccuracy_decision_tree=accuracy.mean()\r\nprint('decision tree model accuracy',accuracy_decision_tree)\r\n#%%\r\n#RANDOM FOREST CLASSIFIER \r\nimport sklearn.ensemble as ske\r\nmodel_random_forest=ske.RandomForestClassifier()\r\naccuracy=skms.cross_val_score(estimator=model_random_forest,X=ftrain,y=y,cv=5)\r\naccuracy_random_forest=accuracy.mean()\r\nprint('random forest model accuracy',accuracy_random_forest)\r\n#%%\r\n#NAIVE BAYES CLASSIFIER\r\nimport sklearn.naive_bayes as sknb\r\nmodel_naive_bayes=sknb.GaussianNB()\r\naccuracy=skms.cross_val_score(estimator=model_naive_bayes,X=ftrain,y=y,cv=5)\r\naccuracy_naive_bayes=accuracy.mean()\r\nprint('naive bayes model accuracy',accuracy_naive_bayes)\r\n#%%\r\n#KNN \r\nimport sklearn.neighbors as skn\r\nmodel_knn=skn.KNeighborsClassifier(n_neighbors=5)\r\naccuracy=skms.cross_val_score(estimator=model_knn,X=ftrain,y=y,cv=5)\r\naccuracy_knn=accuracy.mean()\r\nprint('KNN model accuracy',accuracy_knn)\r\n#%%\r\n#XGBOOST CLASSIFIER\r\nimport xgboost as xgb\r\nmodel_xgb=xgb.XGBClassifier()\r\naccuracy=skms.cross_val_score(estimator=model_xgb,X=ftrain,y=y,cv=5)\r\naccuracy_xgb=accuracy.mean()\r\nprint('Xbg classifier model accuracy',accuracy_xgb)\r\n#%%\r\n#we performed model training on 8 different classification models\r\n#overall accuracy performance, model which gives highest accuracy score will be chosen to train on data\r\nprint('logistic regreesion model accuracy',accuracy_logistic_regression)\r\nprint('SVC linear model accuracy',accuracy_svc_linear)\r\nprint('SVC kernel rbf model accuracy',accuracy_svc_rbf)\r\nprint('decision tree model accuracy',accuracy_decision_tree)\r\nprint('random forest model accuracy',accuracy_random_forest)\r\nprint('naive bayes model accuracy',accuracy_naive_bayes)\r\nprint('KNN model accuracy',accuracy_knn)\r\nprint('Xbg classifier model accuracy',accuracy_xgb)\r\n#%%\r\n#best model is SVC (kernel =rbf) \r\n\r\n#%%\r\n#hyperparameter tuning \r\n#rgrid search \r\n#finding best parameters which can fit model best\r\nmodel=skvm.SVC(kernel='rbf',random_state=0)\r\nparameter={'C': [0.1,0.25,0.5,1,2,5, 10,100,1000], 
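# --- illustrative aside (editorial addition; not part of the original record) ---
# Each model cell above repeats the same cross_val_score + mean recipe; the
# whole comparison can be expressed as one loop (score_models is a hypothetical
# helper; the usage line assumes the ftrain/y defined above):
import sklearn.model_selection as skms

def score_models(models, X, y):
    return {name: skms.cross_val_score(m, X, y, cv=5).mean()
            for name, m in models.items()}

# e.g. score_models({'svc_rbf': skvm.SVC(kernel='rbf'),
#                    'logreg': sklm.LogisticRegression()}, ftrain, y)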
'gamma': [0.01,0.05,0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,0.8, 0.9,1,'scale']}\r\ngrid_search = skms.GridSearchCV(estimator =model, param_grid = parameter,scoring = 'accuracy',cv = 5,n_jobs = -1)\r\ngrid_search.fit(ftrain,y)\r\nprint('best_score grid search',grid_search.best_score_)\r\nprint('best_parameters',grid_search.best_params_)\r\n#%%\r\n#fitting data on best estimator\r\nbest_model=grid_search.best_estimator_\r\nbest_model.fit(ftrain,y)\r\n#%%\r\n#prediction of test data\r\ntest_prediction=best_model.predict(ftest)\r\n#%%\r\n#this test prediction resulted in accuracy score of 0.77511 on submitting at kaggle titanic competition \r\n#means 324 out of 418 test data were correctly predicted\r\n#%%\r\nnew=pd.DataFrame()\r\nnew['PassengerId']=test['PassengerId']\r\nnew['Survived']=test_prediction\r\nnew.to_csv('submission.csv',index=False)\r\n\r\n","sub_path":"Titanic - Machine Learning from Disaster/titanic_solution_spyder.py","file_name":"titanic_solution_spyder.py","file_ext":"py","file_size_in_byte":10038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"481427752","text":"import pandas as pd\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef main():\n data = pd.read_csv('census.csv')\n previsores = data.iloc[:, 0:14].values\n classe = data.iloc[:, 14].values\n \n list_prev = [1, 3, 5, 6, 7, 8, 9, 13]\n for i in list_prev:\n labelencoder_prev = LabelEncoder()\n previsores[:, i] = labelencoder_prev.fit_transform(previsores[:, i])\n\n scaler = StandardScaler()\n previsores = scaler.fit_transform(previsores)\n \n previsores_treinamento, previsores_teste, classe_treinamento, classe_teste = \\\n train_test_split(previsores, classe, test_size=0.15, random_state=0)\n # utiliza-se kernelPCA quando os problemas não são\n # linearmente separáveis\n lda = LinearDiscriminantAnalysis(n_components=6)\n previsores_treinamento = lda.fit_transform(previsores_treinamento, classe_treinamento)\n previsores_teste = lda.transform(previsores_teste)\n \n classifier = RandomForestClassifier(n_estimators=40, criterion='entropy', random_state=0)\n classifier.fit(previsores_treinamento, classe_treinamento)\n predict = classifier.predict(previsores_teste)\n \n accuracy = accuracy_score(classe_teste, predict)\n print(str(accuracy))\n\n\nif __name__ == '__main__':\n main()","sub_path":"lda_census.py","file_name":"lda_census.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"226266320","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\nimport MinkowskiEngine as ME\nimport MinkowskiFunctional as MF\nfrom torch.optim import SGD\nfrom MinkowskiEngine.MinkowskiNonlinearity import MinkowskiModuleBase\nimport torch.nn.functional as F\n\nclass MinkowskiLeakyReLU(MinkowskiModuleBase):\n MODULE = nn.LeakyReLU\n\nclass ResNetBlock(ME.MinkowskiNetwork) : \n \n def __init__(self,\n in_features,\n out_features,\n stride=1,\n D=3,\n kernel_size=2) : \n \n super(ResNetBlock, self).__init__(D)\n \n self.in_features = in_features\n self.out_features = out_features\n \n self.norm_fn1 = ME.MinkowskiBatchNorm(num_features = in_features)\n self.act_fn = MinkowskiLeakyReLU()\n self.conv1 = ME.MinkowskiConvolution(\n in_features, 
out_features, kernel_size=kernel_size,\n stride=stride, dimension=D)\n \n self.norm_fn2 = ME.MinkowskiBatchNorm(num_features = out_features)\n self.act_fn = MinkowskiLeakyReLU()\n self.conv2 = ME.MinkowskiConvolution(\n out_features, out_features, kernel_size=kernel_size,\n stride=stride, dimension=D)\n \n self.res = ME.MinkowskiLinear(in_features, out_features)\n \n \n def forward(self, x) :\n \n if self.in_features != self.out_features : \n res = self.res(x)\n else : \n res = x \n out = self.norm_fn1(x)\n out = self.act_fn(out)\n out = self.conv1(out)\n \n out = self.norm_fn2(out)\n out = self.act_fn(out)\n out = self.conv2(out)\n \n return(out+res)\n \nclass Encoder(ME.MinkowskiNetwork) : \n \n def __init__(self, cfg, name = 'uresnet_encoder') : \n \n self.model_config = cfg[name]\n self.D = self.model_config.get('D', 3)\n \n super(Encoder, self).__init__(self.D)\n \n self.reps = self.model_config.get('reps', 2)\n self.encoder_depth = self.model_config.get('depth', 7)\n self.encoder_num_filters = self.model_config.get('encoder_num_filters', 16)\n self.kernel_size = self.model_config.get('kernel_size', 2)\n self.nPlanes = [self.encoder_num_filters*i for i in range(1, self.encoder_depth+1)]\n \n self.encoding_conv = []\n self.encoding_block = []\n\n for i in range(self.encoder_depth):\n m = []\n for _ in range(self.reps):\n m.append(ResNetBlock(self.nPlanes[i], self.nPlanes[i], \n D=self.D, kernel_size = self.kernel_size))\n m = nn.Sequential(*m)\n self.encoding_block.append(m)\n \n m = []\n if i < self.encoder_depth-1:\n m.append(ME.MinkowskiBatchNorm(self.nPlanes[i]))\n m.append(MinkowskiLeakyReLU())\n m.append(ME.MinkowskiConvolution(\n in_channels=self.nPlanes[i],\n out_channels=self.nPlanes[i+1],\n kernel_size=self.kernel_size, stride=2, dimension=self.D))\n \n \n m = nn.Sequential(*m)\n self.encoding_conv.append(m.cuda())\n \n self.encoding_block = nn.Sequential(*self.encoding_block)\n self.encoding_conv = nn.Sequential(*self.encoding_conv)\n \n def forward(self, x) :\n encoding_features = []\n \n for i, layer in enumerate(self.encoding_block):\n x = self.encoding_block[i](x)\n encoding_features.append(x)\n x = self.encoding_conv[i](x)\n return(x, encoding_features)\n \n \nclass Decoder(ME.MinkowskiNetwork) : \n \n def __init__(self, cfg, name = 'uresnet_decoder') :\n\n self.model_config = cfg[name]\n self.D = self.model_config.get('D', 3)\n super(Decoder, self).__init__(self.D)\n\n self.reps = self.model_config.get('reps', 2)\n self.decoder_depth = self.model_config.get('depth', 7)\n self.decoder_num_filters = self.model_config.get('decoder_num_filters', 16)\n self.kernel_size = self.model_config.get('kernel_size', 2)\n self.nPlanes = [self.decoder_num_filters*i for i in range(1, self.decoder_depth+1)]\n\n self.decoding_conv = []\n self.decoding_block = []\n\n \n for i in range(self.decoder_depth) : \n m = []\n if i > 0 :\n m.append(ME.MinkowskiBatchNorm(self.nPlanes[self.decoder_depth - i]))\n m.append(MinkowskiLeakyReLU())\n m.append(ME.MinkowskiConvolutionTranspose(\n in_channels=self.nPlanes[self.decoder_depth - i],\n out_channels=self.nPlanes[self.decoder_depth - 1 - i],\n kernel_size=self.kernel_size, stride=2, \n generate_new_coords = False,\n dimension=self.D))\n \n m = nn.Sequential(*m)\n self.decoding_conv.append(m)\n \n m = []\n for _ in range(self.reps):\n m.append(ResNetBlock((2 if (_==0)&(i>0) else 1)*self.nPlanes[self.decoder_depth - 1 - i], self.nPlanes[self.decoder_depth - 1 - i], D=self.D, kernel_size=self.kernel_size))\n m = nn.Sequential(*m)\n 
self.decoding_block.append(m)\n \n self.decoding_block = nn.Sequential(*self.decoding_block)\n self.decoding_conv = nn.Sequential(*self.decoding_conv)\n \n def forward(self, x, encoding_features) :\n for i, layer in enumerate(self.decoding_block):\n x = self.decoding_conv[i](x)\n if i > 0 : \n x = ME.cat(encoding_features[-i-1], x)\n x = self.decoding_block[i](x)\n return(x)\n \n \nclass UResNet(ME.MinkowskiNetwork):\n \n def __init__(self, cfg, name='uresnet_2bis'):\n \n self.model_config = cfg[name]\n self.D = self.model_config.get('D', 3)\n super(UResNet, self).__init__(self.D)\n \n self.in_features = self.model_config.get('in_features', 1)\n self.out_features = self.model_config.get('num_types', 5)\n \n self.depth = self.model_config.get('depth', 7)\n \n self.num_filters = self.model_config.get('filters', 16)\n self.nPlanes = [self.num_filters*i for i in range(1, self.depth+1)]\n self.spatial_size= self.model_config.get('spatial_size', 768)\n \n self.input_layer = ME.MinkowskiConvolution(\n in_channels = self.in_features,\n out_channels = self.num_filters,\n kernel_size=3, stride=1, dimension =self.D)\n \n self.encoder = Encoder(cfg)\n self.decoder = Decoder(cfg)\n \n self.norm_fn = ME.MinkowskiBatchNorm(self.num_filters)\n self.act_fn = MinkowskiLeakyReLU()\n self.linear = ME.MinkowskiLinear(self.num_filters, self.out_features)\n \n def forward(self, x) : \n \n coords = x[0][:, 0:4].float()\n feats = x[0][:, 4].float().reshape([-1, 1])\n x = ME.SparseTensor(feats = feats, coords=coords)\n out = self.input_layer(x)\n out, encoding_features = self.encoder(out)\n out = self.decoder(out, encoding_features)\n \n out = self.norm_fn(out)\n out = self.act_fn(out)\n out = self.linear(out)\n \n out = {'logits' : out.F}\n \n return(out)\n \nclass SegmentationLoss(nn.Module) : \n \n def __init__(self, cfg, name = 'segmentation_loss') : \n super(SegmentationLoss, self).__init__()\n self.xentropy = nn.CrossEntropyLoss(reduction = 'none')\n \n def forward(self, out, labels) : \n \n out = out['logits']\n assert len(out) == len(labels[0])\n labels = labels[0]\n labels = torch.tensor(labels).to(dtype=torch.long)\n batch_ids = labels[:, 0].unique()\n \n uresnet_loss = 0.0\n uresnet_acc = 0.0\n \n num_class = len(labels[:, 4].unique())\n count = 0\n \n class_acc = [[0 for i in range(num_class)] for j in range(num_class)]\n class_count = [0 for i in range(num_class)]\n \n for b in batch_ids :\n \n batch_index = labels[:, 0] == b\n print(\"labels : \", labels)\n batch_labels = labels[batch_index, 4]\n print(\"batch_labels : \", batch_labels)\n batch_predictions = out[batch_index, :]\n loss_seg = self.xentropy(batch_predictions, batch_labels)\n uresnet_loss += torch.mean(loss_seg)\n \n # Accuracy for semantic segmentation\n with torch.no_grad():\n predicted_labels = torch.argmax(batch_predictions, dim=-1)\n acc = float((predicted_labels == batch_labels).sum()/float(len(batch_labels)))\n uresnet_acc += acc\n count += 1\n\n for c1 in range(num_class):\n class_mask = batch_labels == c1\n if float(class_mask.sum()) != 0.0 : \n class_count[c1] += 1\n for c2 in range(num_class):\n class_acc[c1][c2] += (predicted_labels[class_mask] == c2).sum()/float((class_mask.sum()))\n\n \n res = {\n 'loss': uresnet_loss/count,\n 'accuracy': uresnet_acc/count\n }\n \n for i in range(num_class) : \n for j in range(num_class) : \n res[f'class_acc_{i}_{j}'] = class_acc[i][j]/class_count[i]\n \n return(res)\n 
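\n\n# [Editor's note] Hedged usage sketch, not part of the original module: the cfg\n# keys below simply mirror the .get() defaults read in the classes above, and an\n# input row is assumed to be (batch_id, x, y, z, feature) as parsed in\n# UResNet.forward().\n# cfg = {'uresnet_2bis': {'D': 3, 'num_types': 5, 'filters': 16},\n#        'uresnet_encoder': {'depth': 7, 'encoder_num_filters': 16},\n#        'uresnet_decoder': {'depth': 7, 'decoder_num_filters': 16}}\n# net, criterion = UResNet(cfg), SegmentationLoss(cfg)\n# res = criterion(net([points]), [labels])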
\n","sub_path":"mlreco/models/uresnet.py","file_name":"uresnet.py","file_ext":"py","file_size_in_byte":9688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"457062649","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ncap = cv2.VideoCapture(1)\nyuzler_cascade = cv2.CascadeClassifier(r\"OpenCV\\Cascades\\haarcascade_frontalface_default.xml\")\ngoz_cascade = cv2.CascadeClassifier(r\"OpenCV\\Cascades\\haarcascade_eye.xml\")\nimg = cv2.imread(r\"‪C:\\Users\\Erkan ALA\\Desktop\\Erkan Ala.jpg\")\ngozluk = cv2.imread(r\"OpenCV\\Images\\sunglasses.png\")\ngri = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nmerkez = []\nyuzler = yuzler_cascade.detectMultiScale(gri,1.3,5)\nfor (x,y,w,h) in yuzler:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gri[y:y+h,x:x+w]\n roi_color = img[y:y+h,x:x+w]\n gozler = goz_cascade.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh) in gozler:\n merkez.append((x+int(ex + 0.5*ew),y+int(ey+0.5*eh)))\n\n if len(merkez) > 0:\n gozluk_w = 2.12 * abs(merkez[1][0]-merkez[0][0])\n overimg = np.ones(img.shape,np.uint8) * 255\n h,w = gozluk.shape[:2]\n buyume_faktor = gozluk_w/w\n overgozluk = cv2.resize(gozluk,None,fx=buyume_faktor,fy=buyume_faktor,interpolation=cv2.INTER_AREA)\n x = merkez[0][0] if merkez[0][0] < merkez[1][0] else merkez[1][0]\n x -= 0.26*overgozluk.shape[1]\n y += 0.35*overgozluk.shape[0]\n\n h,w = overgozluk.shape[:2]\n\n overimg[int(y):int(y+h),int(x):int(x+w)]=overgozluk\n\n grigozluk = cv2.cvtColor(overimg,cv2.COLOR_BGR2GRAY)\n\n ret,mask = cv2.threshold(grigozluk,110,255,cv2.THRESH_BINARY)\n\n mask_inv = cv2.bitwise_not(mask)\n\n temp = cv2.bitwise_and(img,img,mask=mask)\n temp2 = cv2.bitwise_and(overimg,overimg,mask=mask_inv)\n\n son = cv2.add(temp,temp2)\n\n cv2.imshow(\"deneme\",son)\n\n\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n","sub_path":"OpenCV/Erkan/Opencv11.py","file_name":"Opencv11.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"568585280","text":"#!/usr/bin/env python3\n#coding: utf-8\n\n\n# Logistic Regression Classifier\nimport pandas\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import model_selection\n\n\n# Load the Iris Datasets 2\ndataset2 = pandas.read_csv(\"iris_data.csv\")\nprint(\"Dados de Entrada...\")\nprint(\"Shape dos dados...\")\nprint(dataset2.shape)\nprint(\" \")\nprint(\"20 primeiras linhas...\")\nprint(dataset2.head(20))\nprint(\" \")\nprint(\"Descrição dos Dados...\")\nprint(dataset2.describe())\nprint(\" \")\n\narray = dataset2.values\nX = array[:,0:4]\nY = array[:,4]\nvalidation_size = 0.20\nseed = 7\nX_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X,Y,test_size=validation_size, random_state=seed)\n\nseed = 7\nscoring = 'accuracy'\n\nprint(\"Dados Alvo...\")\n# Fit a CART model to the data\n# model = classificador\nmodel = LogisticRegression(solver='liblinear', multi_class='ovr')\n\nkfold = model_selection.KFold(n_splits=10, random_state=seed)\ncv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)\n\nmsg = \"%s: %f (%f)\" % ('Logistic Regression:', cv_results.mean(), 
","sub_path":"logistic_regression_example.py","file_name":"logistic_regression_example.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"276526243","text":"\"\"\"The collections.namedtuple() class\"\"\"\n\nfrom collections import namedtuple\n\n# 'Resume' is the name of the tuple type\nRES = namedtuple('Resume', 'id first_name second_name')\nRESUME_PARTS = RES(id='1', first_name='Ivan', second_name='Ivanov')\n\nprint(RESUME_PARTS)\nprint(RESUME_PARTS.id)\n\n\n# Resume(id='1', first_name='Ivan', second_name='Ivanov')\n# 1","sub_path":"_STRUCTURES/Modul_Collection/Namedtuple/ex_5.py","file_name":"ex_5.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"39521249","text":"import json\nfrom time import sleep\nfrom nose.plugins.attrib import attr\nimport requests\nfrom requests.auth import HTTPDigestAuth\nfrom framework.base_test import BaseTest, setup_driver, teardown_driver\nfrom framework.utils.common_utils import generateId\nfrom framework.utils.data_fetcher import fetch_, from_\nfrom pages.allsubjectspage.add_subject_page import AddSubjectPage\nfrom pages.addsubjecttypepage.add_subject_type_page import AddSubjectTypePage\nfrom pages.createquestionnairepage.create_questionnaire_page import CreateQuestionnairePage\nfrom pages.dashboardpage.dashboard_page import DashboardPage\nfrom pages.loginpage.login_page import LoginPage\nfrom pages.projectoverviewpage.project_overview_page import ProjectOverviewPage\nfrom testdata.test_data import DATA_WINNER_LOGIN_PAGE, DATA_WINNER_ALL_SUBJECT, DATA_WINNER_ADD_SUBJECT, DATA_WINNER_DASHBOARD_PAGE, url\nfrom tests.dataextractionapitests.data_extraction_api_data import *\n\nclass DataExtractionAPITestCase(BaseTest):\n    @classmethod\n    def setUpClass(cls):\n        cls.DIGEST_CREDENTIALS = HTTPDigestAuth('tester150411@gmail.com', 'tester150411')\n        cls.driver = setup_driver()\n        cls.prepare_submission_data()\n\n    @classmethod\n    def tearDownClass(cls):\n        teardown_driver(cls.driver)\n\n    @classmethod\n    def login(cls):\n        cls.driver.go_to(DATA_WINNER_LOGIN_PAGE)\n        login_page = LoginPage(cls.driver)\n        login_page.do_successful_login_with(VALID_CREDENTIALS)\n\n    @classmethod\n    def prepare_subject_type(cls):\n        cls.driver.go_to(DATA_WINNER_ALL_SUBJECT)\n        add_subject_type_page = AddSubjectTypePage(cls.driver)\n        add_subject_type_page.click_on_accordian_link()\n        cls.subject_type = SUBJECT_TYPE + generateId()\n        cls.subject_type = cls.subject_type.strip()\n        add_subject_type_page.add_entity_type_with(cls.subject_type)\n\n    @classmethod\n    def prepare_subject(cls):\n        cls.driver.go_to(DATA_WINNER_ADD_SUBJECT + cls.subject_type + \"?web_view=True\")\n        add_subject_page = AddSubjectPage(cls.driver)\n        add_subject_page.add_subject_with(VALID_DATA)\n        add_subject_page.submit_subject()\n        flash_message = add_subject_page.get_flash_message()\n        cls.subject_id = flash_message[flash_message.find(\":\") + 1:].strip()\n\n    @classmethod\n    def create_project(cls):\n        cls.driver.go_to(DATA_WINNER_DASHBOARD_PAGE)\n        dashboard_page = DashboardPage(cls.driver)\n\n        create_project_page = dashboard_page.navigate_to_create_project_page()\n        VALID_PROJECT_DATA[SUBJECT] = cls.subject_type\n        create_project_page.create_project_with(VALID_PROJECT_DATA)\n        create_project_page.continue_create_project()\n        create_questionnaire_page = CreateQuestionnairePage(cls.driver)\n        cls.form_code = 
create_questionnaire_page.get_questionnaire_code()\n create_questionnaire_page.add_question(QUESTION)\n create_questionnaire_page.save_and_create_project_successfully()\n cls.driver.wait_for_page_with_title(15, fetch_(PAGE_TITLE, from_(VALID_PROJECT_DATA)))\n\n @classmethod\n def activate_project(cls):\n overview_page = ProjectOverviewPage(cls.driver)\n overview_page.activate_project()\n\n @classmethod\n def submit_data(cls):\n overview_page = ProjectOverviewPage(cls.driver)\n data_page = overview_page.navigate_to_data_page()\n web_submission_tab = data_page.navigate_to_web_submission_tab()\n [web_submission_tab.fill_and_submit_answer(answer) for answer in VALID_ANSWERS]\n\n @classmethod\n def prepare_submission_data(cls):\n cls.login()\n cls.prepare_subject_type()\n cls.prepare_subject()\n cls.create_project()\n cls.activate_project()\n cls.submit_data()\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def get_data_by_uri(self, uri):\n http_response = requests.get(url(uri), auth=self.DIGEST_CREDENTIALS)\n return json.loads(http_response.content)\n\n @attr('functional_test')\n def test_get_data_for_subject_with_subject_type_and_subject_id(self):\n result = self.get_data_by_uri(\n \"/api/get_for_subject/%s/%s/\" % (self.__class__.subject_type, self.__class__.subject_id))\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 5)\n self.assertEqual(result[\"message\"], SUCCESS_MESSAGE)\n self.assertEqual(submissions[0][\"submission_data\"][QUESTION[QUESTION_NAME]], VALID_ANSWERS[0][1][ANSWER])\n\n @attr('functional_test')\n def test_get_data_for_subject_with_subject_type_and_subject_id_without_data_return(self):\n result = self.get_data_by_uri(\n \"/api/get_for_subject/%s/%s/%s/%s\" % (\n self.__class__.subject_type, self.__class__.subject_id, \"02-08-2012\", \"02-08-2012\"))\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 0)\n self.assertEqual(result[\"message\"], NO_DATA_SUCCESS_MESSAGE_FOR_SUBJECT)\n\n @attr('functional_test')\n def test_get_data_for_subject_with_subject_type_and_subject_id_and_same_date(self):\n result = self.get_data_by_uri(\n \"/api/get_for_subject/%s/%s/%s/%s\" % (\n self.__class__.subject_type, self.__class__.subject_id, \"03-08-2012\", \"03-08-2012\"))\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 1)\n self.assertEqual(result[\"message\"], SUCCESS_MESSAGE)\n self.assertEqual(submissions[0][\"submission_data\"][QUESTION[QUESTION_NAME]], VALID_ANSWERS[0][1][ANSWER])\n\n @attr('functional_test')\n def test_get_data_for_subject_with_subject_type_and_subject_id_and_different_date(self):\n result = self.get_data_by_uri(\n \"/api/get_for_subject/%s/%s/%s/%s\" % (\n self.__class__.subject_type, self.__class__.subject_id, \"03-08-2012\", \"06-08-2012\"))\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 4)\n self.assertEqual(result[\"message\"], SUCCESS_MESSAGE)\n self.assertEqual(submissions[0][\"submission_data\"][QUESTION[QUESTION_NAME]], VALID_ANSWERS[0][1][ANSWER])\n\n @attr('functional_test')\n def test_get_data_for_subject_with_subject_type_and_subject_id_and_start_date(self):\n result = self.get_data_by_uri(\n \"/api/get_for_subject/%s/%s/%s\" % (\n 
self.__class__.subject_type, self.__class__.subject_id, \"03-08-2012\"))\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 5)\n self.assertEqual(result[\"message\"], SUCCESS_MESSAGE)\n self.assertEqual(submissions[0][\"submission_data\"][QUESTION[QUESTION_NAME]], VALID_ANSWERS[0][1][ANSWER])\n\n @attr('functional_test')\n def test_get_data_for_subject_with_not_exist_subject_type(self):\n not_exist_subject_type = \"not_exist\"\n result = self.get_data_by_uri(\"/api/get_for_subject/%s/%s/\" % (not_exist_subject_type, \"001\"))\n self.assertFalse(result['success'])\n self.assertEqual(result['message'], NOT_EXIST_SUBJECT_TYPE_ERROR_MESSAGE_PATTERN % not_exist_subject_type)\n\n @attr('functional_test')\n def test_get_data_for_subject_with_subject_type_and_not_exist_subject_id(self):\n not_exist_subject_id = \"not_exist\"\n result = self.get_data_by_uri(\"/api/get_for_subject/%s/%s/\" % (self.__class__.subject_type, not_exist_subject_id))\n self.assertFalse(result['success'])\n self.assertEqual(result['message'], NOT_EXIST_SUBJECT_ID_ERROR_MESSAGE_PATTERN % not_exist_subject_id)\n\n @attr('functional_test')\n def test_get_data_for_subject_with_subject_type_and_subject_id_and_wrong_date_format(self):\n result = self.get_data_by_uri(\n \"/api/get_for_subject/%s/%s/%s/%s\" % (\n self.__class__.subject_type, self.__class__.subject_id, \"03082012\", \"06082012\"))\n self.assertFalse(result['success'])\n self.assertEqual(result['message'], DATA_FORMAT_ERROR_MESSAGE)\n\n @attr('functional_test')\n def test_get_data_for_subject_with_subject_type_and_subject_id_and_wrong_date(self):\n result = self.get_data_by_uri(\n \"/api/get_for_subject/%s/%s/%s/%s\" % (\n self.__class__.subject_type, self.__class__.subject_id, \"06-08-2012\", \"03-08-2012\"))\n self.assertFalse(result['success'])\n self.assertEqual(result['message'], DATE_WRONG_ORDER_ERROR_MESSAGE)\n\n @attr('functional_test')\n def test_get_data_for_form_with_form_code(self):\n sleep(2)\n result = self.get_data_by_uri(\n \"/api/get_for_form/%s/\" % self.__class__.form_code)\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 4)\n self.assertEqual(result[\"message\"], SUCCESS_MESSAGE)\n self.assertEqual(submissions[0][\"submission_data\"][QUESTION[QUESTION_NAME]], VALID_ANSWERS[0][1][ANSWER])\n\n @attr('functional_test')\n def test_get_data_for_form_with_form_code_and_same_date(self):\n result = self.get_data_by_uri(\n \"/api/get_for_form/%s/%s/%s/\" % (self.__class__.form_code, '03-08-2012', '03-08-2012'))\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 1)\n self.assertEqual(result[\"message\"], SUCCESS_MESSAGE)\n self.assertEqual(submissions[0][\"submission_data\"][QUESTION[QUESTION_NAME]], VALID_ANSWERS[0][1][ANSWER])\n\n @attr('functional_test')\n def test_get_data_for_form_with_form_code_and_only_start_date(self):\n result = self.get_data_by_uri(\n \"/api/get_for_form/%s/%s/\" % (self.__class__.form_code, '03-08-2012'))\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 4)\n self.assertEqual(result[\"message\"], SUCCESS_MESSAGE)\n self.assertEqual(submissions[0][\"submission_data\"][QUESTION[QUESTION_NAME]], VALID_ANSWERS[0][1][ANSWER])\n\n 
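# [Editor's note] Hedged helper, not part of the original suite: the\n    # \"/api/get_for_subject/...\" URIs above share one shape, so a small builder\n    # (hypothetical name, mirroring the trailing-slash form of the first test)\n    # documents the dd-mm-yyyy segment convention in one place.\n    def build_subject_uri(self, *segments):\n        parts = [self.__class__.subject_type, self.__class__.subject_id] + [str(s) for s in segments]\n        return \"/api/get_for_subject/%s/\" % \"/\".join(parts)\n\n    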
@attr('functional_test')\n def test_get_data_for_form_with_form_code_with_success_status_set_to_false_when_pass_not_exist_form_code(self):\n unknow_form_code = \"unknow_form_code\"\n result = self.get_data_by_uri(\n \"/api/get_for_form/%s/\" % unknow_form_code)\n submissions = result['submissions']\n self.assertFalse(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 0)\n self.assertEqual(result[\"message\"], DOES_NOT_EXISTED_FORM_ERROR_MESSAGE_PATTERN % unknow_form_code)\n\n @attr('functional_test')\n def test_get_data_for_form_with_form_code_with_success_status_set_to_false_when_pass_wrong_date_format(self):\n result = self.get_data_by_uri(\n \"/api/get_for_form/%s/%s/%s/\" % (self.__class__.form_code, \"03082012\", \"06082012\"))\n submissions = result['submissions']\n self.assertFalse(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 0)\n self.assertEqual(result[\"message\"], DATA_FORMAT_ERROR_MESSAGE)\n\n @attr('functional_test')\n def test_get_data_for_form_with_form_code_with_success_status_set_to_false_when_end_date_before_start_date(self):\n result = self.get_data_by_uri(\n \"/api/get_for_form/%s/%s/%s/\" % (self.__class__.form_code, '09-08-2012', '03-08-2012'))\n submissions = result['submissions']\n self.assertFalse(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 0)\n self.assertEqual(result[\"message\"], DATE_WRONG_ORDER_ERROR_MESSAGE)\n\n @attr('functional_test')\n def test_get_data_for_form_with_form_code_without_data_return(self):\n result = self.get_data_by_uri(\n \"/api/get_for_form/%s/%s/%s/\" % (self.__class__.form_code, '03-08-2011', '03-08-2011'))\n submissions = result['submissions']\n self.assertTrue(result['success'])\n self.assertIsInstance(result, dict)\n self.assertEqual(len(submissions), 0)\n self.assertEqual(result[\"message\"], NO_DATA_SUCCESS_MESSAGE_FOR_QUESTIONNAIRE)","sub_path":"func_tests/tests/dataextractionapitests/data_extraction_api_tests.py","file_name":"data_extraction_api_tests.py","file_ext":"py","file_size_in_byte":12701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"286168414","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Wrappers for protocol buffer enum types.\"\"\"\n\nimport enum\n\n\nclass FailoverInstanceRequest(object):\n class DataProtectionMode(enum.IntEnum):\n \"\"\"\n Attributes:\n DATA_PROTECTION_MODE_UNSPECIFIED (int): Defaults to LIMITED\\_DATA\\_LOSS if a data protection mode is not\n specified.\n LIMITED_DATA_LOSS (int): Instance failover will be protected with data loss control. 
More\n specifically, the failover will only be performed if the current\n replication offset diff between master and replica is under a certain\n threshold.\n FORCE_DATA_LOSS (int): Instance failover will be performed without data loss control.\n \"\"\"\n\n DATA_PROTECTION_MODE_UNSPECIFIED = 0\n LIMITED_DATA_LOSS = 1\n FORCE_DATA_LOSS = 2\n\n\nclass Instance(object):\n class State(enum.IntEnum):\n \"\"\"\n Represents the different states of a Redis instance.\n\n Attributes:\n STATE_UNSPECIFIED (int): Not set.\n CREATING (int): Redis instance is being created.\n READY (int): Redis instance has been created and is fully usable.\n UPDATING (int): Redis instance configuration is being updated. Certain kinds of updates\n may cause the instance to become unusable while the update is in\n progress.\n DELETING (int): Redis instance is being deleted.\n REPAIRING (int): Redis instance is being repaired and may be unusable.\n MAINTENANCE (int): Maintenance is being performed on this Redis instance.\n IMPORTING (int): Redis instance is importing data (availability may be affected).\n FAILING_OVER (int): Redis instance is failing over (availability may be affected).\n \"\"\"\n\n STATE_UNSPECIFIED = 0\n CREATING = 1\n READY = 2\n UPDATING = 3\n DELETING = 4\n REPAIRING = 5\n MAINTENANCE = 6\n IMPORTING = 8\n FAILING_OVER = 9\n\n class Tier(enum.IntEnum):\n \"\"\"\n Available service tiers to choose from\n\n Attributes:\n TIER_UNSPECIFIED (int): Not set.\n BASIC (int): BASIC tier: standalone instance\n STANDARD_HA (int): STANDARD\\_HA tier: highly available primary/replica instances\n \"\"\"\n\n TIER_UNSPECIFIED = 0\n BASIC = 1\n STANDARD_HA = 3\n","sub_path":"redis/google/cloud/redis_v1/gapic/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"111160038","text":"from Renamer import Renamer\n\n\nif __name__ == '__main__':\n ren = Renamer()\n ren.rename_all()\n print(f'Renamed {len(ren.vals.files)} files!')\n\n# for i in range(1001):\n# with open(f'C:\\\\RenameDir\\\\DeleteThis_RemoveThis_Important_#{i}.txt', 'w+'):\n# pass\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"217524642","text":"#!/usr/bin/python3\n\"\"\"\nTanvee Islam, 29th April, 2018\n\nSEARCHING PROGRAM\n\n Aim: The aim of this program is to successfully navigate a maze until a payload hidden inside the maze is identified\n\n\"\"\"\n\n# Importing necessary libraries\nfrom time import sleep\nimport sys, os\nfrom ev3dev.ev3 import *\n\n\n# CONNECTING SENSORS AND MOTORS\n\n# Connecting Motors\nrightMotor = LargeMotor(OUTPUT_A)\nassert rightMotor.connected\nleftMotor = LargeMotor(OUTPUT_D)\nassert leftMotor.connected\nservo = Motor(OUTPUT_C)\nassert servo.connected\n\nprint(\"\\nMotors Connected\\n\")\n\n# Connect sensors\nsleep(0.5)\nus = UltrasonicSensor(INPUT_1)\nassert us.connected\nus_front = UltrasonicSensor(INPUT_2)\nassert us_front.connected\nprint(\"Ultrasonics Connected\")\nprint()\ncs = ColorSensor(INPUT_3)\nassert cs.connected\nprint(\"Colour sensor connected\")\nprint()\ngs = GyroSensor(INPUT_4)\nassert gs.connected\nprint(\"Gyro sensor connected\")\nprint()\n# Checking EV3 buttons state\nbtn = Button()\n\n# ---GLOBAL IMPORTANT SETTINGS--- #\nultrasonic_wall_sensing_distance = 240\nfront_wall_sensing_distance = 20\nscan_rotation_speed = 
150\nwheel_turn_rotations_per_turn = 360 * 0.89 * 1\nwheel_rotations_per_block = 360 * 0.89 * 1  # [Editor's note] assumed value: this name is used by move_1_block() below but was never defined in the original\ni = 65\ncan_not_found = True\n\n# ---MOVEMENT FUNCTIONS--- #\n#\n# this function moves the bot forward or backwards 1 block\ndef move_1_block(forward):\n    spins = wheel_rotations_per_block\n    if forward:\n        spins = spins * -1\n\n    leftMotor.run_to_rel_pos(position_sp=spins, speed_sp=150, ramp_down_sp=90)\n    rightMotor.run_to_rel_pos(position_sp=spins, speed_sp=150, ramp_down_sp=90)\n\n    left_running_state = leftMotor.state\n    right_running_state = rightMotor.state\n    print(\"returning the state flags of the motor \", left_running_state, right_running_state)\n\n    # wait until motor stops before continuing with anything else\n    print(\"returning the state flags of the motor \", leftMotor.state, rightMotor.state)\n    while leftMotor.state == left_running_state and rightMotor.state == right_running_state:\n        if us.value() < ultrasonic_wall_sensing_distance:\n            stop_motors()\n            print(\"Wall was sensed early so motor stopped\")\n\n\ndef move_1_block_2(forward):\n    # TODO: Figure out how to do desired direction\n    desired_direction = gs.value()\n\n    global i\n\n    print(\"The desired direction:\", desired_direction)\n    if forward:\n        a = 1\n    else:\n        a = -1\n\n    leftMotor.run_direct(duty_cycle_sp=a*75)\n    rightMotor.run_direct(duty_cycle_sp=a*75)\n\n    left_running_state = leftMotor.state\n    right_running_state = rightMotor.state\n    i = 0\n    while i < 67:\n        print(\"while loop count:\", i)\n        print(\"Front US value\", us_front.value())\n        if us_front.value() < front_wall_sensing_distance:\n            stop_motors()\n            print()\n            print(\"wall was sensed early so motor stopped\")\n            break\n        # elif gs.value() < desired_direction - 3:\n        #     leftMotor.run_direct(duty_cycle_sp=a*30)\n        #     rightMotor.run_direct(duty_cycle_sp=a*75)\n        #     i += 1\n        # elif gs.value() > desired_direction + 3:\n        #     leftMotor.run_direct(duty_cycle_sp=a*75)\n        #     rightMotor.run_direct(duty_cycle_sp=a*30)\n        #     i += 1\n        else:\n            leftMotor.run_direct(duty_cycle_sp=a*75)\n            rightMotor.run_direct(duty_cycle_sp=a*75)\n            i += 1\n    stop_motors()\n\n\ndef move_1_block_3():\n    # TODO: Figure out how to do desired direction\n    desired_direction = gs.value()\n    print(\"-----ENTERING REVERSING FUNCTION-----\")\n    global i\n\n    a = -1\n    ultrasonic_movement(180)\n    print(\"ULTRASONIC SHOULD BE MOVING\")\n    sleep(3)\n    leftMotor.run_direct(duty_cycle_sp=a*75)\n    rightMotor.run_direct(duty_cycle_sp=a*75)\n\n    left_running_state = leftMotor.state\n    right_running_state = rightMotor.state\n    i = 0\n    while i < 70:\n        print(\"while loop count:\", i)\n        if us.value() < 150:\n            stop_motors()\n            print()\n            print(\"wall was sensed early so motor stopped\")\n            break\n        elif gs.value() < desired_direction - 3:\n            leftMotor.run_direct(duty_cycle_sp=a*30)\n            rightMotor.run_direct(duty_cycle_sp=a*75)\n            i += 1\n        elif gs.value() > desired_direction + 3:\n            leftMotor.run_direct(duty_cycle_sp=a*75)\n            rightMotor.run_direct(duty_cycle_sp=a*30)\n            i += 1\n        else:\n            leftMotor.run_direct(duty_cycle_sp=a*75)\n            rightMotor.run_direct(duty_cycle_sp=a*75)\n            i += 1\n    stop_motors()\n\n# this function stops both motors\ndef stop_motors():\n    # leftMotor.reset()\n    leftMotor.stop()\n    # rightMotor.reset()\n    rightMotor.stop()\n\n\n
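# [Editor's note] Hedged sketch, not called by the original code: gsturn() below\n# looks for \"the nearest multiple of 90 degrees in the direction of the turn\";\n# extracted here as a pure function for clarity, e.g. nearest_right_angle(93, 1) == 180.\ndef nearest_right_angle(current_angle, direction_prefix):\n    target = current_angle + direction_prefix * 45\n    while target % 90 != 0:\n        target += direction_prefix\n    return target\n\n\n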
def gsturn(left):\n    # SET DIR PREFIX AND RECORD FIRST ANGLE\n    print()\n    print(\"------STARTING TURN------\")\n    beginning_angle = gs.value()\n    if left:\n        # assuming that the right (clockwise) dir is positive\n        direction_prefix = -1\n    else:\n        direction_prefix = 1\n\n    # FIND NEAREST 90 IN THE DIRECTION OF TURN\n    destination_angle = beginning_angle + (direction_prefix * 45)\n    while destination_angle % 90 != 0:\n        destination_angle += direction_prefix\n\n    destination_angle = destination_angle + (-direction_prefix * 3)\n    print(\"Destination is \", destination_angle)\n\n    # START DRIVING IN CORRECT DIR\n    leftMotor.run_to_rel_pos(position_sp=350 * direction_prefix, speed_sp=200, ramp_down_sp=90)\n    rightMotor.run_to_rel_pos(position_sp=-350 * direction_prefix, speed_sp=200, ramp_down_sp=90)\n\n    run_state = leftMotor.state\n\n    # LOOP TO BREAK ONCE THE GYRO IS IN CORRECT RANGE\n    while (gs.value() < destination_angle - 1 and gs.value() < destination_angle + 1) or (\n            gs.value() > destination_angle - 1 and gs.value() > destination_angle + 1):\n        print(gs.value())\n        if leftMotor.state != run_state:\n            print(\"Motor was stopped by rel_pos\")\n            break\n\n    # STOP MOTORS IMMEDIATELY\n    stop_motors()\n    print(\"finishing gyroscopic turn\")\n    print(\"Final position is:\", gs.value())\n    print()\n    print(\"------TURN FINISHED-------\")\n    print()\n\n\ndef ultrasonic_movement(destination):\n    servo.run_to_abs_pos(position_sp=destination, speed_sp=200, ramp_down_sp=90)\n\n\ndef scan_walls():\n    global node_info\n    global i\n\n    # Declaring constants\n    DETECTION_DISTANCE = 215\n    FRONT_DETECTION_DISTANCE = 260\n    FRONT = 0\n    RIGHT = 90\n    LEFT = -90\n\n    print()\n    print(\"Lookin forward\")\n    # forward\n    ultrasonic_movement(FRONT)\n    sleep(2)\n    if us.value() <= FRONT_DETECTION_DISTANCE:\n        forward = False\n        print(\"Not goin that way\")\n    else:\n        forward = True\n        print(\"Forwards clear\")\n\n    print()\n    print(\"Lookin to the side\")\n    # left\n    ultrasonic_movement(LEFT)\n    sleep(2)\n    if us.value() <= DETECTION_DISTANCE:\n        left = False\n        print(\"Not goin left\")\n    else:\n        left = True\n        print(\"Left is clear\")\n\n    # right\n    print()\n    print(\"Lookin the other side\")\n    ultrasonic_movement(RIGHT)\n    sleep(2)\n    if us.value() <= DETECTION_DISTANCE:\n        right = False\n        print(\"Right is blocked\")\n    else:\n        right = True\n        print(\"Right's clear\")\n\n    print()\n    print(\"This is what we know\")\n    print((forward, left, right))\n    if i < 40:\n        node_info[steps] = [forward, right, left]\n    else:\n        node_info.append([forward, right, left])\n\n    ultrasonic_movement(FRONT)\n    sleep(1)\n\n\ndef can_check(us_last):\n    difference = us_last - us_front.value()\n    sleep(0.1)\n    difference2 = us_last - us_front.value()\n    sleep(0.1)\n    difference3 = us_last - us_front.value()\n    front_wall_us_sensing_distance = 10\n\n    if (\n            difference > 125 and difference2 > 125 and difference3 > 125) and cs.red >= 6 and us_front.value() <= front_wall_us_sensing_distance:\n        print(\"\\nCAN SENSED\\n\")\n        return False\n    else:\n        return True\n    # beeping_flashing()\n\n\ndef main_program(past_moves, steps, last_backup):\n    \"\"\"\n    Main control loop: scan the walls at the current node, then either advance\n    via decision_program() or retreat via backup_program(), repeating until the\n    payload can is found.\n    :param past_moves: list of move codes taken so far (see decision_program)\n    :param steps: index of the current node\n    :return: None\n    \"\"\"\n    print(\"-----------RUNNING MAIN PROGRAM------------\")\n    global i\n    global can_not_found\n    # can_not_found = can_check()\n    while can_not_found: # This should eventually be replaced with a colour sensor reading\n        print()\n        if not last_backup:\n            scan_walls()\n        print()\n        print(\"Bout to scan walls\")\n        if node_info[steps][0] or node_info[steps][1] or node_info[steps][2]:\n            print()\n            print(\"Looks like there's somewhere to go\")\n            decision_program(steps, last_backup)\n        else:\n            last_backup = True\n            print()\n            print(\"Time to back up\")\n            print()\n            backup_program(past_moves, steps)\n    getting_back(past_moves)\n\n
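# [Editor's note] Hedged readability sketch, not used by the original code:\n# named constants for the move codes documented in decision_program() below.\nFORWARD_MOVE, RIGHT_TURN, LEFT_TURN = 0, 1, 2\n\n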
def decision_program(steps, last_backup):\n    \"\"\"\n\n    :param steps: How many steps forward we have taken or the current reference index to past_moves\n\n    :var past_moves: Each value appended to the list refers to a (movement)/(type of movement), as described below:\n    0 = Forward movement\n    1 = Right turn\n    2 = Left turn\n    NB: Does not require a reverse unit, as every time it reverses it will delete the preceding block\n\n    :return: None\n    \"\"\"\n\n    print(\"-----RUNNING DECISION PROGRAM-----\")\n    print()\n    print(\"This is node info:\", node_info)\n    print(\"This is past moves:\", past_moves)\n    print(\"Steps:\", steps)\n    print()\n    sleep(1)\n\n    global i\n\n    if node_info[steps][0]:\n        print()\n        print(\"Let's go forward\")\n        move_1_block_2(True)\n        stop_motors()\n        if i > 40:\n            past_moves.append(0)\n            steps += 1\n        last_backup = False\n        main_program(past_moves, steps, last_backup)\n    else:\n        print()\n        print(\"Let's not go forward\")\n        if node_info[steps][1]:\n            print(\"We're goin right\")\n            gsturn(False)\n            past_moves.append(1)\n            steps += 1\n            node_info.append(0)\n            sleep(1)\n            move_1_block_2(True)\n            stop_motors()\n            if i > 40:\n                past_moves.append(0)\n                steps += 1\n            last_backup = False\n            main_program(past_moves, steps, last_backup)\n        elif node_info[steps][2]:\n            print(\"we're goin left\")\n            # turn(-90, -1)\n            gsturn(True)\n            past_moves.append(2)\n            node_info.append(0)\n            steps += 1\n            sleep(1)\n            move_1_block_2(True)\n            stop_motors()\n            if i > 40:\n                past_moves.append(0)\n                steps += 1\n            last_backup = False\n            main_program(past_moves, steps, last_backup)\n\n\ndef backup_program(past_moves, steps):\n    \"\"\"\n    Rewinds past_moves one junction at a time until the current node has an\n    open direction again.\n    :return: None\n    \"\"\"\n    last_entry = -1\n    while node_info[steps][0] is False and node_info[steps][1] is False and node_info[steps][2] is False:\n        print(\"-----INSIDE THE BACKUP LOOP-------\")\n        print(\"This is the past moves list:\", past_moves)\n        print(\"This is node_info:\", node_info)\n        print(\"Steps:\", steps)\n        print()\n        print()\n        if past_moves[steps] == 0:\n            print(\"IN THE REVERSING BIT\")\n            print()\n            move_1_block_3()\n            sleep(5)\n            stop_motors()\n            past_moves = past_moves[: -1]\n            print()\n            print(\"Node_info:\", node_info)\n            print(\"Steps:\", steps)\n            print(\"node_info[steps][1]:\", node_info[steps][1])\n            print()\n            node_info[steps][1] = False\n            steps -= 1\n            print(\"END OF THE REVERSING BIT\")\n            print()\n            print()\n        elif past_moves[steps] == 1:\n            print(\"Undoing a right turn\")\n            gsturn(True)  # fixed: gsturn() expects a boolean 'left' flag, not a direction prefix\n            past_moves = past_moves[: -1]\n            steps -= 1\n            node_info[steps][1] = False\n        elif past_moves[steps] == 2:\n            print(\"Undoing a left turn\")\n            gsturn(False)  # fixed: undo a left turn by turning right; the original truthy 1 always turned left\n            past_moves = past_moves[: -1]\n            steps -= 1\n            node_info[last_entry][2] = False\n        else:\n            print()\n            print(\"-----Unknown move code-----\")\n            print(\"This branch should be unreachable\")\n    print()\n    print(\"------OUTSIDE THE BACKUP LOOP-------\")\n\n\ndef getting_back(past_moves):\n    print()\n    print(\"---------RETURNING THE CAN TO THE START-------------\")\n    past_moves.reverse()  # fixed: list.reverse() works in place and returns None\n    gsturn(True)\n    gsturn(True)\n    for move in past_moves:\n        if move == 0:\n            move_1_block_2(True)\n        elif move == 1:\n            gsturn(False)\n        elif move == 2:\n            gsturn(True)\n        else:\n            print(\"Unknown move code; route replay may be wrong\")\n\n\npast_moves = [0] # Holds the information on how to get back to the beginning or back up to the last junction\nnode_info = [] # Holds the boolean values of the walls in each node, as we come across them\nsteps = 0 # This is our current step count\nlast_backup = False\n\nmain_program(past_moves, steps, 
last_backup)\n","sub_path":"Rescue_programs/Search_testing/main_search.py","file_name":"main_search.py","file_ext":"py","file_size_in_byte":13161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"681906","text":"from HelperFunctions import inputsplit\nfrom copy import deepcopy\n\ndirections = {'e': (1,-1,0), 'w': (-1,1,0), 'se': (0,-1,1), 'sw': (-1,0,1), 'ne': (1,0,-1), 'nw': (0,1,-1)}\n\ndef do1(puzzleInput):\n tiles = set()\n \n rotate(puzzleInput, tiles)\n\n return len(tiles) \n\ndef do2(puzzleInput):\n tiles = set()\n \n rotate(puzzleInput, tiles)\n\n newTiles = gameOfLife(tiles, 100)\n \n return len(newTiles)\n\ndef rotate(puzzleInput, tiles):\n for line in puzzleInput:\n tile = (0,0,0)\n direction = ''\n for thing in line:\n direction += thing\n if direction in directions:\n xStep,yStep,zStep = directions[direction]\n x,y,z = tile\n tile = (x + xStep, y + yStep, z + zStep)\n direction = ''\n if tile in tiles:\n tiles.remove(tile)\n else:\n tiles.add(tile)\n\ndef gameOfLife(tiles, rounds):\n newTiles = deepcopy(tiles)\n for _ in range(rounds):\n tiles = deepcopy(newTiles)\n neighbors = set()\n for tile in tiles:\n allNeighbors = getNeighbors(tile, tiles)\n neighbors = neighbors.union(allNeighbors)\n blackNeighbors = countBlackNeighbors(allNeighbors, tiles)\n if blackNeighbors == 0 or blackNeighbors > 2:\n newTiles.remove(tile)\n for neighbor in neighbors:\n if neighbor in tiles:\n continue\n blackNeighbors = countBlackNeighbors(getNeighbors(neighbor, tiles), tiles)\n if blackNeighbors == 2:\n newTiles.add(neighbor) \n return newTiles\n\ndef getNeighbors(tile, tiles):\n x,y,z = tile\n \n neighbors = set([(x + xStep, y + yStep, z + zStep) for xStep,yStep,zStep in directions.values()])\n\n return neighbors\n\ndef countBlackNeighbors(neighbors, tiles):\n count = len([neighbor for neighbor in neighbors if neighbor in tiles])\n return count\n\ndef do():\n with open ('Input/day24.txt') as f:\n strInput = f.read()\n\n puzzleInput = strInput.split('\\n')\n\n print(do1(puzzleInput))\n print(do2(puzzleInput))\n \ndo()","sub_path":"2020/day24.py","file_name":"day24.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"172813571","text":"\"\"\"\nGroup Members:\n1.Himanshu(2019KUEC2009)\n2.Ayush Kumar Gupta(2019KUEC2018)\n3.Krishankant Garg(2019KUEC2025)\n4.Deepak Gurjar(2019KUEC2027)\n@copyright 2021\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QApplication,QFileDialog,QMessageBox, QStyle, QLabel,QSizePolicy, QWidget, QPushButton ,QHBoxLayout, QGroupBox, QDialog, QVBoxLayout,QColorDialog\nfrom PyQt5.QtGui import QIcon, QFont , QImage , QPixmap\nfrom PyQt5.QtCore import pyqtSlot , QSize , Qt\nfrom background_blur import blur_image\nimport numpy as np;\nimport cv2\nimport os\nfrom Scanner import Scanner\nfrom Text_detection import Text_detection\nfrom Text_extraction import ocr_text\nfrom Hindi_translator import Hindi_translate\nfrom Text_to_speech import text_to_speech\n\nfrom detect_face_image import facedetectImage\nfrom detect_face_video import facedetectVideo\n\nclass App(QWidget):\n def __init__(self):\n super().__init__()\n self.title = \"ImagiFy\"\n self.left = 200\n self.top = 100\n self.width = 800\n self.height = 650\n self.initUI()\n def initUI(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.setWindowIcon(QIcon('./assets/logo.png'))\n self.createHorizontalLayout()\n self.setStyleSheet(\"\"\"QToolTip { \n padding: 5px;\n }\n QWidget{\n background-color: #f9b8ff;\n color: white;\n font-family: 'Roboto';\n \n }\n QPushButton{\n background-color: #333;\n color: white;\n padding: 10px;\n font-family: 'Roboto';\n font-size: 20px;\n border:3px solid;\n }\"\"\")\n\n #color = QColorDialog.getColor()\n #code starting for headre main\n self.label=QLabel(self)\n self.pixmap = QPixmap('win15.png')\n self.label.setPixmap(self.pixmap)\n self.label.resize(self.pixmap.width(), self.pixmap.height())\n\t#code ending for header main\n \n\t#code start for footer main\n self.label1=QLabel(self)\n self.pixmap=QPixmap('win21.png')\n self.label1.setPixmap(self.pixmap)\n self.label1.resize(self.pixmap.width(), self.pixmap.height())\n self.label1.move(0,402)\n #code ending for footer main\n\n self.label2=QLabel(self)\n self.label2.setText(\"Click Below:-\")\n self.label2.setStyleSheet(\"\"\"\n\t\tcolor:black;\n\t\tpadding:0px;\n\t\tfont-size:18px;\n\t\twidth:500px;\n\t\t\"\"\")\n \n self.label2.move(100,260)\n \n \n self.show()\n def createHorizontalLayout(self):\n layout = QHBoxLayout()\n scanner_btn = QPushButton(\"Scanner\", self)\n scanner_btn.setToolTip(\"Click here to scan document\")\n scanner_btn.clicked.connect(self.handleScanner)\n layout.addWidget(scanner_btn)\n\n image_edit = QPushButton(\"Image Editor\", self)\n image_edit.setToolTip(\"Click Here for Image Edit\")\n image_edit.clicked.connect(self.handleImageEdit)\n layout.addWidget(image_edit)\n\n image_ocr = QPushButton(\"OCR\", self)\n image_ocr.setToolTip(\"Click here for OCR\")\n image_ocr.clicked.connect(self.handleImageOcr)\n layout.addWidget(image_ocr)\n \n Face_detection = QPushButton(\"Face Detection\", self)\n Face_detection.setToolTip(\"Click Here for Face Detection\")\n Face_detection.clicked.connect(self.handleFacedetection)\n layout.addWidget(Face_detection)\n self.setLayout(layout)\n\n @pyqtSlot()\n def handleScanner(self):\n self.cams = ScannerWindow(self) \n self.cams.show()\n self.close()\n @pyqtSlot()\n def handleImageEdit(self):\n self.cams = ImageEditWindow(self) \n self.cams.show()\n self.close()\n @pyqtSlot()\n def handleImageOcr(self):\n self.cams = ImageOCRWindow(self) \n self.cams.show()\n 
self.close()\n @pyqtSlot()\n def handleFacedetection(self):\n self.cams = FacedetectionWindow(self) \n self.cams.show()\n self.close()\n\n \nclass ScannerWindow(QDialog):\n def __init__(self, value, parent=None):\n super().__init__(parent)\n # Code for the Scanner Window\n self.setWindowTitle('Scanner')\n self.setGeometry(value.left, value.top, value.width, value.height)\n self.setWindowIcon(self.style().standardIcon(QStyle.SP_FileDialogInfoView))\n self.setStyleSheet(\"\"\"QToolTip { \n padding: 5px;\n }\n QWidget{\n background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #fc00ff, stop:1 #00dbde);\n color: white;\n font-family: 'Roboto';\n \n }\n \n QPushButton{\n \n color: white;\n padding: 10px;\n font-family: 'Roboto';\n font-size: 20px;\n \n background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #525252 , stop:1 #3d72b4);\n \n }\"\"\")\n \n label1 = QLabel(\"Scanner App\")\n self.button = QPushButton()\n self.button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)\n self.button.setIcon(QIcon('./assets/image_dialog.png'))\n self.button.setIconSize(QSize(170, 170))\n self.button.clicked.connect(self.getScanned)\n \n layoutV = QVBoxLayout()\n self.pushButton = QPushButton(self)\n self.pushButton.setStyleSheet('background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #136a8a, stop:1 #267871);')\n self.pushButton.setText('Back')\n self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))\n self.pushButton.setIconSize(QSize(20, 20))\n self.pushButton.clicked.connect(self.goMainWindow)\n layoutV.addWidget(self.pushButton)\n \n layoutH = QHBoxLayout()\n layoutV.addWidget(label1)\n label1.setFont(QFont('Roboto', 20))\n label1.setAlignment(Qt.AlignCenter)\n # closing button\n layoutH.addWidget(self.button)\n layoutV.addLayout(layoutH)\n self.setLayout(layoutV)\n \n\n def getScanned(self):\n # To fetch the scanner image\n options = QFileDialog.Options()\n fname = QFileDialog.getOpenFileName(self, 'Open file',\n 'c:\\\\', \"Image files (*.jpg *.gif *.png)\", options=options)\n imagePath = fname[0]\n\n if imagePath:\n scanned = Scanner(imagePath)\n cv2.imshow(\"Scanned Image\", scanned)\n cv2.moveWindow(\"Scanned Image\", 200, 50)\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Question)\n msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n ret = QMessageBox.question(self, 'Save Image', \"Do You want to save this Scanned Image ?\", QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel, QMessageBox.Cancel)\n if ret == QMessageBox.Yes:\n outfname = \"scanned_image.jpg\"\n cv2.imwrite(os.path.expanduser(outfname), scanned)\n\n\n def goMainWindow(self):\n self.cams = App()\n self.cams.show()\n self.close() \n \n \nclass ImageEditWindow(QDialog):\n def __init__(self, value, parent=None):\n super().__init__(parent)\n self.setWindowTitle('Image Editor')\n # Here value is the super class object\n # Main Window\n self.setGeometry(value.left, value.top, value.width, value.height)\n self.setWindowIcon(self.style().standardIcon(QStyle.SP_FileDialogInfoView))\n\n\n self.setStyleSheet(\"\"\"QToolTip { \n padding: 5px;\n }\n QWidget{\n background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #fc00ff, stop:1 #00dbde);\n color: white;\n font-family: 'Roboto';\n \n }\n \n QPushButton{\n \n color: white;\n padding: 10px;\n font-family: 'Roboto';\n font-size: 20px;\n \n background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #525252 , stop:1 #3d72b4);\n \n }\"\"\")\n # App Label\n label_2 = QLabel(\"Image Editor\")\n\n # Main Content\n self.button = QPushButton()\n 
self.button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)\n self.button.setIcon(QIcon('./assets/image_dialog.png'))\n self.button.setStyleSheet('background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #525252 , stop:1 #3d72b4);')\n self.button.setIconSize(QSize(200, 200))\n self.button.clicked.connect(self.getImage)\n \n layoutV = QVBoxLayout() # Vertical Layout For the App Flow\n # Back Button\n self.pushButton = QPushButton(self)\n self.pushButton.setStyleSheet('background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #136a8a, stop:1 #267871);')\n self.pushButton.setText('Back')\n self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))\n self.pushButton.setIconSize(QSize(20, 20))\n self.pushButton.clicked.connect(self.goMainWindow)\n layoutV.addWidget(self.pushButton)\n \n layoutH = QHBoxLayout() # Horizontal Layout for Main Content\n layoutV.addWidget(label_2)\n label_2.setFont(QFont('Roboto', 20))\n label_2.setAlignment(Qt.AlignCenter)\n layoutH.addWidget(self.button)\n layoutV.addLayout(layoutH)\n self.setLayout(layoutV)\n self.previewImage = QLabel()\n self.previewImage.setAlignment(Qt.AlignCenter)\n layoutV.addWidget(self.previewImage)\n\n def getImage(self):\n options = QFileDialog.Options()\n fname = QFileDialog.getOpenFileName(self, 'Open file',\n 'c:\\\\', \"Image files (*.jpg *.gif *.png)\", options=options)\n imagePath = fname[0]\n if imagePath:\n cvImg = blur_image(imagePath)\n blurred_img = cvImg.copy()\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(cvImg, 'Modified', (100 , 100), font, 3, (0,0,0), 2, cv2.LINE_AA)\n original_image = cv2.imread(imagePath)\n font = cv2.FONT_HERSHEY_SIMPLEX \n cv2.putText(original_image, 'Original', (100 , 100), font, 3, (0,0,0), 2, cv2.LINE_AA)\n output_result = np.concatenate((original_image, cvImg), axis=1)\n height, width, channel = output_result.shape\n bytesPerLine = 3 * width\n qImg = QImage(output_result.data, width, height, bytesPerLine, QImage.Format_BGR888)\n pixmap = QPixmap(qImg)\n self.previewImage.setPixmap(pixmap.scaled(pixmap.width()//3, pixmap.height()//2, Qt.KeepAspectRatio, Qt.FastTransformation))\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Question)\n msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n ret = QMessageBox.question(self, 'Save Image', \"Do You want to save this blurred Image ?\", QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel, QMessageBox.Cancel)\n if ret == QMessageBox.Yes:\n outfname = \"blurred_img.jpg\"\n cv2.imwrite(os.path.expanduser(outfname), blurred_img)\n\n\n def goMainWindow(self):\n self.cams = App()\n self.cams.show()\n self.close() \n \n \nclass ImageOCRWindow(QDialog):\n def __init__(self, value, parent=None):\n super().__init__(parent)\n self.setWindowTitle('Image OCR')\n self.setGeometry(value.left, value.top, value.width, value.height)\n self.setWindowIcon(self.style().standardIcon(QStyle.SP_FileDialogInfoView))\n\n self.setStyleSheet(\"\"\"QToolTip { \n padding: 5px;\n }\n QWidget{\n background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #fc00ff, stop:1 #00dbde);\n color: white;\n font-family: 'Roboto';\n \n }\n \n QPushButton{\n \n color: white;\n padding: 10px;\n font-family: 'Roboto';\n font-size: 20px;\n \n background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #525252 , stop:1 #3d72b4);\n \n }\"\"\")\n\n # label_3 = QLabel(\"Image OCR\")\n self.button = QPushButton()\n self.button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)\n self.button.setIcon(QIcon('./assets/image_dialog.png'))\n 
self.button.setIconSize(QSize(200, 200))\n self.button.clicked.connect(self.image_input)\n \n layoutV = QVBoxLayout()\n self.pushButton = QPushButton(self)\n self.pushButton.setStyleSheet('background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #136a8a, stop:1 #267871);')\n self.pushButton.setText('Back')\n self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))\n self.pushButton.setIconSize(QSize(20, 20))\n self.pushButton.clicked.connect(self.goMainWindow)\n layoutV.addWidget(self.pushButton)\n \n layoutH = QHBoxLayout()\n # layoutV.addWidget(label_3)\n # label_3.setFont(QFont('Roboto', 20))\n # label_3.setAlignment(Qt.AlignCenter)\n layoutH.addWidget(self.button)\n self.layoutH = layoutH\n self.layoutV = layoutV\n self.button.clicked.connect(self.createHorizontalLayout)\n layoutV.addLayout(layoutH)\n self.setStyleSheet(\"\"\"QToolTip { \n padding: 5px;\n }\n QPushButton{\n background-color: #333;\n color: white;\n padding: 10px;\n font-family: 'Roboto';\n font-size: 14px;\n }\"\"\")\n self.setLayout(layoutV)\n def image_input(self):\n options = QFileDialog.Options()\n fname = QFileDialog.getOpenFileName(self, 'Open file',\n 'c:\\\\', \"Image files (*.jpg *.gif *.png)\", options=options)\n imagePath = fname[0]\n\n if imagePath:\n # Get the image\n #ocr = OCR(imagePath)\n self.image_path = imagePath\n self.layoutH.removeWidget(self.button)\n self.button.deleteLater()\n self.button = None\n pixmap = QPixmap(imagePath)\n self.preview = QLabel()\n self.preview.setPixmap(pixmap)\n self.preview.setPixmap(pixmap.scaled(pixmap.width()//5, pixmap.height()//3, Qt.KeepAspectRatio, Qt.FastTransformation))\n self.layoutH.addWidget(self.preview)\n self.preview.setAlignment(Qt.AlignCenter)\n\n\n\n\n def createHorizontalLayout(self):\n layout = QHBoxLayout()\n text_detect = QPushButton(\"Text Detection\", self)\n text_detect.setToolTip(\"Click here to detect Text\")\n text_detect.clicked.connect(self.handleTextDetect)\n layout.addWidget(text_detect)\n\n text_extract = QPushButton(\"Text Extraction\", self)\n text_extract.setToolTip(\"Click Here to extract Text\")\n text_extract.clicked.connect(self.handleTextExtract)\n layout.addWidget(text_extract)\n\n tts = QPushButton(\"Text To Speech\", self)\n tts.setToolTip(\"Click here to speak the text\")\n tts.clicked.connect(self.handleTTS)\n layout.addWidget(tts)\n\n hindi_text = QPushButton(\"Get Hindi Text\", self)\n hindi_text.setToolTip(\"Click here to get the hindi text\")\n hindi_text.clicked.connect(self.getHindiText)\n layout.addWidget(hindi_text)\n self.layoutV.addLayout(layout)\n self.text = QLabel(\"\", self)\n self.layoutV.addWidget(self.text)\n self.text.setAlignment(Qt.AlignCenter)\n self.setStyleSheet(\"\"\"QLabel { \n padding: 5px;\n font-size: 14px;\n }\n QPushButton{\n background-color: #333;\n color: white;\n padding: 10px;\n font-family: 'Roboto';\n font-size: 14px;\n }\"\"\")\n self.text.setWordWrap(True)\n\n def handleTextExtract(self):\n # Handles the text extraction , gets the text string\n extracted_text = ocr_text(self.image_path)\n #extracted_text = \"Hello this is extracted text\"\n self.text.setText(extracted_text)\n\n def handleTextDetect(self):\n # Handles the text detect, get the final image\n text_detect = Text_detection(self.image_path)\n text_detect = cv2.resize(text_detect, (960, 540))\n cv2.imshow('Detected Text',text_detect)\n cv2.moveWindow(\"Detected Text\", 200, 100)\n #height, width, channel = text_detect.shape\n #bytesPerLine = 3 * width\n #qImg = QImage(text_detect.data, width, height, 
bytesPerLine, QImage.Format_BGR888)\n #pixmap = QPixmap(qImg)\n #self.detected_text.setPixmap(pixmap.scaled(pixmap.width()//3, pixmap.height()//2, Qt.KeepAspectRatio, Qt.FastTransformation))\n #pixmap = QPixmap(self.image_path)\n #self.detectedText = QLabel()\n #self.detectedText.setPixmap(pixmap)\n #self.detectedText.setPixmap(pixmap.scaled(pixmap.width()//1, pixmap.height()//2, Qt.KeepAspectRatio, Qt.FastTransformation))\n #self.layoutV.addWidget(self.detectedText)\n #self.detectedText.setAlignment(Qt.AlignCenter)\n \n def handleTTS(self):\n # handle TTS\n # play the converted audio\n tts = text_to_speech(self.image_path)\n\n def getHindiText(self):\n extracted_text = Hindi_translate(self.image_path)\n # extracted_text = \"Hello this is Hindi extracted text\"\n self.text.setText(extracted_text)\n # self.layoutV.addWidget(self.text)\n\n def goMainWindow(self):\n self.cams = App()\n self.cams.show()\n self.close()\n\n\n\n\nclass FacedetectionWindow(QDialog):\n def __init__(self, value, parent=None):\n super().__init__(parent)\n self.setWindowTitle('Face Detection')\n self.setGeometry(value.left, value.top, value.width, value.height)\n self.setWindowIcon(self.style().standardIcon(QStyle.SP_FileDialogInfoView))\n\n self.setStyleSheet(\"\"\"QToolTip { \n padding: 5px;\n }\n QWidget{\n background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #fc00ff, stop:1 #00dbde);\n color: white;\n font-family: 'Roboto';\n \n }\n \n QPushButton{\n \n color: white;\n padding: 10px;\n font-family: 'Roboto';\n font-size: 20px;\n \n background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #525252 , stop:1 #3d72b4);\n \n }\"\"\")\n\n # label_3 = QLabel(\"Image OCR\")\n self.button = QPushButton()\n self.button.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)\n self.button.setIcon(QIcon('./assets/image_dialog.png'))\n self.button.setIconSize(QSize(200, 200))\n self.button.clicked.connect(self.image_input)\n \n layoutV = QVBoxLayout()\n self.pushButton = QPushButton(self)\n self.pushButton.setStyleSheet('background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 #136a8a, stop:1 #267871);')\n self.pushButton.setText('Back')\n self.pushButton.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))\n self.pushButton.setIconSize(QSize(20, 20))\n self.pushButton.clicked.connect(self.goMainWindow)\n layoutV.addWidget(self.pushButton)\n \n layoutH = QHBoxLayout()\n # layoutV.addWidget(label_3)\n # label_3.setFont(QFont('Roboto', 20))\n # label_3.setAlignment(Qt.AlignCenter)\n layoutH.addWidget(self.button)\n self.layoutH = layoutH\n self.layoutV = layoutV\n self.button.clicked.connect(self.createHorizontalLayout)\n layoutV.addLayout(layoutH)\n self.setStyleSheet(\"\"\"QToolTip { \n padding: 5px;\n }\n QPushButton{\n background-color: #333;\n color: white;\n padding: 10px;\n font-family: 'Roboto';\n font-size: 14px;\n }\"\"\")\n self.setLayout(layoutV)\n def image_input(self):\n options = QFileDialog.Options()\n fname = QFileDialog.getOpenFileName(self, 'Open file',\n 'c:\\\\', \"Image files (*.jpg *.gif *.png *.jpeg)\", options=options)\n imagePath = fname[0]\n\n if imagePath:\n # Get the image\n #ocr = Face detect(imagePath)\n self.image_path = imagePath\n self.layoutH.removeWidget(self.button)\n self.button.deleteLater()\n self.button = None\n pixmap = QPixmap(imagePath)\n self.preview = QLabel()\n self.preview.setPixmap(pixmap)\n self.preview.setPixmap(pixmap.scaled(pixmap.width()//1, pixmap.height()//2, Qt.KeepAspectRatio, Qt.FastTransformation))\n self.layoutH.addWidget(self.preview)\n 
self.preview.setAlignment(Qt.AlignCenter)\n\n\n\n\n    def createHorizontalLayout(self):\n        layout = QHBoxLayout()\n        face_detect = QPushButton(\"Face Detection in Image\", self)\n        face_detect.setToolTip(\"Click here to detect face in an image\")\n        face_detect.clicked.connect(self.handleFaceDetect)\n        layout.addWidget(face_detect)\n\n        tts = QPushButton(\"Face detection in video\", self)\n        tts.setToolTip(\"Click here to detect face in a video\")\n        tts.clicked.connect(self.handleVideodetect)\n        layout.addWidget(tts)\n        self.layoutV.addLayout(layout)\n        self.text = QLabel(\"\", self)\n        self.layoutV.addWidget(self.text)\n\n\n    def handleFaceDetect(self):\n        # Handles the face detection and shows the resulting image\n        image_detect = facedetectImage(self.image_path)\n        cv2.imshow('Result', image_detect)\n        #height, width, channel = text_detect.shape\n        #bytesPerLine = 3 * width\n        #qImg = QImage(text_detect.data, width, height, bytesPerLine, QImage.Format_BGR888)\n        #pixmap = QPixmap(qImg)\n        #self.detected_text.setPixmap(pixmap.scaled(pixmap.width()//3, pixmap.height()//2, Qt.KeepAspectRatio, Qt.FastTransformation))\n        #pixmap = QPixmap(self.image_path)\n        #self.detectedText = QLabel()\n        #self.detectedText.setPixmap(pixmap)\n        #self.detectedText.setPixmap(pixmap.scaled(pixmap.width()//1, pixmap.height()//2, Qt.KeepAspectRatio, Qt.FastTransformation))\n        #self.layoutV.addWidget(self.detectedText)\n        #self.detectedText.setAlignment(Qt.AlignCenter)\n\n    def handleVideodetect(self):\n        facedetectVideo()\n\n    def goMainWindow(self):\n        self.cams = App()\n        self.cams.show()\n        self.close()\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = App()\n    sys.exit(app.exec_())\n","sub_path":"PythonProject.py","file_name":"PythonProject.py","file_ext":"py","file_size_in_byte":25718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"634890602","text":"# Owner(s): [\"module: fx\"]\n\nimport copy\nimport sys\nimport logging\nfrom typing import List, Tuple\n\nimport torch\nfrom torch.fx._symbolic_trace import symbolic_trace\nfrom torch.fx.experimental.proxy_tensor import make_fx\nfrom torch.fx.passes.backends.nvfuser import NvFuserBackend\n\nfrom torch.testing._internal.common_utils import run_tests, TEST_CUDA, TestCase\nfrom torch.testing._internal.common_device_type import (\n    instantiate_device_type_tests,\n    dtypes,\n)\n\nif not TEST_CUDA:\n    print('CUDA not available, skipping tests', file=sys.stderr)\n    TestCase = object  # noqa: F811\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nclass HF_T5_Partial(torch.nn.Module):\n\n    def inputs_meta(self):\n        return [\n            (torch.Size([512, 512]), torch.float32),\n            (torch.Size([512, 512]), torch.float32),\n            (torch.Size([512, 512]), torch.float32),\n            (torch.Size([512, 512]), torch.float32),\n            (torch.Size([512]), torch.float32),\n            (torch.Size([2048, 512]), torch.float32),\n            (torch.Size([512, 2048]), torch.float32),\n            (torch.Size([512]), torch.float32),\n            (torch.Size([8, 1024, 512]), torch.float32),\n            (torch.Size([8, 8, 1024, 1024]), torch.float32),\n        ]\n\n    def forward(self, primals_1, primals_2, primals_3, primals_4, primals_5,\n                primals_6, primals_7, primals_8, primals_9, primals_10):\n        pow_1 = torch.ops.aten.pow(primals_9, 2)\n        mean = torch.ops.aten.mean(pow_1, [-1], True)\n        add = torch.ops.aten.add(mean, 1e-06)\n        rsqrt = torch.ops.aten.rsqrt(add)\n        mul = torch.ops.aten.mul(primals_9, rsqrt)\n        mul_1 = torch.ops.aten.mul(primals_5, mul)\n        t = torch.ops.aten.t(primals_3)\n        view = 
torch.ops.aten.view(mul_1, [8192, 512])\n mm = torch.ops.aten.mm(view, t)\n _unsafe_view = torch.ops.aten._unsafe_view(mm, [8, 1024, 512])\n view_1 = torch.ops.aten.view(_unsafe_view, [8, -1, 8, 64])\n transpose = torch.ops.aten.transpose(view_1, 1, 2)\n t_1 = torch.ops.aten.t(primals_1)\n view_2 = torch.ops.aten.view(mul_1, [8192, 512])\n mm_1 = torch.ops.aten.mm(view_2, t_1)\n _unsafe_view_1 = torch.ops.aten._unsafe_view(mm_1, [8, 1024, 512])\n view_3 = torch.ops.aten.view(_unsafe_view_1, [8, -1, 8, 64])\n transpose_1 = torch.ops.aten.transpose(view_3, 1, 2)\n t_2 = torch.ops.aten.t(primals_4)\n view_4 = torch.ops.aten.view(mul_1, [8192, 512])\n mm_2 = torch.ops.aten.mm(view_4, t_2)\n _unsafe_view_2 = torch.ops.aten._unsafe_view(mm_2, [8, 1024, 512])\n view_5 = torch.ops.aten.view(_unsafe_view_2, [8, -1, 8, 64])\n transpose_2 = torch.ops.aten.transpose(view_5, 1, 2)\n transpose_3 = torch.ops.aten.transpose(transpose_1, 3, 2)\n expand = torch.ops.aten.expand(transpose, [8, 8, 1024, 64])\n clone = torch.ops.aten.clone(expand, memory_format=torch.contiguous_format)\n _unsafe_view_3 = torch.ops.aten._unsafe_view(clone, [64, 1024, 64])\n expand_1 = torch.ops.aten.expand(transpose_3, [8, 8, 64, 1024])\n clone_1 = torch.ops.aten.clone(expand_1, memory_format=torch.contiguous_format)\n _unsafe_view_4 = torch.ops.aten._unsafe_view(clone_1, [64, 64, 1024])\n bmm = torch.ops.aten.bmm(_unsafe_view_3, _unsafe_view_4)\n _unsafe_view_5 = torch.ops.aten._unsafe_view(bmm, [8, 8, 1024, 1024])\n add_ = torch.ops.aten.add_(_unsafe_view_5, primals_10)\n _softmax = torch.ops.aten._softmax(add_, -1, False)\n expand_2 = torch.ops.aten.expand(_softmax, [8, 8, 1024, 1024])\n view_6 = torch.ops.aten.view(expand_2, [64, 1024, 1024])\n expand_3 = torch.ops.aten.expand(transpose_2, [8, 8, 1024, 64])\n clone_2 = torch.ops.aten.clone(expand_3, memory_format=torch.contiguous_format)\n _unsafe_view_6 = torch.ops.aten._unsafe_view(clone_2, [64, 1024, 64])\n bmm_1 = torch.ops.aten.bmm(view_6, _unsafe_view_6)\n _unsafe_view_7 = torch.ops.aten._unsafe_view(bmm_1, [8, 8, 1024, 64])\n transpose_4 = torch.ops.aten.transpose(_unsafe_view_7, 1, 2)\n clone_3 = torch.ops.aten.clone(transpose_4, memory_format=torch.contiguous_format)\n view_7 = torch.ops.aten.view(clone_3, [8, -1, 512])\n t_3 = torch.ops.aten.t(primals_2)\n view_8 = torch.ops.aten.view(view_7, [8192, 512])\n mm_3 = torch.ops.aten.mm(view_8, t_3)\n _unsafe_view_8 = torch.ops.aten._unsafe_view(mm_3, [8, 1024, 512])\n add_1 = torch.ops.aten.add(primals_9, _unsafe_view_8)\n pow_2 = torch.ops.aten.pow(add_1, 2)\n mean_1 = torch.ops.aten.mean(pow_2, [-1], True)\n add_2 = torch.ops.aten.add(mean_1, 1e-06)\n rsqrt_1 = torch.ops.aten.rsqrt(add_2)\n mul_2 = torch.ops.aten.mul(add_1, rsqrt_1)\n mul_3 = torch.ops.aten.mul(primals_8, mul_2)\n t_4 = torch.ops.aten.t(primals_6)\n view_9 = torch.ops.aten.view(mul_3, [8192, 512])\n mm_4 = torch.ops.aten.mm(view_9, t_4)\n _unsafe_view_9 = torch.ops.aten._unsafe_view(mm_4, [8, 1024, 2048])\n relu = torch.ops.aten.relu(_unsafe_view_9)\n t_5 = torch.ops.aten.t(primals_7)\n view_10 = torch.ops.aten.view(relu, [8192, 2048])\n mm_5 = torch.ops.aten.mm(view_10, t_5)\n _unsafe_view_10 = torch.ops.aten._unsafe_view(mm_5, [8, 1024, 512])\n add_3 = torch.ops.aten.add(add_1, _unsafe_view_10)\n return [add_3, rsqrt, _unsafe_view_3, t_3, _softmax, view_6, mul_2, t, view_9, t_1, primals_5, add_1,\n _unsafe_view_4, view_2, view_10, t_5, t_2, primals_8, view_4, view_8, rsqrt_1, primals_9, t_4,\n mul, _unsafe_view_6, relu, view]\n\n\nclass 
TestFxNvFuserBackend(TestCase):\n\n def _generate_random_inputs(self, device, inputs_meta: List[Tuple[torch.Size, torch.dtype]]):\n inputs = []\n for meta in inputs_meta:\n shape, dtype = meta\n\n if dtype in {torch.int, torch.int32, torch.int64, torch.bool, torch.int, torch.uint8}:\n input = torch.randint(0, 1, shape, dtype=dtype, device=device)\n else:\n input = torch.rand(shape, dtype=dtype, device=device)\n\n inputs.append(input)\n\n return inputs\n\n\n @dtypes(torch.float32)\n def test_nvfuser_call_module_backend(self, device, dtype):\n\n class Model(torch.nn.Module):\n\n def __init__(self):\n super(Model, self).__init__()\n self.bn = torch.nn.BatchNorm2d(3)\n self.relu = torch.nn.ReLU()\n\n def forward(self, inp):\n o = self.bn(inp)\n o = self.relu(o)\n return o\n\n inp = torch.randn(2, 3, 4, 5).to(dtype=dtype, device=device)\n m = Model().to(dtype=dtype, device=device)\n\n # note that the traced module here contains only `call_module` node,\n # which isn't fused by nvfuser backend. But `nvfuser.compile` should run without error\n traced = symbolic_trace(m)\n\n nvfuser = NvFuserBackend()\n compiled_module = nvfuser.compile(traced)\n\n eager_result = m(inp)\n nvfuser_result = compiled_module(inp)\n\n torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)\n\n\n @dtypes(torch.float32)\n def test_nvfuser_backend(self, device, dtype):\n m = HF_T5_Partial()\n m.to(device)\n\n traced = symbolic_trace(m)\n\n nvfuser = NvFuserBackend()\n compiled_module = nvfuser.compile(traced)\n\n inputs = self._generate_random_inputs(device, m.inputs_meta())\n\n eager_result = m(*inputs)\n nvfuser_result = compiled_module(*inputs)\n\n torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)\n\n @dtypes(torch.float32)\n def test_aten_square(self, device, dtype):\n\n def fn(x):\n square = torch.square(x)\n a = square + 1\n b = a + 1\n return b\n\n inputs = torch.randn(4, device=device)\n traced = make_fx(fn)(inputs)\n\n nvfuser = NvFuserBackend()\n compiled_module = nvfuser.compile(copy.deepcopy(traced))\n\n for node in compiled_module.graph.nodes:\n if node.op == \"call_function\":\n assert \"fused\" in str(node.target), \"the entire function should be fused into a single fusion group\"\n\n eager_result = traced(inputs)\n nvfuser_result = compiled_module(inputs)\n torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)\n\n @dtypes(torch.float32)\n def test_aten_leakyrelu(self, device, dtype):\n\n def fn(x):\n square = torch.ops.aten.leaky_relu(x, 0.1)\n a = square + 1\n b = a + 1\n return b\n\n inputs = torch.randn(4, device=device)\n traced = make_fx(fn)(inputs)\n\n nvfuser = NvFuserBackend()\n compiled_module = nvfuser.compile(copy.deepcopy(traced))\n\n for node in compiled_module.graph.nodes:\n if node.op == \"call_function\":\n assert \"fused\" in str(node.target), \"the entire function should be fused into a single fusion group\"\n\n eager_result = traced(inputs)\n nvfuser_result = compiled_module(inputs)\n torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)\n\n @dtypes(torch.float32)\n def test_aten_where(self, device, dtype):\n\n def fn(x):\n where = torch.ops.aten.where(x < 0, -x, x)\n a = where + 1\n b = a + 1\n return b\n\n inputs = torch.randn(4, device=device)\n traced = make_fx(fn)(inputs)\n\n nvfuser = NvFuserBackend()\n compiled_module = nvfuser.compile(copy.deepcopy(traced))\n\n for node in compiled_module.graph.nodes:\n if node.op == \"call_function\":\n assert \"fused\" in str(node.target), 
\"the entire function should be fused into a single fusion group\"\n\n        eager_result = traced(inputs)\n        nvfuser_result = compiled_module(inputs)\n        torch.testing.assert_close(eager_result, nvfuser_result, rtol=1e-5, atol=1e-5)\n\ninstantiate_device_type_tests(TestFxNvFuserBackend, globals(), only_for=\"cuda\")\n\nif __name__ == \"__main__\":\n    run_tests()\n","sub_path":"test/test_fx_backends.py","file_name":"test_fx_backends.py","file_ext":"py","file_size_in_byte":10242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"586977976","text":"'''\n\nOct 29th 2020 Daily\n\n849. Maximize Distance to Closest Person\n\nYou are given an array representing a row of seats where seats[i] = 1 represents a person sitting in the ith seat, and seats[i] = 0 represents that the ith seat is empty (0-indexed).\n\nThere is at least one empty seat, and at least one person sitting.\n\nAlex wants to sit in the seat such that the distance between him and the closest person to him is maximized.\n\nReturn that maximum distance to the closest person.\n\nExample 1:\n\nInput: seats = [1,0,0,0,1,0,1]\nOutput: 2\nExplanation:\nIf Alex sits in the second open seat (i.e. seats[2]), then the closest person has distance 2.\nIf Alex sits in any other open seat, the closest person has distance 1.\nThus, the maximum distance to the closest person is 2.\n\nExample 2:\n\nInput: seats = [1,0,0,0]\nOutput: 3\nExplanation:\nIf Alex sits in the last seat (i.e. seats[3]), the closest person is 3 seats away.\nThis is the maximum distance possible, so the answer is 3.\n\nExample 3:\n\nInput: seats = [0,1]\nOutput: 1\n\nConstraints:\n\n2 <= seats.length <= 2 * 10^4\nseats[i] is 0 or 1.\nAt least one seat is empty.\nAt least one seat is occupied.\n'''\n\nfrom typing import List\n\nclass Solution:\n    def maxDistToClosest(self, seats: List[int]) -> int:\n        # Find the leftmost and rightmost occupied seats\n        start_l, start_r = 0, len(seats) - 1\n\n        while seats[start_l] != 1 and start_l != start_r:\n            start_l += 1\n        while seats[start_r] != 1 and start_r != start_l:\n            start_r -= 1\n\n        # Seed the answer with the larger of the two edge gaps\n        d = max(start_l, len(seats) - 1 - start_r)\n\n        l = start_l\n        for i in range(start_l + 1, start_r + 1):\n            # Each occupied seat at i closes a gap; the best seat inside\n            # that gap is its midpoint, (i - l) // 2 away from both people\n            if seats[i] == 1:\n                max_distance = (i - l) // 2\n                if max_distance > d:\n                    d = max_distance\n                l = i\n\n        return d\n","sub_path":"Other/Daily/MaximizeDistanceToClosetPerson.py","file_name":"MaximizeDistanceToClosetPerson.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"168632544","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.contrib.auth.models import User\n\n\n@python_2_unicode_compatible\nclass BroadcastMessages(models.Model):\n    broadcast_type = models.CharField(max_length=10, blank=True, null=True)\n    broadcast_message = models.CharField(max_length=500, default='Enter a single line summary')\n    group_role = models.CharField(max_length=10, blank=True, null=True)\n    claim = models.BooleanField(default=False)\n    claim_by = models.ForeignKey(User, on_delete=models.DO_NOTHING, blank=True, null=True)\n    br_params = models.IntegerField(blank=True, 
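The `maxDistToClosest` solution above trims the two edge gaps first and then scans the middle. The same answer also falls out of a single pass that only remembers the previous occupied seat — an alternative sketch (mine, not the author's), checked against the three examples from the problem statement:

```python
from typing import List

def max_dist_to_closest(seats: List[int]) -> int:
    prev = -1   # index of the last occupied seat seen so far
    best = 0
    for i, taken in enumerate(seats):
        if taken:
            # Leading gap: Alex sits at seat 0; otherwise sit mid-gap.
            best = i if prev == -1 else max(best, (i - prev) // 2)
            prev = i
    # Trailing gap: Alex sits in the last seat.
    return max(best, len(seats) - 1 - prev)

assert max_dist_to_closest([1, 0, 0, 0, 1, 0, 1]) == 2
assert max_dist_to_closest([1, 0, 0, 0]) == 3
assert max_dist_to_closest([0, 1]) == 1
```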
null=True)\n\n def __str__(self):\n return self.broadcast_message\n","sub_path":"broadcast/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"414779689","text":"import re, sys\n\nt = \"\"\nfor fName in sys.argv[1:]:\n with open(fName) as infile:\n t=infile.read()\n\n if '\\u2005' in t or '\\u2212' in t or \"⋯\" in t:\n with open(fName,\"w\") as outfile:\n t = re.sub('\\u2212', ' ', t)\n t = re.sub(r'(:math:`.*)⋯([^`]*\\`)', r'\\1\\\\dots\\2', t)\n outfile.write(re.sub('\\u2005', ' ', t))\n\n print (\"replaced non-ascii spaces by ordinary spaces in\", fName)\n","sub_path":"eyes17/helpFiles/fixMathInRst.py","file_name":"fixMathInRst.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"473605280","text":"from django.utils.translation import ugettext as _\n\nfrom freenasUI.api.resources import (\n FTPResourceMixin, ISCSIPortalResourceMixin, ISCSITargetResourceMixin,\n ISCSITargetExtentResourceMixin, ISCSITargetToExtentResourceMixin,\n NFSResourceMixin, ServicesResourceMixin\n)\nfrom freenasUI.freeadmin.options import BaseFreeAdmin\nfrom freenasUI.freeadmin.site import site\nfrom freenasUI.services import models\n\n\nclass ServicesFAdmin(BaseFreeAdmin):\n\n resource_mixin = ServicesResourceMixin\n\n\nclass FTPFAdmin(BaseFreeAdmin):\n\n resource_mixin = FTPResourceMixin\n deletable = False\n icon_model = \"FTPIcon\"\n advanced_fields = (\n 'ftp_filemask',\n 'ftp_dirmask',\n 'ftp_fxp',\n 'ftp_ident',\n 'ftp_passiveportsmin',\n 'ftp_passiveportsmax',\n 'ftp_localuserbw',\n 'ftp_localuserdlbw',\n 'ftp_anonuserbw',\n 'ftp_anonuserdlbw',\n 'ftp_tls',\n 'ftp_tls_policy',\n 'ftp_tls_opt_allow_client_renegotiations',\n 'ftp_tls_opt_allow_dot_login',\n 'ftp_tls_opt_allow_per_user',\n 'ftp_tls_opt_common_name_required',\n 'ftp_tls_opt_enable_diags',\n 'ftp_tls_opt_export_cert_data',\n 'ftp_tls_opt_no_cert_request',\n 'ftp_tls_opt_no_empty_fragments',\n 'ftp_tls_opt_no_session_reuse_required',\n 'ftp_tls_opt_stdenvvars',\n 'ftp_tls_opt_use_implicit_ssl',\n 'ftp_tls_opt_dns_name_required',\n 'ftp_tls_opt_ip_address_required',\n 'ftp_ssltls_certfile',\n 'ftp_options',\n )\n\n\nclass ISCSITargetFAdmin(BaseFreeAdmin):\n\n menu_child_of = \"services.ISCSI\"\n icon_object = u\"TargetIcon\"\n icon_model = u\"TargetIcon\"\n icon_add = u\"AddTargetIcon\"\n icon_view = u\"ViewAllTargetsIcon\"\n\n exclude_fields = (\n 'id',\n 'iscsi_target_initialdigest',\n 'iscsi_target_type',\n )\n\n resource_mixin = ISCSITargetResourceMixin\n\n\nclass ISCSIPortalFAdmin(BaseFreeAdmin):\n\n menu_child_of = \"services.ISCSI\"\n icon_object = u\"PortalIcon\"\n icon_model = u\"PortalIcon\"\n icon_add = u\"AddPortalIcon\"\n icon_view = u\"ViewAllPortalsIcon\"\n inlines = [\n {\n 'form': 'iSCSITargetPortalIPForm',\n 'prefix': 'portalip_set',\n },\n ]\n\n resource_mixin = ISCSIPortalResourceMixin\n\n def get_datagrid_columns(self):\n columns = super(ISCSIPortalFAdmin, self).get_datagrid_columns()\n columns.insert(1, {\n 'name': 'iscsi_target_portal_ips',\n 'label': _('Listen'),\n 'sortable': False,\n })\n return columns\n\n\nclass ISCSIAuthCredentialFAdmin(BaseFreeAdmin):\n\n menu_child_of = \"services.ISCSI\"\n icon_object = u\"AuthorizedAccessIcon\"\n icon_model = u\"AuthorizedAccessIcon\"\n icon_add = u\"AddAuthorizedAccessIcon\"\n icon_view = u\"ViewAllAuthorizedAccessIcon\"\n\n exclude_fields = (\n 'id',\n 
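`fixMathInRst.py` above patches each offending code point with its own `re.sub`. For the plain one-to-one substitutions, a `str.translate` table is an equivalent, cheaper formulation; the sketch below is illustrative only, and the `⋯` → `\dots` rewrite still needs the `:math:` regex because that replacement is context-sensitive:

```python
# One-to-one replacements from the script above, expressed as a table.
# The original maps U+2212 (MINUS SIGN) to a space; if an ASCII
# hyphen-minus is actually wanted there, change the second entry to "-".
ASCII_FIXES = str.maketrans({
    "\u2005": " ",  # FOUR-PER-EM SPACE -> ordinary space
    "\u2212": " ",  # MINUS SIGN -> space, mirroring the original re.sub
})

def fix_spaces(text: str) -> str:
    return text.translate(ASCII_FIXES)
```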
'iscsi_target_auth_secret',\n        'iscsi_target_auth_peersecret',\n    )\n\n    resource_name = 'services/iscsi/authcredential'\n\n\nclass ISCSITargetToExtentFAdmin(BaseFreeAdmin):\n\n    menu_child_of = \"services.ISCSI\"\n    icon_object = u\"TargetExtentIcon\"\n    icon_model = u\"TargetExtentIcon\"\n    icon_add = u\"AddTargetExtentIcon\"\n    icon_view = u\"ViewAllTargetExtentsIcon\"\n\n    resource_mixin = ISCSITargetToExtentResourceMixin\n\n\nclass ISCSITargetExtentFAdmin(BaseFreeAdmin):\n\n    delete_form = \"ExtentDelete\"\n    delete_form_filter = {'iscsi_target_extent_type__exact': 'File'}\n    menu_child_of = \"services.ISCSI\"\n    icon_object = u\"ExtentIcon\"\n    icon_model = u\"ExtentIcon\"\n    icon_add = u\"AddExtentIcon\"\n    icon_view = u\"ViewAllExtentsIcon\"\n\n    resource_mixin = ISCSITargetExtentResourceMixin\n\n    exclude_fields = (\n        'id',\n        'iscsi_target_extent_filesize',\n    )\n\n\nclass NFSFAdmin(BaseFreeAdmin):\n\n    resource_mixin = NFSResourceMixin\n    deletable = False\n    icon_model = 'NFSIcon'\n\n\nsite.register(models.FTP, FTPFAdmin)\nsite.register(models.iSCSITarget, ISCSITargetFAdmin)\nsite.register(models.iSCSITargetPortal, ISCSIPortalFAdmin)\nsite.register(models.iSCSITargetAuthCredential, ISCSIAuthCredentialFAdmin)\nsite.register(models.iSCSITargetToExtent, ISCSITargetToExtentFAdmin)\nsite.register(models.iSCSITargetExtent, ISCSITargetExtentFAdmin)\nsite.register(models.NFS, NFSFAdmin)\nsite.register(models.services, ServicesFAdmin)\n","sub_path":"gui/services/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"106018963","text":"# force python 3.* compatibility\nfrom __future__ import absolute_import, division, print_function\nfrom builtins import (bytes, str, open, super, range,\n                      zip, round, input, int, pow, object)\n# regular imports below:\nfrom pandas import Timedelta\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\nclass TradingSimulator(object):\n    def __init__(self, data=None, step_function=None):\n        # NOTE: subclasses are expected to provide self.cash and\n        # self.recorder before perform_simulation() is called.\n        self.data = data\n        #self.step_function = step_function\n        self.available_data = None\n        self.stock_assets = {}\n        self.simulated_date = None\n        self.interesting_stocks = ['NCC-A.ST']\n\n    def perform_simulation(self):\n        for self.simulated_date in self.data.index:\n            self.available_data = self.get_available_data(self.data, self.simulated_date)\n            self.step(self.available_data, self.simulated_date)\n            self.calculate_current_worth()\n        self.tear_down()\n\n    def step(self, available_data, simulated_date):\n        logging.debug('step base method')\n\n    def calculate_current_worth(self):\n        self.worth = self.cash\n        for stock_name in self.stock_assets:\n            self.worth += self.stock_assets[stock_name] * self.get_price_of_security(stock_name)\n        self.recorder.loc[self.simulated_date, 'worth'] = self.worth\n\n    def tear_down(self):\n        logging.debug('tear_down base method')\n\n    @staticmethod\n    def get_available_data(data, simulated_date):\n        data_available_until = simulated_date - Timedelta('1 days')\n        available_data = data[:data_available_until]\n        return available_data\n\n    def put_order(self, security, number):\n        try:\n            price = self.get_price_of_security(security)\n            cost = price * number\n            self.stock_assets[security] = number\n            self.cash -= cost\n        except KeyError:\n            logging.error('Could not perform order since price info does not exist.')\n\n    def sell_order(self, security, number):\n        try:\n            price = self.get_price_of_security(security)\n            sell_price = price * number\n            self.stock_assets[security] -= 
number\n            self.cash += sell_price\n        except KeyError:\n            logging.error('Could not perform order since price info does not exist.')\n\n    def get_price_of_security(self, security):\n        # pandas removed DataFrame.ix; .loc is the supported equivalent\n        price = self.data.loc[self.simulated_date, security]\n        return price\n","sub_path":"rikedom/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"293131817","text":"from datetime import timedelta, datetime\nfrom Option import OptionInterface, OptionStrategy, POSITION, VerticalSpread, IronCondor, Option, TYPE\nfrom scipy.stats import norm\nfrom math import sqrt, floor\nimport pandas\n\n\nclass MarketConditions:\n    def __init__(self, short_term_duration: timedelta = timedelta(days=14),\n                 long_term_duration: timedelta = None,\n                 st_lt_ratio: float = 1.2):\n        \"\"\"\n        Determines what market conditions are needed for a trade to be made. It compares the annualized volatility\n        over the short_term_duration with the annualized volatility over the long_term_duration; if the ratio is\n        >= st_lt_ratio, then the market conditions are favorable for a trade.\n\n        :param short_term_duration: timedelta - How many days back to use for the short term volatility\n        :param long_term_duration: timedelta - How many days back to use for the long term volatility (use None\n        for all time)\n        :param st_lt_ratio: float - The ratio of short term volatility / long term volatility\n        \"\"\"\n        if long_term_duration is None:\n            self.use_all_time = True\n        else:\n            self.use_all_time = False\n            self.long_term_duration = long_term_duration\n\n        self.short_term_duration = short_term_duration\n\n        self.st_lt_ratio = st_lt_ratio\n\n\nclass TradingStrategy:\n    def __init__(self, managing_winners: float, managing_losers: float, market_conditions: MarketConditions,\n                 min_pop: float, max_pop: float, option_strategy: OptionStrategy, max_capital_r: float,\n                 min_capital_r: float, time_to_exp: timedelta, delta: timedelta):\n        \"\"\"\n        This class holds all the information to represent the trading strategy used for opening and closing option\n        positions.\n\n        :param managing_winners: float - at what percent of max profit the position should be closed\n        :param managing_losers: float - at what percent of max loss the position should be closed\n        :param market_conditions: MarketConditions - The market conditions for a favorable trade\n        :param min_pop: float - minimum probability of profit of a trade\n        :param max_pop: float - maximum probability of profit of a trade\n        :param option_strategy: OptionStrategy - what type of simple option strategy is being used\n        :param max_capital_r: float - The max percentage of the current balance a trade's max loss can be\n        :param min_capital_r: float - The min percentage of the current balance a trade's max loss can be\n        :param time_to_exp: timedelta - How far out the expiration should be for a contract to be attractive\n        :param delta: timedelta - Only look at options that are time_to_exp ± delta days from expiration\n        \"\"\"\n        self.time_to_exp = time_to_exp\n        self.delta = delta\n        self.managing_winners = managing_winners\n        self.managing_losers = managing_losers\n        self.market_conditions = market_conditions\n        self.min_pop = min_pop\n        self.max_pop = max_pop\n        self.option_strategy = option_strategy\n        self.max_capital_requirements = max_capital_r  # too large and the trade is dangerous\n        self.min_capital_requirements = min_capital_r  # too small and the trade isn't worth it\n\n    def should_make_trade(self, 
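`MarketConditions` above only stores the windows and the threshold; producing the two volatility numbers that `should_make_trade` consumes is left to the caller. A plausible sketch of that computation — my assumption, including the 252-trading-day annualization factor:

```python
import numpy as np
import pandas as pd

TRADING_DAYS = 252  # assumed equity-style annualization factor

def annualized_vol(prices: pd.Series) -> float:
    # Standard deviation of daily log returns, scaled to one year.
    log_returns = np.log(prices / prices.shift(1)).dropna()
    return float(log_returns.std() * np.sqrt(TRADING_DAYS))

def conditions_met(prices: pd.Series, short_days: int = 14,
                   st_lt_ratio: float = 1.2) -> bool:
    short_vol = annualized_vol(prices.iloc[-short_days:])
    long_vol = annualized_vol(prices)  # the "all time" long window
    return short_vol / long_vol >= st_lt_ratio
```

`should_make_trade(short_term_vol, long_term_vol)` then receives exactly these two numbers.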
short_term_vol: float, long_term_vol: float) -> bool:\n \"\"\"\n Returns if a trade should be made based on the current market conditions\n\n :param short_term_vol: float - the annualized volatility over a short time frame\n :param long_term_vol: - the annualized volatility over a long time frame\n :return: bool - If a position should be opened or not\n \"\"\"\n if short_term_vol / long_term_vol >= self.market_conditions.st_lt_ratio:\n return True\n else:\n return False\n\n def manage_winner(self, option: OptionInterface, current_price: float) -> bool:\n \"\"\"\n Determines if the option is a winner and should be managed, i.e. close the position at a profit.\n\n :param option: OptionInterface - The option in question\n :param current_price: float - The current price of the underlying asset\n :return: bool - If the option is a winner and should be managed\n \"\"\"\n if option.get_returns(current_price) >= option.get_max_profit() * self.managing_winners:\n return True\n else:\n return False\n\n def manage_loser(self, option: OptionInterface, current_price: float) -> bool:\n \"\"\"\n Determines if the option is a loser and should be managed, i.e. close the position at a loss.\n\n :param option: OptionInterface - The option in question\n :param current_price: float - The current price of the underlying asset\n :return: bool - If the option is a loser and should be managed\n \"\"\"\n if option.get_returns(current_price) <= option.get_max_loss() * self.managing_losers:\n return True\n else:\n return False\n\n def find_option_position(self, all_options: pandas.DataFrame, current_price: float, annualized_vol: float,\n balance: float) -> OptionInterface:\n \"\"\"\n Finds suitable option trade. Annualized volatility needs to be in decimal form typically 0 < x < 1\n\n :param all_options: pandas.Dataframe - All of the available option contracts from today\n :param current_price: float - The current price of the underlying asset\n :param annualized_vol: float - The all time annualized volatility between 0 and 1\n :param balance: float - The account's current balance\n :return: OptionInterface - The found option (or None if none were found)\n \"\"\"\n # Get the options whose strike price is within our PoP range\n min_pop_percent_change = self.__get_percent_change(self.min_pop, annualized_vol)\n max_pop_percent_change = self.__get_percent_change(self.max_pop, annualized_vol)\n\n if self.option_strategy == OptionStrategy.iron_condor_short: # find an iron condor position to open\n return self.get_iron_condor(all_options, current_price, balance, min_pop_percent_change,\n max_pop_percent_change, POSITION.short)\n\n def get_iron_condor(self, all_options: pandas.DataFrame, current_price: float, balance: float,\n min_pop_percent_change: float, max_pop_percent_change: float, position: POSITION) -> IronCondor:\n \"\"\"\n Finds a suitable Iron Condor position to open.\n\n :param all_options: pandas.Dataframe - All of the available option contracts from today\n :param current_price: float - The current price of the underlying asset\n :param balance: float - The accounts current balance\n :param min_pop_percent_change: float - The minimum percent change in the option price associated with the\n minimum PoP\n :param max_pop_percent_change: float - The maximum percent change in the option price associated with the\n maximum PoP\n :param position: POSITION - the position of the iron condor\n :return: IronCondor - The found Iron Condor (or None if none were found)\n \"\"\"\n # Find the short contracts that are within 
our PoP range\n max_pop_put_price, min_pop_call_price = self.__get_prices(min_pop_percent_change, current_price)\n min_pop_put_price, max_pop_call_price = self.__get_prices(max_pop_percent_change, current_price)\n options_in_range = self.__get_options_in_exp_range(all_options, self.time_to_exp, self.delta)\n puts_in_range = options_in_range[options_in_range['call/put'] == 'P']\n calls_in_range = options_in_range[options_in_range['call/put'] == 'C']\n short_puts = self.__get_options_in_strike_range(puts_in_range, min_pop_put_price, max_pop_put_price)\n short_calls = self.__get_options_in_strike_range(calls_in_range, min_pop_call_price, max_pop_call_price)\n if len(short_calls) == 0 or len(short_puts) == 0: # no calls or puts were found within our range\n return None\n else:\n # Split the difference on the potential short puts and calls\n short_put = short_puts.iloc[len(short_puts) // 2, :]\n short_call = short_calls.iloc[len(short_calls) // 2, :]\n\n # Find the long contracts that are within our capital requirement ranges.\n min_exposure = self.min_capital_requirements * balance / 100\n max_exposure = self.max_capital_requirements * balance / 100\n min_exposure_per_leg = min_exposure / 2\n max_exposure_per_leg = max_exposure / 2\n\n max_long_put_price = short_put['strike'] - min_exposure_per_leg\n min_long_put_price = short_put['strike'] - max_exposure_per_leg\n min_long_call_price = short_call['strike'] + min_exposure_per_leg\n max_long_call_price = short_call['strike'] + max_exposure_per_leg\n\n long_puts = self.__get_options_in_strike_range(puts_in_range, min_long_put_price, max_long_put_price)\n long_calls = self.__get_options_in_strike_range(calls_in_range, min_long_call_price, max_long_call_price)\n if len(long_calls) == 0 or len(long_puts) == 0: # no calls or puts were found within our range\n return None\n else:\n # Split the difference on the potential long puts and calls\n long_call = long_calls.iloc[len(long_calls) // 2, :]\n long_put = long_puts.iloc[len(long_puts) // 2, :]\n\n # Get Option objects from pandas series data\n short_put_option = self.__series_to_option(short_put, position)\n short_call_option = self.__series_to_option(short_call, position)\n long_put_option = self.__series_to_option(long_put, position)\n long_call_option = self.__series_to_option(long_call, position)\n\n # Get Vertical Spreads from Options\n put_vertical = VerticalSpread(long_put_option, short_put_option, position)\n call_vertical = VerticalSpread(long_call_option, short_call_option, position)\n return IronCondor(call_vertical, put_vertical, position)\n\n\n def quantity(self, max_loss: float, balance: float) -> int:\n \"\"\"\n The number of options to open based on the option_cost\n :param max_loss: float - The max loss of the option position\n :param balance: float - The current account balance\n :return: int - The number of option contracts to open\n \"\"\"\n max_quantity = (balance * self.max_capital_requirements) // (abs(max_loss) * 100)\n min_quantity = (balance * self.min_capital_requirements) // (abs(max_loss) * 100)\n return max(1, floor((max_quantity + min_quantity) // 2))\n\n def __series_to_option(self, option: pandas.Series, position) -> Option:\n \"\"\"\n Takes in a pandas Series that has all the information for a given option and returns an Option object comprised\n of that data.\n\n :param option: pandas.Series - The Series containing all the option data\n :param position: POSITION - The position of the option\n :return: Option - The option object\n \"\"\"\n option_type = TYPE.call 
if option['call/put'] == 'C' else TYPE.put\n strike = option['strike']\n premium = (option['ask'] + option['bid']) / 2\n expiration = datetime.strptime(option['expiration'], '%m/%d/%y')\n return Option(option_type, strike, premium, expiration, position)\n\n def __get_options_in_strike_range(self, options: pandas.DataFrame, min_price: float, max_price: float) \\\n -> pandas.DataFrame:\n \"\"\"\n Returns all the options in the specified strike price range\n\n :param options: pandas.DataFrame - All the options to look through and find the appropriate ones\n :param min_price: float - The minimum strike price\n :param max_price: float - The maximum strike price\n :return: pandas.DataFrame - The options that have their strike price in the specified range\n \"\"\"\n mask_1 = options['strike'] >= min_price\n mask_2 = options['strike'] <= max_price\n return options[mask_1 & mask_2]\n\n def __get_options_in_exp_range(self, all_options: pandas.DataFrame,\n time_to_exp: timedelta, delta: timedelta) -> pandas.DataFrame:\n \"\"\"\n Returns all the options in the specified expiration range\n\n :param all_options: pandas.DataFrame - All the options to look through and find the appropriate ones\n :param time_to_exp: timedelta - number of days until expiration\n :param delta: timedelta - Any option within time_to_exp ± delta will be considered valid\n :return: pandas.DataFrame - The options that have their expiration in the specified range\n \"\"\"\n if all_options is None or len(all_options) == 0:\n return all_options\n exp = all_options['expiration'].iloc[0]\n today = datetime.strptime(all_options['date'].iloc[0], '%m/%d/%y')\n exp_range_start = -1\n exp_range_end = -1\n for i in range(len(all_options)):\n if all_options['expiration'].iloc[i] != exp:\n exp = all_options['expiration'].iloc[i]\n exp_date = datetime.strptime(exp, '%m/%d/%y')\n days_til_exp = abs((today - exp_date).days)\n if (time_to_exp - delta).days <= days_til_exp <= (time_to_exp + delta).days and exp_range_start == -1:\n exp_range_start = i\n if (time_to_exp + delta).days < days_til_exp and exp_range_start != -1 and exp_range_end == -1:\n exp_range_end = i\n break\n options = all_options[exp_range_start:exp_range_end]\n return options\n\n def __get_percent_change(self, pop: float, annualized_vol: float) -> float:\n \"\"\"\n Returns the percent underlying asset change that correspond to the specified probability of profit and the\n annualized volatility\n\n :param pop: float - The probability of profit desired\n :param annualized_vol: float - The historic annualized volatility (one standard deviation percent change)\n :return: float - The percent change of the option premium\n \"\"\"\n percent_change = abs(norm.ppf((1-pop)/2, 0, annualized_vol))\n return percent_change\n\n def __get_prices(self, percent_change: float, current_price: float) -> (float, float):\n \"\"\"\n The range of prices of the underlying asset based on the given probability of profit\n\n :param percent_change: float - The percent change of the asset based on the PoP\n :param current_price: float - The current price of the underlying asset\n :return: float - The prices of the underlying asset\n \"\"\"\n lower_price = current_price - current_price * (percent_change / sqrt(365 / self.time_to_exp.days))\n upper_price = current_price + current_price * (percent_change / sqrt(365 / self.time_to_exp.days))\n\n return lower_price, 
upper_price\n\n\n","sub_path":"Trading_Strategy.py","file_name":"Trading_Strategy.py","file_ext":"py","file_size_in_byte":15402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"645146281","text":"print(\"-------------------------------\")\r\nprint(\"The 'whoever says 30 loses' game\")\r\nprint(\"-------------------------------\")\r\nprint('\\n')\r\nprint(\"Rules\")\r\nprint(\"Two players take turns counting up from 1 to 30; whoever says 30 loses\")\r\nprint(\"You may count at most 3 numbers per turn\")\r\n\r\nimport random\r\n\r\nx = 0\r\nturn = random.randint(0, 1)\r\n\r\nif turn == 1:\r\n    print(\"You go first\")\r\n    for i in range(30):\r\n        value_a = int(input(\"Enter a number from 1 to 3: \"))\r\n        x += value_a\r\n        if x >= 30:\r\n            x = 30\r\n        print(\"Current total: \" + str(x))\r\n        if x == 30:\r\n            print(\"You lose\")\r\n            break\r\n        value_b = random.randint(1, 3)\r\n        x += value_b\r\n        if x >= 30:\r\n            x = 30\r\n        print(\"Current total: \" + str(x))\r\n        if x == 30:\r\n            print(\"You win\")\r\n            break\r\nelse:\r\n    print(\"You go second\")\r\n    for i in range(30):\r\n        value_c = random.randint(1, 3)\r\n        x += value_c\r\n        if x >= 30:\r\n            x = 30\r\n        print(\"Current total: \" + str(x))\r\n        if x == 30:\r\n            print(\"You win\")\r\n            break\r\n        value_d = int(input(\"Enter a number from 1 to 3: \"))\r\n        x += value_d\r\n        if x >= 30:\r\n            x = 30\r\n        print(\"Current total: \" + str(x))\r\n        if x == 30:\r\n            print(\"You lose\")\r\n            break\r\n","sub_path":"30call-loser.py","file_name":"30call-loser.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"366477619","text":"import torch\nimport pickle\nimport numpy as np\nimport torch.utils.data as data_utils\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nfrom utils.evaluation import evaluate_vae as evaluate\nimport matplotlib.gridspec as gridspec\nimport sys\nfrom PIL import Image\nimport os\n\ndef plot_images(x_sample, dir, input_size=(1, 28, 28), input_type=\"binary\"):\n    for i, sample in enumerate(x_sample):\n        fig = plt.figure(figsize=(1/2.75, 1/2.75))\n        fig.subplots_adjust(wspace=0, hspace=0)\n        plt.axis('off')\n        sample = sample.reshape(input_size)\n        sample = sample.swapaxes(0, 2)\n        sample = sample.swapaxes(0, 1)\n        if input_type == 'binary' or input_type == 'gray':\n            sample = sample[:, :, 0]\n            plt.imshow(sample, cmap='gray')\n        else:\n            plt.imshow(sample)\n        print(\"image saved: {}{}\".format(dir, i))\n        plt.savefig(\"{}{}.png\".format(dir, i), bbox_inches='tight', pad_inches=0)\n        plt.close(fig)\n\npath = sys.argv[1]\nfolder = sys.argv[2]\n# image dimensions arrive as strings; cast them so reshape() gets ints\nx = int(sys.argv[3])\ny = int(sys.argv[4])\nz = int(sys.argv[5])\n\nBATCH_SIZE = 100\n\nmodel = torch.load(path)\nmodel.eval()\n\nsamples_x = model.generate_x(500)\ninput_size = [x, y, z]\n\nif not os.path.exists(\"/vae_vampprior/output/{}/\".format(folder)):\n    os.makedirs(\"/vae_vampprior/output/{}/\".format(folder))\nplot_images(samples_x.cpu().detach().numpy(), \"/vae_vampprior/output/{}/\".format(folder), input_size)\n","sub_path":"generate_fake_images.py","file_name":"generate_fake_images.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"236117065","text":"# 2-16、注册输入验证码流程整合\nfrom selenium import webdriver\nimport time\nimport random\nfrom PIL import Image\nfrom ShowapiRequest import ShowapiRequest\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\n\ndriver = webdriver.Chrome() # 
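As an aside on `30call-loser.py` above: the computer plays `random.randint(1, 3)`, but the game is solved. Since whoever says 30 loses, the safe totals to end a turn on are 1, 5, 9, …, 29 (anything ≡ 1 mod 4); from any other total exactly one legal move reaches a safe one. A sketch of an optimal move function (my addition, not in the script):

```python
def optimal_move(total: int) -> int:
    # Aim for the nearest total with total % 4 == 1 (..., 25, 29).
    move = (1 - total) % 4
    # If we are already on a "safe" total there is no winning move;
    # just add 1 and hope the opponent slips.
    return move if move in (1, 2, 3) else 1

assert optimal_move(0) == 1    # land on 1
assert optimal_move(26) == 3   # land on 29, forcing the opponent to say 30
```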
打开Google浏览器\ndriver.get(\"http://www.5itest.cn/register\") # 打开 网址\ntime.sleep(3) # 休息3秒\nprint(EC.title_contains(\"注册\")) # 判断网页标题是否包含“注册”\nemail_element = driver.find_element_by_id(\"register_email\") # 将定位邮箱输入框的元素方法封装\ndriver.save_screenshot(\"D:/Image/imooc.png\") # 截屏并保存图片\n# getcode_num:验证码图片元素的id,这一步就是获取code元素坐标\n# getcode_num识别的是验证码的定位元素,将这个定位的方法封装\ncode_element = driver.find_element_by_id(\"getcode_num\")\n# code_element.location:获取某个的位置\nprint(code_element.location) # 打印出的结果类似:{\"x\":123,\"y\":345}\n# 获取code图片坐标值,四个角的坐标值\nleft = code_element.location['x'] # 左上角坐标值\ntop = code_element.location['y'] # 右下角坐标值\nright = code_element.size['width'] + left # 宽度\nheight = code_element.size['height'] + top # 高度\n# 通过坐标值得到code image图\nim = Image.open(\"D:/Image/imooc.png\")\n# 将crop中的left、top、right、bottom封装成一个元组crop((left,top,right,bottom))\nimg = im.crop((left, top, right, height)) #按照一定的坐标对图片进行裁剪(这里是把验证码图片从网页图片中裁剪出来)\n# 将剪切下来的验证码图片保存\nimg.save(\"D:/Image/imooc2.png\")\n\n# 识别图片验证码是属于【识别 英数-文件】,对应的是“http://route.showapi.com/184-4”地址\nr = ShowapiRequest(\"http://route.showapi.com/184-4\",\"359461\",\"973cb85f8d50466ba8a11bf5bcb8835d\")\nr.addBodyPara(\"typeId\", \"35\")\nr.addBodyPara(\"convert_to_jpg\", \"0\")\nr.addBodyPara(\"needMorePrecise\", \"0\")\nr.addFilePara(\"image\",r\"D:/Image/imooc2.png\") # 需要转换的文件对象,filePath为绝对路径\nres = r.post()\ntext = res.json()['showapi_res_body']['Result']\nprint(text) # 返回信息\ntime.sleep(2)\ndriver.find_element_by_id('captcha_code').send_keys(text) # 在验证码输入框内输入验证码","sub_path":"5itest/Browers_Url.py","file_name":"Browers_Url.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"381956718","text":"from flask import Flask, request\n\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef trzy_liczby():\n formularz = \"\"\"\n
Podaj pierwszą liczbę\n
\n \n
\n \n
\n \n
\n \"\"\"\n if request.method == 'POST':\n try:\n first = float(request.form['first'])\n second = float(request.form['second'])\n third = float(request.form['third'])\n except ValueError:\n return \"Podaj liczby\" + formularz\n maximal = first\n if second > maximal:\n maximal = second\n if third > maximal:\n maximal = third\n return \"Największa z podanych liczb to \" + str(maximal)\n else:\n return formularz\n\n\napp.run(debug=True)\n","sub_path":"Zadania1/proste_zadanko_18.py","file_name":"proste_zadanko_18.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"133457418","text":"import pygame\nimport math\n\nfrom .Colors import *\nfrom .Field import Field\nfrom .RobotConn import Robot\n\n# Constants\nFRC_YEAR = 2020\nROBOT_SIZE = (60, 60)\n\n# Init pygame\npygame.init()\ngameDisplay = pygame.display.set_mode((1350, 650))\npygame.display.set_caption('5024 Fieldsim')\nclock = pygame.time.Clock()\n\n# Create a field & robot\nfield = Field(FRC_YEAR)\nrobot = Robot(ROBOT_SIZE)\n\n# Get field layers\nfield_base = field.getBase()\nfield_top = field.getTop()\n\n# Tracker for last connection state\nlast_conn_state = False\n\nwhile True:\n\n # Handle pushing connection info\n current_robot_state = robot.connected\n if current_robot_state != last_conn_state:\n print(\"Robot \" + \"Connected\" if current_robot_state else \"Disconnected\")\n last_conn_state = current_robot_state\n\n # Handle window close\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n # Clear the frame\n gameDisplay.fill(white)\n\n # Draw the field\n gameDisplay.blit(field_base, (0, 0))\n\n # Draw the \"robot\"\n robot.render(gameDisplay, field)\n\n # Add top of field\n gameDisplay.blit(field_top, (0, 0))\n\n # Update the screen\n pygame.display.update()\n clock.tick(60)\n\npygame.quit()\nquit()\n","sub_path":"fieldsim/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"640950341","text":"import dateutil.parser\nfrom datetime import datetime\nimport redis\nfrom flask import request\nfrom redis_manager import (users_pool, admins_pool)\n\nusers = redis.StrictRedis(connection_pool=users_pool)\nadmins = redis.StrictRedis(connection_pool=admins_pool)\n\ndef token_required(admin=False):\n def decorator(func):\n def wrapper(*args, **kwargs):\n token = request.headers.get('Authorization', None)\n if not token or len(token.split(' ')) < 2:\n return {\n 'message': 'Unathorized',\n 'description': 'Token is required.'\n }, 401\n token = token.split(' ')[1]\n if admin:\n token_expiry = admins.get(token)\n else:\n token_expiry = users.get(token)\n if not token_expiry:\n return {\n 'message': 'Unathorized'\n }, 401\n parsed_expiry = dateutil.parser.parse(token_expiry)\n if (parsed_expiry < datetime.now()):\n return {\n 'message': 'Unathorized',\n 'description': 'Token expired.'\n }, 401\n return func(*args, **kwargs)\n return wrapper\n return decorator\n","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"618253712","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.template.loader import get_template\nfrom django.template import Context, RequestContext\nfrom .models import Museo, Comentario, ConfigUsuario, 
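The `token_required` decorator in `auth.py` above expects an `Authorization: Bearer <token>` header and looks the token's expiry up in Redis. A hypothetical route guarded by it could look like the following (Flask ≥ 1.1 for dict returns); note that the decorator does not apply `functools.wraps`, so every wrapped view is named `wrapper`, and Flask therefore needs explicit `endpoint` names (or the decorator should gain `@wraps(func)`) once it protects more than one route:

```python
from flask import Flask
# from auth import token_required   # the decorator defined above

app = Flask(__name__)

@app.route("/me", endpoint="me")    # explicit endpoint: see note above
@token_required()                   # user token, checked in the users pool
def me():
    return {"message": "hello"}

@app.route("/admin", endpoint="admin")
@token_required(admin=True)         # checked against the admins pool instead
def admin_stats():
    return {"message": "admin ok"}
```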
Seleccionado\nfrom .parser import link_parse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\n\n\n@csrf_exempt\ndef inicio(request):\n template = get_template ('miplantilla/inicio.html')\n lista_museos = Museo.objects.all()\n #Museo.objects.all().delete() # Borro base datos\n\n if len(lista_museos) == 0: # SIN MUSEOS\n if request.method == \"GET\":\n titulo = \"Sin museos en la base de datos\"\n cargar = \"

\"\n c = RequestContext(request, {'titulo':titulo, 'cargar': cargar})\n elif request.method == 'POST':\n link_parse() # Cargo la info de museos en mi base de datos\n print (\"Asignando los atributos de models Museo...\")\n return HttpResponseRedirect('/')\n\n else: # CON MUSEOS\n\n if request.method == \"GET\":\n titulo = \"Museos con más comentarios\"\n filtrar = \"
\"\n\n elif request.method == \"POST\":\n formu = request.POST['accesible']\n if formu == \"1\":\n titulo = \"Museos accesibles\"\n lista_museos = lista_museos.filter(accesibilidad=True)\n filtrar = \"
\"\n elif formu == \"0\":\n titulo = \"Museos con más comentarios\"\n filtrar = \"
\"\n\n lista_museos = lista_museos.exclude(num_comentarios=0) # excluyo sin comentarios\n lista_museos = lista_museos.order_by('-num_comentarios') # ordeno de mayor a menor\n lista_museos = lista_museos[0:5] # los 5 primeros\n\n # Para la barra de usuarios lateral\n lista_usuarios = ConfigUsuario.objects.all()\n\n c = RequestContext(request, {'titulo':titulo, 'filtrar': filtrar, 'museos': lista_museos,\n 'usuarios': lista_usuarios})\n\n respuesta = template.render(c)\n return HttpResponse(respuesta)\n\n@csrf_exempt\ndef todos(request):\n template = get_template ('miplantilla/todos.html')\n titulo = \"Todos los museos\"\n dist_elegido = \"\"\n if request.method == 'GET':\n lista_museos = Museo.objects.all()\n\n # opciones del desplegable\n lista_distritos = Museo.objects.order_by() # distritos por orden\n lista_distritos = lista_distritos.values_list('distrito', flat=True).distinct()\n # formulario con elementos de base de datos\n filtrar = \"\"\n filtrar += \"\"\n filtrar += \"
\"\n\n elif request.method == \"POST\":\n dist_elegido = request.body.decode('utf-8').split(\"=\")[1].replace(\"+\", \" \")\n titulo += \" del distrito \" + dist_elegido\n lista_museos = \"\"\n lista_distritos = Museo.objects.filter(distrito=dist_elegido)\n lista_museos = lista_distritos\n\n # opciones del desplegable\n lista_distritos = Museo.objects.order_by()\n lista_distritos = lista_distritos.values_list('distrito', flat=True).distinct()\n # formulario de elementos de base de datos\n filtrar = \"
\"\n filtrar += \"\"\n filtrar += \"
\"\n #formulario para ver todos\n filtrar += \"
\"\n filtrar += \"
\"\n\n c = RequestContext(request, {'titulo':titulo, 'filtrar':filtrar, 'museos': lista_museos})\n\n respuesta = template.render(c)\n return HttpResponse(respuesta)\n\n@csrf_exempt\ndef museo(request, id):\n template = get_template ('miplantilla/museo.html')\n museo_elegido = Museo.objects.get(id=id)\n titulo = museo_elegido\n #accesibilidad\n accesible = museo_elegido.accesibilidad\n if accesible == True:\n accesible = \"Sí\"\n else:\n accesible = \"No\"\n #comentarios\n lista_coments = Comentario.objects.all()\n coments_museo = lista_coments.filter(museo=museo_elegido)\n\n #INTERFAZ PRIVADA\n form = '' #Sin formulario si no authenticated\n form2 = ''\n textoseleccion = ''\n if request.user.is_authenticated():\n #nuevo comentario si logged\n form = \"
\"\n form += \"\"\n form += \" \"\n form += \"\"\n form += \"
\"\n\n select = ConfigUsuario.objects.get(usuario=request.user)\n if request.method == \"GET\":\n try: #Si ya está seleccionado\n Seleccionado.objects.get(usuario=select, museo=museo_elegido)\n textoseleccion = \"PERTENECE A TU SELECCIÓN\"\n\t\t #borrar de mi selección de museos\n form2 = \"
\"\n form2 += \"\"\n form2 += \" \"\n form2 += \"
\"\n\n except Seleccionado.DoesNotExist: #Si no está seleccionado\n textoseleccion = \"¿AÑADIR A SELECCIONADOS?\"\n #añadir a mi selección de museos\n form2 = \"
\"\n form2 += \"\"\n form2 += \" \"\n form2 += \"
\"\n\n elif request.method == \"POST\":\n formu = request.POST['formulario']\n if formu == \"1\": # Si envío comentario\n # Guardo en mi base de datos el comentario\n texto = request.POST['texto']\n nuevo_coment = Comentario(texto=texto, museo=museo_elegido)\n nuevo_coment.save()\n #Actualizo el numero de comentarios\n museo_elegido.num_comentarios = museo_elegido.num_comentarios + 1\n museo_elegido.save()\n try: #Si ya está seleccionado\n Seleccionado.objects.get(usuario=select, museo=museo_elegido)\n textoseleccion = \"PERTENECE A TU SELECCIÓN\"\n\t\t #borrar de mi selección de museos\n form2 = \"
\"\n form2 += \"\"\n form2 += \" \"\n form2 += \"
\"\n except Seleccionado.DoesNotExist: #Si no está seleccionado\n textoseleccion = \"¿AÑADIR A SELECCIONADOS?\"\n #añadir a mi selección de museos\n form2 = \"
\"\n form2 += \"\"\n form2 += \" \"\n form2 += \"
\"\n elif formu == \"2\": # Si añado a selección\n # Guardo en mi base de datos la selección\n nueva_seleccion = Seleccionado(usuario=select, museo=museo_elegido)\n nueva_seleccion.save()\n print(\"se ha añadido\")\n textoseleccion = \"PERTENECE A TU SELECCIÓN\"\n form2 = \"
\"\n form2 += \"\"\n form2 += \" \"\n form2 += \"
\"\n elif formu == \"3\": # Si borro de selección\n # Guardo en mi base de datos la deselección\n nueva_seleccion = Seleccionado.objects.get(usuario=select, museo=museo_elegido)\n nueva_seleccion.delete()\n print(\"se ha borrado\")\n textoseleccion = \"¿AÑADIR A SELECCIONADOS?\"\n form2 = \"
\"\n form2 += \"\"\n form2 += \" \"\n form2 += \"
\"\n\n c = RequestContext(request, {'titulo':titulo, 'museo': museo_elegido,\n\t\t 'accesible': accesible, 'comentarios': coments_museo, 'form': form,\n 'form2': form2, 'textoseleccion': textoseleccion})\n\n respuesta = template.render(c)\n return HttpResponse(respuesta)\n\n\ndef about(request):\n #Directamente en el html\n pass\n\n@csrf_exempt\ndef login_form(request):\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n print(\"Autenticado como: \" + str(user))\n # user devuelve None si no está autenticado\n if user is not None:\n if user.is_active:\n login(request, user)\n try:\n registrado = ConfigUsuario.objects.get(usuario=username)\n except ConfigUsuario.DoesNotExist:\n registrado = ConfigUsuario(usuario=username)\n registrado.titulo = \"Página de \" + str(user) #inicializo el título\n registrado.save() #lo guardo en mi models\n print(registrado)\n\n return HttpResponseRedirect('/')\n\n\n@csrf_exempt\ndef logout_form(request):\n if request.method == \"POST\":\n logout(request)\n return HttpResponseRedirect('/')\n\n@csrf_exempt\ndef usuario(request, user):\n template = get_template ('miplantilla/usuario.html')\n # Inicializo el id para hacer la qs y poder acceder por páginas\n id = request.GET.get('id')\n if id == None:\n id = 0\n else:\n id = int(id)\n\n try:\n pagina_usuario = ConfigUsuario.objects.get(usuario=user)\n titulo = pagina_usuario.titulo\n usuario = user\n total_museos_selec = Seleccionado.objects.filter(usuario=pagina_usuario).count()\n\n lista_museos = Seleccionado.objects.all() #todas las elecciones.\n #solo los seleccionados, de 5 en 5 por id.\n lista_museos_usuario = Seleccionado.objects.filter(usuario=pagina_usuario)[id:id+5]\n id += 5 # A siguiente página\n\n if id >= total_museos_selec and total_museos_selec < 5:\n id = -1 # Solo hay una página\n elif id >= total_museos_selec:\n id = 0 # Para volver al inicio\n\n # Botón para generar el canal XML de página usuario\n xml_usu_form = '
'\n xml_usu_form += ''\n xml_usu_form += '
'\n\n # Botón para generar el canal JSON de página usuario\n json_usu_form = '
'\n json_usu_form += ''\n json_usu_form += '
'\n\n form1 = ''\n form2 = ''\n # INTERFAZ PRIVADA:\n # ha de estár autentificado y en su página\n if request.user.is_authenticated() and user == usuario:\n # Cambiar de título personal\n form1 = \"
\"\n form1 += \"\"\n form1 += \" \"\n form1 += \"\"\n form1 += \"
\"\n # Cambiar estilo de CSS\n form2 = \"
\"\n form2 += \"Color de fondo: \"\n form2 += \" Tamaño de letra (%): \"\n form2 += \" \"\n form2 += \"\"\n form2 += \"
\"\n\n if request.method == \"POST\":\n formu = request.POST['formulario']\n if formu == \"1\": # Si envío nuevo título\n titulo = request.POST['nuevo_titulo']\n # Actualizo el título\n ConfigUsuario.objects.filter(usuario=request.user).update(titulo=titulo)\n elif formu == \"2\": # Si envío nuevo estilo\n color = request.POST['nuevo_color']\n letra = request.POST['nueva_letra']\n # Actualizo el color de fondo\n ConfigUsuario.objects.filter(usuario=request.user).update(color_fondo=color)\n ConfigUsuario.objects.filter(usuario=request.user).update(tamaño_letra=letra)\n\n # si el recurso es incorrecto (nombre de usuario no registrado)\n except ConfigUsuario.DoesNotExist:\n titulo = \"Error, url no existe\"\n lista_museos_usuario = ''\n usuario = ''\n form1 = ''\n form2 = ''\n id = ''\n xml_usu_form = ''\n json_usu_form = ''\n\n c = RequestContext(request, {'titulo': titulo, 'seleccionados': lista_museos_usuario,\n 'id': id, 'usuario': usuario, 'form1': form1, 'form2': form2, 'xml_usu_form': xml_usu_form,\n 'json_usu_form': json_usu_form})\n\n respuesta = template.render(c)\n return HttpResponse(respuesta)\n\ndef xml_usuario(request, user):\n # Generar canal XML de la página de usuario.\n template = get_template('miplantilla/usuario_xml.xml')\n usuario = ConfigUsuario.objects.get(usuario=user)\n selecc_usuario = Seleccionado.objects.filter(usuario=usuario)\n\n c = RequestContext(request, {'usuario': usuario, 'seleccionados': selecc_usuario})\n respuesta = template.render(c)\n return HttpResponse(respuesta, content_type=\"text/xml\") #tipo xml\n\ndef json_usuario(request, user):\n # Generar canal JSON de la página de usuario.\n template = get_template('miplantilla/usuario_json.json')\n usuario = ConfigUsuario.objects.get(usuario=user)\n selecc_usuario = Seleccionado.objects.filter(usuario=usuario)\n\n c = RequestContext(request, {'usuario': usuario, 'seleccionados': selecc_usuario})\n respuesta = template.render(c)\n return HttpResponse(respuesta, content_type=\"text/json\") #tipo json\n\ndef xml_inicio(request):\n # Generar canal XML de la página de inicio.\n template = get_template('miplantilla/inicio_xml.xml')\n\n lista_museos = Museo.objects.all()\n lista_museos = lista_museos.exclude(num_comentarios=0) # excluyo sin comentarios\n lista_museos = lista_museos.order_by('-num_comentarios') # ordeno de mayor a menor\n lista_museos = lista_museos[0:5]\n\n c = RequestContext(request, {'museos': lista_museos})\n respuesta = template.render(c)\n return HttpResponse(respuesta, content_type=\"text/xml\") #tipo xml\n\ndef json_inicio(request):\n # Generar canal JSON de la página de inicio.\n template = get_template('miplantilla/inicio_json.json')\n\n lista_museos = Museo.objects.all()\n lista_museos = lista_museos.exclude(num_comentarios=0) # excluyo sin comentarios\n lista_museos = lista_museos.order_by('-num_comentarios') # ordeno de mayor a menor\n lista_museos = lista_museos[0:5]\n\n c = RequestContext(request, {'museos': lista_museos})\n respuesta = template.render(c)\n return HttpResponse(respuesta, content_type=\"text/json\") #tipo json\n\ndef rss_comentarios(request):\n #Generar rss con todos los comentarios. 
(Enlace desde inicio)\n template = get_template('miplantilla/rss_comentarios.rss')\n comentarios = Comentario.objects.all()\n\n c = RequestContext(request, {'comentarios': comentarios})\n respuesta = template.render(c)\n return HttpResponse(respuesta, content_type=\"text/rss\")\n\n@csrf_exempt\ndef registro(request):\n template = get_template('miplantilla/registro.html')\n if request.method == \"GET\":\n registro_form = \"
\"\n registro_form += \"\"\n registro_form += \"
Usuario:\"\n registro_form += \"
Contraseña:\"\n registro_form += \"
\"\n elif request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n nuevo_usuario = User.objects.create_user(username=username, password=password)\n nuevo_usuario.save()\n registro_form = \"\"\n return HttpResponseRedirect('/')\n\n c = RequestContext(request, {'registro_form': registro_form})\n respuesta = template.render(c)\n return HttpResponse(respuesta)\n\ndef css(request, mi_css):\n if request.user.is_authenticated():\n #print(\"Cargando plantilla de usuario...\")\n conf_usu = ConfigUsuario.objects.get(usuario=request.user)\n tamaño_letra = conf_usu.tamaño_letra\n color_fondo = conf_usu.color_fondo\n tamaño_letra = str(tamaño_letra) + '%'\n else:\n # plantilla por defecto\n tamaño_letra = '75%'\n color_fondo = 'white'\n\n template = get_template(\"miplantilla/css/style.css\")\n c = Context({'tamaño_letra': tamaño_letra, 'color_fondo': color_fondo})\n respuesta = template.render(c)\n return HttpResponse(respuesta, content_type=\"text/css\")\n\n","sub_path":"myfinalproject/museos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"44495018","text":"# Given a singly linked list where elements are sorted in ascending order, \n# convert it to a height balanced BST.\n\n\n\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\nclass Solution:\n \"\"\"\n @param head: The first node of linked list.\n @return: a tree node\n \"\"\"\n def sortedListToBST(self, head):\n # write your code here\n if head==None:\n return None\n \n def helper(start, end): #why 2 arguments(start, end)? not just head?\n if start==end:\n return None\n fast=start\n slow=start\n while fast != end and fast.next !=end:\n slow=slow.next\n fast=fast.next.next\n \n root=TreeNode(slow.val)\n root.left=helper(start,slow)\n root.right=helper(slow.next,end)\n return root\n \n return helper(head, None)\n \n \n \n \n","sub_path":"convert_sorted_list_to_balanced_BST.py","file_name":"convert_sorted_list_to_balanced_BST.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"452570506","text":"# Author: Dominic Lupo\n# Date: 03/12/20\n# Description: Defines a soldier piece in the game Xiangqi. 
\n\n\nfrom Piece import Piece\n\n\nclass Soldier(Piece):\n    \"\"\"Represents a soldier piece in the game Xiangqi.\"\"\"\n\n    def __init__(self, color):\n        \"\"\"Initializes the piece with the passed color.\"\"\"\n\n        possible_moves = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n\n        Piece.__init__(self, color, \"S\", possible_moves)\n","sub_path":"Soldier.py","file_name":"Soldier.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"155189274","text":"import os\nimport logging\nlog = logging.getLogger(__name__)\n\n\"\"\"\nThis library provides information about where files are located.\nWith the flag _stay_local you can decide whether configuration files\nand log files are created inside this directory (_stay_local=True)\nor where they belong according to platform standards (_stay_local=False).\n\n_stay_local=False has the advantages of not needing write permissions for\nthis directory and of giving each user their own settings.\n\nFor getting the platform dependent standard paths the non-standard\nlibrary appdirs is used. It can be installed with\n    pip install appdirs --user\nor downloaded from\n    https://pypi.python.org/pypi/appdirs/1.2.0\n\nAPP_NAME and APP_AUTHOR are required for appdirs to get the correct paths.\n\"\"\"\n\n\nAPP_NAME = \"youtube-dl-gui\"\nAPP_AUTHOR = \"custom\"\n\n_stay_local = False\n\n\n\nif not _stay_local:\n    try:\n        import appdirs\n    except ImportError:\n        _stay_local = True\n\n_PATH_SELF = os.path.split(__file__)[0]\n_PATH_TEMPLATES = os.path.join(_PATH_SELF, \"templates\")\nif _stay_local:\n    PATH_CONFIG = os.path.join(_PATH_SELF, \"config\")\n    PATH_LOG = os.path.join(_PATH_SELF, \"log\")\nelse:\n    PATH_CONFIG = appdirs.user_data_dir(APP_NAME, APP_AUTHOR)\n    PATH_LOG = appdirs.user_log_dir(APP_NAME, APP_AUTHOR)\n    \n\ndef get_config_ffn(fn, create=False, log=log):\n    ffn = os.path.join(PATH_CONFIG, fn)\n    if os.path.exists(ffn):\n        #log.log(logging.DEBUG, \"using configuration file: {ffn!r}\".format(ffn=ffn))\n        return ffn\n    \n    ffn_template = os.path.join(_PATH_TEMPLATES, fn)\n    if not create:\n        log.log(logging.DEBUG, \"{ffn!r} does not exist. 
using {ffn_template!r} instead\".format(ffn=ffn, ffn_template=ffn_template))\n return ffn_template\n\n if not os.path.isdir(PATH_CONFIG):\n log.log(logging.INFO, \"creating new directory {path!r} for config file {fn!r}\".format(path=PATH_CONFIG, fn=fn))\n os.makedirs(PATH_CONFIG)\n \n log.log(logging.INFO, \"creating new configuration file {ffn!r} from template {ffn_template!r}\".format(ffn_template=ffn_template, ffn=ffn))\n with open(ffn_template, 'rt') as f_from:\n with open(ffn, 'wt') as f_to:\n for ln in f_from:\n f_to.write(ln)\n #log.log(logging.DEBUG, \"using configuration file: {ffn!r}\".format(ffn=ffn))\n return ffn\n","sub_path":"metainfo.py","file_name":"metainfo.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"111095955","text":"# -*- coding: utf-8 -*-\n# pylint: disable=arguments-differ, abstract-method\nfrom __future__ import absolute_import\nimport logging\n\nimport mlflow\nimport torch\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass AverageMeter(torch.nn.Module):\n def __init__(self, *keys, use_mlflow=False):\n super(AverageMeter, self).__init__()\n self.step = 0\n self.use_mlflow = use_mlflow\n self.keys = keys\n self.reset()\n\n def reset(self, step=None, use_mlflow=False, mlflow_prefix=None):\n self.step = step if step is not None else self.step + 1\n\n if self.use_mlflow or use_mlflow:\n mlflow.log_metrics(self.get(prefix=mlflow_prefix), step=self.step)\n\n for key in self.keys:\n device = self._buffers[key].device if key in self._buffers else torch.device('cpu')\n self.register_buffer(key, torch.zeros(1, dtype=torch.float, device=device))\n self.register_buffer(key + '_count', torch.zeros(1, dtype=torch.int32, device=device))\n\n def update(self, key, value, n=1):\n if isinstance(value, torch.Tensor):\n value = value.detach()\n\n self._buffers[key] += value * n\n self._buffers[key + '_count'] += n\n return self\n\n def updates(self, dictionary, n=1):\n for key, value in dictionary.items():\n self.update(key, value, n)\n return self\n\n def get(self, prefix=None):\n if prefix is not None:\n return {prefix + '_' + key: self.__getitem__(key) for key in self.keys}\n return {key: self.__getitem__(key) for key in self.keys}\n\n def __getitem__(self, key):\n if key not in self._buffers:\n return 0.0\n if self._buffers[key + '_count'] == 0:\n return 0.0\n return self._buffers[key].item() / self._buffers[key + '_count'].item()\n\n def __str__(self):\n return ', '.join(['%s:%.3f' % (str(key), self.__getitem__(key)) for key in self.keys])\n","sub_path":"theconf/meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"206807352","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nBasic forecast using LR, LDA, QDA, LSVC, RSVM, RF\r\nExog. 
Vars: Lags 1-5 of Return and Volume\r\n\r\nInput: Stock ticker\r\nExample: forecast_1(\"SPY\")\r\n\r\nOutput: Confusion matrix statistics\r\n\r\n@author: vincentole\r\n\"\"\"\r\n\r\nimport os\r\nfrom datetime import datetime\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pandas_datareader.data as web\r\n \r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\r\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA \r\nfrom sklearn.svm import LinearSVC, SVC\r\nfrom sklearn.metrics import confusion_matrix, balanced_accuracy_score, recall_score, precision_score\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndef create_lagged_series(ticker, lags = 5):\r\n    \"\"\"Creates DataFrame that stores percentage returns of adjusted closing\r\n    price and lagged returns\"\"\"\r\n    \r\n    # Get Data \r\n    ts = web.DataReader(ticker, \"av-daily-adjusted\", datetime(2000,1,1), datetime.today(), api_key=os.getenv(\"ALPHAVANTAGE_API_KEY\"))\r\n    \r\n    # Create output data set\r\n    df = pd.DataFrame(index = ts.index)\r\n    \r\n    # Create return today\r\n    df[\"ret_l0\"] = ts[\"adjusted close\"].pct_change()*100\r\n    \r\n    # resolve issues with QDA for very small numbers\r\n    df.loc[ abs(df[\"ret_l0\"]) < 0.0001, \"ret_l0\" ] = 0.0001\r\n    \r\n    # Create lags\r\n    for lag in range(1, lags+1):\r\n        df[\"ret_l%s\" % lag] = df[\"ret_l0\"].shift(lag)\r\n        \r\n    for lag in range(1, lags+1):\r\n        df[\"volume_l%s\" % lag] = ts.volume.shift(lag)\r\n    \r\n    # Add direction column\r\n    df[\"direction\"] = np.sign(df[\"ret_l0\"])\r\n    \r\n    # Drop today's return to avoid look-ahead bias, and drop NaN rows\r\n    df.drop([\"ret_l0\"], axis = 1, inplace = True)\r\n    df.dropna(inplace = True)\r\n    \r\n    return df\r\n\r\n\r\ndef forecast_1(df):\r\n    \"\"\"Create classification forecasts of 'direction' \"\"\"\r\n    output = {}\r\n    \r\n    # Split df into train and test\r\n    y = df.pop(\"direction\")\r\n    X = df\r\n    \r\n    X_train,X_test,y_train,y_test = train_test_split(X, y ,train_size=0.65, shuffle = False)\r\n    \r\n    # Create models\r\n    print(\"Hit Rates/Confusion Matrices:\\n\") \r\n    models = [(\"LR\", LogisticRegression()),\r\n              (\"LDA\", LDA()),\r\n              (\"QDA\", QDA()),\r\n              (\"LSVC\", LinearSVC()),\r\n              (\"RSVM\", SVC(\r\n                  C=1000000.0, cache_size=200, class_weight=None,\r\n                  coef0=0.0, degree=3, gamma=0.0001, kernel=\"rbf\",\r\n                  max_iter=-1, probability=False, random_state=None,\r\n                  shrinking=True, tol=0.001, verbose=False)\r\n              ),\r\n              (\"RF\", RandomForestClassifier(\r\n                  n_estimators=1000, criterion=\"gini\",\r\n                  max_depth=None, min_samples_split=2,\r\n                  min_samples_leaf=1, max_features=\"auto\",\r\n                  bootstrap=True, oob_score=False, n_jobs=1,\r\n                  random_state=None, verbose=0)\r\n              )]\r\n    \r\n    # Iterate through models\r\n    for m in models:\r\n        # Train each model\r\n        m[1].fit(X_train, y_train)\r\n        \r\n        # Predict with each model\r\n        pred = m[1].predict(X_test)\r\n        \r\n        # Output hit-rate and confusion matrix\r\n        print(\"%s Balanced accuracy     : %0.3f\" % (m[0], balanced_accuracy_score(y_test, pred)))\r\n        print(\"%s Recall and Specificity: %s\" % (m[0], recall_score(y_test, pred, average=None)))\r\n        print(\"%s Precision scores      : %s\" % (m[0], precision_score(y_test, pred, average=None)))\r\n        print(\"%s Confusion matrix\\n %s\" % (m[0], confusion_matrix(y_test, pred)))\r\n        \r\n        output[m[0]] = {\"balanced_accuracy\" : balanced_accuracy_score(y_test, pred),\r\n                        \"recall\" : recall_score(y_test, pred, average=None),\r\n                        
\"precision\" : precision_score(y_test, pred, average=None)}\r\n \r\n return output\r\n\r\n\r\ndf = create_lagged_series(\"SPY\")\r\nout = forecast_1(df)","sub_path":"forecast_1.py","file_name":"forecast_1.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"398328493","text":"import library as lib\n\n\ndef register(mail, name):\n command='select user.key from user where email='+list_to_value_string([mail])\n result=do(command)\n if len(result)==0:\n key=id_hash_string()\n command='insert into user values('+list_to_value_string([key, mail, name])+')'\n do(command)\n command='select user.key from user where email='+list_to_value_string([mail])\n result=do(command)\n return result\n else:\n return result\n \ndef get_key(mail):\n command='select user.key from user where email='+list_to_value_string([mail])\n result=do(command)\n return result\n\ndef is_key_exist(key):\n command='select * from user where user.key='+list_to_value_string([key])\n result=do(command)\n return result\n\ndef create_model(key, name):\n id=id_hash_string()\n command='insert into model(`id`, `key`, `name`) values('+list_to_value_string([id, key, name])+')'\n result=do(command)\n return result\n\ndef model_list(key):\n command='select id, name, acc, loss from model where model.key='+list_to_value_string([key])+' and train_status not in (1,2)'\n result=do(command)\n return result\n\ndef get_model_info(model_id):\n command='SELECT `model`.`name`, `model`.`size`, `model`.`acc`, `model`.`loss`, `model`.`class_label`, `model`.`share`, `user`.`email` FROM `model`, `user` WHERE `model`.`key` = `user`.`key` AND `model`.`id` = '+list_to_value_string([model_id])\n result=do(command)\n return result\n\ndef get_label_info(label_id):\n command='select share from label where id='+list_to_value_string([label_id])\n result=do(command)\n return result\n\ndef get_model_key(model_id):\n command='select model.key from model where id='+list_to_value_string([model_id])\n result=do(command)\n if len(result)==0:\n return ''\n return result[0]['key']\n \ndef delete_model(model_id):\n labelid_name=label_list(model_id)\n command='delete from model where id='+list_to_value_string([model_id])\n result=do(command)\n try:\n lib.os.remove('models/'+model_id+'.h5')\n except:\n pass\n for label in labelid_name:\n lib.shutil.rmtree('labels/'+label['id'])\n return result\n\ndef is_model_exist(model_id):\n command='select * from model where model.id='+list_to_value_string([model_id])\n result=do(command)\n return result\n \ndef rename_model(id, newname):\n pass\n\ndef add_model_user(id,user):\n pass\n \ndef model_size(id):\n pass\n\ndef create_label(name, model_id):\n id=id_hash_string()\n command='insert into label values('+list_to_value_string([id, name, model_id, '0'])+')'\n result=do(command)\n lib.os.mkdir('labels/'+id)\n return result\n\ndef rename_label(id, newname):\n pass\n\ndef label_list(model_id):\n command='select id, name from label where model_id='+list_to_value_string([model_id])\n result=do(command)\n return result\n\ndef delete_label(label_id):\n command='delete from label where id='+list_to_value_string([label_id])\n result=do(command)\n lib.shutil.rmtree('labels/'+label_id)\n return result\n\ndef is_label_exist(label_id):\n command='select * from label where id='+list_to_value_string([label_id])\n result=do(command)\n return result\n\ndef get_label_key(label_id):\n command='select model.key from model, label where label.model_id=model.id and 
label.id='+list_to_value_string([label_id])\n result=do(command)\n if len(result)==0:\n return ''\n return result[0]['key']\n\ndef get_label_model_id(label_id):\n command='select model_id from label where id='+list_to_value_string([label_id])\n result=do(command)\n if len(result)==0:\n return ''\n return result[0]['model_id']\n\ndef progress_list(key):\n command='select id, name from model where model.key='+list_to_value_string([key])+' and train_status in (1,2)'\n result=do(command)\n return result\n\ndef predictable_model_list(key):\n command='SELECT `model`.`id`, `model`.`name`, `user`.`email` FROM `model`, `user` WHERE `model`.`key` = `user`.`key` AND `model`.`key` = '+list_to_value_string([key])+' AND train_status = '+list_to_value_string([lib.code_table.model_status_predictable])\n result=do(command)\n return result\n\ndef train_model(model_id):\n current_model_status=get_model_status(model_id)\n if current_model_status==lib.code_table.model_status_None:\n command='update model set train_status='+list_to_value_string([lib.code_table.model_status_first_train])+' where id='+list_to_value_string([model_id])\n elif current_model_status==lib.code_table.model_status_predictable:\n command='update model set train_status='+list_to_value_string([lib.code_table.model_status_improve])+' where id='+list_to_value_string([model_id])\n result=do(command)\n return result\ndef train_model_stop(model_id):\n current_model_status=get_model_status(model_id)\n if current_model_status==lib.code_table.model_status_first_train:\n command='update model set train_status='+list_to_value_string([lib.code_table.model_status_None])+' where id='+list_to_value_string([model_id])\n elif current_model_status==lib.code_table.model_status_improve:\n command='update model set train_status='+list_to_value_string([lib.code_table.model_status_predictable])+' where id='+list_to_value_string([model_id])\n result=do(command)\n return result\ndef train_model_finish(model_id):\n command='update model set train_status='+list_to_value_string([lib.code_table.model_status_predictable])+' where id='+list_to_value_string([model_id])\n result=do(command)\n return result\ndef get_model_status(model_id):\n command='select train_status from model where id='+list_to_value_string([model_id])\n result=do(command)\n return result[0]['train_status']\ndef fill_model_details(model_id, size, acc, loss, class_label):\n command='update model set size='+list_to_value_string([size])+', acc='+list_to_value_string([acc])+', loss='+list_to_value_string([loss])+', class_label='+list_to_value_string([class_label])+' where id='+list_to_value_string([model_id])\n result=do(command)\n return result\n\ndef get_class_label(model_id):\n command='select class_label from model where id='+list_to_value_string([model_id])\n result=do(command)\n return result[0]['class_label']\n\n#=== lists ===#\ndef get_progress(model_id):\n command='select epoch, acc, loss from progress_list where model_id='+list_to_value_string([model_id])\n result=do(command)\n return result\ndef update_progress(model_id, epoch, acc, loss):\n if len(get_progress(model_id))==0:\n if acc==None:\n command='insert into progress_list(model_id, epoch) values('+list_to_value_string([model_id, epoch])+')'\n else:\n command='insert into progress_list(model_id, epoch, acc, loss) values('+list_to_value_string([model_id, epoch, acc, loss])+')'\n result=do(command)\n else:\n if acc==None:\n command='update progress_list set epoch='+list_to_value_string([epoch])+' where 
model_id='+list_to_value_string([model_id])\n        else:\n            command='update progress_list set epoch='+list_to_value_string([epoch])+', acc='+list_to_value_string([acc])+', loss='+list_to_value_string([loss])+' where model_id='+list_to_value_string([model_id])\n        result=do(command)\n    return result\ndef optim_progress(model_id, pid):\n    command='update progress_list set pid='+list_to_value_string([pid])+' where model_id='+list_to_value_string([model_id])\n    result=do(command)\n    return result\ndef get_progress_pid(model_id):\n    command='select pid from progress_list where model_id='+list_to_value_string([model_id])\n    result=do(command)\n    return result[0]['pid']\ndef remove_progress(model_id):\n    command='delete from progress_list where model_id='+list_to_value_string([model_id])\n    result=do(command)\n    return result\ndef add_predict(model_id, predict):\n    command='insert into predict_list values('+list_to_value_string([model_id, predict])+')'\n    result=do(command)\n    return result\ndef get_predict(model_id):\n    command='select result from predict_list where model_id='+list_to_value_string([model_id])\n    result=do(command)\n    return result\ndef remove_predict(model_id):\n    command='delete from predict_list where model_id='+list_to_value_string([model_id])\n    result=do(command)\n    return result\n    \n\n\n\n#========== TRADE SYSTEM ==========#\ndef is_model_buyable(model_id):\n    command='select * from model where (train_status='+list_to_value_string([lib.code_table.model_status_improve])+' OR train_status='+list_to_value_string([lib.code_table.model_status_predictable])+') and share='+list_to_value_string(['1'])\n    result=do(command)\n    return result\ndef share_model(model_id, share):\n    command='update model set share='+list_to_value_string([share])+' where id='+list_to_value_string([model_id])\n    result= do(command)\n    return result\ndef model_store(keyword):\n    command='select id, model.name, email from model, user where `model`.`key`=`user`.`key` and (train_status='+list_to_value_string([lib.code_table.model_status_improve])+' OR train_status='+list_to_value_string([lib.code_table.model_status_predictable])+') and share='+list_to_value_string(['1'])+' and (`model`.`name` LIKE BINARY \"%'+keyword+'%\" or `user`.`email` LIKE BINARY \"%'+keyword+'%\")'\n    result=do(command)\n    for i in result:\n        i['description']='None'\n    return result\ndef model_samples(model_id, bound=None):\n    samples=[]\n    for j in label_list(model_id):\n        image_id=lib.os.listdir('labels/'+j['id'])[0]\n        if bound==None:\n            samples.append({\n                'name': j['name'],\n                'base64': str(lib.base64.b64encode(open('labels/'+j['id']+'/'+image_id, 'rb').read()).decode())\n            })\n        else:\n            img=lib.cv2.imread('labels/'+j['id']+'/'+image_id)\n            if img.shape[0]>img.shape[1]:\n                img=lib.cv2.resize(img, (int(bound*img.shape[1]/img.shape[0]), bound), interpolation=lib.cv2.INTER_AREA)\n            elif img.shape[0]<img.shape[1]:\n                img=lib.cv2.resize(img, (int(bound*img.shape[1]/img.shape[0]), bound), interpolation=lib.cv2.INTER_AREA)\n            elif img.shape[0]\n            header = \">%s_%s_%s_%s\" % (rna_split[4], rna_split[5], rna_split[2], rna_split[3])\n            outfile.write(header + \"\\n\" + sequence + \"\\n\") \n    \n    \n    \n","sub_path":"get_rna_sequences.py","file_name":"get_rna_sequences.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"149499629","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nlogger.py\n\nLogging helper\n\n@author: Wu Yudi\n@email: jasper.wuyd@gmail.com\n@date: 2017.05.19\n\"\"\"\n\nfrom datetime import datetime\n\ntry:\n    from 
termcolor import colored\nexcept ImportError:\n    colored = lambda msg, clr: msg\n\n\nclass Logger:\n    def __init__(self):\n        return\n\n    @classmethod\n    def log(cls, message, level=\"info\", color=\"blue\"):\n        now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n        message = \"[{}] {} {}\".format(level.upper(), now, message)\n        print(colored(message, color))\n\n    @classmethod\n    def info(cls, message, color=\"blue\"):\n        now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n        message = \"[INFO] {} {}\".format(now, message)\n        print(colored(message, color))\n\n    @classmethod\n    def warn(cls, message, color=\"red\"):\n        now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n        message = \"[WARN] {} {}\".format(now, message)\n        print(colored(message, color))\n\n    @classmethod\n    def debug(cls, message, color=\"blue\"):\n        now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n        message = \"[DEBUG] {} {}\".format(now, message)\n        print(colored(message, color))\n\n    @classmethod\n    def error(cls, message, color=\"red\"):\n        now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n        message = \"[ERROR] {} {}\".format(now, message)\n        print(colored(message, color))\n","sub_path":"python/devkit/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"495482081","text":"#!/usr/bin/python\n\"\"\"\nGiven an unsorted integer array, find the first missing positive integer.\n\nFor example,\nGiven [1,2,0] return 3,\nand [3,4,-1,1] return 2.\nYour algorithm should run in O(n) time and uses constant space.\n\n#41\nREDO: need to figure out the trick on the first try; the corner cases are annoying.\n\"\"\"\ndef firstmpos(nums):\n    \"\"\"\n    my own solution\n    \"\"\"\n    # Cycle-sort trick: swap each in-range value v = nums[i] into its home slot\n    # nums[v-1]; afterwards the first index i with nums[i] != i+1 gives i+1.\n    i = 0\n    n = len(nums)\n    while i < n:\n        targ = nums[i]\n        if targ-1 < n and targ-1 >= 0 and targ-1 != i and nums[targ-1] != nums[i]:\n            nums[targ-1], nums[i] = nums[i], nums[targ-1]\n        else:\n            i += 1\n\n    for i in range(n):\n        if nums[i]-1 != i:\n            return i+1\n    return n+1\n\ndef firstmpos2(nums):\n    \"\"\"\n    lc c++ solution. 
not better than mine\n \"\"\"\n n = len(nums)\n for i in range(n):\n targ = nums[i]\n while targ-1 < n and targ-1 >= 0 and nums[targ-1] != nums[i]:\n nums[targ-1], nums[i] = nums[i], nums[targ-1]\n targ = nums[i]\n\n for i in range(n):\n if nums[i]-1 != i:\n return i+1\n return n+1\n\ndef test1():\n nums = [1, 2, 0]\n print(firstmpos(nums))\n nums = [1, 2, 0]\n print(firstmpos2(nums))\n print('-----------------')\n\ndef test2():\n nums = [3,4,-1,1]\n print(firstmpos(nums))\n nums = [3,4,-1,1]\n print(firstmpos2(nums))\n print('-----------------')\n\ndef test3():\n nums = []\n print(firstmpos(nums))\n nums = []\n print(firstmpos2(nums))\n print('-----------------')\n\ndef test4():\n nums = [0]\n print(firstmpos(nums))\n nums = [0]\n print(firstmpos2(nums))\n print('-----------------')\n\ndef test5():\n nums = [1]\n print(firstmpos(nums))\n nums = [1]\n print(firstmpos2(nums))\n print('-----------------')\n\ndef test6():\n nums = [1,1]\n print(firstmpos(nums))\n nums = [1,1]\n print(firstmpos2(nums))\n print('-----------------')\n\nif __name__ == '__main__':\n test1()\n test2()\n test3()\n test4()\n test5()\n test6()\n","sub_path":"array/firstMissingPos.py","file_name":"firstMissingPos.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"456944760","text":"# pylint: disable=C,E1101,E1102\nimport unittest\n\nimport torch\n\nfrom e3nn import rs\nfrom e3nn.image.convolution import Convolution\n\n\nclass Tests(unittest.TestCase):\n def _test_equivariance(self, f):\n def rotate(t):\n # rotate 90 degrees in plane of axes 2 and 3\n return t.flip(2).transpose(2, 3)\n\n def unrotate(t):\n # undo the rotation by 3 more rotations\n return rotate(rotate(rotate(t)))\n\n inp = torch.randn(2, 1, 16, 16, 16)\n inp_r = rotate(inp)\n\n diff_inp = (inp - unrotate(inp_r)).abs().max().item()\n self.assertLess(diff_inp, 1e-10) # sanity check\n\n out = f(inp)\n out_r = f(inp_r)\n\n diff_out = (out - unrotate(out_r)).abs().max().item()\n self.assertLess(diff_out, 1e-10)\n\n def test_equivariance(self):\n torch.set_default_dtype(torch.float64)\n\n f = torch.nn.Sequential(\n Convolution([(1, 0)], [(2, 0), (2, 1), (1, 2)], size=5, steps=(0.5, 0.5, 0.9)),\n Convolution([(2, 0), (2, 1), (1, 2)], [(1, 0)], size=5),\n )\n\n self._test_equivariance(f)\n\n def _test_normalization(self, f):\n batch = 3\n size = 5\n input_size = 15\n Rs_in = [(20, 0), (20, 1), (10, 2)]\n Rs_out = [(2, 0), (2, 1), (2, 2)]\n\n conv = f(Rs_in, Rs_out, size)\n\n x = rs.randn(batch, Rs_in, input_size, input_size, input_size)\n y = conv(x)\n\n self.assertEqual(y.size(1), rs.dim(Rs_out))\n\n y_mean, y_std = y.mean().item(), y.std().item()\n\n self.assertAlmostEqual(y_mean, 0, delta=0.3)\n self.assertAlmostEqual(y_std, 1, delta=0.5)\n\n def test_normalization_conv(self):\n self._test_normalization(Convolution)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/image/convolution_test.py","file_name":"convolution_test.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"454335521","text":"from future import standard_library\n\nstandard_library.install_aliases()\nimport os\nimport tempfile\nimport urllib.request\n\nfrom cumulusci.core.exceptions import CumulusCIException\nfrom cumulusci.tasks.command import Command\n\n\nclass GenerateApexDocs(Command):\n \"\"\" Generate Apex documentation from local code \"\"\"\n\n apexdoc_repo_url = 
\"https://github.com/SalesforceFoundation/ApexDoc\"\n jar_file = \"apexdoc.jar\"\n task_options = {\n \"tag\": {\n \"description\": \"The tag to use for links back to the repo. If \"\n + \"not provided, source_url arg to ApexDoc is omitted.\"\n },\n \"source_directory\": {\n \"description\": \"The folder location which contains your apex \"\n + \".cls classes. default=/src/classes/\"\n },\n \"out_dir\": {\n \"description\": \"The folder location where documentation will be \"\n + \"generated to. Defaults to project config value \"\n + \"project/apexdoc/dir if present, otherwise uses repo root.\"\n },\n \"home_page\": {\n \"description\": \"The full path to an html file that contains the \"\n + \"contents for the home page's content area. Defaults to project \"\n + \"config value project/apexdoc/homepage if present, otherwise is \"\n + \"not used.\"\n },\n \"banner_page\": {\n \"description\": \"The full path to an html file that contains the \"\n + \"content for the banner section of each generated page. \"\n + \"Defaults to project config value project/apexdoc/banner if \"\n + \"present, otherwise is not used.\"\n },\n \"scope\": {\n \"description\": \"A semicolon separated list of scopes to \"\n + \"document. Defaults to project config value \"\n + \"project/apexdoc/scope if present, otherwise allows ApexDoc to \"\n + \"use its default (global;public;webService).\"\n },\n \"version\": {\n \"description\": \"Version of ApexDoc to use. Defaults to project \"\n + \"config value project/apexdoc/version.\"\n },\n }\n\n def _init_options(self, kwargs):\n super(GenerateApexDocs, self)._init_options(kwargs)\n self.options[\"command\"] = None\n if \"source_directory\" not in self.options:\n self.options[\"source_directory\"] = os.path.join(\n self.project_config.repo_root, \"src\", \"classes\"\n )\n if \"out_dir\" not in self.options:\n self.options[\"out_dir\"] = (\n self.project_config.project__apexdoc__dir\n if self.project_config.project__apexdoc__dir\n else self.project_config.repo_root\n )\n if \"tag\" not in self.options:\n self.options[\"tag\"] = None\n if \"home_page\" not in self.options:\n self.options[\"home_page\"] = (\n self.project_config.project__apexdoc__homepage\n if self.project_config.project__apexdoc__homepage\n else None\n )\n if \"banner_page\" not in self.options:\n self.options[\"banner_page\"] = (\n self.project_config.project__apexdoc__banner\n if self.project_config.project__apexdoc__banner\n else None\n )\n if \"scope\" not in self.options:\n self.options[\"scope\"] = (\n self.project_config.project__apexdoc__scope\n if self.project_config.project__apexdoc__scope\n else None\n )\n if \"version\" not in self.options:\n if not self.project_config.project__apexdoc__version:\n raise CumulusCIException(\"ApexDoc version required\")\n self.options[\"version\"] = self.project_config.project__apexdoc__version\n\n def _init_task(self):\n super(GenerateApexDocs, self)._init_task()\n self.working_dir = tempfile.mkdtemp()\n self.jar_path = os.path.join(self.working_dir, self.jar_file)\n if self.options[\"tag\"] and not self.project_config.project__git__repo_url:\n raise CumulusCIException(\"Repo URL not found in cumulusci.yml\")\n\n def _run_task(self):\n self._get_jar()\n cmd = \"java -jar {} -s {} -t {}\".format(\n self.jar_path, self.options[\"source_directory\"], self.options[\"out_dir\"]\n )\n if self.options[\"tag\"]:\n cmd += \" -g {}/blob/{}/src/classes/\".format(\n self.project_config.project__git__repo_url, self.options[\"tag\"]\n )\n if self.options[\"home_page\"]:\n 
cmd += \" -h {}\".format(self.options[\"home_page\"])\n if self.options[\"banner_page\"]:\n cmd += \" -a {}\".format(self.options[\"banner_page\"])\n if self.options[\"scope\"]:\n cmd += ' -p \"{}\"'.format(self.options[\"scope\"])\n self.options[\"command\"] = cmd\n self._run_command({})\n\n def _get_jar(self):\n url = \"{}/releases/download/{}/{}\".format(\n self.apexdoc_repo_url, self.options[\"version\"], self.jar_file\n )\n urllib.request.urlretrieve(url, self.jar_path)\n","sub_path":"cumulusci/tasks/apexdoc.py","file_name":"apexdoc.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"256969194","text":"name = input(\"Enter file:\")\nif len(name) < 1 : name = \"mbox-short.txt\"\nhandle = open(name)\ncounts = dict()\nfor line in handle:\n line = line.rstrip()\n #print(line)\n email = line.split()\n if len(email)<1:\n continue\n if email[0]!= 'From':\n continue\n \n else:\n d = email[0:]\n c = email[1]\n print(d)\n \n print(c)\n \n for c in d:\n counts[c] = counts.get(c,0)+1\n \n \n \n \nbigcount = None\nbigword = None\nfor c,count in counts.items():\n if bigcount is None or count>bigcount:\n bigword = c\n bigcount = count\n\n\n#print(bigword,bigcount)","sub_path":"Counting_prac_dict.py","file_name":"Counting_prac_dict.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"574901364","text":"# This file is part of the GBI project.\n# Copyright (C) 2013 Omniscale GmbH & Co. KG \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flask import (\n render_template,\n Blueprint,\n flash,\n redirect,\n url_for,\n request,\n current_app,\n session,\n)\nfrom flask.ext.login import current_user, login_user\nfrom flask.ext.babel import gettext as _\nfrom werkzeug.exceptions import Unauthorized, Forbidden\nfrom sqlalchemy.exc import IntegrityError\n\nfrom geoalchemy import WKTSpatialElement\nfrom geoalchemy.postgis import pg_functions\nfrom shapely.geometry import asShape\n\nfrom json import loads\n\nfrom gbi_server.extensions import db\nfrom gbi_server.model import User, EmailVerification, Log, WMTS\nfrom gbi_server.forms.admin import CreateUserForm, WMTSForm\nfrom gbi_server.forms.user import RecoverSetForm, EditAddressForm\nfrom gbi_server.lib.helper import send_mail\nfrom gbi_server.lib.couchdb import init_user_boxes\nfrom gbi_server.lib.external_wms import write_mapproxy_config\n\n\nadmin = Blueprint(\"admin\", __name__, template_folder=\"../templates\")\n\n\ndef assert_admin_user():\n if current_app.config.get(\"ADMIN_PARTY\"):\n return\n if current_user.is_anonymous():\n raise Unauthorized()\n if not current_user.is_admin:\n raise Forbidden()\n\n\nadmin.before_request(assert_admin_user)\n\n\n@admin.route(\"/admin\")\ndef index():\n return render_template(\"admin/index.html\")\n\n\n@admin.route(\"/admin/user_list\", methods=[\"GET\"])\ndef user_list():\n return render_template(\"admin/user_list.html\", 
users=User.query.all())\n\n\n@admin.route(\"/admin/user_detail/<id>\", methods=[\"GET\", \"POST\"])\ndef user_detail(id):\n    user = User.by_id(id)\n    return render_template(\"admin/user_detail.html\", user=user)\n\n\n@admin.route(\"/admin/verify_user/<id>\", methods=[\"GET\"])\ndef verify_user(id):\n    user = User.by_id(id)\n    user.verified = True\n    db.session.commit()\n    flash(_(\"User verified\", email=user.email), \"success\")\n    return redirect(url_for(\"admin.user_detail\", id=id))\n\n\n@admin.route(\"/admin/login_as/<id>\", methods=[\"GET\"])\ndef login_as(id):\n    user = User.by_id(id)\n    login_user(user)\n    session[\"authproxy_token\"] = user.authproxy_token\n    return redirect(url_for(\"user.home\"))\n\n\n@admin.route(\"/admin/activate_user/<id>\", methods=[\"GET\"])\ndef activate_user(id):\n    user = User.by_id(id)\n    user.active = True\n    db.session.commit()\n\n    send_mail(\n        _(\"Account activated mail subject\"),\n        render_template(\"user/activated_mail.txt\", user=user, _external=True),\n        [user.email],\n    )\n\n    flash(_(\"User activated\", email=user.email), \"success\")\n    return redirect(url_for(\"admin.user_detail\", id=id))\n\n\n@admin.route(\"/admin/create_user\", methods=[\"GET\", \"POST\"])\ndef create_user():\n    form = CreateUserForm()\n    if form.validate_on_submit():\n        user = User(form.data[\"email\"], form.data[\"password\"])\n        user.realname = form.data[\"realname\"]\n        user.florlp_name = form.data[\"florlp_name\"]\n        user.type = form.data.get(\"type\")\n        user.street = form.data[\"street\"]\n        user.housenumber = form.data[\"housenumber\"]\n        user.zipcode = form.data[\"zipcode\"]\n        user.city = form.data[\"city\"]\n        if not form.data[\"verified\"]:\n            verify = EmailVerification.verify(user)\n            db.session.add(verify)\n            send_mail(\n                _(\"Email verification mail subject\"),\n                render_template(\n                    \"user/verify_mail.txt\", user=user, verify=verify, _external=True\n                ),\n                [user.email],\n            )\n        else:\n            user.verified = True\n        if form.data[\"activate\"]:\n            user.active = True\n        db.session.add(user)\n        db.session.commit()\n\n        init_user_boxes(user, current_app.config.get(\"COUCH_DB_URL\"))\n\n        flash(_(\"User created\", email=user.email), \"success\")\n        return redirect(url_for(\"admin.user_list\"))\n    return render_template(\"admin/create_user.html\", form=form)\n\n\n@admin.route(\"/admin/edit_user/<id>\", methods=[\"GET\", \"POST\"])\ndef edit_user(id):\n    user = User.by_id(id)\n    form = EditAddressForm(request.form, user)\n    if form.validate_on_submit():\n        user.realname = form.data[\"realname\"]\n        user.florlp_name = form.data[\"florlp_name\"]\n        user.street = form.data[\"street\"]\n        user.housenumber = form.data[\"housenumber\"]\n        user.zipcode = form.data[\"zipcode\"]\n        user.city = form.data[\"city\"]\n        db.session.commit()\n        flash(_(\"User edited\", username=user.realname), \"success\")\n        return redirect(url_for(\"admin.user_detail\", id=id))\n    return render_template(\"admin/edit_user.html\", form=form)\n\n\n@admin.route(\"/admin/reset_user_password/<id>\", methods=[\"GET\", \"POST\"])\ndef reset_user_password(id):\n    form = RecoverSetForm()\n    if form.validate_on_submit():\n        user = User.by_id(id)\n        user.update_password(form.password.data)\n        db.session.commit()\n        flash(_(\"Password reset\", username=user.realname), \"success\")\n        return redirect(url_for(\"admin.user_detail\", id=id))\n    return render_template(\"admin/reset_user_password.html\", form=form)\n\n\n@admin.route(\"/admin/remove_user/<id>\", methods=[\"GET\", \"POST\"])\ndef remove_user(id):\n    user = User.by_id(id)\n    if request.method == \"POST\":\n        email = user.email\n        
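        # Keep a copy of the address before the row is deleted so the flash message below can still show it.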
db.session.delete(user)\n        db.session.commit()\n        flash(_(\"User removed\", username=email), \"success\")\n        return redirect(url_for(\"admin.user_list\"))\n    return render_template(\"admin/remove_user.html\", user=user)\n\n\n@admin.route(\"/admin/user_log/<id>\", methods=[\"GET\"])\ndef user_log(id):\n    user = User.by_id(id)\n    result = (\n        db.session.query(Log, Log.geometry.envelope().wkt).filter_by(user=user).all()\n    )\n    return render_template(\"admin/user_log.html\", logs=result)\n\n\n@admin.route(\"/admin/wmts/list\", methods=[\"GET\"])\ndef wmts_list():\n    return render_template(\"admin/wmts_list.html\", wmts=WMTS.query.all())\n\n\n@admin.route(\"/admin/wmts/edit\", methods=[\"GET\", \"POST\"])\n@admin.route(\"/admin/wmts/edit/<id>\", methods=[\"GET\", \"POST\"])\ndef wmts_edit(id=None):\n\n    wmts = (\n        db.session.query(WMTS, pg_functions.geojson(WMTS.view_coverage.transform(3857)))\n        .filter_by(id=id)\n        .first()\n        if id\n        else None\n    )\n    if wmts:\n        wmts[0].view_coverage = wmts[1]\n        wmts = wmts[0]\n        form = WMTSForm(request.form, wmts)\n    else:\n        form = WMTSForm(request.form)\n\n    if form.validate_on_submit():\n        if not wmts:\n            wmts = WMTS()\n            db.session.add(wmts)\n        if form.data[\"is_background_layer\"]:\n            old_background_layer = WMTS.query.filter_by(\n                is_background_layer=True\n            ).first()\n            if old_background_layer:\n                old_background_layer.is_background_layer = False\n        wmts.url = form.data[\"url\"]\n        wmts.username = form.data[\"username\"]\n        wmts.password = form.data[\"password\"]\n        wmts.name = form.data[\"name\"]\n        wmts.title = form.data[\"title\"]\n        wmts.layer = form.data[\"layer\"]\n        wmts.format = form.data[\"format\"]\n        wmts.srs = form.data[\"srs\"]\n        wmts.matrix_set = form.data[\"matrix_set\"]\n        geom = asShape(loads(form.data[\"view_coverage\"]))\n        wmts.view_coverage = WKTSpatialElement(\n            geom.wkt, srid=3857, geometry_type=\"POLYGON\"\n        )\n\n        wmts.view_level_start = form.data[\"view_level_start\"]\n        wmts.view_level_end = form.data[\"view_level_end\"]\n        wmts.is_background_layer = form.data[\"is_background_layer\"]\n        wmts.is_baselayer = not form.data[\"is_transparent\"]\n        wmts.is_overlay = form.data[\"is_transparent\"]\n        wmts.is_transparent = form.data[\"is_transparent\"]\n        wmts.is_visible = form.data[\"is_visible\"]\n        wmts.is_public = form.data[\"is_public\"]\n        try:\n            db.session.commit()\n            write_mapproxy_config(current_app)\n            flash(_(\"Saved WMTS\"), \"success\")\n            return redirect(url_for(\"admin.wmts_list\"))\n        except IntegrityError:\n            db.session.rollback()\n            flash(_(\"WMTS with this name already exists\"), \"error\")\n    return render_template(\"admin/wmts_edit.html\", form=form, id=id)\n\n\n@admin.route(\"/admin/wmts/remove/<id>\", methods=[\"GET\"])\ndef wmts_remove(id):\n    wmts = WMTS.by_id(id)\n    if wmts:\n        db.session.delete(wmts)\n        db.session.commit()\n        flash(_(\"WMTS removed\"), \"success\")\n    return redirect(url_for(\"admin.wmts_list\"))\n","sub_path":"gr/gbi-server/app/gbi_server/views/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":9089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"84640495","text":"from datetime import timedelta\n\nfrom . 
import defaults\n\n\nsettings = defaults.merge({\n    'sqlalchemy': {\n        'url': 'postgresql+psycopg2://localhost/axial',\n        'pool_recycle': 3600,\n    },\n\n    'celery': {\n        'CELERY_ENABLE_UTC': True,\n        'CELERY_MESSAGE_COMPRESSION': 'gzip',\n        'CELERY_TASK_PUBLISH_RETRY': True,\n        'CELERY_DEFAULT_RATE_LIMIT': None,\n        'CELERY_TASK_RESULT_EXPIRES': timedelta(days=1),\n        'CELERY_ACKS_LATE': True,\n        'CELERY_MAX_CACHED_RESULTS': 5000,\n        'CELERY_CHORD_PROPAGATES': True,\n        'CELERY_TASK_SERIALIZER': 'json',\n        'CELERY_ACCEPT_CONTENT': ['application/json'],\n        'CELERY_RESULT_SERIALIZER': 'json',\n        'CELERYD_CONCURRENCY': 4,\n        'CELERYD_PREFETCH_MULTIPLIER': 4,\n        'BROKER_URL': 'amqp://guest@localhost//',\n        'BROKER_HEARTBEAT': 10.0,\n        'BROKER_HEARTBEAT_CHECKRATE': 2.0,\n        'BROKER_USE_SSL': False,\n        'BROKER_CONNECTION_TIMEOUT': 4,\n        'BROKER_CONNECTION_RETRY': True,\n        'BROKER_CONNECTION_MAX_RETRIES': 100,\n    }\n})\n","sub_path":"src/axm/axm/config/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"638429390","text":"# _*_ coding: utf-8 _*_\n\n\"\"\"\nconcur_async.py by xianhu\n\"\"\"\n\nimport re\nimport sys\nimport time\nimport asyncio\nimport logging\nimport datetime\nimport aiohttp\nfrom ..abcbase import TPEnum, BasePool\nfrom ..utilities import get_url_legal, make_random_useragent\n\n\nclass BaseAsyncPool(BasePool):\n    \"\"\"\n    class of BaseAsyncPool, as the subclass of BasePool\n    \"\"\"\n\n    def __init__(self, max_repeat=3, sleep_time=0, max_deep=0, save_pipe=sys.stdout, url_filter=None, loop=None):\n        \"\"\"\n        constructor\n        \"\"\"\n        BasePool.__init__(self, url_filter=url_filter)\n\n        self.max_repeat = max_repeat    # default: 3, maximum repeat fetching time for a url\n        self.sleep_time = sleep_time    # default: 0, sleeping time after a fetching for a url\n        self.max_deep = max_deep        # default: 0, if -1, spider will not stop until all urls are fetched\n        self.save_pip = save_pipe      # default: sys.stdout, also can be a file handler\n\n        self.loop = loop or asyncio.get_event_loop()        # event_loop from parameter or call get_event_loop()\n        self.queue = asyncio.PriorityQueue(loop=self.loop)  # (priority, url, keys, deep, repeat)\n\n        self.start_time = time.time()   # start time of this pool\n        return\n\n    def start_work_and_wait_done(self, fetcher_num=10, is_over=True):\n        \"\"\"\n        start this pool, and wait for finishing\n        :param fetcher_num: the count of tasks\n        :param is_over: not useful in this class\n        \"\"\"\n        try:\n            self.start_time = time.time()\n            self.loop.run_until_complete(self._start(fetcher_num=fetcher_num))\n        except KeyboardInterrupt as excep:\n            logging.warning(\"%s KeyboardInterrupt: %s\", self.__class__.__name__, excep)\n        finally:\n            self.loop.stop()\n            self.loop.run_forever()\n            self.loop.close()\n        return\n\n    async def _start(self, fetcher_num):\n        \"\"\"\n        start tasks, and wait for finishing\n        \"\"\"\n        tasks_list = [asyncio.Task(self.work(index), loop=self.loop) for index in range(fetcher_num)]\n        await self.queue.join()\n        for task in tasks_list:\n            task.cancel()\n        self.print_status()\n        return\n\n    async def work(self, index):\n        \"\"\"\n        working process, must be rewritten in a subclass\n        \"\"\"\n        raise NotImplementedError\n\n    def update_number_dict(self, key, value):\n        \"\"\"\n        update self.number_dict of this pool\n        \"\"\"\n        self.number_dict[key] += value\n        return\n\n    def add_a_task(self, task_name, task_content):\n        \"\"\"\n        add a task based on task_name; a url must pass the url_filter unless it is a retry (repeat > 0)\n        \"\"\"\n        if 
(task_content[-1] > 0) or (not self.url_filter) or self.url_filter.check_and_add(task_content[1]):\n self.queue.put_nowait(task_content)\n self.update_number_dict(TPEnum.URL_NOT_FETCH, +1)\n return\n\n async def get_a_task(self, task_name):\n \"\"\"\n get a task based on task_name, if queue is empty, raise queue.Empty\n \"\"\"\n task_content = await self.queue.get()\n self.update_number_dict(TPEnum.URL_NOT_FETCH, -1)\n self.update_number_dict(TPEnum.TASKS_RUNNING, +1)\n return task_content\n\n def finish_a_task(self, task_name):\n \"\"\"\n finish a task based on task_name, call queue.task_done()\n \"\"\"\n self.queue.task_done()\n self.update_number_dict(TPEnum.TASKS_RUNNING, -1)\n return\n\n def print_status(self):\n \"\"\"\n print the information of this pool\n \"\"\"\n info = \"%s status: running_tasks=%s;\" % (self.__class__.__name__, self.number_dict[TPEnum.TASKS_RUNNING])\n\n info += \" fetch=(%d, %d);\" % (self.number_dict[TPEnum.URL_NOT_FETCH], self.number_dict[TPEnum.URL_FETCH])\n info += \" parse=(%d, %d);\" % (self.number_dict[TPEnum.HTM_NOT_PARSE], self.number_dict[TPEnum.HTM_PARSE])\n info += \" save=(%d, %d);\" % (self.number_dict[TPEnum.ITEM_NOT_SAVE], self.number_dict[TPEnum.ITEM_SAVE])\n\n info += \" total_seconds=%d\" % (time.time() - self.start_time)\n logging.warning(info)\n return\n\n\nclass AsyncPool(BaseAsyncPool):\n \"\"\"\n class of AsyncPool, as the subclass of BaseAsyncPool\n \"\"\"\n\n async def work(self, index):\n \"\"\"\n working process, fetching --> parsing --> saving\n \"\"\"\n logging.warning(\"Worker[%s] start\", index)\n\n headers = {\"User-Agent\": make_random_useragent(), \"Accept-Encoding\": \"gzip\"}\n session = aiohttp.ClientSession(loop=self.loop, headers=headers)\n try:\n while True:\n # get a task\n priority, url, keys, deep, repeat = await self.get_a_task(task_name=TPEnum.URL_FETCH)\n\n # fetch the content of a url ================================================================\n fetch_result, content = await self.fetch(session, url, keys, repeat)\n if fetch_result > 0:\n self.update_number_dict(TPEnum.URL_FETCH, +1) # =======================\n\n # parse the content of a url ============================================================\n self.update_number_dict(TPEnum.HTM_NOT_PARSE, +1)\n parse_result, url_list, save_list = await self.parse(priority, url, keys, deep, content)\n self.update_number_dict(TPEnum.HTM_NOT_PARSE, -1)\n\n if parse_result > 0:\n self.update_number_dict(TPEnum.HTM_PARSE, +1) # =======================\n\n # add new task to self.queue\n for _url, _keys, _priority in url_list:\n self.add_a_task(TPEnum.URL_FETCH, (_priority, _url, _keys, deep+1, 0))\n\n # save the item of a url ============================================================\n for item in save_list:\n self.update_number_dict(TPEnum.ITEM_NOT_SAVE, +1)\n save_result = await self.save(url, keys, item)\n self.update_number_dict(TPEnum.ITEM_NOT_SAVE, -1)\n\n if save_result:\n self.update_number_dict(TPEnum.ITEM_SAVE, +1) # =======================\n elif fetch_result == 0:\n self.add_a_task(TPEnum.URL_FETCH, (priority+1, url, keys, deep, repeat+1))\n else:\n pass\n\n # finish a task\n self.finish_a_task(task_name=TPEnum.URL_FETCH)\n\n # print the information of this pool\n if self.number_dict[TPEnum.URL_FETCH] % 100 == 0:\n self.print_status()\n except asyncio.CancelledError:\n pass\n\n session.close()\n logging.warning(\"Worker[%s] end\", index)\n return\n\n async def fetch(self, session, url: str, keys: object, repeat: int) -> (int, object):\n \"\"\"\n fetch the 
content of a url, must \"try, except\" and don't change parameters and return\n        :return (fetch_result, content): fetch_result can be -1(fetch failed), 0(need repeat), 1(fetch success), content can be anything\n        \"\"\"\n        logging.debug(\"Fetcher start: keys=%s, repeat=%s, url=%s\", keys, repeat, url)\n\n        try:\n            response = await session.get(url, params=None, data=None, timeout=5)\n            if response.history:\n                logging.debug(\"Fetcher redirect: keys=%s, repeat=%s, url=%s\", keys, repeat, url)\n\n            fetch_result, content = 1, (response.status, response.url, await response.text())\n            await response.release()\n        except Exception as excep:\n            if repeat >= self.max_repeat:\n                fetch_result, content = -1, None\n                logging.error(\"Fetcher error: %s, keys=%s, repeat=%s, url=%s\", excep, keys, repeat, url)\n            else:\n                fetch_result, content = 0, None\n                logging.debug(\"Fetcher repeat: %s, keys=%s, repeat=%s, url=%s\", excep, keys, repeat, url)\n\n        logging.debug(\"Fetcher end: fetch_result=%s, url=%s\", fetch_result, url)\n        return fetch_result, content\n\n    async def parse(self, priority: int, url: str, keys: object, deep: int, content: object) -> (int, list, list):\n        \"\"\"\n        parse the content of a url, must \"try, except\" and don't change parameters and return\n        :return (parse_result, url_list, save_list): parse_result can be -1(parse failed), 1(parse success)\n        :return (parse_result, url_list, save_list): url_list is [(url, keys, priority), ...], save_list is [item, ...]\n        \"\"\"\n        logging.debug(\"Parser start: priority=%s, keys=%s, deep=%s, url=%s\", priority, keys, deep, url)\n\n        try:\n            *_, cur_html = content\n\n            parse_result, url_list = 1, []\n            if (self.max_deep < 0) or (deep < self.max_deep):\n                a_list = re.findall(r\"<a[\\w\\W]+?href=\\\"(?P<url>[\\w\\W]{5,}?)\\\"[\\w\\W]*?>[\\w\\W]+?</a>\", cur_html, flags=re.IGNORECASE)\n                url_list = [(_url, keys, priority + 1) for _url in [get_url_legal(href, url) for href in a_list]]\n            else:\n                logging.debug(\"Parser stop parse urls: priority=%s, keys=%s, deep=%s, url=%s\", priority, keys, deep, url)\n\n            title = re.search(r\"<title>(?P<title>[\\w\\W]+?)</title>\", cur_html, flags=re.IGNORECASE)\n            save_list = [(title.group(\"title\"), datetime.datetime.now()), ] if title else []\n        except Exception as excep:\n            parse_result, url_list, save_list = -1, [], []\n            logging.error(\"Parser error: %s, priority=%s, keys=%s, deep=%s, url=%s\", excep, priority, keys, deep, url)\n\n        logging.debug(\"Parser end: parse_result=%s, len(url_list)=%s, len(save_list)=%s, url=%s\", parse_result, len(url_list), len(save_list), url)\n        return parse_result, url_list, save_list\n\n    async def save(self, url: str, keys: object, item: object) -> bool:\n        \"\"\"\n        save the item of a url, must \"try, except\" and don't change parameters and return\n        :return save_result: True or False\n        \"\"\"\n        logging.debug(\"Saver start: keys=%s, url=%s\", keys, url)\n\n        try:\n            self.save_pip.write(\"\\t\".join([url, str(keys)] + [str(i) for i in item]) + \"\\n\")\n            self.save_pip.flush()\n            save_result = True\n        except Exception as excep:\n            save_result = False\n            logging.error(\"Saver error: %s, keys=%s, url=%s\", excep, keys, url)\n\n        logging.debug(\"Saver end: save_result=%s, url=%s\", save_result, url)\n        return save_result\n","sub_path":"spider/concurrent/concur_async.py","file_name":"concur_async.py","file_ext":"py","file_size_in_byte":10738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"387711635","text":"import xml.etree.ElementTree as ET\r\n\r\ntree = ET.parse(\"xml_test.xml\") # xmltest.xml is the xml document\r\nroot = 
tree.getroot() # get the root element with all contents\r\nprint(root.tag)  # print the root tag of the document\r\n\r\n# 1: iterate over the whole xml document\r\nfor child in root:\r\n    print(child.tag, child.attrib)\r\n    for i in child:\r\n        print(i.tag,i.text)\r\n\r\n# 2: iterate over the year nodes only\r\nfor node in root.iter('year'):\r\n    print(node.tag,node.text)\r\n\r\n# 3: modify\r\nfor node in root.iter('year'):\r\n    new_year = int(node.text) + 1\r\n    node.text = str(new_year)\r\n    node.set(\"updated\",\"yes\") # add an attribute to the year node here\r\ntree.write(\"xmltest.xml\")\r\n\r\n# 4: delete nodes\r\nfor country in root.findall('country'):\r\n    rank = int(country.find('rank').text)\r\n    if rank > 50:\r\n        root.remove(country)\r\n\r\ntree.write('output.xml')\r\n\r\n","sub_path":"education/B:oldBoy/5:Day/5:XML学习.py","file_name":"5:XML学习.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"416142199","text":"import numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\n# import matplotlib.image as mpimg\n# from skimage import io, color, transform, img_as_ubyte, img_as_float\nfrom PIL import Image\nfrom ensemble import AdaBoostClassifier\nfrom feature import NPDFeature\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nimgs = []\nimg_labels = []\nimg_features = []\nWEAKERS_LIMIT = 5\n\n\ndef load_img():\n    for i in range(0, 500):\n        with Image.open(\"./datasets/original/face/face_\"+\"{:0>3d}\".format(i)+\".jpg\") as image:\n            image = image.convert('L')\n            image = image.resize((24, 24))\n            imgs.append(np.array(image))\n            img_labels.append(1)\n        with Image.open(\"./datasets/original/nonface/nonface_\" + \"{:0>3d}\".format(i) + \".jpg\") as image:\n            image = image.convert('L')\n            image = image.resize((24, 24))\n            imgs.append(np.array(image))\n            img_labels.append(-1)\n\n\ndef npd_feature():\n    for i in range(0, len(imgs)):\n        print(i)\n        features = NPDFeature(imgs[i]).extract()\n        img_features.append(features)\n    \n    \nif __name__ == \"__main__\":\n    load_img()\n    npd_feature()\n    img_features = np.array(img_features)\n    img_labels = np.array(img_labels).reshape((-1, 1))\n    print(img_features.shape)\n    print(img_features)\n    X_train, X_val, y_train, y_val = train_test_split(img_features, img_labels, test_size=0.25)\n    print(X_train.shape, X_val.shape, y_train.shape, y_val.shape)\n\n    ada = AdaBoostClassifier(DecisionTreeClassifier, WEAKERS_LIMIT)\n    ada.fit(X_train, y_train)\n\n    y_predict = ada.predict(X_val)\n    acc = ada.predict_scores(X_val, y_val)\n\n    print(acc)\n\n    y_val = np.array(list(map(lambda x: int(x), y_val.reshape(1, -1)[0])))\n    y_predict = np.array(list(map(lambda x: int(x), y_predict.reshape(1, -1)[0])))\n\n    print(y_predict)\n    print(y_val)\n\n    reportContent = 'Accuracy = ' + str(acc) + '\\n'\n    reportContent += classification_report(y_val, y_predict)\n\n    with open('report.txt', 'w') as report:\n        report.write(reportContent)\n\n    pass\n    ","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"572655389","text":"import zipfile\nimport re\nimport os\nimport shutil\nfilename1 = '../../16_Etazh.xlsx'\n\n\ndef unzip(name):\n    \"\"\"\n    Unpacks the zip archive named name into the TEMP folder inside\n    the program's directory. Returns False if the file does not exist,\n    otherwise True.\n    \"\"\"
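    # An .xlsx workbook is itself a zip archive: the shared strings live in xl/sharedStrings.xml\n    # and the cell grid in xl/worksheets/sheet1.xml; cells with t=\"s\" hold indexes into the shared strings.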
    if not os.path.exists(name) or not os.path.isfile(name):\n        return False\n    with zipfile.ZipFile(name) as z:\n        z.extractall('TEMP')\n    return True\n\n\ndef remove_dir(name):\n    shutil.rmtree(name)\n\n\ndef readfile(name):\n    with open(name, encoding='utf-8') as f:\n        data = f.read()\n    return data\n\n\ndef extract_strings(data):\n    return re.findall(r'<t.*?>(.*?)</t>', data)\n\n\ndef extract_rows(page):\n    content = re.search(r'<sheetData>(.*)</sheetData>', page).group()\n    return re.findall(r'<row.*?>(.*?)</row>', content)\n\n\ndef parse_flags(data):\n    excel_position = re.search(r'r=\".*?\"', data).group()\n    flag = re.findall(r't=\"(.*?)\"', data)\n    if len(flag) > 0: flag = flag[0]\n    letters = re.search('[A-Z]+', excel_position).group().encode(encoding='utf-8')\n    column = int(re.search(r'\\d+', excel_position).group())\n    row = 0\n    for i in range(len(letters)):\n        row = row * 26 + letters[i] - 64\n    return ((column - 1, row - 1), flag)\n\n\ndef parse_row(row, strings):\n    result = {}\n    item_reg = re.compile(r'<c (.*?)>.*?<v>(.*?)</v>')\n    items = re.findall(item_reg, row)\n    for i in range(len(items)):\n        flags = parse_flags(items[i][0])\n        pos = flags[0]\n        val = items[i][1]\n        if flags[1] == 's': val = strings[int(val)]\n        result[pos] = val\n    return result\n\n\ndef main():\n    unzip(filename1)\n\n    strings = extract_strings(readfile('TEMP/xl/sharedStrings.xml'))\n    rows = extract_rows(readfile('TEMP/xl/worksheets/sheet1.xml'))\n    items = {}\n    for row in rows: items.update(parse_row(row, strings))\n    #print(rows)\n    for i in items: print('%-2d, %-2d: \"%s\"' % (i[0], i[1], items[i]))\n\n    remove_dir('TEMP')\n\n\nif __name__ == '__main__':\n    main()","sub_path":"parsing_Excel.py","file_name":"parsing_Excel.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"35358660","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nGiven an array of integers nums and a target value target, find the two integers in the array that add up to the target and return their array indices.\r\n\r\nYou may assume that each input has exactly one answer, but you may not use the same element of the array twice.\r\n\r\nExample:\r\n\r\nGiven nums = [2, 7, 11, 15], target = 9\r\n\r\nBecause nums[0] + nums[1] = 2 + 7 = 9,\r\nreturn [0, 1]\r\n\"\"\"\r\n\r\n\r\n# My answer\r\ndef twoSum(nums, target):\r\n    result = []\r\n    tem_list = []\r\n    for i in range(len(nums)):\r\n        n = 1\r\n        for j in nums[i+1:]:\r\n            if j not in tem_list:\r\n                if nums[i] + j == target:\r\n                    result.append(i)\r\n                    result.append(i+n)\r\n                    tem_list.append(nums[i])\r\n                    tem_list.append(j)\r\n            n += 1\r\n    return result\r\n\r\n\r\n# The accepted answer\r\ndef twoSum(nums, target):\r\n    \"\"\"\r\n    :type nums: List[int]\r\n    :type target: int\r\n    :rtype: List[int]\r\n    \"\"\"\r\n    hashmap = {}\r\n    for index, num in enumerate(nums):\r\n        another_num = target - num\r\n        if another_num in hashmap:\r\n            return [hashmap[another_num], index]\r\n        hashmap[num] = index\r\n    return None\r\n\r\n\r\nprint(twoSum([3, 3, 3, 3], 6))\r\n","sub_path":"leed_code/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"6143472","text":"\nfrom tkinter import *\n\nclass RadioFont(Frame):\n    \n    def __init__(self):\n        \n        #Radio\n        Frame.__init__(self)\n        self.pack(expand = YES,fill = BOTH)\n        self.master.title(\"Demostracao do RadioButton, checbutton e botao\")\n        \n        self.frame1 = Frame(self)\n        self.frame1.pack()\n        \n        self.text = Entry(self.frame1,width=40,font=\"Arial 10\")\n        self.text.insert(INSERT,\"Trocando de Cor Para\")\n        self.text.pack(padx=5,pady=5)\n        \n        
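        # The three Radiobuttons below share one StringVar; selecting a button stores its value\n        # ('red', 'green' or 'blue') and the command= callback recolors the Entry text accordingly.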
self.frame2 = Frame(self)\n        self.frame2.pack()\n        \n        self.chosenColor = StringVar()\n        self.chosenColor.set(0)\n        self.radioRed = Radiobutton(self.frame2, text = \"Vermelho\", variable = self.chosenColor, value=\"red\",command=self.changeColor)\n        self.radioRed.pack(side=LEFT,padx=5,pady=5)\n        self.radioGreen = Radiobutton(self.frame2, text = \"Verde\", variable = self.chosenColor, value=\"green\",command=self.changeColor)\n        self.radioGreen.pack(side=LEFT,padx=5,pady=5)\n        self.radioBlue = Radiobutton(self.frame2, text = \"Azul\", variable = self.chosenColor, value=\"blue\",command=self.changeColor)\n        self.radioBlue.pack(side=LEFT,padx=5,pady=5)\n        \n        #Checkbox\n        self.frame3 = Frame(self)\n        self.frame3.pack()\n        \n        self.boldOn = BooleanVar()\n        self.checkBold = Checkbutton(self.frame3,text = \"Bold\" , variable = self.boldOn, command = self.changeFont)\n        self.checkBold.pack(side=LEFT,padx=5,pady=5)\n        \n        self.italicOn = BooleanVar()\n        self.checkItalic = Checkbutton(self.frame3,text = \"Italic\" , variable = self.italicOn, command = self.changeFont)\n        self.checkItalic.pack(side=LEFT,padx=5,pady=5)\n        \n        # Button with an image\n        self.frame4 = Frame(self)\n        self.frame4.pack()\n        \n        self.myImageClear = PhotoImage(file=\"\") # <-- image path\n        self.ClearButton = Button(self.frame4,image = self.myImageClear,command = self.clearFields)\n        self.ClearButton.pack(side=LEFT,padx=5,pady=5)\n        \n    def changeColor(self):\n        if self.chosenColor.get() == \"red\":\n            self.text.configure(fg=\"red\")\n        elif self.chosenColor.get() == \"green\":\n            self.text.configure(fg=\"green\") \n        elif self.chosenColor.get() == \"blue\":\n            self.text.configure(fg=\"blue\") \n        \n    def changeFont(self):\n        \n        desiredFont = \"Arial 10\"\n        \n        if self.boldOn.get():\n            desiredFont += \" bold\"\n        \n        if self.italicOn.get():\n            desiredFont += \" italic\"\n        \n        self.text.config(font=desiredFont)\n        \n    def clearFields(self):\n        self.text.delete(0,END)\n        \ndef main():\n    RadioFont().mainloop()\n    \nif __name__ == \"__main__\":\n    main()\n    \n\n    ","sub_path":"EstudePhyton/src/GUI/ButtonRadioButtonCheckButton.py","file_name":"ButtonRadioButtonCheckButton.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"426942497","text":"# write .pol files for the polygon features in gis/poly-features\n\nfrom stompy.spatial import wkb2shp\nimport os\nimport numpy as np\n\ndef gen_polygons(path):\n    shp_fn=os.path.join(os.path.dirname(__file__),\n                        \"gis/poly-features.shp\")\n    polys = wkb2shp.shp2geom(shp_fn)\n\n    for feat in polys:\n        print(feat['name'])\n        pol_fn=os.path.join(path,feat['name']+\".pol\")\n        with open(pol_fn,'wt') as fp:\n            fp.write(f\"* Transcribed from {shp_fn}\\n\")\n            fp.write(f\"{feat['name']}\\n\")\n            pnts=np.array( feat['geom'].exterior )\n            fp.write(f\"{len(pnts)} 2\\n\")\n            for xy in pnts:\n                fp.write(\"%.3f %.3f\\n\"%(xy[0],xy[1]))\n\nif __name__ == '__main__':\n    gen_polygons('.')\n","sub_path":"dflowfm/gen_polygons.py","file_name":"gen_polygons.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"}
{"seq_id":"503477583","text":"#!/bin/python\n# -*- coding: utf-8 -*-\n# vim:set ts=8 sts=8 sw=8 tw=80 noet cc=80:\n\nimport sys\nimport os\nimport configparser\nimport logging\nimport readline\nimport re\nimport hashlib\nimport rl\nfrom optparse import OptionParser\nfrom getpass import getpass\nfrom client import Client\n\nimport datetime\n\n_TIME_FORMAT = 
'%Y%m%dT%H:%M:%S'\n_PRINT_FORMAT = '%H:%M:%S'\n\ndef time(at=None):\n\t\"\"\"Stringify time in ISO 8601 format.\"\"\"\n\tif not at:\n\t\tat = utcnow()\n\tif type(at) == float:\n\t\tat = datetime.datetime.fromtimestamp(at)\n\tst = at.strftime(_TIME_FORMAT)\n\ttz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'\n\tst += ('Z' if tz == 'UTC' else tz)\n\treturn st\n\ndef localtime(at=None):\n\tif not at:\n\t\tat = now()\n\tif type(at) == float:\n\t\tat = datetime.datetime.fromtimestamp(at)\n\tst = at.strftime(_PRINT_FORMAT)\n\treturn st\n\ndef utcnow():\n\treturn datetime.datetime.utcnow()\n\ndef now():\n\treturn datetime.datetime.now()\n\nlogger = logging.getLogger(__name__)\n\nPROMPT = '%s%s '\nmode = '>'\nenable_bell = False\nno_colors = False\nshow_timestamps = True\n\nPLAIN = '>'\nSTEALTH = '$'\nGOLD = '#'\n\nxmpp = None\n\nencrypted_message_info = \"[Diese Nachricht ist nur für Lima-Gold-Mitglieder \" \\\n\t\t\"lesbar. Mehr auf lima-city.de/gold]\"\nencrypted_link_info = \"[Dieser Link ist nur für Lima-Gold-Mitglieder lesbar. \" \\\n\t\t\"Mehr auf lima-city.de/gold]\"\nencrypted_section_info = \"[Dieser Teil der Nachricht ist nur für \" \\\n\t\t\"Lima-Gold-Mitglieder lesbar. Mehr auf lima-city.de/gold]\"\n\nurl_regex = re.compile(r'(https?|ftps?|ssh|sftp|irc|xmpp)://([a-zA-Z0-9]+)')\njid_regex = re.compile(r'[a-zA-Z0-9]+@(?:[a-zA-Z0-9]+\\.)+[a-zA-Z0-9]+(?:/.*)?')\n\nlongest = 0\nrpad = False\ncolor_sequences = re.compile('\\033\\\\[[^m]+?m')\n\n\nCOLORS = [ \"[31m\", \"[32m\", \"[33m\", \"[34m\", \"[35m\", \"[36m\", \"[37m\" ]\nMENTION_COLOR = \"[93m\"\nINPUT_COLOR = \"[36m\"\nSTEALTH_COLOR = \"[96m\"\nENCRYPTED_COLOR = \"[33m\" # \"gold\"\n\ndef get_nick_color(nick):\n\tmd5 = hashlib.md5(nick.encode())\n\treturn COLORS[md5.digest()[0] % len(COLORS) ]\n\ndef prompt():\n\tglobal xmpp\n\treturn PROMPT % (xmpp.nick, mode)\n\ndef escape_vt(text):\n\treturn text.replace(\"\\033\", \"^[\")\n\ndef show_raw(raw):\n\tif no_colors:\n\t\traw = color_sequences.sub('', raw)\n\t\trl.echo(raw)\n\telse:\n\t\trl.echo(\"\\033[0m%s\" % raw, prompt_prefix=\"\\033%s\" % INPUT_COLOR)\n\ndef show(msg):\n\tshow_raw(escape_vt(msg))\n\ndef show_input(msg):\n\tshow_raw(\"\\033%s%s %s%s\\033[0m\" % (INPUT_COLOR, localtime(),\n\t\t\tescape_vt(prompt()), escape_vt(msg)))\n\nclass Help(object):\n\tdef __init__(self, usage=None, info=None, see=[], topic=None):\n\t\tself.iscommand = usage is not None\n\t\tself.usage = usage\n\t\tself.info = info\n\t\tself.see = see\n\nonline_help = { \"/help\": Help(\"/help [command]\", \"shows help\"),\n\t\t\"/quit\": Help(\"/quit\", \"quit the client\"),\n\t\t\"/encrypt\": Help(\"/encrypt\", \"switch to encrypted (gold) mode. \"\n\t\t\t\t\"Everyone will see, that there was a message, \"\n\t\t\t\t\"but only users with the key can read them\",\n\t\t\t\tsee=[\"/plain\", \"/stealth\", \"/status\", \"modes\"]),\n\t\t\"/plain\": Help(\"/plain\", \"switch to plaintext mode. This is the\"\n\t\t\t\t\" mode, every XMPP client supports.\",\n\t\t\t\tsee=[\"/encrypt\", \"/stealth\", \"/status\",\n\t\t\t\t\t\"modes\"]),\n\t\t\"/gold\": Help(\"/gold\", \"alias for /encrypt\", see=[\"/encrypt\",\n\t\t\t\t\"/stealth\", \"/status\", \"modes\"]),\n\t\t\"/stealth\": Help(\"/stealth\", \"switch to stealth mode. 
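The `get_nick_color` helper above keys a color off the first byte of the nick's MD5 digest, so every participant keeps a stable color across sessions without any stored state. A minimal standalone sketch of the same idea; the named-color palette here is illustrative, while the client itself uses ANSI escape fragments:

```python
# Stable per-nick coloring via a hash, as in get_nick_color above.
# PALETTE is illustrative; the real client indexes into ANSI color codes.
import hashlib

PALETTE = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']

def nick_color(nick):
    digest = hashlib.md5(nick.encode()).digest()
    return PALETTE[digest[0] % len(PALETTE)]  # first digest byte picks the color

# The same nick always maps to the same color, with no lookup table:
assert nick_color('alice') == nick_color('alice')
```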
Messages \"\n\t\t\t\t\"are sent encrypted and regular XMPP clients \"\n\t\t\t\t\"will not see the message at all\",\n\t\t\t\tsee=[\"/encrypt\", \"/plain\", \"/status\", \"modes\"]),\n\t\t\"/status\": Help(\"/status\", \"show the current status.\",\n\t\t\t\tsee=[\"/encrypt\", \"/plain\", \"/stealth\",\n\t\t\t\t\t\"modes\"]),\n\t\t\"/msg\": Help(\"/msg nick|jid message\", \"send a private message \"\n\t\t\t\t\"to \\\"nick\\\" or \\\"jid\\\".\"),\n\t\t\"/enc\": Help(\"/enc text\", \"encrypt text and display the result \"\n\t\t\t\t\"locally. Probably only useful for debugging.\",\n\t\t\t\tsee=[\"/dec\", \"/encr\"]),\n\t\t\"/dec\": Help(\"/dec text\", \"decrypt text and display the result \"\n\t\t\t\t\"locally. Probably only useful for debugging.\",\n\t\t\t\tsee=[\"/enc\", \"/encr\"]),\n\t\t\"/encr\": Help(\"/encr text\", \"encrypt text in the same way as \"\n\t\t\t\t\"/enc does, but send the result unencrypted \"\n\t\t\t\t\"over XMPP. Probably only useful to annoy \"\n\t\t\t\t\"someone.\", see=[\"/enc\", \"/dec\"]),\n\t\t\"/e\": Help(\"/e text\", \"send encrypted text, exactly in the same\"\n\t\t\t\t\"way as in the \\\"encrypt\\\" mode, but without \"\n\t\t\t\t\"switching the mode.\", see=[\"/encrypt\"]),\n\t\t\"/p\": Help(\"/p text\", \"send plain text, exactly in the same \"\n\t\t\t\t\"way as in the \\\"plain\\\" mode, but without \"\n\t\t\t\t\"switching the mode.\", see=[\"/plain\"]),\n\t\t\"/q\": Help(\"/q text\", \"send text quietly (stealth), exactly in \"\n\t\t\t\t\"the same way as in the \\\"stealth\\\" mode, but \"\n\t\t\t\t\"without switching the mode.\",\n\t\t\t\tsee=[\"/stealth\"]),\n\t\t\"/say\": Help(\"/say text\", \"send text literally. This allows to \"\n\t\t\t\t\"start a message with a \\\"/\\\".\"),\n\t\t\"/me\": Help(\"/me text\", \"send a message starting with \\\"/me\\\". \"\n\t\t\t\t\"You know, why this might be useful...\"),\n\t\t\"/bell\": Help(\"/bell [on|off]\", \"sets or shows the usage of the\"\n\t\t\t\t\" terminal's bell. If enabled, the bell will \"\n\t\t\t\t\"ring if a message is received.\"),\n\t\t\"/es\": Help(\"/es plain$encrypted\", \"send a message whose plain \"\n\t\t\t\t\"section is sent unmodified, but the encrypted \"\n\t\t\t\t\"section is replaced with a [censored] message \"\n\t\t\t\t\"for all normal clients. To escape the \"\n\t\t\t\t\"separator character (\\\"$\\\") prefix it with a \"\n\t\t\t\t\"\\\"for the cheat, set \\\\$21 = $33\\\".\",\n\t\t\t\tsee=[\"/e\", \"/eq\", \"/el\"]),\n\t\t\"/eq\": Help(\"/eq plain$encrypted\", \"send a message whose plain \"\n\t\t\t\t\"section is sent unmodified, but the encrypted \"\n\t\t\t\t\"section is completely removed for all normal \"\n\t\t\t\t\"clients. To escape the separator character \"\n\t\t\t\t\"(\\\"$\\\") prefix it with a \\\"\\\\\\\", e.g. \"\n\t\t\t\t\"\\\"for the cheat, set \\\\$21 = $33\\\".\",\n\t\t\t\tsee=[\"/e\", \"/es\", \"/el\"]),\n\t\t\"/el\": Help(\"/el text\", \"send a message where everything \"\n\t\t\t\t\"starting at the first link is [encrypted].\",\n\t\t\t\tsee=[\"/e\", \"/es\", \"/eq\"]),\n\t\t\"/ls\": Help(\"/ls [detail]\", \"list all users in the room\"),\n\t\t\"/macro\": Help(\"/macro text = replacement\", \"define a new \"\n\t\t\t\t\"macro. They are evaluated on the input text \"\n\t\t\t\t\"and can therefore invoke any command. 
However,\"\n\t\t\t\t\" you cannot override any command for \"\n\t\t\t\t\"manipulating macros.\",\n\t\t\t\tsee=[\"/dmacro\", \"/macros\"]),\n\t\t\"/dmacro\": Help(\"/dmacro macro\", \"delete a macro.\",\n\t\t\t\tsee=[\"/macro\", \"/macros\"]),\n\t\t\"/macros\": Help(\"/macros\", \"list all currently defined macros.\",\n\t\t\t\tsee=[\"/macro\", \"/dmacro\"]),\n\t\t\"/save\": Help(\"/save\", \"saves the configuration. Only the \"\n\t\t\t\t\"default mode, the bell setting and the macros \"\n\t\t\t\t\"are saved.\"),\n\t\t\"modes\": Help(topic=\"Modes\", info=\"There exist 3 different \"\n\t\t\t\t\"modes of operation: plaintext, encrypted and \"\n\t\t\t\t\"stealth mode. They influence how messages are \"\n\t\t\t\t\"sent and if a regular client can see and/or \"\n\t\t\t\t\"read them. The current mode is indicated by \"\n\t\t\t\t\"the last character of the prompt.\\n\"\n\t\t\t\t\"> = plaintext, # = encrypted, $ = stealth\",\n\t\t\t\tsee=[\"/plain\", \"/encrypt\", \"/stealth\",\n\t\t\t\t\t\"/status\"]),\n\t\t\"config\": Help(topic=\"Configuration file\", info=\"This clinet \"\n\t\t\t\t\"can be configured with a configuration file \"\n\t\t\t\t\"called xmpp.cfg and located in the current \"\n\t\t\t\t\"directory. The syntax is like a plain ini \"\n\t\t\t\t\"file: there are multiple sections, wich are \"\n\t\t\t\t\"started with a \\\"[section]\\\" and multiple \"\n\t\t\t\t\"values inside a section, one key/value pair \"\n\t\t\t\t\"per line.\\n\"\n\t\t\t\t\"\\n\"\n\t\t\t\t\"The full list of all configurable options:\\n\"\n\t\t\t\t\"[xmpp]\\n\"\n\t\t\t\t\"jid = account@jabber.server\\n\"\n\t\t\t\t\"password = secret\\n\"\n\t\t\t\t\"room = room@conference.jabber.server\\n\"\n\t\t\t\t\"nick = Me\\n\"\n\t\t\t\t\"key = secret\\n\"\n\t\t\t\t\"\\n\"\n\t\t\t\t\"[client]\\n\"\n\t\t\t\t\"bell = False\\n\"\n\t\t\t\t\"history = True\\n\"\n\t\t\t\t\"mode = plain\\n\"\n\t\t\t\t\"logfile = xmpp.log\\n\"\n\t\t\t\t\"\\n\"\n\t\t\t\t\"[ui]\\n\"\n\t\t\t\t\"rpadnicks = False\\n\"\n\t\t\t\t\"colors = True\\n\"\n\t\t\t\t\"timestamps = True\\n\"\n\t\t\t\t\"\\n\"\n\t\t\t\t\"The options have the following meaning:\\n\"\n\t\t\t\t\"jid: the jid of the jabber account.\\n\"\n\t\t\t\t\"password: the account's password. This is \"\n\t\t\t\t\"optional. If it is missing, the client will \"\n\t\t\t\t\"ask on startup.\\n\"\n\t\t\t\t\"room: the jid of the MUC room to join.\\n\"\n\t\t\t\t\"nick: the nick you would like to use in the \"\n\t\t\t\t\"room.\\n\"\n\t\t\t\t\"key: the encryption key for all the encrypted \"\n\t\t\t\t\"messages.\\n\"\n\t\t\t\t\"bell: if this is enabled, the client will \"\n\t\t\t\t\"output a bell character each time a new \"\n\t\t\t\t\"message is received.\\n\"\n\t\t\t\t\"history: if this is enabled, the client will \"\n\t\t\t\t\"try to get 20 lines of history after joining \"\n\t\t\t\t\"the room.\\n\"\n\t\t\t\t\"mode: the default mode. It can be \\\"plain\\\", \"\n\t\t\t\t\"\\\"encrypt\\\" or \\\"stealth\\\". This has the same \"\n\t\t\t\t\"effect as if you enter a /plain, /encrypt or \"\n\t\t\t\t\"/stealth command by hand at each start.\\n\"\n\t\t\t\t\"logfile: the log file for history logging.\\n\"\n\t\t\t\t\"rpadnicks: pad nicks, such that all messages \"\n\t\t\t\t\"are aligned\\n\"\n\t\t\t\t\"colors: enable coloring of messages\\n\"\n\t\t\t\t\"timestamps: show timestamps\"),\n\t\t\"about\": Help(topic=\"About\", info=\"This client was written to \"\n\t\t\t\t\"allow private group chats in public muc \"\n\t\t\t\t\"rooms. 
The encrypted mode was invented to \"\n\t\t\t\t\"show other participants, that a conversation \"\n\t\t\t\t\"is going on, and to show them, that they \"\n\t\t\t\t\"have no chance to participate. The stealth \"\n\t\t\t\t\"mode was invented to hide the fact, that there\"\n\t\t\t\t\" is a conversation at all. To make this \"\n\t\t\t\t\"possible, the XMPP protocol was extended, \"\n\t\t\t\t\"such that regular clients silently ignore the \"\n\t\t\t\t\"stealth messages, but the conference server \"\n\t\t\t\t\"still distributes them to all clients.\")\n\t\t}\n\ndef print_help():\n\tcommands = \" \".join(sorted([ key for key in online_help.keys() \\\n\t\t\tif online_help[key].iscommand ]))\n\ttopics = \" \".join(sorted([ key for key in online_help.keys() \\\n\t\t\tif not online_help[key].iscommand ]))\n\tshow(\"commands: %s\\nhelp topics: %s\\nFor more information, type /help \"\n\t\t\t\"\" % (commands, topics))\n\ndef show_help(subject):\n\tif subject in online_help:\n\t\thlp = online_help[subject]\n\t\tif hlp.iscommand:\n\t\t\ttext = \"COMMAND: %s\\nINFO: %s\" % (hlp.usage, hlp.info)\n\t\telse:\n\t\t\ttext = hlp.info\n\t\tif len(hlp.see) > 0:\n\t\t\ttext += \"\\nSEE ALSO: %s\" % \", \".join(hlp.see)\n\t\tshow(text)\n\telse:\n\t\tshow(\"no help entry found\")\n\nclass NickCompleter(object):\n\tdef __init__(self, xmpp):\n\t\tself.xmpp = xmpp\n\n\tdef complete(self, text, state):\n\t\tif state == 0: # first time for this text: find nicks\n\t\t\tif text:\n\t\t\t\tparticipants = self.xmpp.get_participants()\n\t\t\t\tself.matches = [ participants[jid][\"nick\"] for \\\n\t\t\t\t\t\tjid in participants if \\\n\t\t\t\t\t\tparticipants[jid][\"nick\"] \\\n\t\t\t\t\t\t\t\t.startswith(text) ]\n\t\t\telse:\n\t\t\t\tself.matches = []\n\t\ttry:\n\t\t\treturn self.matches[state]\n\t\texcept IndexError:\n\t\t\treturn None\n\nxmpp = None\nif __name__ == \"__main__\":\n\tlogging.basicConfig(level=logging.ERROR,\n\t\t format=\"%(levelname)-8s %(message)s\")\n\n\tparser = OptionParser()\n\tparser.add_option(\"-f\", \"--file\", dest=\"file\", help=\"Config file path\")\n\tparser.add_option(\"-j\", \"--jid\", dest=\"jid\", help=\"JID\")\n\tparser.add_option(\"-p\", \"--password\", dest=\"password\", help=\"Password\")\n\tparser.add_option(\"-r\", \"--room\", dest=\"room\", help=\"Conference room\")\n\tparser.add_option(\"-n\", \"--nick\", dest=\"nick\", help=\"Nick\")\n\tparser.add_option(\"-k\", \"--key\", dest=\"key\", help=\"Encryption key\")\n\tparser.add_option(\"-l\", \"--log\", dest=\"log\", help=\"Log file path\")\n\tparser.add_option(\"-b\", \"--bell\", dest=\"bell\",\n\t\t\taction=\"store_true\", help=\"Enable bell\")\n\tparser.add_option(\"-B\", \"--no-bell\", dest=\"bell\",\n\t\t\taction=\"store_false\", help=\"Disable bell\")\n\tparser.add_option(\"-m\", \"--mode\", dest=\"mode\", help=\"Default mode\")\n\tparser.add_option(\"-i\", \"--history\", dest=\"history\",\n\t\t\taction=\"store_true\", help=\"Disable history on connect\")\n\tparser.add_option(\"-H\", \"--no-history\", dest=\"history\",\n\t\t\taction=\"store_false\", help=\"Disable history on connect\")\n\tparser.add_option(\"-a\", \"--rpad\", dest=\"rpad\",\n\t\t\taction=\"store_true\", help=\"rpad nicks\")\n\tparser.add_option(\"-A\", \"--no-rpad\", dest=\"rpad\",\n\t\t\taction=\"store_false\", help=\"Do not rpad nicks\")\n\tparser.add_option(\"-c\", \"--colors\", dest=\"colors\",\n\t\t\taction=\"store_true\", help=\"Disable colors\")\n\tparser.add_option(\"-C\", \"--no-colors\", dest=\"colors\",\n\t\t\taction=\"store_false\", help=\"Disable 
colors\")\n\tparser.add_option(\"-t\", \"--timestamps\", dest=\"Enable timestamps\",\n\t\t\taction=\"store_true\", help=\"Disable timestamps\")\n\tparser.add_option(\"-T\", \"--no-timestamps\", dest=\"timestamps\",\n\t\t\taction=\"store_false\", help=\"Disable timestamps\")\n\tparser.add_option(\"-E\", \"--encrypted\", dest=\"encrypted\",\n\t\t\thelp=\"Replacement text for encrypted messages\")\n\tparser.add_option(\"-L\", \"--link\", dest=\"link\",\n\t\t\thelp=\"Replacement text for encrypted links\")\n\tparser.add_option(\"-S\", \"--section\", dest=\"section\",\n\t\t\thelp=\"Replacement text for encrypted sections\")\n\tparser.add_option(\"-J\", \"--no-join-log\", dest=\"joinlog\",\n\t\t\taction=\"store_false\", default=True,\n\t\t\thelp=\"Disable join-time join messages\")\n\t(options, args) = parser.parse_args()\n\n\n\tfilenames = [ \"/etc/limagold.conf\", os.path.expanduser(\"~/.limagoldrc\"),\n\t\t\t\"xmpp.cfg\" ]\n\tif options.file is not None:\n\t\tfilenames += [ options.file ]\n\n\tconfig = configparser.SafeConfigParser()\n\tcfgfiles = config.read(filenames)\n\n\tfor section in [ \"xmpp\", \"client\", \"ui\", \"messages\" ]:\n\t\tif not config.has_section(section):\n\t\t\tconfig.add_section(section)\n\n\tif options.jid is not None:\n\t\tconfig.set(\"xmpp\", \"jid\", options.jid)\n\tif options.password is not None:\n\t\tconfig.set(\"xmpp\", \"password\", options.password)\n\tif options.room is not None:\n\t\tconfig.set(\"xmpp\", \"room\", options.room)\n\tif options.nick is not None:\n\t\tconfig.set(\"xmpp\", \"nick\", options.nick)\n\tif options.key is not None:\n\t\tconfig.set(\"xmpp\", \"key\", options.key)\n\tif options.log is not None:\n\t\tconfig.set(\"client\", \"logfile\", options.log)\n\tif options.bell is not None:\n\t\tconfig.set(\"client\", \"bell\", str(options.bell))\n\tif options.mode is not None:\n\t\tconfig.set(\"client\", \"mode\", options.mode)\n\tif options.history is not None:\n\t\tconfig.set(\"client\", \"history\", str(options.history))\n\tif options.rpad is not None:\n\t\tconfig.set(\"ui\", \"rpadnicks\", str(options.rpad))\n\tif options.colors is not None:\n\t\tconfig.set(\"ui\", \"colors\", str(options.colors))\n\tif options.timestamps is not None:\n\t\tconfig.set(\"ui\", \"timestamps\", str(options.timestamps))\n\tif options.encrypted is not None:\n\t\tconfig.set(\"messages\", \"encrypted\", options.encrypted)\n\tif options.link is not None:\n\t\tconfig.set(\"messages\", \"encrypted_link\", options.link)\n\tif options.section is not None:\n\t\tconfig.set(\"messages\", \"encrypted_section\", options.section)\n\n\tjid = config.get(\"xmpp\", \"jid\")\n\ttry:\n\t\tpassword = config.get(\"xmpp\", \"password\")\n\texcept:\n\t\tpassword = getpass(\"Password: \")\n\troom = config.get(\"xmpp\", \"room\")\n\tnick = config.get(\"xmpp\", \"nick\")\n\tkey = config.get(\"xmpp\", \"key\", fallback=None)\n\tlogfile_name = config.get(\"client\", \"logfile\", fallback=\"xmpp.log\")\n\tenable_bell = config.getboolean(\"client\", \"bell\", fallback=False)\n\tdefault_mode = config.get(\"client\", \"mode\", fallback=\"plain\")\n\thistory = config.getboolean(\"client\", \"history\", fallback=True)\n\trpad = config.getboolean(\"ui\", \"rpadnicks\", fallback=False)\n\tno_colors = not config.getboolean(\"ui\", \"colors\", fallback=True)\n\tshow_timestamps = config.getboolean(\"ui\", \"timestamps\", fallback=True)\n\tencrypted_message_info = config.get(\"messages\", \"encrypted\",\n\t\t\tfallback=encrypted_message_info)\n\tencrypted_link_info = config.get(\"messages\", 
\"encrypted_link\",\n\t\t\tfallback=encrypted_link_info)\n\tencrypted_section_info = config.get(\"messages\", \"encrypted_section\",\n\t\t\tfallback=encrypted_section_info)\n\tjoin_log = options.joinlog\n\n\tmode = GOLD if key is not None else PLAIN\n\n\txmpp = Client(jid, password, room, nick, key, history=history,\n\t\t\tencrypted_msg_info=encrypted_message_info)\n\txmpp.register_plugin(\"xep_0030\") # Service Discovery\n\txmpp.register_plugin(\"xep_0045\") # Multi-User Chat\n\txmpp.register_plugin(\"xep_0199\") # XMPP Ping\n\txmpp.register_plugin(\"encrypt-im\") # encrypted stealth MUC\n\n\tmacros = {}\n\tif config.has_section(\"macros\"):\n\t\tkeys = config.options(\"macros\")\n\t\tfor macro in keys:\n\t\t\tmacros[macro] = config.get(\"macros\", macro)\n\n\tif default_mode == \"plain\" or key is None:\n\t\txmpp.encrypt = False\n\t\tmode = PLAIN\n\telif default_mode == \"gold\" or default_mode == \"encrypt\":\n\t\txmpp.encrypt = True\n\t\tmode = GOLD\n\telif default_mode == \"stealth\":\n\t\txmpp.encrypt = False\n\t\tmode = STEALTH\n\n\tlogfile = open(logfile_name, \"a\")\n\n\tdef log_msg(msgtype, msg, nick):\n\t\tnick = get_formatted_nick(nick);\n\t\tt = time()\n\t\tlines = msg.count(\"\\n\")\n\t\tline = \"%sR %s %03d <%s> %s\" % (msgtype, t, lines, nick, msg)\n\t\ttry:\n\t\t\tlogfile.write(\"%s\\n\" % line)\n\t\t\tlogfile.flush()\n\t\texcept Exception as e:\n\t\t\tshow(\"exception while writing log: %s\" % e)\n\n\tdef log_status(info):\n\t\tt = time()\n\t\tlines = info.count(\"\\n\")\n\t\tline = \"MI %s %03d %s\" % (t, lines, info)\n\t\ttry:\n\t\t\tlogfile.write(\"%s\\n\" % line)\n\t\t\tlogfile.flush()\n\t\texcept Exception as e:\n\t\t\tshow(\"exception while writing log: %s\" % e)\n\n\tdef get_formatted_nick(nick):\n\t\tglobal longest\n\t\tglobal rpad\n\t\tif rpad and len(nick) > longest:\n\t\t\tlongest = len(nick)\n\n\t\treturn nick if not rpad else nick.rjust(longest, ' ')\n\n\tdef muc_msg(msg, nick, jid, role, affiliation, msgtype, echo):\n\t\tnick = get_formatted_nick(nick);\n\t\tif enable_bell and not echo:\n\t\t\tsys.stdout.write(\"\\007\")\n\t\tcolor = INPUT_COLOR if echo else get_nick_color(nick)\n\t\tnormal_color = INPUT_COLOR if echo else \"[0m\"\n\t\tt = localtime()\n\t\ttimestamp = \"%s \" % t if show_timestamps else \"\"\n\t\ttimestamp_nos = \"%s\" % t if show_timestamps else \"\"\n\t\tif msgtype == xmpp.STEALTH:\n\t\t\tif msg.startswith(\"/me \"):\n\t\t\t\tshow_raw(\"\\033%s%s$\\033%s*** %s\\033%s %s\" %\n\t\t\t\t\t\t(STEALTH_COLOR, timestamp_nos,\n\t\t\t\t\t\t\tcolor, escape_vt(nick),\n\t\t\t\t\t\t\tnormal_color,\n\t\t\t\t\t\t\tescape_vt(msg[4:])))\n\t\t\telse:\n\t\t\t\tshow_raw(\"\\033%s%s$\\033%s<%s>\\033%s %s\" %\n\t\t\t\t\t\t(STEALTH_COLOR, timestamp_nos,\n\t\t\t\t\t\t\tcolor, nick,\n\t\t\t\t\t\t\tnormal_color, msg))\n\t\t\tlog_msg(\"Q\", msg, nick)\n\t\telif msgtype == xmpp.ENCRYPTED:\n\t\t\tif msg.startswith(\"/me \"):\n\t\t\t\tshow_raw(\"\\033%s%s#\\033%s*** %s\\033%s %s\" %\n\t\t\t\t\t\t(ENCRYPTED_COLOR, timestamp_nos,\n\t\t\t\t\t\t\tcolor, escape_vt(nick),\n\t\t\t\t\t\t\tnormal_color,\n\t\t\t\t\t\t\tescape_vt(msg[4:])))\n\t\t\telse:\n\t\t\t\tshow_raw(\"\\033%s%s#\\033%s<%s>\\033%s %s\" %\n\t\t\t\t\t\t(ENCRYPTED_COLOR, timestamp_nos,\n\t\t\t\t\t\t\tcolor, escape_vt(nick),\n\t\t\t\t\t\t\tnormal_color,\n\t\t\t\t\t\t\tescape_vt(msg)))\n\t\t\tlog_msg(\"E\", msg, nick)\n\t\telse:\n\t\t\tif msg.startswith(\"/me \"):\n\t\t\t\tshow_raw(\"\\033%s%s\\033%s*** %s\\033%s %s\" %\n\t\t\t\t\t\t(normal_color, timestamp, 
color,\n\t\t\t\t\t\t\tescape_vt(nick),\n\t\t\t\t\t\t\tnormal_color,\n\t\t\t\t\t\t\tescape_vt(msg[4:])))\n\t\t\telse:\n\t\t\t\tshow_raw(\"\\033%s%s\\033%s<%s>\\033%s %s\" %\n\t\t\t\t\t\t(normal_color, timestamp,\n\t\t\t\t\t\tcolor, escape_vt(nick),\n\t\t\t\t\t\tnormal_color, escape_vt(msg)))\n\t\t\tlog_msg(\"M\", msg, nick)\n\n\tdef muc_mention(msg, nick, jid, role, affiliation, msgtype, echo, body):\n\t\tnick = get_formatted_nick(nick);\n\t\tif enable_bell and not echo:\n\t\t\tsys.stdout.write(\"\\007\")\n\t\tcolor = get_nick_color(nick)\n\t\tmsgcolor = INPUT_COLOR if echo else MENTION_COLOR\n\t\ttimestamp = \"\\033%s%s \" % (MENTION_COLOR, localtime()) if \\\n\t\t\t\tshow_timestamps else \"\"\n\t\ttimestamp_nos = \"\\033%s%s\" % (MENTION_COLOR, localtime()) if \\\n\t\t\t\tshow_timestamps else \"\"\n\t\tif msgtype == xmpp.STEALTH:\n\t\t\tshow_raw(\"%s\\033%s$\\033%s<<<%s>>>\\033%s \" \"%s\\033[0m\" %\n\t\t\t\t\t(timestamp_nos, msgcolor, color,\n\t\t\t\t\t\tnick, msgcolor, msg))\n\t\t\tlog_msg(\"Q\", body, nick)\n\t\telif msgtype == xmpp.ENCRYPTED:\n\t\t\tshow_raw(\"%s\\033%s#\\033%s<<<%s>>>\\033%s \" \"%s\\033[0m\" %\n\t\t\t\t\t(timestamp_nos, msgcolor, color,\n\t\t\t\t\t\tnick, msgcolor, msg))\n\t\t\tlog_msg(\"E\", body, nick)\n\t\telse:\n\t\t\tshow_raw(\"%s\\033%s<<<%s>>>\\033%s %s\\033[0m\" %\n\t\t\t\t\t(timestamp, color, nick, msgcolor, msg))\n\t\t\tlog_msg(\"M\", body, nick)\n\n\tdef priv_msg(msg, jid):\n\t\tif enable_bell:\n\t\t\tsys.stdout.write(\"\\007\")\n\t\ttimestamp = \"%s \" % localtime() if show_timestamps else \"\"\n\t\tshow_raw(\"\\033%s%s %s\\033[0m\" % (MENTION_COLOR,\n\t\t\t\ttimestamp, escape_vt(jid), escape_vt(msg)))\n\n\tdef muc_online(jid, nick, role, affiliation, localjid, info):\n\t\tglobal longest\n\n\t\tif history or not info:\n\t\t\ttimestamp = \"%s \" % localtime() if show_timestamps \\\n\t\t\t\t\telse \"\"\n\t\t\tshow(\"%s*** online: %s (%s; %s)\" % (timestamp, nick,\n\t\t\t\tjid, role))\n\t\tif not info or join_log:\n\t\t\tlog_status(\"%s <%s> has joined\" % (nick, jid))\n\n\t\tif len(nick) > longest:\n\t\t\tlongest = len(nick)\n\n\tdef muc_offline(jid, nick):\n\t\ttimestamp = \"%s \" % localtime() if show_timestamps else \"\"\n\t\tshow(\"%s*** offline: %s\" % (timestamp, nick))\n\t\tlog_status(\"%s has left\" % nick)\n\n\tdef muc_joined():\n\t\tlog_status('You have joined as \"%s\"' % xmpp.nick)\n\t\tshow('You have joined as \"%s\"' % xmpp.nick)\n\n\tdef save_config():\n\t\tif len(cfgfiles) == 0:\n\t\t\tshow(\"no config file\")\n\t\t\treturn\n\t\tcfgfile = cfgfiles[0]\n\t\tcfg = configparser.SafeConfigParser()\n\t\tcfg.read(cfgfile)\n\t\tstr_mode = \"plain\" if mode == PLAIN else \"encrypt\" \\\n\t\t\t\tif mode == GOLD else \"stealth\"\n\t\tnewcfg = {\n\t\t\t\t\"client\": {\n\t\t\t\t\t\"mode\": str_mode,\n\t\t\t\t\t\"bell\": str(enable_bell) }\n\t\t}\n\t\tfor section in newcfg:\n\t\t\tfor option in newcfg[section]:\n\t\t\t\tcfg.set(section, option,\n\t\t\t\t\t\tnewcfg[section][option])\n\t\tif not cfg.has_section(\"macros\"):\n\t\t\tcfg.add_section(\"macros\")\n\t\tfor macro in cfg.options(\"macros\"):\n\t\t\tcfg.remove_option(\"macros\", macro)\n\t\tfor macro in macros:\n\t\t\tcfg.set(\"macros\", macro, macros[macro])\n\t\tif len(cfg.options(\"macros\")) == 0:\n\t\t\tcfg.remove_section(\"macros\")\n\t\ttry:\n\t\t\twith open(cfgfile, \"w\") as f:\n\t\t\t\tcfg.write(f, True)\n\t\t\tshow('wrote config to \"%s\"' % cfgfile)\n\t\texcept Exception as e:\n\t\t\tshow(\"exception: %s\" % 
e)\n\n\txmpp.add_message_listener(muc_msg)\n\txmpp.add_mention_listener(muc_mention)\n\txmpp.add_online_listener(muc_online)\n\txmpp.add_offline_listener(muc_offline)\n\txmpp.add_private_listener(priv_msg)\n\txmpp.add_init_complete_listener(muc_joined)\n\n\tif xmpp.connect():\n\t\txmpp.process(block=False)\n\telse:\n\t\tprint(\"Unable to connect\")\n\t\tsys.exit(1)\n\n\treadline.read_init_file()\n\treadline.parse_and_bind(\"tab: complete\")\n\treadline.set_completer(NickCompleter(xmpp).complete)\n\trl.set_delete_input()\n\n\ttry:\n\t\twhile True:\n\t\t\tif not no_colors:\n\t\t\t\tsys.stdout.write(\"\\033%s\" % INPUT_COLOR)\n\t\t\t\tsys.stdout.flush()\n\t\t\tline = input(prompt())\n\t\t\tif not line:\n\t\t\t\tcontinue\n\t\t\tmsg = line.strip()\n\t\t\tif len(msg) == 0:\n\t\t\t\tcontinue\n\t\t\tif msg.startswith(\"/macro \"):\n\t\t\t\tshow_input(msg)\n\t\t\t\ttext = msg[7:].strip()\n\t\t\t\tsplit = None\n\t\t\t\ttry:\n\t\t\t\t\tsplit = text.index(\"=\")\n\t\t\t\texcept ValueError as e:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsplit = text.index(\":\")\n\t\t\t\t\texcept ValueError as e:\n\t\t\t\t\t\tshow(\"I have no idea what to \"\n\t\t\t\t\t\t\t\t\"do with \"\n\t\t\t\t\t\t\t\t\"that...\")\n\t\t\t\t\t\tcontinue\n\t\t\t\tcmd = text[:split].strip()\n\t\t\t\tvalue = text[split + 1:].strip()\n\t\t\t\tmacros[cmd] = value\n\t\t\t\tshow(\"new macro: '%s' -> '%s'\" % (cmd, value))\n\t\t\t\tcontinue\n\t\t\telif msg.startswith(\"/dmacro \"):\n\t\t\t\tshow_input(msg)\n\t\t\t\ttext = msg[8:].strip()\n\t\t\t\tif text in macros:\n\t\t\t\t\tdel macros[text]\n\t\t\t\t\tshow(\"macro '%s' deleted\" % text)\n\t\t\t\telse:\n\t\t\t\t\tshow(\"no such macro\")\n\t\t\t\tcontinue\n\t\t\telif msg == \"/macros\" or msg == \"/lmacros\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tshow(\"macros: %s\" % (\"none\" if len(macros) == 0\n\t\t\t\t\t\telse \", \".join([ '\"%s\"' % macro\n\t\t\t\t\t\tfor macro in macros ])))\n\t\t\t\tcontinue\n\t\t\telif msg in macros:\n\t\t\t\tshow_input(msg)\n\t\t\t\tmsg = macros[msg]\n\t\t\tif msg == \"/help\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tprint_help()\n\t\t\telif msg.startswith(\"/help \"):\n\t\t\t\tshow_input(msg)\n\t\t\t\ttext = msg[6:].strip()\n\t\t\t\tshow_help(text)\n\t\t\telif msg == \"/quit\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tbreak\n\t\t\telif msg == \"/encrypt\" or msg == \"/gold\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tif xmpp.key is None:\n\t\t\t\t\tshow(\"no encryption key set\")\n\t\t\t\telse:\n\t\t\t\t\txmpp.encrypt = True\n\t\t\t\t\tmode = GOLD\n\t\t\telif msg == \"/plain\":\n\t\t\t\tshow_input(msg)\n\t\t\t\txmpp.encrypt = False\n\t\t\t\tmode = PLAIN\n\t\t\telif msg == \"/stealth\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tif xmpp.key is None:\n\t\t\t\t\tshow(\"no encryption key set\")\n\t\t\t\telse:\n\t\t\t\t\tmode = STEALTH\n\t\t\telif msg == \"/status\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tshow(\"key %s, mode is %s\" % (\"available\" if\n\t\t\t\t\t\txmpp.key is not None else\n\t\t\t\t\t\t\"not available\", \"plaintext\" if\n\t\t\t\t\t\tmode == PLAIN else \"gold\" if\n\t\t\t\t\t\tmode == GOLD else \"stealth\" if\n\t\t\t\t\t\tmode == STEALTH else \"strange\"))\n\t\t\telif msg.startswith(\"/msg \"):\n\t\t\t\tshow_input(msg)\n\t\t\t\tnick, text = None, None\n\t\t\t\ttry:\n\t\t\t\t\tnick = msg[5:msg[5:].index(\" \") + 5] \\\n\t\t\t\t\t\t\t.strip()\n\t\t\t\t\ttext = msg[5 + len(nick) + 1:].strip()\n\t\t\t\texcept ValueError as e:\n\t\t\t\t\tshow(\"syntax error\")\n\t\t\t\tparticipants = xmpp.get_participants()\n\t\t\t\tnicks = [ participants[jid][\"nick\"]\n\t\t\t\t\t\tfor jid in participants ]\n\t\t\t\tif 
not nick in nicks:\n\t\t\t\t\tif jid_regex.match(nick) is not None:\n\t\t\t\t\t\txmpp.msg_send(nick, text, False)\n\t\t\t\t\telse:\n\t\t\t\t\t\tshow(\"error: no such user\")\n\t\t\t\telse:\n\t\t\t\t\txmpp.msg_send(nick, text, True)\n\t\t\telif msg.startswith(\"/enc \"):\n\t\t\t\tshow_input(msg)\n\t\t\t\ttext = msg[5:].strip()\n\t\t\t\tif xmpp.key is None:\n\t\t\t\t\tshow(\"error: no key set\")\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata = xmpp.encode(text)\n\t\t\t\t\t\tshow(data)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tshow(\"exception: %s\" % e)\n\t\t\telif msg.startswith(\"/dec \"):\n\t\t\t\tshow_input(msg)\n\t\t\t\ttext = msg[5:].strip()\n\t\t\t\tif xmpp.key is None:\n\t\t\t\t\tshow(\"error: no key set\")\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata = xmpp.decode(text)\n\t\t\t\t\t\tshow(\"'%s'\" % data)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tshow(\"exception: %s\" % e)\n\t\t\telif msg.startswith(\"/e \"):\n\t\t\t\ttext = msg[3:].strip()\n\t\t\t\tif xmpp.key is None:\n\t\t\t\t\tshow(\"error: no key set\")\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\txmpp.muc_send(text, enc=True)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tshow(\"exception: %s\" % e)\n\t\t\telif msg.startswith(\"/q \"):\n\t\t\t\ttext = msg[3:].strip()\n\t\t\t\tif xmpp.key is None:\n\t\t\t\t\tprint(\"error: no key set\")\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\txmpp.muc_send(text,\n\t\t\t\t\t\t\t\tstealth=True)\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"exception: %s\" % e)\n\t\t\telif msg.startswith(\"/p \"):\n\t\t\t\ttext = msg[3:].strip()\n\t\t\t\txmpp.muc_send(text, enc=False)\n\t\t\telif msg.startswith(\"/encr \"):\n\t\t\t\ttext = msg[6:].strip()\n\t\t\t\tif xmpp.key is None:\n\t\t\t\t\tprint(\"error: no key set\")\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata = xmpp.encode(text)\n\t\t\t\t\t\txmpp.muc_send(data, enc=False)\n\t\t\t\t\t\tprint(\"%s> %s\" % (nick, data))\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"exception: %s\" % e)\n\t\t\telif msg.startswith(\"/es \"):\n\t\t\t\ttext = msg[4:].strip()\n\t\t\t\tplain_text = \"\"\n\t\t\t\tcipher_text = \"\"\n\t\t\t\tcipher = False\n\t\t\t\tescape = False\n\t\t\t\tfor c in text:\n\t\t\t\t\tif cipher:\n\t\t\t\t\t\tcipher_text += c\n\t\t\t\t\telif escape:\n\t\t\t\t\t\tescape = False\n\t\t\t\t\t\tplain_text += c\n\t\t\t\t\telif c == \"\\\\\":\n\t\t\t\t\t\tescape = True\n\t\t\t\t\telif c == \"$\":\n\t\t\t\t\t\tcipher = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tplain_text += c\n\t\t\t\tplain_msg = \"%s %s\" % (plain_text.strip(),\n\t\t\t\t\t\tencrypted_section_info)\n\t\t\t\tcipher_msg = \"%s%s\" % (plain_text, cipher_text)\n\t\t\t\txmpp.muc_send_encrypted(cipher_msg, plain_msg)\n\t\t\telif msg.startswith(\"/eq \"):\n\t\t\t\ttext = msg[4:].strip()\n\t\t\t\tplain_text = \"\"\n\t\t\t\tcipher_text = \"\"\n\t\t\t\tcipher = False\n\t\t\t\tescape = False\n\t\t\t\tfor c in text:\n\t\t\t\t\tif cipher:\n\t\t\t\t\t\tcipher_text += c\n\t\t\t\t\telif escape:\n\t\t\t\t\t\tescape = False\n\t\t\t\t\t\tplain_text += c\n\t\t\t\t\telif c == \"\\\\\":\n\t\t\t\t\t\tescape = True\n\t\t\t\t\telif c == \"$\":\n\t\t\t\t\t\tcipher = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tplain_text += c\n\t\t\t\tplain_msg = plain_text.strip()\n\t\t\t\tcipher_msg = \"%s%s\" % (plain_text, cipher_text)\n\t\t\t\txmpp.muc_send_encrypted(cipher_msg, plain_msg)\n\t\t\telif msg.startswith(\"/el \"):\n\t\t\t\ttext = msg[4:].strip()\n\t\t\t\tmatch = url_regex.search(text)\n\t\t\t\tif match is not None:\n\t\t\t\t\tmsg = text[:match.start()]\n\t\t\t\t\turl = 
text[match.start():]\n\t\t\t\t\tplain_msg = \"%s %s\" % (msg.strip(),\n\t\t\t\t\t\t\tencrypted_link_info)\n\t\t\t\t\tcipher_msg = \"%s%s\" % (msg, url)\n\t\t\t\t\txmpp.muc_send_encrypted(cipher_msg,\n\t\t\t\t\t\t\tplain_msg)\n\t\t\t\telse:\n\t\t\t\t\t# no link found: send the text itself, not the raw /el command\n\t\t\t\t\txmpp.muc_send(text)\n\t\t\telif msg.startswith(\"/say \"):\n\t\t\t\ttext = msg[5:].strip()\n\t\t\t\txmpp.muc_send(text)\n\t\t\telif msg == \"/bell\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tshow(\"bell is %s\" % (\"enabled\" if enable_bell\n\t\t\t\t\telse \"disabled\"))\n\t\t\telif msg.startswith(\"/bell \"):\n\t\t\t\tshow_input(msg)\n\t\t\t\ttext = msg[6:].strip()\n\t\t\t\tif text == \"on\":\n\t\t\t\t\tenable_bell = True\n\t\t\t\t\tshow(\"bell is now enabled\")\n\t\t\t\telif text == \"off\":\n\t\t\t\t\tenable_bell = False\n\t\t\t\t\tshow(\"bell is now disabled\")\n\t\t\t\telse:\n\t\t\t\t\tshow(\"syntax error\")\n\t\t\telif msg == \"/ls\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tparticipants = xmpp.get_participants()\n\t\t\t\tnicks = sorted([ participants[jid][\"nick\"]\n\t\t\t\t\t\tfor jid in participants ])\n\t\t\t\tshow(\"currently %d participants: %s\" %\n\t\t\t\t\t\t(len(nicks), \", \".join(nicks)))\n\t\t\telif msg == \"/ls detail\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tparticipants = xmpp.get_participants()\n\t\t\t\tnicks = sorted([ \"%s (%s)\" %\n\t\t\t\t\t\t(participants[jid][\"nick\"], jid)\n\t\t\t\t\t\tfor jid in participants ])\n\t\t\t\tshow(\"currently %d participants: %s\" %\n\t\t\t\t\t\t(len(nicks), \", \".join(nicks)))\n\t\t\telif msg == \"/save\":\n\t\t\t\tshow_input(msg)\n\t\t\t\tsave_config()\n\t\t\telif msg[0] == \"/\" and not msg.startswith(\"/me \"):\n\t\t\t\tshow_input(msg)\n\t\t\t\tshow(\"unknown command\")\n\t\t\telse:\n\t\t\t\tif mode == STEALTH:\n\t\t\t\t\txmpp.muc_send(msg, stealth=True)\n\t\t\t\telse:\n\t\t\t\t\txmpp.muc_send(msg)\n\n\texcept KeyboardInterrupt: pass\n\texcept EOFError: pass\n\n\tsys.stdout.write(\"\\033[0m\")\n\tsys.stdout.flush()\n\n\txmpp.disconnect()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"3503606","text":"import gzip\nimport os\n\ndef unzipFile(path):\n \n for ele in os.listdir(path):\n if ele.endswith(\".gz\"):\n \n inPathName = os.path.join(path, ele)\n inF = gzip.GzipFile(inPathName, 'rb')\n s = inF.read()\n inF.close()\n \n outFileName = ele[:-3]\n outPathName = os.path.join(path, outFileName)\n outF = open(outPathName, 'wb') # the file() builtin is Python 2 only\n outF.write(s)\n outF.close()\n \n","sub_path":"unzipFile.py","file_name":"unzipFile.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"421258007","text":"from skmultiflow.data.file_stream import FileStream\nimport numpy as np\nfrom Goowe import Goowe\nfrom skmultiflow.data import SEAGenerator\nimport logging\nfrom GooweMS import GooweMS\nimport random\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n# Prepare the data stream\nstream_1 = SEAGenerator()\nstream_2 = SEAGenerator()\nstream_3 = SEAGenerator()\nstream_1.prepare_for_use()\nstream_2.prepare_for_use()\nstream_3.prepare_for_use()\n\nENSEMBLE_TYPE = 'av'\ninstances_num = 10000\ninstances_counter = 0\nnum_features = stream_1.n_features\nnum_targets = stream_1.n_targets\nnum_classes = 2\ntarget_values = [0., 1.]\nlogging.info(\"\\n\\tStreams are generated and prepared for use.\\n\\tNumber of features: {0} - Number of targets: {1} - Number of classes: {2} - Target values: 
{3}\"\n .format(num_features, num_targets, num_classes, target_values))\n\nN_MAX_CLASSIFIERS = 15\nCHUNK_SIZE = 500 # User-specified\nWINDOW_SIZE = 100 # User-specified\n\n### Probability of drift in streams\np1_threshold = 0.7\np2_threshold = 0.8\np3_threshold = 0.75\n\n### Arrays for storing accuracy values for Streams\naccuracies_1 = []\naccuracies_2 = []\naccuracies_3_mv = []\naccuracies_3_av = []\naccuracies_3_goowe = []\n\n# Initialize the ensemble\ngoowe_1 = Goowe(n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE,\n logging = False)\ngoowe_1.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n\n# Initialize the ensemble\ngoowe_2 = Goowe(n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE,\n logging = False)\ngoowe_2.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n\n\ngoowe_3 = GooweMS(goowe_1, goowe_2, n_max_components=N_MAX_CLASSIFIERS,\n chunk_size=CHUNK_SIZE,\n window_size=WINDOW_SIZE,\n logging = False)\ngoowe_3.prepare_post_analysis_req(num_features, num_targets, num_classes, target_values)\n# For the first chunk, there is no prediction.\n\nX_init, y_init = stream_1.next_sample(CHUNK_SIZE)\ngoowe_1.partial_fit(X_init, y_init)\n\nX_init, y_init = stream_2.next_sample(CHUNK_SIZE)\ngoowe_2.partial_fit(X_init, y_init)\n\nX_init, y_init = stream_3.next_sample(CHUNK_SIZE)\n#a = goowe_3.predict(X_init)\n#print('==============', a)\ngoowe_3.update(X_init, y_init, 1, 1)\n# TODO: update_from(goowe_1, goowe_2) :: updates existing goowe by selecting N_MAX_CLASSIFIERS / 2 components from each of them.\n\naccuracy_1 = 0.0\ntotal_1 = 0.0\ntrue_predictions_1 = 0.0\n\naccuracy_2 = 0.0\ntotal_2 = 0.0\ntrue_predictions_2 = 0.0\n\naccuracy_3 = 0.0\ntotal_3 = 0.0\ntrue_predictions_3_mv = 0.0\ntrue_predictions_3_av = 0.0\ntrue_predictions_3_goowe = 0.0\n\ntotal = 0.\n\nfor i in range(CHUNK_SIZE):\n total += 1\n cur_1 = stream_1.next_sample()\n cur_2 = stream_2.next_sample()\n cur_3 = stream_3.next_sample()\n X_1, y_1 = cur_1[0], cur_1[1]\n X_2, y_2 = cur_2[0], cur_2[1]\n X_3, y_3 = cur_3[0], cur_3[1]\n preds_1 = goowe_1.predict(X_1)\n preds_2 = goowe_2.predict(X_2)\n preds_3_mv = goowe_3.predict(X_3, ensemble_type='mv')\n preds_3_av = goowe_3.predict(X_3, ensemble_type='av')\n preds_3_goowe = goowe_3.predict(X_3, ensemble_type='goowe')\n true_predictions_1 += np.sum(preds_1 == y_1)\n true_predictions_2 += np.sum(preds_2 == y_2)\n true_predictions_3_mv += np.sum(preds_3_mv == y_3)\n true_predictions_3_av += np.sum(preds_3_av == y_3)\n true_predictions_3_goowe += np.sum(preds_3_goowe == y_3)\n accuracy_1 = true_predictions_1 / total\n accuracy_2 = true_predictions_2 / total\n accuracy_3_mv = true_predictions_3_mv / total\n accuracy_3_av = true_predictions_3_av / total\n accuracy_3_goowe = true_predictions_3_goowe / total\n print('\\tSTREAM 1 :: Data instance: {} - Accuracy: {}'.format(int(total), round(accuracy_1*100.0, 3)))\n print('\\tSTREAM 2 :: Data instance: {} - Accuracy: {}'.format(int(total), round(accuracy_2*100.0, 3)))\n print('\\tSTREAM 3 :: Data instance: {} - Accuracies: MV: {} - AV: {} - Goowe: {}'.format(int(total),\n round(accuracy_3_mv*100.0, 3), round(accuracy_3_av*100.0, 3), round(accuracy_3_goowe*100.0, 3)))\n print('\\t==========================================================================')\n goowe_1.partial_fit(X_1, y_1)\n goowe_2.partial_fit(X_2, y_2)\n goowe_3.update(X_3, y_3, 1, 1)\n\n# Now, for the remaining instances, do ITTT 
(Interleaved Test Then Train).\nwhile(stream_1.has_more_samples() and stream_2.has_more_samples() and stream_3.has_more_samples() and instances_counter < instances_num):\n\n if(instances_counter % CHUNK_SIZE == 0):\n accuracy_1 = 0.0\n total_1 = 0.0\n true_predictions_1 = 0.0\n accuracy_2 = 0.0\n total_2 = 0.0\n true_predictions_2 = 0.0\n accuracy_3_mv = 0.0\n accuracy_3_av = 0.0\n accuracy_3_goowe = 0.0\n total_3 = 0.0\n true_predictions_3_mv = 0.0\n true_predictions_3_av = 0.0\n true_predictions_3_goowe = 0.0\n total = 0.\n\n\n ### Generating drifts by drawing random values for each stream\n p1 = random.random()\n p2 = random.random()\n p3 = random.random()\n if p1 > p1_threshold:\n stream_1.generate_drift()\n logging.info('\\n\\tDrift generated for STREAM 1')\n if p2 > p2_threshold:\n stream_2.generate_drift()\n logging.info('\\n\\tDrift generated for STREAM 2')\n if p3 > p3_threshold:\n stream_3.generate_drift()\n logging.info('\\n\\tDrift generated for STREAM 3')\n total += 1\n cur_1 = stream_1.next_sample()\n cur_2 = stream_2.next_sample()\n cur_3 = stream_3.next_sample()\n X_1, y_1 = cur_1[0], cur_1[1]\n X_2, y_2 = cur_2[0], cur_2[1]\n X_3, y_3 = cur_3[0], cur_3[1]\n preds_1 = goowe_1.predict(X_1)\n preds_2 = goowe_2.predict(X_2)\n preds_3_goowe = goowe_3.predict(X_3, ensemble_type='goowe')\n preds_3_mv = goowe_3.predict(X_3, ensemble_type='mv')\n preds_3_av = goowe_3.predict(X_3, ensemble_type='av')\n true_predictions_1 += np.sum(preds_1 == y_1)\n true_predictions_2 += np.sum(preds_2 == y_2)\n true_predictions_3_mv += np.sum(preds_3_mv == y_3)\n true_predictions_3_av += np.sum(preds_3_av == y_3)\n true_predictions_3_goowe += np.sum(preds_3_goowe == y_3)\n accuracy_1 = true_predictions_1 / total\n accuracy_2 = true_predictions_2 / total\n accuracy_3_mv = true_predictions_3_mv / total\n accuracy_3_av = true_predictions_3_av / total\n accuracy_3_goowe = true_predictions_3_goowe / total\n accuracies_1.append(accuracy_1)\n accuracies_2.append(accuracy_2)\n accuracies_3_mv.append(accuracy_3_mv)\n accuracies_3_av.append(accuracy_3_av)\n accuracies_3_goowe.append(accuracy_3_goowe)\n #np.save('results/agrawal_'+ENSEMBLE_TYPE+'_accuracies_1.npy', np.asarray(accuracies_1))\n #np.save('results/agrawal_'+ENSEMBLE_TYPE+'_accuracies_2.npy', np.asarray(accuracies_2))\n #np.save('results/agrawal_'+ENSEMBLE_TYPE+'_accuracies_3.npy', np.asarray(accuracies_3))\n np.save('results/sea_accuracies_1.npy', np.asarray(accuracies_1))\n np.save('results/sea_accuracies_2.npy', np.asarray(accuracies_2))\n np.save('results/sea_mv_accuracies_3.npy', np.asarray(accuracies_3_mv))\n np.save('results/sea_av_accuracies_3.npy', np.asarray(accuracies_3_av))\n np.save('results/sea_goowe_accuracies_3.npy', np.asarray(accuracies_3_goowe))\n print('\\tSTREAM 1 :: Data instance: {} - Accuracy: {}'.format(int(total), round(accuracy_1*100.0, 3)))\n print('\\tSTREAM 2 :: Data instance: {} - Accuracy: {}'.format(int(total), round(accuracy_2*100.0, 3)))\n print('\\tSTREAM 3 :: Data instance: {} - Accuracies: MV: {} - AV: {} - Goowe: {}'.format(int(total),\n round(accuracy_3_mv*100.0, 3), round(accuracy_3_av*100.0, 3), round(accuracy_3_goowe*100.0, 3)))\n print('\\t==========================================================================')\n goowe_1.partial_fit(X_1, y_1) # Then train\n goowe_2.partial_fit(X_2, y_2) # Then train\n goowe_3.update(X_3, y_3, 1, 1)\n instances_counter += 1\n\n# TODO: Create new goowe_3 by using components of the other two goowes with highest weights (e.g. 5 from each)\n# TODO: AND update goowe_3 
at each chunk (each while step)\n","sub_path":"ms_sea_experiment.py","file_name":"ms_sea_experiment.py","file_ext":"py","file_size_in_byte":8295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"60392648","text":"# -*- coding: utf-8 -*-\n\nimport itertools\nimport unittest\nfrom mock import patch\n\nwith patch('rknfilter.conf.settings.DB', 'sqlite:///:memory:'):\n from rknfilter.db import engine, Session\n from rknfilter.db.models import Base, IP, URL, Domain\n\n\nclass ModelTestCase(unittest.TestCase):\n def setUp(self):\n Base.metadata.create_all(engine)\n\n def tearDown(self):\n Base.metadata.drop_all(engine)\n Session.remove()\n\n\nclass DomainTestCase(ModelTestCase):\n def add_domain(self, name='rknfilter.ru'):\n d = Domain(domain=name)\n Session.add(d)\n return d\n\n def assert_urls(self, urls, d=None):\n if d is not None:\n self.assertEqual(sorted(urls), sorted(d.urls_list))\n else:\n self.assertEqual(sorted(u.url for u in Session.query(URL).all()), sorted(urls))\n\n def assert_ips(self, ips, d=None):\n if d is not None:\n self.assertEqual(sorted(d.ips_list), sorted(set(ips)))\n else:\n self.assertEqual(sorted(i.ip for i in Session.query(IP).all()), sorted(set(ips)))\n\n def assert_ips_from_rkn(self, ips):\n for i in ips:\n ip = Session.query(IP).filter(IP.ip==i).one()\n print(ip)\n self.assertTrue(ip.from_rkn)\n self.assertFalse(ip.from_resolver)\n\n def assert_ips_from_resolver(self, ips):\n for i in ips:\n ip = Session.query(IP).filter(IP.ip==i).one()\n print(ip)\n self.assertFalse(ip.from_rkn)\n self.assertTrue(ip.from_resolver)\n\n def assert_ips_from_all(self, ips):\n for i in ips:\n ip = Session.query(IP).filter(IP.ip==i).one()\n print(ip)\n self.assertTrue(ip.from_rkn)\n self.assertTrue(ip.from_resolver)\n\n def assert_domains_from_rkn(self, domains):\n self.assertEqual(Session.query(Domain).count(), len(domains))\n for d, o in domains.items():\n q = Session.query(Domain).filter(Domain.domain==d).one()\n self.assert_ips(o['ips'], q)\n self.assert_ips_from_rkn(o['ips'])\n self.assert_urls(o['urls'], q)\n self.assert_urls(itertools.chain.from_iterable([i['urls'] for i in domains.values()]))\n\n all_ips = set(itertools.chain.from_iterable([i['ips'] for i in domains.values()]))\n self.assert_ips(all_ips)\n self.assert_ips_from_rkn(all_ips)\n\n def test_sync_urls(self):\n d = self.add_domain()\n\n def sync_and_assert(urls):\n d.sync_urls(urls)\n Session.commit()\n self.assert_urls(urls, d)\n self.assert_urls(urls)\n\n sync_and_assert([\n 'http://rknfilter.ru/test/',\n 'http://rknfilter.ru/contacts/',\n ])\n\n sync_and_assert([\n 'http://rknfilter.ru/contacts/',\n 'http://rknfilter.ru/contacts/test.html',\n '//I love Blackie Lawless vocals so much!!//gfg',\n ])\n\n sync_and_assert([])\n\n sync_and_assert([\n 'http://rknfilter.ru/contacts/tests.html',\n 'I hate Java so much, because it tells me that I am an employee and no more',\n ])\n\n def test_sync_ips(self):\n d = self.add_domain()\n\n def sync_and_assert(ips, ips_stored=None):\n d.sync_ips(ips, ips_stored=ips_stored)\n Session.commit()\n self.assert_ips(ips, d)\n\n sync_and_assert([\n '1.1.1.1',\n '2.2.2.2',\n '3.3.3.3',\n ])\n\n sync_and_assert([\n '1.1.1.1',\n '2.2.2.2',\n ])\n\n sync_and_assert([])\n\n sync_and_assert([\n '1.1.1.1',\n '5.5.5.5',\n '6.6.6.6',\n '7.7.7.7',\n ])\n\n sync_and_assert([\n '8.8.8.8',\n ], IP.as_dict(session=Session))\n\n # TODO: add delete orphans test\n\n def test_domains_sync_from_rkn(self):\n def sync_and_assert(domains):\n 
Domain.sync_from_rkn(Session, domains)\n self.assert_domains_from_rkn(domains)\n\n domains = {\n 'ya.ru': {\n 'ips' : [\n '1.1.1.1',\n '2.2.2.2',\n ],\n\n 'urls' : [\n 'https://ya.ru/index.php',\n 'http://ya.ru/images/pravda.jpg#rkn',\n ],\n }\n }\n sync_and_assert(domains)\n\n domains = {\n 'ya.ru': {\n 'ips' : [\n '1.1.1.1',\n ],\n\n 'urls' : [\n 'http://ya.ru/index.php',\n ],\n }\n }\n sync_and_assert(domains)\n\n domains = {\n 'test.ru': {\n 'ips' : [\n '1.1.1.1',\n '8.8.8.8',\n ],\n\n 'urls' : [\n 'http://test.ru/index.php',\n ],\n }\n }\n sync_and_assert(domains)\n\n domains = {\n 'test.ru': {\n 'ips' : [\n '1.1.1.9',\n '8.8.8.8',\n '9.9.9.9',\n ],\n\n 'urls' : [\n 'http://test.ru/index.php',\n ],\n },\n\n 'ya.ru': {\n 'ips' : [],\n\n 'urls' : [\n 'http://ya.ru/',\n 'http://ya.ru/commerce/test.yaml',\n 'https://ya.ru/test.php',\n ],\n },\n }\n sync_and_assert(domains)\n\n domains = {\n 'test.ru': {\n 'ips' : [\n '1.1.1.9',\n '8.8.8.8',\n ],\n\n 'urls' : [\n 'http://test.ru/index.php',\n 'http://test.ru/dsfg/3453/sfsdf/#dfsdf',\n ],\n },\n\n 'ya.ru': {\n 'ips' : [\n '10.10.10.10',\n '1.1.1.9',\n ],\n\n 'urls' : [\n 'http://ya.ru/',\n 'http://ya.ru/commerce/test.yaml',\n 'https://ya.ru/test.php',\n ],\n },\n\n 'r1.com': {\n 'ips': [\n '1.1.1.9',\n '6.6.6.6',\n '7.7.7.7',\n '8.8.8.8',\n '9.9.9.9',\n '10.10.10.10',\n '1.1.1.1',\n '1.1.1.9',\n ],\n 'urls': [],\n },\n }\n sync_and_assert(domains)\n\n def test_domains_sync_from_resolver(self):\n domains = {\n 'test.ru': {\n 'ips' : [\n '1.1.1.1',\n ],\n 'urls' : [],\n },\n\n 'ya.ru': {\n 'ips' : [\n '3.3.3.3',\n ],\n 'urls' : [],\n },\n\n 'r1.com': {\n 'ips': [\n '5.5.5.5',\n ],\n 'urls': [],\n },\n }\n Domain.sync_from_rkn(Session, domains)\n\n domains_resolver = {\n 'test.ru': {\n 'ips' : [\n '1.1.1.1',\n '2.2.2.2',\n ],\n },\n\n 'ya.ru': {\n 'ips' : [\n '8.8.8.8',\n ],\n },\n\n 'r1.com': {\n 'ips': [\n '5.5.5.5',\n ],\n 'urls': [],\n },\n\n }\n Domain.sync_from_resolver(Session, domains_resolver)\n\n ips_rkn = [\n '3.3.3.3',\n ]\n\n ips_resolver = [\n '2.2.2.2',\n '8.8.8.8',\n ]\n\n ips_rkn_resolver = [\n '1.1.1.1',\n '5.5.5.5',\n ]\n\n self.assert_ips_from_rkn(ips_rkn)\n self.assert_ips_from_resolver(ips_resolver)\n self.assert_ips_from_all(ips_rkn_resolver)\n\n for i in domains:\n d = Session.query(Domain).filter(Domain.domain==i).one()\n self.assertEqual(sorted(d.ips_list), sorted(set(domains[i]['ips']+domains_resolver[i]['ips'])))\n\n\n domains_resolver = {\n 'test.ru': {\n 'ips' : [\n '1.1.1.1',\n ],\n },\n\n 'ya.ru': {\n 'ips' : [\n '8.8.8.8',\n ],\n },\n\n 'r1.com': {\n 'ips': [\n '5.5.5.5',\n '2.2.2.2',\n ],\n },\n\n }\n Domain.sync_from_resolver(Session, domains_resolver)\n\n ips_rkn = [\n '3.3.3.3',\n ]\n\n ips_resolver = [\n '2.2.2.2',\n '8.8.8.8',\n ]\n\n ips_rkn_resolver = [\n '1.1.1.1',\n '5.5.5.5',\n ]\n\n self.assert_ips_from_rkn(ips_rkn)\n self.assert_ips_from_resolver(ips_resolver)\n self.assert_ips_from_all(ips_rkn_resolver)\n\n for i in domains:\n d = Session.query(Domain).filter(Domain.domain==i).one()\n self.assertEqual(sorted(d.ips_list), sorted(set(domains[i]['ips']+domains_resolver[i]['ips'])))\n\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":9328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"585844524","text":"\n\nfrom astropy.io import fits\n\n__all__ = ['check_stim_global']\n\nSTIM_KEYWORDS = {'FUVA':['STIMA_LX',\n 'STIMA_LY',\n 'STIMA_RX',\n 'STIMA_RY'],\n 'FUVB':['STIMB_LX',\n 'STIMB_LY',\n 
'STIMB_RX',\n 'STIMB_RY']}\n\nSTIM_KEYWORDS['BOTH'] = STIM_KEYWORDS['FUVA'] + STIM_KEYWORDS['FUVB']\n\n#-------------------------------------------------------------------------------\n\nclass StimError(Exception):\n pass\n\n#-------------------------------------------------------------------------------\n\ndef check_stim_global(filename, verbose=0):\n \"\"\" Check for stims missing from the entire observation\n\n Checks for a negative value indicating the stim was not found internally\n by CalCOS.\n\n \"\"\"\n\n if verbose:\n print(\"Checking for STIMS as found by CalCOS\")\n\n hdu = fits.open(filename)\n\n if not hdu[0].header['DETECTOR'] == 'FUV':\n raise ValueError('Filename {} must be FUV data'.format(filename))\n\n segment = hdu[0].header['SEGMENT']\n if verbose: print(\"Segment: {}\".format(segment))\n\n missing_stims = []\n for keyword in STIM_KEYWORDS[segment]:\n if 'A_' in keyword: n_events = hdu[1].header['NEVENTSA']\n if 'B_' in keyword: n_events = hdu[1].header['NEVENTSB']\n\n if (hdu[1].header[keyword] < 0) and (n_events > 0):\n missing_stims.append(keyword)\n\n if verbose: print(\"{} @ {} n_events: {}\".format(keyword,\n hdu[1].header[keyword],\n n_events))\n\n if len(missing_stims):\n raise StimError('{} has missing stims: {}'.format(filename,\n missing_stims))\n\n#-------------------------------------------------------------------------------\n","sub_path":"cos_monitoring/stim/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"123661082","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom functools import reduce\n\nclass QNetwork(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, fc1_units=128, fc2_units=64):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n \"\"\"\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n \n self.fc = nn.Sequential(\n nn.Linear(state_size, fc1_units),\n nn.ReLU(),\n nn.Linear(fc1_units, fc2_units),\n nn.ReLU(),\n nn.Linear(fc2_units, action_size))\n \n def forward(self, state):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n return self.fc(state)","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"490341921","text":"# RESSOURCES ______________________________________________________________________________\n# https://pypi.org/project/SpeechRecognition/\n# https://pythonprogramminglanguage.com/speech-recognition/\n# https://stackoverflow.com/questions/62040401/alsa-error-running-a-flask-application-on-linux-ubuntu-using-pyaudio\n# https://askubuntu.com/questions/608480/alsa-problems-with-python2-7-unable-to-open-slave\n# https://stackoverflow.com/questions/31603555/unknown-pcm-cards-pcm-rear-pyaudio#31729510\n# https://bbs.archlinux.org/viewtopic.php?id=245040\n# https://github.com/Uberi/speech_recognition/issues/191\n\nimport speech_recognition as sr\n\n\nfrom text_to_speech import say_text\nfrom default_parameters import user_language\n\n\ndef listen_text():\n \"\"\"\n Prompt user to say something.\n Return what he/she said like a string.\n\n \"\"\"\n if user_language[0] == \"fr\":\n invitation_text = \"J'écoute.\"\n \n else: # 
user_language[0] == \"en\": \n invitation_text = \"I'm listening.\"\n\n # get audio from the microphone\n r = sr.Recognizer()\n with sr.Microphone() as source:\n # listen for 1 second to calibrate the energy threshold for ambient noise levels.\n r.adjust_for_ambient_noise(source)\n say_text(invitation_text)\n audio = r.listen(source)\n\n try:\n if user_language[0] == \"fr\":\n text = r.recognize_google(audio, language=\"fr-FR\")\n \n else: # user_language[0] == \"en\":\n text = r.recognize_google(audio)\n\n except sr.UnknownValueError:\n text = \"IDNU\"\n except sr.RequestError as e:\n text = \"error \"\n\n return text","sub_path":"speech_rec.py","file_name":"speech_rec.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"324309222","text":"import pandas\nfrom sklearn.externals import joblib\nimport TrainTestProcesser\nfrom sklearn.ensemble import RandomForestClassifier\nfrom Select_OF_File import get_subdir\nimport matplotlib.pyplot as mp\nimport sklearn.model_selection as ms\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nimport numpy as np\nimport itertools\ndef main():\n # Load the data set\n # do not use the first column as the row index\n data_set = pandas.read_csv(\"dataset.csv\",index_col=False,encoding='gbk')\n print(\"Shape of the data set:\",data_set.shape)\n # Split the data set into features x and labels y\n dnumpy_x,dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n # Use StratifiedKFold to split the data into training and test folds\n folds= TrainTestProcesser.split_dnumpy_train_test(dnumpy_x, dnumpy_y)\n # Create the model\n model=RandomForestClassifier(n_estimators=23)\n # Run stratified k-fold cross-validation\n TrainTestProcesser.apply_SKfold(model, folds)\n # Train the model\n TrainTestProcesser.train_model(model, dnumpy_x, dnumpy_y)\n # Save the model for later use\n joblib.dump(model,\"RFC_model.plk\")\n\n\n\ndef getconfusion_matrix():\n mp.rcParams['font.family'] = ['sans-serif']\n mp.rcParams['font.sans-serif'] = ['SimHei']\n classes=get_subdir(\"音频文件\") # \"音频文件\" is the audio-files directory\n data_set = pandas.read_csv(\"dataset.csv\",index_col=False,encoding='gbk')\n dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)\n train_x, test_x, train_y, test_y = ms.train_test_split(dnumpy_x, dnumpy_y, test_size=0.25, random_state=7)\n model=joblib.load(\"RFC_model.plk\")\n pred_test_y = model.predict(test_x)\n # Confusion matrix\n cm=confusion_matrix(test_y, pred_test_y)\n # Get the classification report\n r = classification_report(test_y, pred_test_y)\n print('Classification report:', r, sep='\\n')\n\n mp.figure()\n plot_confusion_matrix(cm, classes=classes, normalize=True,\n title='Random forest classifier confusion matrix')\n\ndef plot_confusion_matrix(cm, classes,normalize=False,title='Confusion matrix',\n cmap=mp.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Confusion matrix, normalized\")\n else:\n print('Confusion matrix, without normalization')\n\n print(\"Confusion matrix:\",cm)\n\n mp.imshow(cm, interpolation='nearest', cmap=cmap)\n mp.title(title)\n mp.colorbar()\n tick_marks = np.arange(len(classes))\n mp.xticks(tick_marks, classes, rotation=45)\n mp.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n mp.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n mp.tight_layout()\n mp.ylabel('True label')\n mp.xlabel('Predicted label')\n mp.savefig('confusion_matrix_RFC.png', format='png')\n mp.show()\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n 
getconfusion_matrix()","sub_path":"Speaker/co_/Include/RFC_Model.py","file_name":"RFC_Model.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"291553465","text":"import sqlite3\n\n\nclass MovieAdder:\n @staticmethod\n def add_to_db(db_path, movie):\n \"\"\"Insert given movie (instance of movie class) to database (by given db_path).\n Raises exception with appropriate message if movie already is in database\"\"\"\n\n connection = sqlite3.connect(db_path)\n cur = connection.cursor()\n\n # test_query to check if specified movie already exists in db\n cur.execute(\"SELECT title from movies WHERE title = :title;\", {'title': movie.title})\n test_query = cur.fetchone()\n\n if test_query:\n raise Exception(f\"Given movie ({movie.title}) already in database!\")\n else:\n cur.execute(''' INSERT INTO movies (title, year, runtime, genre, director, actors, writer, language,\n country, awards, imdb_rating, imdb_votes, box_office) VALUES(:title, :year, :runtime, :genre,\n :director, :cast, :writer, :language, :country, :awards, :imdb_rating, :imdb_votes,:box_office);''',\n {'title': movie.title, 'year': movie.year, 'runtime': movie.runtime, 'genre': movie.genre,\n 'director': movie.director, 'cast': movie.actors, 'writer': movie.writer,\n 'language': movie.language, 'country': movie.country, 'awards': movie.awards,\n 'imdb_rating': movie.imdb_rating, 'imdb_votes': movie.imdb_votes,\n 'box_office': movie.box_office})\n print(f\"Data from movie - {movie.title} has been inserted to database\")\n\n connection.commit()\n connection.close()\n\n","sub_path":"movie_adder.py","file_name":"movie_adder.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"63302790","text":"#!/usr/bin/python\n\n# MUST BE RUN AS ROOT (due to GPIO access)\n#\n# Required software includes Adafruit_Thermal, Python Imaging and PySerial\n# libraries. Other libraries used are part of stock Python install.\n#\n# Resources:\n# http://www.adafruit.com/products/597 Mini Thermal Receipt Printer\n# http://www.adafruit.com/products/600 Printer starter pack\n\nfrom __future__ import print_function\nimport RPi.GPIO as GPIO\nimport subprocess, time, socket\nfrom PIL import Image\nfrom Adafruit_Thermal import *\n\nledPin = 18\nbuttonPin = 23\nholdTime = 2 # Duration for button hold (shutdown)\ntapTime = 0.01 # Debounce time for button taps\nprinter = Adafruit_Thermal(\"/dev/serial0\", 19200, timeout=5)\n\n# Called when button is briefly tapped. Invokes time/temperature script.\ndef tap():\n GPIO.output(ledPin, GPIO.HIGH) # LED on while working\n subprocess.call([\"python\", \"timetemp.py\"])\n GPIO.output(ledPin, GPIO.LOW)\n\n# Called when button is held down. 
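The normalization step in `plot_confusion_matrix` above divides each row of the confusion matrix by its row sum, turning raw counts into per-class recall fractions (each row then sums to 1). A tiny worked example of that one line:

```python
# Row-normalizing a confusion matrix, as plot_confusion_matrix does above.
import numpy as np

cm = np.array([[8, 2],
               [1, 9]])
normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# Row 0: 8/10 and 2/10 -> class 0 is recovered 80% of the time.
print(normalized)
# [[0.8 0.2]
#  [0.1 0.9]]
```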
Prints image, invokes shutdown process.\ndef hold():\n GPIO.output(ledPin, GPIO.HIGH)\n printer.printImage(Image.open('gfx/goodbye.png'), True)\n printer.feed(4)\n subprocess.call(\"sync\")\n subprocess.call([\"shutdown\", \"-h\", \"now\"])\n GPIO.output(ledPin, GPIO.LOW)\n\n# Initialization\n\n# Use Broadcom pin numbers (not Raspberry Pi pin numbers) for GPIO\nGPIO.setmode(GPIO.BCM)\n\n# Enable LED and button (w/pull-up on latter)\nGPIO.setup(ledPin, GPIO.OUT)\nGPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n# LED on while working\nGPIO.output(ledPin, GPIO.HIGH)\n\n# Processor load is heavy at startup; wait a moment to avoid\n# stalling during greeting.\ntime.sleep(30)\n\n# make sure network is reachable\ntry:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 0))\nexcept:\n printer.boldOn()\n printer.println('Network is unreachable.')\n printer.boldOff()\n printer.feed(4)\n exit(0)\n\n# Poll initial button state and time\nprevButtonState = GPIO.input(buttonPin)\nprevTime = time.time()\ntapEnable = False\nholdEnable = False\n\n# Main loop\nwhile(True):\n\n # Poll current button state and time\n buttonState = GPIO.input(buttonPin)\n t = time.time()\n\n # Has button state changed?\n if buttonState != prevButtonState:\n prevButtonState = buttonState # Yes, save new state/time\n prevTime = t\n else: # Button state unchanged\n if (t - prevTime) >= holdTime: # Button held more than 'holdTime'?\n # Yes it has. Is the hold action as-yet untriggered?\n if holdEnable == True: # Yep!\n hold() # Perform hold action (usu. shutdown)\n holdEnable = False # 1 shot...don't repeat hold action\n tapEnable = False # Don't do tap action on release\n elif (t - prevTime) >= tapTime: # Not holdTime. tapTime elapsed?\n # Yes. Debounced press or release...\n if buttonState == True: # Button released?\n if tapEnable == True: # Ignore if prior hold()\n tap() # Tap triggered (button released)\n tapEnable = False # Disable tap and hold\n holdEnable = False\n else: # Button pressed\n tapEnable = True # Enable tap and hold actions\n holdEnable = True\n\n # LED blinks while idle, for a brief interval every 2 seconds.\n # Pin 18 is PWM-capable and a \"sleep throb\" would be nice, but\n # the PWM-related library is a hassle for average users to install\n # right now. 
Might return to this later when it's more accessible.\n if ((int(t) & 1) == 0) and ((t - int(t)) < 0.15):\n GPIO.output(ledPin, GPIO.HIGH)\n else:\n GPIO.output(ledPin, GPIO.LOW)\n","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"241055822","text":"from fetch import fetch_pdb_pose\nfrom pyrosetta import init\nfrom pyrosetta.rosetta.core.pose import get_chain_from_chain_id\nfrom biopandas.pdb import PandasPdb\nfrom lattice_scoring_tool.residue_class import Residue\nfrom lattice_scoring_tool.build_lattice_coordinate import pdb_to_lattice\nfrom lattice_scoring_tool.lattice_scoring_function import *\nimport numpy as np\nimport math\nimport sys\nimport logging\n\n\ndef lattice_scoring(pose, ratio, pdb_log):\n pdb_log.info(\"Compute lattice score.\")\n Dill_HP_Score = 0\n Bomberg_HP_Score = 0\n Li_HP_Score = 0\n HPNX_Score = 0\n YhHX_Score = 0\n hHPNX_Score = 0\n residue_list, duplicate_recorder = pdb_to_lattice(pose, pdb_log)\n coordinate_list = []\n for residue in residue_list:\n coordinate_list.append(list(residue.lattice_XYZ))\n\n for coordinate_index, coordinate in enumerate(coordinate_list):\n temp_score = cal_one_residue_score(\n residue_list, coordinate_list, coordinate_index, coordinate, ratio, pdb_log)\n Dill_HP_Score += temp_score[0]\n Bomberg_HP_Score += temp_score[1]\n Li_HP_Score += temp_score[2]\n HPNX_Score += temp_score[3]\n YhHX_Score += temp_score[4]\n hHPNX_Score += temp_score[5]\n\n return [Dill_HP_Score, Bomberg_HP_Score, Li_HP_Score, HPNX_Score, YhHX_Score, hHPNX_Score], duplicate_recorder\n\n\ndef main():\n init()\n # lattice_scoring expects a logger as its third argument\n pdb_log = logging.getLogger(__name__)\n lattice_scoring(fetch_pdb_pose('5G5D'), 2, pdb_log)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lattice_scoring.py","file_name":"lattice_scoring.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"90922725","text":"########################################################################\nfrom Gaudi.Configuration import *\nfrom Configurables import DaVinci \nfrom Configurables import GaudiSequencer, CombineParticles, FilterDesktop\nfrom PhysSelPython.Wrappers import Selection, SelectionSequence, DataOnDemand\nfrom CommonMCParticles import StandardMCPhi, StandardMCJpsi, StandardMCKaons, StandardMCMuons\n\n# Truth-matched common particles: \n_jpsi = DataOnDemand(Location='Phys/StdMCJpsi2MuMu/Particles')\n_kaon = DataOnDemand(Location='Phys/StdMCKaons/Particles')\n\n\n#\n# MC matching\n#\n\nmatchBu2JpsiK = \"(mcMatch('[B+ ==> J/psi(1S) K+]CC'))\"\n\n\n_bu2jpsik = CombineParticles(\"bu2jpsik\")\n_bu2jpsik.DecayDescriptor = \"[B+ -> J/psi(1S) K+]cc\"\n_bu2jpsik.MotherCut = matchBu2JpsiK\n_bu2jpsik.Preambulo = [\n \"from LoKiPhysMC.decorators import *\",\n \"from PartProp.Nodes import CC\" ]\n\nSelBu2JpsiK = Selection( \"SelBu2JpsiK\",\n Algorithm = _bu2jpsik,\n RequiredSelections=[_jpsi,_kaon]) \n\nSeqBu2JpsiK = SelectionSequence('MCFilter',TopSelection = SelBu2JpsiK) \n\n#\n# Write DST\n#\ndstExtension = \".\" + DaVinci().InputType.lower()\nenablePacking = True\nfrom DSTWriters.microdstelements import *\nfrom DSTWriters.Configuration import (SelDSTWriter,\n stripDSTStreamConf,\n stripDSTElements\n )\nSelDSTWriterElements = { 'default' : stripDSTElements(pack=enablePacking) }\nSelDSTWriterConf = { 'default' : stripDSTStreamConf(pack=enablePacking,fileExtension=dstExtension,selectiveRawEvent=False) }\ndstWriter = 
SelDSTWriter( \"MyDSTWriter\",\n StreamConf = SelDSTWriterConf,\n MicroDSTElements = SelDSTWriterElements,\n OutputFileSuffix ='Filtered',\n SelectionSequences = [ SeqBu2JpsiK],\n )\n#\n# DaVinci Configuration\n#\nfrom Configurables import DaVinci\nDaVinci().HistogramFile = \"DVHistos.root\"\nDaVinci().ProductionType = \"Stripping\"\nDaVinci().Simulation = True\nDaVinci().EvtMax = -1 # Number of events\nDaVinci().appendToMainSequence( [SeqBu2JpsiK.sequence(), dstWriter.sequence() ])\n\n# Change the column size of Timing table\nfrom Configurables import TimingAuditor, SequencerTimerTool\nTimingAuditor().addTool(SequencerTimerTool,name=\"TIMER\")\nTimingAuditor().TIMER.NameSize = 60\n\n","sub_path":"DBASE/WG/HLTConfig/options/BuJpsiK.py","file_name":"BuJpsiK.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"575677165","text":"\"\"\"empty message\n\nRevision ID: 390af3535dcb\nRevises: 53c9f816694b\nCreate Date: 2019-01-06 16:00:38.330426\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '390af3535dcb'\ndown_revision = '53c9f816694b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('inventorys_ibfk_2', 'inventorys', type_='foreignkey')\n op.drop_column('inventorys', 'parent')\n op.drop_column('inventorys', 'count')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('inventorys', sa.Column('count', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False))\n op.add_column('inventorys', sa.Column('parent', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.create_foreign_key('inventorys_ibfk_2', 'inventorys', 'inventorys', ['parent'], ['id'])\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/390af3535dcb_.py","file_name":"390af3535dcb_.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"188977492","text":"\"\"\"\n-------------------------------------------------------------------------------\nMODULE\n SAIRD_ShortEndCurve_Management\n\nDESCRIPTION\n Date : 05/08/2014\n Purpose : Short end curve management module\n Department and Desk : Middle Office\n Requester : Helder Loio\n Developer : Jakub Tomaga\n CR Number : CHNG0002036323\n\nHISTORY\n===============================================================================\nDate CR number Developer Description\n-------------------------------------------------------------------------------\n02/10/2014 2325358 Jakub Tomaga Support for price testing added.\n-------------------------------------------------------------------------------\n\"\"\"\n\nimport os\nfrom at_ael_variables import AelVariableHandler\nfrom ShortEndProvisionReport import (ResetRiskReport, ProvisionPerResetReport,\n ProvisionPerResetBucketReportCreator)\n\n\ndef instruments_hook(selected_variable):\n instruments = selected_variable.handler.get('PriceTestingInstruments')\n if selected_variable.value == 'true':\n instruments.enabled = True\n else:\n instruments.enabled = False\n\n\nael_variables = AelVariableHandler()\nael_variables.add('InputType',\n label='Report Input Type',\n collection=['Filter', 'Portfolio'],\n 
default='Filter')\n\nael_variables.add('Portfolio',\n label='Portfolio',\n cls='FPhysicalPortfolio',\n mandatory=False,\n multiple=True)\n\nael_variables.add('TrdFilter',\n label='Trade Filter',\n cls='FTradeSelection',\n mandatory=False,\n multiple=True)\n\nael_variables.add('ReportType',\n label='Report Type',\n collection=['Reset Risk', 'Provision Per Reset Bucket',\n 'Provision Per Reset'],\n default='Provision Per Reset')\n\nael_variables.add('OutputType',\n label='Output Type',\n collection=['Excel', 'CSV'],\n default='Excel')\n\nael_variables.add('Outpath',\n label='Output Path',\n default='/services/frontnt/Task/')\n\nael_variables.add('Currency',\n label='Currency',\n cls='FCurrency',\n default='ZAR',\n multiple=True)\n\nael_variables.add('Curve',\n label='Forward Yield Curve',\n cls='FYieldCurve',\n default='ZAR-SWAP',\n multiple=True)\n\nael_variables.add_bool('PriceTesting',\n label='Price Testing',\n default=False,\n hook=instruments_hook)\n\nael_variables.add('PriceTestingInstruments',\n label='Instruments',\n multiple=True,\n cls='FInstrument',\n enabled=False,\n mandatory=False)\n\n\ndef ael_main(config):\n if config['OutputType'] == 'CSV':\n file_suffix = 'csv'\n csv_writer_parameters = None\n else:\n file_suffix = 'xls'\n csv_writer_parameters = {'dialect': 'excel-tab'}\n\n output_path = config['Outpath']\n input_type = config['InputType']\n if input_type == 'Portfolio':\n source = config['Portfolio'][0]\n else:\n source = config['TrdFilter'][0]\n\n yield_curve = config['Curve'][0]\n currency = config['Currency'][0]\n report_type = config['ReportType']\n\n if config['PriceTesting']:\n market_rate_instruments = config['PriceTestingInstruments']\n else:\n market_rate_instruments = None\n\n if report_type == 'Reset Risk':\n collection = ResetRiskReport(file_suffix, output_path,\n csv_writer_parameters, source, yield_curve,\n currency, market_rate_instruments)\n collection.create_reports()\n elif report_type == 'Provision Per Reset':\n collection = ProvisionPerResetReport(file_suffix,\n output_path, csv_writer_parameters, source, yield_curve,\n currency, market_rate_instruments)\n collection.create_reports()\n elif report_type == 'Provision Per Reset Bucket':\n file_name = '_'.join([output_path + 'Data_File', input_type,\n source.Name(), currency.Name(), yield_curve.Name(),\n report_type.replace(' ', ''), 'Per_Reset'])\n report_parameters = {\n 'file_name': file_name,\n 'file_suffix': file_suffix,\n 'path': output_path,\n 'csv_writer_parameters': csv_writer_parameters\n }\n report = ProvisionPerResetBucketReportCreator(report_parameters,\n source, yield_curve, currency, market_rate_instruments)\n report.create_report()\n print('Wrote secondary output to {0}'.format(os.path.join(\n report_parameters['path'], '.'.join([report_parameters['file_name'], report_parameters['file_suffix']]))))\n\n print('Completed successfully')\n","sub_path":"Python modules/SAIRD_ShortEndCurve_Management.py","file_name":"SAIRD_ShortEndCurve_Management.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"416261256","text":"import cv2, os, sys\nimport numpy as np\n\nfrom os.path import basename\n\n# USAGE:\n# python unifyColor.py PATH_TO_ANNOTATION PATH_TO_OUTPUT_FOLDER\n#\n# EXAMPLE:\n# python unifyColor.py /home/lotte/Desktop/encp/labels/ /home/lotte/Desktop/encp/labels-unified/\n\ndef main():\n classes_colors = [[0, 0, 0], #background\n [0, 0, 255], #roof\n [128, 255, 255], #sky\n [255, 255, 0], #wall\n 
[128, 0, 255], #balcony\n [255, 0, 0], #window\n [255, 128, 0], #door\n [0, 255, 0]] #shop\n\n # \"balcony\" : {[85,255,170], [170,255,85]},\n # \"window\" : {[0, 85, 255], [255, 255, 0]},\n # \"wall\" : {[0, 0, 255], [0, 255, 255]},\n # \"door\" : {[0, 170, 255], [170, 0, 0]},\n\n files = os.listdir(sys.argv[1])\n\n for file in files:\n\n path = sys.argv[1] + file\n name, file_extension = os.path.splitext(file)\n output = sys.argv[2] + name + \".png\"\n\n if(os.path.isfile(path)):\n\n print(\"Unifying facade color classes...\")\n image = cv2.imread(path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n rows, cols, bands = image.shape\n im = np.zeros((rows, cols, bands), dtype=np.uint8)\n\n for i in range(rows):\n for j in range(cols):\n color = image[i, j]\n color = list(map(int, color)) # list() so the comparisons below work in Python 3\n\n # balcony\n if (color == [85, 255, 170]) or (color == [170, 255, 85]):\n color_code = classes_colors[4]\n # window\n elif (color == [0, 85, 255]) or (color == [255, 255, 0]):\n color_code = classes_colors[5]\n # door\n elif (color == [0, 170, 255]) or (color == [170, 0, 0]):\n color_code = classes_colors[6]\n # wall\n else:\n color_code = classes_colors[3]\n\n\n im[i][j] = color_code\n\n # only write the result when the input file was valid\n cv2.imwrite(output, cv2.cvtColor(im, cv2.COLOR_RGB2BGR))\n print(\"Annotated raster with unified classes saved as \" + output + \"\\n\")\n\n else:\n print(file + \" is not a valid file. Check and try again!\")\n\n\nif __name__ == '__main__':\n main()","sub_path":"cnn-projects/input-preparation/color/unifyColor.py","file_name":"unifyColor.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"261083682","text":"\"\"\"\nWrite a program that will take hours and rate per hour as input and will compute the total payment.\nHere your program will give the employee 1.5 times the hourly rate for hours worked above 40 hours.\n\nExample:\nEnter Hours: 45\nEnter Rate: 10\nPay: 475.00\n\n\"\"\"\n\n# take hours and rate as input\nhours = int(input('Enter Hours: '))\nrate = float(input('Enter Rate: '))\npay = 0\n\nif hours>40:\n r=hours-40\n pay=(40*rate)+(r*rate)*1.5\nelse:\n pay=hours*rate\n\n\nprint('Pay: {:.2f}'.format(pay))\n","sub_path":"02_compute_payment_with_over_time.py","file_name":"02_compute_payment_with_over_time.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"481510199","text":"import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport sqlalchemy.orm\nfrom contextlib import contextmanager\nfrom tests.test_db.test_dbobjects import person, site1_in_db, datasource1_in_db\nimport pathlib\nfrom .. 
import conf\n\n\n\n@pytest.fixture(scope='class')\ndef db(conf):\n \"\"\"\n Creates a database in memory with the schema from the ORM classes,\n an admin user with Password 'test' and the basic quality levels\n \"\"\"\n from odmf import config\n config.conf = conf\n from odmf import db\n from odmf.tools import create_db as cdb\n cdb.create_all_tables()\n cdb.add_admin('test')\n cdb.add_quality_data(cdb.quality_data)\n return db\n\n\n@pytest.fixture()\ndef session(db) -> sqlalchemy.orm.Session:\n with db.session_scope() as session:\n yield session\n\n\n@contextmanager\ndef temp_in_database(obj, session):\n \"\"\"\n Adds the ORM-object obj to the session and commits it to the database.\n After usage the object is deleted from the session and is commited again\n \"\"\"\n session.add(obj)\n session.commit()\n yield obj\n session.delete(obj)\n session.commit()\n\n@pytest.fixture()\ndef quality(db, session):\n with temp_in_database(\n db.Quality(\n id=4, name='this is a name', comment='this is a comment'\n ),\n session) as quality:\n yield quality\n\nclass TestQuality:\n def test_quality(self, quality):\n assert quality\n assert quality.id == 4\n assert str(quality).startswith(quality.name)\n d = quality.__jdict__()\n assert isinstance(d, dict)\n assert 'id' in d\n\n\n@pytest.fixture()\ndef value_type(db, session):\n with temp_in_database(\n db.ValueType(\n id=1, name='this is a name', unit='this is a unit',\n comment='this is a comment', minvalue=0.00, maxvalue=110.20\n ),\n session) as value_type:\n yield value_type\n\n\nclass TestValueType:\n def test_ValueType(self, value_type):\n assert value_type\n assert value_type.id == 1\n assert str(value_type).startswith(value_type.name)\n d = value_type.__jdict__()\n assert isinstance(d, dict)\n assert 'id' in d\n\n def test_ValueType_load(self, value_type, session, db, record):\n value_type_1 = session.query(db.ValueType).get(1)\n assert hash(value_type_1) == hash(value_type)\n assert not value_type < value_type\n assert not value_type_1 == value_type\n assert value_type.minvalue < value_type.maxvalue\n assert ((value_type.minvalue is None or value_type.minvalue <= record.value)\n and (value_type.maxvalue is None or value_type.maxvalue >= record.value))\n\n\n@pytest.fixture()\ndef dataset(db, session, value_type, quality, person, datasource1_in_db, site1_in_db):\n with temp_in_database(\n db.Dataset(\n id=1, name='this is a name', filename='this is a filename', type=None,\n start=datetime.datetime(2020, 2, 20), end=datetime.datetime(2030, 12, 20),\n site=site1_in_db, valuetype=value_type, measured_by=person, quality=quality,\n source=datasource1_in_db, calibration_offset=0, calibration_slope=1, comment='this is a comment',\n level=2\n ),\n session) as dataset:\n yield dataset\n\n\nclass TestDataset:\n def test_dataset(self, site1_in_db, dataset):\n assert dataset.id == 1\n assert dataset.type is None\n assert dataset.site == site1_in_db\n\n\n@pytest.fixture()\ndef timeseries(db, session, value_type, quality, person, datasource1_in_db, site1_in_db):\n with temp_in_database(\n db.Timeseries(\n id=1, name='this is a name', filename='this is a filename',\n start=datetime.datetime(2020, 2, 20), end=datetime.datetime(2020, 2, 21),\n site=site1_in_db, valuetype=value_type, measured_by=person, quality=quality,\n source=datasource1_in_db, calibration_offset=0, calibration_slope=1, comment='this is a comment',\n level=2\n ),\n session) as timeseries:\n yield timeseries\n timeseries.records.delete()\n\n\n@pytest.fixture()\ndef record(db, session, 
timeseries):\n with temp_in_database(\n db.Record(\n id=1, dataset=timeseries, time=datetime.datetime(2021, 5, 10),\n value=5, sample='this is a sample', comment='this is a comment',\n is_error=False\n ),\n session) as record:\n yield record\n\n\n@pytest.fixture()\ndef thousand_records(tmp_path, db, session, timeseries):\n # Make a dataframe in the structure of the record table\n n = 1000\n value_step = 0.2\n value_start = -10\n df = pd.DataFrame(dict(\n id=range(1, n + 1),\n dataset=timeseries.id,\n time=pd.date_range('2022-01-01', periods=n, freq='h'),\n value=np.arange(value_start, value_step * n + value_start, value_step),\n is_error=False,\n ))\n # Write the dataframe to the database\n # cf. odmf.dataimport.pandas_import.submit l.410\n df.to_sql('record', session.connection(), if_exists='append', index=False, method='multi', chunksize=1000)\n timeseries.start = df.time.iloc[1].to_pydatetime()\n timeseries.end = df.time.iloc[-1].to_pydatetime()\n session.commit()\n yield df\n session.query(db.Record).filter_by(_dataset=timeseries.id).delete()\n\n\nclass TestTimeseriesThousandRecords:\n\n def test_timeseries_thousand_records(self, timeseries, thousand_records):\n assert timeseries.size() == 1000\n assert timeseries.records.count() == 1000\n assert timeseries.maxrecordid() == 1000\n\n def test_timeseries_asseries(self, timeseries, thousand_records):\n ts_df = timeseries.asseries()\n assert ts_df.mean() == thousand_records.value.mean()\n assert len(ts_df) == 1000\n\n def test_timeseries_statistics(self, timeseries, thousand_records):\n mean, std, n = timeseries.statistics()\n assert mean == np.mean(thousand_records.value)\n assert std == np.std(thousand_records.value)\n assert n == len(thousand_records)\n\n\nclass TestTimeseries:\n\n def test_timeseries_empty(self, timeseries):\n assert timeseries\n assert timeseries.records.count() == 0\n d = timeseries.__jdict__()\n assert isinstance(d, dict)\n assert 'id' in d\n\n def test_timeseries_empty_statistics(self, timeseries):\n mean, std, n = timeseries.statistics()\n assert (mean, std, n) == (0.0, 0.0, 0.0)\n\n def test_record(self, timeseries, record):\n assert record\n assert record.id == 1\n assert str(record).startswith(record.dataset.name)\n d = record.__jdict__()\n assert isinstance(d, dict)\n assert 'id' in d\n\n\n","sub_path":"tests/test_db/test_dbdataset.py","file_name":"test_dbdataset.py","file_ext":"py","file_size_in_byte":6787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"301379567","text":"import tornado.web\nimport tornado.ioloop\nimport tornado.httpserver\nimport tornado.options\nimport tornado.httpclient\nimport platform\nfrom tornado import gen\nfrom gne import GeneralNewsExtractor\n\n\n\nfrom tornado.options import define, options\ndefine(\"port\", default=9999, help=\"run on the given port\", type=int)\n\nif platform.system() == \"Windows\":\n import asyncio\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\nextractor = GeneralNewsExtractor()\n\n\nclass IndexHandler(tornado.web.RequestHandler):\n @gen.coroutine\n def get(self):\n # get_query_argument returns a decoded string; query_arguments would give a list of bytes\n url = self.get_query_argument('link')\n client = tornado.httpclient.AsyncHTTPClient()\n response = yield client.fetch(url)\n # write() accepts bytes/str/dict, not an HTTPResponse object\n self.write(response.body)\n\n @gen.coroutine\n def post(self):\n htmlStr = self.request.body_arguments['content'][0].decode()\n result = self.content_extract(htmlStr)\n self.write({\"data\":result})\n\n def content_extract(self,htmlStr):\n result = 
extractor.extract(htmlStr)\n # print(result)\n return result\n\nif __name__ == '__main__':\n tornado.options.parse_command_line()\n app = tornado.web.Application(\n handlers=[\n (r\"/getData\", IndexHandler)\n ],\n debug=True,\n\n )\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"tornado_test_00.py","file_name":"tornado_test_00.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"226751141","text":"import simplegui\nimport math\n\nwidth = 500\nheight = 500\n\nwaveC = height - 90\nboatP = waveC - 50\n\n\ndef draw_wave(canvas):\n global width\n global height\n global start\n global waveC\n global boatP\n start = 0\n \n #draws the waves\n\n canvas.draw_polygon([(0, height), (0, waveC), (width, waveC), (width, height)], 3, \"blue\", \"blue\")\n for i in range(6):\n canvas.draw_circle((start, waveC), 50, 2, \"blue\", \"white\")\n start = start + 100\n \n canvas.draw_polygon([(0, waveC - 3),\n (0, waveC - 53),\n (500, waveC - 53),\n (500, waveC - 3)], 3, \"white\", \"white\")\n \n #boat base\n canvas.draw_circle((width/2, boatP), 75, 3, \"black\", \"white\")\n canvas.draw_line((width/2 - 73, boatP + 20),\n (width/2 + 73, boatP + 20), 3, \"black\")\n canvas.draw_polygon([(width/2 - 80, boatP + 17),\n (width/2 + 80,boatP + 17),\n (width/2 + 80,boatP - 85),\n (width/2 - 80,boatP - 85)], 3,\n \"white\", \"white\")\n #boat top\n canvas.draw_line((width/2, boatP + 20),(width/2,boatP - 82),\n 4, \"black\")\n canvas.draw_polygon([(width/2, boatP - 33),\n (width/2, boatP - 94),\n (width/2 + 60, boatP - 33)], 4, \"black\")\n \nframe = simplegui.create_frame(\"Simple animation\", width, height)\nframe.set_canvas_background(\"white\")\nframe.set_draw_handler(draw_wave)\nframe.start()\n","sub_path":"simple animation new.py","file_name":"simple animation new.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"277875734","text":"def text(str):\n str = str.upper().split(\" \")\n diccionario = {}\n \n for cadena in str:\n for i in range(0, len(cadena)):\n char = cadena[i]\n if char not in diccionario:\n diccionario[char] = cadena\n elif char in diccionario and len(diccionario[char]) < len(cadena):\n diccionario[char] = cadena\n \n return diccionario\n \ntexto = \"En una manana lluviosa de octubre swirzt se cayo por las escaleras nuevas de la facultad z\"\nprint(text(texto)) ","sub_path":"1_año/ProgII/Python/p6.2/ej3.py","file_name":"ej3.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"251159652","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('groups', '0004_permission_key_marker'),\n ]\n\n operations = [\n migrations.AlterUniqueTogether(\n name='permission',\n unique_together=set([('key', 'key_marker')]),\n ),\n ]\n","sub_path":"apps/groups/migrations/0005_auto_20150710_1322.py","file_name":"0005_auto_20150710_1322.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"279659456","text":"from flask import Blueprint, request, jsonify\n\nfrom app import injector\n\ncrashes_api = Blueprint('crashes_api', 
__name__)\n\n\n@crashes_api.route(\"/\", methods=['POST'])\ndef save_crash():\n print(request.get_json())\n save_crash = injector.provide_save_crash()\n crash = save_crash.execute(request.get_json())\n return jsonify(crash)\n\n\n@crashes_api.route(\"/\", methods=['GET'])\ndef get_crashes():\n get_crashes = injector.provide_get_crashes()\n crashes = get_crashes.execute()\n return jsonify(crashes)\n","sub_path":"app/mod_api/routes/CrashesApi.py","file_name":"CrashesApi.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"55919672","text":"# -*- coding: utf-8 -*-\nimport urlparse\nimport scrapy\nfrom dailyteedeals.items import ProductItemLoader\n\n\nclass QwerteeCommon(scrapy.Spider):\n allowed_domains = [\"qwertee.com\"]\n\n def build_currency_string(self, doc, currency):\n return self.__extract_currency(doc, currency) + currency.upper()\n\n def __extract_currency(self, doc, currency):\n return doc.xpath('@data-tee-price-' + currency).extract_first()\n\n def parse_artist_page(self, response):\n item = response.meta['item']\n loader = ProductItemLoader(item, response=response)\n loader.add_xpath('artist_urls', '//div[@id=\"page-header\"]/span/text()')\n return loader.load_item()\n\n def build_image_url(self, product_id):\n return 'https://www.qwertee.com/images/designs/zoom/%s.jpg' % product_id\n\n\nclass QwerteeDealSpider(QwerteeCommon):\n name = \"qwertee_deal\"\n start_urls = ['https://www.qwertee.com']\n\n def parse(self, response):\n expiry = response.xpath('//div[@class=\"index-countdown\"]/@data-time').extract_first()\n\n for index, sel in enumerate(response.css('.big-slide.tee > div')):\n product_id = sel.xpath('@data-id').extract_first()\n\n artist_id = sel.xpath('@data-user-id').extract_first()\n currencies = [self.build_currency_string(sel, c)\n for c in ['usd', 'gbp', 'eur']]\n\n loader = ProductItemLoader(response=response, selector=sel)\n loader.add_xpath('name', '@data-name')\n loader.add_value('url', response.url)\n loader.add_value('image_url', self.build_image_url(product_id))\n loader.add_xpath('artist_name', '@data-user')\n loader.add_value('prices', ' / '.join(currencies))\n loader.add_value('last_chance', index > 2)\n loader.add_value('expires_at', expiry)\n\n request = scrapy.Request(urlparse.urljoin(response.url, '/profile/' + artist_id),\n callback=self.parse_artist_page,\n dont_filter=True)\n request.meta['item'] = loader.load_item()\n yield request\n\n\nclass QwerteeFullSpider(QwerteeCommon):\n name = \"qwertee_full\"\n start_urls = (\n 'https://www.qwertee.com/shop/all',\n )\n\n def parse(self, response):\n for sel in response.css('.tee-list-item>a'):\n product_url = sel.xpath('@href').extract_first()\n yield scrapy.Request(urlparse.urljoin(response.url, product_url),\n callback=self.__parse_product_page)\n\n def __parse_product_page(self, response):\n if '/product/' in response.url:\n product_id = response.xpath('//a[contains(@class, \"buy-button\")]/@data-id').extract_first()\n artist = response.xpath('//span[@class=\"author\"]/a')\n currency_sel = response.xpath('//p[@class=\"product-price\"]')\n currencies = [self.build_currency_string(currency_sel, c)\n for c in ['usd', 'gbp', 'eur']]\n\n loader = ProductItemLoader(response=response)\n loader.add_xpath('name', '//span[@class=\"name\"]/text()')\n loader.add_value('url', response.url)\n loader.add_value('image_url', self.build_image_url(product_id))\n loader.add_value('artist_name', artist.xpath('text()').extract())\n 
loader.add_value('prices', ' / '.join(currencies))\n loader.add_value('last_chance', response.url == 'https://www.qwertee.com/last-chance')\n\n request = scrapy.Request(urlparse.urljoin(response.url, artist.xpath('@href').extract_first()),\n callback=self.parse_artist_page,\n dont_filter=True)\n request.meta['item'] = loader.load_item()\n return request\n","sub_path":"dailyteedeals/spiders/qwertee.py","file_name":"qwertee.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"405719797","text":"from appium import webdriver\nfrom appium.webdriver.common.touch_action import TouchAction\nimport traceback\n\ndesired_Capabilities = {\n \"deviceName\": \"emulator5554\",\n \"platformName\": \"Android\",\n \"app\": \"C:\\\\Users\\\\Abraham\\\\Downloads\\\\Walmart_v19.32.1_apkpure.com.apk\",\n \"appPackage\": \"com.walmart.android\",\n \"appWaitActivity\": \"com.walmart.android.app.main.HomeActivity\",\n \"platformVersion\": \"8.0\"\n}\n\n# Create Driver\ndriver = webdriver.Remote(\"http://localhost:4723/wd/hub\", desired_Capabilities)\ndriver.implicitly_wait(30)\n\n# Getting elements of class widget.Button in a list\nuser_action = TouchAction(driver)\nelement_class_button = driver.find_elements_by_class_name(\"android.widget.Button\")\n\n# Testing elements in the buttons class list\ni = 1\nfor elements_counting in element_class_button:\n element_class_button[i].click()\n\n try:\n check_shelf_result = driver.find_element_by_id('com.walmart.android:id/shelf_result_count')\n check_shelf_result.is_enabled()\n # call search function.. EditText Class.\n\n # Check Sort and Filter\n TouchAction(driver) .press(x=475, y=862) .move_to(x=475, y=491) .release() .perform()\n driver.find_element_by_id('com.walmart.android:id/shelf_results_sort_filter').click()\n\n # Done, then exit\n driver.find_element_by_xpath('/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/androidx.drawerlayout.widget.DrawerLayout/android.widget.FrameLayout[1]/android.view.ViewGroup/android.widget.RelativeLayout/android.view.ViewGroup/androidx.appcompat.widget.LinearLayoutCompat').click()\n\n\n # Check Cart function\n driver.find_element_by_id('com.walmart.android:id/online_cart_icon').click()\n TouchAction(driver).tap(x=158, y=245).perform()\n driver.back()\n except:\n traceback.print_exc()\n i += 1 # move on to the next button\n\n # Scroll tab for the next three tabs\n if i == 3:\n while element_class_button[2].is_displayed():\n # swipe laterally continuously\n TouchAction(driver).press(x=964, y=606).move_to(x=615, y=611).release().perform()\n else:\n driver.refresh()\n","sub_path":"Button Class Script.py","file_name":"Button Class Script.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"410036228","text":"from django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom usuarios.models import *\n# Create your views here.\n\n@csrf_exempt\ndef usuarios(request):\n try:\n cookie = request.COOKIES['sessionid']\n login = {\"id\":request.session['user_id'],\"login\":request.session['login']}\n except:\n login = False\n usuarios = Usuario.objects.all().values()\n return render_to_response('usuarios.html',{\"usuarios\":usuarios,\"login\":login})\n\n\ndef home(request):\n try:\n 
if(request.COOKIES['sessionid']):\n login = request.session['login']\n if(login == False):\n return render_to_response('home.html')\n else:\n user = Usuario.objects.filter(i_d=request.session[\"user_id\"]).values()\n return render_to_response('login.html',{\"user\":user[0]})\n except:\n return render_to_response('home.html')","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"144019077","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom astropy import units as u\nfrom astropy.utils.data import get_pkg_data_filename\nfrom gammapy.catalog import (\n GammaCatResource,\n GammaCatResourceIndex,\n SourceCatalogGammaCat,\n)\nfrom gammapy.utils.testing import (\n assert_quantity_allclose,\n requires_data,\n requires_dependency,\n)\n\nSOURCES = [\n {\n \"name\": \"Vela X\",\n \"str_ref_file\": \"data/gammacat_vela_x.txt\",\n \"spec_type\": \"ecpl\",\n \"dnde_1TeV\": 1.36e-11 * u.Unit(\"cm-2 s-1 TeV-1\"),\n \"dnde_1TeV_err\": 7.531e-13 * u.Unit(\"cm-2 s-1 TeV-1\"),\n \"flux_1TeV\": 2.104e-11 * u.Unit(\"cm-2 s-1\"),\n \"flux_1TeV_err\": 1.973e-12 * u.Unit(\"cm-2 s-1\"),\n \"eflux_1_10TeV\": 9.265778680255336e-11 * u.Unit(\"erg cm-2 s-1\"),\n \"eflux_1_10TeV_err\": 9.590978299538194e-12 * u.Unit(\"erg cm-2 s-1\"),\n \"n_flux_points\": 24,\n \"is_pointlike\": False,\n \"spatial_model\": \"GaussianSpatialModel\",\n \"ra\": 128.287003,\n \"dec\": -45.189999,\n },\n {\n \"name\": \"HESS J1848-018\",\n \"str_ref_file\": \"data/gammacat_hess_j1848-018.txt\",\n \"spec_type\": \"pl\",\n \"dnde_1TeV\": 3.7e-12 * u.Unit(\"cm-2 s-1 TeV-1\"),\n \"dnde_1TeV_err\": 4e-13 * u.Unit(\"cm-2 s-1 TeV-1\"),\n \"flux_1TeV\": 2.056e-12 * u.Unit(\"cm-2 s-1\"),\n \"flux_1TeV_err\": 3.187e-13 * u.Unit(\"cm-2 s-1\"),\n \"eflux_1_10TeV\": 6.235650344765057e-12 * u.Unit(\"erg cm-2 s-1\"),\n \"eflux_1_10TeV_err\": 1.2210315515569183e-12 * u.Unit(\"erg cm-2 s-1\"),\n \"n_flux_points\": 11,\n \"is_pointlike\": False,\n \"spatial_model\": \"GaussianSpatialModel\",\n \"ra\": 282.119995,\n \"dec\": -1.792,\n },\n {\n \"name\": \"HESS J1813-178\",\n \"str_ref_file\": \"data/gammacat_hess_j1813-178.txt\",\n \"spec_type\": \"pl2\",\n \"dnde_1TeV\": 2.678e-12 * u.Unit(\"cm-2 s-1 TeV-1\"),\n \"dnde_1TeV_err\": 2.55e-13 * u.Unit(\"cm-2 s-1 TeV-1\"),\n \"flux_1TeV\": 2.457e-12 * u.Unit(\"cm-2 s-1\"),\n \"flux_1TeV_err\": 3.692e-13 * u.Unit(\"cm-2 s-1\"),\n \"eflux_1_10TeV\": 8.923614018939419e-12 * u.Unit(\"erg cm-2 s-1\"),\n \"eflux_1_10TeV_err\": 1.4613807070890267e-12 * u.Unit(\"erg cm-2 s-1\"),\n \"n_flux_points\": 13,\n \"is_pointlike\": False,\n \"spatial_model\": \"GaussianSpatialModel\",\n \"ra\": 273.362915,\n \"dec\": -17.84889,\n },\n]\n\n\n@pytest.fixture(scope=\"session\")\ndef gammacat():\n filename = \"$GAMMAPY_DATA/catalogs/gammacat/gammacat.fits.gz\"\n return SourceCatalogGammaCat(filename=filename)\n\n\n@requires_data()\nclass TestSourceCatalogGammaCat:\n def test_source_table(self, gammacat):\n assert gammacat.name == \"gamma-cat\"\n assert len(gammacat.table) == 162\n\n def test_positions(self, gammacat):\n assert len(gammacat.positions) == 162\n\n def test_w28_alias_names(self, gammacat):\n for name in [\n \"W28\",\n \"HESS J1801-233\",\n \"W 28\",\n \"SNR G6.4-0.1\",\n \"SNR G006.4-00.1\",\n \"GRO J1801-2320\",\n ]:\n assert gammacat[name].index == 112\n\n def test_sort_table(self, gammacat):\n 
name = \"HESS J1848-018\"\n sort_keys = [\"ra\", \"dec\", \"reference_id\"]\n for sort_key in sort_keys:\n # this test modifies the catalog, so we make a copy\n cat = gammacat.copy()\n cat.table.sort(sort_key)\n assert cat[name].name == name\n\n def test_to_sky_models(self, gammacat):\n sources = gammacat.to_sky_models()\n source = sources.skymodels[0]\n\n assert len(sources.skymodels) == 74\n assert source.name == \"CTA 1\"\n assert_allclose(source.spectral_model.parameters[\"index\"].value, 2.2)\n\n\n@requires_data()\nclass TestSourceCatalogObjectGammaCat:\n def test_data(self, gammacat):\n source = gammacat[0]\n\n assert isinstance(source.data, dict)\n assert source.data[\"common_name\"] == \"CTA 1\"\n assert_quantity_allclose(source.data[\"dec\"], 72.782997 * u.deg)\n\n @pytest.mark.parametrize(\"ref\", SOURCES, ids=lambda _: _[\"name\"])\n def test_str(self, gammacat, ref):\n actual = str(gammacat[ref[\"name\"]])\n expected = open(get_pkg_data_filename(ref[\"str_ref_file\"])).read()\n assert actual == expected\n\n def test_data_python_dict(self, gammacat):\n source = gammacat[0]\n data = source._data_python_dict\n assert type(data[\"ra\"]) == float\n assert data[\"ra\"] == 1.649999976158142\n assert type(data[\"sed_e_min\"]) == list\n assert type(data[\"sed_e_min\"][0]) == float\n assert_allclose(data[\"sed_e_min\"][0], 0.5600000023841858)\n\n @pytest.mark.parametrize(\"ref\", SOURCES, ids=lambda _: _[\"name\"])\n def test_spectral_model(self, gammacat, ref):\n source = gammacat[ref[\"name\"]]\n spectral_model = source.spectral_model\n\n assert source.data[\"spec_type\"] == ref[\"spec_type\"]\n\n e_min, e_max, e_inf = [1, 10, 1e10] * u.TeV\n\n dne = spectral_model(e_min)\n flux = spectral_model.integral(emin=e_min, emax=e_inf)\n eflux = spectral_model.energy_flux(emin=e_min, emax=e_max).to(\"erg cm-2 s-1\")\n\n assert_quantity_allclose(dne, ref[\"dnde_1TeV\"], rtol=1e-3)\n assert_quantity_allclose(flux, ref[\"flux_1TeV\"], rtol=1e-3)\n assert_quantity_allclose(eflux, ref[\"eflux_1_10TeV\"], rtol=1e-3)\n\n @requires_dependency(\"uncertainties\")\n @pytest.mark.parametrize(\"ref\", SOURCES, ids=lambda _: _[\"name\"])\n def test_spectral_model_err(self, gammacat, ref):\n source = gammacat[ref[\"name\"]]\n spectral_model = source.spectral_model\n\n e_min, e_max, e_inf = [1, 10, 1e10] * u.TeV\n\n dnde, dnde_err = spectral_model.evaluate_error(e_min)\n flux, flux_err = spectral_model.integral_error(emin=e_min, emax=e_inf)\n eflux, eflux_err = spectral_model.energy_flux_error(emin=e_min, emax=e_max).to(\n \"erg cm-2 s-1\"\n )\n\n assert_quantity_allclose(dnde, ref[\"dnde_1TeV\"], rtol=1e-3)\n assert_quantity_allclose(flux, ref[\"flux_1TeV\"], rtol=1e-3)\n assert_quantity_allclose(eflux, ref[\"eflux_1_10TeV\"], rtol=1e-3)\n\n assert_quantity_allclose(dnde_err, ref[\"dnde_1TeV_err\"], rtol=1e-3)\n assert_quantity_allclose(flux_err, ref[\"flux_1TeV_err\"], rtol=1e-3)\n assert_quantity_allclose(eflux_err, ref[\"eflux_1_10TeV_err\"], rtol=1e-3)\n\n @pytest.mark.parametrize(\"ref\", SOURCES, ids=lambda _: _[\"name\"])\n def test_flux_points(self, gammacat, ref):\n source = gammacat[ref[\"name\"]]\n\n flux_points = source.flux_points\n\n assert len(flux_points.table) == ref[\"n_flux_points\"]\n\n @pytest.mark.parametrize(\"ref\", SOURCES, ids=lambda _: _[\"name\"])\n def test_position(self, gammacat, ref):\n source = gammacat[ref[\"name\"]]\n\n position = source.position\n\n assert_allclose(position.ra.deg, ref[\"ra\"], atol=1e-3)\n assert_allclose(position.dec.deg, ref[\"dec\"], atol=1e-3)\n\n 
@pytest.mark.parametrize(\"ref\", SOURCES, ids=lambda _: _[\"name\"])\n def test_spatial_model(self, gammacat, ref):\n source = gammacat[ref[\"name\"]]\n\n spatial_model = source.spatial_model\n assert spatial_model.frame == \"galactic\"\n\n # TODO: put better asserts on model properties\n # TODO: add a point and shell source -> separate list of sources for morphology test parametrization?\n assert spatial_model.__class__.__name__ == ref[\"spatial_model\"]\n\n assert source.is_pointlike == ref[\"is_pointlike\"]\n\n @pytest.mark.parametrize(\"ref\", SOURCES, ids=lambda _: _[\"name\"])\n def test_sky_model(self, gammacat, ref):\n gammacat[ref[\"name\"]].sky_model\n\n\nclass TestGammaCatResource:\n def setup(self):\n self.resource = GammaCatResource(\n source_id=42, reference_id=\"2010A&A...516A..62A\", file_id=2\n )\n self.global_id = \"42|2010A&A...516A..62A|2|none\"\n\n def test_global_id(self):\n assert self.resource.global_id == self.global_id\n\n def test_eq(self):\n resource1 = self.resource\n resource2 = GammaCatResource(source_id=42, reference_id=\"2010A&A...516A..62A\")\n\n assert resource1 == resource1\n assert resource1 != resource2\n\n def test_lt(self):\n resource = GammaCatResource(\n source_id=42, reference_id=\"2010A&A...516A..62A\", file_id=2\n )\n\n assert not resource < resource\n\n assert resource < GammaCatResource(\n source_id=43, reference_id=\"2010A&A...516A..62A\", file_id=2\n )\n assert resource < GammaCatResource(\n source_id=42, reference_id=\"2010A&A...516A..62B\", file_id=2\n )\n assert resource < GammaCatResource(\n source_id=42, reference_id=\"2010A&A...516A..62A\", file_id=3\n )\n\n assert resource > GammaCatResource(\n source_id=41, reference_id=\"2010A&A...516A..62A\", file_id=2\n )\n\n def test_repr(self):\n expected = (\n \"GammaCatResource(source_id=42, reference_id='2010A&A...516A..62A', \"\n \"file_id=2, type='none', location='none')\"\n )\n assert repr(self.resource) == expected\n\n def test_to_dict(self):\n expected = {\n \"source_id\": 42,\n \"reference_id\": \"2010A&A...516A..62A\",\n \"file_id\": 2,\n \"type\": \"none\",\n \"location\": \"none\",\n }\n assert self.resource.to_dict() == expected\n\n def test_dict_roundtrip(self):\n actual = GammaCatResource.from_dict(self.resource.to_dict())\n assert actual == self.resource\n\n\nclass TestGammaCatResourceIndex:\n def setup(self):\n self.resource_index = GammaCatResourceIndex(\n [\n GammaCatResource(source_id=99, reference_id=\"2014ApJ...780..168A\"),\n GammaCatResource(\n source_id=42,\n reference_id=\"2010A&A...516A..62A\",\n file_id=2,\n type=\"sed\",\n ),\n GammaCatResource(\n source_id=42, reference_id=\"2010A&A...516A..62A\", file_id=1\n ),\n ]\n )\n\n def test_repr(self):\n assert repr(self.resource_index) == \"GammaCatResourceIndex(n_resources=3)\"\n\n def test_eq(self):\n resource_index1 = self.resource_index\n resource_index2 = GammaCatResourceIndex(resource_index1.resources[:-1])\n\n assert resource_index1 == resource_index1\n assert resource_index1 != resource_index2\n\n def test_unique_source_ids(self):\n expected = [42, 99]\n assert self.resource_index.unique_source_ids == expected\n\n def test_unique_reference_ids(self):\n expected = [\"2010A&A...516A..62A\", \"2014ApJ...780..168A\"]\n assert self.resource_index.unique_reference_ids == expected\n\n def test_global_ids(self):\n expected = [\n \"99|2014ApJ...780..168A|-1|none\",\n \"42|2010A&A...516A..62A|2|sed\",\n \"42|2010A&A...516A..62A|1|none\",\n ]\n assert self.resource_index.global_ids == expected\n\n def test_sort(self):\n 
expected = [\n \"42|2010A&A...516A..62A|1|none\",\n \"42|2010A&A...516A..62A|2|sed\",\n \"99|2014ApJ...780..168A|-1|none\",\n ]\n assert self.resource_index.sort().global_ids == expected\n\n def test_to_list(self):\n result = self.resource_index.to_list()\n assert isinstance(result, list)\n assert len(result) == 3\n\n def test_list_roundtrip(self):\n data = self.resource_index.to_list()\n actual = GammaCatResourceIndex.from_list(data)\n assert actual == self.resource_index\n\n def test_to_table(self):\n table = self.resource_index.to_table()\n assert len(table) == 3\n assert table.colnames == [\n \"source_id\",\n \"reference_id\",\n \"file_id\",\n \"type\",\n \"location\",\n ]\n\n def test_table_roundtrip(self):\n table = self.resource_index.to_table()\n actual = GammaCatResourceIndex.from_table(table)\n assert actual == self.resource_index\n\n @requires_dependency(\"pandas\")\n def test_to_pandas(self):\n df = self.resource_index.to_pandas()\n df2 = df.query(\"source_id == 42\")\n assert len(df2) == 2\n\n @requires_dependency(\"pandas\")\n def test_pandas_roundtrip(self):\n df = self.resource_index.to_pandas()\n actual = GammaCatResourceIndex.from_pandas(df)\n assert actual == self.resource_index\n\n @requires_dependency(\"pandas\")\n def test_query(self):\n resource_index = self.resource_index.query('type == \"sed\" and source_id == 42')\n assert len(resource_index.resources) == 1\n assert resource_index.resources[0].global_id == \"42|2010A&A...516A..62A|2|sed\"\n","sub_path":"gammapy/catalog/tests/test_gammacat.py","file_name":"test_gammacat.py","file_ext":"py","file_size_in_byte":12859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"636471629","text":"from bs4 import BeautifulSoup\nimport requests\nfrom reqStatus import requestProduct\nfrom openpyxl import load_workbook, Workbook\n\ndef makeQoo10Product(productNumber):\n productJson = requestProduct(productNumber)\n\n sellerCode = productJson['optimus_id']\n status = 'S2' # S1 pending / S2 available / S4 discontinued\n twoCatCode = '300000704' # category code \n itemName = productJson['name'] # max 100 characters\n itemDescription = productJson['html_content']\n shortTitle = productJson['name'] # max 20 characters\n itemDetailHeader = '브리치 헤더'\n itemDetailFooter = '브리치 푸터'\n\n tags = []\n tagLists = productJson['manual_tags']\n for tag in tagLists:\n tags.append(tag['name'])\n print(tags)\n \n briefDescription = '상품 간략설명' # keyword matching with Brich keywords for external search \n imageURL = productJson['images'][0]['url']\n print(imageURL)\n sellPrice = productJson['discounted_price']\n sellQty = '재고수량' # ['option_groups]['options] -> for array ['place_stock]['stock]\n shippingGroupNo = '배송정책 번호' # Null = free shipping\n itemWeight = '상품무게' # matched per category\n optionTitle = []\n for i in range(1,5):\n optTitle = 'title' + str(i)\n if productJson['option_groups'][0][optTitle] == None:\n continue\n else:\n optionTitle.append(productJson['option_groups'][0][optTitle])\n print(optionTitle)\n optionResults = []\n valueLists = []\n for value in productJson['option_groups'][0]['options']:\n optionResult = []\n optimusID = value['optimus_id']\n stock = value['place_stock']['stock']\n optionPrice = value['price']\n for idx, title in enumerate(optionTitle):\n # print(title, idx)\n optValue = 'value' + str(idx +1)\n optionResult.append(title)\n optionResult.append(value[optValue])\n optionResult.append(optionPrice) \n optionResult.append(stock)\n optionResult.append(optimusID)\n optionRow = '||*'.join(map(str, optionResult))\n optionResults.append(optionRow)\n inventoryInfo = 
'$$'.join(optionResults) # inventory list\n print(inventoryInfo)\n \n makerNo = productJson['provider']['optimus_id'] # maker number\n brandNo = productJson['main_brand']['code'] # brand code\n productModelName = productJson['custom_code'] # product model number\n retailPrice = productJson['price'] #..\n originType = '2' # overseas\n placeOfOrigin = '대한민국' # country name\n industrialCode = '산업코드' #JAN CODE\n itemCondition = '1' # 1 = new product\n manufactureDate = '' # manufacture date, YYYY/MM\n adultProduct = 'N'\n asInfo = '' # A/S info, shared across Brich\n availableDate = '14' # estimated shipping is 14 days\n gift = ''\n\n subImages = productJson['sub_images']\n subImgList = []\n for subImg in subImages:\n subImgList.append(subImg['url'])\n additionalItemImage = '$$'.join(subImgList) # additional product images\n print(additionalItemImage)\n\n inventoryCoverImage = ''\n multiShippingRate = '' # option shipping fee code\n\nmakeQoo10Product(756041703)","sub_path":"bflowToQoo10.py","file_name":"bflowToQoo10.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"613043017","text":"import json\nfrom napalm import get_network_driver\ndriver = get_network_driver('ios')\nl2 = driver('10.99.99.11', 'sntuser', 'Ilovenetworks99', optional_args={\"secret\": \"cisco\"})\nl2.open()\n \nios_output = l2.get_facts()\n# Doesn't look nice\nprint (\"NOT NICE\\n\")\nprint (ios_output) \nprint (\"\\nNICE\\n\")\nprint (json.dumps(ios_output, indent=4))\nl2.close()\n","sub_path":"12 napalm/napalm1.py","file_name":"napalm1.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"122308154","text":"import sys\nimport math\n\nmultiplication = {\n '1': {\n '1': (1, '1'),\n 'i': (1, 'i'),\n 'j': (1, 'j'),\n 'k': (1, 'k')\n },\n 'i': {\n '1': (1, 'i'),\n 'i': (-1, '1'),\n 'j': (1, 'k'),\n 'k': (-1, 'j')\n },\n 'j': {\n '1': (1, 'j'),\n 'i': (-1, 'k'),\n 'j': (-1, '1'),\n 'k': (1, 'i')\n },\n 'k': {\n '1': (1, 'k'),\n 'i': (1, 'j'),\n 'j': (-1, 'i'),\n 'k': (-1, '1')\n }\n}\n\n\ndef multiply(number, l):\n sign, sym = multiplication[number[1]][l]\n return (number[0] * sign, sym)\n\n\ndef find(s, index, ijk, max_length):\n length = len(s)\n value = (1, '1')\n\n while index < max_length:\n value = multiply(value, s[index % length])\n index += 1\n if value == (1, ijk): break\n\n return index\n\n\ndef solve(repeat, s):\n length = len(s)\n four_length = 4 * length\n\n index = find(s, 0, 'i', four_length)\n if four_length == index: return 'NO'\n\n index = find(s, index, 'j', four_length * 2)\n if four_length * 2 == index: return 'NO'\n\n index = find(s, index, 'k', four_length * 3)\n if four_length * 3 == index: return 'NO'\n\n if repeat <= (index - 1) // length: return 'NO'\n\n value = (1, '1')\n remaining = (repeat - 1 - (index - 1) // length) % 4\n while index % length != 0:\n value = multiply(value, s[index % length])\n index += 1\n for i in range(remaining * length):\n value = multiply(value, s[index % length])\n index += 1\n\n return 'YES' if value == (1, '1') else 'NO'\n\n\nif __name__ == \"__main__\":\n testcases = int(input())\n\n for caseNr in range(1, testcases + 1):\n L, X = [int(x) for x in input().split(' ')]\n S = input()\n print(\"Case #%i: %s\" % (caseNr, solve(X, S)))","sub_path":"solutions_5670465267826688_1/Python/zsong/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"161790536","text":"\n\ndef 
profile_action_handler_page_profile_edit(context, action, entity_id, profile_bundle, **args):\n\t\n\tentitier = IN.entitier\n\t\n\tnabar = entitier.load_single('Nabar', entity_id)\n\t\n\tif not nabar:\n\t\tcontext.not_found()\n\t\n\tlogged_in_nabar_id = context.nabar.id\n\t\n\tif logged_in_nabar_id == nabar.id:\n\t\tcontext.access('nabar_edit_profile_own', nabar, True)\n\telse:\n\t\tcontext.access('nabar_edit_profile_other', nabar, True)\n\t\n\tif profile_bundle not in entitier.entity_bundle['Profile']:\n\t\tcontext.not_found()\n\t\n\tcontent = Object.new('TextDiv', {\n\t\t'css' : ['i-grid i-grid-small'],\n\t})\n\t\n\ttab = content.add('Ul', {\n\t\t'css' : ['i-tab i-tab-left i-width-medium-1-4']\n\t})\n\t\n\tif 'Profile' in entitier.entity_bundle:\n\t\tprofile_config = entitier.entity_bundle['Profile']\n\t\tfor bundle in sorted(profile_config.keys(), key = lambda o:o):\n\t\t\tbundle_conf = profile_config[bundle]\n\t\t\ttab.add('Li', {\n\t\t\t\t'css' : ['i-active' if profile_bundle == bundle else '']\n\t\t\t}).add('Link', {\n\t\t\t\t'value' : bundle_conf['data']['title'],\n\t\t\t\t'href' : ''.join(('/nabar/', str(entity_id), '/edit/profile/!' + bundle))\n\t\t\t})\n\t\n\t\n\tprofile_tab = content.add('TextDiv', {\n\t\t'css' : ['i-width-medium-3-4'],\n\t})\n\t\n\t\n\t# add profile entity edit form\n\t\n\t# check if profile exists\t\n\tprofiles = In.profile.nabar_profile(nabar.id, profile_bundle)\n\t\n\tif profiles:\n\t\tform = IN.former.load('ProfileEditForm', args = {\n\t\t\t'data' : {\n\t\t\t\t'id' : '-'.join(('ProfileEditForm', str(nabar.id))),\n\t\t\t\t'entity_type' : 'Profile',\n\t\t\t\t'entity_bundle' : profile_bundle,\n\t\t\t\t'entity_id' : next(iter(profiles.keys())),\n\t\t\t},\n\t\t\t'nabar_id' : nabar.id,\n\t\t})\n\t\tprofile_tab.add(form)\n\telse:\n\t\tform = IN.former.load('ProfileAddForm', args = {\n\t\t\t'data' : {\n\t\t\t\t'id' : '-'.join(('ProfileAddForm', str(nabar.id))),\n\t\t\t\t'entity_type' : 'Profile',\n\t\t\t\t'entity_bundle' : profile_bundle,\n\t\t\t},\n\t\t\t'nabar_id' : nabar.id,\n\t\t\t'entity_type' : 'Profile',\n\t\t\t'entity_bundle' : profile_bundle,\t\t\t\n\t\t})\n\t\tprofile_tab.add(form)\n\t\t\n\tcontext.response.output.add(content)\n\t\n","sub_path":"In/profile/page/profile_edit.py","file_name":"profile_edit.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"578590360","text":"import json\n\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom creator.models import (\n TemporaryProduct,\n TemporaryProductImage,\n TemporaryChapter,\n TemporaryLecture,\n TemporaryLectureContent,\n TemporaryLectureContentDescription,\n TemporaryLectureContentImage,\n TemporaryKit,\n TemporaryKitImage\n ) \nfrom product.models import ( \n MainCategory,\n SubCategory,\n Difficulty,\n LectureContent,\n LectureContentDescription,\n LectureContentImageUrl,\n ProductSubImage,\n Chapter,\n Lecture,\n Product,\n LectureVideo\n )\nfrom kit.models import Kit, KitSubImageUrl\nfrom core import S3FileManager, random_number_generator\nfrom core.utils import login_decorator\nfrom clnass_101.settings import S3_BUCKET_URL\n\nclass FirstTemporaryView(View):\n @login_decorator()\n def get(self, request, temporary_id):\n user = request.user \n categories = MainCategory.objects.filter(\n Q(name='크리에이티브') |\n Q(name='커리어') |\n 
Q(name='머니')).prefetch_related('subcategory_set')\n \n category_list = [{\n 'id' : category.id,\n 'name' : category.name,\n 'subCategories' : [{\n 'id' : sub.id, \n 'name' : sub.name\n } for sub in category.subcategory_set.all()]\n } for category in categories]\n \n if TemporaryProduct.objects.filter(id=temporary_id, user=user).exists():\n temp = TemporaryProduct.objects.prefetch_related('temporaryproductimage_set').get(id=temporary_id, user=user)\n temp_info = {\n 'mainCategoryId' : temp.main_category_id,\n 'subCategoryId' : temp.sub_category_id,\n 'difficultyId' : temp.difficulty_id,\n 'name' : temp.name,\n 'price' : temp.price,\n 'sale' : temp.sale,\n 'images' : [S3_BUCKET_URL + image.image_url for image in temp.temporaryproductimage_set.all()]\n }\n else:\n temp_info = None\n\n return JsonResponse({\n 'categories' : category_list,\n 'difficulties' : [difficulty for difficulty in Difficulty.objects.values()],\n 'temporaryInformation' : temp_info}, status=200)\n \n @login_decorator()\n @transaction.atomic\n def post(self, request, temporary_id):\n try:\n data = json.loads(request.POST['body'])\n user = request.user\n images = request.FILES.getlist('files') \n\n # Per the front-end's request, match by name rather than by id\n category = MainCategory.objects.get(name=data['categoryName'])\n sub_category = SubCategory.objects.get(name=data['subCategoryName'])\n difficulty = Difficulty.objects.get(name=data['difficultyName'])\n\n temp = TemporaryProduct.objects.update_or_create(\n id = temporary_id,\n user = user,\n defaults = {\n 'main_category' : category,\n 'sub_category' : sub_category,\n 'name' : data['name'],\n 'price' : data['price'],\n 'sale' : data['sale'],\n 'difficulty' : difficulty\n }\n )[0]\n \n # Delete the existing images\n exist_images = TemporaryProductImage.objects.filter(temporary_product=temp)\n for image in exist_images:\n url = image.image_url\n S3FileManager().file_delete(url)\n image.delete()\n \n # Insert the new images\n for image in images:\n file_name = 'images/' + random_number_generator()\n url = S3FileManager().file_upload(image, file_name)\n TemporaryProductImage.objects.create(\n temporary_product = temp,\n image_url = url\n )\n\n return JsonResponse({'message':'SUCCESS'}, status=200)\n\n except ObjectDoesNotExist as e:\n return JsonResponse({'message':str(e)}, status=400)\n except KeyError:\n return JsonResponse({'message':'KEY_ERROR'}, status=400)\n\nclass SecondTemporaryView(View):\n @login_decorator()\n def get(self, request, temporary_id):\n chapters = TemporaryChapter.objects.filter(temporary_product_id=temporary_id).prefetch_related('temporarylecture_set')\n \n return JsonResponse({\n 'chapters' : [{\n 'chapterId' : chapter.id,\n 'name' : chapter.name,\n 'mainImage' : S3_BUCKET_URL + chapter.thumbnail_image if chapter.thumbnail_image else None,\n 'lectures' : [{\n 'name' : lecture.name,\n 'order' : lecture.order\n } for lecture in chapter.temporarylecture_set.all()]\n } for chapter in chapters]\n }, status=200)\n \n @login_decorator()\n @transaction.atomic\n def post(self, request, temporary_id):\n try:\n data = json.loads(request.POST['body'])\n images = request.FILES.getlist('files')\n\n if not TemporaryProduct.objects.filter(id=temporary_id).exists():\n return JsonResponse({'message':'TEMPORARY_PRODUCT_DOES_NOT_EXIST'}, status=404)\n\n chapters = data['chapters']\n \n temps = TemporaryChapter.objects.filter(temporary_product_id=temporary_id)\n for temp in temps:\n # Delete the existing images\n if temp.thumbnail_image:\n url = temp.thumbnail_image\n S3FileManager().file_delete(url)\n temp.delete()\n\n lectures = []\n for i, 
chapter in enumerate(chapters, start=1):\n temp = TemporaryChapter.objects.create(\n temporary_product_id = temporary_id,\n order = i,\n name = chapter['name'] \n )\n \n if images:\n image = images.pop(0)\n file_name = 'images/' + random_number_generator()\n url = S3FileManager().file_upload(image, file_name)\n temp.thumbnail_image = url \n temp.save()\n \n lectures += [{\n 'name' : lecture.get('name'),\n 'chapter' : temp.name,\n 'chapter_id' : temp.id\n } for lecture in chapter['lectures']]\n \n # 다음 chapter로 바뀌면 order를 1로 변환\n order = 1\n chapter_name = None\n for lecture in lectures:\n if lecture['chapter'] != chapter_name:\n order = 1\n chapter_name = lecture['chapter']\n\n TemporaryLecture.objects.create(\n temporary_product_id = temporary_id,\n temporary_chapter_id = lecture['chapter_id'],\n order = order,\n name = lecture['name']\n )\n order += 1\n\n return JsonResponse({'message':'SUCCESS'}, status=200)\n\n except KeyError:\n return JsonResponse({'message':'KEY_ERROR'}, status=400)\n\nclass ThirdTemporaryView(View):\n @login_decorator()\n def get(self, request, temporary_id):\n chapters = TemporaryChapter.objects.filter(temporary_product_id=temporary_id).prefetch_related(\n 'temporarylecture_set__temporarylecturecontent_set',\n 'temporarylecture_set__temporarylecturecontent_set__image',\n 'temporarylecture_set__temporarylecturecontent_set__description'\n )\n\n return JsonResponse({\n 'products' : [{\n 'chapter_id' : chapter.id,\n 'chapterName' : chapter.name,\n 'chapterOrder' : chapter.order,\n 'lectures' : [{\n 'lecture_id' : lecture.id,\n 'name' : lecture.name,\n 'videoUrl' : S3_BUCKET_URL + lecture.video_url if lecture.video_url else None,\n 'order' : lecture.order,\n 'content' : [{\n 'image' : S3_BUCKET_URL + content.image.image_url if content.image.image_url else None,\n 'description' : content.description.description,\n 'order' : content.order\n } for content in lecture.temporarylecturecontent_set.all()]\n } for lecture in chapter.temporarylecture_set.all()]\n } for chapter in chapters]\n })\n \n @login_decorator()\n @transaction.atomic\n def post(self, request, temporary_id):\n try:\n data = json.loads(request.POST['body'])\n\n if not TemporaryProduct.objects.filter(id=temporary_id).exists():\n return JsonResponse({'message':'TEMPORARY_PRODUCT_DOES_NOT_EXIST'}, status=404)\n \n for lecture in data['lectures']:\n if not TemporaryLecture.objects.filter(id=lecture['lecture_id']).exists():\n return JsonResponse({'message':'TEMPORARY_LECTURE_DOES_NOT_EXIST'}, status=404)\n\n temp = TemporaryProduct.objects.prefetch_related(\n 'temporarylecturecontent_set',\n 'temporarylecturecontentimage_set', \n 'temporarylecturecontentdescription_set'\n ).get(id=temporary_id)\n\n # 기존 이미지 제거\n images = temp.temporarylecturecontentimage_set.all()\n for image in images:\n S3FileManager().file_delete(image.image_url)\n image.delete()\n \n # 기존 글 제거\n temp.temporarylecturecontentdescription_set.all().delete()\n \n # 기존 글그림 연결 제거\n temp.temporarylecturecontent_set.all().delete()\n \n videos = request.FILES.getlist('videos')\n images = request.FILES.getlist('images')\n\n for lecture in data['lectures']:\n temp = TemporaryLecture.objects.get(id=lecture['lecture_id'])\n\n # 기존 비디오 제거\n if temp.video_url:\n url = temp.video_url\n S3FileManager().file_delete(url)\n \n # 비디오 삽입\n if videos:\n video = videos.pop(0)\n file_name = 'videos/' + random_number_generator()\n url = S3FileManager().file_upload(video, file_name)\n temp.video_url = url\n temp.save()\n \n for i, content in 
enumerate(lecture['contents'], start=1):\n # 이미지 생성\n if images:\n image = images.pop(0)\n file_name = 'images/' + random_number_generator()\n url = S3FileManager().file_upload(image, file_name)\n image = TemporaryLectureContentImage.objects.create(\n temporary_lecture = temp,\n image_url = url,\n temporary_product_id = temporary_id\n )\n \n # 글생성\n description = TemporaryLectureContentDescription.objects.create(\n temporary_lecture = temp,\n description = content['description'],\n temporary_product_id = temporary_id\n )\n \n # 글 그림 연결\n TemporaryLectureContent.objects.create(\n order = i,\n image = image,\n description = description,\n temporary_lecture = temp,\n temporary_product_id = temporary_id\n )\n return JsonResponse({'message':'SUCCESS'},status=200)\n\n except KeyError:\n return JsonResponse({'message':'KEY_ERROR'}, status=400)\n\nclass FourthTemporaryView(View):\n @login_decorator()\n def get(self, request, temporary_id):\n kits = TemporaryKit.objects.filter(temporary_product_id=temporary_id).prefetch_related('temporarykitimage_set')\n\n return JsonResponse({\n 'kits' : [{\n 'id' : kit.id,\n 'name' : kit.name,\n 'imageUrls' : [S3_BUCKET_URL + image.image_url for image in kit.temporarykitimage_set.all()]\n } for kit in kits]\n })\n\n @login_decorator()\n @transaction.atomic\n def post(self, request, temporary_id):\n try:\n if not TemporaryProduct.objects.filter(id=temporary_id).exists():\n return JsonResponse({'message':'TEMPORARY_PRODUCT_DOES_NOT_EXIST'}, status=404)\n\n data = json.loads(request.POST['body'])\n images = request.FILES.getlist('files')\n\n # 기존 이미지 삭제\n temp_images = TemporaryKitImage.objects.filter(temporary_kit__temporary_product_id=temporary_id)\n for image in temp_images:\n S3FileManager().file_delete(image.image_url)\n\n # 기존 키트 삭제\n TemporaryKit.objects.filter(temporary_product_id=temporary_id).delete()\n \n # 키트 생성\n kits = [kit.get('name') for kit in data['kits']]\n for kit in kits:\n temp = TemporaryKit.objects.create(name=kit, temporary_product_id=temporary_id)\n\n if images:\n image = images.pop(0)\n file_name = 'images/' + random_number_generator()\n url = S3FileManager().file_upload(image, file_name)\n TemporaryKitImage.objects.create(image_url=url, temporary_kit=temp, temporary_product_id=temporary_id)\n\n return JsonResponse({'message':'SUCCESS'}, status=200)\n \n except KeyError:\n return JsonResponse({'message':'KEY_ERROR'}, status=400)\n \nclass CreateTemporaryView(View):\n @login_decorator()\n @transaction.atomic\n def post(self, request, temporary_id):\n user = request.user\n\n if not TemporaryProduct.objects.filter(id=temporary_id).exists():\n return JsonResponse({'message':'TEMPORARY_PRODUCT_DOES_NOT_EXIST'}, status=404)\n\n temp = TemporaryProduct.objects.prefetch_related(\n 'temporaryproductimage_set',\n 'temporarylecture_set',\n 'temporarylecturecontent_set',\n 'temporarykitimage_set'\n ).get(id=temporary_id)\n\n product_images = [image for image in temp.temporaryproductimage_set.all()]\n lectures = [lecture for lecture in temp.temporarylecture_set.all()]\n contents = [content for content in temp.temporarylecturecontent_set.all()]\n kit_images = [image for image in temp.temporarykitimage_set.all()]\n\n # 강의 개설\n product = Product.objects.create(\n name = temp.name, \n price = temp.price, \n sale = temp.sale,\n difficulty = temp.difficulty,\n main_category = temp.main_category,\n sub_category = temp.sub_category,\n start_date = timezone.now(),\n creator = user,\n thumbnail_image = product_images[0].image_url\n )\n \n # sub image\n for image 
in product_images:\n ProductSubImage.objects.create(product=product, image_url=image.image_url)\n\n # lecture\n for lecture in lectures:\n if Chapter.objects.filter(name=lecture.temporary_chapter.name, product=product).exists():\n chapter = Chapter.objects.get(name=lecture.temporary_chapter.name, product=product)\n lecture_video = LectureVideo.objects.create(video_url=lecture.video_url)\n Lecture.objects.create(name=lecture.name, product=product, chapter=chapter, order=lecture.order, video=lecture_video)\n else:\n new_chapter = Chapter.objects.create(\n name = lecture.temporary_chapter.name,\n thumbnail_image = lecture.temporary_chapter.thumbnail_image,\n order = lecture.temporary_chapter.order,\n product = product\n )\n lecture_video = LectureVideo.objects.create(video_url=lecture.video_url)\n Lecture.objects.create(name=lecture.name, product=product, chapter=new_chapter, order=lecture.order, video=lecture_video)\n\n # lecture 내용\n for content in contents:\n image_url = content.image.image_url\n description = content.description.description\n order = content.order\n lecture = Lecture.objects.get(name=content.temporary_lecture.name, product=product)\n\n new_description = LectureContentDescription.objects.create(description=description)\n new_image_url = LectureContentImageUrl.objects.create(image_url=image_url)\n LectureContent.objects.create(description=new_description, image_url=new_image_url, order=order, lecture=lecture, product=product)\n \n # kit \n for image in kit_images:\n if Kit.objects.filter(name=image.temporary_kit.name).exists():\n kit = Kit.objects.get(name=image.temporary_kit.name)\n KitSubImageUrl.objects.create(kit=kit, image_url=image.image_url)\n product.kit.add(kit)\n else:\n new_kit = Kit.objects.create(name=image.temporary_kit.name, main_image_url=image.image_url)\n KitSubImageUrl.objects.create(kit=new_kit, image_url=image.image_url)\n product.kit.add(new_kit)\n\n # temp 삭제\n temp.delete()\n\n return JsonResponse({'message':'SUCCESS'},status=200)\n","sub_path":"creator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"427092178","text":"# Client\n\nimport socket, os, time\n\nBUFFER_SIZE = 512\nhost = 'localhost'\nport = 1337\n\n# Establishing conection\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host, port))\n\nprint('Connected...')\n\n# Managing the file\nwhile True:\n\n\tfile_name = input('What file to share -> ')\n\n\ttry:\n\t\tfile = open(file_name, 'r')\n\t\tbreak\n\texcept FileNotFoundError as err:\n\t\tprint('File not found. 
Type again!')\n\ncorrect_file_name = file.name\n# Send file's name\nprint('Sending file name')\ns.send(correct_file_name.encode('utf-8'))\ndata = s.recv(BUFFER_SIZE)\ntime.sleep(0.5)\n\nprint('Sending file size')\nfile_size = str(os.stat(correct_file_name).st_size) # Get the file size\ns.send(file_size.encode('utf-8'))\ndata = s.recv(BUFFER_SIZE)\ntime.sleep(0.5)\n\n\n# Send file\nfor line in file:\n\ts.send(line.encode('utf-8'))\n\nfile.close()\n\n\ndata = s.recv(BUFFER_SIZE)\ns.close()\n\nprint(data.decode())\n\n\n","sub_path":"client_side.py","file_name":"client_side.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"123063227","text":"from django.http import JsonResponse\nfrom django.views.decorators.http import require_GET\nimport sqlite3\nimport os\nimport time\nfrom datetime import datetime, timedelta\nimport csv\n\n\ndef dict_factory(cursor, row):\n \"\"\"\n Returns db row as a dictionary.\n :param cursor: current cursor\n :param row: current row\n :return: dictionary\n \"\"\"\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\nbase_metadata = {\n \"metaInfo\": {\n \"model\": \"cyan\",\n \"collection\": \"qed\",\n \"modelVersion\": 1.0,\n \"description\": \"The Cyanobacteria Assessment Network (CyAN) /state endpoint provides weekly \"\n \"estimates and summary statistics for the concentration of cyanobacteria (cells/ml) for each\"\n \"state in the continental United States. \"\n \"This dataset was produced through partnership with the National Oceanic and Atmospheric \"\n \"Administration (NOAA), the National Aeronautics and Space Administration (NASA), the \"\n \"United States Geological Survey (USGS), and the United States Environmental Protection \"\n \"Agency (USEPA). 
This cyanobacteria dataset was derived using the European Space Agency \"\n \"(ESA) Envisat satellite and 300x300 meter MEdium Resolution Imaging Spectrometer (MERIS) satellite\"\n \" imagery.\",\n \"status\": \"Finished\",\n \"timestamp\": \"2017-06-29T13:37:07Z\",\n \"url\": {\n \"type\": \"application/json\"\n }\n }\n}\n\nstart = time.clock()\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\ndb_path = os.path.join(BASE_DIR, \"cyan.db\")\ndb_con = sqlite3.connect(db_path)\ndb_con.row_factory = dict_factory\nc = db_con.cursor()\n\ncyan_data = {}\noutput = open('document.csv','a')\n\nc.execute(\"SELECT comid FROM lakes\")\nlakes = c.fetchall()\ndata = []\nfor lake in lakes:\n # lake in states\n comid = lake[\"comid\"]\n c.execute(\"SELECT state_abbr FROM state_lakes WHERE comid=?\", (comid,))\n states = c.fetchall()\n stateList = []\n for state in states:\n stateList.append(state[\"state_abbr\"])\n\n # area of lake\n c.execute(\"SELECT areasqkm, gnis_name FROM lakes WHERE comid=?\", (comid,))\n lake_data = c.fetchall()\n\n q = \"SELECT DISTINCT start_date from cyan_lakes WHERE \" + \" comid=?\"\n c.execute(q, (comid,))\n dates = c.fetchall()\n nDates = len(dates)\n if len(dates) == 0:\n continue\n start_date = dates[0]\n end_date = dates[nDates - 1]\n\n query = 'SELECT max(max), avg(mean), min(min) ' \\\n 'FROM cyan_lakes WHERE ' + ' comid =?'\n c.execute(query, (comid,))\n cI_data = c.fetchall()[0]\n\n query = 'SELECT DISTINCT start_date, high_extent ' \\\n 'FROM cyan_lakes WHERE high_extent > 0 AND ' + ' comid =?'\n c.execute(query, (comid,))\n high_extent = c.fetchall()\n\n query = 'SELECT DISTINCT start_date, moderate_extent ' \\\n 'FROM cyan_lakes WHERE moderate_extent > 0 AND ' + ' comid =?'\n c.execute(query, (comid,))\n moderate_extent = c.fetchall()\n\n query = 'SELECT DISTINCT start_date, low_extent ' \\\n 'FROM cyan_lakes WHERE low_extent > 0 AND ' + ' comid =?'\n c.execute(query, (comid,))\n low_extent = c.fetchall()\n\n query = 'SELECT max(high_extent), avg(high_extent), max(moderate_extent), avg(moderate_extent), ' \\\n 'max(low_extent), avg(low_extent) FROM cyan_lakes WHERE ' + ' comid=?'\n c.execute(query, (comid,))\n extent = c.fetchall()[0]\n\n # states.values() get values from list of dictionary.\n cyan_data[comid] = {\n \"lake_info\": {\n \"lakeCOMID\": comid,\n \"GNISname\": lake_data[0][\"gnis_name\"],\n \"lakeArea\": lake_data[0][\"areasqkm\"],\n \"state\": stateList,\n \"start_date\": start_date[\"start_date\"],\n \"end_date\": end_date[\"start_date\"]\n },\n \"lake_cyan_info\": {\n \"maxCI\": cI_data[\"max(max)\"],\n \"meanCI\": cI_data[\"avg(mean)\"],\n \"minCI\": cI_data[\"min(min)\"],\n \"freqHigh\": len(high_extent) / nDates,\n \"freqModerate\": len(moderate_extent) / nDates,\n \"freqLow\": len(low_extent) / nDates,\n \"maxHighExtent\": float(extent[\"max(high_extent)\"]) * 300,\n \"meanHighExtent\": float(extent[\"avg(high_extent)\"]) * 300,\n \"maxModerateExtent\": float(extent[\"max(moderate_extent)\"]) * 300,\n \"meanModerateExtent\": float(extent[\"avg(moderate_extent)\"]) * 300,\n \"maxLowExtent\": float(extent[\"max(low_extent)\"]) * 300,\n \"meanLowExtent\": float(extent[\"avg(low_extent)\"]) * 300\n }\n }\n\n myData = comid \n # myData = comid + \",\" + lake_data[0][\"gnis_name\"] + \",\" + lake_data[0][\"areasqkm\"]\n data.append(myData)\n\n # data.append(str(cyan_data[comid]))\nmetadata = base_metadata[\"metaInfo\"]\nmetadata[\"url\"][\"href\"] = \"https://qedinternal.epa.gov/cyan/rest/api/v1/lakes/info\"\nmetadata[\"timestamp\"] = 
str(datetime.utcnow()) + \"Z\"\nend = time.clock()\nmetadata[\"query_time\"] = end - start\ndata = {\"metaInfo\": metadata, \"outputs\": {\"lakeData\": cyan_data}}\noutput.close()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"414486792","text":"import datarobot as dr\nimport pandas as pd\nimport boto3\n\nname = \"tintpkg\"\n\ndef connect_datarobot(configFilePath='datarobot-config.yaml'):\n dr.Client(config_path=configFilePath)\n\ndef load_data_from_dr_project(project, bucket):\n com_index = project.file_name.find(\"com\")\n key = project.file_name[com_index + 4 :]\n data_location = 's3://{}/{}'.format(bucket, key)\n data = pd.read_csv(data_location)\n return data\n\ndef load_data_from_s3_bucket(key, bucket):\n data_location = 's3://{}/{}'.format(bucket, key)\n data = pd.read_csv(data_location)\n return data\n\ndef upload_to_s3(data, bucket, key):\n from io import StringIO\n csv_buffer = StringIO()\n data.to_csv(csv_buffer)\n s3_resource = boto3.resource('s3')\n s3_resource.Object(bucket, key).put(Body=csv_buffer.getvalue())\n return \"Data uploaded to {}/{}\".format(bucket, key)\n\n","sub_path":"tintpkg/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"587269901","text":"def _prepare_file(ctx, output, target_file):\n\n output_file = ctx.actions.declare_file(output)\n\n ctx.actions.symlink(\n output = output_file,\n target_file = target_file,\n )\n\n return output_file\n\ndef _webapp_impl(ctx):\n\n files = []\n\n for file in ctx.files.srcs:\n files.append(_prepare_file(ctx, file.short_path[len(ctx.label.package) + 1:], file))\n\n for providers in ctx.attr.deps:\n if JavaInfo in providers:\n for file in providers[JavaInfo].transitive_runtime_jars.to_list():\n files.append(_prepare_file(ctx, \"WEB-INF/lib/\" + file.basename, file))\n\n return [DefaultInfo(runfiles = ctx.runfiles(files = files))]\n\nwebapp = rule(\n\n implementation = _webapp_impl,\n\n attrs = {\n \"srcs\": attr.label_list(allow_files = True),\n \"deps\": attr.label_list(),\n },\n\n)","sub_path":"webapp/tools/tools.bzl","file_name":"tools.bzl","file_ext":"bzl","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"573570343","text":"from django.urls import path, include\nfrom banner import views\n\napp_name = \"banner\"\n\nurlpatterns = [\n path('', views.ViewBanner.as_view(), name=\"banner_list\"),\n path('website-logo/', views.WebSiteLogo.as_view(), name=\"website_logo\"),\n\n # Ajax URLField\n path('banner-delete/', views.DeleteBannerImage, name=\"delete_banner_image\"),\n path('banner-ad-delete/', views.DeleteadBannerImage, name=\"delete_banner_ad_image\"),\n]\n","sub_path":"zaptayfresh/banner/admin_urls.py","file_name":"admin_urls.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"585864725","text":"import uuid\n\nfrom django.conf import settings\nfrom django.test import TestCase\n\nfrom cloudant.database import CloudantDatabase\n\nfrom server.utils import cloudant_client\n\n\nclass CloudantStorageTests(TestCase):\n \"\"\"\n Testing the temporary integration of Zapier to the\n Microsoft Azure database.\n \"\"\"\n def setUp(self):\n \"\"\"\n Establish Cloudant and Feedly API 
connections\n \"\"\"\n name = 'test_db'\n self.content = cloudant_client.CloudantClient(settings.CLOUDANT_CREDENTIALS)\n self.database = self.content.get_or_create_database(name)\n\n def get_dbs(self):\n size = self.content.all_dbs()\n self.assertEqual(len(size), 3, 'Correct number of databases')\n\n def test_get_or_create_database(self):\n self.assertIsInstance(self.database, CloudantDatabase, \"Database object not return\")\n\n def test_create_database_document(self):\n \"\"\"\n The document creation expects to recieve a parsed Feedly http response body.\n \"\"\"\n fixture = []\n content = {\n \"title\": \"Zoho Projects: Inexpensive Task and Project Management\",\n \"url\": \"http://www.noupe.com/?p=96519\",\n \"author\": \"Noupe Editorial Team\",\n \"published_date\": \"march 21st 2016\",\n \"id\": \"http://www.noupe.com?p=96519\",\n \"keywords\": \"(Free) Services, Business Online, Essentials, Workflow\",\n \"content\": \"Zoho has kept me company for more than 10 years\"\n }\n\n fixture.append(content)\n\n document = self.content.create_database_document(self.database, fixture)\n\n self.assertTrue(document.exists(), \"The document was not stored in the db\")\n\n def test_delete_document(self):\n fixture = [{ 'url': 'http://example.com', 'id': str(uuid.uuid4())}]\n document = self.content.create_database_document(self.database, fixture)\n\n is_deleted = self.content.delete_database_document(document)\n self.assertTrue(is_deleted, \"The document has not been deleted\")\n","sub_path":"tests/test_cloudant_client.py","file_name":"test_cloudant_client.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"109234391","text":"l = [\"ujjwal\", \"harry\", \"good\", \"ritik\", \"Ayushman\", \"Hermin\"]\n\ni = 1\n\nfor item in l:\n if i % 2 != 0:\n print(f\"{item} is great leader \")\n i += 1\nprint()\n\"\"\"\nenumerate function provide key as well as index\n\"\"\"\nfor j, item in enumerate(l): # here j value initially is 0\n if j % 2 == 0:\n print(f\"{item} is great caption\")\n","sub_path":"No_31_Enumarate_function.py","file_name":"No_31_Enumarate_function.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"341613356","text":"import os\nimport time\nimport sys\nimport fnmatch\nfrom datetime import datetime\nimport getopt\nfrom pathlib import Path\nimport math\nimport csv\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass RecBuilder:\n def __init__(self, line,ax,color):\n self.line = line\n self.ax = ax\n self.color = color\n self.xs = []\n self.ys = []\n self.cid = line.figure.canvas.mpl_connect('button_press_event', self)\n self.counter = 0\n self.shape_counter = 0\n self.shape = {}\n self.precision = 10\n self.last_plot = {}\n self.last_points = {}\n\n def __call__(self, event):\n if event.inaxes!=self.line.axes: return\n if self.counter%2 == 0:\n # remove previous drawings\n # remove lines\n if self.last_plot:\n for i, line in enumerate(self.last_plot):\n self.last_plot.pop(i)\n line.remove()\n\n # remove points\n if self.last_points:\n for x in range(5):\n xy = np.delete(self.last_points.get_offsets(), 0, axis=0)\n self.last_points.set_offsets(xy)\n\n # reset data\n self.xs = []\n self.ys = []\n\n # append first point\n self.xs.append(event.xdata)\n self.ys.append(event.ydata)\n\n else:\n # create rect's four corners based on second position to create a closed plot\n 
self.xs.append(event.xdata)\n self.ys.append(self.ys[0])\n \n self.xs.append(event.xdata)\n self.ys.append(event.ydata)\n\n self.xs.append(self.xs[0])\n self.ys.append(event.ydata)\n \n self.xs.append(self.xs[0])\n self.ys.append(self.ys[0])\n\n # save to shap history\n self.shape[self.shape_counter] = [self.xs,self.ys]\n self.shape_counter = self.shape_counter + 1\n\n # plot the rect\n self.last_plot = self.ax.plot(self.xs,self.ys,color=self.color)\n\n # delete the first point because it will be re-drew\n xy = np.delete(self.last_points.get_offsets(), 0, axis=0)\n self.last_points.set_offsets(xy)\n\n # draw all points\n self.last_points = self.ax.scatter(self.xs,self.ys,s=60,color=self.color)\n \n # update canvas\n self.line.figure.canvas.draw()\n self.counter = self.counter + 1\n \n\ndef create_shape_on_image(data,cmap='jet'):\n def change_shapes(shapes):\n new_shapes = {}\n for i in range(len(shapes)):\n l = len(shapes[i][1])\n new_shapes[i] = np.zeros((l,2),dtype='int')\n for j in range(l):\n new_shapes[i][j,0] = shapes[i][0][j]\n new_shapes[i][j,1] = shapes[i][1][j]\n return new_shapes\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title('click to choose two points to define the area and close to continue\\n')\n line = ax.imshow(data) \n ax.set_xlim(0,data[:,:,0].shape[1])\n ax.set_ylim(0,data[:,:,0].shape[0])\n recbuilder = RecBuilder(line,ax,'red')\n plt.gca().invert_yaxis()\n plt.show()\n new_shapes = change_shapes(recbuilder.shape)\n return new_shapes\n\ndef get_ticker_time(ticker):\n ticker = int(ticker/5)\n second = ticker%60\n minute = int(ticker/60)\n return \"{:02d}:{:02d}\".format(minute, second)\n\ndef process_frames(input_folder, output_folder, count, log, fps):\n \n minute_right = 0\n minute_left = 0\n total_right = 0\n total_left = 0\n delta = 0\n\n full_right = 0\n full_left = 0\n full_right_count = 0\n full_left_count = 0\n full_right_start = 0\n full_right_end = 0\n full_left_start = 0\n full_left_end = 0\n\n minute_distance = 0\n total_distance = 0\n distance = 0\n\n full_right_distance = 0\n full_left_distance = 0\n\n data_per_minute = []\n\n delta_file = open(\"./delta.txt\", \"w\")\n delta_filter_1 = 20\n delta_filter_2 = 20\n\n previous_delta = 0\n\n turn_file = open(\"./turn.txt\", \"w\")\n\n for i in range(1, count+1):\n inputFile = \"{}/frame{:07d}.jpg\".format(input_folder, i)\n outputFile = \"{}/frame{:07d}.jpg\".format(output_folder, i)\n print_log(\"processing {}\".format(inputFile), log)\n \n img0 = cv2.imread(inputFile,0)\n img = cv2.copyMakeBorder(img0, 2 , 2, 2, 2, cv2.BORDER_CONSTANT, value=[255, 255, 255])\n \n ret,thresh = cv2.threshold(img,100,255,0)\n toDraw = thresh\n\n # Find all contours\n contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\n # Find the index of the second largest contour, which is mice\n areas = [cv2.contourArea(c) for c in contours]\n arr = np.array(areas)\n max_index = (-arr).argsort()[:2]\n cnt=contours[max_index[1]]\n\n #for k in range(len(contours)):\n # toDraw2 = toDraw.copy()\n # cnt = contours[k]\n # cv2.drawContours(toDraw2, [cnt], -1, (0,255,0), 3)\n # cv2.imshow('kk{}'.format(k), toDraw2)\n #cv2.waitKey(0)\n\n # Draw original contour\n cv2.drawContours(toDraw, [cnt], -1, (0,255,0), 3)\n\n # Fit with the ellipse and draw\n ellipse = cv2.fitEllipse(cnt)\n cv2.ellipse(toDraw,ellipse,(0,255,0),2)\n\n # Fit with line and draw\n rows,cols = img.shape[:2]\n [vx,vy,x,y] = cv2.fitLine(cnt, cv2.DIST_L2,0,0.01,0.01)\n lefty = int((-x*vy/vx) + y)\n righty = int(((cols-x)*vy/vx)+y)\n 
cv2.line(toDraw,(cols-1,righty),(0,lefty),(0,255,0),2) \n cv2.imwrite(outputFile, toDraw)\n\n # Find distance travelled based ellipse center above\n if i > 1:\n x_diff = ellipse[0][0]-previous_center[0]\n y_diff = ellipse[0][1]-previous_center[1]\n distance = math.sqrt(x_diff*x_diff + y_diff*y_diff)\n total_distance = total_distance + distance\n minute_distance = minute_distance + distance\n previous_center = ellipse[0]\n \n print_log(\"distance: {}\".format(distance), log)\n \n # Find angle rotated based on center line above\n delta_y = righty-lefty\n delta_x = 0 - (cols-1)\n angle = math.atan2(delta_y, delta_x) * 180 / math.pi\n if angle > 180:\n angle = angle - 180\n elif angle < 0:\n angle = angle + 180\n\n # because of 5 fps frame rate, detla of angle must be less 90 degree between 2 consecutive frames, which is an important assumption in this calculation\n if i > 1:\n delta = angle - previous_angle\n if delta > 90:\n delta = (180 - delta) * -1\n elif delta < -90:\n delta = (-180 - delta) * -1\n \n orig_delta = delta \n # filtering delta noise\n if abs(delta) < delta_filter_1: # or (abs(delta) < delta_filter_2 and delta*previous_delta < 0):\n delta = 0\n\n #if abs(orig_delta) > delta_filter_1: \n # previous_delta = orig_delta\n\n delta_file.write(\"{},{}\\n\".format(i, delta))\n \n if delta < 0:\n total_right = total_right - delta\n minute_right = minute_right - delta\n full_right = full_right - delta\n full_right_distance = full_right_distance + distance\n full_left = 0\n full_left_distance = 0\n full_left_start = i\n elif delta > 0:\n total_left = total_left + delta\n minute_left = minute_left + delta\n full_left = full_left + delta\n full_left_distance = full_left_distance + distance\n full_right = 0\n full_right_distance = 0\n full_right_start = i\n\n turn_format = \"{}: time {} - {} frame {} - {} distance {:7d}\" \n if full_right >= 360:\n full_right_count = full_right_count + 1\n full_right_end = i\n print_log(turn_format.format(\"cw\", get_ticker_time(full_right_start), get_ticker_time(full_right_end), full_right_start, full_right_end, int(full_right_distance)), turn_file)\n full_right_start = i\n full_right = 0\n \n \n if full_left >= 360:\n full_left_count = full_left_count + 1\n full_left_end = i\n print_log(turn_format.format(\"ccw\", get_ticker_time(full_left_start), get_ticker_time(full_left_end), full_left_start, full_left_end, int(full_left_distance)), turn_file)\n full_left_start =i\n full_left = 0\n\n if i == 1:\n previous_angle = angle\n elif delta != 0:\n previous_angle = angle\n\n print_log(\"angle: {}\".format(angle), log)\n print_log(\"angle delta: {}\".format(delta), log)\n \n # record and reset every one minute\n minute_frames_count = fps*60\n minute_mark = int(i/minute_frames_count)\n if i%minute_frames_count == 0 or i == count:\n per_minute = [minute_right/360, minute_left/360, minute_distance]\n data_per_minute.append(per_minute)\n minute_right = 0\n minute_left = 0\n minute_distance = 0\n print(\"full right {} left {}\".format(full_right_count, full_left_count))\n return data_per_minute, [total_right/360, total_left/360, total_distance]\n \ndef main(argv):\n\n \"\"\"\nMice detection program to detect mice's movement including distance and rotation\nUsage: python md.py -i inputfile -s start_time -d duration -r rotation [flags]\nExample: python md.py -i pre.mp4 -s 00:01:05 -d 600 -r -3 -v\nArguments:\n-i, --ifile input video file, required\n-s, --stime start time of video, optional, and default is 0\n-d, --duration duration (seconds) of video, optional, and 
default is \"to-end\"\n-r, --rotation rotation of video (degree), optional, negative is counterclockwise and positive is clockwise \nFlags:\n-h, --help show this help \n-v, --verbose be verbose\n \"\"\"\n \n def usage():\n print(main.__doc__)\n sys.exit()\n \n inputfile = ''\n start_time = '00:00:00'\n duration = 0\n rotation = 0\n verbose = False\n \n try:\n opts, args = getopt.getopt(argv,\"hvi:s:d:r:\",[\"verbose\", \"help\",\"ifile=\",\"stime=\", \"duration=\", \"rotation=\"])\n except getopt.GetoptError as err:\n print('ERROR: ', err)\n usage()\n for opt, arg in opts:\n if opt == '-h':\n usage()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-s\", \"--stime\"):\n start_time = arg\n elif opt in (\"-d\", \"--duration\"):\n duration = int(arg)\n elif opt in (\"-r\", \"--rotation\"):\n rotation = int(arg)\n elif opt in (\"-v\", \"--verbose\"):\n verbose = True\n \n if inputfile == '':\n print('ERROR: inputfile is required')\n usage()\n \n return inputfile, start_time, duration, rotation, verbose\n\ndef print_log(str, file):\n print(str)\n file.write(\"{}\\n\".format(str))\n \n# main program\n\n# use timestamp as workspace folder name\nnow = datetime.now\nworkspace = \"./{}\".format(now().strftime('%Y-%m-%d-%H:%M:%S'))\nos.system(\"mkdir {}\".format(workspace))\n\n# log files\nlog_file = \"{}/process.log\".format(workspace)\nPath(log_file).touch()\nlog = open(log_file, \"w\")\n\nffmpeg_log_file = \"{}/ffmpeg.log\".format(workspace)\nPath(ffmpeg_log_file).touch()\n#ffmpeg_log = \">> {} 2>&1\".format(ffmpeg_log_file)\n# ignore it for now\nffmpeg_log = \"\"\n\ninput_file, start_time, duration, rotation_angle, verbose = main(sys.argv[1:])\n\nif verbose:\n print_log('Input file is {}'.format(input_file), log)\n print_log('Start time is {}'.format(start_time), log)\n print_log('duration is {}'.format(duration), log)\n print_log('rotation is {}'.format(rotation_angle), log)\n print_log('verbose is {}'.format(verbose), log)\n\nrotation_filter = \"rotate={}*PI/180\".format(rotation_angle)\nsample_time = start_time\n\n\nif rotation_angle == 0:\n sample_command = \"ffmpeg -ss {} -i {} -vframes 1 -q:v 2 sample.png {}\".format(sample_time, input_file, ffmpeg_log)\nelse:\n sample_command = \"ffmpeg -ss {} -i {} -vframes 1 -q:v 2 -filter:v {} sample.png {}\".format(sample_time, input_file, rotation_filter, ffmpeg_log)\nprint_log(sample_command, log)\nos.system(sample_command)\nos.system(\"mv sample.png {}\".format(workspace))\n\n# user defined area\nimg = plt.imread(\"{}/sample.png\".format(workspace))\nshapes = create_shape_on_image(img)\n\nif len(shapes) == 0:\n sys.exit(0)\n \nshape = shapes[len(shapes)-1]\nrec_x = shape[0][0]\nrec_y = shape[0][1]\nrec_w = shape[2][0] - rec_x\nrec_h = shape[2][1] - rec_y\n\n# pause to confirm\nprint_log(\"******************************\", log)\nprint_log(\"start time: {}\".format(start_time), log)\nprint_log(\"duration: {}\".format(duration), log)\nprint_log(\"rotation angle: {}\".format(rotation_angle), log)\nprint_log(\"defined area: ({},{}), ({},{})\".format(shape[0][0], shape[0][1], shape[2][0], shape[2][1]), log)\nprint_log(\"workspace: {}\".format(workspace), log)\nprint_log(\"******************************\", log)\nuser_input = input(\"Press enter to continue or type q to exit: \")\nif user_input == 'q':\n sys.exit(0)\n\n# crop and trim the video by the area\ncrop_input = \"crop_{}\".format(input_file)\ncrop_filter = \"crop={}:{}:{}:{}\".format(rec_w, rec_h, rec_x, rec_y)\nif duration == 0:\n if rotation_angle == 0:\n crop_command = 
\"ffmpeg -i {} -ss {} -filter:v \\\"{}\\\" {} {}\".format(input_file, start_time, crop_filter, crop_input, ffmpeg_log)\n else:\n crop_command = \"ffmpeg -i {} -ss {} -filter:v \\\"{}, {}\\\" {} {}\".format(input_file, start_time, crop_filter, rotation_filter, crop_input, ffmpeg_log)\nelse:\n if rotation_angle == 0:\n crop_command = \"ffmpeg -i {} -ss {} -t {} -filter:v \\\"{}\\\" {} {}\".format(input_file, start_time, duration, crop_filter, crop_input, ffmpeg_log)\n else:\n crop_command = \"ffmpeg -i {} -ss {} -t {} -filter:v \\\"{}, {}\\\" {} {}\".format(input_file, start_time, crop_filter, rotation_filter, crop_input, ffmpeg_log)\nprint_log(crop_command, log)\nos.system(crop_command)\nos.system(\"mv {} {}\".format(crop_input, workspace))\n\n# take the frames from video\nframe_output = \"frame%07d.jpg\"\nfps = 5\nframe_command = \"ffmpeg -i {}/{} -vf fps={}/1 {} -hide_banner {}\".format(workspace, crop_input, fps, frame_output, ffmpeg_log)\nprint_log(frame_command, log)\nos.system(frame_command)\n\n# move frames to sub dir\nframes_folder = \"{}/frames\".format(workspace)\nos.system(\"mkdir {}\".format(frames_folder))\nos.system(\"mv frame*.jpg {}\".format(frames_folder))\n\n# make output folder\noutput_folder = \"{}/output\".format(workspace)\nos.system(\"mkdir {}\".format(output_folder))\n\n# find the count of frames\nframe_count = len(fnmatch.filter(os.listdir(frames_folder), \"frame*.jpg\"))\n\n# process the frames\ndata_per_minute, total = process_frames(frames_folder, output_folder, frame_count, log, fps)\n\n# write data to csv file\ndata_file = \"{}/data.csv\".format(workspace)\nwith open(data_file, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow([\"right_turn\", \"left_turn\", \"distance\"])\n for line in data_per_minute:\n writer.writerow(line)\n \n# plot the final result\nminute_count = int(frame_count/(fps*60))\nt = np.arange(1, minute_count + 1, 1)\nrights = []\nlefts = []\nturns = []\ndistances = []\n\nfor i in range(minute_count):\n rights.append(data_per_minute[i][0])\n lefts.append(data_per_minute[i][1])\n turns.append(data_per_minute[i][0] + data_per_minute[i][1])\n distances.append(data_per_minute[i][2])\n\nplt.figure(1)\nplt.subplot(211)\nplt.plot(t, rights, 'ro', label ='right turns')\nplt.plot(t, lefts, 'bo', label ='left turns')\nplt.plot([], [], ' ', label=\"total turns: {:6.2f}\".format(total[0] + total[1]))\nplt.legend(loc='best')\nplt.ylabel('turns')\nplt.xlabel('minute')\nplt.title('number of turns per minute - {}'.format(input_file))\nplt.savefig('{}/turns_per_minute.png'.format(workspace))\n\nplt.figure(2)\nplt.subplot(211)\nplt.plot(t, distances, 'go', label ='distance')\nplt.plot([], [], ' ', label=\"total distance: {:d}\".format(int(total[2])))\nplt.legend(loc='best')\nplt.ylabel('dpi')\nplt.xlabel('minute')\nplt.title('distance per minute - {}'.format(input_file))\nplt.savefig('{}/distance_per_minute.png'.format(workspace))\n\nplt.show(block=False)\n\n# close and save the log file\nlog.close()\n\ninput(\"Press enter to end ...\")\n\n\n\n","sub_path":"md.py","file_name":"md.py","file_ext":"py","file_size_in_byte":16637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"253718732","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n#Load Database\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = 
mnist.load_data()\n#Label the digits\nDigits_labels = ['One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten']\n#Pre-Process the images\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n#Model - Sequential\nmodel = keras.Sequential([keras.layers.Flatten(input_shape=(28,28)),\n keras.layers.Dense(250, activation=tf.nn.relu),\n keras.layers.Dense(10, activation = tf.nn.softmax)])\n#Compile model\nmodel.compile(optimizer = tf.train.AdamOptimizer(),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n#Train model\nmodel.fit(train_images, train_labels, epochs=3)\n#print prediction\nprediction = model.predict(test_images)\nprint(np.argmax(prediction[9]))\n#show digit\nplt.figure()\nplt.imshow(test_images[9])\nplt.colorbar()\nplt.gca().grid(False)\nplt.xlabel(test_labels[9])\nplt.show()\n\n","sub_path":"HandWritten digits tensorflow.py","file_name":"HandWritten digits tensorflow.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"517566578","text":"#!/usr/bin/env python\nfrom decimal import *\ngetcontext().prec = 102\n\ndef f(n):\n return sum( map( int\n , str( Decimal(n).sqrt() ).split('.')[1][:100]\n )\n )\ncands = set(range(1,102))\nexcept_not = set(map(lambda x: x**2, range(1,12)))\n\n#print(len(except_not))\n#print(len(cands-except_not))\nprint(sum(map(f, cands-except_not)))\n\n","sub_path":"euler/080/_old/py.py","file_name":"py.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"2866159","text":"import pymysql.cursors\n\n\nclass Test1:\n def __init__(self, first_name, last_name, id):\n self.first_name = first_name\n self.last_name = last_name\n self.id = id\n db = pymysql.connect(\"localhost\", \"root\", \"1973\", \"python_db\")\n self.cursor = db.cursor()\n self.db = db\n\n def selectAll(self):\n if(len(self.id) == 0):\n print(\"please insert\")\n return \"none\"\n elif(len(self.id) > 0):\n if(int(self.id) > 0):\n sql = \"SELECT * FROM test1 WHERE id= %s\" % self.id\n try:\n self.cursor.execute(sql)\n self.db.commit()\n return self.cursor.fetchall()\n except:\n self.db.rollback()\n return False\n else:\n sql = \"SELECT * FROM test1\"\n try:\n self.cursor.execute(sql)\n self.db.commit()\n return self.cursor.fetchall()\n except:\n self.db.rollback()\n return False\n \n def insert(self):\n sql = (\n \"INSERT INTO test1 (first_name,last_name) VALUES ('%s','%s')\"\n %\n (self.first_name, self.last_name)\n )\n try:\n self.cursor.execute(sql)\n self.db.commit()\n return True\n except:\n self.db.rollback()\n return False\n\n def update(self):\n sql = (\n \"UPDATE test1 SET first_name='%s' , last_name='%s' WHERE id='%s'\"\n %\n (self.first_name, self.last_name, self.id)\n )\n try:\n self.cursor.execute(sql)\n self.db.commit()\n return True\n except:\n self.db.rollback()\n return False\n\n def delete(self):\n sql = \"DELETE FROM test1 WHERE id='%s'\" % self.id\n try:\n self.cursor.execute(sql)\n self.db.commit()\n return True\n except:\n self.db.rollback()\n return False\n","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"320870857","text":"# -*- coding: utf-8 -*-\nimport threading\nimport LayerPhy\nimport math\nimport random\nfrom Tools.DebugOut import DebugOut\nimport time\n\n\nclass 
NetworkStack(object):\n\n def __init__(self, masterHost='127.0.0.1', baseport=10000, ownIdentifier='x', autoEnter=True):\n self.__debugOut=DebugOut()\n self.__applicationList=[]\n self.__sendDelay=0\n self.__layerDelay=0\n self.__layerPhy=LayerPhy.LayerPhy(ownIdentifier, upperLayerCallbackFunction=self.layer2_incomingPDU, masterHost=masterHost, baseport=baseport, autoEnter=autoEnter)\n # You may want to change the following part\n self.__ownIdentifier=ownIdentifier\n self.outgoingPacketStack=[]\n self.outgoingPacketStackLock=threading.Lock()\n\n #SN TB ETAPE DE L'INITIALISATION DU TOKEN\n self.initToken = 2\n\n #SN TB compteur -> compteur pour le nombre de slot (couche 2)\n self.compteur = 2\n self.indice = 0\n self.paquetRecu = \"\"\n self.paquetAEnvoyer = \"\".encode(\"UTF-8\")\n\n\n def leaveNetwork(self):\n self.__layerPhy.API_leave()\n\n def enableGlobalDebug(self):\n self.__layerPhy.API_subscribeDebug()\n\n def configureDelay(self,sendDelay=None,layerDelay=None):\n if sendDelay!=None:\n self.__sendDelay=sendDelay\n if layerDelay!=None:\n self.__layerDelay=layerDelay\n\n # Do not change!\n # This is the application layer protocol part: Each application has its specific port\n # The application registers a callback function that is called when a packet arrives for that particular application\n def applicationAddCallback(self, applicationPort, callBack):\n self.__applicationList.append((applicationPort, callBack))\n\n # Do not change!\n # The application sends packets which are stored in a buffer before being submitted\n def applicationSend(self, destination, applicationPort, pdu):\n self.outgoingPacketStackLock.acquire()\n self.outgoingPacketStack.insert(0,(destination, applicationPort,pdu))\n self.outgoingPacketStackLock.release()\n\n\n#############################################################################################################################################\n#############################################################################################################################################\n\n # Please change: This sends the first TOKEN to the ring\n # In fact, sending a TOKEN requires the creation of a new thread\n def initiateToken(self):\n self.__debugOut.debugOutLayer(self.__ownIdentifier,2,self.__debugOut.INFO,\"Initiating TOKEN\" )\n tokenThread=threading.Thread(target=self.application_layer_outgoingPDU, args=(True,))\n tokenThread.start()\n\n # Please adapt if required : This is the top layer that usually sends the data to the application\n # If pdu is None, the packet is not valid\n # forceToken determines that the return packet needs to be a TOKEN\n def application_layer_incomingPDU(self, forceToken, source, pdu):\n time.sleep(self.__layerDelay)\n self.__debugOut.debugOutLayer(self.__ownIdentifier,5,self.__debugOut.INFO,\"%s: application_layer_in: received (%s) \" % (self.__ownIdentifier,pdu))\n\n if pdu!=None:\n applicationPort=int.from_bytes(pdu[0:1],byteorder=\"little\",signed=False)\n sdu=pdu[1:]\n\n # We deliver the SDU to the application that handles this message\n for (thisApplicationPort, thisApplication) in self.__applicationList:\n if thisApplicationPort==applicationPort:\n thisApplication(source, applicationPort, sdu.decode('UTF-8'))\n\n # We dive back down into the network stack\n self.application_layer_outgoingPDU(forceToken)\n\n\n # Please adapt if required: This is the top layer that retrieves one element from the application layer\n def application_layer_outgoingPDU(self, forceToken=False):\n time.sleep(self.__layerDelay)\n 
self.outgoingPacketStackLock.acquire()\n\n #TB SN on compare le initToken et le ownIdentifier pour initialiser le paquet avec le computeur 'A'\n\n if (self.initToken != 0 and self.__ownIdentifier == 'A') or forceToken:\n destination=\"X\"\n applicationPort=20\n sdu=\"TOKEN\"\n\n else:\n if self.outgoingPacketStack != []:\n destination,applicationPort,sdu=self.outgoingPacketStack.pop()\n else :\n destination=\"X\"\n applicationPort=20\n sdu=\"TOKEN\"\n\n self.outgoingPacketStackLock.release()\n\n pdu=applicationPort.to_bytes(1,byteorder=\"little\",signed=False)+sdu.encode(\"UTF-8\")\n self.__debugOut.debugOutLayer(self.__ownIdentifier,5,self.__debugOut.INFO,\"%s: application_layer_out: sending (%s) \" % (self.__ownIdentifier,pdu))\n self.layer4_outgoingPDU(destination, applicationPort, pdu)\n\n\n # Please adapt!\n # Take care: The parameters of incoming (data packets arriving at the computer) and outgoing (data packets leaving from the computer)\n # should generally agree with one layer difference (i.e. here we treat the applicationPort, an identifier that knows which application\n # is asked to handle the traffic\n def layer4_incomingPDU(self, source, pdu):\n time.sleep(self.__layerDelay)\n # Let us assume that this is the layer where we determine the applicationPort\n # We also decide whether we can send immediately send a new packet or whether we need to be friendly and send a TOKEN\n # We are not friendly and send a packet if our application has one with 100% chance\n self.__debugOut.debugOutLayer(self.__ownIdentifier,4,self.__debugOut.INFO,\"%s: Layer4_in: Received (%s) from %s \" % (self.__ownIdentifier,pdu, source))\n self.application_layer_incomingPDU(False,source,pdu)\n\n # Please adapt\n def layer4_outgoingPDU(self, destination, applicationPort, pdu):\n time.sleep(self.__layerDelay)\n self.__debugOut.debugOutLayer(self.__ownIdentifier,4,self.__debugOut.INFO,\"%s: Layer4_out: Sending (%s) to %s \" % (self.__ownIdentifier, pdu, destination))\n self.layer3_outgoingPDU(destination, pdu)\n\n # Please adapt!\n # The current situation is that in this layer, the network stack takes the decision to forcibly keep the packet because it thinkgs that it is destined to this computer\n # It also authorizes immediately that a new packet can be put onto the network.\n def layer3_incomingPDU(self, interface, pdu):\n time.sleep(self.__layerDelay)\n\n #TB SN On va recuperer l'expéditeur et le destinaire du pdu grâce au code fournit dans les annexes\n\n expediteur = pdu[0:1].decode('UTF-8')\n destinataire = pdu[1:2].decode('UTF-8')\n\n if destinataire == 'X':\n self.__debugOut.debugOutLayer(self.__ownIdentifier,3,self.__debugOut.INFO,\"%s: Layer3_in: tirage (%s) -> layer4_in\\n\" % (self.__ownIdentifier, pdu))\n self.layer4_incomingPDU(None,None)\n\n\n #SN TB Test pour savoir si le paquet reçus est celui qu'on à envoyer\n elif expediteur == self.__ownIdentifier:\n self.layer4_incomingPDU(None,None)\n\n #SN TB Test pour savoir si on est le destinaire\n elif destinataire == self.__ownIdentifier:\n print(\"Nice ca\");\n #SN TB Si oui, on envoie à la couche 4 incoming\n self.__debugOut.debugOutLayer(self.__ownIdentifier,3,self.__debugOut.INFO,\"%s: Layer3_in: tirage (%s) -> layer4_in\\n\" % (self.__ownIdentifier, pdu))\n self.layer4_incomingPDU(expediteur,pdu[2:])\n else:\n #SN TB Si non, on renvoie le paquet à la couche 2 outgoing pour transmettre le paquet au computer suivant\n taille = len(pdu)\n if taille < 10 :\n taille = '0'+str(taille)\n else :\n taille = str(len(pdu))\n pdu = 
taille.encode(\"UTF-8\")+pdu\n self.__debugOut.debugOutLayer(self.__ownIdentifier,3,self.__debugOut.INFO,\"%s: Layer3_in: tirage (%s) -> Packet to be destroyed\\n\" % (self.__ownIdentifier, pdu))\n self.layer2_outgoingPDU(interface,pdu)\n\n\n # Please adapt\n def layer3_outgoingPDU(self, destination, pdu):\n time.sleep(self.__layerDelay)\n # Here, we store the packet and wait until an empty token packet arrives\n\n #SN TB On récupère l'expéditeur et la destination, que l'on encode et ajoute au PDU\n expediteur = self.__ownIdentifier\n pdu = expediteur.encode(\"UTF-8\")+destination.encode(\"UTF-8\")+pdu\n\n #TB SN On récupère la taille et on l'ajoute au pdu\n taille = len(pdu)\n if taille < 10 :\n taille = '0'+str(taille)\n else :\n taille = str(len(pdu))\n pdu = taille.encode(\"UTF-8\")+pdu\n\n self.__debugOut.debugOutLayer(self.__ownIdentifier,3,self.__debugOut.INFO,\"%s: Layer3_out: Sending out (%s) via interface %d \" % (self.__ownIdentifier, pdu, 0))\n self.layer2_outgoingPDU(0, pdu)\n\n # Please adapt\n def layer2_incomingPDU(self, interface, pdu):\n time.sleep(self.__layerDelay)\n self.__debugOut.debugOutLayer(self.__ownIdentifier,2,self.__debugOut.INFO,\"%s: Layer2_in: Received (%s) on Interface %d \" % (self.__ownIdentifier, pdu, interface))\n\n if interface == 0 : # same ring\n\n print(pdu)\n self.paquetRecu = pdu\n\n if self.compteur <= 0 :\n self.compteur = 2\n self.indice = 0\n self.paquetAEnvoyer = \"\".encode(\"UTF-8\")\n #self.paquetRecu = pdu\n taille = self.paquetRecu[self.indice:self.indice+2]\n taille = int(taille.decode('UTF-8'))\n\n\n self.indice += 2\n pdu = self.paquetRecu[self.indice:self.indice+taille]\n self.indice += taille\n self.compteur -= 1\n\n\n self.__debugOut.debugOutLayer(self.__ownIdentifier,2,self.__debugOut.INFO,\"%s: Layer2_in: tirage (%s) -> layer3_in\\n\" % (self.__ownIdentifier, pdu))\n self.layer3_incomingPDU(interface,pdu)\n else: # Another Ring, this is for routing, see later\n pass\n\n def layer2_outgoingPDU(self, interface, pdu):\n if self.initToken > 0 and self.__ownIdentifier == 'A' : #TB SN On regarde si on continue d'initialiser le paquet\n print('lol')\n if self.initToken >= 2 : #TB SN on est pas sur le dernier slot donc on continue d'initialiser\n self.initToken -= 1\n self.paquetAEnvoyer += pdu\n self.compteur -=1\n self.__debugOut.debugOutLayer(self.__ownIdentifier,2,self.__debugOut.INFO,\"%s: Layer2_out: Sending in (%s) via interface %d \" % (self.__ownIdentifier, self.paquetAEnvoyer, interface))\n self.application_layer_outgoingPDU(True)\n #TB SN On initialise le dernier slot du paquet, donc on envoie le paquet initialisé au noeud suivant\n elif self.initToken == 1:\n self.initToken -= 1\n self.paquetAEnvoyer += pdu\n self.compteur -=1\n self.__debugOut.debugOutLayer(self.__ownIdentifier,2,self.__debugOut.INFO,\"%s: Layer2_out: Sending in (%s) via interface %d \" % (self.__ownIdentifier, self.paquetAEnvoyer, interface))\n self.__layerPhy.API_sendData(interface, self.paquetAEnvoyer)\n #self.layer2_incomingPDU(interface,self.paquetRecu)\n #SN TB Tant que tous les slots n'ont pas été traités\n elif self.compteur > 0 :\n self.paquetAEnvoyer += pdu\n time.sleep(self.__layerDelay)\n self.__debugOut.debugOutLayer(self.__ownIdentifier,2,self.__debugOut.INFO,\"%s: Layer2_out: Sending out (%s) via interface %d \" % (self.__ownIdentifier, self.paquetAEnvoyer, interface))\n if self.__sendDelay!=0:\n self.__debugOut.debugOutLayer(self.__ownIdentifier,2,self.__debugOut.INFO,\"%s: Layer2_out: Sleeping for %ds\" % 
(self.__ownIdentifier,self.__sendDelay))\n time.sleep(self.__sendDelay)\n #SN TB On envoie en couche 2 incoming pour traiter le slot suivant\n self.layer2_incomingPDU(interface,self.paquetRecu)\n #SN TB tous les slots ont été traités\n else :\n self.paquetAEnvoyer += pdu\n #SN TB on envoie au noeud suivant\n self.__layerPhy.API_sendData(interface, self.paquetAEnvoyer)\n ","sub_path":"NetworkStack.py","file_name":"NetworkStack.py","file_ext":"py","file_size_in_byte":12440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"2"} +{"seq_id":"206726336","text":"import discord\nimport sys\nimport sqlite3\nimport typing\nimport asyncio\nimport datetime\nimport requests\nimport os\nimport math\nimport random\nimport re\nimport youtube_dl\nimport json\nimport lavalink #for musicbot!!!\nimport emoji\nfrom io import BytesIO\nfrom unqlite import UnQLite\nfrom PIL import Image, ImageDraw, ImageFont\nfrom pathlib import Path\nfrom urllib import parse\nfrom discord.ext import commands\n\nTOKEN = 'NjEyNzE4MjU1NjM4ODM5MzA4.XVmccQ.nBZB73prpOvrIab84VMjGj0BFes' # Not a real token. Demonstration purposes only.\nintents = discord.Intents.all()\nbot = commands.Bot(command_prefix='$', case_insensitive=True, help_command=None, intents=intents)\nG_logChannel = None\nG_greetingChannel = None\nG_confirmationChannel = None\nG_startingRole = None\nG_confirmedRole = None\nG_mutedRole = None\n\n@bot.event\nasync def on_ready():\n print(f\"Logged on as {bot.user}!\")\n print(f\"Running version {sys.version}\")\n global G_logChannel\n global G_greetingChannel\n global G_confirmationChannel\n global G_startingRole\n global G_confirmedRole\n bot.add_cog(Logging(bot))\n bot.add_cog(Moderator(bot))\n bot.add_cog(Utility(bot))\n bot.add_cog(Info(bot))\n bot.add_cog(Fun(bot))\n bot.add_cog(Help(bot))\n db = UnQLite(\"FHDatabase.db\")\n channels = db.collection(\"Channels\")\n fetched = channels.filter(lambda obj: obj[\"name\"] == \"logs\")[0][\"id\"]\n if fetched != None:\n G_logChannel = bot.get_channel(fetched)\n\n fetched = channels.filter(lambda obj: obj[\"name\"] == \"greetings\")[0][\"id\"]\n if fetched != None:\n G_greetingChannel = bot.get_channel(fetched)\n\n fetched = channels.filter(lambda obj: obj[\"name\"] == \"confirmation\")[0][\"id\"]\n if fetched != None:\n G_confirmationChannel = bot.get_channel(fetched)\n db.close()\n await statusChanger()\n\nclass Logging(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self._last_member = None\n\n def calculateUserAge(self, member: discord.Member):\n timeNow = datetime.datetime.utcnow()\n userCreated = member.created_at\n account_age = timeNow - userCreated\n years = account_age.days // 365\n months = (account_age.days - (years * 365)) // 30\n days = account_age.days - (years * 365) - (months * 30)\n hours = account_age.seconds // 3600\n minutes = (account_age.seconds - (hours * 3600)) // 60\n seconds = account_age.seconds - (hours * 3600) - (minutes * 60)\n age = {\"years\": years, \"months\": months, \"days\": days, \"hours\": hours, \"minutes\": minutes, \"seconds\": seconds}\n age_string = \"\"\n if age[\"years\"] == 1:\n age_string = age_string + \"1 year, \"\n elif age[\"years\"]:\n age_string = age_string + f\"{age['years']} years, \"\n if age[\"months\"] == 1:\n age_string = age_string + \"1 month, \"\n elif age[\"months\"]:\n age_string = age_string + f\"{age['months']} months, \"\n if age[\"days\"] == 1:\n age_string = age_string + \"1 day, \"\n elif age[\"days\"]:\n age_string = age_string + f\"{age['days']} days, 
\"\n if not age[\"years\"] and not age[\"months\"]:\n if age[\"hours\"] == 1:\n age_string = age_string + \"1 hour, \"\n elif age[\"hours\"]:\n age_string = age_string + f\"{age['hours']} hours, \"\n if age[\"minutes\"] == 1:\n age_string = age_string + \"1 minute, \"\n elif age[\"minutes\"]:\n age_string = age_string + f\"{age['minutes']} minutes, \"\n if age[\"seconds\"] == 1:\n age_string = age_string + \"1 second, \"\n elif age[\"seconds\"]:\n age_string = age_string + f\"{age['seconds']} seconds, \"\n return age_string[:-2]\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n global G_logChannel\n # Check if the member was muted and reapply it, just in case someone is trying to avoid a mute by rejoining the server\n db = UnQLite(\"FHDatabase.db\")\n mutes = db.collection(\"Mutes\")\n fetched = mutes.filter(lambda obj: obj[\"id\"] == member.id)\n if fetched:\n muted_role = await createMuteRole(member.guild)\n await member.add_roles(muted_role)\n # Notify about the member joining in logs\n embed = discord.Embed(title=\"Member Joined\", description=f\"{member.mention} joined the server!\", color=0x00ff00)\n embed.set_thumbnail(url=member.avatar_url)\n age = self.calculateUserAge(member)\n embed.add_field(name=\"Account Age\", value=age, inline=False)\n await G_logChannel.send(embed=embed)\n print(f\"User joined with an account that is {age} old.\")\n\n # Give the user a joining role\n roles = db.collection(\"Roles\")\n roleid = roles.filter(lambda obj: obj[\"name\"] == \"read_the_rules\")[0][\"id\"]\n startingRole = member.guild.get_role(roleid)\n await member.add_roles(startingRole)\n\n # Message the user with the specified welcoming message\n greetings = db.collection(\"BotGreetings\")\n dm_message = greetings.filter(lambda obj: obj[\"name\"] == \"greeting_dm\")[0][\"content\"]\n try:\n await member.send(dm_message.replace(\"$[mention]\", f\"{member.mention}\").replace(\"$[user]\", f\"{member.name}\"))\n except:\n print(f\"Couldn't message {member.name}\")\n\n # Store the new member info in the database, \"warned\" is for inactivity warning\n new_members = db.collection(\"NewMembers\")\n new_members.store({'id': member.id, 'member_number': member.guild.member_count, 'warned': False})\n db.close()\n\n @commands.Cog.listener()\n async def on_member_remove(self, member):\n global G_logChannel\n embed = discord.Embed(title=\"Member Left\", description=f\"{member.mention} left the server\", color=0xff7f00)\n embed.set_author(name=member, icon_url=member.avatar_url)\n roles = \"\"\n for role in member.roles:\n if role.name != \"@everyone\":\n roles += f\"{role.mention}, \"\n roles = roles[:-2]\n embed.add_field(name=\"Roles\", value=roles, inline=False)\n await G_logChannel.send(embed=embed)\n # Remove the new member information from the database to save space\n db = UnQLite(\"FHDatabase.db\")\n new_members = db.collection(\"NewMembers\")\n fetched = new_members.filter(lambda obj: obj[\"id\"] == member.id)\n # Only activate on members who haven't gone through rule confirmation\n if fetched:\n id = fetched[0][\"__id\"]\n new_members.delete(id)\n\n @commands.Cog.listener()\n async def on_message(self, message):\n global G_confirmationChannel\n global G_greetingChannel\n db = UnQLite(\"FHDatabase.db\")\n if message.channel == G_confirmationChannel:\n roles = db.collection(\"Roles\")\n passphrases = db.collection(\"Passphrases\")\n passphrase_found = False\n for passphrase in passphrases.all():\n if passphrase[\"content\"] in message.content:\n passphrase_found = True\n if 
            if passphrase_found:\n                try:\n                    await message.delete()\n                except:\n                    print(\"Too slow! Rule confirmation message was already deleted by someone else.\")\n                role = roles.filter(lambda obj: obj[\"name\"] == \"newbie\")[0][\"id\"]\n                confirmedRole = message.guild.get_role(role)\n                await message.author.add_roles(confirmedRole)\n                role = roles.filter(lambda obj: obj[\"name\"] == \"read_the_rules\")[0][\"id\"]\n                startingRole = message.guild.get_role(role)\n                await message.author.remove_roles(startingRole)\n                new_members = db.collection(\"NewMembers\")\n                fetched = new_members.filter(lambda obj: obj[\"id\"] == message.author.id)\n                # If the member is found in the database, use the member number there. Otherwise use the number from the API\n                if fetched:\n                    member_number = fetched[0][\"member_number\"]\n                else:\n                    member_number = message.guild.member_count\n                welcome_message, imageFile, location = await create_welcome(message.author, member_number)\n                await G_greetingChannel.send(welcome_message, file=imageFile)\n                os.remove(location)\n                # Only activate on members who joined after the change\n                if fetched:\n                    entry_id = fetched[0][\"__id\"]\n                    new_members.delete(entry_id)\n        db.close()\n\n    @commands.Cog.listener()\n    async def on_member_update(self, before, after):\n        global G_logChannel\n        if before.nick != after.nick:\n            db = UnQLite(\"FHDatabase.db\")\n            nicknames = db.collection(\"Nicknames\")\n            fetched = nicknames.filter(lambda obj: obj[\"id\"] == after.id and obj[\"nickname\"] == before.nick)\n            if before.nick is not None and not fetched:\n                nicknames.store({\"id\": after.id, \"nickname\": before.nick})\n                fetched = nicknames.filter(lambda obj: obj[\"id\"] == after.id)\n                # Keep only the ten most recent nicknames per member\n                if len(fetched) > 10:\n                    nicknames.delete(fetched[0][\"__id\"])\n            embed = discord.Embed(title=\"Nickname Updated\", description=f\"{after.mention} had their nickname changed.\", color=after.color)\n            embed.set_author(name=after, icon_url=after.avatar_url)\n            # An embed field value may not be None, so fall back to \"None\" when a nickname is unset\n            embed.add_field(name=\"Previously\", value=before.nick if before.nick else \"None\", inline=True)\n            embed.add_field(name=\"Now\", value=after.nick if after.nick else \"None\", inline=True)\n            await G_logChannel.send(embed=embed)\n            db.close()\n        roles_removed = [role for role in before.roles if role not in after.roles]\n        roles_added = [role for role in after.roles if role not in before.roles]\n        if roles_removed:\n            embed = discord.Embed(title=\"Role Removed\", description=f\"{after.mention} was removed from {roles_removed[0].mention}\", color=after.color)\n            embed.set_author(name=after, icon_url=after.avatar_url)\n            await G_logChannel.send(embed=embed)\n        elif roles_added:\n            embed = discord.Embed(title=\"Role Added\", description=f\"{after.mention} was added to {roles_added[0].mention}\", color=after.color)\n            embed.set_author(name=after, icon_url=after.avatar_url)\n            await G_logChannel.send(embed=embed)\n\n    @commands.Cog.listener()\n    async def on_user_update(self, before, after):\n        global G_logChannel\n        # The member's ID to be saved in the database\n        memberid = before.id\n        # Get the user as a member to grab their role colour for embed colour\n        member = bot.get_guild(223340988314157056).get_member(memberid)\n        # The user may no longer be on the server, in which case get_member returns None\n        if member is None:\n            return\n        # The avatar's URL before and after update\n        avatar_before = before.avatar_url\n        avatar_after = after.avatar_url\n        # The avatar's hash before and after update\n        avahash_before = before.avatar\n        avahash_after = after.avatar\n        # If the usernames don't match, the user has changed their username\n        if before.name != after.name:\n            db = UnQLite(\"FHDatabase.db\")\n            usernames = db.collection(\"Usernames\")\n
            fetched = usernames.filter(lambda obj: obj[\"id\"] == after.id and obj[\"username\"] == before.name)\n            if not fetched:\n                usernames.store({\"id\": after.id, \"username\": before.name})\n                fetched = usernames.filter(lambda obj: obj[\"id\"] == after.id)\n                # Keep only the ten most recent usernames per user\n                if len(fetched) > 10:\n                    usernames.delete(fetched[0][\"__id\"])\n            embed = discord.Embed(title=\"Username Updated\", description=f\"{member.mention} updated their username\", color=member.color)\n            embed.set_author(name=after, icon_url=avatar_after)\n            embed.add_field(name=\"Previously\", value=before.name, inline=True)\n            embed.add_field(name=\"Now\", value=after.name, inline=True)\n            await G_logChannel.send(embed=embed)\n            db.close()\n        # If the avatar hashes don't match, the user changed their avatar\n        if avahash_before != avahash_after:\n            embed = discord.Embed(title=\"Avatar Updated\", description=f\"{member.mention} updated their avatar\", color=member.color)\n            embed.set_author(name=after, icon_url=avatar_before)\n            embed.set_thumbnail(url=avatar_after)\n            await G_logChannel.send(embed=embed)\n\n    @bot.event\n    async def on_bulk_message_delete(messages):\n        global G_logChannel\n        # Name of the log file in the form of year-month-day-hour-minute-second-microsecond.txt\n        filename = f\"{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S-%f')}.txt\"\n        # Start the file with a title: \"Bulk message deletion:\" and specify the time of the deletion\n        content = f\"Bulk message deletion: {datetime.datetime.utcnow().strftime('%A, %B %d %Y, %H:%M:%S UTC')}\\n----------\\n\"\n        # Specify how many messages were deleted\n        content += f\"{len(messages)} messages\\n\\n\"\n        # Specify whose message was deleted, their ID, what time the message was deleted, and what the message content was\n        for message in messages:\n            content += f\"From {message.author} ({message.author.id}) at {message.created_at.strftime('%B %d, %H:%M UTC')} in #{message.channel.name}\\n\"\n            content += f\"{message.clean_content}\\n\\n\"\n        # Save the file to logs directory with the specified name\n        with open(f\"logs/{filename}\", \"w\") as text_file:\n            text_file.write(content)\n        logFile = discord.File(f\"logs/{filename}\")\n        await G_logChannel.send(f\"**Bulk Message Deletion**: {len(messages)} messages logged\", file=logFile)\n        os.remove(f\"logs/{filename}\")\n\n        print(f\"{len(messages)} messages deleted, log saved on server\")\n\n    @bot.event\n    async def on_message_delete(message):\n        global G_logChannel\n        embed = discord.Embed(title=\"Message Deleted\", description=f\"Message in {message.channel.mention} by {message.author.mention} was deleted.\", color=0xfc7805)\n        embed.set_author(name=message.author, icon_url=message.author.avatar_url)\n        # message.content is an empty string, not None, when the content cannot be retrieved\n        if not message.content:\n            embed.add_field(name=\"Content\", value=\"**Cannot retrieve content**\", inline=False)\n        elif message.content.startswith(\"$feedback \"):\n            # Skip deleted $feedback invocations, presumably to keep feedback anonymous\n            return\n        else:\n            embed.add_field(name=\"Content\", value=message.content, inline=False)\n        await G_logChannel.send(embed=embed)\n\n    @bot.event\n    async def on_message_edit(before, after):\n        # Ignore if the message author is a bot or if the message \"edit\" is just an embed appearing\n        if after.author.bot or (not before.embeds and after.embeds):\n            return\n        global G_logChannel\n        embed = discord.Embed(title=\"Message Edited\", description=f\"Message in {after.channel.mention} by {after.author.mention} was edited. [Jump to message]({after.jump_url})\", color=0x05fceb)\n
        embed.set_author(name=after.author, icon_url=after.author.avatar_url)\n        if not before.content or not after.content:\n            embed.add_field(name=\"Content\", value=\"**Cannot retrieve content**\", inline=False)\n        else:\n            before_message = before.content\n            # Truncate long messages so they fit into an embed field\n            if len(before_message) > 250:\n                before_message = before_message[:250]\n                before_message += \"...\"\n            after_message = after.content\n            if len(after_message) > 250:\n                after_message = after_message[:250]\n                after_message += \"...\"\n            embed.add_field(name=\"Previously\", value=f\"\\u200b{before_message}\", inline=False)\n            embed.add_field(name=\"Now\", value=f\"\\u200b{after_message}\", inline=False)\n        await G_logChannel.send(embed=embed)\n\n# Create a user action the bot listens to which will grant the user another role\n\nasync def create_welcome(member: discord.Member, memberNumber: int):\n    # CREATE THE WELCOMING BANNER\n    # name of the banner image with file extension\n    banner_name = \"banner.png\"\n    # open the banner file and resize it to 1000x300 px, save the height to variable\n    im1 = Image.open(f\"images/{banner_name}\").resize((1000, 300))\n    im1_h = im1.height\n    # get the url of the joining member's avatar and resize it to 250x250 px, saving the height to variable again\n    url = member.avatar_url\n    response = requests.get(url)\n    im2 = Image.open(BytesIO(response.content)).resize((250, 250))\n    im2_h = im2.height\n    # draw a mask image with black background and white circle in the middle\n    mask = Image.new(\"L\", im2.size, 0)\n    maskdraw = ImageDraw.Draw(mask)\n    maskdraw.ellipse((0, 0, mask.width, mask.height), fill=255)\n    # save the offset to variable, 20 px off the left side and centered vertically\n    offset = (20, int((im1_h - im2_h) / 2))\n    # paste the avatar with the circle mask applied on top of the banner using the offset above\n    im1.paste(im2, offset, mask)\n    draw = ImageDraw.Draw(im1)\n    # determine the font and its size to use in the welcoming text\n    font_name = \"arial.ttf\"\n    font_size = 48\n    font = ImageFont.truetype(f\"fonts/{font_name}\", font_size)\n    # lambda to get the ordinal of a number, e.g. ordinal(1) -> \"1st\", ordinal(2) -> \"2nd\", ordinal(11) -> \"11th\", ordinal(23) -> \"23rd\"\n    ordinal = lambda n: \"%d%s\" % (n,\"tsnrhtdd\"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])\n    member_ordinal = ordinal(memberNumber)\n    db = UnQLite(\"FHDatabase.db\")\n    greetings = db.collection(\"BotGreetings\")\n    welcome_message = greetings.filter(lambda obj: obj[\"name\"] == \"greeting_message\")[0][\"content\"]\n    welcome_text = greetings.filter(lambda obj: obj[\"name\"] == \"greeting_image\")[0][\"content\"]\n    welcome_message = welcome_message.replace(\"$[mention]\", f\"{member.mention}\").replace(\"$[user]\", f\"{member.name}\").replace(\"$[nth]\", f\"{member_ordinal}\").replace(\"$[n]\", f\"{member.guild.member_count}\")\n    welcome_text = welcome_text.replace(\"$[user]\", f\"{member.name}\").replace(\"$[nth]\", f\"{member_ordinal}\").replace(\"$[n]\", f\"{member.guild.member_count}\")\n    # draw the welcoming text on top of the banner, 300 px from the left and vertically centered by the height of the text\n    text_w, text_h = font.getsize(welcome_text)\n    draw.text((300, (im1_h - text_h) / 2), welcome_text, font=font, fill=(255,255,255,255), stroke_width=2, stroke_fill=(0,0,0,255))\n    # save the created file as an image file and send it to the determined channel\n    location = f\"images/{member.id}_welcome.png\"\n    im1.save(location)\n    imagefile = discord.File(f\"images/{member.id}_welcome.png\")\n    db.close()\n    return welcome_message, imagefile, location\n\n
class Moderator(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n# MODERATOR ONLY COMMANDS\n    @commands.command()\n    @commands.has_permissions(kick_members=True)\n    async def kick(self, ctx, member: discord.Member, *, reason: typing.Optional[str]):\n        if member == ctx.author:\n            await ctx.send(\"I... don't think that's a good idea. :3\")\n            return\n        await add_to_logs(ctx.message.content, ctx.author)\n        try:\n            await member.send(f\"**You were kicked from Furry Hangout.**\\nReason specified: {reason}\")\n        except:\n            print(f\"Couldn't DM {member} ({member.id}) with kick reason.\")\n        await member.kick(reason=reason)\n        await ctx.send(f\"{member.name} was kicked\")\n\n    @commands.command()\n    @commands.has_permissions(ban_members=True)\n    async def ban(self, ctx, member: typing.Union[discord.User, int], delete: typing.Optional[int] = 0, *, reason: typing.Optional[str]):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # Initialise the member ID just in case\n        memberid = 0\n        # Check if the user entered an ID or tagged a member\n        if isinstance(member, int):\n            memberid = member\n        else:\n            memberid = member.id\n        # Don't ban yourself\n        if ctx.author.id == memberid:\n            await ctx.send(\"You can't ban yourself! You're not as powerful as Yoja.\")\n            return\n        # The bot can't remove messages older than 7 days, so change any number greater than 7 to 7\n        if delete > 7:\n            delete = 7\n        # Ban the member and delete the specified amount of messages (default 0), also inform about the action\n        await ctx.message.guild.ban(discord.Object(id=memberid), reason=reason, delete_message_days=delete)\n        await ctx.send(f\"<@!{memberid}> was banned.\")\n        print(f\"{ctx.author} banned {member} with reason: {reason}\")\n    @ban.error\n    async def ban_error(self, ctx, error):\n        # Catch any errors if the user enters an invalid name or ID\n        if isinstance(error, discord.ext.commands.BadUnionArgument):\n            await ctx.send(\"Member not found.\")\n        elif isinstance(error, discord.ext.commands.errors.CommandInvokeError):\n            await ctx.send(\"User not found.\")\n        else:\n            print(f\"{ctx.author} attempted to run the command 'ban' and was met with {type(error)}: {error}\")\n\n    @commands.command()\n    @commands.has_permissions(ban_members=True)\n    async def unban(self, ctx, memberid: int, *, reason: typing.Optional[str]):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # Unban the member and inform about it\n        await ctx.guild.unban(discord.Object(id=memberid), reason=reason)\n        await ctx.send(f\"<@!{memberid}> was unbanned.\")\n        print(f\"{ctx.author} unbanned {memberid} with reason: {reason}\")\n\n    @commands.command()\n    @commands.has_permissions(kick_members=True)\n    async def warn(self, ctx, member: discord.Member, *, reason):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # Check if the user tagged a proper member; return early if not, so warnedid is always defined below\n        try:\n            warnedid = member.id\n        except:\n            await ctx.send(\"Member not found.\")\n            return\n        # Connect to the database and save the warning there\n        db = UnQLite(\"FHDatabase.db\")\n        warnings = db.collection(\"Warnings\")\n        warnings.store({\"id\": warnedid, \"reason\": reason, \"issuer\": ctx.author.id})\n        db.close()\n        # Inform about the warning and send a message to the user who was warned\n        print(f\"{ctx.author} warned {member} for: {reason}\")\n        await ctx.send(f\"{member.mention} was warned.\")\n        try:\n            await member.send(f\"You were warned for: {reason}\")\n        except:\n            print(f\"Unable to message {member} with the warning message\")\n\n    @warn.error\n    async def warn_error(self, ctx, error):\n        # Check that the user actually includes a reason\n
        if isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):\n            await ctx.send(\"Please include a reason for logging purposes.\")\n\n    @commands.command()\n    @commands.has_permissions(kick_members=True)\n    async def removewarn(self, ctx, arg: int):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # check that the user enters an integer\n        try:\n            warnid = int(arg)\n        except ValueError:\n            await ctx.send(f'{arg} is not a valid ID.')\n            return\n        # connect to database and check if a warning with selected ID is found\n        db = UnQLite(\"FHDatabase.db\")\n        warnings = db.collection(\"Warnings\")\n        fetched = warnings.filter(lambda obj: obj[\"__id\"] == warnid)\n        # if no warning is found, inform the user and end the function\n        if not fetched:\n            await ctx.send(f\"No warnings with ID {warnid} found.\")\n            db.close()\n            return\n        else:\n            fetched = fetched[0]\n        # in case of a typo, confirm that the warning is the correct one and ask the user to add a reaction accordingly\n        bot_message = await ctx.send(f'Are you sure you want to remove `{fetched[\"reason\"]}` from <@!{fetched[\"id\"]}>?')\n        await bot_message.add_reaction(\"✅\")\n        await bot_message.add_reaction(\"❌\")\n\n        # check that the user who reacts is the user who initialised the command, and that they react with the correct reaction\n        def check_approve(reaction, user):\n            return user == ctx.author and (reaction.emoji == \"✅\" or reaction.emoji == \"❌\")\n\n        # use the above check to proceed and do nothing if no reaction is given in 60 seconds\n        try:\n            reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check_approve)\n        except asyncio.TimeoutError:\n            await bot_message.delete()\n        else:\n            # if approved, inform that the warning was deleted and proceed to remove it from the database, otherwise do nothing\n            if reaction.emoji == \"✅\":\n                print(f'{ctx.author} removed warning {fetched[\"reason\"]} from {fetched[\"id\"]}')\n                await bot_message.edit(content=f'Warning {fetched[\"reason\"]} removed from <@!{fetched[\"id\"]}>')\n                await bot_message.clear_reactions()\n                warnings.delete(warnid)\n            elif reaction.emoji == \"❌\":\n                await bot_message.delete()\n        # close the database in every code path, not only after a successful deletion\n        db.close()\n\n    @commands.command()\n    @commands.has_permissions(kick_members=True)\n    async def warnings(self, ctx, member: discord.Member):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # Check if the user tagged a proper member\n        try:\n            memberid = member.id\n        except:\n            await ctx.send(\"Member not found.\")\n            return\n        # Connect to the database and get the warnings of the specified user\n        db = UnQLite(\"FHDatabase.db\")\n        # Collect the warnings into an embed and show the warning's ID, issuer and the reason\n        warnings = db.collection(\"Warnings\")\n        fetched = warnings.filter(lambda obj: obj[\"id\"] == memberid)\n        embed = discord.Embed(title=f\"Warnings of {member.display_name}\", description=f\"{len(fetched)} warning(s)\", color=0xff0000)\n        if not fetched:\n            embed.add_field(name=\"No Warnings Found\", value=\"\\u200b\")\n        for warning in fetched:\n            embed.add_field(name=f\"ID: {warning['__id']}\", value=f\"**Issuer:** <@!{warning['issuer']}>\\n**Reason:** {warning['reason']}\", inline=False)\n        await ctx.send(embed=embed)\n        db.close()\n\n    async def createMuteRole(self, guild: discord.Guild):\n        db = UnQLite(\"FHDatabase.db\")\n        roles = db.collection(\"Roles\")\n        fetched = roles.filter(lambda obj: obj[\"name\"] == \"muted\")\n        db.close()\n        # Guard against a missing database entry before indexing into the result list\n        if not fetched:\n            return None\n        muted_role = guild.get_role(fetched[0][\"id\"])\n        if not muted_role:\n            # Return None to inform that an invalid role has been given\n            return None\n        return muted_role\n\n
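    # Note on mute() below: the first token of the free-form reason may encode a duration\n    # (digits plus d/h/m units); convertTime(), defined elsewhere in this file, turns that\n    # token into a timedelta. E.g. \"2d5h15m Spamming\" reads as a 2-day, 5-hour, 15-minute\n    # mute with reason \"Spamming\"; with no such token the mute is stored as indefinite.\n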
    @commands.command()\n    @commands.has_permissions(manage_roles=True)\n    async def mute(self, ctx, member: discord.Member, *, reason: typing.Optional[str]):\n        await add_to_logs(ctx.message.content, ctx.author)\n        muted_role = await self.createMuteRole(ctx.guild)\n        if not muted_role:\n            await ctx.send(\"Error: An invalid role has been specified in the bot settings.\")\n            return\n        # Move the role's position right below the bot's top role, if it's not already there\n        role_position = ctx.guild.me.roles[-1].position - 1\n        if muted_role.position != role_position:\n            try:\n                await muted_role.edit(position=role_position)\n            except:\n                await ctx.send(\"Error: The mute role is above my top role.\")\n        # Add the created role to the muted member\n        await member.add_roles(muted_role, reason=reason)\n        # Open mutes database\n        db = UnQLite(\"FHDatabase.db\")\n        mutes = db.collection(\"Mutes\")\n        time = None\n        if reason:\n            possible_time = reason.split()[0]\n            matches = re.match(r\"(\\d+[dhm])+\", possible_time)\n            if matches:\n                time = matches.group(0)\n                if len(reason.split()) > 1:\n                    reason = ' '.join(reason.split()[1:])\n                else:\n                    reason = None\n        # If time was specified, go for the timed unmute\n        if time:\n            delta = await convertTime(time)\n            if not delta:\n                # convertTime() failed to parse the token: fall back to an indefinite mute\n                mutes.store({\"id\": member.id, \"time\": None})\n                embed = discord.Embed(title=\"Member muted\", description=f\"{member.mention} was muted.\")\n            else:\n                date = datetime.datetime.utcnow() + delta\n                mutes.store({\"id\": member.id, \"time\": date.strftime(\"%d %m %Y %H:%M:%S\")})\n                embed = discord.Embed(title=\"Member muted\", description=f\"{member.mention} was muted until {date.strftime('%B %d, %Y, %H:%M UTC.')}\", color=0xc0c0c0)\n        else:\n            mutes.store({\"id\": member.id, \"time\": None})\n            embed = discord.Embed(title=\"Member muted\", description=f\"{member.mention} was muted.\")\n        # An embed field value may not be None, so fall back to \"None\" when no reason or time is given\n        embed.add_field(name=\"Reason specified\", value=reason if reason else \"None\")\n        await ctx.send(embed=embed)\n        try:\n            embed = discord.Embed(title=\"You were muted\", description=f\"You were muted on Furry Hangout.\", color=0xff0000)\n            embed.add_field(name=\"Time specified\", value=time if time else \"None\")\n            embed.add_field(name=\"Reason specified\", value=reason if reason else \"None\")\n            await member.send(embed=embed)\n        except:\n            print(\"Unable to DM the muted member\")\n        db.close()\n\n    @commands.command()\n    @commands.has_permissions(manage_roles=True)\n    async def unmute(self, ctx, member: discord.Member):\n        await add_to_logs(ctx.message.content, ctx.author)\n        db = UnQLite(\"FHDatabase.db\")\n        mutes = db.collection(\"Mutes\")\n        roles = db.collection(\"Roles\")\n        # Guard against a missing database entry before indexing into the result list\n        fetched = roles.filter(lambda obj: obj[\"name\"] == \"muted\")\n        muted_role = ctx.guild.get_role(fetched[0][\"id\"]) if fetched else None\n        if muted_role:\n            if muted_role in member.roles:\n                await member.remove_roles(muted_role)\n        else:\n            await ctx.send(\"Error: An invalid role is specified. If the member is muted, please manually unmute them.\")\n
        fetched = mutes.filter(lambda obj: obj[\"id\"] == member.id)\n        if fetched:\n            mutes.delete(fetched[0][\"__id\"])\n        db.close()\n        await ctx.send(f\"{member.display_name} was unmuted.\")\n\n    @commands.command()\n    @commands.has_permissions(manage_messages=True)\n    async def purge(self, ctx, amount: typing.Union[int, str], *, member: typing.Optional[discord.Member]):\n        await add_to_logs(ctx.message.content, ctx.author)\n        messages = []\n        messages_to_delete = 0\n        if amount == \"all\":\n            messages_to_delete = 500\n        else:\n            messages_to_delete = amount\n        count = 0\n        firstMessageChecked = False\n        async for message in ctx.channel.history(limit=501, oldest_first=False):\n            # Skip the newest message in the history, i.e. the purge command itself\n            if not firstMessageChecked:\n                firstMessageChecked = True\n            else:\n                if member is not None:\n                    if message.author == member:\n                        messages.append(message)\n                        count += 1\n                else:\n                    messages.append(message)\n                    count += 1\n            if count == messages_to_delete:\n                break\n        # delete_messages() accepts at most 100 messages per call, so delete in chunks\n        for chunk_start in range(0, len(messages), 100):\n            await ctx.channel.delete_messages(messages[chunk_start:chunk_start + 100])\n        print(f\"{amount} messages deleted in #{ctx.channel.name}\")\n\n    @commands.command()\n    @commands.has_permissions(kick_members=True)\n    async def message(self, ctx, member: discord.Member, *, message: str):\n        await add_to_logs(ctx.message.content, ctx.author)\n        signature = \"I'm a bot and I was only asked to deliver this message. Replying to me will not get a response.\"\n        try:\n            await member.send(f\"{message}\\n\\n{signature}\")\n            await ctx.send(f\"Message successfully delivered to {member}!\")\n        except:\n            print(f\"Can't send a message to {member}\")\n            # Report the failure to the command issuer; the member clearly cannot be DMed\n            await ctx.send(f\"Unable to send a message to {member}. :( Maybe they blocked me or have DMs turned off.\")\n\n    @commands.command()\n    @commands.has_permissions(kick_members=True)\n    async def testwelcome(self, ctx):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # For testing the welcoming image\n        message, imageFile, location = await create_welcome(ctx.author, ctx.guild.member_count)\n        await ctx.send(message, file=imageFile)\n        os.remove(location)\n\n# VOICE CHANNEL COMMANDS\nclass Voice(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def join(self, ctx):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # Save the command issuer's voice state\n        voicestate = ctx.author.voice\n        # Check if the bot is already connected somewhere\n        if len(bot.voice_clients) > 0:\n            botchannel = bot.voice_clients[0].channel\n            # Check if the bot is currently on VC with others or is still playing something\n            if len(botchannel.members) > 1 or bot.voice_clients[0].is_playing():\n                await ctx.send(f\"I'm currently occupied on {botchannel}.\")\n                return\n            # Check if the command issuer is not connected to a voice channel\n            elif voicestate is None:\n                await ctx.send(\"You are not connected to a voice channel.\")\n                return\n            # If above checks were false, disconnect from the channel\n            else:\n                await bot.voice_clients[0].disconnect()\n\n        # If the command issuer is connected on a voice channel, join on the same channel\n        if voicestate is not None:\n            channel = voicestate.channel\n            await channel.connect()\n        else:\n            await ctx.send(\"You are not connected to a voice channel.\")\n\n    @commands.command()\n    async def leave(self, ctx):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # Guard: do nothing if the bot is not connected to any voice channel\n        if not bot.voice_clients:\n            return\n        botchannel = bot.voice_clients[0].channel\n        # Check that the command issuer is on the voice channel before disconnecting\n        if await self.isOnChannel(ctx.author, botchannel):\n            bot.voice_clients[0].stop()\n            await bot.voice_clients[0].disconnect()\n\n
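    # The commands below are stubs: help lists the music commands as UPCOMING/NON-FUNCTIONAL\n    # and the Voice cog is never registered in on_ready, so play() and checkQueue() only\n    # outline the intended flow (presumably to be backed by the lavalink/youtube_dl imports above).\n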
    @commands.command()\n    async def play(self, ctx, *, search: str):\n        await add_to_logs(ctx.message.content, ctx.author)\n        print(\"Playing...\")\n    @play.error\n    async def play_error(self, ctx, error):\n        # If the user didn't include a search term\n        if isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):\n            await ctx.send(\"Usage: `play [YouTube URL/search term]`\")\n\n    # Method to check if the user shares a voice channel with the bot\n    async def isOnChannel(self, member: discord.Member, channel: discord.VoiceChannel):\n        return member in channel.members\n\n    def checkQueue(self, voice_client, lastPlayed: int):\n        print(\"Checking queue\")\n\n# UTILITY COMMANDS\nclass Utility(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def remindme(self, ctx, in_or_at: str, time: str, remindertype: str, *, reminder: str):\n        await add_to_logs(ctx.message.content, ctx.author)\n        if remindertype != \"once\" and remindertype != \"repeat\":\n            await ctx.send(\"Error in parameter: repeat\")\n            return\n        date = await get_reminder_date(in_or_at, time)\n        # get_reminder_date returns an error message string when the input cannot be parsed\n        if isinstance(date, str):\n            await ctx.send(date)\n            return\n        # Interval is either the time specified with \"in\" (in minutes) or 1 day (1440 minutes) with \"at\"\n        if in_or_at == \"in\":\n            delta = await convertTime(time)\n            interval = int(delta.total_seconds() / 60)\n        else:\n            interval = 1440\n        db = UnQLite(\"FHDatabase.db\")\n        reminders = db.collection(\"Reminders\")\n        # remind parameter: \"me\" or \"group\", repeat parameter: True if reminder type is \"repeat\"\n        reminders.store({\"id\": ctx.author.id, \"remind\": \"me\", \"repeat\": remindertype == \"repeat\", \"interval\": interval, \"time\": date.strftime(\"%d %m %Y %H:%M:%S\"), \"reminder\": reminder})\n        responses = [\"Of course!\", \"Sure!\", \"Definitely!\", \"Absolutely!\", \"No problem!\"]\n        if remindertype == \"once\":\n            willRepeat = \"The reminder will be shown only once.\"\n        else:\n            willRepeat = \"The reminder will be repeated multiple times.\"\n        await ctx.send(f\"{random.choice(responses)} I will remind you at {date.strftime('%B %d, %H:%M UTC')} about: {reminder}\\n{willRepeat}\")\n        db.close()\n\n    @commands.command()\n    async def remindgroup(self, ctx, in_or_at: str, time: str, remindertype: str, *, reminder: str):\n        await add_to_logs(ctx.message.content, ctx.author)\n        if remindertype != \"once\" and remindertype != \"repeat\":\n            await ctx.send(\"Error in parameter: repeat\")\n            return\n        date = await get_reminder_date(in_or_at, time)\n        # get_reminder_date returns an error message string when the input cannot be parsed\n        if isinstance(date, str):\n            await ctx.send(date)\n            return\n        responses = [\"Of course!\", \"Sure!\", \"Definitely!\", \"Absolutely!\", \"No problem!\"]\n        if remindertype == \"once\":\n            willRepeat = \"The reminder will be shown only once.\"\n        else:\n            willRepeat = \"The reminder will be repeated multiple times.\"\n        bot_message = await ctx.send(f\"{random.choice(responses)} I will send a reminder at {date.strftime('%B %d, %H:%M UTC')} about: {reminder}\\nOpt in by reacting 👍 to this message.\\n{willRepeat}\")\n        await bot_message.add_reaction(\"👍\")\n        # Interval is either the time specified with \"in\" (in minutes) or 1 day (1440 minutes) with \"at\"\n        if in_or_at == \"in\":\n            delta = await convertTime(time)\n            interval = int(delta.total_seconds() / 60)\n        else:\n            interval = 1440\n        db = UnQLite(\"FHDatabase.db\")\n        reminders = db.collection(\"Reminders\")\n        reminders.store({\"id\": bot_message.id, \"channel\": ctx.channel.id, \"remind\": \"group\", \"repeat\": remindertype == \"repeat\", \"interval\": interval, \"time\": date.strftime(\"%d %m %Y %H:%M:%S\"), \"reminder\": reminder})\n        db.close()\n\n    @commands.command()\n    async def reminders(self, ctx):\n        await add_to_logs(ctx.message.content, ctx.author)\n        db = UnQLite(\"FHDatabase.db\")\n        reminders = db.collection(\"Reminders\")\n        embed = discord.Embed(title=f\"Reminders of {ctx.author}\")\n        fetched = reminders.filter(lambda obj: obj[\"id\"] == ctx.author.id and obj[\"remind\"] == \"me\")\n        if not fetched:\n            embed.description = \"No reminders found. Create a new reminder using the `remindme` command\"\n        for reminder in fetched:\n            date = datetime.datetime.strptime(reminder[\"time\"], \"%d %m %Y %H:%M:%S\")\n            # %d is used instead of the platform-specific %-d so this also runs on Windows\n            embed.add_field(name=f\"ID: {reminder['__id']}\", value=f\"**Content:** {reminder['reminder']}\\n**Due:** {date.strftime('%B %d %Y, %H:%M:%S UTC')}\\n**Will Repeat:** {reminder['repeat']}\")\n        await ctx.send(embed=embed)\n        db.close()\n\n    @commands.command()\n    async def forget(self, ctx, reminder_id: int):\n        await add_to_logs(ctx.message.content, ctx.author)\n        db = UnQLite(\"FHDatabase.db\")\n        reminders = db.collection(\"Reminders\")\n        fetched = reminders.filter(lambda obj: obj[\"id\"] == ctx.author.id and obj[\"remind\"] == \"me\" and obj[\"__id\"] == reminder_id)\n        if not fetched:\n            await ctx.send(f\"You have no reminders with ID {reminder_id}. Check your reminder IDs with `reminders`\")\n        else:\n            bot_message = await ctx.send(f'Are you sure you want me to forget `{fetched[0][\"reminder\"]}`?')\n            await bot_message.add_reaction(\"✅\")\n            await bot_message.add_reaction(\"❌\")\n\n            # check that the user who reacts is the user who initialised the command, and that they react with the correct reaction\n            def check_approve(reaction, user):\n                return user == ctx.author and (reaction.emoji == \"✅\" or reaction.emoji == \"❌\")\n\n            # use the above check to proceed and do nothing if no reaction is given in 60 seconds\n            try:\n                reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check_approve)\n            except asyncio.TimeoutError:\n                await bot_message.delete()\n            else:\n                # if approved, inform that the reminder was deleted and proceed to remove it from the database, otherwise do nothing\n                if reaction.emoji == \"✅\":\n                    await bot_message.edit(content=f'I have forgotten {fetched[0][\"reminder\"]}.')\n                    await bot_message.clear_reactions()\n                    reminders.delete(reminder_id)\n                elif reaction.emoji == \"❌\":\n                    await bot_message.delete()\n        db.close()\n\n    @commands.command()\n    async def forgetall(self, ctx):\n        await add_to_logs(ctx.message.content, ctx.author)\n        db = UnQLite(\"FHDatabase.db\")\n        reminders = db.collection(\"Reminders\")\n        fetched = reminders.filter(lambda obj: obj[\"id\"] == ctx.author.id and obj[\"remind\"] == \"me\")\n        if not fetched:\n            await ctx.send(f\"You have no reminders to remove. 
Create a new reminder with the `remindme` command.\")\n else:\n bot_message = await ctx.send(f'Are you sure you want me to forget all of your reminders?')\n await bot_message.add_reaction(\"✅\")\n await bot_message.add_reaction(\"❌\")\n\n # check that the user who reacts is the user who initialised the command, and that they react with the correct reaction\n def check_approve(reaction, user):\n return user == ctx.author and (reaction.emoji == \"✅\" or reaction.emoji == \"❌\")\n\n # use the above check to proceed and do nothing if no reaction is given in 60 seconds\n try:\n reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check_approve)\n except asyncio.TimeoutError:\n await bot_message.delete()\n else:\n # if approved, inform that the reminders were deleted and proceed to remove them from the database, otherwise do nothing\n if reaction.emoji == \"✅\":\n await bot_message.edit(content=f'I have forgotten all of your reminders.')\n await bot_message.clear_reactions()\n for reminder in fetched:\n reminders.delete(reminder[\"__id\"])\n elif reaction.emoji == \"❌\":\n await bot_message.delete()\n db.close()\n\n @commands.command()\n async def feedback(self, ctx, *, message: str):\n if ctx.channel.type == discord.ChannelType.private:\n staff_members = []\n staff_role = bot.get_guild(223340988314157056).get_role(225004317596319746)\n numbers = \"0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟\".split()\n embed = discord.Embed(title=\"Who to send it to?\", description=\"React with the corresponding emoji.\")\n embed.add_field(name=\"**ALL**\", value=\"Emoji: 🅰️\")\n index = 0\n async for member in bot.get_guild(223340988314157056).fetch_members():\n if index == 10:\n break\n if staff_role in member.roles:\n staff_members.append(member)\n embed.add_field(name=f\"**{member}**\", value=f\"Emoji: {numbers[index]}\")\n index += 1\n bot_message = await ctx.send(embed=embed)\n await bot_message.add_reaction(\"🅰️\")\n for index in range(len(staff_members)):\n await bot_message.add_reaction(numbers[index])\n def check_reaction(reaction, user):\n return (reaction.emoji in numbers or reaction.emoji == \"🅰️\") and user == ctx.author\n try:\n reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check_reaction)\n except asyncio.TimeoutError:\n await bot_message.edit(content=\"Timed Out. 
Please resend the command.\", embed=None)\n            else:\n                messages = []\n                recipients = []\n                def check_spam(reaction, user):\n                    return reaction.emoji == \"🚩\" and reaction.message in messages and user in recipients\n                embed = discord.Embed(title=\"Feedback received\", description=f\"Anonymous user has sent feedback.\")\n                embed.add_field(name=\"**Feedback**\", value=message)\n                if reaction.emoji == \"🅰️\":\n                    embed.add_field(name=\"**Scope**\", value=\"This feedback was sent to all staff members.\", inline=False)\n                    embed.add_field(name=\"Is this message spam?\", value=\"Flag it by reacting with 🚩 within 1 hour and report to Veloxization#0735.\")\n                    sent_to = len(staff_members)\n                    for member in staff_members:\n                        try:\n                            feedback_message = await member.send(embed=embed)\n                            messages.append(feedback_message)\n                            recipients.append(member)\n                        except:\n                            print(f\"Unable to send feedback to {member}\")\n                            sent_to -= 1\n                    await bot_message.edit(content=f\"Feedback delivered to {sent_to} staff members!\", embed=None)\n                    try:\n                        reaction, user = await bot.wait_for('reaction_add', timeout=3600.0, check=check_spam)\n                    except asyncio.TimeoutError:\n                        print(\"Feedback not reported as spam.\")\n                    else:\n                        await add_to_logs(ctx.message.clean_content, ctx.author)\n                else:\n                    embed.add_field(name=\"**Scope**\", value=\"This feedback was sent to you personally.\", inline=False)\n                    embed.add_field(name=\"Is this message spam?\", value=\"Flag it by reacting with 🚩 within 1 hour and report to Veloxization#0735.\")\n                    index = numbers.index(reaction.emoji)\n                    # Guard: a number reaction may not correspond to a listed staff member\n                    if index >= len(staff_members):\n                        await bot_message.edit(content=\"That number doesn't match a staff member. Please resend the command.\", embed=None)\n                        return\n                    try:\n                        feedback_message = await staff_members[index].send(embed=embed)\n                    except:\n                        await bot_message.edit(content=f\"I was unable to deliver the feedback to {staff_members[index]}. :(\", embed=None)\n                    else:\n                        await bot_message.edit(content=f\"Feedback successfully delivered to {staff_members[index]}!\", embed=None)\n                        messages.append(feedback_message)\n                        recipients.append(staff_members[index])\n                        try:\n                            reaction, user = await bot.wait_for('reaction_add', timeout=3600.0, check=check_spam)\n                        except asyncio.TimeoutError:\n                            print(\"Feedback not reported as spam.\")\n                        else:\n                            await add_to_logs(ctx.message.clean_content, ctx.author)\n        else:\n            await ctx.message.delete()\n            bot_message = await ctx.send(\"This command is reserved only for private messages! Please message me privately to use this command.\")\n            await bot_message.delete(delay=10.0)\n\n# INFO COMMANDS\nclass Info(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def stats(self, ctx, member: typing.Optional[discord.Member], channel: typing.Optional[discord.TextChannel]):\n        await add_to_logs(ctx.message.content, ctx.author)\n        # Default to command issuer in the current channel\n        if member is None:\n            member = ctx.author\n        if channel is None:\n            channel = ctx.message.channel\n        if member.bot:\n            await ctx.send(\"I can't do this for bots. 
:c\")\n return\n # Inform that the bot is calculating the statistics\n botmessage = await ctx.send(\"Calculating...\")\n # Put all the messages in a list\n messages = await channel.history(limit=500, oldest_first=False).flatten()\n # Combined length of all messages, the number of attachments in those messages and the number of messages checked\n combinedLength = 0\n totalAttachments = 0\n user_messages = 0\n for message in messages:\n # Only handle the message if it's sent by the member we want\n if message.author == member:\n combinedLength += len(message.clean_content)\n totalAttachments += len(message.attachments)\n user_messages += 1\n # If the member doesn't have any messages in the specified channel, inform about it\n if user_messages == 0:\n averageLength = 0\n embed = discord.Embed(title=f\"None of the past 500 messages in {channel.mention} were from {member.name}.\")\n # Otherwise show the stats\n else:\n averageLength = round(combinedLength / user_messages, 2)\n embed = discord.Embed(title=f\"Statistics for {member.name}\", description=f\"Analyzed {user_messages} messages in {channel.mention}\", color=0x0ffca9)\n embed.add_field(name=\"Average message length\", value=f\"{averageLength} characters\")\n embed.add_field(name=\"Total attachments\", value=f\"{totalAttachments} attachments\")\n await botmessage.edit(content=None, embed=embed)\n\n @commands.command()\n async def userinfo(self, ctx, *, member: typing.Optional[discord.Member]):\n await add_to_logs(ctx.message.content, ctx.author)\n if member is None:\n member = ctx.author\n embed = discord.Embed(title=f\"{member.name}'s info\", color=member.color)\n embed.set_thumbnail(url=member.avatar_url)\n embed.add_field(name=\"Joined\", value=member.joined_at.strftime(\"%B %d, %Y at %H:%M UTC\"), inline=False)\n embed.add_field(name=\"Account Created\", value=member.created_at.strftime(\"%B %d, %Y at %H:%M UTC\"), inline=False)\n db = UnQLite(\"FHDatabase.db\")\n usernames = db.collection(\"Usernames\")\n fetched = usernames.filter(lambda obj: obj[\"id\"] == member.id)\n usernames = \"\"\n for row in fetched:\n usernames += row[\"username\"] + \", \"\n if usernames == \"\":\n usernames = \"N/A\"\n else:\n usernames = usernames[:-2]\n embed.add_field(name=\"Previous usernames\", value=usernames, inline=False)\n nicknames = db.collection(\"Nicknames\")\n fetched = nicknames.filter(lambda obj: obj[\"id\"] == member.id)\n nicknames = \"\"\n for row in fetched:\n nicknames += row[\"nickname\"] + \", \"\n if nicknames == \"\":\n nicknames = \"N/A\"\n else:\n nicknames = nicknames[:-2]\n embed.add_field(name=\"Previous nicknames\", value=nicknames, inline=False)\n db.close()\n\n roles = \"\"\n for role in member.roles:\n if role.name != \"@everyone\":\n roles = roles + role.mention + \", \"\n if roles == \"\":\n roles = \"None\"\n else:\n roles = roles[:-2]\n embed.add_field(name=\"Roles\", value=roles, inline=False)\n await ctx.send(embed=embed)\n\n @commands.command()\n async def serverinfo(self, ctx):\n await add_to_logs(ctx.message.content, ctx.author)\n embed = discord.Embed(title=\"Furry Hangout\", description=\"Server info\", color=0x8eff00)\n embed.set_thumbnail(url=ctx.guild.icon_url)\n embed.add_field(name=\"ID\", value=ctx.guild.id, inline=True)\n embed.add_field(name=\"Owner\", value=f\"{ctx.guild.owner}\", inline=True)\n embed.add_field(name=\"Members\", value=f\"{ctx.guild.member_count}\", inline=True)\n embed.add_field(name=\"Text Channels\", value=f\"{len(ctx.guild.text_channels)}\", inline=True)\n 
embed.add_field(name=\"Voice Channels\", value=f\"{len(ctx.guild.voice_channels)}\", inline=True)\n embed.add_field(name=\"Creation Date\", value=ctx.guild.created_at.strftime(\"%b %d, %Y, %H:%M:%S UTC\"), inline=True)\n embed.add_field(name=\"Roles\", value=f\"{len(ctx.guild.roles) - 1}\", inline=True)\n await ctx.send(embed=embed)\n\n# FUN COMMANDS\nclass Fun(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def eightball(self, ctx, arg: typing.Optional[str]):\n await add_to_logs(ctx.message.content, ctx.author)\n with open('json/8ball.json') as json_data:\n responses = json.load(json_data)\n response = random.choice(responses)\n await ctx.send(response)\n\n# HELP COMMANDS\nclass Help(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # Command for listing all available commands and their function\n @commands.command()\n async def help(self, ctx, arg: str = \"help\"):\n await add_to_logs(ctx.message.content, ctx.author)\n description = \" [optional]\" #To avoid continuous copy-pasting\n # List all moderator commands\n if arg.lower() == \"mod\":\n embed = discord.Embed(title=\"Moderator commands\", description=description, color=0x7f00ff)\n embed.add_field(name=\"kick <@member> [reason]\", value=\"Kicks a member with an optional reason.\", inline=False)\n embed.add_field(name='ban <@member/ID> [days of messages deleted (max 7, default 0)] [reason]', value=\"Bans a user by ID or tag with optional amount of messages deleted and reason.\", inline=False)\n embed.add_field(name=\"unban [reason]\", value=\"Unbans a user by ID.\", inline=False)\n embed.add_field(name=\"warn <@member> \", value=\"Warns a member with given reason.\", inline=False)\n embed.add_field(name=\"warnings <@member>\", value=\"Lists the warnings of a member.\", inline=False)\n embed.add_field(name=\"removewarn \", value=\"Removes a warning with the given ID number.\", inline=False)\n embed.add_field(name=\"mute <@member> [time] [reason]\", value=\"Mutes a member from chatting and voice chatting with optional time and reason. Time units are m, h and d. Don't use spaces when specifying time. E.g. `mute @john 2d5h15m Spamming in #general`\", inline=False)\n embed.add_field(name=\"unmute <@member>\", value=\"Unmutes a specified member, given that they're muted.\")\n embed.add_field(name=\"purge [@member]\", value=\"Bulk deletes a specified amount of messages (max 500) from a channel. Optionally can be filtered to delete only a specified member's messages. E.g. `purge all @john` or `purge 10`\", inline=False)\n embed.add_field(name=\"message <@member> \", value=\"I will deliver a message to the member of your choosing.\", inline=False)\n embed.add_field(name=\"testwelcome\", value=\"Displays the welcoming message as if a new member joined.\", inline=False)\n await ctx.send(embed=embed)\n # Lists all music commands\n elif arg.lower() == \"music\":\n embed = discord.Embed(title=\"Music commands (UPCOMING, NON-FUNCTIONAL!)\", description=description, color=0x208afc)\n embed.add_field(name=\"join\", value=\"Makes the bot join your voice channel.\", inline=False)\n embed.add_field(name=\"leave\", value=\"Makes the bot leave the current voice channel.\", inline=False)\n embed.add_field(name=\"play \", value=\"Plays a song from a YouTube URL.\", inline=False)\n embed.add_field(name=\"search \", value=\"Searches a song from YouTube to play. 
Can be played by reacting with a corresponding number.\", inline=False)\n await ctx.send(embed=embed)\n # Lists all utility commands\n elif arg.lower() == \"utility\":\n embed = discord.Embed(title=\"Utility commands\", description=description, color=0xa4b4bc)\n embed.add_field(name=\"remindme