diff --git "a/3811.jsonl" "b/3811.jsonl" new file mode 100644--- /dev/null +++ "b/3811.jsonl" @@ -0,0 +1,743 @@ +{"seq_id":"512344652","text":"from django.shortcuts import render\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n#from snippets.serializers import SnippetSerializer\nfrom api.models import Anime,Profile\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.views import APIView\nfrom .serializers import AnimeSerializer\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.permissions import AllowAny,IsAuthenticated\n\n\n#вьха для пользоавтеля\nclass AnimeView(APIView):\n #получение своих аниме, не заыть поменять гет\n permission_classes = [IsAuthenticated]\n def get(self, request):\n profile = Profile.objects.get(user=request.user)\n animelist = Anime.objects.filter(user=profile)\n serializer_context = {\n 'request': request,\n }\n serializer = AnimeSerializer(animelist, context=serializer_context,many=True)\n return Response({\"list\": serializer.data})\n def post(self,request):\n try:\n mode = request.data.get('mode')\n if mode == 'add':\n id = request.data.get('id')\n profile = Profile.objects.get(user=request.user)\n anime=Anime.objects.get(id=id)\n anime.user.add(profile)\n else:\n id = request.data.get('id')\n profile = Profile.objects.get(user=request.user)\n anime = Anime.objects.get(id=id)\n anime.user.remove(profile)\n\n return Response({\"success\": \"New anime '{}' add successfully\".format(anime.name)})\n except :\n return Response({\"fail\": \"1\"})\n\n\n\nclass AnimeAllView(APIView):\n #получение all\n permission_classes = [AllowAny]\n def get(self, request):\n animelist = Anime.objects.all()\n serializer_context = {\n 'request': request,\n }\n serializer = AnimeSerializer(animelist, context=serializer_context,many=True)\n return Response({\"list\": serializer.data})\n\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"357511793","text":"\"\"\"\n准备Mustard数据集,方便调用\n\"\"\"\nimport os\nimport sys\nimport re\nimport json\nimport pickle\n\nfrom tqdm import tqdm\nimport h5py\nimport nltk\nimport numpy as np\nimport jsonlines\nfrom collections import defaultdict\nfrom sklearn.model_selection import StratifiedKFold\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\nimport torch.nn as nn\n\nfrom torchsummary import summary\nfrom torch import optim\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import classification_report, confusion_matrix\n\n\nwork_dir = '/media/dn/85E803050C839C68/m_fusion_data/'\n\n\ndef print_hi(name):\n print(f'Hi, {name}')\n DATA_PATH_JSON = work_dir + \"data/sarcasm_data.json\"\n BERT_TARGET_EMBEDDINGS = work_dir + \"data/bert-output.jsonl\"\n INDICES_FILE_OUR_INDEPENDENT_SPLIT = work_dir + \"data/split_indices_oursplit_independent.p\"\n AUDIO_PICKLE = work_dir + \"data/audio_features.p\"\n DATASET_FILE = work_dir + \"mustard_oursplit.pkl\"\n\n def pickle_loader(filename):\n if sys.version_info[0] < 3:\n return pickle.load(open(filename, 'rb'))\n else:\n return pickle.load(open(filename, 'rb'), encoding=\"latin1\")\n\n def get_data_loader(train_ind_SI, author_ind):\n # (text,video,AUDIO)\n train_input = [data_input[ind] for ind in train_ind_SI]\n return train_input\n\n dataset_json = 
json.load(open(DATA_PATH_JSON))\n # text\n text_bert_embeddings = []\n with jsonlines.open(BERT_TARGET_EMBEDDINGS) as reader:\n print('opened BERT: ', BERT_TARGET_EMBEDDINGS)\n for obj in reader:\n CLS_TOKEN_INDEX = 0\n features = obj['features'][CLS_TOKEN_INDEX]\n bert_embedding_target = []\n for layer in [0, 1, 2, 3]:\n bert_embedding_target.append(np.array(features[\"layers\"][layer][\"values\"]))\n bert_embedding_target = np.mean(bert_embedding_target, axis=0)\n # text_bert_embeddings (list:690) 768\n text_bert_embeddings.append(np.copy(bert_embedding_target))\n print('np.array(text_bert_embeddings).shape bert 768 ')\n print(np.array(text_bert_embeddings).shape) # 690 768\n # video\n video_features_file = h5py.File(work_dir + 'data/features/utterances_final/resnet_pool5.hdf5')\n # combined feature index\n # audio dict (283 12) (283 11)\n audio_features = pickle_loader(AUDIO_PICKLE)\n\n # parse_data\n data_input, data_output = [], []\n # from nltk.tokenize import word_tokenize\n\n # data = \"All work and no play makes jack a dull boy, all work and no play\"\n # print(word_tokenize(data))\n # data_input [(text,video)(text,video)]\n # text:768 video: frame:2048\n for idx, ID in enumerate(dataset_json.keys()):\n text = dataset_json[ID][\"utterance\"]\n # text = word_tokenize(text)\n # len_text = len(text)\n\n video = video_features_file[ID][()] #(96,2048)(72,2048)\n # Pad with zeros at the end and reshape to match the sentence length\n # len_video = video.shape[0]\n # dim_video = video.shape[1]\n # if len_video % len_text !=0 :\n # len_pad = len_text - (len_video % len_text)\n # pad_video = np.zeros((len_pad,dim_video))\n # video = np.concatenate((video,pad_video))\n #\n # video = video.reshape(len_text,-1,dim_video)\n # video = np.mean(video,axis=1)\n\n audio = audio_features[ID] #(283,12) (283,11)\n audio = audio.T\n # # Pad with zeros at the end and reshape to match the sentence length\n # len_audio = audio.shape[0]\n # dim_audio = audio.shape[1]\n # if len_audio % len_text != 0:\n # len_pad = len_text - (len_audio % len_text)\n # pad_audio = np.zeros((len_pad, dim_audio))\n # audio = np.concatenate((audio, pad_audio))\n #\n # audio = audio.reshape(len_text, -1, dim_audio)\n # audio = np.mean(audio, axis=1)\n\n label = int(dataset_json[ID][\"sarcasm\"])\n label = 1.0 if label else -1.0\n label = np.array([[label]])\n segment = ID\n # (words, visual, acoustic), label_id, segment,\n data_input.append((\n (text, # 0 TEXT_ID\n video, # 1 VIDEO_ID\n audio, # 2\n ),\n label,\n segment)\n )\n data_output.append(int(dataset_json[ID][\"sarcasm\"]))\n\n video_features_file.close()\n\n split_indices = pickle_loader(INDICES_FILE_OUR_INDEPENDENT_SPLIT)\n # For the speaker-independent setup, the 5 sets of split indices are identical.\n for fold, (train_index, test_index) in enumerate(split_indices[:1]):\n train_ind_SI = train_index\n val_ind_SI = test_index\n test_ind_SI = test_index\n\n train_dataLoader = get_data_loader(train_ind_SI,None)\n val_dataLoader = get_data_loader(val_ind_SI,None)\n test_dataLoader = get_data_loader(test_ind_SI,None)\n\n data = {}\n data[\"train\"] = train_dataLoader\n data[\"dev\"] = val_dataLoader\n data[\"test\"] = test_dataLoader\n\n if not os.path.exists(DATASET_FILE):\n pickle.dump(data, open(DATASET_FILE, 'wb'), protocol=2)\n\n print(f\"write to {DATASET_FILE} ...\")\n\n\nif __name__ == '__main__':\n 
print_hi('lf_dnn')\n","sub_path":"data/.ipynb_checkpoints/prepare_mustard-checkpoint.py","file_name":"prepare_mustard-checkpoint.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"266032617","text":"from video.video import Video\nfrom dateutil.relativedelta import relativedelta\nfrom dateutil.tz import tzutc\nfrom datetime import datetime\nimport cv2\nimport os\nimport logging\nimport time\n\nlogging.basicConfig(level=logging.DEBUG,\n format='(%(threadName)-10s) %(message)s')\n\nclass VideoRecorder(Video):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n \n def __str__(self):\n return \"VideoRecorder\"\n \n def record(self, seconds=5, minutes=0, hours=0, days=0):\n \"\"\"Record images and write MP4 videos to disk.\"\"\"\n while True:\n if self.more():\n # Ensure generator yields frames at a rate that matchs FPS of stream.\n fps = self.stream.get(cv2.CAP_PROP_FPS)\n if fps != 0:\n time.sleep(1/fps)\n # yield self.get_image()\n print('writing image')\n else:\n if self.debug:\n logging.info('The queue is empty, buffering...')\n time.sleep(self.buffer_wait_time)\n\n # def record(self, seconds=0, minutes=30, hours=0, days=0):\n # \"\"\"Record images and write MP4 videos to disk.\"\"\"\n # start = datetime.now(tzutc())\n # later = start + relativedelta(seconds=seconds, minutes=minutes, hours=0, days=0)\n # now = datetime.now(tzutc())\n\n # logging.debug(\"start: \" + str(start))\n # logging.debug(\"later: \" + str(later))\n # logging.debug(\"now: \" + str(now))\n\n # video_name = now.strftime(\"%Y-%m-%d-%H-%M-%S-%f-%Z\") + '.mp4'\n # logging.debug(video_name)\n # # fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n # # writer = cv2.VideoWriter(os.path.join('s3', video_name), fourcc, 24, (1152, 864))\n\n # # Get image from thread queue and write it to MP4 file.\n # while True:\n # logging.debug(\"more: \" + str(self.more()))\n # while self.more():\n # success, image = self.stream.read()\n # # writer.write(image)\n # logging.debug('write image')\n\n # now = datetime.now(tzutc())\n # recording_duration_has_ended = now >= later\n # if recording_duration_has_ended:\n # break\n # # writer.release()\n\nif __name__ == '__main__':\n nest_stream_doorbell_stream_url = os.environ.get('NEST_DOORBELL_STREAM_URL', 0)\n video_recorder = VideoRecorder(video=nest_stream_doorbell_stream_url, buffer_wait_time=5, debug=True)\n video_recorder.start()\n video_recorder.record(seconds=5, minutes=0)","sub_path":"video/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"492995967","text":"\n\ndef main():\n\tn = [19,24,25]\n\n\tfor a in range(1,n[0]) :\n\t\tl = []\n\t\tfor i in range(n[0]) :\n\t\t\tl.append(pow(a,i)%n[0])\n\n\t\tprint(l,max(l))\n\nif __name__ == \"__main__\" :\n\tmain()","sub_path":"NetworksAssignments/RIP protocol/samples.py","file_name":"samples.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"80441030","text":"from typing import cast\nfrom w3modmanager.util.util import getTitleString, debounce\nfrom w3modmanager.ui.graphical.modlist import ModListItemDelegate\nfrom w3modmanager.domain.web.nexus import RequestError, ResponseError, getModId, getModFileUrls, getModFiles\nfrom w3modmanager.core.model import *\n\nimport html\n\nimport dateparser\n\nfrom PySide6.QtCore import 
QModelIndex, Qt, QSize, Signal, QObject\nfrom PySide6.QtWidgets import QLabel, QGroupBox, QVBoxLayout, QHBoxLayout, QSizePolicy, QPushButton, \\\n QLineEdit, QDialog, QWidget, QTableWidget, QTableWidgetItem, QAbstractItemView\nfrom PySide6.QtGui import QMouseEvent\n\n\nclass DownloadWindowEvents(QObject):\n download = Signal(list)\n\n\nclass DownloadWindow(QDialog):\n def __init__(self, parent: Optional[QWidget] = None, url: str = '') -> None:\n super().__init__(parent, )\n\n if parent:\n self.setWindowTitle('Download Mod')\n else:\n self.setWindowTitle(getTitleString('Download Mod'))\n self.setAttribute(Qt.WA_DeleteOnClose)\n\n mainLayout = QVBoxLayout(self)\n mainLayout.setContentsMargins(5, 5, 5, 5)\n\n self.signals = DownloadWindowEvents(self)\n\n # URL input\n\n gbUrl = QGroupBox('Mod URL')\n gbUrlLayout = QVBoxLayout()\n gbUrl.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)\n\n self.url = QLineEdit()\n self.url.setPlaceholderText('https://www.nexusmods.com/witcher3/mods/...')\n self.url.setText(url)\n self.url.textChanged.connect(lambda: self.validateUrl(self.url.text()))\n gbUrlLayout.addWidget(self.url)\n\n self.urlInfo = QLabel('🌐')\n self.urlInfo.setContentsMargins(4, 4, 4, 4)\n self.urlInfo.setMinimumHeight(36)\n self.urlInfo.setWordWrap(True)\n gbUrlLayout.addWidget(self.urlInfo)\n\n gbUrl.setLayout(gbUrlLayout)\n mainLayout.addWidget(gbUrl)\n\n # File selection\n\n gbFiles = QGroupBox('Mod Files')\n gbFilesLayout = QVBoxLayout()\n gbFiles.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n\n self.files = QTableWidget(0, 4)\n self.files.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel)\n self.files.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)\n self.files.setContextMenuPolicy(Qt.CustomContextMenu)\n self.files.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.files.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.files.setWordWrap(False)\n self.files.setSortingEnabled(True)\n self.files.setFocusPolicy(Qt.StrongFocus)\n self.files.verticalHeader().hide()\n self.files.setSortingEnabled(True)\n self.files.sortByColumn(2, Qt.DescendingOrder)\n self.files.verticalHeader().setVisible(False)\n self.files.verticalHeader().setDefaultSectionSize(25)\n self.files.horizontalHeader().setHighlightSections(False)\n self.files.horizontalHeader().setStretchLastSection(True)\n self.files.setHorizontalHeaderLabels(['File Name', 'Version', 'Upload Date', 'Description'])\n self.files.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.files.verticalScrollBar().valueChanged.connect(lambda: self.files.clearFocus())\n self.files.itemSelectionChanged.connect(lambda: self.validateFiles())\n self.files.setDisabled(True)\n gbFilesLayout.addWidget(self.files)\n\n _mouseMoveEvent = self.files.mouseMoveEvent\n self.files.hoverIndexRow = -1\n\n def mouseMoveEvent(event: QMouseEvent) -> None:\n self.files.hoverIndexRow = self.files.indexAt(event.pos()).row()\n _mouseMoveEvent(event)\n self.files.mouseMoveEvent = mouseMoveEvent # type: ignore\n self.files.setItemDelegate(ModListItemDelegate(self.files))\n self.files.setMouseTracking(True)\n\n gbFiles.setLayout(gbFilesLayout)\n mainLayout.addWidget(gbFiles)\n\n # Actions\n\n actionsLayout = QHBoxLayout()\n actionsLayout.setAlignment(Qt.AlignRight)\n self.download = QPushButton('Download', self)\n self.download.clicked.connect(lambda: self.downloadEvent())\n self.download.setAutoDefault(True)\n self.download.setDefault(True)\n self.download.setDisabled(True)\n 
actionsLayout.addWidget(self.download)\n cancel = QPushButton('Cancel', self)\n cancel.clicked.connect(self.cancelEvent)\n actionsLayout.addWidget(cancel)\n mainLayout.addLayout(actionsLayout)\n\n # Setup\n\n self.setMinimumSize(QSize(440, 440))\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n self.resize(QSize(720, 440))\n\n self.finished.connect(lambda: self.validateUrl.cancel()) # type: ignore\n self.finished.connect(lambda: self.downloadEvent.cancel()) # type: ignore\n\n self.modId = 0\n self.validateUrl(self.url.text())\n\n def cancelEvent(self) -> None:\n self.close()\n\n @debounce(200, cancel_running=True)\n async def validateUrl(self, url: str) -> bool:\n self.download.setDisabled(True)\n self.files.setDisabled(True)\n self.files.clearSelection()\n self.files.clearFocus()\n self.files.clearContents()\n self.files.setRowCount(0)\n self.files.setSortingEnabled(False)\n self.url.setStyleSheet('')\n self.modId = 0\n if not url:\n self.urlInfo.setText('''\n Please enter a valid mod url.\n ''')\n return False\n modId = getModId(url)\n if not modId:\n self.files.setDisabled(True)\n self.url.setStyleSheet('''\n *{\n border: 1px solid #B22222;\n padding: 1px 0px;\n }\n ''')\n self.urlInfo.setText('''\n Please enter a valid mod url.\n ''')\n return False\n self.urlInfo.setText('🌐')\n try:\n filesResponse = await getModFiles(modId)\n except (RequestError, ResponseError, Exception) as e:\n self.url.setStyleSheet('''\n *{\n border: 1px solid #B22222;\n padding: 1px 0px;\n }\n ''')\n self.urlInfo.setText(f'''\n Could not get mod files: {e}.\n ''')\n return False\n try:\n files = filesResponse['files']\n if not len(files):\n self.urlInfo.setText(f'''\n Mod \"{modId}\" has no files!\n ''')\n return False\n\n self.files.setRowCount(len(files))\n for i in range(len(files)):\n file = files[i]\n fileid = int(file['file_id'])\n name = str(file['name'])\n version = str(file['version'])\n _uploadtime = dateparser.parse(file['uploaded_time'])\n uploadtime = _uploadtime.astimezone(tz=None).strftime('%Y-%m-%d %H:%M:%S') if _uploadtime else '?'\n description = html.unescape(str(file['description']))\n nameItem = QTableWidgetItem(name)\n nameItem.setToolTip(name)\n nameItem.setData(Qt.UserRole, fileid)\n self.files.setItem(i, 0, nameItem)\n versionItem = QTableWidgetItem(version)\n versionItem.setToolTip(version)\n self.files.setItem(i, 1, versionItem)\n uploadtimeItem = QTableWidgetItem(uploadtime)\n uploadtimeItem.setToolTip(uploadtime)\n self.files.setItem(i, 2, uploadtimeItem)\n descriptionItem = QTableWidgetItem(description)\n descriptionItem.setToolTip(description)\n self.files.setItem(i, 3, descriptionItem)\n except KeyError as e:\n logger.exception(\n f'Could not find key \"{str(e)}\" in mod files response')\n self.urlInfo.setText(f'''\n Could not find key \"{str(e)}\" in mod files response.\n ''')\n return False\n\n self.urlInfo.setText(f'''\n Found {len(files)} available files.\n ''')\n self.files.resizeColumnsToContents()\n self.files.setDisabled(False)\n self.files.setSortingEnabled(True)\n self.modId = modId\n return True\n\n def validateFiles(self) -> bool:\n selection = self.files.selectionModel().selectedRows()\n if len(selection) > 0:\n self.download.setText(f'Download {len(selection)} mods')\n self.download.setDisabled(False)\n return True\n return False\n\n @debounce(25, cancel_running=True)\n async def downloadEvent(self) -> None:\n self.download.setDisabled(True)\n self.url.setDisabled(True)\n selection = self.files.selectionModel().selectedRows()\n files 
= [self.files.item(cast(QModelIndex, index).row(), 0).data(Qt.UserRole) for index in selection]\n self.files.setDisabled(True)\n try:\n urls = await asyncio.gather(\n *[getModFileUrls(self.modId, file) for file in files],\n )\n except (RequestError, ResponseError, Exception) as e:\n self.url.setStyleSheet('''\n *{\n border: 1px solid #B22222;\n padding: 1px 0px;\n }\n ''')\n self.urlInfo.setText(f'''\n Could not download mod files: {e}.\n ''')\n return\n try:\n self.signals.download.emit([url[0]['URI'] for url in urls])\n except KeyError as e:\n logger.exception(\n f'Could not find key \"{str(e)}\" in file download response')\n self.urlInfo.setText(f'''\n Could not find key \"{str(e)}\" in file download response.\n ''')\n return\n self.close()\n","sub_path":"w3modmanager/ui/graphical/downloadwindow.py","file_name":"downloadwindow.py","file_ext":"py","file_size_in_byte":10214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"237595731","text":"#IMPORT\nimport sys\nimport math\nfrom math import sqrt\nfrom math import cos\nfrom math import sin\n\n#CONSTANTS\nMS_X \t\t= 16000\nHALF_MS_X\t= MS_X / 2\nMS_Y\t\t= 7501\nHALF_MS_Y\t= MS_Y / 2\n#CLASS PART\nclass rectangle:\n\t\"class rectangle\"\n\tdef __init__(self, x, y, L, h):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.L = L\n\t\tself.h = h\n\t\n\tdef is_in(self, x, y):\n\t\tif x > self.x and x < self.x + self.L:\n\t\t\tif y > self.y and y < self.y + self.h:\n\t\t\t\treturn 1\n\t\treturn 0\nclass item:\n\tdef __init__(self, id, x, y, vx, vy, state):\n\t\tself.id = id\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.vx = vx\n\t\tself.vy = vy\n\t\tself.state = state\n\t\tself.dist = 0\n\t\tself.dist_goal = 0\n\t\tself.target_id = 0\nclass goal:\n\t#side: 0 or 1 for left or right\n\tdef __init__(self, side):\n\t\tself.side = side\n\t\tself.y = 3500\n\t\tif side == 0:\n\t\t\tself.x = 0\n\t\telse:\n\t\t\tself.x = 16000\n\tdef get_target(self, wizard):\n\t\tif wizard.y > HALF_MS_Y:\n\t\t\ttarget = [self.x, self.y + 500]\n\t\telse:\n\t\t\ttarget = [self.x, self.y - 500]\n\t\treturn target\n#INIT PART\nmy_team_id = int(input()) # if 0 you need to score on the right of the map, if 1 you need to score on the left\nif my_team_id == 0:\n\tennemy_team_id = 1\nelse:\n\tennemy_team_id = 0\n\t#define the rectangles\nif my_team_id == 0:\n\tmy_zone = rectangle(0, 0, HALF_MS_X, MS_Y)\n\tmy_close_zone = rectangle(0, 0, 4000, MS_Y)\n\tmy_goal_zone = rectangle(0, 1750, 4000, 4000)\n\ten_zone = rectangle(MS_X-HALF_MS_X, 0, HALF_MS_X, MS_Y)\n\ten_goal_zone = rectangle(MS_X-4000, 1750, 4000, 4000)\n\ten_close_zone = rectangle(MS_X-4500, 0, 4500, MS_Y)\nelse:\n\ten_zone = rectangle(0, 0, HALF_MS_X, MS_Y)\n\ten_close_zone = rectangle(0, 0, 4500, MS_Y)\n\ten_goal_zone = rectangle(0, 1750, 4000, 4000)\n\tmy_zone = rectangle(MS_X-HALF_MS_X, 0, HALF_MS_X, MS_Y)\n\tmy_goal_zone = rectangle(MS_X-4000, 1750, 4000, 4000)\n\tmy_close_zone = rectangle(MS_X-4000, 0, 4000, MS_Y)\n\nmiddle_column = rectangle(6000, 0, 4000, MS_Y)\nmy_goal = goal(my_team_id)\nennemy_goal = goal(ennemy_team_id)\n\n#UTILS PART\ndef Sqr(a):\n\treturn a*a\ndef Distance(x1,y1,x2,y2):\n\treturn sqrt(Sqr(y2-y1)+Sqr(x2-x1))\ndef Print_wizard(wizard):\n\tprint(\"wizard id =\", wizard[\"id\"], file=sys.stderr)\n\tprint(\" x = \", wizard[\"x\"], file=sys.stderr)\n\tprint(\" y = \", wizard[\"y\"], file=sys.stderr)\n\tprint(\" vx = \",wizard[\"vx\"], file=sys.stderr)\n\tprint(\" vy =\", wizard[\"vy\"], file=sys.stderr)\n\tprint(\" state =\", wizard[\"state\"], 
file=sys.stderr)\ndef Print_snaffle(snaffle):\n\tprint(\"snaffle id =\", snaffle[\"id\"], file=sys.stderr)\n\tprint(\" x = \", snaffle[\"x\"], file=sys.stderr)\n\tprint(\" y = \", snaffle[\"y\"], file=sys.stderr)\n\tprint(\" vx = \",snaffle[\"vx\"], file=sys.stderr)\n\tprint(\" vy =\", snaffle[\"vy\"], file=sys.stderr)\n\tprint(\" state =\", snaffle[\"state\"], file=sys.stderr)\ndef Print_snaffle_list_d(snaffle_list):\n\tprint(\"snaffle_list:\", file=sys.stderr)\n\tfor snaffle in snaffle_list:\n\t\tprint(\" id=\", snaffle.id, \"dist=\", snaffle.dist, file=sys.stderr)\ndef summon(id, gx, gy, magic, my_magic):\n\tprint(\"WINGARDIUM\", id, gx, gy, magic, file=sys.stderr)\n\tprint(\"WINGARDIUM\", id, gx, gy, magic)\n\tmy_magic -= magic\ndef move_to(gx, gy, thrust):\n\tprint(\"MOVE\", int(gx), int(gy), thrust, file=sys.stderr)\n\tprint(\"MOVE\", int(gx), int(gy), thrust)\ndef check_in_front(wizard, bx, by, ennemie, rayon):\n\t# slope and intercept of the line from the wizard to (bx, by)\n\tm = (by - wizard.y) / (bx - wizard.x)\n\tb = wizard.y - (m * wizard.x)\n\t#carre = 1 + m^2\n\tcarre = 1 + Sqr(m)\n\t#x = (-2*cx) - 2*m*(cy-b)\n\tx = (-2*ennemie.x) - 2*m*(ennemie.y-b)\n\t# cst = cx^2 + (cy - b)^2 - rayon^2\n\tcst = Sqr(ennemie.x) + Sqr(ennemie.y - b) - Sqr(rayon)\n\t#delta = x^2 - 4*carre*cst\n\tdelta = Sqr(x) - 4*carre*cst\n\tprint(\"DELTA =\", delta, file=sys.stderr)\n\tif delta >= 0 and in_my_direction(wizard, ennemie, bx, by):\n\t\tprint(\"ENEMY\", ennemie.id, \"in front\", file=sys.stderr)\n\t\treturn 1\n\telse:\n\t\tprint(\"no enemy in front\", file=sys.stderr)\n\t\treturn 0\ndef in_my_direction(wizard, ennemie, cibx, ciby):\n\tA = 0\n\tB = 0\n\tif (ennemie.x - wizard.x < 0 and cibx - wizard.x < 0) or (ennemie.x - wizard.x > 0 and cibx-wizard.x > 0):\n\t\tA = 1\n\tif (ennemie.y - wizard.y < 0 and ciby - wizard.y < 0) or (ennemie.y - wizard.y > 0 and ciby - wizard.y > 0):\n\t\tB = 1\n\tprint(\"A=\", A, \"B=\", B, file=sys.stderr)\n\tif A == 1 and B == 1:\n\t\treturn 1\n\telse:\n\t\treturn 0\ndef add_angle(gx, gy):\n\tnew_angle = []\n\tPHI = math.radians(20)\n\tnew_angle.append(gx*cos(PHI) - gy*sin(PHI))\n\tnew_angle.append(gx*sin(PHI) + gy*cos(PHI))\n\treturn new_angle\ndef get_other_wizard(wizard, wizard_list):\n\twiz = wizard_list[0]\n\tif wiz.id != wizard.id:\n\t\treturn wiz\n\telse:\n\t\treturn wizard_list[1]\n#FUNCTIONS\ndef throw(gx, gy, power):\n\tprint(\"THROW\", int(gx), int(gy), power,file=sys.stderr) \t\n\tprint(\"THROW\", int(gx), int(gy), power)\ndef nearest(wizard_list, snaffle):\n\t#return the ID of the wizard closest to the snaffle\n\tdist=[]\n\tfor wizard in wizard_list:\n\t\tdist.append(Distance(wizard.x, wizard.y, snaffle.x, snaffle.y))\n\tif dist[0] < dist[1]:\n\t\treturn wizard_list[0].id\n\telse:\n\t\treturn wizard_list[1].id\ndef other_nearest_too(wizard, wizard_list, snaffle_list, nearest):\n\tsorted_list = list(snaffle_list)\n\tother = get_other_wizard(wizard, wizard_list)\n\tfor snaffle in sorted_list:\n\t\tsnaffle.dist = Distance(snaffle.x, snaffle.y, other.x, other.y)\n\tsorted_list.sort(key=lambda snaffle: snaffle.dist)\n\tprint(\"my list\", Print_snaffle_list_d(snaffle_list), file=sys.stderr)\n\tprint(\"other_list\", Print_snaffle_list_d(sorted_list), file=sys.stderr)\n\tif sorted_list[0].id == nearest.id:\n\t\treturn 1\n\telse:\n\t\treturn 0\ndef snaffle_left(wizard, snaffle_list):\n\tfor snaffle in snaffle_list:\n\t\tif snaffle.x > wizard.x:\n\t\t\treturn 1\n\treturn 0\ndef other_id_is_busy(wizard, wizard_list, sn_id):\n\tprint(\"A.5\", file=sys.stderr)\n\tother = get_other_wizard(wizard, wizard_list)\n\tif 
other.target_id != sn_id:\n\t\treturn 1\n\treturn 0\ndef closest_snaffle(snaffle_list, wizard, wizard_list):\n\t#return the closest snaffle that is not already targeted\n\tfor snaffle in snaffle_list:\n\t\tprint(snaffle.id, \"=state\", snaffle.state, \"nearest id =\", nearest(wizard_list, snaffle), file=sys.stderr)\n\t\tif snaffle.state == 0:\n\t\t\tprint(\"A\", file=sys.stderr)\n\t\t\tif other_nearest_too(wizard, wizard_list, snaffle_list, snaffle) and other_id_is_busy(wizard, wizard_list, snaffle.id):\n\t\t\t\tprint(\"B\", file=sys.stderr)\n\t\t\t\tif wizard.id == nearest(wizard_list, snaffle):\n\t\t\t\t\tprint(\"C\", file=sys.stderr)\n\t\t\t\t\tif snaffle_left(wizard, snaffle_list) == 1:\n\t\t\t\t\t\tprint(\"SPECIAL is\", snaffle.id, file=sys.stderr)\n\t\t\t\t\t\treturn snaffle\n\t\t\t\t\telse:\n\t\t\t\t\t\twizard.target_id = 0\n\t\t\t\telse:\n\t\t\t\t\twizard.target_id = 0\n\t\t\telse:\n\t\t\t\treturn snaffle\n\tprint(\"closest not found\", file=sys.stderr)\ndef\tget_near_from_goal(snaffle_list, wizard, goal):\n\tfor snaffle in snaffle_list:\n\t\tsnaffle.dist_goal = Distance(snaffle.x, snaffle.y, goal.x, goal.y)\n\tsorted_list = sorted(snaffle_list, key=lambda snaffle: snaffle.dist_goal)\n\tnearest = closest_snaffle(sorted_list, wizard, wizard_list)\n\treturn nearest\ndef get_clean_trajectory(wizard, gx, gy, opponent_list):\n\tangle = [gx, gy]\n\tmodif = 1\n\twhile modif == 1:\n\t\tmodif = 0\n\t\tfor opponent in opponent_list:\n\t\t\tif check_in_front(wizard, angle[0], angle[1], opponent, 600):\n\t\t\t\tmodif = 1\n\t\t\t\tangle = add_angle(angle[0], angle[1])\n\treturn angle\n\ndef need_to_incant(snaffle_list, wizard, my_goal, ennemy_goal, my_magic):\n\ttarget = []\n\tclosest = get_near_from_goal(snaffle_list, wizard, ennemy_goal)\n\tif closest is not None and closest.dist_goal < 5000 and my_magic > 20:\n\t\ttarget.append(closest.id)\n\t\txy = ennemy_goal.get_target(closest)\n\t\ttarget.extend(xy)\n\t\ttarget.append(my_magic)\n\t\treturn target\n\telif closest is not None and middle_column.is_in(closest.x, closest.y) and my_magic > 40:\n\t\ttarget.append(closest.id)\n\t\txy = ennemy_goal.get_target(closest)\n\t\ttarget.extend(xy)\n\t\ttarget.append(my_magic)\ndef need_to_throw(wizard):\n\tif wizard.state == 1:\n\t\treturn 1\n\telse:\n\t\treturn 0\ndef sort_liste(wizard, bludger_list, snaffle_list, opponent_list):\n\tfor bludger in bludger_list:\n\t\tbludger.dist = Distance(bludger.x, bludger.y, wizard.x, wizard.y)\n\tfor snaffle in snaffle_list:\n\t\tsnaffle.dist = Distance(snaffle.x, snaffle.y, wizard.x, wizard.y)\n\tfor opponent in opponent_list:\n\t\topponent.dist = Distance(opponent.x, opponent.y, wizard.x, wizard.y)\n\tbludger_list.sort(key=lambda obj: obj.dist)\n\tsnaffle_list.sort(key=lambda obj: obj.dist)\n\topponent_list.sort(key=lambda obj: obj.dist)\ndef goto_center():\n\tmove_to(HALF_MS_X, HALF_MS_Y, 150)\ndef goto_closest(snaffle_list, wizard, wizard_list):\n\tclosest = closest_snaffle(snaffle_list, wizard, wizard_list)\n\tif closest is not None:\n\t\tprint(\"goto closest: id=\", closest.id, file=sys.stderr)\n\t\tprint(\"Debug wizard:\", wizard.id, \"chasing:\", closest.id, file=sys.stderr)\n\t\tmove_to(closest.x, closest.y, 150)\n\t\t#set target_id\n\t\twizard.target_id= closest.id\n\t\treturn 1\n\telse:\n\t\treturn 0\ndef move(wizard, snaffle_list, wizard_list):\n\tif goto_closest(snaffle_list, wizard, wizard_list) == 0:\n\t\tgoto_center()\ndef throw_ball(wizard, opponent_list):\n\tpower = 500\n\tprint(\"my team_id =\", my_team_id, \"en_close_zone =\", en_close_zone, 
file=sys.stderr)\n\tif en_close_zone.is_in(wizard.x, wizard.y) or 1:\n\t\tprint(\"is in en_close_zone\", file=sys.stderr)\n\t\t#shoot at the goal\n\t\ttarget = ennemy_goal.get_target(wizard)\n\t\tgx = target[0]\n\t\tgy = target[1]\n\telse:\n\t\t#shoot toward en_close_zone\n\t\tprint(\"not in en_close_zone\", file=sys.stderr)\n\t\tgx = 16000\n\t\tgy = wizard.y\n\t#check here whether the trajectory is clear\n\tprint(\"old traj =\", gx, gy, file=sys.stderr)\n\ttrajectory = get_clean_trajectory(wizard, gx, gy, opponent_list)\n\tprint(\"new traj =\", trajectory[0], trajectory[1], file=sys.stderr)\n\tthrow(trajectory[0], trajectory[1], power)\n\twizard.target_id = 0\n\t# d_g = Distance(wizard[\"x\"], wizard[\"y\"], gx, gy)\n\t# print(\"Debug distance to target = \", d_g, file=sys.stderr)\n\t# print(\"Debug target: x=\", gx, \"y=\", gy, file=sys.stderr)\n\n\n\t# game loop\nwhile True:\n\tmy_score, my_magic = [int(i) for i in input().split()]\n\topponent_score, opponent_magic = [int(i) for i in input().split()]\n\tentities = int(input()) # number of entities still in game\n\twizard_list = []\n\tsnaffle_list = []\n\tbludger_list = []\n\topponent_list = []\n\t#fill list\n\tfor i in range(entities):\n\t\t# entity_id: entity identifier\n\t\t# entity_type: \"WIZARD\", \"OPPONENT_WIZARD\" or \"SNAFFLE\" or \"BLUDGER\"\n\t\t# x: position\n\t\t# y: position\n\t\t# vx: velocity\n\t\t# vy: velocity\n\t\t# state: 1 if the wizard is holding a Snaffle, 0 otherwise. 1 if the Snaffle is being held, 0 otherwise. id of the last victim of the bludger.\n\t\tentity_id, entity_type, x, y, vx, vy, state = input().split()\n\t\tmy_item = item(int(entity_id), int(x), int(y), int(vx), int(vy), int(state))\n\t\tif entity_type == \"WIZARD\":\n\t\t\twizard_list.append(my_item)\n\t\telif entity_type == \"SNAFFLE\":\n\t\t\tsnaffle_list.append(my_item)\t\n\t\telif entity_type == \"BLUDGER\":\n\t\t\tbludger_list.append(my_item)\n\t\telif entity_type == \"OPPONENT_WIZARD\":\n\t\t\topponent_list.append(my_item)\n\tlen(snaffle_list)\n#For each wizard\n\tfor wizard in wizard_list:\n\t\tprint(\"WIZ ID=\", wizard.id, file=sys.stderr)\n\t\t#sort the lists from closest to farthest from the wizard\n\t\tsort_liste(wizard, bludger_list, snaffle_list, opponent_list)\n\t#\tPrint_snaffle_list_d(snaffle_list)\n\t\ttarget = need_to_incant(snaffle_list, wizard, my_goal, ennemy_goal, my_magic)\n\t\t# print(\"HERE, target = \", target, file=sys.stderr)\n\t\tif target is not None:\n\t\t\tsummon(target[0], target[1], target[2], target[3], my_magic)\n\t\telif need_to_throw(wizard) == 1:\n\t\t\tthrow_ball(wizard, opponent_list)\n\t\telse:\n\t#\t\tPrint_snaffle_list_d(snaffle_list)\n\t\t\tmove(wizard, snaffle_list, wizard_list)","sub_path":"quiditch/IA_propre.py","file_name":"IA_propre.py","file_ext":"py","file_size_in_byte":11385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"245010160","text":"import gzip\nfrom difflib import Differ\nfrom io import BytesIO\nfrom PIL import Image\n\nDELIM = ' '\nwith gzip.open('deltas.gz', 'rt') as f:\n left = []\n right = []\n for line in f:\n idx = line.find(DELIM)\n if idx == -1:\n continue\n l = line[0:idx].strip()\n r = line[idx:].strip()\n if len(l):\n left.append(l)\n if len(r):\n right.append(r)\n\n'''\nwith open('0.png', 'wb') as f0, open('1.png', 'wb') as f1, open('2.png', 'wb') as f2:\n for line in Differ().compare(left, right):\n data = bytes([int(ch, 16) for ch in line[2:].split(' ')])\n if line[0] == '+':\n f1.write(data)\n elif line[0] == '-':\n 
f2.write(data)\n else:\n f0.write(data)\n'''\n\nf = [BytesIO() for _ in range(3)]\nfor line in Differ().compare(left, right):\n data = bytes([int(ch, 16) for ch in line[2:].split(' ')])\n if line[0] == '+':\n f[1].write(data) # right only\n elif line[0] == '-':\n f[2].write(data) # left only\n else:\n f[0].write(data) # common\n\nfor i in range(3):\n Image.open(f[i], 'r').show()","sub_path":"18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"534165292","text":"import time\nimport datetime\nfrom Trainer_mtcnn import Train\nfrom net import RNet\n\nif __name__ == '__main__':\n start_time = time.time()\n net = RNet()\n net_path = '/home/ray/datasets/Mtcnn/img_celeba_dataset/24'\n net_para_path = './parameter/rnet.pkl'\n iscuda = True\n\n train = Train(net, net_path, net_para_path, iscuda)\n Rnet_time = (time.time() - start_time) / 60\n print('{} training time:'.format(str(net)[:4]), int(Rnet_time), 'minutes')\n print(datetime.datetime.now())","sub_path":"Trainer24.py","file_name":"Trainer24.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"45704628","text":"import nltk\nfrom nltk import FreqDist, NaiveBayesClassifier\nfrom nltk.corpus import movie_reviews\nimport random\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import stopwords\n\ndocuments = [(list(movie_reviews.words(fileid)), category)\n for category in movie_reviews.categories()\n for fileid in movie_reviews.fileids(category)]\nall_words = FreqDist(w.lower() for w in movie_reviews.words())\nword_features = list(all_words)[:2000]\nstop_words = set(stopwords.words(\"english\"))\n\n\ndef document_features(document):\n document_words = set(document)\n features = {}\n for word in word_features:\n features['contains({})'.format(word)] = (word in document_words)\n return features\n\n\nprint(document_features(movie_reviews.words('pos/cv957_8737.txt')))\n\nfeaturesets = [(document_features(d), c) for (d, c) in documents]\ntrain_set, test_set = featuresets[100:], featuresets[:100]\nclassifier = NaiveBayesClassifier.train(train_set)\n\nprint(nltk.classify.accuracy(classifier, test_set))\nclassifier.show_most_informative_features(10)\n","sub_path":"Lab 3/movieReview/movieReviewClassifier.py","file_name":"movieReviewClassifier.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"152599197","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).\n# Author: Naresh Soni\n# Copyright 2015 Cozy Business Solutions Pvt.Ltd\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp import models, fields, api\nfrom openerp.osv import fields as old_fields,osv\nimport openerp.addons.decimal_precision as dp\nfrom openerp.tools.float_utils import float_compare, float_round\n\nclass procurement_order(models.Model):\n _inherit = 'procurement.order'\n @api.model\n def _get_product_supplier(self, procurement):\n return self.env['import.supplier.pricelist'].get_cheapest_supplier_or_value({'1':[procurement.product_id]}, return_value=False)\n \n @api.model\n def _search_suitable_rule(self, procurement, domain):\n ''' we search for exact cheapest supplier location with available quantity to order.'''\n res = super(procurement_order,self)._search_suitable_rule(procurement, domain)\n for rule in res:\n rule_rec = self.env['procurement.rule'].browse(rule)\n location = rule_rec.picking_type_id.default_location_src_id.id\n quants = self.env['stock.quant'].search([('location_id', '=', location),('product_id','=',procurement.product_id.id)])\n qty = 0.0\n for quant in quants:\n qty += quant.qty\n if procurement.product_qty < qty:\n return [rule]\n return res\n \nclass purchase_order(models.Model):\n _inherit = 'purchase.order'\n \n @api.model \n def _prepare_order_line_move(self, order, order_line, picking_id, group_id):\n res = super(purchase_order,self)._prepare_order_line_move(order, order_line, picking_id, group_id)\n for vals in res:\n if vals.has_key('location_id'):\n vals['location_id'] = order.picking_type_id.default_location_src_id.id\n return res\n\nclass sale_order(models.Model):\n _inherit = 'sale.order'\n \n @api.model\n def _prepare_order_line_procurement(self, order, line, group_id=False):\n supplier = self.env['import.supplier.pricelist'].get_cheapest_supplier_or_value({'1':[line.product_id]}, return_value=False)\n if supplier:\n warehouse = self.env['stock.warehouse'].search([('name','ilike',supplier.name)])\n if warehouse and warehouse.id:\n order.write({'warehouse_id':warehouse.id})\n return super(sale_order, self)._prepare_order_line_procurement(order, line, group_id=group_id)\n\n\nclass stock_quant(models.Model):\n _inherit = 'stock.quant'\n \n @api.model\n def _quant_create(self, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,\n force_location_from=False, force_location_to=False): \n positive_quant = super(stock_quant, self)._quant_create(qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,\n force_location_from=False, force_location_to=False)\n if move.location_id.usage == 'supplier':\n price_unit = self.env['stock.move'].get_price_unit(move)\n negative_vals = {\n 'product_id': positive_quant.product_id.id,\n 'history_ids': [(4, move.id)],\n 'in_date': positive_quant.in_date,\n 'company_id': positive_quant.company_id.id,\n 'lot_id': positive_quant.lot_id.id,\n 'owner_id': positive_quant.owner_id.id,\n }\n rounding = move.product_id.uom_id.rounding\n negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id\n negative_vals['qty'] = float_round(-qty, precision_rounding=rounding)\n negative_vals['cost'] = price_unit\n negative_vals['negative_move_id'] = move.id\n negative_vals['package_id'] = src_package_id\n negative_quant_id = self.env['stock.quant'].sudo().create(negative_vals)\n positive_quant.write({'propagated_from_id': negative_quant_id.id})\n return positive_quant\n\n \nclass product_product(models.Model):\n _inherit = 
\"product.product\"\n \n @api.multi\n @api.depends('incoming_qty','qty_available','outgoing_qty','virtual_available')\n def _product_available(self):\n res = {}\n for rec in self:\n qty = 0.0\n incoming_qty = 0.0\n outgoing_qty = 0.0\n virtual_available = 0.0\n if rec.product_tmpl_id.purchase_ok and rec.seller_ids:\n supplier = self.env['import.supplier.pricelist'].get_cheapest_supplier_or_value({'1':[rec]}, return_value=False)\n parent_location = self.env['import.supplier.pricelist'].get_parent_location(supplier.name)\n if parent_location:\n quants = self.env['stock.quant'].search([('location_id', 'child_of', parent_location.id),('product_id','=',rec.id)])\n for quant in quants:\n qty += quant.qty\n domain_move_inout_loc = ['&', ('location_dest_id', 'child_of', parent_location.id), '!', ('location_id', 'child_of', parent_location.id)]\n domain_move_in = self._get_domain_dates() + [('state', 'not in', ('done', 'cancel', 'draft'))] + [('product_id', '=', rec.id)]\n domain_move_out = self._get_domain_dates() + [('state', 'not in', ('done', 'cancel', 'draft'))] + [('product_id', '=', rec.id)]\n domain_move_in += domain_move_inout_loc\n domain_move_out += domain_move_inout_loc\n moves_in = self.env['stock.move'].read_group(domain_move_in, ['product_id', 'product_qty'], ['product_id'])\n moves_out = self.env['stock.move'].read_group(domain_move_out, ['product_id', 'product_qty'], ['product_id'])\n moves_in = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_in))\n moves_out = dict(map(lambda x: (x['product_id'][0], x['product_qty']), moves_out))\n incoming_qty = float_round(moves_in.get(rec.id, 0.0), precision_rounding=rec.uom_id.rounding)\n outgoing_qty = float_round(moves_out.get(rec.id, 0.0), precision_rounding=rec.uom_id.rounding)\n virtual_available = float_round(qty + incoming_qty - outgoing_qty, precision_rounding=rec.uom_id.rounding)\n rec.qty_available = qty\n rec.incoming_qty = incoming_qty\n rec.outgoing_qty = outgoing_qty\n rec.virtual_available = virtual_available\n res[rec.id]={ 'qty_available': qty,\n 'incoming_qty': incoming_qty,\n 'outgoing_qty': outgoing_qty,\n 'virtual_available': virtual_available,\n }\n return res\n\n qty_available = fields.Float(compute='_product_available',\n digits=dp.get_precision('Product Unit of Measure'),\n string='Quantity On Hand',\n help=\"Current quantity of products.\\n\"\n \"In a context with a single Stock Location, this includes \"\n \"goods stored at this Location, or any of its children.\\n\"\n \"In a context with a single Warehouse, this includes \"\n \"goods stored in the Stock Location of this Warehouse, or any \"\n \"of its children.\\n\"\n \"stored in the Stock Location of the Warehouse of this Shop, \"\n \"or any of its children.\\n\"\n \"Otherwise, this includes goods stored in any Stock Location \"\n \"with 'internal' type.\")\n \nclass product_template(models.Model):\n _inherit = \"product.template\"\n \n preferred_supplier = fields.Char(string='Preferred Supplier')\n \n\n \n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"extras/auto_po_and_price_import/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":8877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"379160227","text":"def main(): \n try: \n fout = open(\"abc.txt\", \"w\") \n except IOError: \n print (\"Error: open file failed.\" ) \n return \n \n for i in range(5): \n line = str(i) + \"\\n\" \n fout.write(line) \n \n fout.close() \n \nif __name__ == 
\"__main__\": \n main() \n","sub_path":"fanli/347/347.py","file_name":"347.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"542793116","text":"import os\nimport sys\n\nfrom apps import default\nfrom kubeflow.kubeflow.crud_backend import config, logging\n\nlog = logging.getLogger(__name__)\n\n\ndef get_config(mode):\n \"\"\"Return a config based on the selected mode.\"\"\"\n config_classes = {\n config.BackendMode.DEVELOPMENT.value: config.DevConfig,\n config.BackendMode.DEVELOPMENT_FULL.value: config.DevConfig,\n config.BackendMode.PRODUCTION.value: config.ProdConfig,\n config.BackendMode.PRODUCTION_FULL.value: config.ProdConfig,\n }\n cfg_class = config_classes.get(mode)\n if not cfg_class:\n raise RuntimeError(\"Backend mode '%s' is not implemented. Choose one\"\n \" of %s\" % (mode, list(config_classes.keys())))\n return cfg_class()\n\n\nAPP_NAME = os.environ.get(\"APP_NAME\", \"Jupyter Web App\")\nBACKEND_MODE = os.environ.get(\"BACKEND_MODE\",\n config.BackendMode.PRODUCTION.value)\nPREFIX = os.environ.get(\"APP_PREFIX\", \"/\")\n\n# Check both values for determining what flavor to load\nUI_FLAVOR = os.environ.get(\"UI_FLAVOR\", None)\nif UI_FLAVOR is None:\n UI_FLAVOR = os.environ.get(\"UI\", \"default\")\n\ncfg = get_config(BACKEND_MODE)\ncfg.PREFIX = PREFIX\n\n# Load the app based on UI_FLAVOR env var\nif UI_FLAVOR == \"default\":\n app = default.create_app(APP_NAME, cfg)\nelse:\n log.error(\"No UI flavor for '%s'\" % UI_FLAVOR)\n sys.exit(1)\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"components/crud-web-apps/jupyter/backend/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"7289551","text":"# -*- coding: utf-8 -*-\nimport random\n\nx = \"I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind .\"\nx = x.replace('.', '').replace(':', '')\nx = x.split(' ')\nwhile \"\" in x:\n x.remove(\"\")\n\nhead = x[0]\ntail = x[-1]\n\nx = x[1:-1]\n\nrandom.shuffle(x)\n\nx = [head] + x + [tail]\n\nprint(x)\nprint(head)\nprint(tail)","sub_path":"takahashi/chapter01/knock09.py","file_name":"knock09.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"336603793","text":"import datetime\nimport logging\nimport os\nimport sys\n\nfrom airflow import DAG\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python import PythonOperator\nfrom airflow.utils.task_group import TaskGroup\n\nfrom plugins.utils import on_failure_callback\n\nlogger = logging.getLogger('example_dag')\n\ndefault_args = {\n 'owner': 'nicor88',\n 'depends_on_past': False,\n 'start_date': datetime.datetime(2021, 6, 9),\n 'retries': 0,\n 'retry_delay': datetime.timedelta(seconds=5),\n 'on_failure_callback': on_failure_callback,\n 'execution_timeout': datetime.timedelta(minutes=10)\n}\n\ndag = DAG(\n 'example_dag',\n default_args=default_args,\n description='A simple tutorial DAG',\n schedule_interval=None,\n tags=['example', 'example_dag']\n)\n\n\ndef print_task(**context):\n _task = context['task']\n _dag = context['dag']\n # _ti = context['ti']\n logger.info(f'Executed from {_task.task_id} inside {_dag.dag_id}')\n\n\ndef print_version():\n version = sys.version\n logger.info(version)\n\n return version\n\n\ndef 
print_stage():\n stage = os.environ.get('STAGE')\n logger.info(f'Printing from stage: {stage}')\n\n\ndef print_env():\n env = os.environ\n logger.info(env)\n\n\ndef handle_xcom(**kwargs):\n task_id = 'print_version'\n task_instance = kwargs['ti']\n value = task_instance.xcom_pull(key='return_value', task_ids=task_id)\n\n logger.info(f'Value returned from {task_id}: {value}')\n\n\ntask_group = TaskGroup(\n dag=dag,\n group_id='just_a_task_group'\n)\n\ntask_1 = BashOperator(\n task_id='print_date',\n bash_command='date',\n dag=dag,\n)\n\ntask_2 = PythonOperator(\n task_id='print_task',\n python_callable=print_task,\n dag=dag,\n)\n\ntask_3 = PythonOperator(\n task_id='print_version',\n python_callable=print_version,\n dag=dag,\n task_group=task_group\n)\n\ntask_4 = PythonOperator(\n task_id='xcom_example',\n python_callable=handle_xcom,\n dag=dag\n)\n\ntask_1 >> [task_2, task_3] >> task_4\n","sub_path":"dags/example_dag.py","file_name":"example_dag.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"650924778","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\ndef f(x,y):\n sets = [[] for _ in xrange(50)]\n for a in x:\n for b in y:\n sets.append(a+b)\n return sets\n\n\ndef f2(*x):\n for item1 in x:\n s=s+item1\n return s\n\n\"\"\"def getAllPairs(x):\n a=converNumtoLit(getLit(1))\n for el in a:\n return el\"\"\"\ndef converNumtoLit(x):\n a=[]\n for sym in str(x):\n a.append(getLit(int(sym)))\n return a\n\ndef getLit(x):\n if x==0:\n return(\"\")\n elif x==1:\n return (\"a\",\"b\",\"c\")\n elif x==2:\n return (\"d\",\"e\",\"f\")\n elif x==3:\n return (\"g\",\"h\",\"i\")\n elif x==4:\n return (\"j\",\"k\",\"l\")\n elif x==5:\n return (\"m\",\"n\",\"o\")\n elif x==6:\n return (\"p\",\"q\",\"r\")\n elif x==7:\n return (\"s\",\"t\",\"u\")\n elif x==8:\n return (\"v\",\"w\",\"x\")\n elif x==9:\n return (\"y\",\"z\")\n\"\"\"print getAllPairs(123)\"\"\"\n\n\n","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"575685235","text":"from dataclasses import dataclass\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom unittest import mock\n\nfrom lxml.etree import QName\n\nfrom tests.factories import ClassFactory\nfrom tests.factories import FactoryTestCase\nfrom xsdata.codegen.models import Class\nfrom xsdata.codegen.writer import writer\nfrom xsdata.exceptions import CodeGenerationError\nfrom xsdata.formats.dataclass.generator import DataclassGenerator\nfrom xsdata.formats.mixins import AbstractGenerator\nfrom xsdata.formats.mixins import GeneratorResult\nfrom xsdata.utils import text\n\n\n@dataclass\nclass FakeGenerator(AbstractGenerator):\n dir: Optional[TemporaryDirectory] = None\n\n def render(self, classes: List[Class]) -> Iterator[GeneratorResult]:\n for obj in classes:\n assert obj.package is not None\n yield GeneratorResult(\n path=Path(f\"{self.dir}/{obj.name}.txt\"),\n title=obj.package,\n source=obj.name,\n )\n\n @classmethod\n def module_name(cls, name):\n return text.snake_case(name)\n\n @classmethod\n def package_name(cls, name):\n return text.snake_case(name)\n\n\n@dataclass\nclass EmptyGenerator(FakeGenerator):\n def render(self, classes: List[Class]) -> Iterator[GeneratorResult]:\n for obj in classes:\n yield 
GeneratorResult(\n path=Path(f\"{self.dir}/{obj.name}.txt\"), title=\"Empty\", source=\"\",\n )\n\n\nclass CodeWriterTests(FactoryTestCase):\n FAKE_NAME = \"fake\"\n\n def tearDown(self):\n writer.generators.pop(self.FAKE_NAME, False)\n\n def test_formats(self):\n expected = [\"pydata\", \"plantuml\"]\n self.assertEqual(expected, writer.formats)\n self.assertIsInstance(writer.get_format(\"pydata\"), DataclassGenerator)\n\n def test_register_generator(self):\n writer.register_format(self.FAKE_NAME, FakeGenerator())\n self.assertIn(\"fake\", writer.formats)\n self.assertIsInstance(writer.get_format(\"fake\"), FakeGenerator)\n\n def test_write(self):\n classes = ClassFactory.list(2)\n with TemporaryDirectory() as tmpdir:\n writer.register_format(self.FAKE_NAME, FakeGenerator(tmpdir))\n writer.write(classes, \"fake\")\n\n for obj in classes:\n self.assertEqual(obj.name, Path(f\"{tmpdir}/{obj.name}.txt\").read_text())\n\n def test_write_skip_empty_output(self):\n cls = ClassFactory.create()\n with TemporaryDirectory() as tmpdir:\n writer.register_format(self.FAKE_NAME, EmptyGenerator(tmpdir))\n writer.write([cls], \"fake\")\n\n self.assertFalse(Path(f\"{tmpdir}/{cls.name}.txt\").exists())\n\n @mock.patch(\"builtins.print\")\n def test_print(self, mock_print):\n classes = ClassFactory.list(2)\n writer.register_format(self.FAKE_NAME, FakeGenerator())\n writer.print(classes, \"fake\")\n\n mock_print.assert_has_calls([mock.call(obj.name, end=\"\") for obj in classes])\n\n def test_designate(self):\n classes = ClassFactory.list(3)\n classes[2].package = \"foo!\"\n classes[2].module = \"tests!\"\n\n writer.register_format(self.FAKE_NAME, FakeGenerator())\n writer.designate(classes, \"fake\", \"\", False)\n\n self.assertEqual(\"foo\", classes[0].package)\n self.assertEqual(\"foo\", classes[1].package)\n self.assertEqual(\"foo\", classes[2].package)\n\n self.assertEqual(\"tests\", classes[0].module)\n self.assertEqual(\"tests\", classes[1].module)\n self.assertEqual(\"tests\", classes[2].module)\n\n classes = ClassFactory.list(1, package=None)\n with self.assertRaises(CodeGenerationError) as cm:\n writer.designate(classes, \"fake\", \"\", False)\n\n self.assertEqual(\n \"Class `class_E` has not been assign to a package.\", str(cm.exception)\n )\n\n classes = ClassFactory.list(2, package=None)\n writer.designate(classes, \"fake\", \"bar\", True)\n self.assertEqual(\"bar\", classes[0].package)\n self.assertEqual(\"bar\", classes[1].package)\n\n self.assertEqual(\"xsdata\", classes[0].module)\n self.assertEqual(\"xsdata\", classes[1].module)\n\n classes = ClassFactory.list(1, qname=QName(\"foo\"))\n with self.assertRaises(CodeGenerationError) as cm:\n writer.designate(classes, \"fake\", \"foo\", True)\n\n self.assertEqual(\n (\"Class `foo` target namespace is empty, \" \"avoid option `--ns-struct`\"),\n str(cm.exception),\n )\n","sub_path":"tests/codegen/test_writer.py","file_name":"test_writer.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"448439269","text":"# from __future__ import print_function\nfrom scipy.io.wavfile import read\nimport numpy\nimport argparse\n\ntext_file = \"output.c\"\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('file_name', nargs='?')\n args = parser.parse_args()\n\n a = read(args.file_name)\n soundRate = a[0]\n bitStream = a[1]\n\n with open(\"Output.txt\", \"w\") as text_file:\n print(\"#include \\n\\r\", file=text_file)\n print(\"// 
%s \\n\\r\" % args.file_name, file=text_file)\n print(\"int32_t soundRate = %d;\" % soundRate, file=text_file)\n print(\"int32_t soundFrames = %d;\" % bitStream.size, file=text_file)\n print(\"int32_t sound[] = {\", file=text_file)\n for i in range(0, bitStream.size):\n # for j in range(0, 10):\n if(i != bitStream.size - 1):\n print(\"%d, \" % bitStream[i], file=text_file, end=\"\")\n else:\n print(\"%d\\n};\" % bitStream[i], file=text_file, end=\"\")\n","sub_path":"audio/audioConverter.py","file_name":"audioConverter.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"173036239","text":"from django.conf.urls import url\nfrom . import views\n\n\n\nurlpatterns = [\n url(r'^$',views.index, name='index'),\n # url(r'cf/([0-9]+)/$', views.cf, name='cf'),\n url(r'^trends/([a-zA-Z]+)/$', views.get_trends_in_country, name='get_trends'),\n url(r'^trends/download/([a-zA-Z]+)/$', views.export_as_csv, name='download_trends'),\n url(r'^trends/([a-zA-Z]+)/([a-zA-Z0-9 ,]+)/$', views.get_trends, name='get_trends_keyword'),\n url(r'^trends/download/([a-zA-Z]+)/([a-zA-Z0-9 ,]+)/$', views.export_key_country, name='export_key_country'),\n\n]\n\n","sub_path":"trends_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"91580898","text":"class Solution(object):\n\n def add_one_to_number(self, A):\n\n # init\n B = [0]*len(A)\n B[-1] = 1\n carry = 0\n\n # logic\n for i in range(len(A)-1,-1,-1):\n sum_ = A[i] + B[i] + int(carry)\n if sum_ > 9:\n carry = sum_ / 10\n else:\n carry = 0\n\n A[i] = sum_ % 10\n\n if carry > 0:\n A = [int(carry)] + A\n\n print(A)\n while(A[0] == 0):\n A.pop(0)\n # return\n return A\n\nA = [0,0,0,1,2,1]\nA = [9,9,9]\nsol = Solution()\nsol.add_one_to_number(A)\n","sub_path":"Misc/Arrays/add_one_to_number_integer_araray.py","file_name":"add_one_to_number_integer_araray.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"162004379","text":"# Kell Larson, CPSC 231\nimport turtle\nwn = turtle.Screen()\nwn.title(\"Star\")\n\ntess = turtle.Turtle()\ntess.pensize(5)\ntess.speed(0)\ntess.hideturtle()\ntess.penup()\n\ntess.left(90) # Tess centers the star, \ntess.forward(140)\ntess.right(90)\n\ntess.pendown()\ntess.forward(300)\n\nfor i in range(5): #draw the rest of the star\n\ttess.left(-144)\n\ttess.forward(600)\n\nwn.mainloop()\n","sub_path":"pyprograms/star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"444490078","text":"from django.urls import path\nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [\n # Pages \n path('', views.leave_dashboard_page, name=\"leave_dashboard_page\"),\n path('types/', views.leave_types_page, name=\"leave_types_page\"),\n path('holidays/', views.holidays_page, name=\"holidays_page\"),\n path('path/', views.approval_path_page, name=\"path_page\"),\n path('apply/', views.apply_leave_page, name=\"apply_leave_page\"),\n \n # Process\n path('add_new_type/', views.add_new_type, name=\"add_new_type\"),\n path('edit_type//', views.edit_leave_type_page, name=\"edit_leave_type_page\"),\n path('add_new_holiday/', views.add_new_holiday, name=\"add_new_holiday\"),\n path('add_new_path/', views.add_new_path, name=\"add_new_path\"),\n path('apply_leave/', views.apply_leave, name=\"apply_leave\"),\n path('approve_leave/', views.approve_leave, name=\"approve_leave\"),\n \n path('annual_calendar/', views.Leave_planner_summary, name=\"annual_calendar\"),\n path('leave_planner/', views.leave_planer, name=\"leave_planner\"),\n path('add_new_absence/', views.add_new_absence, name=\"add_new_absence\"),\n path('Leave_planner_summary/', views.Leave_planner_summary, name=\"Leave_planner_summary\"),\n path('leave_calendar/', views.leave_calendar, name=\"Leave_calendar\"),\n\n]","sub_path":"leave/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"587700697","text":"#! /usr/bin/env python\n# -*- coding: utf8 -*-\nimport paho.mqtt.client as mqtt\nimport socket\nimport random\nimport datetime\nfrom optparse import OptionParser\nimport time\nnow_time = time.strftime(\"%Hc%Mc%S\")\nusage = \"usage: %prog [options] [data]\\n \\\n options: -d for sending data\\n \\\n -i IP for which MQTT Broker. Default is localhost\\n \\\n -m MAC for DL Node. Default is 04000476 \\n \\\n -g GID . 
Default is 1C497B499010 \\n \\\n e.g.: '%prog --data \\\"1234567890\\\" will UPLink to Broker\"\nparser = OptionParser(usage)\nparser.add_option(\"-d\", \"--data\", action=\"store\", dest=\"data\",\n default=now_time,\n help=\"sending data\")\nparser.add_option(\"-i\", \"--ip\", action=\"store\", dest=\"host\",\n default=\"127.0.0.1\",\n help=\"setting Broker IP\")\nparser.add_option(\"-g\", \"--gwid\", action=\"store\", dest=\"GID\",\n default=\"1C497B499010\",\n help=\"setting GID\")\nparser.add_option(\"-m\", \"--mac\", action=\"store\", dest=\"MAC\",\n default=\"04000476\",\n help=\"setting DL target Moudle MAC\")\n(options, args) = parser.parse_args()\nif options.data:\n data = options.data\nmid = str(random.randint(1, 99))\n# This is IDU GID\n# GID = \"1C497B499010\"\n# GID = \"1C497B4321AA\"\n# This is ODU GID\n# GID = \"00001c497b431fcd\"\nGID = options.GID\nMAC = options.MAC\ntopic = \"GIOT-GW/UL/\" + GID\nusername = \"admin\"\npassword = \"admin\"\nmsg = '[{\"channel\":923125000, \"sf\":10, '\\\n + '\"time\":\"' + datetime.datetime.now().isoformat()[:19] + '\", ' \\\n + '\"gwip\":\"192.168.88.1\", '\\\n + '\"gwid\":\"0000f835dde7de2\", \"repeater\":\"00000000ffffffff\", ' \\\n + '\"systype\":' + str(int(MAC[:2], 16)) + ', ' \\\n + '\"rssi\":-118.0, \"snr\":0.5, \"snr_max\":3.8, \"snr_min\":-4.5, ' \\\n + '\"macAddr\":\"00000000' + MAC + '\", ' \\\n + '\"data\":\"' + data + '\",' \\\n + '\"frameCnt\":\"' + mid + '\",' \\\n + '\"port\":2}]'\nprint(\"Broker:\"+options.host+\" Topic:\"+topic)\nprint(msg)\nclient = mqtt.Client(protocol=mqtt.MQTTv31)\ntry:\n client.username_pw_set(username, password)\n client.connect(options.host, 1883, 60)\nexcept socket.error as e:\n print(\"Can't Connect to \" + options.host)\n print(\"May use -i to specify broker server?\")\n\nclient.publish(topic, msg)\nclient.disconnect()\n","sub_path":"pub_arduino_DTX_emulator.py","file_name":"pub_arduino_DTX_emulator.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"302540728","text":"# Try to fit the learning_data to model y = ax^2 + bx + c\n# I use numpy for fast and convenient dealing with matrix multiplication\n\nimport numpy as np\nimport math\n\nlearning_data = np.loadtxt(\"learning_data.txt\", delimiter='\\t')\n\nx = learning_data[:, 0]\ny = learning_data[:, 1]\n\n\n# Compute root-mean-square using model w with inputs x and targets y.\n# Only for model y = ax^2 + bx + c.\ndef compute_rms(x, y, w):\n n = x.size\n\n # Create matrix X to compute the value y\n # Values of y corresponds to x is computed : y = X * w\n X = np.ones(shape=(n, 3))\n X[:, 0] = x * x\n X[:, 1] = x\n\n predictions = X.dot(w)\n s = 0.0\n for i in range(n):\n s += (predictions[i] - y[i]) ** 2\n return math.sqrt(s / n)\n\n\n# Try to build model by minimize the gradient vector of sum-of-squares error function.\n# Model y = ax^2 + bx + c\ndef training(x, y, loop_times, alpha):\n n = x.size\n\n # Create matrix X to compute the value y\n # Values of y corresponds to x is computed : y = X * w\n X = np.ones(shape=(n, 3))\n X[:, 0] = x * x\n X[:, 1] = x\n\n # Initialize the value of parameters : a, b, c in model w\n w = np.array([[0.0],\n [0.0],\n [0.0]])\n\n for i in range(loop_times):\n predictions = X.dot(w).flatten()\n delta_c = predictions - y\n delta_b = delta_c * X[:, 1]\n delta_a = delta_c * X[:, 0]\n\n # Step by step minimize gradient vector.\n w[0][0] -= alpha * (1.0 / n) * delta_a.sum()\n w[1][0] -= alpha * (1.0 / n) * delta_b.sum()\n 
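# the three updates descend the MSE gradient of y = ax^2 + bx + c:\n        # residuals weighted by x^2 drive a, by x drive b, and the raw residuals (below) drive c, with the factor 2 folded into alpha\n        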
w[2][0] -= alpha * (1.0 / n) * delta_c.sum()\n\n # print(compute_rms(x,y,w)) #Use this to see the training is working( rms decreases).\n\n return w\n\n\ndef guess_value_of_y(model, x_tests):\n print('Guessing value of y corresponds to x value in test_data:')\n a, b, c = model\n for x in x_tests:\n print(' {0:.15f}\\t{1:.15f}'.format(x, float(a * x ** 2 + b * x + c)))\n\n\nmodel = training(x, y, 1000, 0.1)\na, b, c = float(model[0]), float(model[1]), float(model[2])\nprint('Trained model:')\nprint(' y = {0:.6f} x^2 + {1:.6f} x + {2:.6f}'.format(a, b, c))\n\ntest = np.loadtxt('test_data.txt', delimiter='\\t')\nguess_value_of_y(model, test[:, 0])\n\nrms = compute_rms(test[:, 0], test[:, 1], model)\nprint('RMS value :')\nprint(' {0:.6f}'.format(rms))\n\n","sub_path":"better_model.py","file_name":"better_model.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"387894864","text":"# Module imported to use the sleep function for delays in output\r\nimport time\r\n\r\nclass CaesarCipher:\r\n\r\n def __init__(self):\r\n self.message = \"\" # Stores the message that the user will input\r\n self.encrypted_message = \"\" # Stores encrypted message if this mode is chosen\r\n self.decrypted_message = \"\" # Stores decrypted message if this mode is chosen\r\n self.shift = 0 # Integer storing the number of characters to shift left (decryption) or right (encryption) by\r\n self.mode = \"\" # Either stores 'e' for encryption or 'd' for decryption\r\n \r\n # Function to display title\r\n def title(self):\r\n print(\"\\n-------------\")\r\n print(\"CAESAR CIPHER\")\r\n print(\"-------------\")\r\n \r\n # Function to display the main menu\r\n def main_menu(self):\r\n time.sleep(1)\r\n print(\"\\nENCRYPT MESSAGE - E\")\r\n print(\"DECRYPT MESSAGE - D\")\r\n print(\"INSTRUCTIONS - I\")\r\n print(\"QUIT - Q\")\r\n \r\n # Function to check the user's input to the main menu screen and, if valid, returns their choice\r\n def get_main_menu_input(self):\r\n while True:\r\n choice = input(\"Enter E, D, I or Q: \").lower().strip()\r\n if len(choice) != 1 or choice not in \"ediq\":\r\n print(\"Invalid input!\")\r\n else:\r\n return choice\r\n\r\n # Function to display the instructions and the user exits these by pressing the enter key \r\n def instructions(self):\r\n print(\"\\n------------\")\r\n print(\"INSTRUCTIONS\")\r\n print(\"------------\")\r\n print(\"Enter the message you would like to encrypt or decrypt when prompted\")\r\n print(\"Enter an integer for the amount that the characters in your message shift by when prompted\")\r\n \r\n input(\"\\nPress Enter to start using this program\")\r\n \r\n # Function that quits the program\r\n def quit_program(self):\r\n print(\"\\nThanks for using the program!\")\r\n quit()\r\n \r\n # Function that handles the program loop\r\n def program_loop(self):\r\n # Displays the title\r\n self.title()\r\n \r\n # Loop to ensure user can encrypt, decrypt, see instructions or quit\r\n while True:\r\n # Displays the main menu\r\n self.main_menu()\r\n # Gets the user input for the main menu\r\n choice = self.get_main_menu_input()\r\n\r\n # If the user types something beginning with 'q'\r\n if choice == \"q\":\r\n # The quit function of this class is called\r\n self.quit_program()\r\n # Otherwise, if the user types something starting with 'i'\r\n elif choice == \"i\":\r\n # The instructions function of this class is called\r\n self.instructions()\r\n # Otherwise the user inputted either 'e' 
or 'd'\r\n else:\r\n # The mode is set to 'e' or 'd' depending on user input\r\n self.mode = choice\r\n # Function called so that the encryption/decryption can begin\r\n self.get_message_and_shift()\r\n \r\n # Asks the user to enter the message and shift and then encrypts or decrypts based on user input from main menu\r\n def get_message_and_shift(self):\r\n self.message = input(\"\\nEnter message: \")\r\n \r\n # Keeps asking the user to enter the shift until it is a valid integer (be it positive or negative)\r\n while True:\r\n try:\r\n self.shift = int(input(\"\\nEnter shift: \"))\r\n break\r\n except:\r\n print(\"You must enter a whole number!\")\r\n \r\n if self.mode.startswith(\"e\"):\r\n self.encrypt_message() # Function in this class to encrypt message\r\n else:\r\n self.decrypt_message() # Function in this class to decrypt message\r\n \r\n # Function to encrypt the message and print out the result\r\n def encrypt_message(self):\r\n \r\n # The encrypted message needs to be an empty string to ensure only the message recently typed in by the user is encrypted and stored\r\n self.encrypted_message = \"\"\r\n \r\n # Adding a bit of suspense...\r\n print(\"Encrypting...\")\r\n time.sleep(1)\r\n\r\n # Loops through all the characters in the message\r\n for char in self.message:\r\n # Checks if the character is a capital letter\r\n if ord(char) >= 65 and ord(char) <= 90:\r\n # Formula to find the new index of the letter (65 is the ASCII value for 'A')\r\n new_index = (ord(char) + self.shift - 65) % 26 + 65\r\n # Converts the new index into character form\r\n char = chr(new_index)\r\n # Checks if the character is a lower-case letter\r\n elif ord(char) >= 97 and ord(char) <= 122:\r\n # Formula to find the new index of the letter (97 is the ASCII value for 'a')\r\n new_index = (ord(char) + self.shift - 97) % 26 + 97\r\n # Converts the new index into character form\r\n char = chr(new_index)\r\n\r\n # Will either add the shifted letters or the non-alphabetic characters to the \"encrypted_message\" attribute\r\n self.encrypted_message += char\r\n \r\n # Prints out the encrypted message\r\n print(\"\\nEncrypted message:\", self.encrypted_message)\r\n \r\n # Function to decrypt the message and print out the result\r\n def decrypt_message(self):\r\n \r\n # The decrypted message needs to be an empty string to ensure only the message recently typed in by the user is decrypted and stored\r\n self.decrypted_message = \"\"\r\n\r\n # Adding a bit of suspense...\r\n print(\"Decrypting...\")\r\n time.sleep(1)\r\n\r\n # Loops through all the characters in the message\r\n for char in self.message:\r\n # Checks if the character is a capital letter\r\n if ord(char) >= 65 and ord(char) <= 90:\r\n new_index = (ord(char) - self.shift - 65) % 26 + 65\r\n char = chr(new_index)\r\n # Checks if the character is a lower-case letter\r\n elif ord(char) >= 97 and ord(char) <= 122:\r\n new_index = (ord(char) - self.shift - 97) % 26 + 97\r\n char = chr(new_index)\r\n \r\n # Will either add the shifted letters or the non-alphabetic characters to the \"decrypted_message\" attribute\r\n self.decrypted_message += char\r\n \r\n print(\"\\nDecrypted message:\", self.decrypted_message)\r\n\r\n# Creates an instance of the \"CaesarCipher\" class\r\nc = CaesarCipher()\r\n# program_loop function from the \"CaesarCipher\" class is called through the object in previous 
line\r\nc.program_loop()","sub_path":"Year13/Alliyah/CaesarCipher/caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"479077579","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/neox/commons/forms.py\n# Compiled at: 2020-02-26 23:29:03\n# Size of source mod 2**32: 8903 bytes\nimport locale\nfrom PyQt5.QtWidgets import QLineEdit, QLabel, QComboBox, QGridLayout, QTextEdit, QTreeView, QCompleter\nfrom PyQt5.QtCore import Qt, QRegExp\nfrom PyQt5.QtGui import QRegExpValidator, QDoubleValidator\nfrom neox.commons.qt_models import get_simple_model\nfrom neox.commons.model import Modules\nregex_ = QRegExp('^\\\\d{1,3}(([.]\\\\d{3})*),(\\\\d{2})$')\nvalidator = QRegExpValidator(regex_)\ntry:\n locale.setlocale(locale.LC_ALL, str('es_CO.UTF-8'))\nexcept:\n print('Warning: Error setting locale')\n\n__all__ = ['Label', 'Field', 'ComboBox', 'GridForm', 'FieldMoney']\n\ndef set_object_name(obj, type_, value):\n size = 'small'\n color = 'gray'\n if value.get('size'):\n size = value.get('size')\n if value.get('color'):\n color = value.get('color')\n name = type_ + size + '_' + color\n obj.setObjectName(name)\n\n\nclass Completer(QCompleter):\n\n def __init__(self, parent, records, fields):\n super(Completer, self).__init__()\n self.parent = parent\n self.treeview_search = QTreeView()\n col_headers = self.treeview_search.header()\n col_headers.hide()\n self.setPopup(self.treeview_search)\n self.fields = fields\n self._set_model(records, fields)\n self.activated.connect(self.on_accept)\n self.setFilterMode(Qt.MatchContains)\n self.setCaseSensitivity(Qt.CaseInsensitive)\n self.setWrapAround(True)\n self.setCompletionColumn(1)\n self.treeview_search.setColumnWidth(1, 300)\n self.treeview_search.setColumnHidden(0, True)\n self.id = None\n\n def get_values(self, records):\n vkeys = [f[0] for f in self.fields]\n values = []\n for r in records:\n row = []\n for key in vkeys:\n row.append(r[key])\n\n values.append(row)\n\n return values\n\n def _set_model(self, records, headers):\n headers = [f[1] for f in self.fields]\n values = self.get_values(records)\n self.model = get_simple_model(self.parent, values, headers)\n self.setModel(self.model)\n\n def on_accept(self):\n model_index = self._get_model_index()\n idx = self.model.index(model_index.row(), 0)\n self.id = idx.data()\n\n def _get_model_index(self):\n item_view = self.popup()\n index = item_view.currentIndex()\n proxy_model = self.completionModel()\n model_index = proxy_model.mapToSource(index)\n return model_index\n\n\nclass Label(QLabel):\n\n def __init__(self, obj, key, value, align='right'):\n super(Label, self).__init__()\n self.setText(value['name'] + ':')\n set_object_name(self, 'label_', value)\n if align == 'left':\n self.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n else:\n self.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n\n\nclass Field(QLineEdit):\n\n def __init__(self, obj, key, value, type=None):\n super(Field, self).__init__()\n setattr(obj, 'field_' + key, self)\n self.parent = obj\n set_object_name(self, 'field_', value)\n if value.get('type') == 'numeric':\n self.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n else:\n if value.get('type') == 'relation':\n self.set_completer(value.get('model'), value.get('fields'), value.get('domain'))\n\n def 
set_completer(self, tryton_model, fields, domain=[]):\n        records = tryton_model.find(domain)\n        self.completer = Completer(self.parent, records, fields)\n        self.setCompleter(self.completer)\n\n    def get_id(self):\n        return self.completer.id\n\n    def _get_tryton_model(self, model, fields):\n        modules = Modules(self, self.conn)\n        modules.set_models([\n            {'name':'_Model', \n            'model':model, \n            'fields':fields}])\n\n\n# note: QTextEdit has no setValidator(); TextField is not used by GridForm below\nclass TextField(QTextEdit):\n\n    def __init__(self, obj, key, value):\n        super(TextField, self).__init__()\n        setattr(obj, 'field_' + key, self)\n        set_object_name(self, 'field_', value)\n        self.value_changed = False\n        self.setValidator(validator)\n\n    def textChanged(self, text):\n        self.value_changed = True\n\n\nclass FieldMoney(QLineEdit):\n\n    def __init__(self, obj, key, value, amount=None, digits=2, readonly=True):\n        super(FieldMoney, self).__init__()\n        setattr(obj, 'field_' + key, self)\n        set_object_name(self, 'field_', value)\n        self.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n        self.digits = digits\n        self.value_changed = False\n        self.textEdited.connect(self.value_edited)\n        self._text = '0'\n        self.amount = 0\n        self.setReadOnly(readonly)\n        validator = QDoubleValidator()\n        validator.setDecimals(digits)\n        self.setValidator(validator)\n        if not amount:\n            self.zero()\n\n    def __str__(self):\n        return self.format_text(self._text)\n\n    def format_text(self, text_):\n        amount = float(text_)\n        return '{:,}'.format(round(amount, self.digits))\n\n    def setText(self, amount):\n        if not amount:\n            text = ''\n        else:\n            text = self.format_text(amount)\n        super(FieldMoney, self).setText(str(text))\n\n    def zero(self):\n        self.setText(str(0))\n\n    def value_edited(self, amount):\n        self.value_changed = True\n\n    def show(self):\n        pass\n\n\nclass ComboBox(QComboBox):\n\n    def __init__(self, obj, key, data):\n        super(ComboBox, self).__init__()\n        setattr(obj, 'field_' + key, self)\n        self.parent = obj\n        self.setFrame(True)\n        self.setObjectName('field_' + key)\n        values = []\n        if data.get('values'):\n            values = data.get('values')\n        heads = []\n        if data.get('heads'):\n            heads = data.get('heads')\n        selection_model = get_simple_model(obj, values, heads)\n        self.setModel(selection_model)\n        self.setModelColumn(1)\n        selection_model.findItems((str(3)), column=0)\n        if data.get('on_change'):\n            self.method_on_change = getattr(self.parent, data.get('on_change'))\n            self.currentIndexChanged.connect(self.on_change)\n\n    def on_change(self, index):\n        self.method_on_change(index)\n\n    def set_editable(self, value=True):\n        self.setEditable(value)\n\n    def set_enabled(self, value=True):\n        self.setEnabled(value)\n\n    def get_id(self):\n        model = self.model()\n        row = self.currentIndex()\n        column = 0\n        res = model.item(row, column)\n        return res.text()\n\n    def get_label(self):\n        model = self.model()\n        row = self.currentIndex()\n        column = 1\n        res = model.item(row, column)\n        return res.text()\n\n    def set_from_id(self, id_):\n        model = self.model()\n        items = model.findItems((str(id_)), column=0)\n        idx = model.indexFromItem(items[0])\n        self.setCurrentIndex(idx.row())\n\n\nclass GridForm(QGridLayout):\n    __doc__ = \"\\n    Add a simple form Grid Style to screen,\\n    from a data dict with set of {values, attributes}\\n    example:\\n    (field_name, {\\n        'name': string descriptor,\\n        'readonly': Bool,\\n        'type': type_widget,\\n        'placeholder': True or False,\\n    }),\\n    col:: is number of columns\\n    type_widget :: field or selection\\n    \"\n\n    def __init__(self, obj, values, col=1):\n        super(GridForm, self).__init__()\n        row = 1\n        cols = 0\n        align = 'right'\n        if col == 0:\n            align = 'left'\n        for key, 
value in list(values.items()):\n if not value.get('placeholder'):\n _label = Label(obj, key, value, align)\n if value.get('type') == 'selection':\n _field = ComboBox(obj, key, value)\n else:\n if value.get('type') == 'money':\n _field = FieldMoney(obj, key, value)\n else:\n _field = Field(obj, key, value)\n if value.get('password') is True:\n _field.setEchoMode(QLineEdit.Password)\n else:\n if value.get('placeholder'):\n _field.setPlaceholderText(value['name'])\n else:\n self.setRowStretch(row, 0)\n column1 = cols * col + 1\n column2 = column1 + 1\n if value.get('invisible') is True:\n continue\n value.get('placeholder') or self.addWidget(_label, row, column1)\n if col == 0:\n row = row + 1\n self.addWidget(_field, row, column1)\n else:\n self.addWidget(_field, row, column2)\n if value.get('readonly') is True:\n _field.setReadOnly(True)\n _field.setFocusPolicy(Qt.NoFocus)\n if cols < col - 1:\n cols += 1\n else:\n row += 1\n cols = 0","sub_path":"pycfiles/neox-5.0.35-py3.7/forms.cpython-37.py","file_name":"forms.cpython-37.py","file_ext":"py","file_size_in_byte":9384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"280417869","text":"H,W=map(int,input().split())\nA=[]\nAs=[]\nfor h in range(H):\n A.append(list(map(int,input().split())))\n As.append(set(A[h]))\n\ndef dfs(h,xor1,hist):\n if h==H:\n if xor1>0:\n return hist\n else:\n return None\n for a in As[h]:\n ret = dfs(h+1,xor1^a,hist+[a])\n if ret is not None:\n return ret\n return None\n\nhist=dfs(0,0,[])\nif hist is not None:\n print('TAK')\n print(*[A[h].index(hist[h])+1 for h in range(H)])\nelse:\n print('NIE')\n","sub_path":"codeforces/cr553_2/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"649629298","text":"\n\n#calss header\nclass _BUMP():\n\tdef __init__(self,): \n\t\tself.name = \"BUMP\"\n\t\tself.definitions = [u'a round, raised area on a surface or on the body: ', u'the sound of something falling to the ground: ', u'an accident involving a car, especially one that is not serious: ', u'an increase in something: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_bump.py","file_name":"_bump.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"336508142","text":"# coding:utf-8\nimport numpy as np\nimport os\nimport sys\nimport configparser\nimport math\nimport _function as func\n\n\nclass SoundDirections:\n def __init__(self):\n sd_config = './config_sd.ini'\n if os.path.exists(sd_config):\n config = configparser.ConfigParser()\n config.read(sd_config)\n self.radius = int(config['Sound']['Radius'])\n self.min_theta = int(config['Sound']['Mini_Theta'])\n self.max_theta = int(config['Sound']['Max_Theta'])\n self.theta_interval = int(config['Sound']['Theta_Interval'])\n y_grid = int(config['Sound']['Y_Grid'])\n x_grid = int(config['Sound']['X_Grid'])\n self.grid_size = [y_grid, x_grid]\n y_cell = int(config['Sound']['Y_Cell'])\n x_cell = int(config['Sound']['X_Cell'])\n self.cell_size = [y_cell, x_cell]\n self.z_distance = int(config['Sound']['Z_Distance'])\n else:\n print(\"#couldn't find\", sd_config)\n sys.exit()\n \n def __call__(self, bm_azimuth=False, 
bm_image=False):\n        if bm_azimuth:\n            return self.directions_azimuth()\n        elif bm_image:\n            return self.directions_2d()\n        else:\n            sys.exit()\n    \n    def directions_azimuth(self):\n        # return sound source theta list, and sound source position class list\n        theta_list = np.arange(self.min_theta, self.max_theta + self.theta_interval, self.theta_interval)\n        ss_pos_list = []\n        for theta in theta_list:\n            ss_pos_list.append(func.Position(self.radius, theta))\n        # print('#Create temporal sound source position list')\n        # print(\"theta 0 's direction is\", ss_pos_list[0].pos())\n        \n        # return theta_list, ss_pos_list\n        return np.array(ss_pos_list)\n    \n    def directions_2d(self):\n        # units: cm\n        # ss_pos_list = []\n        ss_position = \\\n            np.ones((3, int(self.grid_size[0] / self.cell_size[0] + 1),\n                     int(self.grid_size[1] / self.cell_size[1] + 1))) * self.z_distance\n        x = \\\n            np.tile(np.arange(-self.grid_size[0] / 2, self.grid_size[0] / 2 + 1, self.cell_size[0]),\n                    (int(self.grid_size[1] / self.cell_size[1] + 1), 1))\n        y = \\\n            np.tile(np.arange(self.grid_size[1] / 2, -self.grid_size[1] / 2 - 1, -self.cell_size[1]),\n                    (int(self.grid_size[0] / self.cell_size[0] + 1), 1)).T\n        ss_position[0, :, :] = x\n        ss_position[1, :, :] = y\n        ss_position = np.reshape(ss_position, (3, -1))\n        # print(ss_position)\n        # print('#Create temporal sound source position list')\n        return ss_position\n    \n    \nif __name__ == '__main__':\n    sd = SoundDirections()\n    azimuth = sd(bm_azimuth=True)\n    image = sd(bm_image=True)\n","sub_path":"src/beamforming_sound_directions.py","file_name":"beamforming_sound_directions.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"262696105","text":"#from __future__ import print_function\r\nfrom ctypes import POINTER, cast\r\nimport comtypes\r\nfrom comtypes import CLSCTX_ALL\r\n\r\nfrom pycaw.pycaw import AudioUtilities, IAudioEndpointVolume, CLSID_MMDeviceEnumerator, IMMDeviceEnumerator, EDataFlow, ERole\r\n\r\n\r\nclass MyAudioUtilities(AudioUtilities):\r\n    @staticmethod\r\n    def GetSpeaker(id=None):\r\n        device_enumerator = comtypes.CoCreateInstance(\r\n            CLSID_MMDeviceEnumerator,\r\n            IMMDeviceEnumerator,\r\n            comtypes.CLSCTX_INPROC_SERVER)\r\n        if id is not None:\r\n            speakers = device_enumerator.GetDevice(id)\r\n        else:\r\n            speakers = device_enumerator.GetDefaultAudioEndpoint(EDataFlow.eRender.value, ERole.eMultimedia.value)\r\n        return speakers\r\n\r\nasync def setVolume(targetVolume, outputDevice):\r\n    mixer_output = None\r\n\r\n    devicelist = MyAudioUtilities.GetAllDevices()\r\n\r\n    for device in devicelist:\r\n        if outputDevice in str(device) and \"Speaker\" in str(device):\r\n            mixer_output = device\r\n\r\n    devices = MyAudioUtilities.GetSpeaker(mixer_output.id)\r\n\r\n    interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)\r\n    volume = cast(interface, POINTER(IAudioEndpointVolume))\r\n    volume.SetMasterVolumeLevelScalar(targetVolume/100, None)\r\n    print(\"volume.GetMasterVolumeLevel(): %s\" % volume.GetMasterVolumeLevelScalar())\r\n\r\nasync def getVolume(outputDevice):\r\n    mixer_output = None\r\n\r\n    devicelist = MyAudioUtilities.GetAllDevices()\r\n\r\n    for device in devicelist:\r\n        if outputDevice in str(device) and \"Speaker\" in str(device):\r\n            mixer_output = device\r\n\r\n    devices = MyAudioUtilities.GetSpeaker(mixer_output.id)\r\n\r\n    interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)\r\n    volume = cast(interface, POINTER(IAudioEndpointVolume))\r\n    
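# GetMasterVolumeLevelScalar returns a fraction in [0.0, 1.0], so scale by 100 to report percent\r\n    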
print(\"volume.GetMasterVolumeLevel(): %s\" % round(volume.GetMasterVolumeLevelScalar()*100))\r\n return volume.GetMasterVolumeLevelScalar()*100\r\n\r\nasync def printAllDevices():\r\n mixer_output = None\r\n\r\n devicelist = MyAudioUtilities.GetAllDevices()\r\n\r\n for device in devicelist:\r\n if \"Speaker\" in str(device):\r\n print(device)\r\n\r\ndef main():\r\n mixer_output = None\r\n\r\n devicelist = MyAudioUtilities.GetAllDevices()\r\n\r\n for device in devicelist:\r\n if \"Speakers (Realtek(R) Audio)\" in str(device) and \"Speaker\" in str(device):\r\n mixer_output = device\r\n\r\n devices = MyAudioUtilities.GetSpeaker(mixer_output.id)\r\n print(devices)\r\n\r\n interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)\r\n volume = cast(interface, POINTER(IAudioEndpointVolume))\r\n print(\"volume.GetMute(): %s\" % volume.GetMute())\r\n print(\"volume.GetMasterVolumeLevel(): %s\" % volume.GetMasterVolumeLevel())\r\n print(\"volume.GetVolumeRange(): (%s, %s, %s)\" % volume.GetVolumeRange())\r\n print(\"volume.SetMasterVolumeLevel()\")\r\n volume.SetMasterVolumeLevelScalar(0.3, None)\r\n print(\"volume.GetMasterVolumeLevel(): %s\" % volume.GetMasterVolumeLevelScalar())\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #main()\r\n #setVolume(100, \"Speakers (Realtek(R) Audio)\")\r\n getVolume(\"Speakers (Realtek(R) Audio)\")\r\n #printAllDevices()\r\n","sub_path":"volume_control.py","file_name":"volume_control.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"527891991","text":"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt \nfrom matplotlib import cm \nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy import linalg\nfrom collections import OrderedDict\n\n'''\npol_regression.csv is a one-dimensional dataset with inputs x (1 input dimension) and outputs y (1 output dimension).\n'''\ndata_train = pd.DataFrame.from_csv('pol_regression.csv')\ndata_train.sort_values('x', axis = 0, ascending = True, inplace = True, na_position = 'last')\nx_train = data_train['x'].as_matrix()\ny_train = data_train['y'].as_matrix()\n\n\n\ndef getPolynomialDataMatrix(x, degree):\n '''\n This function does exactly the same as np.vander()\n except np.vander() arranges the numbers in the opposite \n order. Also this runs slightly quicker.\n Essentially arranges a Vandermonde matrix with a row\n for each variable and the amount of columns (and therefore\n the amount of incrementing exponents) is based\n on the degree. \n\n For each variable in x, a row : [1, x, x², x³... 
xⁿ] where n based on the degrees\n '''\n X = np.ones(x.shape)\n for i in range(1,degree + 1):\n X = np.column_stack((X, x ** i))\n \n return X\n\ndef getWeightsForPolynomialFit(x,y,degree):\n #Least squares estimation to get the weights for the equation\n X = getPolynomialDataMatrix(x, degree)\n\n XX = X.transpose().dot(X)\n w = np.linalg.solve(XX, X.transpose().dot(y))\n\n return w\n\n\n\n\ndef pol_regression(features_train, y_train, degree):\n #Calls function to use least squares solution to get weights\n weights = getWeightsForPolynomialFit(features_train, y_train, degree) \n\n yintercept = weights[0]\n print('Y Intercept = ');print(yintercept)\n\n\n for i in np.arange(1, len(weights)): \n yintercept += weights[i] * features_train ** i #Arranges the equation β0 + β1x^1 + β2x^2...\n\n line = yintercept #in this case the variable is no longer just the y-intercept but we've built the \n #rest of the equation up on top of it, so we make a new variable to make this clear\n\n plt.plot(features_train, line) \n plt.title('Polynomial Fit')\n\n plt.xlabel('x')\n plt.ylabel('y') \n #plt.show()\n return weights\n\n\n\n\n\n\ndef eval_pol_regression(parameters, x, y, degree):\n \n polydata1 = pd.read_csv('pol_regression.csv')\n poly2 = polydata1.sample(frac=1)\n\n train_data = poly2[0:(int(round(len(poly2)*0.7)))]\n test_data = poly2[(int(round(len(poly2)*0.7))):(len(poly2))]\n\n print('Amount of training points:');print(len(train_data))\n print('Amount of testing points:');print(len(test_data))\n\n\n x_train = train_data['x'].values\n y_train = train_data['y'].values\n\n x_test = test_data['x'].values\n y_test = test_data['y'].values\n\n print('X and Y Train')\n print(x_train)\n print(y_train)\n\n print('X and Y Test')\n print(x_test)\n print(y_test)\n rmse_train = np.zeros((9,1))\n rmse_test = np.zeros((9,1))\n\n \n for i in range(1,10):\n Xtrain2 = getPolynomialDataMatrix(x_train,i)\n Xtest2 = getPolynomialDataMatrix(x_test,i)\n if i>=1:\n w = getWeightsForPolynomialFit(x_train,y_train,i)\n elif i == 0:\n w = np.mean(y_train)\n rmse_train[i-1] = np.sqrt(np.mean((Xtrain2.dot(w)-y_train)**2)) #Root mean squared error\n rmse_test[i-1] = np.sqrt(np.mean((Xtest2.dot(w)-y_test)**2)) #Root mean squared error\n \n '''\n Xtrain2 = getPolynomialDataMatrix(x_train,degree)\n Xtest2 = getPolynomialDataMatrix(x_test,degree)\n if degree>=1:\n w = getWeightsForPolynomialFit(x_train,y_train,degree)\n elif degree == 0:\n w = np.mean(y_train)\n rmse_train[degree-1] = np.sqrt(np.mean((Xtrain2.dot(w)-y_train)**2)) #Root mean squared error\n rmse_test[degree-1] = np.sqrt(np.mean((Xtest2.dot(w)-y_test)**2)) #Root mean squared error\n '''\n plt.plot(range(1,10),rmse_train)\n plt.plot(range(1,10),rmse_test)\n \n #plt.plot(rmse_train)\n #plt.plot(rmse_test)\n\n plt.legend(('RMSE Training', 'RMSE Testing'))\n plt.xlabel('Order')\n plt.ylabel('Error') \n\n plt.show()\n\n\n\n\n\n #rmse = \n #return rmse\n\nweights = getWeightsForPolynomialFit(x_train, y_train, 1) \n\nyintercept = weights[0]\n\n\nplt.plot(x_train, y_train, 'bo') \nline0 = plt.axhline(y=yintercept, color='r', linestyle='-')\nline1 = pol_regression(x_train, y_train, 1)\nline2 = pol_regression(x_train, y_train, 2)\nline3 = pol_regression(x_train, y_train, 3)\nline4 = pol_regression(x_train, y_train, 5)\nline5 = pol_regression(x_train, y_train, 10)\n\n\n#np.mean(y_train) and plot like ususal \n\nplt.legend(('points', '$x^0$','$x$', '$x^2$', '$x^3$', '$x^5$','$x^{10}$'), loc = 'lower right')\naxes = plt.gca() #x and y\naxes.set_xlim([-5,5]) #as per 
brief\n\nplt.show()\n\neval_pol_regression(line2, x_train, y_train, 5)\n\n#eval_pol_regression(weights1, x_train, y_train, 1)","sub_path":"Python Files/LinearRegression_and_KMeans/Polynomial.py","file_name":"Polynomial.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"535217573","text":"\"\"\"\n# @file init_logger.py\n# @Synopsis init logger\n# @author Ming Gu(guming02@baidu.com)\n# @version 1.0\n# @date 2015-09-19\n\"\"\"\nimport sys\nsys.path.append('..')\nimport logging\nimport logging.handlers\nfrom env_config import EnvConfig\nfrom dao.mail_handler import MailHandler\nfrom dao.sms_handler import SMSHandler\n\nclass InitLogger(object):\n    \"\"\"\n    # @Synopsis initiate logger\n    \"\"\"\n\n    def __init__(self):\n        logger = logging.getLogger(EnvConfig.LOG_NAME)\n        logger.setLevel(logging.DEBUG)\n        file_hdlr = logging.handlers.TimedRotatingFileHandler(\n            EnvConfig.GENERAL_LOG_FILE, when='D', backupCount=7)\n        stdout_hdler = logging.StreamHandler(sys.stdout)\n        email_hdler = MailHandler()\n        email_hdler.setLevel(logging.ERROR)\n        sms_handler = SMSHandler()\n        sms_handler.setLevel(logging.ERROR)\n        formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s',\n                \"%Y-%m-%d %H:%M:%S\")\n        file_hdlr.setFormatter(formatter)\n        stdout_hdler.setFormatter(formatter)\n        email_hdler.setFormatter(formatter)\n        sms_handler.setFormatter(formatter)\n        logger.addHandler(file_hdlr)\n        logger.addHandler(stdout_hdler)\n        logger.addHandler(email_hdler)\n        logger.addHandler(sms_handler)\n\nif __name__ == '__main__':\n    InitLogger()\n","sub_path":"conf/init_logger.py","file_name":"init_logger.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"83144858","text":"\n# coding: utf-8\n\n# In[4]:\n\n\n\n#sequential search is the easiest\n#basically it is just doing a traversal on all items\ndef f(n,list):\n    i=0\n    \n    #we create a boolean value to control the loop\n    #when we find the value in list\n    #we reset found so we break the loop\n    #in the end the output is found\n    found=False\n    \n    #note that while function does one extra iteration\n    #so it is in:\n    while i<len(list) and not found:\n        if list[i]==n:\n            found=True\n        else:\n            i=i+1\n    return found\n\n\n#binary search assumes a sorted list\n#when the target is below the midpoint we shrink the last index:\ndef bina(n,list):\n    f=0\n    l=len(list)-1\n    found=False\n    while f<=l and not found:\n        i=(f+l)//2\n        if list[i]==n:\n            found=True\n        elif n<list[i]:\n            l=i-1\n        else:\n            \n            #vice versa\n            f=i+1\n    return found\n\n\n# In[13]:\n\n\nbina(1,[4,5,7,9,24])\n\n","sub_path":"sequential and binary search.py","file_name":"sequential and binary search.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"152828504","text":"import cv2 as cv\nimport sys\n\nimport frameMain\nimport control\nimport parameters\nimport opencv_utils\n\nrectangle_offset = 128\noffset = 64\n\nwindow_name = 'image'\n\nglobal imgPath\nglobal img\n\n# Callback functions\ndef draw_rectangule(event,x,y,flags,param):\n    if event == cv.EVENT_LBUTTONDOWN and control.mark_image_rectangle:\n        print(param)\n        global img\n        print('x: ' + str(x))\n        print('y: ' + str(y))\n        #img = cv.imread(param)\n        img = opencv_utils.imageRead(param)\n        \n        cv.rectangle(img, (x-offset,y-offset), (x+offset,y+offset), parameters.color_green, 2, cv.LINE_4)\n        #cv.imshow(window_name,img)\n        opencv_utils.imageShow(window_name, img)\n\n#Core functions\ndef abrir_imagem(imagePath):\n    global imgPath\n    imgPath = imagePath\n    global img\n    img = opencv_utils.imageRead(imagePath)\n    \n    if img is None:\n        sys.exit(\"Could not read the image.\")\n    \n    opencv_utils.openWindow(window_name, img)\n    \n    
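# register the click handler; param forwards the image path so draw_rectangule can reload the image\n    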
cv.setMouseCallback(window_name,draw_rectangule,param=imgPath)\n    \n    frameMain.telaInicial()\n\ndef marcar_regiao():\n    opencv_utils.openWindow(window_name, img)\n\n    cv.setMouseCallback(window_name,draw_rectangule,param=imgPath)\n\n    frameMain.telaInicial()\n    \n    if control.mark_image_rectangle:\n        while True:\n            k = cv.waitKey(1) & 0xFF\n\n            if k == ord('q'):\n                print('exiting')\n                salvar_imagem()\n                cv.destroyWindow(window_name)\n                break\n\ndef salvar_imagem():\n    # convert to png\n    cv.imwrite(\"../images/processing.png\", img)\n","sub_path":"src/opencv.py","file_name":"opencv.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"299042782","text":"from pygame.math import Vector2 as vec\r\n#screen setting\r\nWIDTH, HEIGHT = 610, 670\r\nFPS = 60\r\nTOP_BOTTOM_BUFFER = 50\r\nMAZE_WIDTH, MAZE_HEIGHT = WIDTH- TOP_BOTTOM_BUFFER, HEIGHT- TOP_BOTTOM_BUFFER\r\n\r\nROWS = 28\r\nCOLS = 30\r\n#color\r\nBLACK = (0,0,0)\r\nRED = (255, 0, 0)\r\nGREY = (110, 110, 104)\r\nWHITE = (255,255,255)\r\n#font\r\nSTART_TEXT_SIZE = 26\r\nSTART_FONT = 'Bauhaus 93'\r\n\r\n#image\r\nSTART_ICON = \"pacmanLogo.png\"\r\n#player setting\r\nPLAYER_START_POSITION = vec(1,1)","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"332180887","text":"import numpy as np\nimport random\nfrom collections import deque\nfrom mxnet import nd, gluon, init, autograd\nfrom mxnet.gluon import nn\n\nclass Agent(object): # Double DQN (decoupled evaluation)\n    def __init__(self, env):\n        self._env = env\n        self.gamma = 0.9 # discount factor\n        self.epsilon = 1.\n        self.epsilon_decay = 0.995\n        self.epsilon_min = 0.01\n        self.batch_size = 64\n        self.update_c = 1000 # steps to update target model\n        self.max_steps = 1000\n        \n        self.memory = deque() # s, a, r, s', done\n        self.memory_limit = 20000\n        self.param_file_name = \"net.params\"\n        self.train_model = self.create_model()\n        self.target_model = self.create_model(True)\n        self.trainer = gluon.Trainer(self.train_model.collect_params(),\n                'sgd', {'learning_rate':0.01})\n                #'nag', {'learning_rate':0.005})\n                #'adam', {'learning_rate':0.001}) # all adaptive methods give bad result\n        self.loss = gluon.loss.L2Loss()\n\n        self.train_loss = 0.\n\n    def create_model(self, target = False):\n        net = nn.Sequential()\n        with net.name_scope():\n            net.add(\n                nn.Dense(32, activation=\"relu\", in_units = 4),\n                nn.Dense(16, activation=\"relu\", in_units = 32),\n                nn.Dense(self._env.action_space.n, in_units = 16)\n            )\n        if target:\n            self.train_model.save_params(self.param_file_name)\n            net.load_params(self.param_file_name)\n        else:\n            net.initialize(init=init.Xavier())\n        return net\n    \n    def obs_to_state(self, observation):\n        #s = tuple((observation*10).astype(int))\n        state = nd.array(observation)\n        state = nd.reshape(state, [1,self._env.observation_space.shape[0]])\n        return state\n\n    def greedy(self, observation):\n        if np.random.random_sample(None) < self.epsilon:\n            return self._env.action_space.sample()\n        state = self.obs_to_state(observation)\n        return int(nd.argmax(self.train_model(state), 1).asscalar())\n    \n    def replay(self):\n        # experience replay\n        if len(self.memory) < self.batch_size:\n            return\n        batch = random.sample(self.memory, self.batch_size)\n        state_batch = nd.array([b[0] for b in batch]) \n        action_batch = nd.array([b[1] for b in batch])\n        reward_batch = nd.array([b[2] for b in batch])\n        next_state_batch = 
nd.array([b[3] for b in batch])\n # Double-DQN:\n # Calculate target value by choosing action with online network, \n # and getting value from target network\n target_action_batch = np.argmax(self.train_model(next_state_batch), 1)\n target_batch = reward_batch + self.gamma * \\\n nd.pick(self.target_model(next_state_batch), target_action_batch, 1)\n #np.max(self.target_model(next_state_batch),1)\n for i in range(self.batch_size): # s, a, r, _s, d\n if batch[i][4]:\n target_batch[i] = reward_batch[i]\n #target_batch[i] = target_batch[i] + self.gamma * \\\n # np.max(self.model(nd.reshape(next_state_batch[i],[1,4])),1)\n with autograd.record():\n q_target_batch = self.train_model(state_batch)\n #print(q_target_batch.shape,\"\\n\", target_batch.shape)\n output_batch = nd.pick(q_target_batch, action_batch, 1)\n loss = self.loss(output_batch,target_batch)\n loss.backward()\n self.train_loss += loss.mean().asscalar()\n self.trainer.step(self.batch_size)\n return\n\n def learn(self, max_episodes=1000):\n verb_s = 1\n c = 0\n stop_count = 0\n stop_limit = 20\n for i in range(max_episodes):\n self.train_loss = 0.\n obser = self._env.reset()\n for t in range(self.max_steps):\n action = self.greedy(obser)\n next_obser, reward, done, _ = self._env.step(action)\n #next_state = self.obs_to_state(next_state)\n self.memory.append((obser, action, reward, next_obser, done))\n if len(self.memory) > self.memory_limit:\n self.memory.popleft()\n obser = next_obser\n self.replay()\n if c % self.update_c == 0:\n print(\"update target function at episode {}\".format(i))\n self.train_model.save_params(self.param_file_name)\n self.target_model.load_params(self.param_file_name)\n c += 1\n\n if done:\n stop_count = stop_count + 1 if t == 199 else 0\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n if i % verb_s == 0:\n print(\"Episode {} over, loss = {:.3f}, epi = {:.3f}, steps = {}\"\n .format(i+1, self.train_loss, self.epsilon, t))\n break\n if stop_count == stop_limit:\n print(\"{} consecutive max reward, train over\".format(stop_limit))\n break\n\n # update again at the end\n print(\"update target function at the end of last episode\")\n self.train_model.save_params(self.param_file_name)\n self.target_model.load_params(self.param_file_name)\n\n def query(self, observation):\n return int(nd.argmax(self.target_model(self.obs_to_state(observation)), 1).asscalar())\n","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"476576136","text":"#! 
/usr/bin/env python\n\"\"\"This module implements the URI specification defined in RFC 2396\n\nReferences:\n\"\"\"\nimport string\nimport os\nimport os.path\nimport sys\nimport warnings\n\nfrom types import UnicodeType, StringType\nfrom pyslet.rfc1738 import *\nimport pyslet.vfs as vfs\n\n\nclass URIException(Exception):\n pass\n\n\nclass URIRelativeError(URIException):\n pass\n\n\ndef IsUpAlpha(c):\n return c and (ord(c) >= 0x41 and ord(c) <= 0x5A)\n\n\ndef IsLowAlpha(c):\n return c and (ord(c) >= 0x61 and ord(c) <= 0x7A)\n\n\ndef IsAlpha(c):\n return IsUpAlpha(c) or IsLowAlpha(c)\n\n\ndef IsDigit(c):\n return c and (ord(c) >= 0x30 and ord(c) <= 0x39)\n\n\ndef IsAlphaNum(c):\n return IsUpAlpha(c) or IsLowAlpha(c) or IsDigit(c)\n\n\ndef IsReserved(c):\n # ;/?:@&=+$,\n return c and ord(c) in (0x3B, 0x2F, 0x3F, 0x3A, 0x40, 0x26, 0x3D, 0x2B, 0x24, 0x2C)\n\n\ndef IsUnreserved(c):\n return IsAlphaNum(c) or IsMark(c)\n\n\ndef IsMark(c):\n # -_.!~*'()\n return c and ord(c) in (0x2D, 0x5F, 0x2E, 0x21, 0x7E, 0x2A, 0x27, 0x28, 0x29)\n\n\ndef IsHex(c):\n return c and (IsDigit(c) or (ord(c) >= 0x41 and ord(c) <= 0x46) or (ord(c) >= 0x61 and ord(c) <= 0x66))\n\n\ndef IsControl(c):\n return c and (ord(c) < 0x20 or ord(c) == 0x7F)\n\n\ndef IsSpace(c):\n return c and ord(c) == 0x20\n\n\ndef IsDelims(c):\n return c and ord(c) in (0x3C, 0x3E, 0x23, 0x25, 0x22)\n\n\ndef IsUnwise(c):\n return c and ord(c) in (0x7B, 0x7D, 0x7C, 0x5C, 0x5E, 0x5B, 0x5D, 0x60)\n\n\ndef IsSchemeChar(c):\n return IsAlphaNum(c) or (c and ord(c) in (0x2B, 0x2D, 0x2E))\n\n\ndef IsAuthorityReserved(c):\n return (c and ord(c) in (0x3B, 0x3A, 0x40, 0x3F, 0x2F))\n\n\ndef ParseURIC(source, pos=0):\n \"\"\"Parse the source string (starting from pos) and return the number\n of URI characters (uric) parsed\"\"\"\n uric = 0\n mode = None\n while pos < len(source):\n c = source[pos]\n pos += 1\n if mode is None:\n if IsReserved(c) or IsUnreserved(c):\n uric += 1\n elif ord(c) == 0x25: # % escape\n mode = '%'\n else:\n break\n elif mode == '%':\n if IsHex(c):\n mode = c\n else:\n break\n else:\n if IsHex(c):\n mode = None\n uric += 3\n else:\n break\n return uric\n\n\ndef ParseScheme(octets):\n pos = 0\n scheme = None\n while pos < len(octets):\n c = octets[pos]\n if (pos and IsSchemeChar(c)) or IsAlpha(c):\n pos += 1\n else:\n if ord(c) == 0x3A:\n # we have the scheme\n scheme = octets[0:pos]\n break\n return scheme\n\n\ndef CanonicalizeData(source):\n \"\"\"Returns the canonical form of *source* string.\n\n The canonical form is the same string but any unreserved characters\n represented as hex escapes in source are unencoded and any unescaped\n characters that are neither reserved nor unreserved are escaped.\"\"\"\n result = []\n pos = 0\n while pos < len(source):\n c = source[pos]\n if c == \"%\":\n escape = source[pos + 1:pos + 3]\n c = chr(int(escape, 16))\n if IsUnreserved(c):\n result.append(c)\n else:\n result.append(\"%\")\n result.append(escape)\n pos += 3\n elif not (IsUnreserved(c) or IsReserved(c)):\n result.append(\"%%%02X\" % ord(c))\n pos += 1\n else:\n result.append(c)\n pos += 1\n return string.join(result, '')\n\n\ndef EscapeData(source, reservedFunction=IsReserved):\n result = []\n for c in source:\n if reservedFunction(c) or not (IsUnreserved(c) or IsReserved(c)):\n # short-circuit boolean means we don't evaluate IsReserved twice in\n # default case\n result.append(\"%%%02X\" % ord(c))\n else:\n result.append(c)\n return string.join(result, '')\n\n\ndef UnescapeData(source):\n data = []\n mode = None\n pos = 0\n while pos < 
len(source):\n c = source[pos]\n pos += 1\n if mode is None:\n if ord(c) == 0x25:\n mode = '%'\n else:\n data.append(c)\n elif mode == '%':\n if IsHex(c):\n mode = c\n else:\n data.append('%')\n data.append(c)\n mode = None\n else:\n if IsHex(c):\n data.append(chr(int(mode + c, 16)))\n else:\n data.append('%')\n data.append(mode)\n mode = None\n return string.join(data, '')\n\n\ndef SplitServer(authority):\n userinfo = None\n host = None\n port = None\n if authority is not None:\n if authority:\n mode = None\n pos = 0\n while True:\n if pos < len(authority):\n c = authority[pos]\n else:\n c = None\n if mode is None:\n if c is None:\n host = authority\n break\n elif ord(c) == 0x40:\n userinfo = authority[:pos]\n mode = 'h'\n hStart = pos + 1\n elif ord(c) == 0x3A:\n # could be in userinfo or start of port\n host = authority[:pos]\n mode = 'p'\n pStart = pos + 1\n pos += 1\n elif mode == 'h':\n if c is None:\n host = authority[hStart:]\n break\n elif ord(c) == 0x3A:\n host = authority[hStart:pos]\n mode = 'p'\n pStart = pos + 1\n pos += 1\n elif mode == 'p':\n if c is None:\n port = authority[pStart:]\n break\n elif ord(c) == 0x40 and userinfo is None:\n # must have been username:pass@\n userinfo = authority[:pos]\n host = None\n mode = 'h'\n hStart = pos + 1\n elif not IsDigit(c):\n if userinfo is None:\n # probably username:pass...\n host = None\n mode = 'u'\n else:\n # userinfo@host:123XX - bad port, stop parsing\n port = authority[pStart:pos]\n break\n pos += 1\n elif mode == 'u':\n # username:pass...\n if c is None:\n userinfo = authority\n host = ''\n break\n elif ord(c) == 0x40:\n userinfo = authority[:pos]\n mode = 'h'\n hStart = pos + 1\n pos += 1\n else:\n host = ''\n return userinfo, host, port\n\n\ndef IsPathSegmentReserved(c):\n return (c and ord(c) in (0x2F, 0x3B, 0x3D, 0x3F))\n\n\ndef SplitPath(path, absPath=True):\n segments = []\n if path:\n pos = 0\n if absPath:\n segStart = None\n else:\n segStart = 0\n while True:\n if pos < len(path):\n c = path[pos]\n if ord(c) == 0x2F:\n if segStart is not None:\n segments.append(path[segStart:pos])\n segStart = pos + 1\n pos += 1\n else:\n if segStart is not None:\n segments.append(path[segStart:pos])\n break\n elif not absPath:\n # relative paths always have an empty segment\n segments.append('')\n return segments\n\n\ndef SplitAbsPath(absPath):\n return SplitPath(absPath, True)\n\n\ndef SplitRelPath(relPath):\n return SplitPath(relPath, False)\n\n\ndef NormalizeSegments(pathSegments):\n \"\"\"Normalizes a list of pathSegments, as returned by Split*Path methods.\n\n Normalizing follows the rules for resolving relative URI paths, './'\n and trailing '.' are removed, 'seg/../' and trailing seg/.. are\n also removed.\"\"\"\n i = 0\n while i < len(pathSegments):\n if pathSegments[i] == '.':\n if i + 1 >= len(pathSegments):\n pathSegments[i] = ''\n else:\n del pathSegments[i]\n elif pathSegments[i] == '..' and (i > 0 and pathSegments[i - 1] != '..'):\n if i + 1 >= len(pathSegments):\n pathSegments[i] = ''\n del pathSegments[i - 1]\n else:\n del pathSegments[i]\n del pathSegments[i - 1]\n i -= 1\n else:\n i += 1\n if pathSegments and pathSegments[-1] == '..':\n # special case of trailing '..' 
gets an extra slash for consistency\n pathSegments.append('')\n\n\ndef RelativizeSegments(pathSegments, baseSegments):\n result = []\n pos = 0\n while pos < len(baseSegments):\n if result:\n result = ['..'] + result\n else:\n if pos >= len(pathSegments) or baseSegments[pos] != pathSegments[pos]:\n result = result + pathSegments[pos:]\n pos = pos + 1\n if not result and len(pathSegments) > len(baseSegments):\n # full match but pathSegments is longer\n return pathSegments[len(baseSegments) - 1:]\n elif result == ['']:\n return ['.'] + result\n else:\n return result\n\n\ndef MakeRelPathAbs(absPath, basePath):\n \"\"\"Return absPath relative to basePath\"\"\"\n pathSegments = SplitAbsPath(absPath)\n NormalizeSegments(pathSegments)\n baseSegments = SplitAbsPath(basePath)\n NormalizeSegments(baseSegments)\n result = RelativizeSegments(pathSegments, baseSegments)\n return string.join(result, '/')\n\n\ndef MakeRelPathRel(relPath, baseRelPath):\n \"\"\"Return relPath relative to baseRelPath\"\"\"\n pathSegments = SplitRelPath(relPath)\n NormalizeSegments(pathSegments)\n baseSegments = SplitRelPath(baseRelPath)\n NormalizeSegments(baseSegments)\n # At this point there are no '.' components, but there may be leading '..'\n i = 0\n while i < len(pathSegments):\n if pathSegments[i] == '..':\n i += 1\n else:\n break\n j = 0\n while j < len(baseSegments):\n if baseSegments[j] == '..':\n j += 1\n else:\n break\n if j > i:\n i = j\n if i:\n # we have leading '..' components, add a common path prefix and\n # re-normalize\n pathSegments = ['x'] * i + pathSegments\n NormalizeSegments(pathSegments)\n baseSegments = ['x'] * i + baseSegments\n NormalizeSegments(baseSegments)\n result = RelativizeSegments(pathSegments, baseSegments)\n return string.join(result, '/')\n\n\ndef SplitPathSegment(segment):\n pchar = ''\n params = []\n pos = 0\n mode = None\n while True:\n if pos < len(segment):\n c = segment[pos]\n else:\n c = None\n if mode is None:\n if c is None:\n pchar = segment\n break\n elif ord(c) == 0x3B:\n mode = ';'\n pchar = segment[:pos]\n pStart = pos + 1\n pos += 1\n elif mode == ';':\n if c is None:\n params.append(segment[pStart:])\n break\n elif ord(c) == 0x3B:\n params.append(segment[pStart:pos])\n pStart = pos + 1\n pos += 1\n return pchar, params\n\n\ndef IsQueryReserved(c):\n return (c and ord(c) in (0x3B, 0x2F, 0x3F, 0x3A, 0x40, 0x26, 0x3D, 0x2B, 0x2C, 0x24))\n\n\ndef EncodeUnicodeURI(uSrc):\n \"\"\"Takes a unicode string that is supposed to be a URI and returns an octent string.\n\n The encoding algorithm used is the same as the one adopted by HTML: utf-8\n and then %-escape. 
This is not part of the RFC standard which only defines\n the behaviour for streams of octets.\"\"\"\n octets = []\n for c in uSrc:\n if ord(c) > 0x7F:\n octets = octets + map(lambda x: \"%%%2X\" %\n ord(x), c.encode('UTF-8'))\n clean = False\n else:\n octets.append(chr(ord(c)))\n return string.join(octets, '')\n\n\nclass URI(object):\n\n \"\"\"Class to represent URI Reference.\"\"\"\n\n def __init__(self, octets):\n if type(octets) is UnicodeType:\n octets = EncodeUnicodeURI(octets)\n uriLen = ParseURIC(octets)\n self.octets = octets[0:uriLen]\n \"\"\"The octet string representing this URI.\"\"\"\n self.fragment = None\n \"\"\"The fragment string that was appended to the URI or None if no fragment was given.\"\"\"\n if uriLen < len(octets):\n if ord(octets[uriLen]) == 0x23:\n self.fragment = octets[uriLen + 1:]\n else:\n raise URIException(\n \"URI incompletely parsed from octets: %s\" % octets)\n self.scheme = ParseScheme(self.octets)\n \"\"\"The URI scheme, if present.\"\"\"\n self.schemeSpecificPart = None\n \"\"\"The scheme specific part of the URI.\"\"\"\n self.opaquePart = None\n \"\"\"None if the URI is hierarchical, otherwise the same as schemeSpecificPart.\"\"\"\n self.authority = None\n \"\"\"The authority (e.g., host name) of a hierarchical URI\"\"\"\n self.absPath = None\n \"\"\"The absolute path of a hierarchical URI (None if the path is relative)\"\"\"\n self.relPath = None\n \"\"\"The relative path of a hierarchical URI (None if the path is absolute)\"\"\"\n self.query = None\n \"\"\"The optional query associated with a hierarchical URI.\"\"\"\n if self.scheme is not None:\n self.schemeSpecificPart = self.octets[len(self.scheme) + 1:]\n if self.IsAbsolute():\n self.relPath = None\n self.ParseSchemeSpecificPart()\n else:\n self.opaquePart = None\n self.ParseRelativeURI()\n\n def ParseSchemeSpecificPart(self):\n pos = 0\n mode = ':'\n self.opaquePart = self.authority = self.absPath = self.query = None\n while True:\n if pos < len(self.schemeSpecificPart):\n c = self.schemeSpecificPart[pos]\n else:\n c = None\n if mode == ':':\n # Is this a hier_part or opaque_part?\n if c is None:\n # Empty scheme-specific part; neither opaque nor\n # hierarchical\n break\n elif ord(c) == 0x2F:\n mode = '/'\n else:\n self.opaquePart = self.schemeSpecificPart\n break\n pos += 1\n elif mode == '/':\n # Is this a net_path or abs_path\n if c is None:\n # Single '/' is an abs_path\n self.absPath = '/'\n break\n elif ord(c) == 0x2F:\n mode = 'a'\n aStart = pos + 1\n elif ord(c) == 0x3F:\n # special case, abs_path is /\n self.absPath = '/'\n mode = '?'\n qStart = pos + 1\n else:\n mode = 'p'\n pStart = pos - 1\n pos += 1\n elif mode == 'a':\n # parse authority\n if c is None:\n self.authority = self.schemeSpecificPart[aStart:pos]\n break\n elif ord(c) == 0x2F:\n self.authority = self.schemeSpecificPart[aStart:pos]\n mode = 'p'\n pStart = pos\n elif ord(c) == 0x3F:\n self.authority = self.schemeSpecificPart[aStart:pos]\n mode = '?'\n qStart = pos + 1\n pos += 1\n elif mode == 'p':\n # parse absPath\n if c is None:\n self.absPath = self.schemeSpecificPart[pStart:pos]\n break\n elif ord(c) == 0x3F:\n self.absPath = self.schemeSpecificPart[pStart:pos]\n mode = '?'\n qStart = pos + 1\n pos += 1\n elif mode == '?':\n # query string is everything up to the end of the URI\n if c is None:\n self.query = self.schemeSpecificPart[qStart:pos]\n break\n pos += 1\n\n def ParseRelativeURI(self):\n pos = 0\n self.authority = self.absPath = self.relPath = self.query = None\n mode = None\n while True:\n if pos < 
len(self.octets):\n c = self.octets[pos]\n else:\n c = None\n if mode is None:\n # net_path, abs_path or rel_path ?\n if c is None:\n # An empty URI is a same document reference\n self.relPath = ''\n break\n elif ord(c) == 0x2F:\n mode = '/'\n elif ord(c) == 0x3F:\n # the RFC is ambiguous here, seems relPath can be empty\n # afterall\n self.relPath = ''\n mode = '?'\n qStart = pos + 1\n else:\n mode = 'r'\n rStart = pos\n pos += 1\n elif mode == '/':\n # Is this a net_path or abs_path\n if c is None:\n # Single '/' is an abs_path\n self.absPath = '/'\n break\n elif ord(c) == 0x2F:\n mode = 'a'\n aStart = pos + 1\n elif ord(c) == 0x3F:\n # special case, abs_path is /\n self.absPath = '/'\n mode = '?'\n qStart = pos + 1\n else:\n mode = 'p'\n pStart = pos - 1\n pos += 1\n elif mode == 'a':\n # parse authority\n if c is None:\n self.authority = self.octets[aStart:pos]\n break\n elif ord(c) == 0x2F:\n self.authority = self.octets[aStart:pos]\n mode = 'p'\n pStart = pos\n elif ord(c) == 0x3F:\n self.authority = self.octets[aStart:pos]\n mode = '?'\n qStart = pos + 1\n pos += 1\n elif mode == 'p':\n # parse absPath\n if c is None:\n self.absPath = self.octets[pStart:pos]\n break\n elif ord(c) == 0x3F:\n self.absPath = self.octets[pStart:pos]\n mode = '?'\n qStart = pos + 1\n pos += 1\n elif mode == 'r':\n # parse relPath\n if c is None:\n self.relPath = self.octets[rStart:pos]\n break\n elif ord(c) == 0x3F:\n self.relPath = self.octets[rStart:pos]\n mode = '?'\n qStart = pos + 1\n pos += 1\n elif mode == '?':\n # query string is everything up to the end of the URI\n if c is None:\n self.query = self.octets[qStart:pos]\n break\n pos += 1\n\n def GetFileName(self):\n \"\"\"Returns the file name associated with this resource or None if the\n URL scheme does not have the concept. By default the file name is\n extracted from the last component of the path. Note the subtle\n difference between returning None and returning an empty string\n (indicating that the URI represents a directory-like object).\"\"\"\n if self.absPath:\n segments = SplitAbsPath(self.absPath)\n elif self.relPath:\n segments = SplitRelPath(self.relPath)\n else:\n segments = []\n fileName = None\n # we loop around until we have a non-empty fileName\n while fileName is None:\n if segments:\n fileName = segments.pop()\n else:\n break\n if fileName is not None:\n fileName = unicode(UnescapeData(fileName), 'utf-8')\n return fileName\n else:\n return None\n\n def GetCanonicalRoot(self):\n \"\"\"Returns a new URI comprised of the scheme and authority only.\n\n Only valid for absolute URIs.\"\"\"\n if self.IsAbsolute():\n canonicalURI = self.canonicalize()\n result = [canonicalURI.scheme, ':']\n if canonicalURI.authority is not None:\n result.append('//')\n result.append(canonicalURI.authority)\n return URIFactory.URI(string.join(result, ''))\n else:\n return None\n\n def Resolve(self, base, currentDocRef=None):\n \"\"\"Resolves a (relative) URI relative to base returning a new\n :py:class:`URI` instance\n\n If the base URI is also relative then the result is a relative URI,\n otherwise the result is an absolute URI. The RFC does not actually go\n into the procedure for combining relative URIs but if B is an absolute\n URI and R1 and R2 are relative URIs then using the resolve operator::\n\n U1 = B [*] R1\n U2 = U1 [*] R2\n U2 = ( B [*] R1 ) [*] R2\n\n The last expression prompts the issue of associativity, in other words,\n is the following expression also valid? 
::\n\n U2 = B [*] ( R1 [*] R2 )\n\n For this to work it must be possible to use the resolve operator to\n combine two relative URIs to make a third, which is what we allow\n here.\n\n The optional *currentDocRef* allows you to handle the special case of\n resolving the empty URI. Strictly speaking, fragments are not part of\n the URI itself so a relative URI consisting of the empty string, or a\n relative URI consisting of just a fragment both refer to the current\n document. By default, *currentDocRef* is assumed to be the same as\n *base* but there are cases where the base URI is not the same as the URI\n used to originally retrieve the document and the optional parameter\n allows you to cope with those cases.\"\"\"\n if currentDocRef is None:\n currentDocRef = base\n if not(self.absPath or self.relPath) and self.scheme is None and self.authority is None and self.query is None:\n # current document reference, just change the fragment\n if self.fragment is None:\n return URIFactory.URI(currentDocRef.octets)\n else:\n return URIFactory.URI(currentDocRef.octets + '#' + self.fragment)\n if self.scheme is not None:\n return URIFactory.URI(str(self))\n scheme = base.scheme\n authority = None\n if self.authority is None:\n authority = base.authority\n if self.absPath is None:\n if base.absPath is not None:\n segments = SplitAbsPath(base.absPath)[:-1]\n segments = segments + SplitRelPath(self.relPath)\n NormalizeSegments(segments)\n absPath = '/' + string.join(segments, '/')\n relPath = None\n else:\n segments = SplitRelPath(base.relPath)[:-1]\n segments = segments + SplitRelPath(self.relPath)\n NormalizeSegments(segments)\n absPath = None\n relPath = string.join(segments, '/')\n if relPath == '':\n # degenerate case, as we are relative we won't prefix\n # with /\n relPath = './'\n else:\n absPath = self.absPath\n relPath = None\n else:\n authority = self.authority\n absPath = self.absPath\n relPath = None\n result = []\n if scheme is not None:\n result.append(scheme)\n result.append(':')\n if authority is not None:\n result.append('//')\n result.append(authority)\n if absPath is not None:\n result.append(absPath)\n elif relPath is not None:\n result.append(relPath)\n if self.query is not None:\n result.append('?')\n result.append(self.query)\n if self.fragment is not None:\n result.append('#')\n result.append(self.fragment)\n return URIFactory.URI(string.join(result, ''))\n\n def Relative(self, base):\n \"\"\"Evaluates the Relative operator, returning the URI expressed relative to base.\n\n As we also allow the Resolve method for relative paths it makes sense\n for the Relative operator to also be defined::\n\n R3 = R1 [*] R2\n R3 [/] R1 = R2\n\n Note that there are some restrictions.... ::\n\n U = B [*] R\n\n If R is absolute, or simply more specified than B on the following scale:\n\n absolute URI > authority > absolute path > relative path\n\n then U = R regardless of the value of B and therefore::\n\n U [/] B = U if B is less specified than U.\n\n Also note that if U is a relative URI then B cannot be absolute. In fact\n B must always be less than, or equally specified to U because B is the\n base URI from which U has been derived. ::\n\n U [/] B = undefined if B is more specified than U\n\n Therefore the only interesting cases are when B is equally specified to\n U. 
To give a concrete example::\n\n U = /HD/User/setting.txt\n B = /HD/folder/file.txt\n\n /HD/User/setting.txt [\\] /HD/folder/file.txt = ../User/setting.txt\n /HD/User/setting.txt = /HD/folder/file.txt [*] ../User/setting.txt\n\n And for relative paths::\n\n U = User/setting.txt\n B = User/folder/file.txt\n\n User/setting.txt [\\] User/folder/file.txt = ../setting.txt\n User/setting.txt = User/folder/file.txt [*] ../setting.txt\t\t\n \"\"\"\n if self.opaquePart is not None:\n # This is not a hierarchical URI so we can ignore base\n return URIFactory.URI(str(self))\n if self.scheme is None:\n if base.scheme is not None:\n raise URIRelativeError(str(base))\n elif base.scheme is None or self.scheme.lower() != base.scheme.lower():\n return URIFactory.URI(str(self))\n # continuing with equal schemes; scheme will not be shown in result\n if self.authority is None:\n if base.authority is not None:\n raise URIRelativeError(str(base))\n if self.authority != base.authority:\n authority = self.authority\n absPath = self.absPath\n relPath = self.relPath\n else:\n # equal or empty authorities\n authority = None\n if self.absPath is None:\n if base.absPath is not None:\n raise URIRelativeError(str(base))\n absPath = None\n if self.relPath is None:\n raise URIRelativeError(str(base))\n if base.relPath is None:\n relPath = self.relPath\n else:\n # two relative paths, calculate self relative to base\n # we add a common leading segment to re-use the absPath\n # routine\n relPath = MakeRelPathRel(self.relPath, base.relPath)\n elif base.absPath is None:\n return URIFactory.URI(str(self))\n else:\n # two absolute paths, calculate self relative to base\n absPath = None\n relPath = MakeRelPathAbs(self.absPath, base.absPath)\n # todo: /a/b relative to /c/d really should be '/a/b' and not ../a/b\n # in particular, drive letters look wrong in relative paths:\n # ../C:/Program%20Files/\n result = []\n if authority is not None:\n result.append('//')\n result.append(authority)\n if absPath is not None:\n result.append(absPath)\n elif relPath is not None:\n result.append(relPath)\n if self.query is not None:\n result.append('?')\n result.append(self.query)\n if self.fragment is not None:\n result.append('#')\n result.append(self.fragment)\n return URIFactory.URI(string.join(result, ''))\n\n def __str__(self):\n r\"\"\"URI are always returned as a string (of bytes), not a unicode string.\n\n The reason for this restriction is best illustrated with an example:\n\n The URI %E8%8B%B1%E5%9B%BD.xml is a UTF-8 and URL-encoded path segment\n using the Chinese word for United Kingdom. When we remove the\n URL-encoding we get the string '\\\\xe8\\\\x8b\\\\xb1\\\\xe5\\\\x9b\\\\xbd.xml'\n which must be interpreted with utf-8 to get the intended path segment\n value: u'\\\\u82f1\\\\u56fd.xml'. 
However, if the URL was marked as being a\n unicode string of characters then this second stage would not be carried\n out and the result would be the unicode string\n u'\\\\xe8\\\\x8b\\\\xb1\\\\xe5\\\\x9b\\\\xbd', which is a meaningless string of 6\n characters taken from the European Latin-1 character set.\"\"\"\n if self.fragment is not None:\n return self.octets + '#' + self.fragment\n else:\n return self.octets\n\n def __cmp__(self, otherURI):\n \"\"\"Compare this URI against another URI or a string.\"\"\"\n return cmp(str(self), str(otherURI))\n\n def canonicalize(self):\n \"\"\"Returns a canonical form of this URI\"\"\"\n new_uri = []\n if self.scheme is not None:\n new_uri.append(self.scheme.lower())\n new_uri.append(':')\n new_uri.append(self.schemeSpecificPart)\n else:\n # we don't need to look inside the URI\n new_uri.append(self.octets)\n if self.fragment:\n new_uri.append('#')\n new_uri.append(self.fragment)\n return URIFactory.URI(string.join(new_uri, ''))\n\n def Match(self, otherURI): # noqa\n warnings.warn(\"URI.Match is deprecated, use URI.match instead\",\n DeprecationWarning,\n stacklevel=2)\n return self.match(otherURI)\n\n def match(self, otherURI):\n \"\"\"Compares this URI against otherURI returning True if they match.\"\"\"\n return str(self.canonicalize()) == str(otherURI.canonicalize())\n\n def IsAbsolute(self):\n \"\"\"Returns True if this URI is absolute, i.e., fully specified with a scheme name.\"\"\"\n return self.scheme is not None\n\n\nclass URIFactoryClass:\n\n \"\"\"A factory class that contains methods for creating :class:`URI` instances.\"\"\"\n\n def __init__(self):\n self.urlClass = {}\n\n def Register(self, scheme, uriClass):\n self.urlClass[scheme.lower()] = uriClass\n\n def URI(self, octets):\n \"\"\"Creates an instance of :class:`URI` from a string of octets.\"\"\"\n scheme = ParseScheme(octets)\n if scheme is not None:\n scheme = scheme.lower()\n return self.urlClass.get(scheme, URI)(octets)\n\n def URLFromPathname(self, path):\n \"\"\"Converts a local file path into a :class:`URI` instance.\n\n If the path is not absolute it is made absolute by resolving it relative\n to the current working directory before converting it to a URI.\n\n Under Windows, the URL is constructed according to the recommendations\n on this blog post:\n http://blogs.msdn.com/b/ie/archive/2006/12/06/file-uris-in-windows.aspx\n So UNC paths are mapped to both the network location and path components\n of the resulting URI.\"\"\"\n host = ''\n segments = []\n if not os.path.isabs(path):\n path = os.path.join(os.getcwd(), path)\n # print path\n drive, head = os.path.splitdrive(path)\n while head:\n newHead, tail = os.path.split(head)\n if newHead == head:\n # We are unable to split any more from head\n break\n else:\n segments[0:0] = [tail]\n if newHead == '\\\\\\\\':\n # This is the unusual case of the UNC path, first segment\n # is machine\n host = segments[0]\n del segments[0]\n break\n head = newHead\n if drive:\n segments[0:0] = [drive]\n # At this point we need to convert to octets\n c = sys.getfilesystemencoding()\n if type(host) is UnicodeType:\n host = EscapeData(host.encode(c), IsAuthorityReserved)\n for i in xrange(len(segments)):\n # we always use utf-8 in URL path segments to make URLs portable\n if type(segments[i]) is UnicodeType:\n segments[i] = EscapeData(\n segments[i].encode('utf-8'), IsPathSegmentReserved)\n else:\n segments[i] = EscapeData(\n unicode(segments[i], c).encode('utf-8'), IsPathSegmentReserved)\n return FileURL('file://%s/%s' % (host, 
string.join(segments, '/')))\n\n def URLFromVirtualFilePath(self, path):\n \"\"\"Converts a virtual file path into a :class:`URI` instance.\"\"\"\n host = path.fsName\n segments = []\n if not path.isabs():\n path = path.abspath()\n drive, head = path.splitdrive()\n uncFlag = path.IsUNC()\n while head:\n newHead, tail = head.split()\n if newHead == head:\n # We are unable to split any more from head\n if uncFlag and segments:\n # This is the unusual case of the UNC path, first segment\n # is machine\n if host:\n raise ValueError(\n \"UNC hosts cannot be specified in named file systems.\")\n host = str(segments[0])\n del segments[0]\n break\n else:\n segments[0:0] = [tail]\n head = newHead\n if drive:\n segments[0:0] = [drive]\n # At this point we need to convert to octets\n c = sys.getfilesystemencoding()\n if host:\n host = EscapeData(host, IsAuthorityReserved)\n for i in xrange(len(segments)):\n # we always use utf-8 in URL path segments to make URLs portable\n segments[i] = EscapeData(\n unicode(segments[i]).encode('utf-8'), IsPathSegmentReserved)\n return FileURL('file://%s/%s' % (host, string.join(segments, '/')))\n\n def Resolve(self, b, r):\n \"\"\"Evaluates the resolve operator B [*] R, resolving R relative to B\n\n The input parameters are converted to URI objects if necessary.\"\"\"\n if not isinstance(b, URI):\n b = self.URI(b)\n if not isinstance(r, URI):\n r = self.URI(r)\n return r.Resolve(b)\n\n def Relative(self, u, b):\n \"\"\"Evaluates the relative operator U [/] B, returning U relative to B\n\n The input parameters are converted to URI objects if necessary.\"\"\"\n if not isinstance(u, URI):\n u = self.URI(u)\n if not isinstance(b, URI):\n b = self.URI(b)\n return u.Relative(b)\n\n\nclass ServerBasedURL(URI):\n\n DEFAULT_PORT = None # : the default port for this type of URL\n\n def __init__(self, octets):\n super(ServerBasedURL, self).__init__(octets)\n self.userinfo, self.host, self.port = SplitServer(self.authority)\n\n def canonicalize(self):\n \"\"\"Returns a canonical form of this URI\"\"\"\n new_uri = []\n if self.scheme is not None:\n new_uri.append(self.scheme.lower())\n new_uri.append(':')\n if self.authority is not None:\n new_uri.append('//')\n if self.userinfo is not None:\n new_uri.append(self.userinfo)\n new_uri.append('@')\n new_uri.append(self.host.lower())\n if self.port: # port could be an empty string\n port = int(self.port)\n if port != self.DEFAULT_PORT:\n new_uri.append(':')\n new_uri.append(\"%i\" % int(self.port))\n if self.absPath is not None:\n new_uri.append(self.absPath)\n elif self.relPath is not None:\n new_uri.append(self.relPath)\n if self.query is not None:\n new_uri.append('?')\n new_uri.append(self.query)\n if self.fragment is not None:\n new_uri.append('#')\n new_uri.append(self.fragment)\n return URIFactory.URI(string.join(new_uri, ''))\n\n\nclass FileURL(ServerBasedURL):\n\n \"\"\"Represents the FileURL defined by RFC1738\"\"\"\n\n def __init__(self, octets='file:///'):\n super(FileURL, self).__init__(octets)\n\n def GetPathname(self, force8Bit=False):\n \"\"\"Returns the system path name corresponding to this file URL\n\n Note that if the system supports unicode file names (as reported by\n os.path.supports_unicode_filenames) then GetPathname also returns a\n unicode string, otherwise it returns an 8-bit string encoded in the\n underlying file system encoding.\n\n There are some libraries (notably sax) that will fail when passed files\n opened using unicode paths. 
The force8Bit flag can be used to force\n GetPathname to return a byte string encoded using the native file system\n encoding.\"\"\"\n c = sys.getfilesystemencoding()\n if os.path.supports_unicode_filenames and not force8Bit:\n decode = lambda s: unicode(UnescapeData(s), 'utf-8')\n else:\n decode = lambda s: unicode(UnescapeData(s), 'utf-8').encode(c)\n if self.host and hasattr(os.path, 'splitunc'):\n uncRoot = decode('\\\\\\\\%s' % self.host)\n else:\n uncRoot = decode('')\n segments = SplitAbsPath(self.absPath)\n # ignore parameters in file system\n path = string.join(map(decode, segments), os.sep)\n if uncRoot:\n # If we have a UNC root then we will have an absolute path\n path = string.join((uncRoot, path), os.sep)\n elif not os.path.isabs(path):\n # Otherwise, prepend the sep if we're not absolute (most likely UNIX)\n # Note that drive designations do not need a prefix\n path = string.join(('', path), os.sep)\n return path\n\n def GetVirtualFilePath(self):\n \"\"\"Returns a virtual file path corresponding to this file URL.\"\"\"\n decode = lambda s: unicode(UnescapeData(s), 'utf-8')\n if self.host:\n fs = vfs.GetFileSystemByName(self.host)\n if fs is None:\n if vfs.defaultFS.supports_unc:\n fs = defaultNS\n uncRoot = decode('\\\\\\\\%s' % self.host)\n else:\n raise ValueError(\n \"Unrecognized host in file URL: %s\" % self.host)\n else:\n uncRoot = decode('')\n else:\n fs = vfs.defaultFS\n uncRoot = decode('')\n segments = SplitAbsPath(self.absPath)\n # ignore parameters in file system\n path = string.join(map(decode, segments), fs.sep)\n if uncRoot:\n # If we have a UNC root then we will have an absolute path\n vpath = fs(string.join((uncRoot, path), fs.sep))\n else:\n vpath = fs(path)\n if not vpath.isabs():\n # Prepend the sep if we're not absolute (most likely UNIX) because\n # it is only drive designations that do not need a prefix\n vpath = fs(string.join(('', path), fs.sep))\n return vpath\n\n\nURIFactory = URIFactoryClass()\nURIFactory.Register('file', FileURL)\n","sub_path":"pyslet/rfc2396.py","file_name":"rfc2396.py","file_ext":"py","file_size_in_byte":40839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"257995513","text":"from pymongo import MongoClient\nimport requests\nfrom preprocessing import parseTweets\n\nMONGO_HOST='mongodb://localhost/twootdb'\n\nclient = MongoClient(MONGO_HOST)\ndb = client.twootdb \ncollections = ['mondaymotivation', 'DayaAfterChristmas', 'GoldenGlobes', 'JamesHarrison', 'amtrak']\n\ncollectionsWithStopwords, collectionsWithoutStopwords, tweetsIds = parseTweets()\n\ntweetsWithLabels = {}\ncount = 0\nfor i,collection in enumerate(collectionsWithoutStopwords):\n for j,tweet in enumerate(collection):\n try:\n print(tweet) \n r = requests.post(\"http://text-processing.com/api/sentiment/\", data={'text':tweet })\n print(r.status_code, r.reason)\n print(r.json())\n count += 1\n up=db[collections[i]].update_one(\n {\"_id\": tweetsIds[i][j] },\n {\"$set\": {\"label\": r.json()['label'], \"positive_probability\": r.json()['probability']['pos'] , \"negative_probability\": r.json()['probability']['neg'] ,\"neutral_probability\": r.json()['probability']['neutral']}})\n print(up.matched_count)\n print(count)\n except: #(r.status_code == 400):\n #Empty tweet or other problem\n count += 1\n up=db[collections[i]].update_one(\n {\"_id\": tweetsIds[i][j] },\n {\"$set\": {\"label\": 'error', \"positive_probability\": 0 , \"negative_probability\": 0 ,\"neutral_probability\": 0}})\n print(up.matched_count)\n 
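            # Editorial note: failures (e.g. HTTP 400 for an empty tweet) still mark the
            # document, with label 'error' and zeroed probabilities, so a later pass
            # could re-query only the 'error' rows instead of the whole collection.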
print(count)\n    \n","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"455872894","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 19 11:30:30 2017\n\n@author: Luis Carlos Prieto\nluisc.prieto@gmail.com\n\"\"\"\n\n\nimport log\n\nimport ta_MongoDB\nimport ta_SQLServer\n\nimport ta_DataCleaner\nimport ta_twitter\n\n\ndef main():\n    \n    # General variables...\n    Errores = 0 \n    miLog = log.Log()\n    GestorMongoDB = ta_MongoDB.MongoDB\n    GestorSQLServer = ta_SQLServer.SQLServer\n    GestorTwitter = ta_twitter.Twitter\n    \n    miLog.Salidaln(\"Welcome to the MBIT PARTIAL load manager, starting services...\")\n    \n    GestorMongoDB = ta_MongoDB.MongoDB()\n    \n    GestorTwitter = ta_twitter.Twitter(GestorMongoDB.m_db)\n    GestorTwitter.BuscarNuevos()\n    \n    \n    try:\n        # Create the SQL Server manager\n        GestorSQLServer = ta_SQLServer.SQLServer()\n        \n    except Exception as e:\n        Errores += 1\n        miLog.Salidaln(\"ERROR while creating the SQL Server manager...\" )\n        miLog.Salidaln(e.args)\n        return -1\n    \n    try:\n\t\t # Once the database connections are secured, load the tweets in JSON format\n        GestorMongoDB.CargarJSON()\n        \n        # Create the DataCleaner and dump the results to SQL Server\n        DataCleaner = ta_DataCleaner.DataCleaner(GestorMongoDB.m_ListaJSON)\n        \n        DataCleaner.AnalisisTweetsParcial()\n        \n        GestorSQLServer.ParcialTwiteros(DataCleaner.m_ListaTwiteros)\n        GestorSQLServer.ParcialTimeline(DataCleaner.m_ListaTimeline)\n\t\t\n        # Mark the tweets already loaded, to reduce the partial loads from Mongo Azure\n\n        GestorMongoDB.CerrarJSONParcial(DataCleaner.m_ListaTimeline) \n        GestorSQLServer.m_conSQL.close()\n        \n    except Exception as e:\n        Errores += 1\n        miLog.Salidaln(\"ERROR could not create the DataCleaner \")\n        miLog.Salidaln(e.args)\n    \n    \n    if (Errores > 0 ):\n        miLog.Salidaln(\"ERRORS DETECTED\")\n    else:\n        miLog.Salidaln(\"Process finished successfully...\")\n    \n    \n    \n    \n    \nmain()\n    \n    ","sub_path":"ta_completa.py","file_name":"ta_completa.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"566854506","text":"file = open(\"C:\\\\Users\\\\devatendou\\\\Desktop\\\\AdventCode2017\\\\AdventCode5.txt\", 'r')\n\n#Converts all the elements in the file into a list of ints\narray = [int(x) for x in file.read().split('\\n')]\n\nsteps, index = 0, 0\nwhile True:\n    steps += 1\n    temp = index + array[index]\n    if temp < 0 or temp >= len(array):\n        print(steps)\n        break\n    else:\n        temp2 = array[index]\n        if temp2 >= 3: array[index] -= 1\n        else: array[index] += 1\n        index += temp2\n\nfile.close()\n","sub_path":"2017/AdventCode5.2.py","file_name":"AdventCode5.2.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
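The jump loop above is Advent of Code 2017 day 5, part 2: offsets of three or more decrease after the jump, all others increase. A self-contained editorial sketch of the same rule, using the puzzle's small worked example instead of the local input file:

def count_jumps(offsets):
    # Perform jumps, mutating the offset list per the part-2 rule, and
    # count steps until the index leaves the list.
    offsets = list(offsets)
    steps = index = 0
    while 0 <= index < len(offsets):
        jump = offsets[index]
        offsets[index] += -1 if jump >= 3 else 1
        index += jump
        steps += 1
    return steps

print(count_jumps([0, 3, 0, 1, -3]))  # 10, the puzzle's worked example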
+{"seq_id":"376701710","text":"\n'''Exactly: make a small library that lets you analyze the data,\n    extract the acoustic words,\n    save the dictionary, compute the new representations and store them in a numpy file'''\n\nimport os\nimport json\nimport warnings\nimport numpy as np\nimport pickle\n\nfrom settings import EMBEDDING_FOLDERS\n\ndef load_as_binary(filename = './files/no_name.file'):\n    with open(filename, \"rb\") as f:\n        data = pickle.load(f)\n    return data\n\n\ndef save_as_binary(element, filename = './files/no_name.file'):\n    print('saving as pickle binary ...')\n    with open(filename, \"wb\") as f:\n        pickle.dump(element, f, pickle.HIGHEST_PROTOCOL)\n    print('... file saved as', filename)\n\n\ndef compute_metrics_difference(dataset1, dataset2, embedding_name):\n    if(dataset1 is None or dataset2 is None):\n        return None\n    ds1_metrics = dataset1['evaluation_metrics_{}'.format(embedding_name)]\n    ds2_metrics = dataset2['evaluation_metrics_{}'.format(embedding_name)] \n    metrics_difference = {}\n    metrics_difference['purity'] = ds1_metrics['purity'] - ds2_metrics['purity']\n    metrics_difference['adjusted_mutual_info'] = ds1_metrics['adjusted_mutual_info'] - ds2_metrics['adjusted_mutual_info']\n    metrics_difference['adjusted_rand'] = ds1_metrics['adjusted_rand'] - ds2_metrics['adjusted_rand']\n    return metrics_difference\n\ndef make_datasets_report(metrics_differences, fieldname, ds_name_1, ds_name_2):\n    ds1_wins = []\n    ds2_wins = []\n    discarded = []\n    for idx, metric_diff in enumerate(metrics_differences):\n        if metric_diff is None: discarded.append(idx)\n        elif(metric_diff[fieldname]>=0): \n            ds1_wins.append(idx)\n        else: ds2_wins.append(idx)\n    \n    usable_datasets = len(metrics_differences) - len(discarded)\n\n    print('\\n*',fieldname.upper(), \":\\n \")\n    print(\" -\", ds_name_1, \"had better performance in: \")\n    print(ds1_wins)\n    print(len(ds1_wins) ,\"/\", usable_datasets, \"\\n\")\n    print(\" -\", ds_name_2, \"had better performance in \")\n    print(ds2_wins)\n    print(len(ds2_wins) ,\"/\", usable_datasets, \"\\n\")\n    print(\" -\", len(discarded), \"discarded datasets :\") \n    print(discarded, \"\\n\")\n    print(\"numerical difference:\", metrics_differences)\n    \n\n\ndef fix_naming(dataset):\n    '''TODO: remove when I don't use it anymore'''\n    try:\n        dataset['evaluation_metrics_audioset'] = dataset['evaluation_metrics_codebook_audioset']\n    except:\n        dataset = None \n    return dataset\n\n\ndef compare_datasets(datasets1, datasets2, ds_name_1=\"ds1\", ds_name_2=\"ds2\" ):\n    #combined_datasets = [{**ds1, **ds2} for ds1, ds2 in zip(datasets_mean, datasets_codebook)]\n    for embedding_name, _ in EMBEDDING_FOLDERS.items():\n        metrics_differences = [compute_metrics_difference(ds1, ds2, embedding_name) for ds1, ds2 in zip(datasets1, datasets2)]\n        make_datasets_report(metrics_differences, 'purity', ds_name_1, ds_name_2)\n        make_datasets_report(metrics_differences, 'adjusted_mutual_info', ds_name_1, ds_name_2)\n        make_datasets_report(metrics_differences, 'adjusted_rand', ds_name_1, ds_name_2)\n    \n\n\nif __name__ == '__main__':\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\")\n\n        #for codebook_size in [64, 128, 256, 512, 1024, 2048]:\n        for codebook_size in [64, 128, 256, 512]:\n            print(\"\\n================================\")\n            print(\"\\nCodebook size: \", codebook_size)\n\n            #load\n            filename_codebook_loc = \"files/\"+str(codebook_size)+\"_codebook_clusters.file\" \n            #filename_codebook_glob= \"files/\"+str(codebook_size)+\"_global_codebook_clusters.file\" \n            filename_mean = \"files/mean_computed_datasets_clusters.file\" #TODO: change name to mean
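            # Editorial note: each *_clusters.file appears to hold a pickled list of
            # per-dataset dicts, or None where a dataset lacks metrics;
            # compute_metrics_difference() propagates the Nones and
            # make_datasets_report() counts them as discarded.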
\n            datasets_codebook_loc = load_as_binary(filename_codebook_loc)\n            #datasets_codebook_glob = load_as_binary(filename_codebook_glob)\n            datasets_mean = load_as_binary(filename_mean)\n\n            #compare_datasets(datasets_codebook_glob, datasets_codebook_loc, \"global codebook\", \"local codebook\")\n            #compare_datasets(datasets_codebook_glob, datasets_mean, \"global codebook\", \"original mean dataset\")\n            compare_datasets(datasets_codebook_loc, datasets_mean, \"local codebook\", \"original mean dataset\")\n","sub_path":"freesound_processing_pipeline/codebook_utils.py","file_name":"codebook_utils.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"498782490","text":"import unittest\nfrom src.solution import Solution, StringToDecimal, User, Defibrilator, Algorithm\n\nlines = [\n    \"3,879483\",\n    \"43,608177\",\n    \"1\",\n    \"1;Maison de la Prevention Sante;6 rue Maguelone 340000 Montpellier;;3,87952263361082;43,6071285339217\",\n    \"2;Hotel de Ville;1 place Georges Freche 34267 Montpellier;;3,89652239197876;43,5987299452849\",\n    \"3;Zoo de Lunaret;50 avenue Agropolis 34090 Mtp;;3,87388031141133;43,6395872778854\"\n]\n\n\nclass SolutionTest(unittest.TestCase):\n    def test_cannot_be_instanciate_without_lines(self):\n        with self.assertRaises(ValueError):\n            Solution(None, StringToDecimal)\n\n    def test_cannot_be_instanciate_without_a_StringFormator(self):\n        with self.assertRaises(ValueError):\n            Solution(lines, None)\n\n    def test_it_instanciates_a_user(self):\n        expectedLon = 3.879483\n        expectedLat = 43.608177\n\n        user = Solution(lines, StringToDecimal).user()\n\n        assert isinstance(user, User)\n        assert user.lat == expectedLat\n        assert user.lon == expectedLon\n\n    def test_it_creates_an_array_of_defibrilators(self):\n        defibrilators = Solution(lines, StringToDecimal).defibrilators()\n\n        assert len(defibrilators) == 3\n        for defibrilator in defibrilators:\n            assert isinstance(defibrilator, Defibrilator)\n\n    def test_it_returns_the_closest_defibrilator(self):\n        assert Solution(lines, StringToDecimal).getClosest(Algorithm).name() == \"Maison de la Prevention Sante\"\n","sub_path":"defibrillators/tests/integration/test_solution.py","file_name":"test_solution.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"498968472","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\ntry:\n    from io import StringIO\nexcept ImportError:\n    from cStringIO import StringIO\n\n\nclass Printer(object):\n    def write(self, message):\n        sys.stdout.write(message + '\\n')\n\n\nclass RefactoredPrinter(object):\n    # def __init__(self, buffer):\n    #     self.buffer = buffer\n\n    def write(self, message, buffer):\n        buffer.write(message)\n        buffer.write(u'\\n')\n\n\nif __name__ == '__main__':\n    Printer().write('This is right, but may be improved!')\n\n    rfp = RefactoredPrinter()\n\n    rfp.write('This is better', sys.stdout)\n    rfp.write('This is error', sys.stderr)\n\n    st = StringIO()\n    rfp.write(u'here we go', st)\n    print(st.getvalue())\n","sub_path":"PYTHON/course7/principles/solid/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"321418864","text":"# 1: Create the module music_serialize.py.\n# In this module, define a dictionary for your favorite band.\n# Using the json and pickle modules, serialize this dictionary to JSON and to bytes, and print the results to the terminal.\n# Write the results to the files group.json and group.pickle respectively.\n# Use utf-8 encoding for the group.json file.\n\nimport json\nimport pickle\n\nmy_favourtie_band = {\n    'name': 'Metallica',\n    'tracks': ['Cyanide', 'Fuel'],\n    'albums': [{'name': 'Death Magnetic', 'year': 2008},\n               {'name': 'Fuel', 'year': 1986}]\n}\n\nwith open('group.json', 'w', encoding='utf-8') as f:\n    json.dump(my_favourtie_band, f)\n\nwith open('group.pickle', 'wb') as f:\n    pickle.dump(my_favourtie_band, f)\n","sub_path":"1.Introduction_to_Python/Files_and_Encoding/someone/Lesson6/music_serializer.py","file_name":"music_serializer.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
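The snippet above writes group.json and group.pickle but never reads them back. A minimal round-trip check, as an editorial sketch reusing the same file names and dictionary:

import json
import pickle

# Reload both files and confirm the dictionary survived serialization.
with open('group.json', encoding='utf-8') as f:
    assert json.load(f)['name'] == 'Metallica'
with open('group.pickle', 'rb') as f:
    assert pickle.load(f)['tracks'] == ['Cyanide', 'Fuel']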
+{"seq_id":"647747609","text":"#2019/9/15\nclass Solution:\n    def __init__(self):\n        self.stack1=[]\n        self.stack2=[]\n\n    def push(self,node):\n        self.stack1.append(node)\n    def pop(self):\n        if self.stack2:\n            return self.stack2.pop()\n        elif not self.stack1:\n            return None\n        else:\n            while self.stack1:\n                self.stack2.append(self.stack1.pop())\n            return self.stack2.pop()\n\n    def getQueue(self):\n        return self.stack1\n\nq=Solution()\nq.push(1)\nq.push(2)\nq.push(3)\nprint(q.getQueue())\nprint(q.pop())\nprint(q.pop())\nprint(q.pop())\n","sub_path":"jianzhioffer/p69_9用两个栈实现队列.py","file_name":"p69_9用两个栈实现队列.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"253369613","text":"r, c = [int(x) for x in input().split(\" \")]\nmatrix = [list(input()) for _ in range(r)]\nflatten_matrix = [j for sub in matrix for j in sub]\nqueries = int(input())\npos = [[int(x) - 1 for x in input().split(\" \")] for i in range(queries)]\nlink = [int(i) for i in range(r*c)]\nsize = [1 for i in range(r*c)]\n\n\ndef find(x):\n    x_start = x\n    while x != link[x]:\n        x = link[x]\n    link[x_start]=x\n    return x\n\n\n\ndef makeStructure():\n    for i in range(r):\n        for j in range(c):\n            curr = matrix[i][j]\n            if (i+1 != r and matrix[i+1][j]==curr):\n                unite(flatten(i,j), flatten(i+1, j))\n            if (j+1 != c and matrix[i][j+1]==curr):\n                unite(flatten(i,j), flatten(i, j+1))\n\n    \ndef flatten(a,b):\n    return a * c + b\n\ndef unite(a, b):\n    a = find(a)\n    b = find(b)\n    if (same(a, b)):\n        return None\n    if(size[a] < size[b]):\n        a,b = b,a\n    size[a] += size[b]\n    link[b] = a\n\n\ndef same(a, b):\n    return find(a) == find(b)\n\ndef unionfind():\n    makeStructure()\n    for startX, startY, endX, endY in pos:\n        if(same(flatten(startX, startY), flatten(endX,endY))):\n            if(matrix[startX][startY] == '1'):\n                print(\"decimal\")\n            else:\n                print(\"binary\")\n        else:\n            print(\"neither\")\n    return None\n\n\nunionfind()\n\n","sub_path":"10kindsofpeople.py","file_name":"10kindsofpeople.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
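Note that find() in the record above compresses only the starting node (link[x_start] = x); every intermediate node on the walk keeps its old parent. An editorial sketch of full path compression over the same link structure:

def find_compressed(x):
    # First walk locates the root; the second walk points every node on
    # the path directly at it, flattening the tree for later queries.
    root = x
    while root != link[root]:
        root = link[root]
    while x != root:
        link[x], x = root, link[x]
    return root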
+{"seq_id":"436726668","text":"# API\nfrom flask import Flask, request, render_template\nfrom controllers.score_manager import ScoreManager\nfrom models.score import Score\n\napp = Flask(__name__)\nscore_manager = ScoreManager()\n\n@app.route('/')\ndef display_scores():\n    return render_template(\"scores.html\", score_list=score_manager.get_scores())\n\n@app.route('/api/list') # -- Default method 'GET'\ndef list_all_scores():\n    \"\"\" Returns a dictionary of scores\n\n    Returns:\n        dict: score dicts\n    \"\"\"\n    return {\"scores\": score_manager.get_scores()}\n\n@app.route('/api/new', methods=['PUT'])\ndef add_new_score():\n    \"\"\" Adds a score to the server\n\n    Returns:\n        str: HTTP status code\n    \"\"\"\n    try:\n        # -- Get the JSON data of the request, containing a new object to add\n\n        data = request.get_json()\n        new_score = Score(data['name'], int(data['score']))\n        score_manager.add_score(new_score)\n        \n        return '', 204\n        \n    except Exception: # malformed or incomplete request body\n        return \"Error\", 400\n\n@app.route('/api/list', methods=['DELETE'])\ndef delete_score():\n    \"\"\" Removes a score or scores from the server\n\n    Returns:\n        str: HTTP status code\n    \"\"\"\n    try:\n        # -- Get the JSON data of the request, containing a new object to remove\n        data = request.get_json()\n        score_manager.remove_score(data['name'])\n\n        return '', 204\n    except Exception: # malformed or incomplete request body\n        return \"Error\", 400\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"Web/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"38674037","text":"## Import the packages\nimport numpy as np\nimport csv\nfrom scipy import stats\nfrom scipy.stats import ttest_ind, ttest_ind_from_stats\n\nwith open('iris.data', 'r') as iris:\n    iris_list = list(csv.reader(iris, delimiter=','))\n    iris_array = np.array(iris_list)\n    viriginica_list = []\n    versicolor_list = []\n\n    for flower in iris_array :\n        if flower[4] == 'Iris-versicolor':\n            versicolor_list.append(float(flower[1]))\n        elif flower[4] == 'Iris-virginica':\n            viriginica_list.append(float(flower[1]))\n\n    print(versicolor_list)\n    print(viriginica_list)\n\nN = len(viriginica_list)  # sample size; len(), not __sizeof__(), which returns the object's memory footprint in bytes\nviriginica_array = np.array(viriginica_list)\nversicolor_array = np.array(versicolor_list)\n## Calculate the Standard Deviation\n#Calculate the variance to get the standard deviation\n\n#For an unbiased max likelihood estimate we have to divide the var by N-1, and therefore the parameter ddof = 1\nvar_versicolor = versicolor_array.var(ddof=1)\nvar_virginica = viriginica_array.var(ddof=1)\n\n#std deviation\ns = np.sqrt((var_versicolor + var_virginica) / 2)\ns\n\n\n\n## Calculate the t-statistics\nt = (versicolor_array.mean() - viriginica_array.mean())/(s*np.sqrt(2/N))\n\n\n\n## Compare with the critical t-value\n#Degrees of freedom\ndf = 2*N - 2\n\n#p-value after comparison with the t\np = 1 - stats.t.cdf(abs(t),df=df)  # use |t| so the upper-tail area is taken regardless of the sign of t\n\n\nprint(\"t = \" + str(t))\nprint(\"p = \" + str(2*p))\n#Note that we multiply the p-value by 2 because it's a two-tailed t-test\n### You can see that after comparing the t statistic with the critical t value (computed internally) we get a good p value of 0.0005 and thus we reject the null hypothesis, which shows that the means of the two distributions differ and the difference is statistically significant.\n\n\n## Cross Checking with the internal scipy function\nt2, p2 = stats.ttest_ind(versicolor_array, viriginica_array)\nprint(\"t = \" + str(t2))\nprint(\"p = \" + str(p2))  # ttest_ind already returns the two-tailed p-value, so it must not be doubled again\n\nte, pe = ttest_ind(viriginica_array, versicolor_array, equal_var=False)\nprint(\"te = \" + str(te))\nprint(\"pe = \" + str(pe))","sub_path":"Zadanie3/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
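A quick numerical cross-check of the fix above, as an editorial sketch on synthetic data: for equal-size samples, the hand-rolled pooled-variance statistic and SciPy's ttest_ind should agree, including the two-tailed p-value, with no extra doubling.

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)   # fixed seed for reproducibility
a = rng.normal(0.0, 1.0, 50)
b = rng.normal(0.5, 1.0, 50)

n = len(a)
s = np.sqrt((a.var(ddof=1) + b.var(ddof=1)) / 2)    # pooled std, equal sizes
t = (a.mean() - b.mean()) / (s * np.sqrt(2 / n))
p = 2 * (1 - stats.t.cdf(abs(t), df=2 * n - 2))     # two-tailed, doubled once

t2, p2 = stats.ttest_ind(a, b)                      # pooled-variance Student t-test
assert np.isclose(t, t2) and np.isclose(p, p2)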
open(\"treiner.txt\") as inf:#informe o nome do arquivo\n for pos,x in enumerate(inf.readlines()):\n recurso = x.replace(\"\\n\",'')\n try:\n infor(recurso)\n print(\"AT1-Aprendi sobre: \",recurso,pos)\n\n except:\n print(\"AT1-Nao aprendi sobre: \",recurso)\n\ntreiner()\nexcluirIncompleto()","sub_path":"treiner.py","file_name":"treiner.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"242421765","text":"import datetime\n\nfrom . import *\n\n\n@Andencento.on(andencento_cmd(pattern=\"ping$\"))\n@Andencento.on(sudo_cmd(pattern=\"ping$\", allow_sudo=True))\nasync def pong(user):\n if user.fwd_from:\n return\n start = datetime.datetime.now()\n event = await eor(user, \"`·.·★ 🅟🅘🅝🅖 ★·.·´\")\n end = datetime.datetime.now()\n ms = (end - start).microseconds / 1000\n await event.edit(\n f\"︻┳═一 🅟🅘🅝🅖 ︻┳═一\\n\\n ⚘ `{ms}`\\n ⚘ __**🅞🅦🅝🅔🅡**__ **:** {user_mention}\"\n )\n\n\nCmdHelp(\"ping\").add_command(\n \"ping\", None, \"Checks the ping speed of your ᴀɴᴅᴇɴᴄᴇɴᴛᴏ\"\n).add_warning(\"✅ Harmless Module\").add()\n\n# userbot\n","sub_path":"plugins/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"33292360","text":"# Copyright (c) 2014-2015 Sine Nomine Associates\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THE SOFTWARE IS PROVIDED 'AS IS' AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n#\n\nimport os\nimport re\nimport sys\nimport time\nfrom struct import pack,calcsize\nfrom robot.api import logger\nfrom OpenAFSLibrary.util import get_var\n\nKRB_KEYTAB_MAGIC = 0x0502\nKRB_NT_PRINCIPAL = 1\n\n# IANA Kerberos Encryption Type Numbers\nKRB_ENCTYPE_NUMBERS = {\n 'des-cbc-crc': 1,\n 'des-cbc-md4': 2,\n 'des-cbc-md5': 3,\n 'des3-cbc-md5': 5,\n 'des3-cbc-sha1': 7,\n 'dsaWithSHA1-CmsOID': 9,\n 'md5WithRSAEncryption-CmsOID': 10,\n 'sha1WithRSAEncryption-CmsOID': 11,\n 'rc2CBC-EnvOID': 12,\n 'rsaEncryption-EnvOID': 13,\n 'rsaES-OAEP-ENV-OID': 14,\n 'des-ede3-cbc-Env-OID': 15,\n 'des3-cbc-sha1-kd': 16,\n 'aes128-cts-hmac-sha1-96': 17, # common\n 'aes256-cts-hmac-sha1-96': 18, # common\n 'rc4-hmac': 23,\n 'rc4-hmac-exp': 24,\n 'camellia128-cts-cmac': 25,\n 'camellia256-cts-cmac': 26,\n 'subkey-keymaterial': 65,\n}\n\nKRB_ENCTYPE_DESCS = {\n 'aes128-cts-hmac-sha1-96': \"AES-128 CTS mode with 96-bit SHA-1 HMAC\",\n 'aes256-cts-hmac-sha1-96': \"AES-256 CTS mode with 96-bit SHA-1 HMAC\",\n 'arcfour-hmac': \"ArcFour with HMAC/md5\" ,\n 'des3-cbc-sha1': \"Triple DES cbc mode with HMAC/sha1\",\n 'des-cbc-crc': \"DES cbc mode with CRC-32\",\n}\n\ndef encryption_type_number(enctype):\n \"\"\"Get the enctype number of an enctype string.\"\"\"\n if not enctype in KRB_ENCTYPE_NUMBERS:\n raise AssertionError(\"Unknown enctype: %s\" % (enctype))\n return KRB_ENCTYPE_NUMBERS[enctype]\n\ndef encryption_type_is_des(enctype):\n eno = encryption_type_number(enctype)\n return (eno in [1, 2, 3, 15])\n\ndef normalize_enctype(enctype):\n if enctype in KRB_ENCTYPE_NUMBERS:\n return enctype\n for k in KRB_ENCTYPE_DESCS:\n if enctype == KRB_ENCTYPE_DESCS[k]:\n return k\n raise AssertionError(\"Invalid enctype string: %s\" % (enctype))\n\ndef get_keytab_keys(keytab):\n \"\"\"Read the list of (kvno,principal,enctype) tuples from a keytab.\"\"\"\n klist = get_var('KLIST')\n entries = []\n command = \"%s -e -k -t %s\" % (klist, keytab)\n logger.info(\"Running: %s \" % (command))\n pipe = os.popen(command)\n for line in pipe.readlines():\n logger.info(line.rstrip())\n if line.startswith('Keytab name:'):\n continue\n if line.startswith('KVNO'):\n continue\n if line.startswith('----'):\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+\\((.+)\\)', line)\n if m:\n kvno = int(m.group(1))\n principal = m.group(4)\n enctype = normalize_enctype(m.group(5))\n else:\n raise AssertionError(\"Unexpected klist line: %s\" % (line))\n entries.append({'kvno':kvno, 'principal':principal, 'enctype':enctype})\n rc = pipe.close()\n if rc:\n raise AssertionError(\"klist failed: exit code=%d\" % (rc))\n return entries\n\ndef get_principal_keys(principal):\n kadmin_local = get_var('KADMIN_LOCAL')\n keys = []\n if \"'\" in principal:\n raise AssertionError(\"Invalid principal string: %s\" % (principal))\n command = \"sudo -n %s -q 'get_principal %s'\" % (kadmin_local, principal)\n logger.info(\"Running: %s\" % command)\n pipe = os.popen(command)\n for line in pipe.readlines():\n logger.info(line.rstrip())\n if line.startswith(\"Key:\"):\n k = [x.strip() for x in line.replace(\"Key:\", \"\", 1).split(',')]\n kvno = int(k[0].strip('vno '))\n enctype = k[1]\n salt = k[2]\n if salt == 'no 
salt':\n salt = 'normal'\n keys.append({'kvno':kvno, 'enctype':enctype, 'salt':salt, 'principal':principal})\n rc = pipe.close()\n if rc:\n raise AssertionError(\"kadmin.local failed: exit code=%d\" % (rc))\n return keys\n\ndef get_key_version_number(keytab, cell, realm, enctype=\"des-cbc-crc\"):\n \"\"\"Get the kvno of the AFS service key.\n\n Returns the kvno of the AFS service key for the given cell, realm and\n enctype pattern. The largest kvno is returned if more than one key matches.\n \"\"\"\n logger.info(\"Searching for afs/%s@%s (or afs@%s) with enctype %s in %s\" % \\\n (cell, realm, realm, enctype, keytab))\n p = re.compile(r'afs(/%s)?@%s$' % (cell, realm))\n e = re.compile(r'%s$' % (enctype))\n kvnos = [k['kvno'] for k in get_keytab_keys(keytab) if p.match(k['principal']) and e.match(k['enctype'])]\n if len(kvnos) == 0:\n raise AssertionError(\"Failed to find a kvno in keytab '%s'.\" % (keytab))\n kvno = sorted(kvnos, reverse=True)[0]\n return kvno\n\ndef add_principal(principal):\n \"\"\"Add a principal to the Kerberos realm.\"\"\"\n kadmin_local = get_var('KADMIN_LOCAL')\n if \"'\" in principal:\n raise AssertionError(\"Invalid principal string: %s\" % (principal))\n command = \"sudo -n %s -q 'add_principal -randkey %s'\" % (kadmin_local, principal)\n logger.info(\"Running: %s\" % command)\n pipe = os.popen(command)\n for line in pipe.readlines():\n logger.info(line.rstrip())\n rc = pipe.close()\n if rc:\n raise AssertionError(\"kadmin.local failed: exit code=%d\" % (rc))\n\ndef add_entry_to_keytab(keytab, principal, enctype=None, salt='normal'):\n \"\"\"Write an entry to a keytab.\"\"\"\n kadmin_local = get_var('KADMIN_LOCAL')\n if principal and \"'\" in principal:\n raise AssertionError(\"Invalid principal string: %s\" % (principal))\n if enctype and \"'\" in enctype:\n raise AssertionError(\"Invalid enctype string: %s\" % (enctype))\n if salt and \"'\" in salt:\n raise AssertionError(\"Invalid salt string: %s\" % (salt))\n\n if enctype:\n query = \"ktadd -k %s -e %s:%s %s\" % (keytab, enctype, salt, principal)\n else:\n query = \"ktadd -k %s %s\" % (keytab, principal)\n command = \"sudo -n %s -q '%s'\" % (kadmin_local, query)\n logger.info(\"Running: %s \" % (command))\n pipe = os.popen(command)\n for line in pipe.readlines():\n logger.info(line.rstrip())\n rc = pipe.close()\n if rc:\n raise AssertionError(\"kadmin.local failed: exit code=%d\" % (rc))\n\ndef generate_des_key():\n \"\"\"Generate a random DES key with correct parity bits.\"\"\"\n keybytes = bytearray(os.urandom(8))\n key = bytearray(0)\n for i in keybytes:\n b = bin(i & 0xfe)\n nb = len(b.split('1'))\n # nb is one more than the number of bits set\n key.append(int(b, 2) + (nb % 2))\n return bytes(key)\n\ndef create_empty_keytab(keytab):\n \"\"\"Create an emtpy keytab file.\n\n If the keytab is created by kadmin.local (under sudo), it will be\n owned by root and not readable by the test user. 
Instead of trying\n to steal ownship after the file is created, create an empty keytab\n file owned by the non-root user and have kadmin write the keys to\n the empty file.\n\n kadmin will not write to the keytab file unless it begins with a\n magic file format number, so put that at the beginning of the new file.\n \"\"\"\n if not os.path.exists(os.path.dirname(keytab)):\n os.makedirs(os.path.dirname(keytab))\n f = open(keytab, 'wb')\n f.write(pack('!h', KRB_KEYTAB_MAGIC)) # requried by kadmin\n f.close()\n\ndef create_afs_service_keytab(self, keytab, cell, realm, enctype):\n \"\"\"Create the AFS service key and write it to a keytab.\n\n Create the afs service key using kadmin.local if it does not\n exist and write the key to a keytab if the key version number is\n not already in the keytab.\n \"\"\"\n principal = \"afs/%s@%s\" % (cell, realm)\n if encryption_type_is_des(enctype):\n salt = 'afs3'\n else:\n salt = 'normal'\n # Create the key, if needed\n keys = get_principal_keys(principal)\n if not keys:\n add_principal(principal)\n keys = get_principal_keys(principal)\n kvno = [k['kvno'] for k in keys][0]\n # Create an empty keytab, if none. Otherwise get the service key kvno\n # in the existing keytab.\n if not os.path.isfile(keytab):\n create_empty_keytab(keytab)\n try:\n in_keytab = get_key_version_number(keytab, cell, realm, enctype=enctype)\n except:\n in_keytab = None\n # Write the service key, if not already in the keytab.\n if kvno != in_keytab:\n add_entry_to_keytab(keytab, principal, enctype=enctype, salt=salt)\n\ndef create_fake_keytab(self, keytab, cell, realm, enctype):\n \"\"\"Create a test keytab file for akimpersonate.\n\n This is intended testing OpenAFS without requiring an external kerberos\n server. A dummy service key is created randomly and saved in the MIT krb5\n keytab format. The key is not cryptographically strong; only use this\n for test systems.\n \"\"\"\n # The following C-like structure definitions illustrate the MIT keytab\n # file format. All values are in network byte order. 
All text is ASCII.\n #\n # keytab {\n # uint16_t file_format_version; /* 0x502 */\n # keytab_entry entries[*];\n # };\n # keytab_entry {\n # int32_t size;\n # uint16_t num_components; /* sub 1 if version 0x501 */\n # counted_octet_string realm;\n # counted_octet_string components[num_components];\n # uint32_t name_type; /* not present if version 0x501 */\n # uint32_t timestamp;\n # uint8_t vno8;\n # keyblock key;\n # uint32_t vno; /* only present if >= 4 bytes left in entry */\n # };\n # counted_octet_string {\n # uint16_t length;\n # uint8_t data[length];\n # };\n # keyblock {\n # uint16_t type;\n # counted_octet_string key;\n # };\n num_components = 2\n name_type = KRB_NT_PRINCIPAL\n timestamp = int(time.time())\n vno = 1\n eno = encryption_type_number(enctype)\n if eno in (1, 2, 3):\n key = generate_des_key()\n elif eno == 17:\n key = os.urandom(16)\n elif eno == 18:\n key = os.urandom(32)\n else:\n AssertionError(\"Cannot create fake keytab for enctype %s\" % (enctype))\n fmt = \"HH%dsH%dsH%dsLLBHH%ds\" % (len(realm), len(\"afs\"), len(cell), len(key))\n size = calcsize(\"!\"+fmt) # get the entry size\n f = open(keytab, \"w\")\n f.write(pack('!h', KRB_KEYTAB_MAGIC))\n f.write(pack(\"!l\"+fmt,\n size, num_components,\n len(realm), realm, len(\"afs\"), \"afs\", len(cell), cell, name_type,\n timestamp, vno, eno, len(key), key))\n f.close()\n\nclass _KeytabKeywords(object):\n\n def get_encryption_types(self):\n \"\"\"Return the list of encyption types.\"\"\"\n return KRB_ENCTYPE_NUMBERS.keys()\n\n def encryption_type_is_des(self, enctype):\n \"\"\"Returns true if the enctype uses the single DES cipher.\"\"\"\n return encryption_type_is_des(enctype)\n\n def get_key_version_number(self, keytab, cell, realm, enctype=\"des-cbc-crc\"):\n \"\"\"Get the kvno of the AFS service key.\n\n Returns the kvno of the AFS service key for the given cell, realm and\n enctype pattern. The largest kvno is returned if more than one key matches.\n \"\"\"\n return get_key_version_number(keytab, cell, realm, enctype)\n\n def get_encryption_type_number(self, enctype):\n \"\"\"Get the enctype number of an enctype string.\"\"\"\n return encryption_type_number(enctype)\n\n def create_user_keytab(self, keytab, principal, realm):\n \"\"\"Create test user keys and write them to a keytab.\n\n Create the test user key using kadmin.local if it does not exist\n and write the key to a keytab if the key version number is not already\n in the keytab.\n \"\"\"\n principal = \"%s@%s\" % (principal.replace(\".\", \"/\"), realm)\n # Create the key, if needed\n keys = get_principal_keys(principal)\n if not keys:\n add_principal(principal)\n keys = get_principal_keys(principal)\n kvno = [k['kvno'] for k in keys][0]\n # Create an emtpy keytab owned by the current uid, if the keytab does not\n # already exist. 
See if the kvno is already in the keytab.\n if not os.path.isfile(keytab):\n create_empty_keytab(keytab)\n try:\n keys = get_keytab_keys(keytab)\n kvnos = [k['kvno'] for k in keys if k['principal']==principal]\n except:\n kvnos = []\n # Write the service key, if not already in the keytab.\n if not kvnos:\n add_entry_to_keytab(keytab, principal)\n else:\n # Avoid old kvnos in the keytab.\n old = [k for k in kvnos if k!=kvno]\n if old:\n raise AssertionError(\"Old kvnos for principal '%s' in keytab '%s'!\" % (principal, keytab))\n\n def create_service_keytab(self, keytab, cell, realm, enctype=None, akimpersonate=True):\n \"\"\"Create the AFS service key keytab.\"\"\"\n if akimpersonate:\n create_fake_keytab(self, keytab, cell, realm, enctype)\n else:\n create_afs_service_keytab(self, keytab, cell, realm, enctype)\n\n","sub_path":"libraries/OpenAFSLibrary/keywords/keytab.py","file_name":"keytab.py","file_ext":"py","file_size_in_byte":13874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"5720112","text":"import time\nfrom .LandingPage import LandingPage\nfrom .LoginPage import LoginPage\n\n\nclass HomePage:\n\n def __init__(self, driver):\n self.driver = driver\n self.logo_xpath = \"//*[@id=\\\"root\\\"]/div/div/div[2]/nav/a/img\"\n\n def check_logo(self):\n landing = LandingPage(self.driver)\n landing.click_login()\n login = LoginPage(self.driver)\n login.login(\"jean-baptiste.melet@epitech.eu\", \"azertyuiop\")\n time.sleep(3)\n self.driver.find_element_by_xpath(self.logo_xpath)\n","sub_path":"nrt-ftest/Website-tests/Pages/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"47783017","text":"\"\"\"Tests for utils.py.\"\"\"\n\nimport itertools\nimport logging\nimport os\n\nfrom pytype import utils\nfrom pytype.tests import test_base\nfrom pytype.typegraph import cfg\n\nimport unittest\n\n# pylint: disable=invalid-name\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DummyValue(object):\n \"\"\"A class with a 'parameters' function, for testing cartesian products.\"\"\"\n\n def __init__(self, index):\n self.index = index\n self._parameters = []\n\n def set_parameters(self, parameters):\n self._parameters = parameters\n\n def unique_parameter_values(self):\n return [param.bindings for param in self._parameters]\n\n def __repr__(self):\n return \"x%d\" % self.index\n\n\nclass Node(object):\n \"\"\"A graph node, for testing topological sorting.\"\"\"\n\n def __init__(self, name, *incoming):\n self.name = name\n self.outgoing = []\n self.incoming = list(incoming)\n for n in incoming:\n n.outgoing.append(self)\n\n def connect_to(self, other_node):\n self.outgoing.append(other_node)\n other_node.incoming.append(self)\n\n def __repr__(self):\n return \"Node(%s)\" % self.name\n\n\nclass UtilsTest(unittest.TestCase):\n\n def setUp(self):\n self.prog = cfg.Program()\n self.current_location = self.prog.NewCFGNode()\n\n def testReplaceExtension(self):\n self.assertEqual(\"foo.bar\", utils.replace_extension(\"foo.txt\", \"bar\"))\n self.assertEqual(\"foo.bar\", utils.replace_extension(\"foo.txt\", \".bar\"))\n self.assertEqual(\"a.b.c.bar\", utils.replace_extension(\"a.b.c.txt\", \".bar\"))\n self.assertEqual(\"a.b/c.bar\", utils.replace_extension(\"a.b/c.d\", \".bar\"))\n self.assertEqual(\"xyz.bar\", utils.replace_extension(\"xyz\", \"bar\"))\n\n def testComplexityLimit(self):\n limit = utils.ComplexityLimit(5)\n 
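    # Editorial note: ComplexityLimit(5) tolerates a running total below its cap;
    # the three inc() calls below contribute 1 + 2 + 1 = 4, so the fourth call
    # is expected to reach the limit and raise TooComplexError.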
limit.inc()\n limit.inc(2)\n limit.inc()\n self.assertRaises(utils.TooComplexError, limit.inc)\n\n def testVariableProduct(self):\n u1 = self.prog.NewVariable([1, 2], [], self.current_location)\n u2 = self.prog.NewVariable([3, 4], [], self.current_location)\n product = utils.variable_product([u1, u2])\n pairs = [[a.data for a in d]\n for d in product]\n self.assertItemsEqual(pairs, [\n [1, 3],\n [1, 4],\n [2, 3],\n [2, 4],\n ])\n\n def testDeepVariableProductRaises(self):\n x1, x2 = [DummyValue(i + 1) for i in range(2)]\n v1 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v2 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v3 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v4 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v5 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v6 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v7 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v8 = self.prog.NewVariable([x1, x2], [], self.current_location)\n self.assertRaises(utils.TooComplexError,\n utils.deep_variable_product, [v1, v2, v3, v4,\n v5, v6, v7, v8], 256)\n\n def testDeepVariableProductRaises2(self):\n x1, x2, x3, x4 = [DummyValue(i + 1) for i in range(4)]\n v1 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v2 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v3 = self.prog.NewVariable([x3, x4], [], self.current_location)\n v4 = self.prog.NewVariable([x3, x4], [], self.current_location)\n x1.set_parameters([v3])\n x2.set_parameters([v4])\n self.assertRaises(utils.TooComplexError,\n utils.deep_variable_product, [v1, v2], 4)\n\n def testVariableProductDictRaises(self):\n values = [DummyValue(i + 1) for i in range(4)]\n v1 = self.prog.NewVariable(values, [], self.current_location)\n v2 = self.prog.NewVariable(values, [], self.current_location)\n v3 = self.prog.NewVariable(values, [], self.current_location)\n v4 = self.prog.NewVariable(values, [], self.current_location)\n variabledict = {\"v1\": v1, \"v2\": v2, \"v3\": v3, \"v4\": v4}\n self.assertRaises(utils.TooComplexError,\n utils.variable_product_dict, variabledict, 4)\n\n def testDeepVariableProduct(self):\n x1, x2, x3, x4, x5, x6 = [DummyValue(i + 1) for i in range(6)]\n v1 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v2 = self.prog.NewVariable([x3], [], self.current_location)\n v3 = self.prog.NewVariable([x4, x5], [], self.current_location)\n v4 = self.prog.NewVariable([x6], [], self.current_location)\n x1.set_parameters([v2, v3])\n product = utils.deep_variable_product([v1, v4])\n rows = [{a.data for a in row}\n for row in product]\n self.assertItemsEqual(rows, [\n {x1, x3, x4, x6},\n {x1, x3, x5, x6},\n {x2, x6},\n ])\n\n def testDeepVariableProductWithEmptyVariables(self):\n x1 = DummyValue(1)\n v1 = self.prog.NewVariable([x1], [], self.current_location)\n v2 = self.prog.NewVariable([], [], self.current_location)\n x1.set_parameters([v2])\n product = utils.deep_variable_product([v1])\n rows = [{a.data for a in row}\n for row in product]\n self.assertItemsEqual(rows, [{x1}])\n\n def testDeepVariableProductWithEmptyTopLayer(self):\n x1 = DummyValue(1)\n v1 = self.prog.NewVariable([x1], [], self.current_location)\n v2 = self.prog.NewVariable([], [], self.current_location)\n product = utils.deep_variable_product([v1, v2])\n rows = [{a.data for a in row}\n for row in product]\n self.assertItemsEqual(rows, [{x1}])\n\n def testDeepVariableProductWithCycle(self):\n x1, x2, x3, x4, x5, x6 = 
[DummyValue(i + 1) for i in range(6)]\n v1 = self.prog.NewVariable([x1, x2], [], self.current_location)\n v2 = self.prog.NewVariable([x3], [], self.current_location)\n v3 = self.prog.NewVariable([x4, x5], [], self.current_location)\n v4 = self.prog.NewVariable([x6], [], self.current_location)\n x1.set_parameters([v2, v3])\n x5.set_parameters([v1])\n product = utils.deep_variable_product([v1, v4])\n rows = [{a.data for a in row}\n for row in product]\n self.assertItemsEqual(rows, [\n {x1, x3, x4, x6},\n {x1, x2, x3, x5, x6},\n {x1, x3, x5, x6},\n {x2, x6},\n ])\n\n def testVariableProductDict(self):\n u1 = self.prog.NewVariable([1, 2], [], self.current_location)\n u2 = self.prog.NewVariable([3, 4], [], self.current_location)\n product = utils.variable_product_dict({\"a\": u1, \"b\": u2})\n pairs = [{k: a.data for k, a in d.items()} for d in product]\n self.assertItemsEqual(pairs, [\n {\"a\": 1, \"b\": 3},\n {\"a\": 1, \"b\": 4},\n {\"a\": 2, \"b\": 3},\n {\"a\": 2, \"b\": 4},\n ])\n\n def testNumericSortKey(self):\n k = utils.numeric_sort_key\n self.assertLess(k(\"1aaa\"), k(\"12aa\"))\n self.assertLess(k(\"12aa\"), k(\"123a\"))\n self.assertLess(k(\"a1aa\"), k(\"a12a\"))\n self.assertLess(k(\"a12a\"), k(\"a123\"))\n\n def testPrettyDNF(self):\n dnf = [[\"a\", \"b\"], \"c\", [\"d\", \"e\", \"f\"]]\n self.assertEqual(utils.pretty_dnf(dnf), \"(a & b) | c | (d & e & f)\")\n\n def testComputePredecessors(self):\n # n7 n6\n # ^ ^\n # | |\n # | |\n # n1 ---> n20 --> n3 --> n5 -+\n # | ^ ^ |\n # | | | |\n # | +------------|---+\n # v |\n # n4 ------------+\n n1 = self.prog.NewCFGNode(\"n1\")\n n20 = n1.ConnectNew(\"n20\")\n n3 = n20.ConnectNew(\"n3\")\n n4 = n20.ConnectNew(\"n4\")\n n5 = n3.ConnectNew(\"n5\")\n n6 = n20.ConnectNew(\"n6\")\n n7 = n1.ConnectNew(\"n7\")\n n3.ConnectTo(n5)\n n4.ConnectTo(n5)\n n5.ConnectTo(n20)\n\n # Intentionally pick a non-root as nodes[0] to verify that the graph\n # will still be fully explored.\n nodes = [n7, n1, n20, n3, n4, n5, n6]\n r = utils.compute_predecessors(nodes)\n self.assertItemsEqual(r[n1], {n1})\n self.assertItemsEqual(r[n20], {n1, n20, n3, n4, n5})\n self.assertItemsEqual(r[n3], {n1, n20, n3, n4, n5})\n self.assertItemsEqual(r[n4], {n1, n20, n3, n4, n5})\n self.assertItemsEqual(r[n5], {n1, n20, n3, n4, n5})\n self.assertItemsEqual(r[n6], {n1, n20, n3, n4, n5, n6})\n self.assertItemsEqual(r[n7], {n1, n7})\n\n def testOrderNodes0(self):\n order = utils.order_nodes([])\n self.assertItemsEqual(order, [])\n\n def testOrderNodes1(self):\n # n1 --> n2\n n1 = self.prog.NewCFGNode(\"n1\")\n n2 = n1.ConnectNew(\"n2\")\n order = utils.order_nodes([n1, n2])\n self.assertItemsEqual([n1, n2], order)\n\n def testOrderNodes2(self):\n # n1 n2(dead)\n n1 = self.prog.NewCFGNode(\"n1\")\n n2 = self.prog.NewCFGNode(\"n2\")\n order = utils.order_nodes([n1, n2])\n self.assertItemsEqual([n1], order)\n\n def testOrderNodes3(self):\n # n1 --> n2 --> n3\n # ^ |\n # +-------------+\n n1 = self.prog.NewCFGNode(\"n1\")\n n2 = n1.ConnectNew(\"n2\")\n n3 = n2.ConnectNew(\"n3\")\n n3.ConnectTo(n1)\n order = utils.order_nodes([n1, n2, n3])\n self.assertItemsEqual([n1, n2, n3], order)\n\n def testOrderNodes4(self):\n # n1 --> n3 --> n2\n # ^ |\n # +------+\n n1 = self.prog.NewCFGNode(\"n1\")\n n3 = n1.ConnectNew(\"n3\")\n n2 = n3.ConnectNew(\"n2\")\n n3.ConnectTo(n1)\n order = utils.order_nodes([n1, n2, n3])\n self.assertItemsEqual([n1, n3, n2], order)\n\n def testOrderNodes5(self):\n # n1 --> n3 --> n2\n # ^ |\n # +------+ n4(dead)\n n1 = self.prog.NewCFGNode(\"n1\")\n n3 = 
n1.ConnectNew(\"n3\")\n n2 = n3.ConnectNew(\"n2\")\n n3.ConnectTo(n1)\n n4 = self.prog.NewCFGNode(\"n4\")\n order = utils.order_nodes([n1, n2, n3, n4])\n self.assertItemsEqual([n1, n3, n2], order)\n\n def testOrderNodes6(self):\n # +-------------------+\n # | v\n # n1 --> n2 --> n3 --> n5\n # ^ |\n # +------n4\n n1 = self.prog.NewCFGNode(\"n1\")\n n2 = n1.ConnectNew(\"n2\")\n n3 = n2.ConnectNew(\"n3\")\n n4 = n3.ConnectNew(\"n4\")\n n4.ConnectTo(n2)\n n5 = n3.ConnectNew(\"n5\")\n n1.ConnectTo(n5)\n order = utils.order_nodes([n1, n5, n4, n3, n2])\n self.assertItemsEqual([n1, n2, n3, n4, n5], order)\n\n def testOrderNodes7(self):\n # +---------------------------------+\n # | v\n # n1 --> n2 --> n3 --> n4 --> n5 --> n6\n # ^ | ^ |\n # | v | v\n # +------n7 +------n8\n n1 = self.prog.NewCFGNode(\"n1\")\n n2 = n1.ConnectNew(\"n2\")\n n3 = n2.ConnectNew(\"n3\")\n n4 = n3.ConnectNew(\"n4\")\n n5 = n4.ConnectNew(\"n5\")\n n6 = n5.ConnectNew(\"n6\")\n n7 = n3.ConnectNew(\"n7\")\n n7.ConnectTo(n2)\n n8 = n5.ConnectNew(\"n8\")\n n8.ConnectTo(n4)\n n1.ConnectTo(n6)\n order = utils.order_nodes([n1, n2, n3, n4, n5, n6, n7, n8])\n self.assertItemsEqual([n1, n2, n3, n7, n4, n5, n8, n6], order)\n\n def testTopologicalSort(self):\n n1 = Node(\"1\")\n n2 = Node(\"2\", n1)\n n3 = Node(\"3\", n2)\n n4 = Node(\"4\", n2, n3)\n for permutation in itertools.permutations([n1, n2, n3, n4]):\n self.assertEqual(list(utils.topological_sort(permutation)),\n [n1, n2, n3, n4])\n\n def testTopologicalSort2(self):\n n1 = Node(\"1\")\n n2 = Node(\"2\", n1)\n self.assertEqual(list(utils.topological_sort([n1, n2, 3, 4]))[-1], n2)\n\n def testTopologicalSortCycle(self):\n n1 = Node(\"1\")\n n2 = Node(\"2\")\n n1.incoming = [n2]\n n2.incoming = [n1]\n generator = utils.topological_sort([n1, n2])\n self.assertRaises(ValueError, list, generator)\n\n def testTopologicalSortSubCycle(self):\n n1 = Node(\"1\")\n n2 = Node(\"2\")\n n3 = Node(\"3\")\n n1.incoming = [n2]\n n2.incoming = [n1]\n n3.incoming = [n1, n2]\n generator = utils.topological_sort([n1, n2, n3])\n self.assertRaises(ValueError, list, generator)\n\n def testTopologicalSortGetattr(self):\n self.assertEqual(list(utils.topological_sort([1])), [1])\n\n def testTempdir(self):\n with utils.Tempdir() as d:\n filename1 = d.create_file(\"foo.txt\")\n filename2 = d.create_file(\"bar.txt\", \"\\tdata2\")\n filename3 = d.create_file(\"baz.txt\", \"data3\")\n filename4 = d.create_file(\"d1/d2/qqsv.txt\", \" data4.1\\n data4.2\")\n filename5 = d.create_directory(\"directory\")\n self.assertEqual(filename1, d[\"foo.txt\"])\n self.assertEqual(filename2, d[\"bar.txt\"])\n self.assertEqual(filename3, d[\"baz.txt\"])\n self.assertEqual(filename4, d[\"d1/d2/qqsv.txt\"])\n self.assertTrue(os.path.isdir(d.path))\n self.assertTrue(os.path.isfile(filename1))\n self.assertTrue(os.path.isfile(filename2))\n self.assertTrue(os.path.isfile(filename3))\n self.assertTrue(os.path.isfile(filename4))\n self.assertTrue(os.path.isdir(os.path.join(d.path, \"d1\")))\n self.assertTrue(os.path.isdir(os.path.join(d.path, \"d1\", \"d2\")))\n self.assertTrue(os.path.isdir(filename5))\n self.assertEqual(filename4, os.path.join(d.path, \"d1\", \"d2\", \"qqsv.txt\"))\n for filename, contents in [(filename1, \"\"),\n (filename2, \"data2\"), # dedented\n (filename3, \"data3\"),\n (filename4, \"data4.1\\ndata4.2\"), # dedented\n ]:\n with open(filename, \"r\") as fi:\n self.assertEqual(fi.read(), contents)\n self.assertFalse(os.path.isdir(d.path))\n self.assertFalse(os.path.isfile(filename1))\n 
self.assertFalse(os.path.isfile(filename2))\n self.assertFalse(os.path.isfile(filename3))\n self.assertFalse(os.path.isdir(os.path.join(d.path, \"d1\")))\n self.assertFalse(os.path.isdir(os.path.join(d.path, \"d1\", \"d2\")))\n self.assertFalse(os.path.isdir(filename5))\n\n def testCd(self):\n with utils.Tempdir() as d:\n d.create_directory(\"foo\")\n d1 = os.getcwd()\n with utils.cd(d.path):\n self.assertTrue(os.path.isdir(\"foo\"))\n d2 = os.getcwd()\n self.assertEqual(d1, d2)\n\n def testListStripPrefix(self):\n self.assertEqual([1, 2, 3], utils.list_strip_prefix([1, 2, 3], []))\n self.assertEqual([2, 3], utils.list_strip_prefix([1, 2, 3], [1]))\n self.assertEqual([3], utils.list_strip_prefix([1, 2, 3], [1, 2]))\n self.assertEqual([], utils.list_strip_prefix([1, 2, 3], [1, 2, 3]))\n self.assertEqual([1, 2, 3],\n utils.list_strip_prefix([1, 2, 3], [0, 1, 2, 3]))\n self.assertEqual([], utils.list_strip_prefix([], [1, 2, 3]))\n self.assertEqual(list(\"wellington\"), utils.list_strip_prefix(\n list(\"newwellington\"), list(\"new\")))\n self.assertEqual(\n \"a.somewhat.long.path.src2.d3.shrdlu\".split(\".\"),\n utils.list_strip_prefix(\n \"top.a.somewhat.long.path.src2.d3.shrdlu\".split(\".\"),\n \"top\".split(\".\")))\n\n def testListStartsWith(self):\n self.assertTrue(utils.list_startswith([1, 2, 3], []))\n self.assertTrue(utils.list_startswith([1, 2, 3], [1]))\n self.assertTrue(utils.list_startswith([1, 2, 3], [1, 2]))\n self.assertTrue(utils.list_startswith([1, 2, 3], [1, 2, 3]))\n self.assertFalse(utils.list_startswith([1, 2, 3], [2]))\n self.assertTrue(utils.list_startswith([], []))\n self.assertFalse(utils.list_startswith([], [1]))\n\n def testGetAbsoluteName(self):\n test_cases = [\n (\"x.y\", \"a.b\", \"x.y.a.b\"),\n (\"\", \"a.b\", \"a.b\"),\n (\"x.y\", \".a.b\", \"x.y.a.b\"),\n (\"x.y\", \"..a.b\", \"x.a.b\"),\n (\"x.y\", \"...a.b\", None),\n ]\n for prefix, name, expected in test_cases:\n self.assertEqual(utils.get_absolute_name(prefix, name), expected)\n\n @utils.memoize\n def _f1(self, x, y):\n return x + y\n\n def testMemoize1(self):\n l1 = self._f1((1,), (2,))\n l2 = self._f1(x=(1,), y=(2,))\n l3 = self._f1((1,), y=(2,))\n self.assertIs(l1, l2)\n self.assertIs(l2, l3)\n l1 = self._f1((1,), (2,))\n l2 = self._f1((1,), (3,))\n self.assertIsNot(l1, l2)\n\n @utils.memoize(\"x\")\n def _f2(self, x, y):\n return x + y\n\n def testMemoize2(self):\n l1 = self._f2((1,), (2,))\n l2 = self._f2((1,), (3,))\n self.assertIs(l1, l2)\n l1 = self._f2(x=(1,), y=(2,))\n l2 = self._f2(x=(1,), y=(3,))\n self.assertIs(l1, l2)\n l1 = self._f2((1,), (2,))\n l2 = self._f2((2,), (2,))\n self.assertIsNot(l1, l2)\n\n @utils.memoize(\"(x, id(y))\")\n def _f3(self, x, y):\n return x + y\n\n def testMemoize3(self):\n l1 = self._f3((1,), (2,))\n l2 = self._f3((1,), (2,))\n self.assertIsNot(l1, l2) # two different ids\n y = (2,)\n l1 = self._f3((1,), y)\n l2 = self._f3((1,), y)\n l3 = self._f3(x=(1,), y=y)\n self.assertIs(l1, l2)\n self.assertIs(l2, l3)\n\n @utils.memoize(\"(x, y)\")\n def _f4(self, x=1, y=2):\n return x + y\n\n def testMemoize4(self):\n z1 = self._f4(1, 2)\n z2 = self._f4(1, 3)\n self.assertNotEqual(z1, z2)\n z1 = self._f4(1, 2)\n z2 = self._f4(1, 2)\n self.assertIs(z1, z2)\n z1 = self._f4()\n z2 = self._f4()\n self.assertIs(z1, z2)\n z1 = self._f4()\n z2 = self._f4(1, 2)\n self.assertIs(z1, z2)\n\n def testMemoize5(self):\n class Foo(object):\n\n @utils.memoize(\"(self, x, y)\")\n def _f5(self, x, y):\n return x + y\n foo1 = Foo()\n foo2 = Foo()\n z1 = foo1._f5((1,), (2,))\n z2 = 
foo2._f5((1,), (2,))\n z3 = foo2._f5((1,), (2,))\n self.assertFalse(z1 is z2)\n self.assertTrue(z2 is z3)\n\n def testInvertDict(self):\n a = {\"p\": [\"q\", \"r\"], \"x\": [\"q\", \"z\"]}\n b = utils.invert_dict(a)\n self.assertEqual(sorted(b[\"q\"]), [\"p\", \"x\"])\n self.assertEqual(b[\"r\"], [\"p\"])\n self.assertEqual(b[\"z\"], [\"x\"])\n\n def testMonitorDict(self):\n d = utils.MonitorDict()\n changestamp = d.changestamp\n var = self.prog.NewVariable()\n d[\"key\"] = var\n self.assertGreater(d.changestamp, changestamp)\n changestamp = d.changestamp\n var.AddBinding(\"data\")\n self.assertGreater(d.changestamp, changestamp)\n changestamp = d.changestamp\n var.AddBinding(\"data\") # No change because this is duplicate data\n self.assertEqual(d.changestamp, changestamp)\n changestamp = d.changestamp\n\n def testAliasingDict(self):\n d = utils.AliasingDict()\n # To avoid surprising behavior, we require desired dict functionality to be\n # explicitly overridden\n with self.assertRaises(NotImplementedError):\n d.viewitems()\n d.add_alias(\"alias\", \"name\")\n self.assertNotIn(\"alias\", d)\n self.assertNotIn(\"name\", d)\n var1 = self.prog.NewVariable()\n d[\"alias\"] = var1\n self.assertIn(\"name\", d)\n self.assertIn(\"alias\", d)\n self.assertEqual(var1, d[\"name\"])\n self.assertEqual(d[\"name\"], d[\"alias\"])\n self.assertEqual(d[\"alias\"], d.get(\"alias\"))\n self.assertEqual(d[\"name\"], d.get(\"name\"))\n self.assertEqual(None, d.get(\"other_name\"))\n var2 = self.prog.NewVariable()\n d[\"name\"] = var2\n self.assertEqual(var2, d[\"name\"])\n self.assertEqual(d[\"name\"], d[\"alias\"])\n\n def testAliasingDictRealiasing(self):\n d = utils.AliasingDict()\n d.add_alias(\"alias1\", \"name\")\n d.add_alias(\"alias2\", \"name\")\n self.assertRaises(AssertionError,\n lambda: d.add_alias(\"name\", \"other_name\"))\n try:\n d.add_alias(\"alias1\", \"other_name\")\n except utils.AliasingDictConflictError as e:\n self.assertEqual(e.existing_name, \"name\")\n else:\n self.fail(\"AliasingDictConflictError not raised\")\n d.add_alias(\"alias1\", \"name\")\n d.add_alias(\"alias2\", \"alias1\")\n d.add_alias(\"alias1\", \"alias2\")\n # Check that the name, alias1, and alias2 still all refer to the same key\n var = self.prog.NewVariable()\n d[\"alias1\"] = var\n self.assertEqual(1, len(d))\n self.assertEqual(var, d[\"name\"])\n self.assertEqual(var, d[\"alias1\"])\n self.assertEqual(var, d[\"alias2\"])\n\n def testNonemptyAliasingDictRealiasing(self):\n d = utils.AliasingDict()\n d.add_alias(\"alias\", \"name\")\n d[\"name\"] = \"hello\"\n d[\"name2\"] = \"world\"\n self.assertRaises(AssertionError, lambda: d.add_alias(\"name2\", \"name\"))\n d.add_alias(\"alias\", \"name\")\n\n def testAliasingDictTransitive(self):\n d = utils.AliasingDict()\n d.add_alias(\"alias1\", \"name\")\n d.add_alias(\"alias2\", \"alias1\")\n d[\"name\"] = self.prog.NewVariable()\n self.assertEqual(1, len(d))\n self.assertEqual(d[\"name\"], d[\"alias1\"])\n self.assertEqual(d[\"alias1\"], d[\"alias2\"])\n\n def testAliasingDictValueMove(self):\n d = utils.AliasingDict()\n v = self.prog.NewVariable()\n d[\"alias\"] = v\n d.add_alias(\"alias\", \"name\")\n self.assertEqual(d[\"name\"], v)\n self.assertEqual(d[\"alias\"], d[\"name\"])\n\n def testAliasingDictTransitiveValueMove(self):\n d = utils.AliasingDict()\n d.add_alias(\"alias2\", \"name\")\n v = self.prog.NewVariable()\n d[\"alias1\"] = v\n d.add_alias(\"alias1\", \"alias2\")\n self.assertEqual(d[\"name\"], v)\n self.assertEqual(d[\"alias2\"], d[\"name\"])\n 
self.assertEqual(d[\"alias1\"], d[\"alias2\"])\n\n def testLazyDict(self):\n d = utils.LazyDict()\n # To avoid surprising behavior, we require desired dict functionality to be\n # explicitly overridden\n with self.assertRaises(NotImplementedError):\n d.viewitems()\n x = []\n def f(y):\n # Change the state of x so that we can check whether f is evaluated at the\n # right time\n x.append(x)\n return y\n d.add_lazy_item(\"f\", f, \"foo\")\n self.assertIn(\"f\", d)\n self.assertEqual(1, len(d))\n self.assertEqual(0, len(x))\n # Evaluate the item\n self.assertEqual(\"foo\", d[\"f\"])\n self.assertEqual(1, len(x))\n self.assertIn(\"f\", d)\n self.assertEqual(1, len(d))\n\n def testLazyDictEq(self):\n d = utils.LazyDict()\n f = lambda x: x\n d.add_lazy_item(\"f\", f, \"foo\")\n self.assertTrue(d.lazy_eq(\"f\", f, \"foo\"))\n self.assertFalse(d.lazy_eq(\"f\", f, \"bar\"))\n with self.assertRaises(KeyError):\n d.lazy_eq(\"g\", f, \"foo\")\n self.assertEqual(\"foo\", d[\"f\"]) # evaluation\n # The point of lazy_eq is to do approximate equality checks when we can't\n # evaluate the function, so there's no way to determine \"foo\" != f(\"bar\").\n self.assertTrue(d.lazy_eq(\"f\", f, \"bar\"))\n\n def testDynamicVar(self):\n var = utils.DynamicVar()\n self.assertIsNone(var.get())\n with var.bind(123):\n self.assertEqual(123, var.get())\n with var.bind(456):\n self.assertEqual(456, var.get())\n self.assertEqual(123, var.get())\n self.assertIsNone(var.get())\n\n def testAnnotatingDecorator(self):\n foo = utils.AnnotatingDecorator()\n @foo(3)\n def f(): # pylint: disable=unused-variable\n pass\n self.assertEqual(foo.lookup[\"f\"], 3)\n\n def testListPytypeFiles(self):\n l = list(utils.list_pytype_files(\"pytd/stdlib/2\"))\n self.assertIn(\"ctypes.pytd\", l)\n self.assertIn(\"collections.pytd\", l)\n\n def testPathToModuleName(self):\n self.assertEqual(\"x.y.z\", utils.path_to_module_name(\"x/y/z.pyi\"))\n self.assertEqual(\"x.y.z\", utils.path_to_module_name(\"x/y/z.pytd\"))\n self.assertEqual(\"x.y.z\", utils.path_to_module_name(\"x/y/z/__init__.pyi\"))\n\n\nif __name__ == \"__main__\":\n test_base.main()\n","sub_path":"pytype/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":22766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"290893581","text":"from django.shortcuts import render, redirect\n\nfrom tictactoe_api.models.persistent_game_state import PersistentGameState\n\ndef index(request):\n \"return all the games for which there's been a recorded move\"\n gameIds = list(PersistentGameState.getAllGameIds())\n return render(request, \"index.html\", {'gameIds':gameIds})\n\ndef html_form_response(request, game, msg=None):\n \"Capture the request object for rendering the response\"\n squares = list(enumerate(game.board))\n rows = [squares[0:3], squares[3:6], squares[6:9]]\n return render(request, \"game.html\", {\"game\": game, \"rows\": rows, \"msg\": msg})\n\ndef new_game(request):\n \"synthesize a new ID and redirect to it. 
The returned Game ID is ephemeral until a move is posted\"\n if request.method != 'POST':\n return render(request, \"error.html\", {\"message\":\"Must POST to get a new game ID\"})\n else:\n game_id = PersistentGameState.generate_id()\n return redirect('game', game_id=game_id)\n\ndef game(request, game_id):\n game = PersistentGameState.load(game_id)\n if request.method != 'POST':\n return html_form_response(request, game)\n else:\n player = request.POST['player']\n position = int(request.POST['position'])\n return game.execute_move(\n player,\n position,\n onValid = lambda game: computer_move_response(request, game),\n onInvalid = lambda reason: html_form_response(request, game, \"You attempted an invalid move: \" + reason)\n )\n\ndef computer_move_response(request, game):\n \"Act as the computer opponent. if the game is finished, just return it. \"\n \"Otherwise find a move with the minmax algorithm and play it\"\n if game.isFinished():\n return html_form_response(request, game, \"Game is over.\")\n else:\n computer_player = game.next_player()\n _, computer_move = game.suggest_next(computer_player)\n return game.execute_move(\n computer_player,\n computer_move,\n onValid = lambda game: html_form_response(request, game, \"Computer has moved.\"),\n onInvalid = lambda reason: html_form_response(request, game, \"Computer made an invalid move: \" + reason)\n )\n","sub_path":"django_tictactoe/tictactoe_ui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"120805662","text":"import torch\nfrom torchvision.utils import make_grid, save_image\nimport numpy as np\nimport argparse\nimport skimage\nfrom model import IntroVAE\nfrom main import DB, colormap\n\ndef main(args):\n print(args)\n\n device = torch.device('cuda')\n torch.set_grad_enabled(False)\n args.alpha, args.beta, args.margin, args.lr = 0, 0, 0, 0\n vae = IntroVAE(args).to(device)\n vae.load_state_dict(torch.load(args.load))\n print('load ckpt from:', args.load)\n\n args.root = '/dev/null'\n args.data_aug = False\n db = DB(args)\n db.images = args.input\n imgs = [img for img in db]\n x = torch.stack(imgs, dim=0).to(device)\n mu, logvar = vae.encoder(x)\n feature = mu.cpu().numpy()\n np.save(args.output, feature)\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser()\n argparser.add_argument('--imgsz', type=int, default=128, \\\n help='imgsz')\n argparser.add_argument('--z_dim', type=int, default=256, \\\n help='hidden latent z dim')\n argparser.add_argument('--load', type=str, required=True, \\\n help='checkpoint to load')\n argparser.add_argument('--input', type=str, required=True, nargs='*', \\\n help='input images')\n argparser.add_argument('--output', type=str, required=True, \\\n help='output path')\n argparser.add_argument('--num_classes', type=int, default=-1, \\\n help='set to positive value to model shapes (e.g. 
segmentation)')\n\n    args = argparser.parse_args()\n    main(args)\n\n","sub_path":"save_feature.py","file_name":"save_feature.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"359376536","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n#Created on Mon Aug 5 15:26:32 2019\r\n\r\n@author: d2gu53\r\n\"\"\"\r\n\r\n# cnn from scratch in python \r\n# https://victorzhou.com/blog/intro-to-cnns-part-1/\r\n\r\nimport sys\r\nimport mnist\r\npath = \"/home/konstantin/Documents/master_arbeit/cnn_python\"\r\nsys.path.append(path)\r\nimport numpy as np\r\nimport test_cnn as test\r\nimport functions as fun\r\n\r\ntest.debug_cnn(n_iter=11, version=\"changed\", learn_rate=0.01)\r\n\r\ntest_images = mnist.test_images()[:2001]\r\ntest_labels = mnist.test_labels()[:2001]\r\n\r\n\r\ndef train(training_data, labels, n_iter, n_classes, n_filter, learn_rate, print_acc=True):\r\n\r\n    input_dim = int((((training_data[0].shape[0] - 3 + 1) / 2) ** 2) * n_filter)\r\n    np.random.seed(seed=30); own_filter_conv = np.random.randn(n_filter, 3, 3) / 9\r\n    np.random.seed(seed=30); own_weight_soft = (np.random.randn(input_dim, n_classes) / input_dim)\r\n    own_bias_soft = np.random.randn(n_classes)\r\n\r\n    num_correct = 0\r\n\r\n    for i in range(n_iter):\r\n\r\n        image = training_data[i] / 255 - 0.5\r\n        label = labels[i]\r\n\r\n        own_feature_map, own_filter_conv = fun.convolute(image=image, filter_matrix=own_filter_conv)\r\n        own_maxpool_map = fun.maxpool(feature_map=own_feature_map)\r\n        own_probs, own_inter_soft = fun.softmax(own_maxpool_map,\r\n                                                weight_matrix=own_weight_soft,\r\n                                                bias_vector=own_bias_soft)\r\n        own_weight_soft, own_bias_soft, own_gradient_soft = fun.backprop_softmax(inter_soft=own_inter_soft,\r\n                                                                                 probabilities=own_probs,\r\n                                                                                 label=label,\r\n                                                                                 learn_rate=learn_rate)\r\n        own_gradient_max = fun.backprop_maxpool(feature_map=own_feature_map,\r\n                                                gradient=own_gradient_soft)\r\n        own_filter_conv = fun.backprop_conv(image=image, filter_conv=own_filter_conv,\r\n                                            gradient=own_gradient_max, learn_rate=learn_rate)\r\n\r\n        prediction = np.argmax(own_probs)\r\n        acc = 1 if prediction == label else 0\r\n        num_correct += acc\r\n\r\n        if i % 100 == 0 and i != 0 and print_acc:\r\n            accuracy = num_correct / i\r\n            print(f\"accuracy for the first {i} samples: {accuracy}\")\r\n            print(f\"{num_correct} predictions for {i} samples were correct\")\r\n\r\n    return None\r\n\r\n\r\n\r\ntrain(test_images, test_labels, n_iter=1001, n_classes=10, n_filter=8, learn_rate=0.01)\r\n\r\n","sub_path":"cnn_from_scratch_blog.py","file_name":"cnn_from_scratch_blog.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"393274897","text":"h, w = map(int, input().split())\ns = [\".\" + input() + \".\" for _ in range(h)]\ns = [\".\" * (w+2)] + s + [\".\" * (w+2)]\nfor i in range(1, h + 1):\n    for j in range(1, w + 1):\n        if s[i][j] == \".\":\n            continue\n        else:\n            if s[i][j+1] == \"#\" or s[i][j-1] == \"#\" or s[i-1][j] == \"#\" or s[i+1][j] == \"#\":\n                continue\n            else:\n                print(\"No\")\n                exit()\nprint(\"Yes\")","sub_path":"Python/atcoder/ABC/90/096/ABC096C_GridRepainting2.py","file_name":"ABC096C_GridRepainting2.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"115476994","text":"from Luna import CMD_HELP\nimport os\nfrom Luna import tbot\nimport requests\nimport cryptocompare\nfrom Luna 
import CASH_API_KEY\n\nfrom telethon import types\nfrom telethon.tl import functions\nfrom Luna.events import register\n\n\n@register(pattern=\"^/cash\")\nasync def _(event):\n\n cmd = event.text\n\n args = cmd.split(\" \")\n\n if len(args) == 4:\n try:\n orig_cur_amount = float(args[1])\n\n except ValueError:\n await event.reply(\"Invalid Amount Of Currency\")\n return\n\n orig_cur = args[2].upper()\n\n new_cur = args[3].upper()\n\n request_url = (\n f\"https://www.alphavantage.co/query\"\n f\"?function=CURRENCY_EXCHANGE_RATE\"\n f\"&from_currency={orig_cur}\"\n f\"&to_currency={new_cur}\"\n f\"&apikey={CASH_API_KEY}\"\n )\n response = requests.get(request_url).json()\n try:\n current_rate = float(\n response[\"Realtime Currency Exchange Rate\"][\"5. Exchange Rate\"]\n )\n except KeyError:\n await event.reply(\"Currency Not Supported.\")\n return\n new_cur_amount = round(orig_cur_amount * current_rate, 5)\n await event.reply(f\"{orig_cur_amount} {orig_cur} = {new_cur_amount} {new_cur}\")\n\n elif len(args) == 1:\n await event.reply(__help__)\n\n else:\n await event.reply(\n f\"**Invalid Args!!:** Required 3 But Passed {len(args) -1}\",\n )\n\n\n@register(pattern=\"^/crypto (.*)\")\nasync def _(event):\n if event.fwd_from:\n return\n input_str = event.pattern_match.group(1)\n stark = input_str.split(\" \", 1)\n curreo = stark[0]\n currency1 = stark[1]\n curre = curreo.upper()\n currency = currency1.upper()\n take = \"\"\n take = cryptocompare.get_price(currency, curr=curre)\n t = take.get(currency)\n k = curre\n q = str(t.get(curre))\n\n await event.reply(\n f\"Conversion complete \\ncryptocurrency:- {currency} \\ncryptocurrency value in {k} is :- {q}\",\n parse_mode=\"HTML\",\n )\n\nfile_help = os.path.basename(__file__)\nfile_help = file_help.replace(\".py\", \"\")\nfile_helpo = file_help.replace(\"_\", \" \")\n\n__help__ = \"\"\"\n - /cash : currency converter\nExample syntax: `/cash 1 USD INR`\n - /crypto : Crypto Value\nExample syntax: `/crypto inr btc`\n\"\"\"\n\nCMD_HELP.update({file_helpo: [file_helpo, __help__]})\n","sub_path":"Luna/modules/_currency.py","file_name":"_currency.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"565614233","text":"import datasets\n\n\ndata = datasets.comet('gbm')\nres = []\nfor label in data.labels:\n lst = label.split(',')\n for lab in lst:\n if lab.endswith('(D)') or lab.endswith('(A)'):\n res.append(lab[:-3])\n else:\n res.append(lab)\nres = list(set(res))\nprint(f'Total: {len(res)}')\ns = '\\n'.join(res)\nwith open('gbm_genes.txt', 'w') as fout:\n fout.write(s)\n","sub_path":"datasets/comet_genes.py","file_name":"comet_genes.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"426204253","text":"import telepot\nfrom telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton\nfrom datetime import datetime\n\nfrom rzd_request_helper import RZDRequestHelper\n\n\nclass RZDHandler(object):\n def __init__(self, bot: telepot.Bot):\n self.bot = bot\n self.requester = RZDRequestHelper()\n self.requests = {}\n self.trains_per_message = 4\n\n @staticmethod\n def validate_date(date_string):\n try:\n date = datetime.strptime(date_string, \"%d.%m.%Y\")\n return date >= datetime.now()\n except ValueError:\n return False\n\n def handle_callback(self, msg):\n query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query')\n chat_id = 
msg['message']['chat']['id']\n print('Callback Query:', chat_id, query_id, from_id, query_data)\n\n # first of all handle city setter and route setter\n if 'city' in query_data:\n return self.handle_city_callback(chat_id, query_data, query_id)\n\n if not self.requests.get(chat_id):\n self.requests[chat_id] = {}\n\n self.set_state(chat_id, query_data, query_id)\n\n if query_data == 'arrival' or query_data == 'departure':\n self.handle_arrival_departure_callback(chat_id, query_data, query_id)\n if query_data == 'date':\n self.handle_date_callback(chat_id, query_id)\n if query_data == 'search':\n self.handle_search_callback(chat_id, query_id)\n\n def set_state(self, chat_id, query_data, query_id):\n self.requests[chat_id]['state'] = query_data\n self.requests[chat_id]['query_id'] = query_id\n\n def handle_city_callback(self, chat_id, query_data: str, query_id):\n code, name = query_data[5:].split(':')\n self.requests[chat_id][self.requests[chat_id]['state']] = {'name': name, 'code': code}\n self.bot.answerCallbackQuery(query_id)\n self.answer_after_apply(chat_id)\n\n def handle_search_callback(self, chat_id, query_id):\n req = self.requests[chat_id]\n\n if self.is_search_available(req):\n self.bot.sendMessage(chat_id, 'Searching...')\n\n available_trains = self.requester.do_search(req)\n\n self.bot.answerCallbackQuery(query_id)\n\n if not available_trains:\n self.bot.sendMessage(chat_id, 'No trains found')\n elif available_trains[0].get('noSeats'):\n self.bot.sendMessage(chat_id, 'No seats found')\n elif not available_trains[0].get('list'):\n self.bot.sendMessage(chat_id, 'No trains found')\n else:\n self._parse_and_answer(chat_id, available_trains[0].get('list'))\n else:\n self.bot.answerCallbackQuery(query_id)\n self.set_state(chat_id, '', 0)\n self.bot.sendMessage(chat_id, 'Incorrect request info. Try again')\n self.answer_with_search_buttons(chat_id)\n\n def handle_date_callback(self, chat_id, query_id, additional=''):\n self.bot.answerCallbackQuery(query_id)\n self.bot.sendMessage(chat_id, additional + 'Type a date in dd.mm.YYYY format')\n\n def handle_arrival_departure_callback(self, chat_id, query_data, query_id, additional=''):\n self.bot.answerCallbackQuery(query_id)\n self.bot.sendMessage(chat_id, additional + f'Type a {query_data} point name')\n\n def handle_message(self, msg):\n chat_id = msg['chat']['id']\n if self.msg_without_entities(msg) and chat_id in self.requests.keys():\n return self.try_apply_message(chat_id, msg['text'])\n\n for cmd in filter(lambda x: x['type'] == 'bot_command', msg['entities']):\n self.handle_command(msg['text'][cmd['offset']: cmd['offset'] + cmd['length']], msg)\n\n @staticmethod\n def msg_without_entities(msg):\n return 'entities' not in msg or not msg['entities']\n\n def handle_command(self, command: str, msg):\n if command == '/start':\n self.handle_start_command(msg)\n elif command == '/help':\n self.handle_help_command(msg)\n elif command == '/search':\n self.handle_search_command(msg)\n\n def handle_help_command(self, msg):\n self.bot.sendMessage(msg['chat']['id'],\n 'This is simple RZD bot. You can search available tickets for Russian Railways here '\n 'with command /search')\n\n def handle_start_command(self, msg):\n self.bot.sendMessage(msg['chat']['id'],\n f'Hello, {msg[\"chat\"][\"first_name\"]}! 
Welcome to RZD bot!\\nTo see what this bot can do '\n f'type /help.\\n')\n\n def handle_search_command(self, msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n self.requests[chat_id] = {}\n self.answer_with_search_buttons(chat_id)\n\n @staticmethod\n def picker(req, field):\n return req.get(field, {}).get('name', 'Not selected')\n\n def answer_with_search_buttons(self, chat_id):\n req = self.requests[chat_id]\n\n keyboard = InlineKeyboardMarkup(inline_keyboard=[\n [InlineKeyboardButton(text='Choose departure', callback_data='departure'),\n InlineKeyboardButton(text='Choose arrival', callback_data='arrival'),\n InlineKeyboardButton(text='Choose date', callback_data='date')],\n [InlineKeyboardButton(text='Search', callback_data='search')] if self.is_search_available(req) else []\n ])\n\n departure = self.picker(req, 'departure')\n arrival = self.picker(req, 'arrival')\n date = req.get('date', 'Not selected')\n text = f'Departure: {departure}, arrival: {arrival}, date: {date}\\n' \\\n f'Select action'\n self.bot.sendMessage(chat_id, text, reply_markup=keyboard)\n\n def try_apply_message(self, chat_id: int, text: str):\n query_data = self.requests[chat_id]['state']\n query_id = self.requests[chat_id]['query_id']\n\n if query_data == 'arrival' or query_data == 'departure':\n cities = self.requester.find_city(text)\n\n if not cities:\n return self.handle_arrival_departure_callback(chat_id, query_data, query_id, 'City name incorrect. '\n 'Try again.\\n')\n elif len(cities) == 1:\n self.requests[chat_id][query_data] = cities[0]\n else:\n return self.handle_multiple_city_choose(chat_id, cities)\n elif query_data == 'date':\n if not self.validate_date(text):\n return self.handle_date_callback(chat_id, query_id, 'Incorrect date!\\n')\n\n self.requests[chat_id][query_data] = text\n\n self.answer_after_apply(chat_id)\n\n def answer_after_apply(self, chat_id):\n self.set_state(chat_id, '', 0)\n\n self.answer_with_search_buttons(chat_id)\n\n @staticmethod\n def is_search_available(req):\n return req.get('departure') and req.get('arrival') and req.get('date')\n\n def handle_multiple_city_choose(self, chat_id: int, cities):\n keys = list(map(lambda x: InlineKeyboardButton(text=x['name'], callback_data=f'city {x[\"code\"]}:{x[\"name\"]}'),\n cities[:12]))\n keyboard = InlineKeyboardMarkup(inline_keyboard=[keys[i:i + 3] for i in range(0, len(keys), 3)])\n self.bot.sendMessage(chat_id, 'Please, choose right city', reply_markup=keyboard)\n\n def _parse_and_answer(self, chat_id: int, trains: []):\n for i in range(len(trains) // self.trains_per_message):\n self.bot.sendMessage(chat_id, self.requester.beautify_train_info(\n trains[self.trains_per_message * i: self.trains_per_message * i + self.trains_per_message]))\n","sub_path":"rzd_handler.py","file_name":"rzd_handler.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"603421182","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n#\n# By Guangbai.\n# Created on 2018-09-21 下午3:07\n#\n\n\nimport numpy as np\n\n\ndef Cosine(x, y):\n sum_xy = 0.0\n normX = 0.0\n normY = 0.0\n for a, b in zip(x, y):\n sum_xy += a * b\n normX += a ** 2\n normY += b ** 2\n if normX == 0.0 or normY == 0.0:\n return None\n else:\n return sum_xy / ((normX * normY) ** 0.5)\n\n\ndataA = np.mat([1, 2, 3, 3, 2, 1])\ndataB = np.mat([2, 3, 4, 4, 3, 2])\nprint(Cosine(dataA, 
dataB))\n","sub_path":"src/nlp/similarity_102.py","file_name":"similarity_102.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"641436697","text":"from microbit import *\nimport radio\n\ndirections = {\"-\": Image.SQUARE_SMALL, \"R\": Image.ARROW_E, \"L\": Image.ARROW_W, \"F\": Image.ARROW_N, \"B\": Image.ARROW_S}\n\nwhile True:\n if button_b.was_pressed():\n #Checks if the radio is on or not, then turns it on or off.\n try:\n radio.send(\"-\") # Sends the stop signal to the buggy, in order to avoid accidents.\n radio.off()\n display.scroll(\"RADIO OFF\")\n except:\n radio.on()\n display.scroll(\"RADIO ON\")\n spatial_awareness = accelerometer.get_values() #Gets the accelerometer data.\n #Displays the directions. Directions are good. \n if spatial_awareness[0] > 200:\n direction = \"R\"\n elif spatial_awareness[0] < -200:\n direction = \"L\"\n elif spatial_awareness[1] > 200:\n direction = \"B\"\n elif spatial_awareness[1] < -200:\n direction = \"F\"\n else:\n direction = \"-\"\n try:\n #Sends the direction over the radio, and displays the direction on the display.\n radio.send(direction)\n display.show(directions[direction])\n except:\n #If the radio isn't on, show a lack of anything.\n display.clear()\n","sub_path":"buggy_controller.py","file_name":"buggy_controller.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"491584075","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: mpathic/src/profile_freq.py\n# Compiled at: 2018-06-21 15:23:40\n\"\"\"\nCalculates the fractional number of character occurances at each position within the set of sequences passed.\n\"\"\"\nfrom __future__ import division\nimport argparse, numpy as np, sys, pandas as pd\nfrom mpathic.src import qc\nfrom mpathic.src import io_local as io\nfrom mpathic.src import profile_ct\nimport pdb\nfrom mpathic import SortSeqError\nfrom mpathic.src.utils import handle_errors, check, ControlledError\n\nclass ProfileFreq:\n \"\"\"\n\n Profile Frequencies computes character frequencies (0.0 to 1.0) at each position\n\n Parameters\n ----------\n dataset_df: (pandas dataframe)\n A dataframe containing a valid dataset.\n\n bin: (int)\n A bin number specifying which counts to use\n\n start: (int)\n An integer specifying the sequence start position\n\n end: (int)\n An integer specifying the sequence end position\n\n Returns\n -------\n freq_df: (pd.DataFrame)\n A dataframe containing counts for each nucleotide/amino \n\n acid character at each position.\n\n \"\"\"\n\n @handle_errors\n def __init__(self, dataset_df=None, bin=None, start=0, end=None):\n self.dataset_df = dataset_df\n self.bin = bin\n self.start = start\n self.end = end\n self.freq_df = None\n self._input_check()\n counts_df = profile_ct.main(dataset_df, bin=bin, start=start, end=end)\n ct_cols = [ c for c in counts_df.columns if qc.is_col_type(c, 'ct_') ]\n freq_cols = [ 'freq_' + c.split('_')[1] for c in ct_cols ]\n freq_df = counts_df[ct_cols].div(counts_df['ct'], axis=0)\n freq_df.columns = freq_cols\n freq_df['pos'] = counts_df['pos']\n freq_df = qc.validate_profile_freq(freq_df, fix=True)\n self.freq_df = freq_df\n return\n\n def _input_check(self):\n \"\"\"\n check input parameters for correctness\n \"\"\"\n if self.dataset_df is None:\n raise 
ControlledError(\" Profile freq requires pandas dataframe as input dataframe. Entered df was 'None'.\")\n elif self.dataset_df is not None:\n check(isinstance(self.dataset_df, pd.DataFrame), 'type(df) = %s; must be a pandas dataframe ' % type(self.dataset_df))\n check(pd.DataFrame.equals(self.dataset_df, qc.validate_dataset(self.dataset_df)), ' Input dataframe failed quality control, please ensure input dataset has the correct format of an mpathic dataframe ')\n if self.bin is not None:\n check(isinstance(self.bin, int), 'type(bin) = %s; must be of type int ' % type(self.bin))\n check(self.bin > 0, 'bin = %d must be a positive int ' % self.bin)\n check(isinstance(self.start, int), 'type(start) = %s; must be of type int ' % type(self.start))\n check(self.start >= 0, 'start = %d must be a positive integer ' % self.start)\n if self.end is not None:\n check(isinstance(self.end, int), 'type(end) = %s; must be of type int ' % type(self.end))\n return","sub_path":"pycfiles/mpathic-0.6-cp36-cp36m-macosx_10_7_x86_64/profile_freq.py","file_name":"profile_freq.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"28345207","text":"import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n'''\n高斯模糊\n\n'''\n\n\n# 保证每个通道的像素值大小在0~255之间\ndef clamp(pv):\n if pv > 255:\n return 255\n elif pv < 0:\n return 0\n else:\n return pv\n\n\n# 加高斯噪声\ndef gaussian_noise(image):\n h, w, c = image.shape\n\n # 产生随机数,分别加到图像的三个通道\n for row in range(h):\n for col in range(w):\n # normal(loc=0.0, scale=1.0, size=None),均值,标准差,大小\n s = np.random.normal(0, 20, 3)\n\n b = image[row, col, 0]\n g = image[row, col, 1]\n r = image[row, col, 2]\n\n image[row, col, 0] = clamp(b + s[0])\n image[row, col, 1] = clamp(b + s[1])\n image[row, col, 2] = clamp(b + s[2])\n\n cv.imshow(\"gaussian_noise\", image)\n\n return image\n\n\n# 高斯模糊\ndef gaussian_blur_operation(image):\n # GaussianBlur(src, ksize, sigmaX, dst=None, sigmaY=None, borderType=None)\n # ksize表示卷积核大小,sigmaX/Y表示x,y方向上的标准差,这两者只需一个即可,并且ksize为大于0的奇数\n dst = cv.GaussianBlur(image, (5, 5), 0)\n cv.imshow(\"gaussian_blur\", dst)\n\n\ndef main():\n # 读取图片\n img = cv.imread(\"../code_images/lena.jpg\")\n\n # 创建窗口,窗口尺寸自动调整\n # cv.namedWindow(\"lena\", cv.WINDOW_AUTOSIZE)\n\n # 显示图片\n cv.imshow(\"lena\", img)\n\n t1 = cv.getTickCount()\n\n # 为原图加上高斯噪声\n new_image = gaussian_noise(img)\n\n t2 = cv.getTickCount()\n time = (t2 - t1) / cv.getTickFrequency()\n\n print(\"为图像添加高斯噪声用时: %s ms\" % (time * 1000))\n\n # 高斯模糊\n gaussian_blur_operation(new_image)\n\n # 等待键盘输入\n cv.waitKey(0)\n cv.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"code/case_07_gauss_blur_operation.py","file_name":"case_07_gauss_blur_operation.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"43279507","text":"import os\nimport glob\nimport argparse\nimport numpy as np\nimport random\n\ndef parser():\n args = argparse.ArgumentParser()\n args.add_argument(\"-i\", \"--inputPath\", type=str, help=\"Path the folder directory (Full)\")\n return args.parse_args()\n\ndef moveFile(name_list, new_name_folder):\n if not name_list:\n print(\"cannot move to \", new_name_folder)\n return\n if not os.path.exists(new_name_folder):\n os.makedirs(new_name_folder)\n for file_name in name_list:\n os.rename(file_name, os.path.join(new_name_folder, file_name)) \n\ndef main():\n args =parser()\n if 
args.inputPath is None:\n        print(\"No input path given\")\n        return\n    path = args.inputPath\n    for folder_name in os.listdir(path):\n        path_current = os.path.join(path, folder_name)\n        os.chdir(path_current)\n        name_list = os.listdir(path_current)\n        # name_list = set(name_list)\n        np.random.shuffle(name_list)\n        # print(type(name_list))\n        if len(name_list) > 500:\n            train = name_list[:400]\n            val = name_list[400:500]\n            test = name_list[500:]\n        else:\n            train = name_list[:320]\n            val = name_list[320:400]\n            test = name_list[400:]\n        print(folder_name, \" :\", len(name_list))\n        print(\"train: {}, val: {}, test: {}\".format(len(train), len(val), len(test)))\n        moveFile(train, \"train\")\n        moveFile(val, \"valid\")\n        moveFile(test, \"test\")\n\nmain()","sub_path":"divideFolder.py","file_name":"divideFolder.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"181849508","text":"# Copyright (C) 2015 Oleh Prypin \n# \n# This file is part of CrSFML.\n# \n# This software is provided 'as-is', without any express or implied\n# warranty. In no event will the authors be held liable for any damages\n# arising from the use of this software.\n# \n# Permission is granted to anyone to use this software for any purpose,\n# including commercial applications, and to alter it and redistribute it\n# freely, subject to the following restrictions:\n# \n# 1. The origin of this software must not be misrepresented; you must not\n#    claim that you wrote the original software. If you use this software\n#    in a product, an acknowledgement in the product documentation would be\n#    appreciated but is not required.\n# 2. Altered source versions must be plainly marked as such, and must not be\n#    misrepresented as being the original software.\n# 3. 
This notice may not be removed or altered from any source distribution.\n\n\nimport sys\nimport re\nimport textwrap\nimport itertools\nimport collections\n\nfrom pycparser import parse_file, c_ast, c_generator\n\n\n\nwith open('docs_gen.txt', encoding='utf-8') as f:\n docs = f.read().strip().split('\\n--------\\n')\n\n\ndef rename_sf(name):\n if name is None:\n return name\n if not name.startswith('sf'):\n raise ValueError(name)\n return name[2:]\n\ndef rename_type(name, var=''):\n orname = name\n m = re.match('^(.+) *\\[([0-9]+)\\]$', name)\n if m:\n name, arrsize = m.groups()\n arrsize = int(arrsize)\n else:\n arrsize = None\n name = re.sub(r'\\bconst\\b', '', name).strip()\n if name.startswith('sf') and ('Int' in name or 'Uint' in name) and 'Rect' not in name:\n name = name[2:].replace('i', 'I')\n ptr = name.count('*')\n name = name.replace('*', '').strip()\n name = {\n 'char': 'LibC::Char',\n 'int': 'Int32',\n 'size_t': 'LibC::SizeT',\n 'sfBool': 'CSFML::Bool',\n 'unsigned int': 'Int32',\n 'unsigned short': 'UInt16',\n 'float': 'Float32',\n 'double': 'Float64',\n 'sfVector2u': 'sfVector2i',\n }.get(name, name)\n if ptr and 'sf' in name:\n if rename_sf(name) in classes:\n ptr -= 1\n try:\n name = rename_sf(name)\n except ValueError:\n pass\n name = name[0].upper()+name[1:]+'*'*ptr\n if arrsize:\n name = '{}[{}]'.format(name, arrsize)\n return name\n\ndef rename_identifier(name):\n #name = {\n #'object': 'obj',\n #'type': 'kind',\n #'bind': 'bindGL',\n #}.get(name, name)\n #name = name.replace('String', 'Str').replace('string', 'str')\n name = re.sub('[A-Z](?![A-Z]|$)', lambda m: '_'+m.group(0).lower(), name)\n name = re.sub('[A-Z]+', lambda m: '_'+m.group(0).lower()+'_', name)\n return name.replace('__', '_').replace('._', '.').strip('_')\n\ndef common_start(strings):\n if not strings:\n return ''\n first = strings[0]\n for i in range(1, len(first)+1):\n if not all(s[:i]==first[:i] for s in strings):\n return first[:i-1]\n return first\n\n\ndef get_doc(indent=0):\n global doc\n if doc is None:\n return None\n\n comments = ''\n\n for line in doc.splitlines():\n start = '# ' if line else '#'\n comments += '\\n' + indent * ' ' + start + line\n\n doc = None\n\n return comments.strip()\n\nenum_relations = {\n 'JoystickAxis': 'Joystick',\n 'MouseButton': 'Mouse',\n 'MouseWheel': 'Mouse',\n 'BlendEquation': 'BlendMode',\n 'EventType': 'Event',\n 'ContextAttribute': 'ContextSettings',\n 'BlendFactor': 'BlendMode',\n 'KeyCode': 'Keyboard',\n 'TextStyle': 'Text',\n 'SensorType': 'Sensor',\n 'SoundStatus': 'SoundSource',\n 'PrimitiveType': '',\n 'WindowStyle': '',\n 'FtpTransferMode': 'Ftp',\n 'FtpStatus': 'FtpResponse',\n 'HttpMethod': 'HttpRequest',\n 'HttpStatus': 'HttpResponse',\n 'SocketStatus': 'Socket',\n}\ndef handle_enum(name, items):\n if name is None:\n for name, value in items:\n name = rename_sf(name)\n lib('{} = {}'.format(name, value) if value is not None else name)\n return\n \n nitems = [name for name, value in items]\n c = len(common_start(nitems))\n nitems = [nitem[c:] for nitem in nitems]\n nitems = list(zip(nitems, (value for name, value in items)))\n \n def subname(name):\n return {'ButtonCount': 'Count', 'DefaultStyle': 'Default'}.get(name, name)\n\n nname = rename_sf(name)\n if all(value is not None for name, value in nitems):\n nitems.sort(key=lambda kv: int(kv[1]))\n d = get_doc()\n if d: lib(d)\n s = 'enum {}'.format(nname)\n if nname in ['WindowStyle', 'TextStyle', 'ContextAttribute']:\n lib('@[Flags]')\n s += ': UInt32'\n lib(s)\n lib(*(textwrap.wrap(', '.join(\n ('{} 
= {}'.format(subname(name), value) if value is not None else subname(name))\n for name, value in nitems\n ), 78, initial_indent=' ', subsequent_indent=' ')))\n lib('end')\n \n if d: obj(nname+'ALIAS', d, '#')\n for name, value in nitems:\n cls = enum_relations[nname] or 'SF'\n obj(nname+'ALIAS', '# * `{cls}`::{name}'.format(**locals()))\n obj(nname+'ALIAS', 'alias {0} = CSFML::{0} # enum'.format(nname))\n for name, value in nitems:\n orcls = cls = enum_relations[nname]\n if cls and cls in structs:\n cls = 'CSFML::'+cls\n if cls and cls not in objs[cmodule]:\n obj(cls, ('struct ' if orcls in structs else 'class ')+cls)\n sub = subname(name)\n suffix = '.value' if name.endswith('Count') else ''\n obj(cls, '{name} = CSFML::{nname}::{sub}{suffix}'.format(**locals()))\n\nstructs = {'Event': None, 'BlendMode': None, 'ContextSettings': None}\ndef handle_struct(name, items):\n if name=='sfVector2u':\n return\n name = rename_type(name)\n structs[name] = [rename_identifier(n) for t, n in items]\n d = get_doc()\n if d: lib(d)\n lib('struct {}'.format(name))\n \n if d: obj(name+'ALIAS', d, '#')\n for t, n in items:\n t = rename_type(t)\n if t=='UInt32' and n=='unicode':\n t = 'Char'\n elif t=='UInt32' and n=='attributeFlags':\n t = 'ContextAttribute'\n if t in ['Vector2f', 'Vector2i']:\n t = 'Vector2'\n n = rename_identifier(n)\n obj(name+'ALIAS', '# * {n} : `{t}`'.format(**locals()))\n obj(name+'ALIAS', '#', \"# Do not use `.new`; `SF` module may contain constructor methods for this struct.\")\n obj(name+'ALIAS', 'alias {0} = CSFML::{0} # struct'.format(name))\n \n for t, n in items:\n rt = rename_type(t)\n rn = rename_identifier(n)\n\n special = ''\n if 'Vector2' in t or '*' in t and 'void' not in t and '*' not in rt:\n special = '_'\n\n if rt=='UInt32' and n=='unicode':\n rt = 'Char'\n elif rt=='UInt32' and n=='attributeFlags':\n rt = 'ContextAttribute'\n lib(' {}{} : {}'.format(rename_identifier(n), special, rt))\n\n if special:\n if name and name not in objs[cmodule]:\n obj(name, 'struct CSFML::'+name)\n \n obj(name, 'def {}'.format(rn))\n if 'Vector2' in t:\n obj(name, ' SF.vector2({}_)'.format(rn))\n else:\n obj(name, ' SF::{}.wrap_ptr?({}_)'.format(rt, rn))\n obj(name, 'end')\n \n obj(name, 'def {}=(value)'.format(rn))\n if 'Vector2' in t:\n obj(name, ' self.{}_ = SF.{}(value)'.format(rn, rt.lower()))\n else:\n obj(name, ' self.{}_ = value ? value.to_unsafe : Pointer(Void).null as CSFML::{}'.format(rn, rt))\n obj(name, 'end')\n \n lib('end')\n\ndef handle_union(name, items):\n name = rename_type(name)\n structs[name] = None\n d = get_doc()\n if d: lib(d)\n lib('union {}'.format(name))\n \n for t, n in items:\n t = rename_type(t)\n lib(' {} : {}'.format(rename_identifier(n), t))\n lib('end')\n\n if d: obj(name+'ALIAS', d)\n obj(name+'ALIAS', 'alias {0} = CSFML::{0} # union'.format(name))\n\n\nclasses = set()\nreimplemented = {'Shape', 'InputStream', 'SoundStreamChunk'}\ndef handle_class(name):\n pname = rename_sf(name)\n classes.add(pname)\n d = get_doc()\n if d: lib(d)\n lib('type {0} = Void*'.format(pname), '')\n \n obj(pname, 'class {}'.format(pname))\n if d: obj(pname, d)\n obj(pname, 'include Wrapper(CSFML::{})'.format(pname), '')\n\n\ndef handle_function(main, params, alias=None):\n public = True\n orparams = params\n ftype, ofname = main\n nfname = rename_sf(ofname)\n fname = rename_identifier(rename_sf(ofname))\n nfname = re.sub(r'(.+)_create(Unicode)?$', r'\\1_initialize', nfname)\n nfname = re.sub(r'(.+?)_?(? 
{}'.format(rename_sf(fname), params, rename_type(ftype)))\n\n\n\ncgen = c_generator.CGenerator()\n\ndef type_to_str(node):\n ptrs = 0\n while isinstance(node, c_ast.PtrDecl):\n node = node.type\n ptrs += 1\n return ' '.join(node.type.names)+'*'*ptrs, node.declname\n\ndef gen_expr_to_str(node):\n return cgen.visit(node)\n\ndef gen_type_to_str(node):\n name = None\n try: name = node.name\n except AttributeError: pass\n try: name = node.declname\n except AttributeError: pass\n typ = gen_expr_to_str(node)\n if name:\n typ = ' '.join(re.sub(r'\\b{}\\b'.format(name), '', typ).split())\n return typ, name\n\n\ndef _debug(node):\n try:\n for k, v in node.__dict__.items():\n if k.startswith('_'):\n continue\n if isinstance(v, list) and len(v)>0:\n yield '{} = ['.format(k)\n for it in v:\n yield ' {!r} ('.format(it)\n for l in _debug(it):\n yield textwrap.indent(l, ' ')\n yield ')'\n yield ']'\n else:\n yield '{} = {!r} ('.format(k, v)\n for l in _debug(v):\n yield textwrap.indent(l, ' ')\n yield ')'\n except Exception as e:\n pass\ndef debug(node):\n class root:\n pass\n root = root()\n root.root = node\n r = '\\n'.join(_debug(root))[7:]\n r = re.sub(r' \\(\\n *\\)', '', r)\n r = re.sub(r' object at 0x[0-9a-f]+', '', r)\n return r\n\nclass Visitor(c_ast.NodeVisitor):\n def visit_FuncDecl(self, node):\n try:\n func_type, func_name = type_to_str(node.type)\n func_params = [gen_type_to_str(param_decl) for param_decl in node.args.params] if node.args else []\n if len(func_params)==1 and func_params[0][0]=='void':\n func_params = []\n handle_function((func_type, func_name), func_params)\n except AttributeError as e:\n print(func_name, repr(e), file=sys.stderr)\n\n def visit_Typedef(self, node):\n if isinstance(node.type.type, (c_ast.Enum, c_ast.Struct)):\n node.type.type.my_name = node.type.declname\n if type(node.type.type).__name__=='Union':\n name = node.name\n node = node.type.type\n if node.decls:\n items = [gen_type_to_str(decl) for decl in node.decls]\n handle_union(name, items)\n\n #print(debug(node))\n try:\n r = (\n (gen_type_to_str(node.type.type.type.type)[0], node.name),\n [gen_type_to_str(p) for p in node.type.type.args.params]\n )\n except Exception as e:\n pass\n else:\n handle_functiondef(*r)\n return\n\n self.generic_visit(node)\n\n def visit_Enum(self, node):\n try:\n name = node.my_name\n except AttributeError:\n name = node.name\n if node.values:\n items = [\n (en.name, (gen_expr_to_str(en.value) if en.value else None))\n for en in node.values.enumerators\n ]\n handle_enum(name, items)\n else:\n if name.startswith('doc'):\n global doc\n doc = docs[int(name[3:])-1].strip()\n doc = re.sub(r'(Example:\\s+)?\\\\code(.|\\n)+?\\\\endcode\\n', r'', doc)\n doc = re.sub(r'\\\\brief ', r'', doc)\n doc = re.sub(r'\\\\param', r'*Arguments*:\\n\\n\\\\param', doc, 1)\n doc = re.sub(r'\\\\param ([a-zA-Z0-9_]+)', r'* `\\1`: ', doc)\n doc = re.sub(r'\\\\li ', r'- ', doc)\n doc = re.sub(r'\\\\a ([a-zA-Z0-9_]+)', r'`\\1`', doc)\n doc = re.sub(r'\\\\return ', r'*Returns*: ', doc)\n doc = re.sub(r'\\bsf([A-Z])', r'\\1', doc)\n doc = re.sub(r'\\b([a-z][a-z0-9]*[A-Z][a-zA-Z0-9]+)\\b', lambda m: rename_identifier(m.group(0)), doc)\n doc = re.sub(r' +', ' ', doc)\n else:\n global cmodule\n cmodule = name.split('_')[1].lower()\n #lib('\\n#--- {} ---#'.format(name.replace('_', '/')))\n\n self.generic_visit(node)\n\n def visit_Struct(self, node):\n try:\n name = node.my_name\n except AttributeError:\n name = node.name\n if node.decls:\n items = [gen_type_to_str(decl) for decl in node.decls]\n 
handle_struct(name, items)\n else:\n handle_class(name)\n\n self.generic_visit(node)\n\n\n\nlibs = collections.defaultdict(lambda: [[]])\ndef lib(*args):\n libs[cmodule].extend(itertools.chain.from_iterable(a.splitlines() or [''] for a in args))\n if not args or args[0].startswith('end'):\n libs[cmodule].append('')\ndef lib_pre(*args):\n libs[cmodule][0].extend(itertools.chain.from_iterable(a.splitlines() or [''] for a in args))\n if not args or args[0].startswith('end'):\n libs[cmodule][0].append('')\nobjs = collections.defaultdict(collections.OrderedDict)\ndef obj(cls, *args):\n try:\n lst = objs[cmodule][cls]\n except KeyError:\n objs[cmodule][cls] = lst = []\n lst += itertools.chain.from_iterable(a.splitlines() or [''] for a in args)\n\nast = parse_file('headers_gen.h')\nVisitor().visit(ast)\n\ndeps = {'system': [], 'window': ['system'], 'graphics': ['system', 'window'], 'audio': ['system'], 'network': ['system']}\nfor mod, lines in libs.items():\n with open('{}_lib.cr'.format(mod), 'w', encoding='utf-8') as f:\n f.write('\\n'.join(lines[0]))\n f.write('require \"./common_lib\"\\n')\n for d in deps[mod]:\n f.write('require \"./{}_lib\"\\n'.format(d))\n f.write('\\n@[Link(\"csfml-{}\")]\\n'.format(mod))\n f.write('# :nodoc:\\n')\n f.write('lib CSFML\\n\\n')\n f.write('\\n'.join(((' ' + line) if line else '') for line in lines[1:]))\n f.write('\\nend\\n')\nfor mod, classes in objs.items():\n with open('{}_obj.cr'.format(mod), 'w', encoding='utf-8') as f:\n f.write('require \"./{}_lib\"\\n'.format(mod))\n f.write('require \"./common_obj\"\\n\\n')\n f.write('module SF\\n extend self\\n\\n')\n for cls, lines in classes.items():\n if not cls:\n continue\n if cls.endswith('ALIAS'):\n cls = cls[:-5]\n if cls in reimplemented:\n continue\n\n indent = 2\n\n for i, line in enumerate(lines):\n if line:\n f.write(' ' * indent + line + '\\n')\n else:\n f.write('\\n')\n\n if indent == 2 and not line.startswith('#'):\n ii = i\n indent = 4\n\n if not lines[ii].startswith('alias'):\n f.write(' end\\n')\n f.write('\\n')\n if '' in classes:\n f.write('\\n'.join(' ' + line if line else '' for line in classes['']))\n\n f.write('\\nend\\n')\n","sub_path":"generate/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":24797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"2821713","text":"import discord\nfrom settings import TOKEN, PREFIX\nfrom handlers.message_handler_container import MessageHandlerContainer\n\nmessage_handlers = MessageHandlerContainer(prefix=PREFIX)\n\n@message_handlers.create_handler('say (.*)')\nasync def talk(context, user_message, msg):\n await user_message.delete()\n await user_message.channel.send(msg)\n\n\nclass MyClient(discord.Client):\n async def on_ready(self):\n print(f'Logged on as {self.user}!')\n\n async def on_message(self, message):\n await message_handlers.run_handlers(self, message)\n\n\nclient = MyClient()\nclient.run(TOKEN)\n\n","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"411447813","text":"import pandas as pd\nimport numpy as np\nfrom math import sin, cos, sqrt, atan2, radians\nimport sys\n\ncuisine,x,y = sys.argv[1],sys.argv[2],sys.argv[3]\nx = float(x)\ny = float(y)\n\ndef get_distance(lat1, lon1, lat2, lon2):\n R = 6373.0\n latr = radians(lat1)\n lonr = radians(lon1)\n laty = radians(lat2)\n lony = radians(lon2)\n dlon = lony - lonr\n dlat 
= laty - latr\n    a = sin(dlat / 2)**2 + cos(latr) * cos(laty) * sin(dlon / 2)**2\n    c = 2 * atan2(sqrt(a), sqrt(1 - a))\n    distance = R * c\n    return distance\n\ndef Average_rating():\n    shops=pd.read_csv(\"zomato.csv\",encoding='ISO-8859-1')\n    shops=shops[shops['Country Code']==1]\n    shops=shops.drop(['Restaurant ID','Country Code','City','Address','Locality Verbose','Locality','Currency'],axis=1)\n    shops=shops.drop(['Rating color','Rating text','Votes'],axis=1)\n    shops=shops.drop(['Restaurant Name'],axis=1)\n    shops=shops[shops['Cuisines'].str.contains(cuisine)]\n    shops=shops.drop(['Cuisines'],axis=1)\n    longArr = shops['Longitude']\n    longArr=np.array(longArr)\n    latArr = shops['Latitude']\n    latArr=np.array(latArr)\n    LatLonglist=[]\n    for i in range(len(longArr)):\n        dist=get_distance(x,y,latArr[i],longArr[i])\n        if dist < 10:\n            LatLonglist.append([latArr[i],longArr[i]])\n    latList=[]\n    for i in range(len(LatLonglist)):\n        latList.append(LatLonglist[i][0])\n    longList=[]\n    for i in range(len(LatLonglist)):\n        longList.append(LatLonglist[i][1])\n    rslt_df = shops[shops['Latitude'].isin(latList)]\n    rslt_df = rslt_df[rslt_df['Longitude'].isin(longList)]\n    print(rslt_df[\"Aggregate rating\"].mean())\n\n\n\nAverage_rating()","sub_path":"node/src/ML/Average_rating.py","file_name":"Average_rating.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"647273576","text":"\"\"\"\nFirst row contains numbers 1 <= W <= 10e4 and 1 <= n <= 300 that shows knapsack\nweight limit and number of golden bars. Items contain n weights of golden bars.\nFind maximum weight that one can bring in the knapsack.\n\"\"\"\n\ndef calc_max_value(W, items):\n    d = [[0] * (len(items)+1) for _ in range(W+1)]\n    for w in range(W+1):\n        d[w][0] = 0\n    for i in range(len(items)+1):\n        d[0][i] = 0\n    for i in range(1, len(items)+1):\n        for w in range(1, W+1):\n            d[w][i] = d[w][i-1]\n            if items[i-1] <= w:\n                d[w][i] = max(d[w][i], d[w-items[i-1]][i-1]+items[i-1])\n    return d[-1][-1]\n\n\nif __name__ == \"__main__\":\n    W = 10\n    items = [1, 4, 8]\n    print(calc_max_value(W, items))\n","sub_path":"knapsack_golden_bars.py","file_name":"knapsack_golden_bars.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"271725463","text":"def ispermutation(string, perm):\n    if len(string) != len(perm):\n        return False\n    arr = [0] * 26\n    arrperm = [0] * 26\n    for i in range(len(string)):\n        arr[ord(string[i])-ord('a')] += 1\n        arrperm[ord(perm[i])-ord('a')] += 1\n\n    return arr == arrperm\n\ndef main():\n    string1, string2 = input().strip().split()\n    print(ispermutation(string1, string2))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Chapter 1/1.2.py","file_name":"1.2.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"206060955","text":"#!/usr/bin/env python\r\n#-*-coding: utf-8 -*-\r\n#NOM......get_filesize_humanized\r\n#EXT.......py\r\n#MAJOR....1\r\n#MINOR....1\r\n#DESCR....get file size\r\n#USAGE....GetHumanReadable(size,precision=2) => str(float)\r\n\r\nimport os\r\n\r\ndef getSize(filename):\r\n\tst = os.stat(filename)\r\n\treturn st.st_size\r\n\r\ndef get_size(filename):\r\n\treturn os.path.getsize(filename)\r\n\t\r\n\t\r\ndef GetHumanReadable(size,precision=2):\r\n    suffixes=['B','KB','MB','GB','TB']\r\n    suffixIndex = 0\r\n    while size > 1024:\r\n        
suffixIndex += 1 #increment the index of the suffix\r\n size = size/1024.0 #apply the division\r\n return \"%.*f %s\"%(precision,size,suffixes[suffixIndex])\r\n\t\r\n\r\n\t\r\nfile = 'test_getsize.txt'\r\n\r\nwith open(file, 'w') as flux:\r\n\tflux.write('%s' % ('x' * 2**18)) \r\n\tflux.close()\r\n\r\nfile2 = \"F:\\\\LABO-F1\\\\test-file-size.mkv\"\r\n\r\ntmp = [file, file2]\r\n\r\nfor file in tmp:\r\n\tprint('Taille du fichier : %s' % file)\r\n\tprint('avec os.stat : %s' % GetHumanReadable(getSize(file), 2))\r\n\tprint('avec os.path.getsize: %s' % GetHumanReadable(get_size(file), 2))\r\n\t\r\n\t\r\n'''\r\nOUTPUT\r\n------\r\nTaille du fichier : test_getsize.txt\r\navec os.stat : 256.00 KB\r\navec os.path.getsize: 256.00 KB\r\nTaille du fichier : F:\\\\LABO-F1\\\\test-file-size.mkv\r\navec os.stat : 3.79 GB\r\navec os.path.getsize: 3.79 GB\r\n'''","sub_path":"file-size/get_filesize_humanized_v1.1.py","file_name":"get_filesize_humanized_v1.1.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"272745026","text":"import time\nimport json\nimport numpy\nimport base64\nimport cv2\n\nfrom flask import Flask, Response, request\nfrom view.black_box import BlackBox\n\nfrom utils import constants as C\nfrom utils.app_log import get_logger\n\napplication = Flask(__name__)\nlogger = get_logger(\"application\")\n\nblackbox = BlackBox()\n\ndef dump_json(passed_json):\n\treturn json.dumps(passed_json, indent=4, sort_keys=True, default=str)\n\ndef return_failed_response(ex):\n\treturn Response(dump_json({\"error\": ex}), status=C.STATUS_ERROR, mimetype='application/json')\n\n@application.route(\"/\", methods = [\"GET\", \"POST\"])\ndef check_everything_is_ok():\n\treturn Response(dump_json({\"hello\": \"At this moment, everything is working fine\"}),\n\t\t\t\t\tstatus=C.STATUS_OK, mimetype='application/json')\n\n@application.route(\"/api/human_detection\", methods = [\"GET\"])\ndef check_everything_is_ok_2():\n\treturn Response(dump_json({\"error\": \"Make POST requests\"}),\n\t\t\t\t\tstatus=C.STATUS_OK, mimetype='application/json')\n\n@application.route(\"/api/human_detection\", methods = [\"POST\"])\ndef run_the_human_detector():\n\ttry:\n\t\tdata = json.loads(request.data)\n\t\tcamera_id = data.get(\"camera_id\")\n\t\ttask_id = data.get(\"task_id\")\n\n\t\tframe = data.get(\"frame\")\n\t\tpoints = data.get(\"points\")\n\t\tframe_shape = data.get(\"frame_shape\")\n\n\t\tassert type(camera_id) == str or type(camera_id) == int, 'Type of variable \`camera_id\` not valid. Must be string or int.'\n\t\tassert type(task_id) == str or type(task_id) == int, 'Type of variable \`task_id\` not valid. Must be string or int.'\n\t\tassert type(frame) == str, 'Type of variable \`frame\` not valid. Must be string.'\n\t\tassert type(points) == list and len(points) == 4, 'Variable \`points\` not valid. Must be list with 4 elem.'\n\t\tassert type(frame_shape) == list and len(frame_shape) == 3, 'Variable \`frame_shape\` not valid. 
Must be list with 3 elem.'\n\n\t\tprint(f\"[APP] Received: camera_id: {camera_id} && task_id: {task_id}\")\n\n\t\tframe = numpy.frombuffer(base64.b64decode(frame), dtype=numpy.uint8)\n\t\tframe = cv2.imdecode(frame, cv2.IMREAD_COLOR)\n\t\tframe = frame.reshape(frame_shape)\n\n\t\tresponse_json = {\n\t\t\t\"camera_id\": camera_id,\n\t\t\t\"task_id\": task_id,\n\t\t\t\"is_alert\": False,\n\t\t\t\"objects\": []\n\t\t}\n\n\t\tresponse_in_json = blackbox.receiveFrame(camera_id, frame, points)\n\t\tresponse_json.update(response_in_json)\n\n\t\treturn Response(dump_json(response_json), status=C.STATUS_OK, mimetype='application/json')\n\n\texcept Exception as ex:\n\t\tlogger.error(f\"[APP] Error has occurred. Exception: {ex}\")\n\t\treturn return_failed_response(ex)\n\nif __name__ == \"__main__\":\n\t# print the banner before run(), which blocks until the server stops\n\tprint(f\"Running on http://{C.HOST}:{C.PORT}/ (Press CTRL+C to quit)\")\n\tapplication.run(host=C.HOST, port=C.PORT, debug=False)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"70033270","text":"__author__ = 'felipe'\n\nfrom database import Base, Trend\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('sqlite:///untitled1.db')\n\n\nBase.metadata.bind = engine\nDBSession = sessionmaker()\nDBSession.bind = engine\nsession = DBSession()\n\n # Make a query to find all Trends in the database\n#session.query(Trend).all()\n\n\n # Return the first Trend from all Trends in the database\ntrend = session.query(Trend).first()\nprint(trend.word)\nprint(trend.related)\nprint(trend.count)\n\n\n","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"369871918","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab\n\nfrom faker.providers import BaseProvider\n\nclass EventProvider(BaseProvider):\n all_event_types = ['visit', 'view', 'cart', 'list', 'like', 'purchase', 'refund']\n\n def event_type(self):\n return self.random_element(self.all_event_types)\n\nclass AWSRegionProvider(BaseProvider):\n aws_regions = [\n 'us-east-2',\n 'us-east-1',\n 'us-west-1',\n 'us-west-2',\n 'af-south-1',\n 'ap-east-1',\n 'ap-south-1',\n 'ap-northeast-3',\n 'ap-northeast-2',\n 'ap-southeast-1',\n 'ap-southeast-2',\n 'ap-northeast-1',\n 'ca-central-1',\n 'eu-central-1',\n 'eu-west-1',\n 'eu-west-2',\n 'eu-south-1',\n 'eu-west-3',\n 'eu-north-1',\n 'me-south-1',\n 'sa-east-1',\n 'us-gov-east-1',\n 'us-gov-west-1'\n ]\n\n def aws_region(self):\n return self.random_element(self.aws_regions)\n\n\nif __name__ == '__main__':\n import argparse\n import datetime\n import itertools\n import random\n import sys\n import time\n\n import boto3\n from faker import Faker\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--region-name', action='store', default='us-east-1',\n help='aws region name (default: us-east-1)')\n parser.add_argument('--stream-name', help='The name of the stream to put the data record into.')\n parser.add_argument('--max-count', default=10, type=int, help='The max number of records to put.')\n parser.add_argument('--dry-run', action='store_true')\n\n options = parser.parse_args()\n\n fake = Faker()\n fake.add_provider(EventProvider)\n fake.add_provider(AWSRegionProvider)\n fake.set_arguments('customer_id_format', 
{'string_format': '%###########'})\n fake.set_arguments('devices', {'elements': ['pc', 'mobile', 'tablet']})\n fake.set_arguments('event_start_datetime',\n {'start_datetime': datetime.datetime.utcnow().replace(minute=0, second=0, microsecond=0)})\n\n DATA_COLUMNS = {\n 'type': {\n 'device': 'random_element:devices',\n 'event': 'event_type'\n },\n 'customer_id': 'pystr_format:customer_id_format',\n 'event_timestamp': 'unix_time:event_start_datetime',\n 'region': 'aws_region'\n }\n\n if not options.dry_run:\n firehose_client = boto3.client('firehose', region_name=options.region_name)\n\n predicate = (lambda x: x < options.max_count) if options.max_count >= 0 else (lambda x: x > options.max_count)\n for cnt in itertools.takewhile(predicate, itertools.count()):\n record = fake.json(data_columns=DATA_COLUMNS, num_rows=1)\n\n if options.dry_run:\n print(record, file=sys.stderr)\n else:\n res = firehose_client.put_record(\n DeliveryStreamName=options.stream_name,\n Record={\n 'Data': record\n }\n )\n\n if (cnt + 1) % 100 == 0:\n print('[INFO] {} records are processed'.format(cnt+1), file=sys.stderr)\n\n if res['ResponseMetadata']['HTTPStatusCode'] != 200:\n print(res, file=sys.stderr)\n time.sleep(random.choices([0.01, 0.03, 0.05, 0.07, 0.1])[-1])\n","sub_path":"kinesis-data-firehose/dynamic-partitioning/src/main/python/gen_fake_firehose_data.py","file_name":"gen_fake_firehose_data.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"108882413","text":"from setuptools import setup, find_packages\n\ndata = {\n 'name': 'tvcapi',\n 'version': '0.1',\n 'description': 'TVC wrapper',\n 'author': 'Isaac Sánchez',\n 'author_email': 'igsaac@gmail.com',\n 'license': 'MIT',\n 'packages': find_packages(),\n 'install_requires':\n [\n 'lxml',\n 'requests',\n 'selenium==3.8.0',\n ],\n 'zip_safe': False,\n}\n\nsetup(**data)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"288555100","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom poliastro.plotting import plot\nfrom astropy import units as u\n\nfrom poliastro.bodies import Earth, Mars, Sun\nfrom poliastro.twobody import Orbit\n\nfrom io import BytesIO\nimport base64\n\nplt.style.use(\"seaborn\") # Recommended\n\n# Data for Mars at J2000 from JPL HORIZONS\na = 1.523679 * u.AU\necc = 0.093315 * u.one\ninc = 1.85 * u.deg\nraan = 49.562 * u.deg\nargp = 286.537 * u.deg\nnu = 23.33 * u.deg\n\nss = Orbit.from_classical(Sun, a, ecc, inc, raan, argp, nu)\n\n# Plot and save to variable\nplot(ss)\nploteBytes = BytesIO()\nplt.savefig(ploteBytes, format='png')\nplotBase64 = base64.encodebytes(ploteBytes.getvalue())  # encodebytes: the encodestring alias was removed in Python 3.9\n\n# Handler for a read (GET) request that returns the orbit\n# def returnOrbitAs(coordinateSystemType = \"vectors\"):\ndef returnOrbitAsVectors():\n \"\"\"\n This function responds to a request for /api/twobody.orbit\n with the orbit object\n\n :return: orbit object\n \"\"\"\n ORBIT_FROM_VECTORS = {\n \"orbitstring\": str(ss),\n \"attractor\": str(ss.attractor),\n \"r\": {\n \"x\": ss.r.value[0],\n \"y\": ss.r.value[1],\n \"z\": ss.r.value[2],\n },\n \"v\": {\n \"x\": ss.v.value[0],\n \"y\": ss.v.value[1],\n \"z\": ss.v.value[2],\n },\n \"epoch\": ss.epoch.iso,\n \"plotbase64\": plotBase64.decode('ascii')\n }\n return 
ORBIT_FROM_VECTORS\n","sub_path":"twobody/orbit.py","file_name":"orbit.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"576600938","text":"from .units import *\n\ndef run(test):\n log = []\n log += ['Тест \"%s\"' % test.name]\n log += test.run()\n return '\\n'.join(log)\n\ndef run_all():\n res = []\n res += [run(tips)]\n res += [run(nyaa)]\n res += [run(embeds)]\n return res","sub_path":"Kurisu/salieri/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"416788603","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nFilter IPTV m3u playlists according to customized criteria.\n\nFile: main.py\nAuthor: huxuan\nEmail: i(at)huxuan.org\n\"\"\"\nimport argparse\n\nfrom iptvtools import __version__\nfrom iptvtools.config import Config\nfrom iptvtools.constants import defaults\nfrom iptvtools.constants import helps\nfrom iptvtools.models import Playlist\n\n\ndef parse_args():\n \"\"\"Argument parser.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--min-height', default=defaults.MIN_HEIGHT, type=int,\n help=helps.MIN_HEIGHT)\n parser.add_argument('-c', '--config', default=defaults.CONFIG,\n help=helps.CONFIG)\n parser.add_argument('-i', '--input', action='append', default=[],\n help=helps.INPUT)\n parser.add_argument('-I', '--interval', default=defaults.INTERVAL,\n type=int, help=helps.INTERVAL)\n parser.add_argument('-o', '--output', default=defaults.OUTPUT,\n help=helps.OUTPUT)\n parser.add_argument('-r', '--replace-group-by-source', action='store_true',\n help=helps.REPLACE_GROUP_BY_SOURCE)\n parser.add_argument('-t', '--template', action='append', default=[],\n help=helps.TEMPLATE)\n parser.add_argument('-T', '--timeout', default=defaults.TIMEOUT, type=int,\n help=helps.TIMEOUT)\n parser.add_argument('-u', '--udpxy', default=defaults.UDPXY,\n help=helps.UDPXY)\n parser.add_argument('-v', '--version', action='version',\n version=__version__)\n return parser.parse_args()\n\n\ndef main():\n \"\"\"Filter m3u playlists.\"\"\"\n args = parse_args()\n if not args.input:\n args.input = [defaults.INPUT]\n Config.init(args.config)\n playlist = Playlist()\n playlist.parse(args)\n playlist.filter(args)\n open(args.output, 'w', encoding='utf-8').write(playlist.export(args))\n print('Invalid Urls:')\n print('\\n'.join(sorted(playlist.invalid_urls)))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"iptvtools/iptv_filter.py","file_name":"iptv_filter.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"400111741","text":"# Loop over the list ['Вася', 'Маша', 'Петя', 'Валера', 'Саша', 'Даша']\n# until you encounter the name 'Валера'. 
When you find it, print 'Валера нашелся'.\n\nnames = ['Вася', 'Маша', 'Петя', 'Валера', 'Саша', 'Даша']\nwhile True:\n if names[0] == 'Валера':\n print('Валера нашелся')\n break\n else:\n print('{} <-- Это не Валера, продолжайте искать его...'.format(names[0]))\n names.pop(0)\n \n# Rewrite the previous example as a function find_person(name)\n# that searches for the name in the list\n\nnames = ['Вася', 'Маша', 'Петя', 'Валера', 'Саша', 'Даша']\n\ndef find_person(name):\n while True:\n if names[0] == name:\n return '{} нашелся'.format(name)\n else:\n print('{} <-- Это не {}, продолжайте искать его...'.format(names[0], name))\n names.pop(0)\n\nprint(find_person('Саша'))\n\n# Write a function ask_user() that uses input() to ask the user “Как дела?” until they answer “Хорошо”\n# Create a dictionary of the form \"question\": \"answer\", e.g.: {\"Как дела\": \"Хорошо!\", \"Что делаешь?\": \"Программирую\"} and so on\n# Extend ask_user() so that when the user enters a question that exists in the dictionary, the program gives the matching answer. For example:\n# User: Что делаешь?\n# Program: Программирую\n\n# def ask_user():\n# while True:\n# user_answer = input('Как у тебя дела?: ')\n# if user_answer == 'Хорошо':\n# return 'Это замечатьно'\n# else:\n# print('Такой ответ не приму...')\n \n\n# print(ask_user())\n\n\ndict_questions = {\n 'Как дела?': 'Хорошо!',\n 'Что делаешь?': 'Программирую',\n 'Как тебя зовут?': 'Бот',\n 'Сколько тебе лет?': 20\n}\n\ndef ask_user():\n while True:\n user_answer = input('Задай свой вопрос:\\n')\n if user_answer in dict_questions:\n return 'Искусственный интеллект: ' + dict_questions[user_answer]\n else:\n print('Искусственный интеллект: Я не знаю ответ')\n\nif __name__ == '__main__':\n print(ask_user())\n","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"275633120","text":"import random\nimport math\nimport numpy as np\n\ndef main():\n smaller = 0\n larger = 0\n 
print(\"\"\"What is the probability of a randomly chosen chord in a circle having a length\ngreater than the radius? There are 3 different (correct!) methods of calculating this probability,\nand each of the 3 methods provides a different answer. This program attempts to simulate\nthe three different methods of creating a random chord in order to experimentally determine\nthe probability.\n\nChoice 1: Two random points on a unit circle are chosen using trigonometric functions and the distance between\nthem is calculated.\n\nChoice 2: A randomly chosen point inside the circle is defined as the midpoint of a chord. The program then\ncalculates an endpoint of the chord from the midpoint and uses these two points to calculate the length\nof the chord.\n\nChoice 3: A radius is randomly selected from the circle. A point on that radius is randomly chosen as the midpoint of a chord.\nThe program then calculates an endpoint of the chord given that midpoint and uses these two points to determine the length\nof the chord.\"\"\")\n\n # Get user choice for how to pick the random chords: accepts only 1, 2, or 3.\n while True:\n solve_method = GetInt(\"Enter 1, 2, or 3 to select a method: \")\n if solve_method not in [1, 2, 3]:\n print(\"Please select a valid method.\")\n continue\n else:\n break\n\n # Prompt the user for the number of trials to run - requires int between 1 and 1 million\n while True:\n trials = GetInt(\"Enter the number of trials to perform: \")\n if trials < 1 or trials > 1000000:\n print(\"Please enter an integer between 1 and 1,000,000.\")\n continue\n else:\n break\n\n # First method of solving - finds two endpoints of chords on the circle and calculate the distance between\n if solve_method == 1:\n for _ in range(trials):\n point1 = pick_point_on_circle()\n point2 = pick_point_on_circle()\n distance = calc_distance(point1,point2)\n if distance >= 1:\n larger += 1\n else:\n smaller += 1\n\n # Second method of solving - finds a random point in the circle to be the midpoint of the chord\n # Calculates the endpoint of a chord with that midpoint, find the distance between midpoint and endpoint\n # and then doubles to find the length of the chord\n if solve_method == 2:\n for _ in range(trials):\n point1 = pick_point_in_circle()\n chord_endpoint = find_endpoint(point1)\n distance = 2 * calc_distance(point1, chord_endpoint)\n if distance >= 1:\n larger += 1\n else:\n smaller += 1\n\n # Third method of solving - picks a random point on a radius to be the midoint of a chord. Calculates the endpoint\n # of a chord with that midpoint, finds the distance between midpoint and endpoint and then doubles to find the length\n # of the chord.\n if solve_method == 3:\n for _ in range(trials):\n radius_point = pick_radius_point()\n chord_endpoint = find_endpoint(radius_point)\n distance = 2 * calc_distance(radius_point, chord_endpoint)\n if distance >= 1:\n larger += 1\n else:\n smaller += 1\n\n # Print out results of our testing\n print(\"There were {} chords smaller than the radius\".format(smaller))\n print(\"There were {} chords larger than the radius.\".format(larger))\n prob_larger = float(larger) / trials * 100.0\n print(\"The probability of the chord being larger than the radius was {}.\".format(prob_larger))\n\n\n\ndef pick_point_on_circle():\n ''' This function picks a random point on the unit circle. The choice is made by selecting\n a random angle between 0 and 2pi radians. The x-coord is the cosine of this angle and the\n y-coord is the sine of this angle. 
The function returns a point as a 2d array.'''\n\n angle = random.uniform(0,1)*2*math.pi\n x = math.cos(angle)\n y = math.sin(angle)\n point = np.array([x, y])\n return point\n\n\ndef pick_point_in_circle():\n ''' This function picks a random point inside of a circle. The choice is made by first selecting an angle (t)\n and then a radius, sort of. The radius cannot be used directly because there would be more points selected closer to\n the center of the circle. Instead, a probability distribution function is used to calculate. For the given situation,\n the probability density function for a given radius r must be equal to r^2 (since the circle has radius 1). The density\n function is then p(r) = 2r. Mapping this to a uniform variable, we integrate and say that P(r) = r^2, and this must be equal\n to the uniform density function, u. So u = r^2, which gives r = sqrt(u). The x and y coordinates are then given by r multiplied\n by either the cosine or sine of t respectively.\n\n The function returns a point as a 2d array.'''\n\n t = np.random.uniform(0.0, 2.0*np.pi)\n r = np.sqrt(np.random.uniform(0.0, 1.0))\n x = r * np.cos(t)\n y = r * np.sin(t)\n point = np.array([x,y])\n return point\n\ndef pick_radius_point():\n point = pick_point_on_circle()\n center_vector = -point\n t = np.random.uniform(0.0, 1.0)\n radius_point = np.add(point, t * center_vector)\n return radius_point\n\n\ndef find_endpoint(midpoint):\n ''' This function determines an endpoint of a chord given its midpoint. The point is determined by moving some distance,\n t along the vector perpendicular to the radius. Points on this line are described by midpoint + T * vector. This will\n be on the edge of the circle when the distance from the origin to a given point on the line determined is equal to the radius.\n\n The function returns the chord endpoint as a 2d array.'''\n\n x = midpoint[0]\n y = midpoint[1]\n perp_vector = np.array([-y, x])\n t = ((1-x**2-y**2)/(x**2+y**2))**(1.0/2)\n endpoint = np.add(midpoint, t * perp_vector)\n return endpoint\n\ndef calc_distance(point1,point2):\n '''This function calculates the distance between two points and returns that value'''\n\n distance = ((point1[0]-point2[0])**2 + (point1[1] - point2[1])**2) ** (1.0/2)\n return distance\n\ndef GetInt(prompt):\n while True:\n integer = input(prompt)\n try:\n integer = int(integer)\n return integer\n except ValueError:\n print(\"Please enter an integer.\")\n continue\n\nif __name__ == \"__main__\":\n main()\n\n\n\n","sub_path":"circleprob.py","file_name":"circleprob.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"162294848","text":"import sys, os\nmyPath = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, myPath + '/../')\nsys.path.insert(0, myPath + '/../src')\nimport io\nimport configparser\nimport unittest\nfrom OrderList import OrderList\nfrom datetime import datetime\nimport pytz\nfrom httpretty import HTTPretty, httprettified\nimport sure\nimport httpretty\nimport requests\nimport json\n \nclass TestOrderList(unittest.TestCase):\n\n SAMPLE_DATA_RPAY_SEARCH = {\n \"orderNumberList\": [\n \"26161-20180101-22222201\",\n \"26161-20180101-22222202\",\n \"26161-20180101-22222203\"\n ],\n \"MessageModelList\": [\n {\n\t\"messageType\": \"INFO\",\n\t\"messageCode\": \"ORDER_EXT_API_SEARCH_ORDER_INFO_102\",\n\t\"message\": \"注文検索に成功しました。\"\n }\n ],\n \"PaginationResponseModel\": {\n \"totalRecordsAmount\": None,\n \"totalPages\": None,\n 
\"requestPages\": None\n }\n }\n\n def load_json_file(filepath):\n with open(filepath, 'r') as f:\n data = json.load(f)\n return data\n\n SAMPLE_DATA_RPAY = load_json_file('tests/order_data_rpay.json')\n\n SAMPLE_DATA = load_json_file('tests/order_data.json')\n\n def test_genFileName(self):\n ol = OrderList()\n print('{}'.format(ol.genFileName('order')))\n print('{}'.format(ol.genFileName('coupon')))\n print('{}'.format(ol.genFileName('childOrder')))\n\n def test_extendOrder(self):\n ol = OrderList()\n for order in self.SAMPLE_DATA_RPAY['OrderModelList']:\n if len(order['PackageModelList'][0]['ItemModelList']) <= 1:\n continue\n eorder = ol.extendOrder(order)\n assert eorder is not None\n\n def test_grabChildren(self):\n ol = OrderList()\n d = {\n 'aaa': {\n 'bbb': 1,\n 'ccc': 2,\n 'ddd': {\n 'eee': True\n },\n 'fff': [1, 2, 3]\n }\n }\n ret = ol.grabChildren(d)\n self.assertEqual(ret['aaa.bbb'], 1)\n self.assertEqual(ret['aaa.ccc'], 2)\n self.assertEqual(ret['aaa.ddd.eee'], True)\n self.assertIsInstance(ret['aaa.fff'], list)\n print(ret)\n\n @httpretty.activate\n def test_rpayOrder_400(self):\n ol = OrderList()\n conf = ol.emptyConfig()\n ol.defaultConfig(conf)\n conf['api']['licenseKey'] = 'AAA'\n conf['api']['secretService'] = 'BBB'\n conf['api']['shopUrl'] = 'testshop_666'\n conf['api']['RPay'] = '1'\n ws = ol.getRmsService(conf['api'])\n (input_dict, output_columns, general_conf) = ol.readInput(conf, 'tests/input_test1.conf')\n\n post_searchOrder_response = \"\"\"{\n \"MessageModelList\": [\n\t{\n\t \"messageType\": \"ERROR\",\n\t \"messageCode\": \"ORDER_EXT_API_GET_ORDER_ERROR_009\",\n\t \"message\": \"orderNumberListの項目を指定してください。\"\n\t}\n ],\n \"OrderModelList\": []\n}\"\"\"\n\n httpretty.register_uri(httpretty.POST, 'https://api.rms.rakuten.co.jp/es/2.0/order/searchOrder',\n body=post_searchOrder_response,\n content_type='application/json')\n\n result = ol.getOrderRPay(ws, input_dict, conf['api'])\n print(result.status)\n assert result['errorCode'] == 'W00-000'\n\n\n @httpretty.activate\n def test_rpayOrder_404(self):\n ol = OrderList()\n conf = ol.emptyConfig()\n ol.defaultConfig(conf)\n conf['api']['licenseKey'] = 'AAA'\n conf['api']['serviceSecret'] = 'BBB'\n conf['api']['shopUrl'] = 'testshop_666'\n conf['api']['RPay'] = '1'\n ws = ol.getRmsService(conf['api'])\n (input_dict, output_columns, general_conf) = ol.readInput(conf, 'tests/input_test1.conf')\n post_searchOrder_response = \"\"\"{\n \"orderNumberList\": [\n ],\n \"MessageModelList\": [\n\t{\n\t \"messageType\": \"INFO\",\n\t \"messageCode\": \"ORDER_EXT_API_SEARCH_ORDER_INFO_102\",\n\t \"message\": \"注文検索に成功しました。\"\n\t}\n ],\n \"PaginationResponseModel\": {\n \"totalRecordsAmount\": null,\n \"totalPages\": null,\n \"requestPages\": null\n }\n}\"\"\"\n\n post_getOrder_response = \"\"\"{\n \"MessageModelList\": [\n\t{\n\t \"messageType\": \"INFO\",\n\t \"messageCode\": \"ORDER_EXT_API_GET_ORDER_INFO_102\",\n\t \"message\": \"受注情報が取得できませんでした。\",\n\t \"orderNumber\": \"234323-20180101-10101001\"\n\t},\n\t{\n\t \"messageType\": \"INFO\",\n\t \"messageCode\": \"ORDER_EXT_API_GET_ORDER_INFO_102\",\n\t \"message\": \"受注情報が取得できませんでした。\",\n\t \"orderNumber\": \"234323-20180101-10101002\"\n\t},\n\t{\n\t \"messageType\": \"INFO\",\n\t \"messageCode\": \"ORDER_EXT_API_GET_ORDER_INFO_102\",\n\t \"message\": \"受注情報が取得できませんでした。\",\n\t \"orderNumber\": \"234323-20180101-10101003\"\n\t}\n ],\n \"OrderModelList\": []\n}\"\"\"\n\n httpretty.register_uri(httpretty.POST,\n 'https://api.rms.rakuten.co.jp/es/2.0/order/searchOrder',\n 
body=post_searchOrder_response,\n content_type='application/json')\n\n result = ol.getOrderRPay(ws, input_dict, conf['api'])\n print(result.status)\n assert result['errorCode'] == 'W00-000'\n assert result['message'] == 'Not Found'\n assert len(result['orderModel']) == 0\n\n @httpretty.activate\n def test_rpayOrder_200(self):\n ol = OrderList()\n conf = ol.emptyConfig()\n ol.defaultConfig(conf)\n conf['api']['licenseKey'] = 'AAA'\n conf['api']['secretService'] = 'BBB'\n conf['api']['shopUrl'] = 'testshop_666'\n conf['api']['RPay'] = '1'\n conf['api']['getOrderVersion'] = '3'\n ws = ol.getRmsService(conf['api'])\n (input_dict, output_columns, general_conf) = ol.readInput(conf, 'tests/input_test1_rpay.conf')\n start = input_dict['getOrderRequestModel']['orderSearchModel']['startDate']\n end = input_dict['getOrderRequestModel']['orderSearchModel']['endDate']\n duration_1call = -1\n val = general_conf['duration']\n if val: duration_1call = int(val)\n \n datetimeList = ol.datetimeSplit(start, end, duration_1call)\n\n\n post_searchOrder_response = json.dumps(self.SAMPLE_DATA_RPAY_SEARCH)\n post_getOrder_response = json.dumps(self.SAMPLE_DATA_RPAY)\n\n httpretty.reset()\n httpretty.register_uri(httpretty.POST,\n 'https://api.rms.rakuten.co.jp/es/2.0/order/searchOrder',\n body=post_searchOrder_response,\n content_type='application/json')\n\n httpretty.register_uri(httpretty.POST,\n 'https://api.rms.rakuten.co.jp/es/2.0/order/getOrder',\n body=post_getOrder_response,\n content_type='application/json')\n\n result = ol.getOrderRPay(ws, input_dict, conf['api'])\n if not 'orderSearchModel' in input_dict['getOrderRequestModel']:\n input_dict['getOrderRequestModel']['orderSearchModel'] = {}\n input_dict['getOrderRequestModel']['orderSearchModel']['startDate'] = 0\n input_dict['getOrderRequestModel']['orderSearchModel']['endDate'] = 0\n\n assert result['errorCode'] == 'N00-000'\n\n outfile = ol.genFileName('order', 'data')\n couponfile = ol.genFileName('coupon', 'data')\n shippingfile = ol.genFileName('shipping', 'data')\n writeCouponHeader = True\n with io.open(outfile, \"w\", encoding=conf['api']['output_encoding']) as output_file:\n coupon_file = io.open(couponfile, \"w\", \n encoding=conf['api']['output_encoding'], \n errors='replace')\n ret = ol.writeOutput(conf['api'], output_file, output_columns, result, True)\n cwnum = ol.writeCouponDetail(conf['api'], coupon_file,\n output_columns, result, writeCouponHeader)\n\n shipping_file = io.open(shippingfile, \"w\", \n encoding=conf['api']['output_encoding'], \n errors='replace')\n cwnum = ol.writeShippingDetail(conf['api'], shipping_file,\n output_columns, result, True)\n #print(ret)\n\n def test_readInput(self):\n ol = OrderList()\n conf = ol.emptyConfig()\n ol.defaultConfig(conf)\n nhash, outputColumns, general_conf = ol.readInput(conf, 'tests/input_test1.conf')\n\n print(nhash)\n osm = nhash['getOrderRequestModel']['orderSearchModel']\n csm = nhash['getOrderRequestModel']['orderSearchModel']['cardSearchModel']\n self.assertEqual(len(osm['pointStatus']), 2)\n self.assertEqual(osm['pointStatus'][0], '-1')\n self.assertEqual(osm['pointStatus'][1], '0')\n self.assertEqual(len(csm['cardStatus']), 1)\n self.assertEqual(csm['cardStatus'][0], '1')\n self.assertEqual(len(csm['payType']), 1)\n self.assertEqual(csm['payType'][0], '1')\n\n def test_readInput2(self):\n ol = OrderList()\n conf = ol.emptyConfig()\n ol.defaultConfig(conf)\n nhash, outputColumns, general_conf = ol.readInput(conf, 'tests/input_test2.conf')\n\n orm = nhash['getOrderRequestModel']\n osm = 
nhash['getOrderRequestModel']['orderSearchModel']\n csm = nhash['getOrderRequestModel']['orderSearchModel']['cardSearchModel']\n self.assertEqual(osm['asuraku'], False)\n self.assertEqual(len(orm['orderNumber']), 2)\n self.assertEqual(orm['orderNumber'][0], '666666-333333333-xxxxxxx')\n self.assertEqual(len(csm['cardStatus']), 2)\n self.assertEqual(csm['cardStatus'][0], '1')\n self.assertEqual(csm['cardStatus'][1], '3')\n self.assertEqual(len(csm['payType']), 1)\n self.assertEqual(csm['payType'][0], '1')\n\n self.assertIsInstance(osm['startDate'], datetime)\n self.assertIsInstance(osm['endDate'], datetime)\n\n def test_datetimeSplit(self):\n ol = OrderList()\n ret = ol.datetimeSplit(\n datetime.strptime('2016/01/01 00:00:00', '%Y/%m/%d %H:%M:%S'),\n datetime.strptime('2016/01/03 10:00:00', '%Y/%m/%d %H:%M:%S'), 86400)\n self.assertEqual(len(ret), 3)\n\n ret = ol.datetimeSplit(\n datetime.strptime('2016/01/01 00:00:00', '%Y/%m/%d %H:%M:%S'),\n datetime.strptime('2016/01/03 10:00:00', '%Y/%m/%d %H:%M:%S'), 3600)\n self.assertEqual(len(ret), 59)\n\n ret = ol.datetimeSplit(\n datetime.strptime('2016/01/01 00:00:00', '%Y/%m/%d %H:%M:%S'),\n datetime.strptime('2016/01/04 23:59:59', '%Y/%m/%d %H:%M:%S'), 3600)\n self.assertEqual(len(ret), 96)\n\n ret = ol.datetimeSplit(\n datetime.strptime('2016/01/01 00:00:00', '%Y/%m/%d %H:%M:%S'),\n datetime.strptime('2016/01/04 23:59:59', '%Y/%m/%d %H:%M:%S'), -1)\n self.assertEqual(len(ret), 1)\n\n\n def test_writeOutput(self):\n ol = OrderList()\n\n ret = self.SAMPLE_DATA\n\n outfile = ol.genFileName('order', 'data')\n conf = ol.emptyConfig()\n ol.defaultConfig(conf)\n #conf['api']['output_delimiter'] = ','\n #conf['api']['output_quotechar'] = '\"'\n #conf['api']['output_encoding'] = 'cp932'\n input, output_columns, general_conf = ol.readInput(conf, 'input.txt')\n with io.open(outfile, \"w\", encoding=conf['api']['output_encoding']) as output_file:\n ret = ol.writeOutput(conf['api'], output_file, output_columns, ret, True)\n print(ret)\n\n @httpretty.activate\n def test_writeOutputRpay(self):\n ol = OrderList()\n\n post_searchOrder_response = json.dumps(self.SAMPLE_DATA_RPAY_SEARCH)\n post_getOrder_response = json.dumps(self.SAMPLE_DATA_RPAY)\n\n conf = ol.emptyConfig()\n ol.defaultConfig(conf)\n conf['api']['licenseKey'] = 'AAA'\n conf['api']['secretService'] = 'BBB'\n conf['api']['shopUrl'] = 'testshop_666'\n conf['api']['RPay'] = '1'\n conf['api']['getOrderVersion'] = '3'\n ws = ol.getRmsService(conf['api'])\n (input_dict, output_columns, general_conf) = ol.readInput(conf, 'tests/input_test1_rpay.conf')\n\n httpretty.reset()\n httpretty.register_uri(httpretty.POST,\n 'https://api.rms.rakuten.co.jp/es/2.0/order/searchOrder',\n body=post_searchOrder_response,\n content_type='application/json')\n\n httpretty.register_uri(httpretty.POST,\n 'https://api.rms.rakuten.co.jp/es/2.0/order/getOrder',\n body=post_getOrder_response,\n content_type='application/json')\n\n ret = ol.getOrderRPay(ws, input_dict, conf['api'])\n\n outfile = ol.genFileName('order', 'data')\n conf = ol.emptyConfig()\n ol.defaultConfig(conf)\n #conf['api']['output_delimiter'] = ','\n #conf['api']['output_quotechar'] = '\"'\n #conf['api']['output_encoding'] = 'cp932'\n input, output_columns, general_conf = ol.readInput(conf, 'input_rpay.txt')\n with io.open(outfile, \"w\", encoding=conf['api']['output_encoding']) as output_file:\n ret = ol.writeOutput(conf['api'], output_file, output_columns, ret, True)\n 
print(ret)\n\n\n","sub_path":"tests/test_orderlist.py","file_name":"test_orderlist.py","file_ext":"py","file_size_in_byte":12498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"239024617","text":"import re\r\nimport sys\r\nimport time\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nfrom trade import common\r\nfrom trade.c5_purchase import purchase, c5_purchase_by_name\r\n\r\nIG_HEADER = {\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\r\n 'Accept-Encoding': 'gzip, deflate, br',\r\n 'Accept-Language': 'en,zh;q=0.9,zh-CN;q=0.8',\r\n 'Cache-Control': 'no-cache',\r\n 'Cookie': 'aliyungf_tc=AQAAAGDDX27H8QsAL9MKcCCcYOCXxt9s; __cfduid=d59327cd4e746778f725f13a8f2383a2e1563033516; _ga=GA1.2.167051666.1563033518; href=https%3A%2F%2Fwww.igxe.cn%2Finventory%2Fsteam%2F570; bad_id572d9ba0-d737-11e8-970c-a553533099d1=1f82bae1-a587-11e9-a64e-498ffeec9964; nice_id572d9ba0-d737-11e8-970c-a553533099d1=1f82bae2-a587-11e9-a64e-498ffeec9964; my_game=570; agree_sell_agreementCmart007=true; Hm_lvt_fe0238ac0617c14d9763a2776288b64b=1563033517; _gid=GA1.2.2144394666.1563724704; accessId=572d9ba0-d737-11e8-970c-a553533099d1; csrftoken=HAhnURWenduNbtg6FV1kiaYsJlJk6pWi; token=70fc3439-a23a-4d08-87ac-06b825a39bfe; sessionid=t1s4f27n2zrdq67lb1bt73f7z7dh6tak; not_pay_pwd_token=70fc3439-a23a-4d08-87ac-06b825a39bfe; myDateMinutes=24; qimo_seosource_572d9ba0-d737-11e8-970c-a553533099d1=%E7%AB%99%E5%86%85; qimo_seokeywords_572d9ba0-d737-11e8-970c-a553533099d1=; pageViewNum=61; _gat=1; Hm_lpvt_fe0238ac0617c14d9763a2776288b64b=1564043206',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \r\n 'Chrome/72.0.3626.121 Safari/537.36',\r\n 'Referer': 'https://www.igxe.cn/dmall/seller/shipper_order?start_time=&end_time=&search_type=0&page_no=1&app_id=',\r\n 'Upgrade-Insecure-Requests': '1',\r\n 'Pragma': 'no-cache',\r\n 'Host': 'www.igxe.cn'\r\n}\r\n# 不羁牡马指针\r\nlast_item = '不羁牡马指针'\r\nstart_page = 1\r\nend_page = 100\r\nprofit = 0\r\nif __name__ == '__main__':\r\n for page in range(end_page - start_page + 1):\r\n\r\n range_url = 'https://www.igxe.cn/dmall/seller/shipper_order?start_time=&end_time=&search_type=5&page_no=' \\\r\n + str(page + start_page) + '&app_id=570'\r\n r = requests.get(range_url, headers=IG_HEADER)\r\n soup = BeautifulSoup(r.content, 'html.parser', from_encoding='utf-8')\r\n # items = soup.find_all('div', class_='proTit')\r\n tables = soup.find_all('table', class_='moreProOrder')\r\n for table in tables:\r\n item = table.find('div', class_='proTit')\r\n # skip rows without a product title, otherwise item.string raises AttributeError\r\n if item is None:\r\n continue\r\n item_name = item.string\r\n print(item_name, page + 1)\r\n if item_name == last_item:\r\n common.account_today(profit)\r\n sys.exit()\r\n # ig_price = common.get_ig_good_info(item_name)\r\n if item_name is not None:\r\n # print(table.find('div', class_='deInfo').contents[2].string)\r\n ig_price = float(re.search(r\"\\d*.\\d{2}\", table.find('div', class_='deInfo')\r\n .contents[2].string).group(0))\r\n c5_info_res = common.get_c5_good_info(item_name)\r\n if c5_info_res:\r\n item_id, srch_p = common.srch_val(item_name)\r\n print(ig_price, srch_p)\r\n if srch_p:\r\n if c5_info_res['c5_price'] and (ig_price - srch_p) > 0.5:\r\n if srch_p < 1:\r\n purchase(item_id, srch_p + 0.01)\r\n else:\r\n purchase(item_id, srch_p + 0.1)\r\n else:\r\n c5_purchase_by_name(item_name)\r\n # if c5_info_res['c5_price'] and float(ig_price) - float(c5_info_res['c5_price']) > 0.5:\r\n # 
common.buy_item(item_id)\r\n\r\n time.sleep(5)\r\n","sub_path":"trade/ig_sell_pur.py","file_name":"ig_sell_pur.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"360348884","text":"#instructions below\n#enter a website, e.g. www.google.com\n#then it returns the IP address of that website\n#An IP (Internet Protocol) address is the address of your network hardware.\n\nfrom socket import *\nhost=input()\ntry:\n addr=gethostbyname(host)\n print(\"IP Address of website is\"+\" \"+addr)\nexcept gaierror:\n print(\"The website doesn't exist\")\np=gethostname()\nq=gethostbyname(p)\nprint(\"Your pc name is \"+p)\nprint(\"Your ip address is \"+q)\n","sub_path":"Find An IP of a website.py","file_name":"Find An IP of a website.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"149814365","text":"import numpy as np\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import t\n\n\nclass FitData(object):\n \"\"\"Class to compute top and bottom extrapolation methods and associated statistics.\n\n Data required for the constructor method include data of class\n NormData, threshold for the minimum number of points for a valid\n median, top extrapolation method, bottom extrapolation method, type\n of fit, and if a manual fit, the exponent.\n\n Attributes\n ----------\n self.file_name: str\n Name of transect file\n top_method: str\n Top extrapolation method\n bot_method: str\n Bottom extrapolation method\n coef: float\n Power fit coefficient\n exponent: float\n Power fit exponent\n u: np.array(float)\n Fit values of the variable\n u_auto: np.array(float)\n Fit values from automatic fit\n z_auto: np.array(float)\n z values for automatic fit\n z: np.array(float)\n Distance from the streambed for fit variable\n exp_method: str\n Method to determine exponent (default, optimize, or manual)\n data_type: str\n Type of data (v, q, V, or Q)\n exponent_95_ci: float\n 95% confidence intervals for optimized exponent\n residuals: np.array(float)\n Residuals from fit\n r_squared: float\n R squared of model\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize object and instance variables.\"\"\"\n\n self.file_name = None # Name of transect file\n self.top_method = 'Power' # Top extrapolation method\n self.bot_method = 'Power' # Bottom extrapolation method\n self.coef = 0 # Power fit coefficient\n self.exponent = 0.1667 # Power fit exponent\n self.u = None # Fit values of the variable\n self.u_auto = None # Fit values from automatic fit\n self.z_auto = None # z values for automatic fit\n self.z = None # Distance from the streambed for fit variable\n self.exp_method = 'Power' # Method to determine exponent (default, optimize, or manual)\n self.data_type = None # Type of data (velocity or unit discharge)\n self.exponent_95_ci = 0 # 95% confidence intervals for optimized exponent\n self.residuals = np.array([]) # Residuals from fit\n self.r_squared = 0 # R squared of model\n\n def populate_data(self, norm_data, top, bot, method, exponent=None):\n \"\"\"Computes fit and stores associated data.\n\n Parameters\n ----------\n norm_data: NormData\n Object of NormData\n top: str\n Top extrapolation method\n bot: str\n Bottom extrapolation method\n method:\n Method used to define the exponent (default, optimize, or manual), default is 1/6.\n exponent:\n Exponent for power or no slip fit methods.\n \"\"\"\n\n avg_z = 
norm_data.unit_normalized_z\n y = norm_data.unit_normalized_med\n idxz = norm_data.valid_data\n zc = np.nan\n\n lower_bound = [-np.inf, 0.01]\n upper_bound = [np.inf, 1]\n bounds = None\n p0 = None\n uc = np.nan\n\n # Process data if available\n if len(idxz) > 0:\n idx_power = idxz\n\n # Create arrays for data fitting\n # Select median values to use in the extrapolation methods selected\n # and create fit output data arrays\n\n # If bottom is No Slip, Power at top is not allowed\n if bot == 'No Slip':\n if top == 'Power':\n top = 'Constant'\n\n fit_combo = ''.join([top, bot])\n if fit_combo == 'PowerPower':\n self.z = np.arange(0, 1.01, 0.01)\n zc = np.nan\n uc = np.nan\n elif fit_combo == 'ConstantPower':\n self.z = np.arange(0, np.max(avg_z[idxz]), 0.01)\n self.z = np.hstack([self.z, np.nan])\n zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)\n uc = np.tile(y[idxz[0]], zc.shape)\n elif fit_combo == '3-PointPower':\n self.z = np.arange(0, np.max(avg_z[idxz]), 0.01)\n self.z = np.hstack([self.z, np.nan])\n # If less than 6 bins use constant at the top\n if len(idxz) < 6:\n zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)\n uc = np.tile(y[idxz[0]], zc.shape)\n else:\n p = np.polyfit(avg_z[idxz[0:3]], y[idxz[0:3]], 1)\n zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)\n # zc = zc.T\n uc = zc * p[0] + p[1]\n\n elif fit_combo == 'ConstantNo Slip':\n # Optimize constant / no slip if sufficient cells are available\n if method.lower() == 'optimize':\n idx = idxz[int(1+len(idxz) - np.floor(len(avg_z[idxz]) / 3) - 1)::]\n if len(idx) < 4:\n method = 'default'\n\n # Compute Constant / No Slip using WinRiver II and\n # RiverSurveyor Live default cells\n else:\n idx = np.where(avg_z[idxz] <= .2)[0]\n if len(idx) < 1:\n idx = idxz[-1]\n else:\n idx = idxz[idx]\n\n # Configures u and z arrays\n idxns = np.array([idx]).T\n self.z = np.arange(0, avg_z[idxns[0]], 0.01)\n self.z = np.hstack([self.z, [np.nan]])\n idx_power = idx\n zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.00, 0.01)\n uc = np.tile(y[idxz[0]], zc.shape)\n\n elif fit_combo == '3-PointNo Slip':\n # Optimize 3-Point / no slip if sufficient cells are available\n if method.lower() == 'optimize':\n idx = idxz[int(1 + len(idxz) - np.floor(len(avg_z[idxz])) / 3) - 1::]\n if len(idx) < 4:\n method = 'default'\n\n # Compute 3-Point / No Slip using WinRiver II and\n # RiverSurveyor Live default cells\n else:\n idx = np.where(avg_z[idxz] <= .2)[0]\n if len(idx) < 1:\n idx = idxz[-1]\n else:\n idx = idxz[idx]\n\n # Configures u and z arrays\n idxns = np.array([idx]).T\n self.z = np.arange(0, avg_z[idxns[0]], 0.01)\n self.z = np.hstack([self.z, [np.nan]])\n idx_power = idx\n # If less than 6 bins use constant at the top\n if len(idxz) < 6:\n zc = np.arange(np.max(idxz) + 0.01, 1.0, 0.01)\n uc = np.tile(y[idxz[0]], zc.shape)\n else:\n p = np.polyfit(avg_z[idxz[0:3]], y[idxz[0:3]], 1)\n zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)\n uc = zc * p[0] + p[1]\n\n # Compute exponent\n zfit = avg_z[idx_power]\n yfit = y[idx_power]\n\n # Check data validity\n ok_ = np.logical_and(np.isfinite(zfit), np.isfinite(yfit))\n\n self.exponent = np.nan\n self.exponent_95_ci = np.nan\n self.r_squared = np.nan\n fit_func = 'linear'\n\n lower_method = method.lower()\n\n if lower_method == 'manual':\n fit_func = 'linear'\n self.exponent = exponent\n bounds = None\n p0 = None\n\n elif lower_method == 'default':\n fit_func = 'linear'\n self.exponent = 1./6.\n bounds = None\n p0 = None\n\n elif lower_method == 'optimize':\n fit_func 
= 'power'\n bounds = [lower_bound, upper_bound]\n strt = yfit[ok_]\n p0 = [strt[-1], 1./6]\n\n fit_funcs = {\n 'linear': lambda x, a: a * x**self.exponent,\n 'power': lambda x, a, b: a * x**b\n }\n\n if ok_.size > 1:\n if bounds is not None:\n popt, pcov = curve_fit(fit_funcs[fit_func],\n zfit, yfit, p0=p0, bounds=bounds)\n else:\n popt, pcov = curve_fit(fit_funcs[fit_func],\n zfit, yfit, p0=p0)\n\n # Extract exponent and confidence intervals from fit\n if lower_method == 'optimize':\n self.exponent = popt[1]\n if self.exponent is None or self.exponent < 0.05:\n self.exponent = 0.05\n\n if len(zfit[ok_]) > 2:\n\n n = len(zfit) # number of data points\n\n t_val = t.ppf(.975, n-2)\n\n # Get 95% confidence intervals\n lower = (popt[-1] - t_val * np.sqrt(np.diag(pcov)[-1]))\n upper = (popt[-1] + t_val * np.sqrt(np.diag(pcov)[-1]))\n self.exponent_95_ci = np.vstack([lower, upper])\n\n # Get the rsquared for the model\n ss_tot = np.sum((y[idx_power] - np.mean(yfit))**2)\n ss_res = np.sum((y[idx_power] - fit_funcs[fit_func](zfit, *popt))**2)\n self.r_squared = 1 - (ss_res/ss_tot)\n else:\n self.exponent_95_ci = np.nan\n self.r_squared = np.nan\n\n # Fit power curve to appropriate data\n self.coef = ((self.exponent + 1) * 0.05 * np.nansum(y[idx_power])) / \\\n np.nansum(((avg_z[idx_power] + (0.5 * 0.05))**(self.exponent + 1)\n - ((avg_z[idx_power] - (0.5 * 0.05))**(self.exponent + 1))))\n\n # Compute residuals\n self.residuals = y[idx_power] - self.coef * avg_z[idx_power]**self.exponent\n if self.residuals is None:\n self.residuals = np.array([])\n\n # Compute values (velocity or discharge) based on the exponent and coefficient\n self.u = self.coef * self.z**self.exponent\n if type(zc) == np.ndarray:\n self.u = np.append(self.u, uc)\n self.z = np.append(self.z, zc)\n\n # Assign variables to object properties\n self.file_name = norm_data.file_name\n self.top_method = top\n self.bot_method = bot\n self.exp_method = method\n self.data_type = norm_data.data_type\n\n else:\n # If no data are valid, simply record the selected methods\n self.exponent = np.nan\n self.exponent_95_ci = [np.nan, np.nan]\n self.r_squared = np.nan\n self.file_name = norm_data.file_name\n self.top_method = top\n self.bot_method = bot\n self.exp_method = method\n self.data_type = norm_data.data_type\n","sub_path":"Classes/FitData.py","file_name":"FitData.py","file_ext":"py","file_size_in_byte":11072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"397621274","text":"from pprint import pprint\n\nfrom kubernetes import client, config\nfrom kubernetes.client import CoreV1Api\nfrom kubernetes.stream import stream\nfrom kubernetes.client import configuration\n\n# create an instance of the API class\n\nconfig.load_kube_config()\nconfiguration.assert_hostname = False\n\nexec_command = [\n '/bin/sh',\n '-c',\n 'echo This message goes to stdout; ls / -alh'\n]\n\napi = CoreV1Api()\napi_response = stream(api.connect_get_namespaced_pod_exec,\n 'tf-test-0',\n 'jeffyfhuang',\n command=exec_command,\n stderr=True,\n stdin=False,\n stdout=True,\n tty=False,\n _preload_content=True)\n\npprint(api_response)\n","sub_path":"k8sdeployment/k8sstat/python/test_exec_pods.py","file_name":"test_exec_pods.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"138238458","text":"import json\nimport pymssql\nimport csv\nimport sys\nimport codecs\nimport os\nfrom os.path import realpath,dirname\n\n\nclass 
CSV2Sqlserver:\n def __init__(self,sqlConfigs):\n print(\"正在初始化CSV2Sqlserver!\")\n self.server=sqlConfigs['server']\n self.port=sqlConfigs['port']\n self.user=sqlConfigs['user'] \n self.password=sqlConfigs['password'] \n self.DBname=sqlConfigs['DBname']\n \n def connect(self):\n # open a database connection\n # connect to SQL Server (the original note about needing local_infile=1 applies to MySQL, not pymssql)\n try:\n return pymssql.connect( host=self.server,port=int(self.port),user=self.user,password=self.password,database=self.DBname,charset='utf8')\n except Exception as e:\n raise\n\n def close(self,db,cursor):\n # close the database connection\n try:\n cursor.close()\n db.close()\n except Exception as e:\n raise\n\n def create_table(self,keys,table_name):\n try:\n db=self.connect()\n cursor = db.cursor()\n colum=''\n for key in keys:\n colum=colum+key+' VARCHAR(MAX),'\n colum=colum[:-1]\n create=\"IF (not EXISTS(SELECT * FROM sysobjects WHERE name='\"+table_name+\"')) CREATE TABLE \"+table_name+\" (ID int NOT NULL IDENTITY(1,1) ,AddTime datetime default(getDate()),\"+colum+\",PRIMARY KEY ([ID]));\"\n print(create)\n cursor.execute(create)\n db.commit()\n except Exception as e:\n raise\n finally:\n self.close(db,cursor)\n\n def insert_MoreData(self,table,items):\n try:\n colum=''\n values=''\n for key in items[0].keys():\n colum=colum+key+','\n values=values+'%s,'\n values=values[:-1]\n colum=colum[:-1]\n temp=\"INSERT INTO \"+table+\"(\"+colum+\") values(\"+values+\")\"\n print(temp)\n data=[]\n for item in items :\n data.append(tuple(item.values()))\n db=self.connect()\n cursor = db.cursor()\n # bulk insert\n cursor.executemany(temp,data)\n # after inserting, deduplicate the table\n re_db=\"\"# dedup SQL statement (not implemented)\n #cursor.execute()\n db.commit()\n except Exception as e:\n raise\n finally:\n self.close(db,cursor)\n\n\nif __name__==\"__main__\":\n # the constructor takes only the config dict; the stray table-name argument was removed\n test=CSV2Sqlserver({\"server\": \"192.168.1.224\", \"port\": \"9018\", \"user\": \"sa\", \"password\": \"123456\", \"DBname\": \"spider\", \"sqltype\": \"Sqlserver\"})\n keys=['url','field']\n test.create_table(keys,'测试01')\n item1={'url':'是1','field':'a'}\n item2={'url':'是2','field':'b'}\n item3={'url':'是3','field':'c'}\n item4={'url':'是1','field':'d'}\n items=[item1,item2,item3,item4]\n test.insert_MoreData('测试01',items)\n\n","sub_path":"Hive_Server/Hive/SQL/CSV2Sqlserver.py","file_name":"CSV2Sqlserver.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"509435037","text":"# riaps:keep_import:begin\nfrom riaps.run.comp import Component\nimport logging\nimport random\nimport os\n# import capnp\n# import powerfail_capnp\n\n# riaps:keep_import:end\n\nclass PowerController(Component):\n\n# riaps:keep_constr:begin\n def __init__(self):\n super(PowerController, self).__init__()\n# riaps:keep_constr:end\n\n# riaps:keep_reqport:begin\n def on_ReqPort(self):\n reply = self.ReqPort.recv_pyobj()\n self.logger.info(\"received from device component %s\" % reply)\n \n def handleActivate(self):\n self.logger.info(\"starting shutdown timer\")\n self.clock.setDelay(10.0)\n self.clock.launch()\n# riaps:keep_reqport:end\n\n# riaps:keep_clock:begin\n def on_clock(self):\n now = self.clock.recv_pyobj()\n self.logger.info(\"on_clock(): %s\" % now)\n self.logger.info(\"sending request\")\n msg = \"shutdown\"\n try:\n self.ReqPort.send_pyobj(msg)\n self.clock.halt()\n except:\n self.logger.info(\"send exception\")\n \n# riaps:keep_clock:end\n\n# riaps:keep_trigger:begin\n def on_trigger(self):\n now = self.trigger.recv_pyobj()\n self.logger.info(\"on_trigger(): %s\" % now)\n self.logger.info(\"publishing 
value\")\n val = random.random()\n self.PubPort.send_pyobj(val)\n# riaps:keep_trigger:end\n\n# riaps:keep_impl:begin\n\n# riaps:keep_impl:end","sub_path":"apps-vu/fault-tolerance-tests/testPowerFail/PowerController.py","file_name":"PowerController.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"387590394","text":"from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport cv2\nimport os\n\nimport random\nfrom imutils import paths\n\n\nap = argparse.ArgumentParser()\nap.add_argument('-m', '--model', required=True, help='path to trained model')\nap.add_argument('-l', '--labelbin', required=True, help='path to label binarizer')\nargs = vars(ap.parse_args())\n\n# image_paths = list(paths.list_images('/Users/mandywoo/Documents/cnn_projects/fruits360-kaggle/fruits/fruits-360/Test'))\nimage_paths = list(paths.list_images('fruits360-kaggle/dataset/Backgrounds'))\nrandom.seed(42)\nrandom.shuffle(image_paths)\n\ncorrect_count = 0\n\nfor image_path in image_paths:\n # read image\n # image = cv2.imread(args['image'])\n image = cv2.imread(image_path)\n output = image.copy()\n\n # preprocess image for classification\n image = cv2.resize(image, (100, 100))\n image = image.astype('float') / 255.0\n image = img_to_array(image)\n image = np.expand_dims(image, axis=0)\n\n # load trained convolutional neural network and label binarizer\n print('[INFO] loading network...')\n model = load_model(args['model'])\n lb = pickle.loads(open(args['labelbin'], 'rb').read())\n\n # classify input image\n print('[INFO] classifying image...')\n proba = model.predict(image)[0]\n idx = np.argmax(proba)\n label = lb.classes_[idx]\n\n\n filename = image_path.split(os.path.sep)[-2]\n # correct = 'correct' if filename.rfind(label) != -1 else 'incorrect'\n if filename.rfind(label) != -1:\n correct = 'correct'\n correct_count += 1\n else:\n correct = 'incorrect'\n\n\n # build and draw label on image\n label = '{}: {:.2f}% ({})'.format(label, proba[idx] * 100, correct)\n output = imutils.resize(output, width=400)\n cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\n\n print('[INFO] {}'.format(label))\n cv2.imshow('Output', output)\n cv2.waitKey(0)\n\nprint('Correct Percentage: ' + str(correct_count/len(image_paths)))\ncv2.waitKey(0)\n\n\n\n\n\n\n","sub_path":"Fruit-360/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"524401976","text":"# Copyright 2014 SolidBuilds.com. 
All rights reserved\n#\n# Authors: Ling Thio \n\nimport os\nfrom flask_mail import Mail\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_user import UserManager, SQLAlchemyAdapter\nfrom flask_wtf.csrf import CsrfProtect\nfrom myweb import appMe, db, manager\n\n\n@appMe.before_first_request\ndef initialize_app_on_first_request():\n \"\"\" Create users and roles tables on first HTTP request \"\"\"\n from .create_users import create_users\n create_users()\n\n\ndef create_app(extra_config_settings={}):\n \"\"\"\n Initialize Flask applicaton\n \"\"\"\n\n # ***** Initialize app config settings *****\n # Read common settings from 'myweb/startup/common_settings.py' file\n appMe.config.from_object('myweb.startup.common_settings')\n # Read environment-specific settings from file defined by OS environment variable 'ENV_SETTINGS_FILE'\n env_settings_file = os.environ.get('ENV_SETTINGS_FILE', 'env_settings.py')\n appMe.config.from_pyfile(env_settings_file)\n # Read extra config settings from function parameter 'extra_config_settings'\n appMe.config.update(extra_config_settings) # Overwrite with 'extra_config_settings' parameter\n if appMe.testing:\n appMe.config['WTF_CSRF_ENABLED'] = False # Disable CSRF checks while testing\n\n\n # Setup Flask-Migrate\n migrate = Migrate(appMe, db)\n manager.add_command('db', MigrateCommand)\n\n # Setup Flask-Mail\n mail = Mail(appMe)\n\n # Setup WTForms CsrfProtect\n CsrfProtect(appMe)\n\n # Define bootstrap_is_hidden_field for flask-bootstrap's bootstrap_wtf.html\n from wtforms.fields import HiddenField\n\n def is_hidden_field_filter(field):\n return isinstance(field, HiddenField)\n\n appMe.jinja_env.globals['bootstrap_is_hidden_field'] = is_hidden_field_filter\n\n # Setup an error-logger to send emails to app.config.ADMINS\n init_email_error_handler(appMe)\n\n # Setup Flask-User to handle user account related forms\n from myweb.user.core.models import User, MyRegisterForm\n from myweb.user.core.views import user_profile_page\n\n db_adapter = SQLAlchemyAdapter(db, User) # Setup the SQLAlchemy DB Adapter\n user_manager = UserManager(db_adapter, appMe, # Init Flask-User and bind to app\n register_form=MyRegisterForm, # using a custom register form with UserProfile fields\n user_profile_view_function=user_profile_page,\n )\n\n # Load all blueprints with their manager commands, models and views\n from myweb.user import core\n from myweb import wuaiwow\n return appMe\n\n\ndef init_email_error_handler(appMe):\n \"\"\"\n Initialize a logger to send emails on error-level messages.\n Unhandled exceptions will now send an email message to app.config.ADMINS.\n \"\"\"\n if appMe.debug: return # Do not send error emails while developing\n\n # Retrieve email settings from app.config\n host = appMe.config['MAIL_SERVER']\n port = appMe.config['MAIL_PORT']\n from_addr = appMe.config['MAIL_DEFAULT_SENDER']\n username = appMe.config['MAIL_USERNAME']\n password = appMe.config['MAIL_PASSWORD']\n secure = () if appMe.config.get('MAIL_USE_TLS') else None\n\n # Retrieve app settings from app.config\n to_addr_list = appMe.config['ADMINS']\n subject = appMe.config.get('APP_SYSTEM_ERROR_SUBJECT_LINE', 'System Error')\n\n # Setup an SMTP mail handler for error-level messages\n import logging\n from logging.handlers import SMTPHandler\n\n mail_handler = SMTPHandler(\n mailhost=(host, port), # Mail host and port\n fromaddr=from_addr, # From address\n toaddrs=to_addr_list, # To address\n subject=subject, # Subject line\n credentials=(username, password), # Credentials\n 
secure=secure,\n )\n mail_handler.setLevel(logging.ERROR)\n appMe.logger.addHandler(mail_handler)\n\n # Log errors using: app.logger.error('Some error message')\n\n\n\n\n","sub_path":"myweb/startup/create_app.py","file_name":"create_app.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"555425868","text":"from datetime import datetime, timedelta\nimport html\n\nimport tweepy\n\nfrom game import Game\nimport keys\n\n\nauth = tweepy.OAuthHandler(keys.consumer_key, keys.consumer_secret)\nauth.set_access_token(keys.access_token, keys.access_token_secret)\napi = tweepy.API(auth)\n\nhandle = auth.get_username()\n\n\nclass TwitterGame(tweepy.StreamListener):\n def __init__(self, api, game, initial_status_id):\n self.game = game\n self.initial_status_id = initial_status_id\n self.trigger_status_ids = {initial_status_id}\n self.end_at = datetime.now() + timedelta(hours=1)\n return super().__init__(api)\n\n def handle_play(self, status):\n if not status.text.startswith('@{}'.format(handle)):\n # ignore things that aren't mentions\n return\n\n if status.in_reply_to_status_id not in self.trigger_status_ids:\n # ignore things that aren't replies to the game in progress\n return\n\n self.trigger_status_ids.add(status.id)\n\n text = html.unescape(status.text)\n entry = text.replace('@{}'.format(handle), '').strip()\n score = self.game.play(entry)\n\n if score == 1:\n # correct! end the game\n api.update_status(\n \"@{} Correct! I'll start a new game soon.\".format(\n status.author.screen_name, score,\n ),\n in_reply_to_status_id=status.id,\n )\n api.retweet(status.id)\n\n return False\n else:\n # provide feeback\n self.trigger_status_ids.add(api.update_status(\n \"@{} That's {:.1%} right.\".format(\n status.author.screen_name, score,\n ),\n in_reply_to_status_id=status.id,\n ).id)\n\n def on_status(self, status):\n rv = self.handle_play(status)\n\n if rv is False:\n # there's no need to force the game to end; it's already over\n return rv\n\n elif datetime.now() > self.end_at:\n api.update_status(\n 'Game over. 
The answer was {}'.format(self.game.original),\n                in_reply_to_status_id=self.initial_status_id,\n            )\n            return False\n\n        else:\n            return rv\n\n\ndef run_game():\n    game = Game()\n    listener = TwitterGame(api, game, api.update_status(game.clue).id)\n    stream = tweepy.Stream(auth=api.auth, listener=listener)\n    stream.userstream()\n\n\nif __name__ == '__main__':\n    run_game()\n","sub_path":"twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"420635402","text":"import argparse\nimport numpy as np\nRATING_FILE_NAME=dict({'movie':'ratings.dat','book':'BX-Book-Ratings.csv','news':'ratings.txt'})\nSEP=dict({'movie':'::','book':';','news':'\\t'})\nTHRESHOLD=dict({'movie':4,'book':0,'news':0})\n\ndef read_item_index_to_entity_id_file():\n    file='../data/'+DATASET+'/item_index2entity_id_rehashed.txt'\n    print('reading item index to entity id file:'+file+'...')\n    i=0\n    for line in open(file,encoding='utf-8').readlines():\n\n        item_index=line.strip().split('\\t')[0]\n        # print(15,item_index)\n        satori_id=line.strip().split('\\t')[1]\n        # print(17,satori_id)\n        item_index_old2new[item_index]=i\n        entity_id2index[satori_id]=i\n        i+=1\n\n\ndef convert_rating():\n    file='../data/'+DATASET+'/'+RATING_FILE_NAME[DATASET]\n    # print(24)\n    # print(item_index_old2new.values())  # number of items\n    item_set=set(item_index_old2new.values())\n    user_pos_ratings=dict()\n    user_neg_ratings=dict()\n    for line in open(file,encoding='utf-8').readlines()[1:]:\n        # print(line)\n        array=line.strip().split(SEP[DATASET])\n        if DATASET=='book':\n            array=list(map(lambda x: x[1:-1],array))\n        item_index_old=array[1]  # item index\n        if item_index_old not in item_index_old2new: #the item is not in the final item set\n            continue\n        item_index=item_index_old2new[item_index_old]  # new item index\n        user_index_old=int(array[0])  # user index\n        rating=float(array[2])  # rating\n        # a rating at or above the threshold is treated as positive feedback\n        if rating>=THRESHOLD[DATASET]:\n            if user_index_old not in user_pos_ratings:\n                user_pos_ratings[user_index_old]=set()\n            user_pos_ratings[user_index_old].add(item_index)\n        else:\n            if user_index_old not in user_neg_ratings:\n                user_neg_ratings[user_index_old]=set()\n            user_neg_ratings[user_index_old].add(item_index)\n\n    writer=open('../data/'+DATASET+'/ratings_final.txt','w',encoding='utf-8')\n    user_cnt=0\n    user_index_old2new=dict()\n    for user_index_old,pos_item_set in user_pos_ratings.items():\n        if user_index_old not in user_index_old2new:\n            user_index_old2new[user_index_old]=user_cnt\n            user_cnt+=1\n        user_index=user_index_old2new[user_index_old]\n        for item in pos_item_set:\n            writer.write('%d\\t%d\\t1\\n' %(user_index,item))\n        unwatched_set=item_set-pos_item_set\n        if user_index_old in user_neg_ratings:\n            unwatched_set-=user_neg_ratings[user_index_old]\n        for item in np.random.choice(list(unwatched_set),size=len(pos_item_set),replace=False):\n            writer.write('%d\\t%d\\t0\\n'%(user_index,item))\n    writer.close()\n    print('number of users: %d'%user_cnt)\n    print('number of items: %d'%len(item_set))\n\ndef convert_kg():\n    print('converting kg file..')\n    entity_cnt=len(entity_id2index)\n    relation_cnt=0\n    writer=open('../data/'+DATASET+'/kg_final.txt','w',encoding='utf-8')\n    files=[]\n    if DATASET=='movie':\n        files.append(open('../data/'+DATASET+'/kg_part1_rehashed.txt',encoding='utf-8'))\n        files.append(open('../data/'+DATASET+'/kg_part2_rehashed.txt',encoding='utf-8'))\n    else:\n        files.append(open('../data/'+DATASET+'/kg_rehashed.txt',encoding='utf-8'))\n\n    for file in files:\n        for line in file:\n            
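            # For each (head, relation, tail) triple below, raw ids are mapped
            # to dense integer indices on first sight; entity_cnt continues
            # counting up from the item entities indexed earlier.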
array=line.strip().split('\\t')\n head_old=array[0]\n relation_old=array[1]\n # print(relation_old)\n tail_old=array[2]\n\n if head_old not in entity_id2index:\n entity_id2index[head_old]=entity_cnt\n entity_cnt+=1\n head=entity_id2index[head_old]\n if tail_old not in entity_id2index:\n entity_id2index[tail_old]=entity_cnt\n entity_cnt+=1\n tail=entity_id2index[tail_old]\n if relation_old not in relation_id2index:\n relation_id2index[relation_old]=relation_cnt\n relation_cnt+=1\n relation=relation_id2index[relation_old]\n print(relation_id2index)\n print('-----')\n print(relation)\n writer.write('%d\\t%d\\t%d\\n'%(head,relation,tail))\n writer.close()\n print('number of entities (containing items):%d'%entity_cnt)\n print('number of relations:%d'%relation_cnt)\n\nif __name__ == '__main__':\n '''\n number of users: 6036\n number of items: 2445\n number of entities 182011\n number of relations:12\n '''\n np.random.seed(555)\n parser=argparse.ArgumentParser()\n parser.add_argument('-d','--dataset',type=str,default='movie',help='which dataset to preprocess')\n args=parser.parse_args()\n DATASET=args.dataset\n entity_id2index=dict()\n relation_id2index=dict()\n item_index_old2new=dict()\n read_item_index_to_entity_id_file()\n convert_rating()\n convert_kg()\n\n","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"188837425","text":"import logging\n\nfrom rest_server import app as application\n\nlog_stderr = logging.StreamHandler()\nlog_stderr.setLevel(logging.INFO)\nformatter = logging.Formatter(\n \"%(asctime)s %(levelname)s [%(module)s] %(name)s - %(funcName)s(): %(message)s\"\n)\nlog_stderr.setFormatter(formatter)\nlogging.basicConfig(level=10, handlers=[log_stderr])\nlogging.getLogger(\"converters\").setLevel(logging.INFO)\n\nif __name__ == \"__main__\":\n application.run()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"239926144","text":"from django import forms\n\n\nclass BudgetStoreListForm(forms.Form):\n stores = forms.ChoiceField(\n choices=[],\n widget=forms.Select(attrs={'class': \"form-control \"})\n )\n\n def __init__(self, store_price_choices, initial_store, entry_id,\n *args, **kwargs):\n super(BudgetStoreListForm, self).__init__(*args, **kwargs)\n\n self.fields['stores'].choices = \\\n [(e[0], e[2]) for e in store_price_choices]\n self.fields['stores'].widget.attrs['data'] = entry_id\n\n if initial_store:\n self.fields['stores'].initial = initial_store.id\n","sub_path":"hardware/forms/budget_store_list_form.py","file_name":"budget_store_list_form.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"318123557","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport h5py\r\nimport scipy\r\nfrom PIL import Image\r\nfrom scipy import ndimage\r\nimport os\r\n\r\ndef sigmoid(z):\r\n s=1/(1+np.exp(-z))\r\n return s\r\n\r\ndef Test_Image(path,size_weight,size_height):\r\n Test_X=[]\r\n for filename in os.listdir(r\"./Test\"):\r\n image_path=os.path.join(path, filename)\r\n train_image=image = np.array(ndimage.imread(image_path, flatten=False))\r\n image = scipy.misc.imresize(train_image, size=(size_weight, size_height)).reshape((1, size_weight * size_height * 3)).T\r\n 
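        # after reshape+transpose each image is a (size_weight*size_height*3, 1) column vector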
Test_X.append(image)  # append the image vector to the test list\r\n        # print(filename)\r\n\r\n    X = np.array(Test_X).T  # stack the list into a matrix X\r\n    X = np.squeeze(X)  # drop the size-1 dimensions of the matrix\r\n    return X\r\n\r\npath='./Test/'  # path (alternative: path='./image/')\r\nsize_weight=64  # image width\r\nsize_height=64  # image height\r\n\r\nX=Test_Image(path,size_weight,size_weight)\r\n\r\nW=np.load('W.npy')\r\nb=np.load('b.npy')\r\n\r\n#print(\"W\",W.shape)\r\n#print(\"X\",X.shape)\r\n\r\nZ=np.dot(W.T,X)+b\r\nyi=sigmoid(Z)\r\nprint(\"yi:\",yi)","sub_path":"Logical/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"357318062","text":"# -*- coding: utf-8 -*\n# Copyright (c) 2019 BuildGroup Data Services Inc.\n# All rights reserved.\nfrom haystack.query import SearchQuerySet, ValuesSearchQuerySet, ValuesListSearchQuerySet\n\n\nclass CaravaggioSearchQuerySet(SearchQuerySet):\n\n    def get(self, *args, **kwargs):\n        \"\"\"\n        Returns a single instance matching this query, optionally with additional filter kwargs.\n\n        Returns a single object matching the QuerySet.\n\n        .. code-block:: python\n\n            user = User.get(id=1)\n\n        If no objects are matched, a :class:`~.DoesNotExist` exception is raised.\n\n        If more than one object is found, a :class:`~.MultipleObjectsReturned` exception is raised.\n        \"\"\"\n        if args or kwargs:\n            return self.filter(*args, **kwargs).get()\n\n        # Check that the resultset only contains one element, avoiding sending a COUNT query\n        try:\n            self[1]\n            raise list(self.query.models)[0].MultipleObjectsReturned('Multiple objects found')\n        except IndexError:\n            pass\n\n        try:\n            obj = self[0]\n        except IndexError:\n            raise list(self.query.models)[0].DoesNotExist\n\n        return obj\n\n    def terms_json_facet(self, facet_name, field, facets, **kwargs):\n        \"\"\"Adds a terms json facet to a query for the provided field.\"\"\"\n        clone = self._clone()\n        if not getattr(clone.query, \"add_json_query_facet\"):\n            raise TypeError(\n                \"'{}.{}' is not valid SearchQuerySet class. \" \"It has not support for json facets.\",\n                clone.query.__module__,\n                clone.query.__name__,\n            )\n        clone.query.add_json_terms_facet(facet_name, field, facets, **kwargs)\n        return clone\n\n    def query_json_facet(self, facet_name, q, facets, **kwargs):\n        \"\"\"Adds a query json facet to a query for the provided field.\"\"\"\n        clone = self._clone()\n        if not getattr(clone.query, \"add_json_query_facet\"):\n            raise TypeError(\n                \"'{}.{}' is not valid SearchQuerySet class. \" \"It has not support for json facets.\",\n                clone.query.__module__,\n                clone.query.__name__,\n            )\n        clone.query.add_json_query_facet(facet_name, q, facets, **kwargs)\n        return clone\n\n    def range_facet(self, field, **options):\n        \"\"\"Adds ranged faceting to a query for the provided field. Only for\n        Solr.\n        Options: start, end, gap, hardend, other, include, as described at\n        http://wiki.apache.org/solr/SimpleFacetParameters#Facet_by_Range\"\"\"\n        clone = self._clone()\n        if not getattr(clone.query, \"add_range_facet\"):\n            raise TypeError(\n                \"'{}.{}' is not valid SearchQuerySet class. \" \"It has not support for range facets.\",\n                clone.query.__module__,\n                clone.query.__name__,\n            )\n        clone.query.add_range_facet(field, **options)\n        return clone\n\n    def heatmap_facet(self, field, **options):\n        \"\"\"Adds heatmap faceting to a query for the provided field. Only for\n        Solr.\"\"\"\n        clone = self._clone()\n        if not getattr(clone.query, \"add_heatmap_facet\"):\n            raise TypeError(\n                \"'{}.{}' is not valid SearchQuerySet class. 
\" \"It has not support for range facets.\",\n clone.query.__module__,\n clone.query.__name__,\n )\n clone.query.add_heatmap_facet(field, **options)\n return clone\n\n def facets_option(self, facet_param, value):\n \"\"\"Adds faceting option to a query for the provided fields.\n Only for Solr. Options: facets.mincount, facets.missing, include,\n as described at\n https://javadeveloperzone.com/solr/solr-field-value-faceting-example\n /#25_facetmincount\"\"\"\n clone = self._clone()\n if not getattr(clone.query, \"add_facets_option\"):\n raise TypeError(\n \"'{}.{}' is not valid SearchQuerySet class. \" \"It has not support for facets options in the request.\",\n clone.query.__module__,\n clone.query.__name__,\n )\n clone.query.add_facets_option(facet_param, value)\n return clone\n\n def values(self, *fields):\n \"\"\"\n Returns a list of dictionaries, each containing the key/value pairs for\n the result, exactly like Django's ``ValuesQuerySet``.\n \"\"\"\n qs = self._clone(klass=CaravaggioValuesSearchQuerySet)\n qs._fields.extend(fields)\n return qs\n\n def values_list(self, *fields, **kwargs):\n \"\"\"\n Returns a list of field values as tuples, exactly like Django's\n ``QuerySet.values``.\n\n Optionally accepts a ``flat=True`` kwarg, which in the case of a\n single field being provided, will return a flat list of that field\n rather than a list of tuples.\n \"\"\"\n flat = kwargs.pop(\"flat\", False)\n\n if flat and len(fields) > 1:\n raise TypeError(\"'flat' is not valid when values_list is\" \" called with more than one field.\")\n\n qs = self._clone(klass=CaravaggioValuesListSearchQuerySet)\n qs._fields.extend(fields)\n qs._flat = flat\n return qs\n\n\nclass CaravaggioValuesListSearchQuerySet(ValuesListSearchQuerySet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._internal_fields = [\"score\"]\n\n\nclass CaravaggioValuesSearchQuerySet(ValuesSearchQuerySet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._internal_fields = [\"score\"]\n","sub_path":"src/caravaggio_rest_api/haystack/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"454219298","text":"from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom EloMain.calculator.rating_delta import calc_rating_delta\nfrom EloMain.models import Championship, Club, Change, Game, Options\nfrom EloRating.celery import app\n\n\nclass Stats:\n await_matches = 0\n counter = 0\n filter_date = None\n filter_date2 = None\n\n def __str__(self):\n return \"{} {} {} {}\".format(self.await_matches, self.counter, self.filter_date, self.filter_date2)\n\n\n@app.task\ndef fill_championship(champ_id):\n stats = Stats()\n date_from = Options.objects.get(name='date_from').value\n date_to = Options.objects.get(name='date_to').value\n stats.filter_date = datetime.strptime(date_from, \"%d.%m.%y\")\n stats.filter_date2 = datetime.strptime(date_to, \"%d.%m.%y\")\n champ = Championship.objects.get(id=champ_id)\n print(champ.link)\n page_content = requests.get(champ.link).content\n page_soup = BeautifulSoup(page_content, \"html.parser\")\n matches = page_soup.find_all(class_=\"game_block\")\n\n for match in matches:\n date, ht_name, ht_score, at_score, at_name = find_match_data(match)\n print(\"{} {} {}-{} {}\".format(date, ht_name, ht_score, at_score, at_name))\n if not validate_date(date):\n stats.await_matches += 1\n continue\n\n date_obj 
= get_date_from_string(date)\n        if date_obj < stats.filter_date:\n            break\n\n        if date_obj > stats.filter_date2:\n            continue\n\n        if not check_game_exist(date_obj, ht_name, at_name):\n            home_team_obj = Club.objects.get(name=ht_name)\n            away_team_obj = Club.objects.get(name=at_name)\n            game = Game(\n                date=date_obj.strftime(\"%Y-%m-%d\"),\n                home_team=home_team_obj,\n                away_team=away_team_obj,\n                home_score=int(ht_score),\n                away_score=int(at_score),\n                tournament=champ,\n            )\n            game.save()\n\n            index = game.tournament.elo_index\n\n            home_team = game.home_team\n            away_team = game.away_team\n            ht_score = game.home_score\n            at_score = game.away_score\n\n            ht_rating = home_team.rating\n            at_rating = away_team.rating\n\n            delta = calc_rating_delta(ht_rating, at_rating, ht_score, at_score, index)\n\n            home_team.rating = ht_rating + delta\n            away_team.rating = at_rating - delta\n\n            home_team.save()\n            away_team.save()\n\n            change_h = Change(\n                game=game, club=home_team, rating_before=ht_rating, rating_after=home_team.rating, rating_delta=delta\n            )\n            change_a = Change(\n                game=game, club=away_team, rating_before=at_rating, rating_after=away_team.rating, rating_delta=-delta\n            )\n\n            change_h.save()\n            change_a.save()\n            stats.counter += 1\n\n    if stats.await_matches > 0:\n        print(\"Pending ({}): {}\".format(champ.name, stats.await_matches))\n    if stats.counter > 0:\n        print(\"Recorded ({}): {}\".format(champ.name, stats.counter))\n\n    return champ.name, stats.counter, stats.await_matches\n\n\ndef find_match_data(match):\n    date = \"Not valid date\"\n    try:\n        date = match.find(class_=\"status\").find(\"span\").get_text()\n    except Exception as e:\n        print(e)\n\n    ht = match.find(class_=\"ht\")\n    ht_name, ht_score = find_name_score(ht)\n\n    at = match.find(class_=\"at\")\n    at_name, at_score = find_name_score(at)\n\n    return date, ht_name, ht_score, at_score, at_name\n\n\ndef find_name_score(element):\n    name = element.find(class_=\"name\").find(\"span\").get_text()\n    score = element.find(class_=\"gls\").get_text()\n    return name, score\n\n\ndef validate_date(date):\n    try:\n        datetime.strptime(date, \"%d.%m.%y\")\n        return True\n    except Exception as e:\n        print(e)\n        return False\n\n\ndef get_date_from_string(date):\n    return datetime.strptime(date, \"%d.%m.%y\")\n\n\ndef check_game_exist(date, home_team, away_team):\n    return Game.objects.filter(date=date, home_team__name=home_team, away_team__name=away_team).exists()\n","sub_path":"EloMain/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
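# --- Hedged aside (not part of tasks.py): calc_rating_delta() is project code
# that is not shown here. A standard Elo delta, which fill_championship()
# appears to apply symmetrically (the winner gains what the loser drops), would
# look roughly like this; the K-factor and score mapping are assumptions.
def elo_delta(home_rating, away_rating, home_score, away_score, k=30):
    # expected score of the home side under the classic Elo logistic curve
    expected_home = 1.0 / (1.0 + 10 ** ((away_rating - home_rating) / 400.0))
    if home_score > away_score:
        actual_home = 1.0      # home win
    elif home_score < away_score:
        actual_home = 0.0      # away win
    else:
        actual_home = 0.5      # draw
    return k * (actual_home - expected_home)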
+{"seq_id":"120242788","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport time\n\ndevice_file = \"/dev/usb/legousbtower\" + sys.argv[1]\n\nwith open(device_file, 'w') as f:\n    print(\"[+] Sleeping for 5s while device file is deleted\")\n    time.sleep(5)\n    print(\"[+] Exploiting...\")\n    #f.write(\"abc123\")\n","sub_path":"Exploits/legousbtower/legousbtower_exploit.py","file_name":"legousbtower_exploit.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"645662321","text":"#!/usr/bin/python\n# author Andrea Chiappo\t\t\nimport sys\nimport math\nimport yaml\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# this script is meant to extract the -log(Like) Best-Fit parameter values calculated\n# by iMinuit and stored in the logfile, load the grid values of the -log(Like) - also\n# evaluated in the Minuit script (i.e. jmin.py) - and display the corresponding colour map\n\n# i) extract BF parameters\ndwarf = sys.argv[1]\t\t\t\t\t\t\t# get the galaxy name from the command line\n\nbestfit = yaml.load(open(\"output/%s.yaml\"%dwarf,\"rb\"))\nrho0 = bestfit[1][0][\"value\"]\nrs = bestfit[1][1][\"value\"]\n\n# ii) load -log(Like) grid values\npts = np.load(\"output/%s.npy\"%dwarf)\n\n# iii) display colour map\nm = plt.imshow(np.flipud(pts),cmap='rainbow',extent=[0.1,rs+1.,rho0-1.,rho0+1.])\nct = plt.contour(np.linspace(0.1,rs+1.,20),np.linspace(rho0-1.,rho0+1.,20),pts)\nplt.clabel(ct,inline=1,fmt='%1.0f',colors='k')\nplt.semilogy()\nplt.xlabel(r'$r_s [kpc]$',fontsize=18)\nplt.ylabel(r'$\\rho_0 [M_{sun} kpc^{-3}]$',fontsize=18)\nplt.yticks([int(rho0),int(rho0+1.)],['%1.0e'%10**int(rho0),'%1.0e'%10**int(rho0+1.)])\nplt.grid()\ncx = plt.colorbar(m,pad=0)\ncx.set_label(r'$-log(Like)$',fontsize=18)\nplt.scatter(rs,rho0,s=200,marker='*',c='k',label=r'$r_s = $ %1.1f, $log_{10}(\\rho_0) = $ %1.1f'%(rs,rho0))\nplt.legend(scatterpoints=1,loc='lower right',fontsize=12)\nplt.savefig('output/%s_im.png'%dwarf,dpi=100,format='png')\nplt.show()\n","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"567562742","text":"\"\"\"\nModule housing resource handlers for User collection and detail endpoints\n\"\"\"\n# import logging\n\nimport falcon\nimport sqlalchemy as sa\n\nfrom app.resources import BaseDetailResource, BaseResource\nfrom app.models import User\n\n# log = logging.getLogger(__name__)\n\n\nclass UserResource(BaseResource):\n\n    def on_get(self, req, resp):\n        queryset = self.session.query(User)\n\n        resp.media = User.json_collection(\n            queryset,\n            paginated=True,\n            **req.params\n        )\n\n    def on_post(self, req, resp):\n        existing_email = self.session.query(User) \\\n            .filter(User.email == f\"{req.media.get('email')}\") \\\n            .one_or_none()\n        if existing_email:\n            raise falcon.HTTPBadRequest(\n                description='Email already exists'\n            )\n        user = User.create(req.media)\n        try:\n            self.session.add(user)\n            self.session.commit()\n        except Exception as exc:\n            raise falcon.HTTPBadRequest(\n                description=str(exc)\n            )\n\n        resp.status = falcon.HTTP_CREATED\n        resp.media = user.json()\n\n\nclass UserDetailResource(BaseDetailResource):\n\n    def on_get(self, req, resp, pk):\n        try:\n            user = self.session.query(User).filter(User.uuid == pk).one()\n        except sa.orm.exc.NoResultFound:\n            raise falcon.HTTPNotFound(\n                description='Item not found'\n            )\n        resp.media = user.json()\n\n    def on_put(self, req, resp, pk):\n        try:\n            user = self.session.query(User).filter(User.uuid == pk).one()\n        except sa.orm.exc.NoResultFound:\n            raise falcon.HTTPNotFound(\n                description='Item not found'\n            )\n        existing_email = self.session.query(User) \\\n            .filter(\n                User.email == f\"{req.media.get('email')}\",\n                User.uuid != user.uuid\n            ) \\\n            .one_or_none()\n        if existing_email:\n            raise falcon.HTTPBadRequest(\n                description='Email already exists'\n            )\n        user.update(req.media)\n        try:\n            self.session.add(user)\n            self.session.commit()\n        except Exception as exc:\n            raise falcon.HTTPBadRequest(\n                description=str(exc)\n            )\n        resp.media = user.json()\n\n    def on_delete(self, req, resp, pk):\n        try:\n            user = self.session.query(User).filter(User.uuid == pk).one()\n        except sa.orm.exc.NoResultFound:\n            raise falcon.HTTPNotFound(\n                description='Item not found'\n            )\n        self.session.query(User).filter(User.uuid == pk) \\\n            .delete()\n        self.session.commit()\n        resp.status = 
falcon.HTTP_NO_CONTENT\n\n# class UserRelationResource(BaseDetailResource):\n# __collection__ = 'users'\n# __relations__ = ['teams', 'matches']\n\n# def on_get(self, req, resp, oid, relation):\n# item = self.collection.find_one({'_id': self.object_id})\n# if not item:\n# raise falcon.HTTPNotFound(\n# description='Item not found'\n# )\n\n# if relation not in self.__relations__:\n# raise falcon.HTTPNotFound(\n# description='Item relationship not found'\n# )\n\n# teams = self.db['teams'].find({'user': oid})\n# if relation == 'teams':\n# resp.body = dumps(teams)\n\n# team_ids = [str(x['_id']) for x in teams]\n# matches = self.db['matches'].find(\n# {\n# 'team': {\n# '$in': team_ids\n# }\n# }\n# )\n# if relation == 'matches':\n# resp.body = dumps(matches)\n","sub_path":"app/resources/user_resource.py","file_name":"user_resource.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"17403812","text":"#-*-coding:utf-8-*-\n\nimport json\nfrom requests_oauthlib import OAuth1Session\nimport os\nimport time\nimport random\nimport re\nfrom config import config11\nfrom datetime import datetime\n\nCK = config11.CK\nCS = config11.CS\nAT = config11.AT\nAS = config11.AS\nsession = OAuth1Session(CK, CS, AT, AS)\nurl = \"https://api.twitter.com/1.1/search/tweets.json\"\n\ndef check_res(res):\n flag_pass = 0\n print(res.headers['X-Rate-Limit-Remaining'])\n if int(res.headers['X-Rate-Limit-Remaining']) < 3:\n wait_time = int(res.headers['X-Rate-Limit-Reset']) - time.mktime(datetime.now().timetuple())\n time.sleep(wait_time + random.randint(1, 5))\n\ndef preprocessing(text):\n text = re.sub(' ','',text)\n text = re.sub('\\u3000','',text)\n text = re.sub('\\n','',text)\n text = re.sub('\\t','',text)\n text = re.sub('\\r','',text)\n text = re.sub('[,,]','、',text)\n text = re.sub('[..]','。',text)\n return text\n\nif __name__ == '__main__':\n\n lt = \"あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをんがぎぐげござじずぜぞだぢづでどばびぶべぼぱぴぷぺぽ\"\n \n tweet_list = []\n id_list = []\n max_id = \"-1\"\n for i in range(10100):\n word = \"\".join([lt[random.randint(0,len(lt)-1)] for i in range(1)])\n print(i,word)\n params = {'q':word + \" lang:ja\",\n 'count':100,\n 'max_id':max_id,\n 'result_type':'recent'}\n res = session.get(url, params = params)\n check_res(res)\n for tweet in json.loads(res.text)['statuses']:\n id_str = tweet['id_str']\n if id_str in id_list:\n continue\n id_list.append(id_str)\n tweet_list.append(preprocessing(tweet['text']))\n max_id = id_str\n\n write_text = \"\\n\".join(tweet_list[:1000000])\n with open(\"./data/tweet_data1000000.txt\",\"w\") as fwrite:\n fwrite.write(write_text)\n","sub_path":"n2i/search_api.py","file_name":"search_api.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"582818296","text":"# Copyright (c) 2021 War-Keeper\nimport os\nimport sys\n\nimport discord\nfrom discord.ext import commands\nfrom discord.utils import get\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport db\n\n\n# -----------------------------------------------------------\n# This File contains commands for joining a group, leaving a group,\n# and displaying which groups are available\n# -----------------------------------------------------------\nclass Groups(commands.Cog):\n student_pool = {}\n\n # -----------------------------------------------------------\n # initialize\n # 
-----------------------------------------------------------\n def __init__(self, bot):\n self.bot = bot\n\n # -------------------------------------------------------------------------------------------------------\n # Function: reset(self, ctx)\n # Description: deletes all group roles in the server\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # Outputs: confirms role deletion\n # -------------------------------------------------------------------------------------------------------\n @commands.command(name=\"reset\", help=\"Resets group channels and roles. DO NOT USE IN PRODUCTION!\")\n async def reset(self, ctx):\n await ctx.send('Deleting all roles...')\n\n for i in range(100):\n role_name = \"group_\" + str(i)\n role = get(ctx.message.guild.roles, name=role_name)\n if role is None:\n continue\n await role.delete()\n\n await ctx.author.send(\"Roles deleted!\")\n print(\"Roles deleted!\")\n\n # -------------------------------------------------------------------------------------------------------\n # Function: reset_error(self, ctx, error)\n # Description: prints error message for reset command\n # Inputs:\n # - ctx: context of the command\n # - error: error message\n # Outputs:\n # - Error details\n # -------------------------------------------------------------------------------------------------------\n @reset.error\n async def reset_error(self, ctx, error):\n await ctx.author.send(error)\n\n\n # -------------------------------------------------------------------------------------------------------\n # Function: startupgroups(self, ctx)\n # Description: creates roles for the groups\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # Outputs: creates roles for groups\n # -------------------------------------------------------------------------------------------------------\n @commands.command(name=\"startupgroups\", help=\"Creates group roles for members\")\n async def startupgroups(self, ctx):\n await ctx.send('Creating roles....')\n\n for i in range(100):\n role_name = \"group_\" + str(i)\n existing_role = get(ctx.guild.roles, name=role_name)\n print(i)\n if existing_role is None:\n await ctx.guild.create_role(name=role_name)\n\n print(\"Roles created successfully!\")\n\n # -------------------------------------------------------------------------------------------------------\n # Function: startupgroups_error(self, ctx, error)\n # Description: prints error message for startupgroups command\n # Inputs:\n # - ctx: context of the command\n # - error: error message\n # Outputs:\n # - Error details\n # -------------------------------------------------------------------------------------------------------\n @startupgroups.error\n async def startupgroups_error(self, ctx, error):\n await ctx.author.send(error)\n\n\n # -------------------------------------------------------------------------------------------------------\n # Function: connect(self, ctx)\n # Description: connects all users with their groups\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # Outputs: creates a private text channel for all groups\n # -------------------------------------------------------------------------------------------------------\n 
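    # Note: connect first deletes any existing "group-N" text channels, then
    # recreates one private channel per group found in the database; the channel
    # overwrites hide it from @everyone and show it to the group role and author.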
@commands.command(name=\"connect\", help=\"Creates group roles for members\")\n async def connect(self, ctx):\n for i in range(100):\n group_name = \"group-\" + str(i)\n existing_channel = get(ctx.guild.text_channels, name=group_name)\n if existing_channel is not None:\n await existing_channel.delete()\n\n groups = db.query(\n 'SELECT group_num, array_agg(member_name) '\n 'FROM group_members WHERE guild_id = %s GROUP BY group_num ORDER BY group_num',\n (ctx.guild.id,)\n )\n\n for group_num, *_ in groups:\n role_string = \"group_\" + str(group_num)\n user_role = get(ctx.guild.roles, name=role_string)\n\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),\n ctx.author: discord.PermissionOverwrite(read_messages=True),\n user_role: discord.PermissionOverwrite(read_messages=True)\n }\n group_channel_name = \"group-\" + str(group_num)\n await ctx.guild.create_text_channel(group_channel_name, overwrites=overwrites)\n\n # -------------------------------------------------------------------------------------------------------\n # Function: connect_error(self, ctx, error)\n # Description: prints error message for connect command\n # Inputs:\n # - ctx: context of the command\n # - error: error message\n # Outputs:\n # - Error details\n # -------------------------------------------------------------------------------------------------------\n @connect.error\n async def connect_error(self, ctx, error):\n await ctx.author.send(error)\n\n # -------------------------------------------------------------------------------------------------------\n # Function: join(self, ctx, group_num='-1')\n # Description: joins the user to the given group\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # - group_num: the number of the group\n # Outputs: adds the user to the given group or returns an error if the group is invalid or in case of\n # syntax errors\n # -------------------------------------------------------------------------------------------------------\n @commands.command(name='join', help='To use the join command, do: $join \\n \\\n ( For example: $join 0 )', pass_context=True)\n async def join(self, ctx, group_num: int):\n # get the name of the caller\n member_name = ctx.message.author.display_name.upper()\n member = ctx.message.author\n\n if group_num < 0 or group_num > 99:\n await ctx.send('Not a valid group')\n await ctx.send(\"To use the join command, do: $join \"\n \"where 0 <= <= 99 \\n ( For example: $join 0 )\")\n return\n\n group_count = db.query(\n 'SELECT COUNT(group_num) FROM group_members WHERE guild_id = %s AND group_num = %s',\n (ctx.guild.id, group_num)\n )\n\n if group_count == 6:\n await ctx.send('A group cannot have more than 6 people!')\n return\n\n current_group_num = db.query(\n 'SELECT group_num FROM group_members WHERE guild_id = %s AND member_name = %s',\n (ctx.guild.id, member_name)\n )\n\n if current_group_num:\n await ctx.send(f'You are already in Group {current_group_num[0][0]}')\n return\n\n db.query(\n 'INSERT INTO group_members (guild_id, group_num, member_name) VALUES (%s, %s, %s)',\n (ctx.guild.id, group_num, member_name)\n )\n identifier = \"group_\" + str(group_num)\n role = get(ctx.guild.roles, name=identifier)\n\n if role is None:\n await ctx.guild.create_role(name=identifier)\n role = get(ctx.guild.roles, name=identifier)\n\n await member.add_roles(role)\n\n await ctx.send(f'You are now in Group {group_num}! 
There are now {group_count[0][0] + 1}/6 members.')\n\n # -------------------------------------------------------------------------------------------------------\n # Function: join_error(self, ctx, error)\n # Description: prints error message for join command\n # Inputs:\n # - ctx: context of the command\n # - error: error message\n # Outputs:\n # - Error details\n # -------------------------------------------------------------------------------------------------------\n @join.error\n async def join_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('To use the join command, do: $join \\n ( For example: $join 0 )')\n else:\n await ctx.author.send(error)\n #await ctx.message.delete()\n print(error)\n\n # -------------------------------------------------------------------------------------------------------\n # Function: leave(self, ctx)\n # Description: removes the user from the given group\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # Outputs: removes the user from the given group or returns an error if the group is invalid or in\n # case of syntax errors\n # -------------------------------------------------------------------------------------------------------\n @commands.command(name='leave', help='To use the leave command, do: $leave \\n \\\n ( For example: $leave )', pass_context=True)\n async def leave(self, ctx):\n # get the name of the caller\n member_name = ctx.message.author.display_name.upper()\n member = ctx.message.author\n\n current_group_num = db.query(\n 'SELECT group_num FROM group_members WHERE guild_id = %s AND member_name = %s',\n (ctx.guild.id, member_name)\n )\n\n if current_group_num:\n db.query(\n 'DELETE FROM group_members WHERE guild_id = %s AND member_name = %s',\n (ctx.guild.id, member_name)\n )\n await ctx.send(f'You have been removed from Group {current_group_num[0][0]}!')\n\n identifier = \"group_\" + str(current_group_num[0][0])\n role = get(ctx.guild.roles, name=identifier)\n await member.remove_roles(role)\n\n else:\n await ctx.send('You are not in a group!')\n\n\n # -------------------------------------------------------------------------------------------------------\n # Function: leave_error(self, ctx, error)\n # Description: prints error message for leave command\n # Inputs:\n # - ctx: context of the command\n # - error: error message\n # Outputs:\n # - Error details\n # -------------------------------------------------------------------------------------------------------\n @leave.error\n async def leave_error(self, ctx, error):\n await ctx.author.send(error)\n #await ctx.message.delete()\n print(error)\n\n\n # -------------------------------------------------------------------------------------------------------\n # Function: group(self, ctx)\n # Description: prints the list of groups\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # Outputs: prints the list of groups\n # -------------------------------------------------------------------------------------------------------\n @commands.command(name='groups', help='prints group counts', pass_context=True)\n # @commands.dm_only()\n # TODO maybe include channel where all groups displayed\n async def groups(self, ctx):\n # load groups csv\n groups = db.query(\n 'SELECT group_num, array_agg(member_name) '\n 'FROM 
group_members WHERE guild_id = %s GROUP BY group_num ORDER BY group_num',\n (ctx.guild.id,)\n )\n\n # create embedded objects\n embed = discord.Embed(title='Group List', color=discord.Color.teal())\n embed.set_thumbnail(url=\"https://i.pinimg.com/474x/e7/e3/bd/e7e3bd1b5628510a4e9d7a9a098b7be8.jpg\")\n\n for group_num, members in groups:\n embed.add_field(name=f'Group {group_num}', value=str(len(members)), inline=True)\n\n # print the embedded objects\n embed.set_footer(text=\"Number Represents the Group Size\")\n await ctx.send(embed=embed)\n\n # -------------------------------------------------------------------------------------------------------\n # Function: groups_error(self, ctx, error)\n # Description: prints error message for groups command\n # Inputs:\n # - ctx: context of the command\n # - error: error message\n # Outputs:\n # - Error details\n # -------------------------------------------------------------------------------------------------------\n @groups.error\n async def groups_error(self, ctx, error):\n await ctx.author.send(error)\n #await ctx.message.delete()\n print(error)\n\n\n # -------------------------------------------------------------------------------------------------------\n # Function: group(self, ctx, group_num)\n # Description: prints the members of the group, or the current members group if they have a group\n # Inputs:\n # - self: used to access parameters passed to the class through the constructor\n # - ctx: used to access the values passed through the current context\n # - group_num: the group number to list names for\n # Outputs: prints the name of people in the group\n # -------------------------------------------------------------------------------------------------------\n @commands.command(name='group', help='print names of members in a group, or current groups members \\n \\\n ( For example: $group or $group 8 )', pass_context=True)\n # @commands.dm_only()\n # TODO maybe include channel where all groups displayed\n async def group(self, ctx, group_num: int = -1):\n\n if group_num == -1:\n member_name = ctx.message.author.display_name.upper()\n\n group_num = db.query(\n 'SELECT group_num FROM group_members WHERE guild_id = %s and member_name = %s',\n (ctx.guild.id, member_name)\n )\n\n if not group_num:\n await ctx.send('You are not in a group!')\n return\n\n group_num = group_num[0][0]\n\n # load groups csv\n group = db.query(\n 'SELECT member_name FROM group_members WHERE guild_id = %s and group_num = %s',\n (ctx.guild.id, group_num)\n )\n\n # create embedded objects\n embed = discord.Embed(title='Group Members', color=discord.Color.teal())\n embed.set_thumbnail(url=\"https://i.pinimg.com/474x/e7/e3/bd/e7e3bd1b5628510a4e9d7a9a098b7be8.jpg\")\n\n members = \"\"\n\n for member in group:\n members += member[0] + '\\n'\n\n if members == \"\":\n members = \"None\"\n\n embed.add_field(name=f'Group {group_num}: ', value=members, inline=True)\n\n # print the embedded objects\n await ctx.send(embed=embed)\n\n\n # -------------------------------------------------------------------------------------------------------\n # Function: group_error(self, ctx, error)\n # Description: prints error message for group command\n # Inputs:\n # - ctx: context of the command\n # - error: error message\n # Outputs:\n # - Error details\n # -------------------------------------------------------------------------------------------------------\n @group.error\n async def group_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await 
ctx.send('To use the group command, do: $group \\n ( For example: $group 0 )')\n else:\n await ctx.author.send(error)\n #await ctx.message.delete()\n print(error)\n\n\n # -----------------------------------------------------------\n # This is a testing arg, not really used for anything else but adding to the csv file\n # -----------------------------------------------------------\n # @commands.command(name='test_name', help='add a name to the name_mapping.csv', pass_context=True)\n # async def test_name(self, ctx, arg, arg2):\n # student_pool = load_pool()\n # display_name = ctx.message.author.display_name\n # display_name_upper = display_name.upper()\n #\n # if student_pool.get(display_name_upper) is None:\n # student_pool[display_name_upper] = arg.upper() + ' ' + arg2.upper()\n # else:\n # member_name = student_pool[display_name_upper]\n # await ctx.send('You have already registered with the name: ' + member_name.title())\n #\n # print_pool(student_pool)\n\n\n\n# # ------------------------------------------------------------\n# # Used to load the members from the csv file into a dictionary\n# # ------------------------------------------------------------\n# def load_pool() -> dict:\n# dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# os.chdir(dir)\n# os.chdir('data')\n# os.chdir('server_data')\n# with open('name_mapping.csv', mode='r') as infile:\n# reader = csv.reader(infile)\n# student_pools = {rows[0].upper(): rows[1].upper() for rows in reader}\n# return student_pools\n\n\n# # -----------------------------------------------------------\n# # Used to print the members to the csv file\n# # -----------------------------------------------------------\n# def print_pool(pools):\n# dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# os.chdir(dir)\n# os.chdir('data')\n# os.chdir('server_data')\n# with open('name_mapping.csv', mode='w', newline=\"\") as outfile:\n# writer = csv.writer(outfile)\n# for key, value in pools.items():\n# writer.writerow([key, value])\n\n\n# -----------------------------------------------------------\n# add the file to the bot's cog system\n# -----------------------------------------------------------\ndef setup(bot):\n bot.add_cog(Groups(bot))\n","sub_path":"cogs/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":19027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"393272322","text":"class BasicTrader:\n def __init__(\n self,\n upbit,\n ticker,\n ):\n self.upbit = upbit\n self.ticker = ticker\n\n @property\n def krw_balance(self):\n return self.upbit.get_balance(ticker=\"KRW\")\n\n @property\n def ticker_balance(self):\n return self.upbit.get_balance(ticker=self.ticker)\n\n def buy(self):\n krw_price = 10000\n if self.krw_balance > krw_price:\n self.upbit.buy_market_order(\n ticker=self.ticker,\n price=krw_price,\n )\n print(f\"Buy {self.ticker}, KRW: {krw_price}\")\n\n def sell(self, ticker_price, ticker_volume):\n self.upbit.sell_limit_order(\n ticker=self.ticker,\n price=ticker_price,\n volume=ticker_volume,\n )\n print(f\"Sell {self.ticker}, Ticker: {self.ticker_balance}\")\n\n @property\n def avg_ticker_price(self):\n balances = self.upbit.get_balances()\n for balance in balances:\n if self.ticker.split('-')[1] == balance['currency']:\n return float(balance['avg_buy_price'])\n return 
0.0\n","sub_path":"src/trader/basic_trader.py","file_name":"basic_trader.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"514655734","text":"\"\"\"\nQuickSort Implementations Recursive & Iterative\n\n\"\"\"\n\n\ndef quick_sort_iterative(list_, left, right):\n    \"\"\"\n    Iterative version of quick sort\n    \"\"\"\n    temp_stack = []\n    temp_stack.append((left, right))\n\n    # Main loop to pop and push items until stack is empty\n    while temp_stack:\n        pos = temp_stack.pop()\n        right, left = pos[1], pos[0]\n        piv = partition(list_, left, right)\n        # If items in the left of the pivot push them to the stack\n        if piv - 1 > left:\n            temp_stack.append((left, piv - 1))\n        # If items in the right of the pivot push them to the stack\n        if piv + 1 < right:\n            temp_stack.append((piv + 1, right))\n\n\ndef quick_sort_recursive(list_, left, right):\n    \"\"\"\n    Quick sort method (Recursive)\n    \"\"\"\n    if right <= left:\n        return\n    else:\n        # Get pivot\n        piv = partition(list_, left, right)\n        # Sort left side of pivot\n        quick_sort_recursive(list_, left, piv - 1)\n        # Sort right side of pivot\n        quick_sort_recursive(list_, piv + 1, right)\n\n\ndef partition(list_, left, right):\n    \"\"\"\n    Partition method\n    \"\"\"\n    # Pivot first element in the array\n    # piv = list_[right]\n    piv = list_[left]\n    i = left + 1\n    j = right\n\n    while 1:\n        while i <= j and list_[i] <= piv:\n            i += 1\n        while j >= i and list_[j] >= piv:\n            j -= 1\n        if j <= i:\n            break\n        # Exchange items\n        list_[i], list_[j] = list_[j], list_[i]\n    # Exchange pivot to the right position\n    list_[left], list_[j] = list_[j], list_[left]\n    return j\n\n#\n# x = [4,87,12,9,21,7]\n#\n# quick_sort_iterative(x, 0, len(x)-1)\n# print(x)\n\ndef test(list_, left, right):\n    stack = []\n    stack.append([left, right])\n    # stack.append([left, right])\n    print(stack)\n    pos = stack.pop()\n    # print(pos)\n    right, left = pos[1], pos[0]\n    # print(right, left)\n    piv = partition(list_, left, right)\n    # print('piv', piv)\n    print(stack)\n\nx = [14,87,12,9,21,7, 99]\ntest(x, 0, len(x)-1)\n\n\n","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
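# --- Hedged aside (not part of quick_sort.py): a minimal agreement check for
# the two sorts above against Python's built-in sorted(); the trial count,
# input sizes and value range are arbitrary choices for illustration.
import random

def check_sort(sort_fn, trials=200):
    for _ in range(trials):
        data = [random.randint(0, 999) for _ in range(random.randint(0, 40))]
        expected = sorted(data)
        if data:  # both variants expect valid left/right indices
            sort_fn(data, 0, len(data) - 1)
        assert data == expected
# e.g. check_sort(quick_sort_iterative); check_sort(quick_sort_recursive)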
+{"seq_id":"523098953","text":"from log_into_wiki import *\nimport mwparserfromhell\n\nsite = login('me', 'lol') # Set wiki\nsummary = 'team1footnote' # Set summary\n\nlimit = -1\n# startat_page = 'asdf'\nthis_template = site.pages['Template:MatchSchedule'] # Set template\npages = this_template.embeddedin()\n\npages_var = list(pages)\n\npages_array = [p.name for p in pages_var]\n\ntry:\n\tstartat = pages_array.index(startat_page)\nexcept NameError as e:\n\tstartat = -1\nexcept ValueError as e:\n\tstartat = -1\nprint(startat)\n\nlmt = 0\nfor page in pages_var:\n\tif lmt == limit:\n\t\tbreak\n\tlmt += 1\n\tif lmt < startat:\n\t\tprint(\"Skipping page %s\" % page.name)\n\telse:\n\t\ttext = page.text()\n\t\twikitext = mwparserfromhell.parse(text)\n\t\tfor template in wikitext.filter_templates():\n\t\t\tif template.name.matches('MatchSchedule'):\n\t\t\t\tif template.has('footnote1'):\n\t\t\t\t\tf = template.get('footnote1').value.strip()\n\t\t\t\t\ttemplate.add('team1footnote', f, before = 'footnote1')\n\t\t\t\t\ttemplate.remove('footnote1')\n\t\t\t\tif template.has('footnote2'):\n\t\t\t\t\tf = template.get('footnote2').value.strip()\n\t\t\t\t\ttemplate.add('team2footnote', f, before = 'footnote2')\n\t\t\t\t\ttemplate.remove('footnote2')\n\t\t\n\t\tnewtext = str(wikitext)\n\t\tif text != newtext:\n\t\t\tprint('Saving page %s...' % page.name)\n\t\t\tpage.save(newtext, summary=summary)\n\t\telse:\n\t\t\tprint('Skipping page %s...' % page.name)","sub_path":"team1footnote.py","file_name":"team1footnote.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"264777873","text":"# Uses python3\nimport sys\n\ndef gcd_naive(a, b):\n    current_gcd = 1\n    for d in range(2, min(a, b) + 1):\n        if a % d == 0 and b % d == 0:\n            if d > current_gcd:\n                current_gcd = d\n\n    return current_gcd\n\ndef gcd_fast(a, b):\n    if a < b:\n        a, b = b, a\n\n    if b == 0:\n        return a\n    else:\n        a_rem = a % b\n        return gcd_fast(b, a_rem)\n\ndef stress_test_data(M):\n\t# return a random integer number from 0-M\n\tfrom random import randint\n\treturn randint(0,M), randint(0,M)\n\n\n\nif __name__ == \"__main__\":\n    input = sys.stdin.read()\n    a, b = map(int, input.split())\n    #print(gcd_naive(a, b))\n    print(gcd_fast(a, b))\n\n\n'''\n# stress test\nif __name__ == \"__main__\":\n    while True:\n        a, b = stress_test_data(100)\n        gcd1=gcd_naive(a, b)\n        gcd2=gcd_fast(a, b)\n        print(a)\n        print(b)\n\n        if gcd1 == gcd2:\n        \tprint('OK')\n        else:\n        \tprint(gcd1)\n        \tprint(gcd2)\n        \tbreak\n'''","sub_path":"Coursera_Algorithm_and_data_structure/AlgorithmicToolbox/week2_algorithmic_warmup/3_greatest_common_divisor/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"534370834","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport re\nimport sys\nimport json\nimport traceback\nfrom urllib import quote, unquote\nfrom urlparse import urljoin, urlsplit\n\ntry:\n from forgeimporters.base import ProjectExtractor\n urlopen = ProjectExtractor.urlopen\nexcept ImportError:\n try:\n from allura.lib.helpers import urlopen\n except ImportError:\n from urllib2 import urlopen\n\ntry:\n # Ignore this import if the html2text package is not installed\n import html2text\nexcept ImportError:\n pass\n\nfrom BeautifulSoup import BeautifulSoup\n\nlog = logging.getLogger(__name__)\n\n\nclass WikiExporter(object):\n\n PAGE_LIST_URL = 'wiki/TitleIndex'\n PAGE_URL = 'wiki/%s'\n CONTENT_DIV_ATTRS = {'class': 'wikipage searchable'}\n EXCLUDE_PAGES = [\n 'CamelCase',\n 'InterMapTxt',\n 'InterTrac',\n 'InterWiki',\n 'PageTemplates',\n 'SandBox',\n 'TitleIndex',\n 'TracAccessibility',\n 'TracAdmin',\n 'TracBackup',\n 'TracBrowser',\n 'TracChangeset',\n 'TracEnvironment',\n 'TracFineGrainedPermissions',\n 'TracGuide',\n 'TracImport',\n 'TracIni',\n 'TracInterfaceCustomization',\n 'TracLinks',\n 'TracLogging',\n 'TracNavigation',\n 'TracNotification',\n 'TracPermissions',\n 'TracPlugins',\n 'TracQuery',\n 'TracReports',\n 'TracRevisionLog',\n 'TracRoadmap',\n 'TracRss',\n 'TracSearch',\n 'TracSupport',\n 'TracSyntaxColoring',\n 'TracTickets',\n 'TracTicketsCustomFields',\n 'TracTimeline',\n 'TracUnicode',\n 'TracWiki',\n 'TracWorkflow',\n 'WikiDeletePage',\n 'WikiFormatting',\n 'WikiHtml',\n 'WikiMacros',\n 'WikiNewPage',\n 'WikiPageNames',\n 'WikiProcessors',\n 'WikiRestructuredText',\n 'WikiRestructuredTextLinks',\n 'RecentChanges',\n ]\n RENAME_PAGES = {\n 'WikiStart': 'Home', # Change the start page name to Home\n 'Home': 'WikiStart', # Rename the Home page to WikiStart\n }\n\n def __init__(self, base_url, options):\n self.base_url = base_url\n self.options = options\n\n def export(self, out):\n pages = []\n for title in self.page_list():\n try:\n pages.append(self.get_page(title))\n except:\n self.log('Cannot fetch page %s. Skipping' % title)\n self.log(traceback.format_exc())\n continue\n out.write(json.dumps(pages, indent=2, sort_keys=True))\n out.write('\\n')\n\n def log(self, msg):\n log.info(msg)\n if self.options.verbose:\n print >>sys.stderr, msg\n\n def url(self, suburl, type=None):\n url = urljoin(self.base_url, suburl)\n if type is None:\n return url\n glue = '&' if '?' 
in suburl else '?'\n return url + glue + 'format=' + type\n\n def fetch(self, url):\n return urlopen(url)\n\n def page_list(self):\n url = urljoin(self.base_url, self.PAGE_LIST_URL)\n self.log('Fetching list of pages from %s' % url)\n r = self.fetch(url)\n html = BeautifulSoup(r)\n pages = html.find('div', attrs=self.CONTENT_DIV_ATTRS) \\\n .find('ul').findAll('li')\n pages = [page.find('a').text\n for page in pages\n if page.find('a')\n and page.find('a').text not in self.EXCLUDE_PAGES]\n # Remove duplicate entries by converting page list to a set.\n # As we're going to fetch all listed pages,\n # it's safe to destroy the original order of pages.\n return set(pages)\n\n def get_page(self, title):\n title = quote(title)\n convert_method = '_get_page_' + self.options.converter\n content = getattr(self, convert_method)(title)\n page = {\n 'title': self.convert_title(title),\n 'text': self.convert_content(content),\n 'labels': '',\n }\n return page\n\n def _get_page_html2text(self, title):\n url = self.url(self.PAGE_URL % title)\n self.log('Fetching page %s' % url)\n r = self.fetch(url)\n html = BeautifulSoup(r)\n return html.find('div', attrs=self.CONTENT_DIV_ATTRS)\n\n def _get_page_regex(self, title):\n url = self.url(self.PAGE_URL % title, 'txt')\n self.log('Fetching page %s' % url)\n r = self.fetch(url)\n return r\n\n def convert_title(self, title):\n title = self.RENAME_PAGES.get(title, title)\n title = title.replace('/', '-') # Handle subpages\n title = title.rstrip('?') # Links to non-existent pages ends with '?'\n return title\n\n def convert_content(self, content):\n convert_method = '_convert_content_' + self.options.converter\n return getattr(self, convert_method)(content)\n\n def _convert_wiki_toc_to_markdown(self, content):\n \"\"\"\n Removes contents of div.wiki-toc elements and replaces them with\n the '[TOC]' markdown macro.\n \"\"\"\n for toc in content('div', attrs={'class': 'wiki-toc'}):\n toc.string = '[TOC]'\n return content\n\n def _convert_content_html2text(self, content):\n html2text.BODY_WIDTH = 0 # Don't wrap lines\n content = self._convert_wiki_toc_to_markdown(content)\n content = html2text.html2text(unicode(content))\n # Convert internal links\n internal_url = urlsplit(self.base_url).path + 'wiki/'\n internal_link_re = r'\\[([^]]+)\\]\\(%s([^)]*)\\)' % internal_url\n internal_link = re.compile(internal_link_re, re.UNICODE)\n def sub(match):\n caption = match.group(1)\n page = self.convert_title(match.group(2))\n if caption == page:\n link = '[%s]' % unquote(page)\n else:\n link = '[%s](%s)' % (caption, page)\n return link\n return internal_link.sub(sub, content)\n\n def _convert_content_regex(self, text):\n # https://gist.github.com/sgk/1286682\n text = re.sub('\\r\\n', '\\n', text)\n text = re.sub(r'{{{(.*?)}}}', r'`\\1`', text)\n\n def indent4(m):\n return '\\n ' + m.group(1).replace('\\n', '\\n ')\n\n text = re.sub(r'(?sm){{{\\n(.*?)\\n}}}', indent4, text)\n text = re.sub(r'(?m)^====\\s+(.*?)\\s+====$', r'#### \\1', text)\n text = re.sub(r'(?m)^===\\s+(.*?)\\s+===$', r'### \\1', text)\n text = re.sub(r'(?m)^==\\s+(.*?)\\s+==$', r'## \\1', text)\n text = re.sub(r'(?m)^=\\s+(.*?)\\s+=$', r'# \\1', text)\n text = re.sub(r'^ * ', r'****', text)\n text = re.sub(r'^ * ', r'***', text)\n text = re.sub(r'^ * ', r'**', text)\n text = re.sub(r'^ * ', r'*', text)\n text = re.sub(r'^ \\d+. 
', r'1.', text)\n        a = []\n        for line in text.split('\\n'):\n            if not line.startswith('    '):\n                line = re.sub(r'\\[(https?://[^\\s\\[\\]]+)\\s([^\\[\\]]+)\\]', r'[\\2](\\1)', line)\n                line = re.sub(r'\\[(wiki:[^\\s\\[\\]]+)\\s([^\\[\\]]+)\\]', r'[\\2](/\\1/)', line)\n                line = re.sub(r'\\!(([A-Z][a-z0-9]+){2,})', r'\\1', line)\n                line = re.sub(r'\\'\\'\\'(.*?)\\'\\'\\'', r'*\\1*', line)\n                line = re.sub(r'\\'\\'(.*?)\\'\\'', r'_\\1_', line)\n            a.append(line)\n        return '\\n'.join(a)\n","sub_path":"ForgeWiki/forgewiki/scripts/wiki_from_trac/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"342091242","text":"import sqlite3\nfrom dateutil import parser\nfrom datetime import datetime, timedelta\nfrom mat_line import mat_line\n\n\ndef cpu_read_db(dbname, last_min=1):\n    # connect to the database\n    conn = sqlite3.connect(dbname)\n    cursor = conn.cursor()\n    # get the current time\n    now = datetime.now()\n    # compute the timestamp last_min minutes ago\n    before_last_min = now - timedelta(minutes=last_min)\n    # query rows newer than that timestamp (parameterized to avoid SQL injection)\n    cursor.execute('select time,cpu from routerdb where time > ?', (str(before_last_min),))\n    # fetch the results from the database\n    yourresults = cursor.fetchall()\n    # return a list of [datetime, cpu] pairs\n    return [[parser.parse(i[0]), i[1]] for i in yourresults]\n\n\nif __name__ == '__main__':\n    # print(cpu_read_db('deviceinfo.sqlite',1))\n    mat_line(cpu_read_db('deviceinfo.sqlite', 1))\n","sub_path":"homework_standard-2.py","file_name":"homework_standard-2.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"541749259","text":"import logging\n\nimport discord\n\nfrom bot.messaging.events import Events\nfrom bot.services.base_service import BaseService\n\nlog = logging.getLogger(__name__)\n\n\nclass RoleHandlingService(BaseService):\n\n    def __init__(self, *, bot):\n        super().__init__(bot)\n\n    @BaseService.Listener(Events.on_guild_role_create)\n    async def on_role_create(self, role):\n        await self.bot.role_route.create_role(role.id,\n                                              role.name,\n                                              role.permissions.administrator,\n                                              role.guild.id,\n                                              raise_on_error=True)\n\n    @BaseService.Listener(Events.on_guild_role_delete)\n    async def on_role_delete(self, role):\n        log.info(f'Role: {role.id} deleted in guild: {role.guild.id}')\n        await self.bot.role_route.remove_role(role.id, raise_on_error=True)\n\n    @BaseService.Listener(Events.on_guild_role_update)\n    async def on_role_update(self, before, after: discord.Role):\n        log.info(f'Role: {after.id} updated in guild: {after.guild.id}')\n        await self.bot.role_route.edit_role(after.id, after.name, after.permissions.administrator, raise_on_error=True)\n\n    async def load_service(self):\n        pass\n","sub_path":"ClemBot.Bot/bot/services/role_handling_service.py","file_name":"role_handling_service.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"348958645","text":"import json\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(description='main', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--output_dir', required=True, type=str)\nparser.add_argument('--architecture', required=True, type=str)\nargs = parser.parse_args()\n\npred = []\nwith open(os.path.join(args.output_dir, f'prediction_{args.architecture}.txt')) as fin:\n\tfor line in fin:\n\t\tdata = float(line.strip())\n\t\tpred.append(data)\n\ni = 0\nwith open('../BM25/mag_filter.json') as fin, 
open(os.path.join(args.output_dir, f'prediction_{args.architecture}.json'), 'w') as fout:\n\tfor line in fin:\n\t\tjs = json.loads(line)\n\t\tout = {}\n\t\tout['paper'] = js['paper']\n\t\tout['label'] = js['label']\n\n\t\tlabels = js['predicted_label']\n\t\tl = len(labels)\n\t\tsim = {}\n\t\tfor label, score in zip(labels, pred[i:i+l]):\n\t\t\tsim[label] = score\n\t\tsim_sorted = sorted(sim.items(), key=lambda x:x[1], reverse=True)\n\t\tout['predicted_label'] = sim_sorted\n\t\tfout.write(json.dumps(out)+'\\n')\n\n\t\ti += l\n","sub_path":"calcsim.py","file_name":"calcsim.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"591787362","text":"import unittest\r\nfrom HW09_Himanshu import Student, Instructor, University\r\nfrom typing import List\r\n\"\"\" testing different files \"\"\"\r\n\r\nclass StudentTest(unittest.TestCase):\r\n \"\"\" Class to check class Student \"\"\"\r\n def test_student(self) -> None:\r\n \"\"\" Function that tests class Student \"\"\"\r\n actual: list = list()\r\n for student in University(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 1\")._students.values(): \r\n actual.append([student._cwid, student._name, sorted(student._course.keys())])\r\n expected: List[List[str, str, List[str]]] = [['10103', 'Baldwin, C', ['CS 501', 'SSW 564', 'SSW 567', 'SSW 687']],\r\n ['10115', 'Wyatt, X', ['CS 545', 'SSW 564', 'SSW 567', 'SSW 687']],\r\n ['10172', 'Forbes, I', ['SSW 555', 'SSW 567']],\r\n ['10175', 'Erickson, D', ['SSW 564','SSW 567','SSW 687']],\r\n ['10183', 'Chapman, O', ['SSW 689']],\r\n ['11399', 'Cordova, I', ['SSW 540']],\r\n ['11461', 'Wright, U', ['SYS 611', 'SYS 750', 'SYS 800']],\r\n ['11658','Kelly, P', ['SSW 540']],\r\n ['11714', 'Morton, A', ['SYS 611', 'SYS 645']],\r\n ['11788', 'Fuller, E', ['SSW 540']]]\r\n # test1: University = University(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 1\")\r\n # print(actual)\r\n self.assertEqual(actual, expected)\r\n\r\n with self.assertRaises(ValueError): # raises exception error\r\n Student('123', 'him', 'se').store_course_grade('SSW 810', '')\r\n \r\nclass InstructorTest(unittest.TestCase):\r\n \"\"\" Class to check class Instructor \"\"\"\r\n def test_instructor(self) -> None:\r\n \"\"\" Function that tests class Instructor \"\"\"\r\n actual: list = list()\r\n for instructor in University(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 1\")._instructors.values():\r\n for c in instructor._courses:\r\n actual.append([instructor._cwid, instructor._name, instructor._major, c, instructor._courses[c]])\r\n expected: List[List[str, str, List[str]]] = [['98765', 'Einstein, A','SFEN','SSW 567', 4],\r\n ['98765', 'Einstein, A','SFEN','SSW 540', 3],\r\n ['98764', 'Feynman, R','SFEN','SSW 564', 3],\r\n ['98764', 'Feynman, R','SFEN','SSW 687', 3],\r\n ['98764', 'Feynman, R','SFEN','CS 501', 1],\r\n ['98764', 'Feynman, R','SFEN','CS 545', 1],\r\n ['98763', 'Newton, I','SFEN','SSW 555', 1],\r\n ['98763', 'Newton, I','SFEN','SSW 689', 1],\r\n ['98760', 'Darwin, C','SYEN','SYS 800', 1],\r\n ['98760', 'Darwin, C','SYEN','SYS 750', 1],\r\n ['98760', 'Darwin, C','SYEN','SYS 611', 2],\r\n ['98760', 'Darwin, C','SYEN','SYS 645', 1]]\r\n self.assertEqual(actual, expected)\r\n\r\nclass ErrorTest(unittest.TestCase):\r\n \"\"\" testing all the errors \"\"\"\r\n def test_error_student(self) -> None:\r\n \"\"\" Function that tests Student file not 
found error \"\"\"\r\n with self.assertRaises(FileNotFoundError): # raises exception error\r\n University(\"nothing to check\")._read_students()\r\n \r\n def test_error_instructor(self) -> None:\r\n \"\"\" Function that tests Instructor file not found error \"\"\"\r\n with self.assertRaises(FileNotFoundError): # raises exception error\r\n University(\"nothing to check\")._read_instructors()\r\n \r\n def test_error_grades(self) -> None:\r\n \"\"\" Function that tests Grades file not found error \"\"\"\r\n with self.assertRaises(FileNotFoundError): # raises exception error\r\n University(\"nothing to check\")._read_grades()\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main(exit = False, verbosity = 2)","sub_path":"HW09_Test_Himanshu.py","file_name":"HW09_Test_Himanshu.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"280932438","text":"\"\"\"\nDjango forms.\n\"\"\"\nfrom django import forms\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom nautobot.extras.forms import AddRemoveTagsForm\nfrom nautobot.extras.models.tags import Tag\nfrom nautobot.extras.utils import FeatureQuery\nfrom nautobot.utilities.forms import (\n BootstrapMixin,\n BulkEditForm,\n BulkEditNullBooleanSelect,\n CSVContentTypeField,\n CSVMultipleContentTypeField,\n CSVModelForm,\n DynamicModelMultipleChoiceField,\n SlugField,\n)\n\nfrom nautobot_data_validation_engine.models import MinMaxValidationRule, RegularExpressionValidationRule\n\n\n#\n# RegularExpressionValidationRules\n#\n\n\nclass RegularExpressionValidationRuleForm(BootstrapMixin, forms.ModelForm):\n \"\"\"\n Base model form for the RegularExpressionValidationRule model.\n \"\"\"\n\n slug = SlugField()\n content_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_validators\").get_query()).order_by(\n \"app_label\", \"model\"\n ),\n )\n\n class Meta:\n model = RegularExpressionValidationRule\n fields = [\"name\", \"slug\", \"enabled\", \"content_type\", \"field\", \"regular_expression\", \"error_message\"]\n\n\nclass RegularExpressionValidationRuleCSVForm(CSVModelForm):\n \"\"\"\n Base csv form for the RegularExpressionValidationRule model.\n \"\"\"\n\n slug = SlugField()\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_validators\").get_query()),\n help_text=\"The object type to which this regular expression rule applies.\",\n )\n\n class Meta:\n model = RegularExpressionValidationRule\n fields = RegularExpressionValidationRule.csv_headers\n\n\nclass RegularExpressionValidationRuleBulkEditForm(BootstrapMixin, BulkEditForm):\n \"\"\"\n Base bulk edit form for the RegularExpressionValidationRule model.\n \"\"\"\n\n pk = forms.ModelMultipleChoiceField(\n queryset=RegularExpressionValidationRule.objects.all(), widget=forms.MultipleHiddenInput\n )\n enabled = forms.NullBooleanField(\n required=False,\n widget=BulkEditNullBooleanSelect(),\n )\n regular_expression = forms.CharField(required=False)\n error_message = forms.CharField(required=False)\n\n class Meta:\n nullable_fields = [\"error_message\"]\n\n\nclass RegularExpressionValidationRuleFilterForm(BootstrapMixin, forms.Form):\n \"\"\"\n Base filter form for the RegularExpressionValidationRule model.\n \"\"\"\n\n model = RegularExpressionValidationRule\n field_order = [\"q\", \"name\", \"enabled\", \"content_type\", \"field\", \"regular_expression\", \"error_message\"]\n q = forms.CharField(required=False, 
label=\"Search\")\n # \"CSV\" field is being used here because it is using the slug-form input for\n # content-types, which improves UX.\n content_type = CSVMultipleContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_validators\").get_query()).order_by(\n \"app_label\", \"model\"\n ),\n required=False,\n )\n\n\n#\n# MinMaxValidationRules\n#\n\n\nclass MinMaxValidationRuleForm(BootstrapMixin, forms.ModelForm):\n \"\"\"\n Base model form for the MinMaxValidationRule model.\n \"\"\"\n\n slug = SlugField()\n content_type = forms.ModelChoiceField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_validators\").get_query()).order_by(\n \"app_label\", \"model\"\n ),\n )\n\n class Meta:\n model = MinMaxValidationRule\n fields = [\"name\", \"slug\", \"enabled\", \"content_type\", \"field\", \"min\", \"max\", \"error_message\"]\n\n\nclass MinMaxValidationRuleCSVForm(CSVModelForm):\n \"\"\"\n Base csv form for the MinMaxValidationRule model.\n \"\"\"\n\n slug = SlugField()\n content_type = CSVContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_validators\").get_query()),\n help_text=\"The object type to which this min/max rule applies.\",\n )\n\n class Meta:\n model = MinMaxValidationRule\n fields = MinMaxValidationRule.csv_headers\n\n\nclass MinMaxValidationRuleBulkEditForm(BootstrapMixin, BulkEditForm):\n \"\"\"\n Base bulk edit form for the MinMaxValidationRule model.\n \"\"\"\n\n pk = forms.ModelMultipleChoiceField(queryset=MinMaxValidationRule.objects.all(), widget=forms.MultipleHiddenInput)\n enabled = forms.NullBooleanField(\n required=False,\n widget=BulkEditNullBooleanSelect(),\n )\n min = forms.IntegerField(required=False)\n max = forms.IntegerField(required=False)\n error_message = forms.CharField(required=False)\n\n class Meta:\n nullable_fields = [\"error_message\"]\n\n\nclass MinMaxValidationRuleFilterForm(BootstrapMixin, forms.Form):\n \"\"\"\n Base filter form for the MinMaxValidationRule model.\n \"\"\"\n\n model = MinMaxValidationRule\n field_order = [\"q\", \"name\", \"enabled\", \"content_type\", \"field\", \"min\", \"max\", \"error_message\"]\n q = forms.CharField(required=False, label=\"Search\")\n # \"CSV\" field is being used here because it is using the slug-form input for\n # content-types, which improves UX.\n content_type = CSVMultipleContentTypeField(\n queryset=ContentType.objects.filter(FeatureQuery(\"custom_validators\").get_query()).order_by(\n \"app_label\", \"model\"\n ),\n required=False,\n )\n min = forms.IntegerField(required=False)\n max = forms.IntegerField(required=False)\n","sub_path":"nautobot_data_validation_engine/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"645476393","text":"from abc import ABC,abstractmethod\nfrom random import randint\nclass bank(ABC):\n @abstractmethod\n def create_account(self):\n pass\n\n @abstractmethod\n def login(self):\n pass\n\n @abstractmethod\n def display_balance(self):\n pass\n\n @abstractmethod\n def deposit_amount(self,amount):\n pass\n\n @abstractmethod\n def withdraw_amount(self,amount):\n pass\n\n @abstractmethod\n def transfer_amount(self,name,account_no,amount):\n pass\n \n\nclass customer(bank):\n\n def __init__(self):\n self.customer_detail=[]\n self.balance=0\n\n\n def create_account(self,name,phone_no,deposit_amount):\n condition=True\n if len(str(phone_no)) != 10:\n print(\"Invalid phone number! Please enter a 10 digit number\")\n condition=False\n\n if condition==True:\n self.account_no=randint(100000,999999)\n self.balance=deposit_amount\n self.customer_detail=[name,phone_no,deposit_amount,self.account_no]\n print(\"Account created successfully\")\n print(\"Your account number is \",self.account_no)\n with open (f'{name}.txt','w') as fp:\n for details in self.customer_detail:\n fp.write(str(details) + \"\\n\" )\n\n def login(self,name,account_no):\n with open (f'{name}.txt','r') as fp:\n details=fp.read()\n self.customer_detail=details.split('\\n')\n if name in self.customer_detail:\n if self.customer_detail[3] == str(account_no):\n print('Login is successful!')\n self.name=name\n self.balance=self.customer_detail[2]\n return True\n else:\n print('Login failed!')\n return False\n else:\n print('Login failed!')\n \n return False\n \n\n def display_balance(self):\n with open (f'{self.name}.txt','r') as fp:\n details=fp.read()\n self.customer_detail=details.split('\\n')\n print('Balance in a/c:', self.customer_detail[2])\n\n\n def deposit_amount(self, amount):\n with open (f'{self.name}.txt','r') as fp:\n details=fp.read()\n self.customer_detail=details.split('\\n')\n\n with open (f'{self.name}.txt','w') as fp: \n new_balance=int(self.customer_detail[2]) + amount\n fp.write(details.replace(str(self.customer_detail[2]),str(new_balance)))\n print('Deposit was successful')\n self.display_balance()\n\n\n def withdraw_amount(self, amount):\n with open (f'{self.name}.txt','r') as fp:\n details=fp.read()\n self.customer_detail=details.split('\\n')\n with open (f'{self.name}.txt','w') as fp:\n if amount < int(self.customer_detail[2]):\n left_balance=int(self.customer_detail[2]) - amount\n fp.write(details.replace(str(self.customer_detail[2]),str(left_balance)))\n print('Withdraw successful')\n print('Balance in a/c ',left_balance)\n \n \n else:\n print('Insufficient Balance')\n\n\n def transfer_amount(self, name, account_no, amount):\n self.transfer_cash=False\n self.total_balance=False\n with open (f'{self.name}.txt','r') as fp:\n details=fp.read()\n self.customer_detail=details.split('\\n')\n\n with open (f'{self.name}.txt','w') as fp:\n if amount < int(self.customer_detail[2]):\n self.total_balance=True\n left_balance=int(self.customer_detail[2]) - amount\n fp.write(details.replace(str(self.customer_detail[2]),str(left_balance)))\n\n else:\n print('Insufficient Balance') \n\n if self.total_balance == True: \n with open (f'{name}.txt','r') as fp:\n details=fp.read()\n self.customer_detail=details.split('\\n')\n if account_no == self.customer_detail[3]:\n self.transfer_cash=True\n\n if self.transfer_cash == True:\n with open (f'{name}.txt','w') as fp: \n new_balance=int(self.customer_detail[2]) + amount\n fp.write(details.replace(str(self.customer_detail[2]),str(new_balance)))\n print('Transferred Successfully')\n self.display_balance()\n \n\ncustomer_obj=customer()\nwhile True:\n print('Welcome to Bank')\n print('Enter 1 to create account')\n print('Enter 2 to login account')\n print('Enter 3 to quit')\n choice=int(input())\n\n if choice==1:\n name=input('Enter your name ')\n phone_no=int(input('Enter your phone no '))\n deposit_amount=int(input('Enter the deposit amount '))\n customer_obj.create_account(name,phone_no,deposit_amount)\n\n elif choice==2:\n name=input('Enter your name ')\n account_no=int(input('Enter your account no '))\n login_status=customer_obj.login(name,account_no)\n loginin=True\n\n while loginin==True:\n if login_status==True:\n print('Enter 1 to check the balance ')\n print('Enter 2 to deposit the amount ')\n print('Enter 3 to withdraw the amount ')\n print('Enter 4 to transfer the cash')\n print('Enter 5 to logout')\n choice1=int(input())\n\n if choice1==1:\n customer_obj.display_balance()\n\n elif choice1==2:\n amount=int(input('Enter the amount to deposit '))\n customer_obj.deposit_amount(amount)\n\n elif choice1==3:\n amount=int(input('Enter the amount to withdraw '))\n customer_obj.withdraw_amount(amount)\n\n elif choice1==4:\n name=input('Enter the name of the account holder to whom money is to be transferred')\n account_no=input('Enter the account number to which you want to transfer money')\n amount=int(input('Enter the amount to be transferred'))\n customer_obj.transfer_amount(name,account_no,amount)\n\n elif choice1==5:\n loginin=False\n\n else:\n print('Try Again')\n\n elif choice==3:\n quit()\n\n\n\n","sub_path":"bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"253813977","text":"#!/usr/local/bin/python3\n# -*- coding:utf-8 -*-\n#***************************************#\n# Author: zilong.wu@aispeech.com\n# Created: 2020-09-26 15:24:33\n# Last modified: 2020-09-26 15:24:33\n# Filename: 1.py\n# Copyright © Aispeech\n#***************************************#\nimport os\nimport sys\nimport json\nimport pyecharts\nfrom pyecharts.charts.basic_charts.bar import Bar\nfrom pyecharts.charts.basic_charts.pie import Pie\nfrom pyecharts.globals import ThemeType\nfrom interval import Interval\nfrom pyecharts.charts import Grid\nfrom pyecharts.charts import Page\nfrom pyecharts import options as opts\nimport pandas as pd\nfrom collections import Counter\n\nif __name__ == \"__main__\":\n file_name = sys.argv[1]\n df = pd.read_excel(file_name)\n companies = df['单位名称'].tolist()\n comp_counter = Counter(companies).most_common(10)\n cc = dict(comp_counter)\n areas = df['所属区域'].tolist()\n area_counter = Counter(areas).most_common()\n dd = dict(area_counter)\n scores = df['总得分'].tolist()\n score_counter = Counter(scores).most_common()\n step_range = [{'<75分':[0,75]}, {'>=75分,<80分':[75,80]},{'>=80分,<85分': [80,85]},{'>=85分,<90分': [85,90]},{'>=90分,<95分': [90,95]},{'>=95分,<100分':[95,100]}]\n score_range = {k: sum(i[1] for i in score_counter if not isinstance(i[0], str) and i[0] < j[1] and i[0] >= j[0]) for kv in step_range for k, j in kv.items()}\n page = Page()\n bar_company = (\n Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))\n .add_xaxis(list(cc.keys()))\n .add_yaxis(\"人数\", list(cc.values()))\n .set_global_opts(\n title_opts=opts.TitleOpts(\n title = \"紧缺人才名单公司分布柱状图\", \n subtitle = \"company\", \n )\n )\n )\n page.add(bar_company)\n pie_company = (\n Pie()\n .add(\"\", comp_counter)\n .set_colors([\"blue\", \"green\", \"yellow\", \"red\", \"pink\", \"orange\", \"purple\"])\n .set_global_opts(title_opts=opts.TitleOpts(\n title=\"紧缺人才名单公司分布饼图\",\n ),\n legend_opts = opts.LegendOpts(\n pos_top = \"bottom\"\n )\n )\n .set_series_opts(label_opts=opts.LabelOpts(\n formatter=\"{b}: {c} ({d}%)\"))\n )\n page.add(pie_company)\n\n bar_area = (\n Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))\n .add_xaxis(list(dd.keys()))\n .add_yaxis(\"人数\", list(dd.values()))\n .set_global_opts(\n title_opts=opts.TitleOpts(\n title = \"紧缺人才名单区域分布柱状图\", \n subtitle = \"area\",\n )\n )\n )\n page.add(bar_area)\n\n pie_area = (\n Pie()\n .add(\"\", area_counter)\n .set_colors([\"blue\", \"green\", \"yellow\", \"red\", \"pink\", 
\"orange\", \"purple\"])\n .set_global_opts(title_opts=opts.TitleOpts(\n title=\"紧缺人才名单区域分布饼图\"\n ),\n legend_opts = opts.LegendOpts(\n #pos_top = \"2%\",\n pos_top = \"bottom\"\n )\n )\n .set_series_opts(label_opts=opts.LabelOpts(\n formatter=\"{b}: {c} ({d}%)\"\n )\n )\n #.render(\"pie_set_color.html\")\n )\n page.add(pie_area)\n bar_score = (\n Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))\n .add_xaxis(list(score_range.keys()))\n .add_yaxis(\"紧缺人才分数分布区间人数\", list(score_range.values()))\n .set_global_opts(\n title_opts=opts.TitleOpts(\n title = \"紧缺人才名单分数分布柱状图\", \n subtitle = \"score\", \n #pos_bottom = \"60%\"\n )\n )\n )\n page.add(bar_score)\n pie_score = (\n Pie()\n .add(\"\", list(zip(score_range.keys(), score_range.values())))\n .set_colors([\"blue\", \"green\", \"yellow\", \"red\", \"pink\", \"orange\", \"purple\"])\n .set_global_opts(title_opts=opts.TitleOpts(\n title=\"紧缺人才分数分布饼图\",\n ),\n legend_opts = opts.LegendOpts(\n pos_top = \"bottom\"\n )\n )\n .set_series_opts(label_opts=opts.LabelOpts(\n formatter=\"{b}: {c} ({d}%)\"))\n )\n page.add(pie_score)\n page.render('Qp.html')\n\n","sub_path":"talents_of_key_industry/Qp.py","file_name":"Qp.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"513902404","text":"# encoding:utf-8\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\nclass Sampling:\n def __init__(self, image):\n self.image = image\n self.points = []\n\n def mouse(self, event):\n if event.button == 3:\n self.axis.plot(event.xdata, event.ydata, \"ro\")\n self.points.append([event.ydata, event.xdata])\n self.fig.canvas.draw()\n\n def start(self):\n self.fig = plt.figure()\n self.axis = self.fig.add_subplot(111)\n self.fig.canvas.mpl_connect(\"button_press_event\", self.mouse)\n plt.gray()\n self.axis.imshow(self.image)\n plt.show()\n plt.clf()\n return np.array(self.points).astype(np.int)\n\n\ndef growCut(image, foreGround, backGround, iter=100):\n # 8近傍\n diffY = [-1, -1, -1, 0, 0, 1, 1, 1]\n diffX = [-1, 0, 1, -1, 1, -1, 0, 1]\n\n # ラベル初期化\n label = np.zeros(image.shape)\n label[foreGround[:, 0], foreGround[:, 1]] = 1\n label[backGround[:, 0], backGround[:, 1]] = -1\n\n # 攻撃力\n power = np.zeros(image.shape)\n power[foreGround[:, 0], foreGround[:, 1]] = 1.0\n power[backGround[:, 0], backGround[:, 1]] = 1.0\n\n power_next = np.copy(power)\n label_next = np.copy(label)\n\n # growcut開始\n for t in range(iter):\n print(t)\n\n power = np.copy(power_next)\n label = np.copy(label_next)\n\n for i in range(1, image.shape[0] - 1):\n for j in range(1, image.shape[1] - 1):\n for k in range(8):\n dy, dx = diffY[k], diffX[k]\n\n # 注目セルの防御力\n shield = 1.0 - np.abs(image[i, j] - image[i + dy, j + dx])\n\n # 近傍セルの攻撃力が注目画素の防御力を上回るか\n if shield * power[i + dy, j + dx] > power[i, j]:\n label_next[i, j] = label[i + dy, j + dx]\n power_next[i, j] = power[i + dy, j + dx] * shield\n return label_next\n\n\ndef main():\n image = cv2.imread(\"result200.tif\", 0).astype(np.float)\n image = (image - image.min()) / (image.max() - image.min())\n\n plt.gray()\n plt.imshow(image)\n plt.show()\n\n foreGround = Sampling(image).start()\n backGround = Sampling(image).start()\n\n mask = growCut(image, foreGround, backGround)\n mask[mask != 1] = 0\n\n plt.gray()\n plt.subplot(131)\n plt.imshow(image)\n plt.subplot(132)\n plt.imshow(image)\n plt.plot(foreGround[:, 1], foreGround[:, 0], \"ro\")\n plt.plot(backGround[:, 1], backGround[:, 0], \"bo\")\n plt.subplot(133)\n plt.imshow(image * 
mask)\n plt.show()\n\nmain()","sub_path":"growcut.py","file_name":"growcut.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"334213059","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\nimport util, constants, draw, cam\n\n# THIS IS ADITYA'S CODE\ndef so3_log_map_impl(R, eps=1e-5):\n R33 = R[:3, :3]\n # direction: unit eigenvector of R33 corresponding to eigenvalue of 1\n w, W = torch.eig(R33.t(), eigenvectors=True)\n i = torch.where(abs(w[:, 0]-1.0) < eps)[0]\n if not len(i):\n raise ValueError('no unit eigenvector corresponding to eigenvalue 1')\n direction = W[:, i[-1]].squeeze()\n\n # rotation angle depending on direction\n cosa = (torch.trace(R33) - 1.0) / 2.0\n if torch.abs(direction[2]) > eps:\n sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]\n elif torch.abs(direction[1]) > eps:\n sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]\n else:\n sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]\n angle = torch.atan2(sina, cosa)\n return direction* angle\ndef so3_log_map(R, eps=1e-5):\n is_numpy = type(R) is np.ndarray\n if is_numpy:\n R = torch.from_numpy(R)\n bs = R.shape[:-2]\n bs_tot = np.prod(bs, dtype=int)\n R = R.view(bs_tot, 3, 3)\n axangs = torch.zeros(bs_tot, 3)\n for i in range(bs_tot):\n axangs[i] = so3_log_map_impl(R[i], eps)\n axangs = axangs.reshape(*bs, 3)\n if is_numpy:\n axangs = axangs.numpy()\n return axangs\n\n\ndef so3_exponential_map(axang, homogeneous=False):\n \"\"\"\n Return the rotation matrix associated with counterclockwise rotation about\n the given axis by theta radians.\n \"\"\"\n is_numpy = type(axang) is np.ndarray\n if is_numpy:\n axang = torch.from_numpy(axang)\n \n #axis = np.asarray(axis)\n theta = axang.norm(dim=-1)\n axis = axang/theta[..., None]\n\n # TODO: hacky broadcasting\n theta = theta[..., None]\n axis, theta = torch.broadcast_tensors(axis, theta)\n theta = theta[..., 0] # the last dimension gets broadcasted to \"3\", so truncate\n\n axis = axis / torch.norm(axis, 2, dim=-1)[..., None]\n a = torch.cos(theta / 2.0)\n # Transpose so we can unpack into variables\n b, c, d = torch.unbind(-axis * torch.sin(theta / 2.0)[..., None], dim=-1)\n\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n mat = torch.stack([torch.stack([aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], dim=-1),\n torch.stack([2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], dim=-1),\n torch.stack([2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc], dim=-1)],\n dim=-2)\n if homogeneous:\n extra_dims = axis.shape[:-1]\n zeros_column = torch.zeros_like(mat)[..., :, 0:1] # shape: (..., 3, 1)\n hstacked = torch.cat([mat, zeros_column], dim=-1) # shape: (..., 3, 4)\n\n hom_row = torch.cat((torch.zeros_like(hstacked)[..., :1, :3],\n torch.ones_like(hstacked)[..., :1, 3:]), dim=-1) # shape: (..., 1, 4)\n mat = torch.cat((hstacked, hom_row), dim=-2)\n \n if is_numpy:\n mat = mat.numpy()\n return mat\n \n\ndef to_homo(X, dim=-1):\n if type(X) is torch.Tensor:\n X = X.transpose(dim, -1)\n X = torch.cat((X, torch.ones(X.shape[:-1]+(1,))), dim=-1)\n X = X.transpose(dim, -1)\n else:\n X = np.swapaxes(X, dim, -1)\n X = np.concatenate([X, np.ones(X.shape[:-1]+(1,))], axis=-1).astype(X.dtype)\n X = np.swapaxes(X, dim, -1)\n return X\n\ndef from_homo(X, dim=-1):\n if type(X) is torch.Tensor:\n X = X.transpose(dim, -1)\n X = X[..., 
:-1]/X[..., -1, None]\n X = X.transpose(dim, -1)\n else:\n X = np.swapaxes(X, dim, -1)\n X = (X[..., :-1]/X[..., -1, None]).astype(X.dtype)\n X = np.swapaxes(X, dim, -1)\n return X\n\n# def translation_matrix(tvec: torch.Tensor) -> torch.Tensor:\n# extra_dims = tvec.shape[:-1]\n# eye_tiled = torch.eye(3, dtype=tvec.dtype).to(tvec.device).repeat(extra_dims + (1, 1))\n# hstacked = torch.cat([eye_tiled, tvec[..., :, None]], dim=-1)\n\n# hom_row = torch.cat((torch.zeros_like(hstacked)[..., :1, :3],\n# torch.ones_like(hstacked)[..., :1, 3:]), dim=-1) # shape: (..., 1, 4)\n# return torch.cat((hstacked, hom_row), dim=-2)\n# def dofs2mat(dofs):\n# T = dofs[..., :3]\n# angle = dofs[..., 3:].norm(dim=-1)\n# axis = dofs[..., 3:]/angle[..., None]\n# rot_mat = rotation_matrix(axis, angle, homogeneous=True)\n# trans_mat = translation_matrix(T)\n# return trans_mat @ rot_mat\n\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"576969203","text":"import sys\nsys.path.append('/Users/npolizzi/Projects/combs/src/')\nimport combs\nimport prody as pr\n# import time\n\n\npdb_path = '/Users/npolizzi/Projects/combs/src/runs/glutamine_binding_protein/1wdn_noligandH.pdb'\nsample = combs.Sample()\nsample.poi = pr.parsePDB(pdb_path)\nsample.bs_residues = list(zip([50, 67, 10, 115, 156, 13, 68, 70, 157, 185],\n ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A']))\nsample.set_rois()\nsample.set_rois_phipsi()\nsample.set_poi_clash_coords()\nsample.set_roi_bb_coords()\n\nrelvdm_carboxamide = combs.Rel_Vandermer('carboxamide')\nrelvdm_carboxamide.rel_vdm_path = '/Users/npolizzi/Projects/combs/results/carboxamide/rel_vdms/20170830/'\nrelvdm_carboxamide.load_rel_vdms_pickle(sample)\nrelvdm_carboxamide.set_rel_vdm_bb_coords()\nrelvdm_carboxamide.set_rois_rot_trans(sample)\nrelvdm_carboxamide.set_rel_vdm_tags(sample)\nprint('moving vdMs')\nrelvdm_carboxamide.move_rel_vdms(sample)\nprint('removing clashing vdMs')\nrelvdm_carboxamide.remove_clash(sample)\nrelvdm_carboxamide.reshape_ifgs()\nprint('finding hotspots')\nrelvdm_carboxamide.find_hotspots(rmsd_cutoff=0.6)\noutdir = '/Users/npolizzi/Projects/combs/results/glutamine_binding_protein/20170908/carboxamide/'\nrelvdm_carboxamide.print_hotspots(outdir, number=10)\n\n","sub_path":"src/runs/glutamine_binding_protein/20170905/glutamine_bp_carboxamide_hotspot.py","file_name":"glutamine_bp_carboxamide_hotspot.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"322846788","text":"#!/usr/bin/python\n\n# *****************************************************************************\n#\n# Copyright (c) 2016, EPAM SYSTEMS INC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ******************************************************************************\n\nimport argparse\nimport json\nfrom dlab.actions_lib import *\nfrom dlab.meta_lib import 
*\nimport sys\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--instance_name', type=str, default='')\nparser.add_argument('--instance_size', type=str, default='')\nparser.add_argument('--region', type=str, default='')\nparser.add_argument('--vpc_name', type=str, default='')\nparser.add_argument('--network_interface_name', type=str, default='')\nparser.add_argument('--subnet_name', type=str, default='')\nparser.add_argument('--service_base_name', type=str, default='')\nparser.add_argument('--dlab_ssh_user_name', type=str, default='')\nparser.add_argument('--public_ip_name', type=str, default='')\nparser.add_argument('--public_key', type=str, default='')\nparser.add_argument('--primary_disk_size', type=str, default='')\nparser.add_argument('--security_group_name', type=str, default='')\nparser.add_argument('--instance_type', type=str, default='')\nparser.add_argument('--tags', type=str, default='{\"empty\":\"string\"}')\nparser.add_argument('--user_name', type=str, default='')\nparser.add_argument('--resource_group_name', type=str, default='')\nparser.add_argument('--image_name', type=str, default='')\nparser.add_argument('--image_type', type=str, default='default')\nparser.add_argument('--instance_storage_account_type', type=str, default='')\nargs = parser.parse_args()\n\n\nif __name__ == \"__main__\":\n disk_id = ''\n create_option = 'fromImage'\n if args.instance_name != '':\n try:\n if AzureMeta().get_instance(args.service_base_name, args.instance_name):\n print(\"REQUESTED INSTANCE {} ALREADY EXISTS\".format(args.instance_name))\n else:\n if args.public_ip_name != 'None':\n if AzureMeta().get_static_ip(args.service_base_name, args.public_ip_name):\n print(\"REQUESTED PUBLIC IP ADDRESS {} ALREADY EXISTS.\".format(args.public_ip_name))\n static_public_ip_address = AzureMeta().get_static_ip(\n args.service_base_name, args.public_ip_name).ip_address\n else:\n print(\"Creating Static IP address {}\".format(args.public_ip_name))\n static_public_ip_address = \\\n AzureActions().create_static_public_ip(args.service_base_name, args.public_ip_name,\n args.region, args.instance_name,\n json.loads(args.tags))\n if AzureMeta().get_network_interface(args.service_base_name, args.network_interface_name):\n print(\"REQUESTED NETWORK INTERFACE {} ALREADY EXISTS.\".format(args.network_interface_name))\n network_interface_id = AzureMeta().get_network_interface(args.service_base_name,\n args.network_interface_name).id\n else:\n print(\"Creating Network Interface {}\".format(args.network_interface_name))\n network_interface_id = AzureActions().create_network_if(args.service_base_name, args.vpc_name,\n args.subnet_name,\n args.network_interface_name, args.region,\n args.security_group_name,\n json.loads(args.tags),\n args.public_ip_name)\n disk = AzureMeta().get_disk(args.service_base_name, '{}-disk0'.format(\n args.instance_name))\n if disk:\n create_option = 'attach'\n disk_id = disk.id\n print(\"Creating instance {}\".format(args.instance_name))\n AzureActions().create_instance(args.region, args.instance_size, args.service_base_name,\n args.instance_name, args.dlab_ssh_user_name, args.public_key,\n network_interface_id, args.resource_group_name, args.primary_disk_size,\n args.instance_type, args.image_name, json.loads(args.tags), args.user_name,\n create_option, disk_id, args.instance_storage_account_type,\n args.image_type)\n except:\n sys.exit(1)\n else:\n 
sys.exit(1)\n","sub_path":"infrastructure-provisioning/src/general/scripts/azure/common_create_instance.py","file_name":"common_create_instance.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"380761201","text":"import os\nfrom celery import Celery, shared_task\nfrom celery.schedules import crontab\nfrom django.apps import apps, AppConfig\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import caches, cache\n\nif not settings.configured:\n # set the default Django settings module for the 'celery' program.\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover\n\napp = Celery('yadjangoblog')\n\n\nclass CeleryConfig(AppConfig):\n name = 'yadjangoblog.yataskapp'\n verbose_name = 'Celery Config'\n\n def ready(self):\n # Using a string here means the worker will not have to\n # pickle the object when using Windows.\n app.config_from_object('django.conf:settings', namespace='CELERY')\n installed_apps = [app_config.name for app_config in apps.get_app_configs()]\n app.autodiscover_tasks(lambda: installed_apps, force=True)\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request)) # pragma: no cover\n\n\n@shared_task\ndef add(x, y):\n return x + y\n\n\n@shared_task\ndef mul(x, y):\n return x * y\n\n\n@shared_task\ndef xsum(numbers):\n return sum(numbers)\n\n\n@app.task\ndef test(arg):\n print(arg)\n\n\n@app.task\ndef test_beat(args):\n print(\"===> test beat\", args)\n\n\n@app.task\ndef test_django_orm():\n print(\"===> test django orm\")\n Account = get_user_model()\n print(Account.objects.first())\n\n\n@app.task\ndef test_redis():\n cache.incr('test_beat_exec')\n print(\"===> test_redis\", cache.get('test_beat_exec'))\n\n\n@app.task\ndef test_elasticsearch():\n print(\"===> test_elasticsearch\")\n\n\n@app.on_after_configure.connect\ndef setup_periodic_tasks(sender, **kwargs):\n cache.set('test_beat_exec', 1)\n # Calls the smoke-test tasks every 10 seconds\n # (each schedule entry needs a unique name, or later calls overwrite earlier ones)\n sender.add_periodic_task(10.0, test.s('hello'), name='test every 10 seconds')\n sender.add_periodic_task(10.0, test_redis.s(), name='redis every 10 seconds')\n sender.add_periodic_task(10.0, test_elasticsearch.s(), name='elasticsearch every 10 seconds')\n sender.add_periodic_task(10.0, test_django_orm.s(), name='django orm every 10 seconds')\n # Calls test every 30 seconds\n sender.add_periodic_task(30.0, test.s('hello'), name='add every 30 seconds')\n # Calls test every 60 seconds\n sender.add_periodic_task(60.0, test.s('hello'), name='add every 60 seconds')\n # Calls test every 10 minutes\n sender.add_periodic_task(10 * 60.0, test.s('hello'), name='add every 10 minutes')\n # Calls test every 30 minutes\n sender.add_periodic_task(30 * 60.0, test.s('hello'), name='add every 30 minutes')\n # Calls test every 60 minutes\n sender.add_periodic_task(60 * 60.0, test.s('hello'), name='add every 60 minutes')\n # Calls test every 12 hours\n sender.add_periodic_task(12 * 60 * 60.0, test.s('hello'), name='add every 12 hours')\n # Calls test every 24 hours\n sender.add_periodic_task(24 * 60 * 60.0, test.s('hello'), name='add every 24 hours')\n\n # Executes every morning at 7:30 a.m.\n sender.add_periodic_task(\n crontab(hour=7, minute=30),\n test.s('Happy Mondays!'),\n )\n # Executes every Monday morning at 6:30 a.m.\n 
sender.add_periodic_task(\n crontab(hour=6, minute=30, day_of_week=1),\n test.s('Happy Mondays!'),\n )\n\n\napp.conf.timezone = 'UTC'\n","sub_path":"yadjangoblog/yataskapp/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"204869330","text":"\"\"\"Data utilities.\"\"\"\nimport torch\nfrom torch.autograd import Variable\nimport operator\nimport json\n\n\n\ndef construct_vocab(lines, vocab_size):\n \"\"\"Construct a vocabulary from tokenized lines.\"\"\"\n vocab = {}\n for line in lines:\n for word in line:\n if word not in vocab:\n vocab[word] = 1\n else:\n vocab[word] += 1\n \n word2id = {}\n id2word = {}\n word2id[''] = 0\n word2id[''] = 1\n id2word[0] = ''\n id2word[1] = ''\n \n sorted_word2id = sorted(\n vocab.items(),\n key=operator.itemgetter(1),\n reverse=True\n )\n\n sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]\n\n for ind, word in enumerate(sorted_words):\n word2id[word] = ind + 2\n\n for ind, word in enumerate(sorted_words):\n id2word[ind + 2] = word\n\n return word2id, id2word\n\n\n\n\n\n\ndef convert_to_tensor(batch, word2ind):\n \"\"\"Prepare minibatch.\"\"\"\n lens = [len(line) for line in batch]\n max_len = lens[-1]\n input_lines = [\n [word2ind[w] if w in word2ind else word2ind[''] for w in line] +\n [word2ind['']] * (max_len - len(line))\n for line in batch\n ]\n\n #mask = [\n # ([1] * (l - 1)) + ([0] * (max_len - l))\n # for l in lens\n #]\n\n tensor_batch = Variable(torch.LongTensor(input_lines))\n #mask = Variable(torch.FloatTensor(mask))\n\n return tensor_batch, max_len\n","sub_path":"src/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"614924843","text":"from formatter import Formatter\nfrom utils import create_requirements\nfrom features.steps import project_setup\nfrom flask import Flask, render_template, request, send_file\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef base():\n Format = Formatter(filename=project_setup)\n if request.method == 'GET':\n return render_template(\"index.html\", F=Format)\n \n else:\n create_requirements(Format, request.form)\n return send_file('features/project_setup.feature',\n mimetype='text/csv',\n attachment_filename=f'project_setup.feature',\n as_attachment=True)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"648978820","text":"import os\r\n\r\nfrom career_test.models import (\r\n QuestionBank, Choice, MBTIAnwserType,\r\n MBTIResult, MBTIResultDetail, CareerResultType,\r\n HollandData, HollandDataItem, HollandTypeResult,\r\n NewHolland, NewHollandType, NewHollandTitleNumType\r\n)\r\n\r\n\r\ndef input_questions():\r\n for i in range(1, 13):\r\n new_item = QuestionBank()\r\n new_item.bank_name = '卡特尔'\r\n new_item.question_num = i\r\n new_item.question_name = '这是第{}题的题目'.format(i)\r\n new_item.save()\r\n print('录入了测试题目')\r\n\r\n\r\ndef input_anwsers():\r\n questions = QuestionBank.objects.all().order_by('question_num')\r\n for question in questions:\r\n for i in ['a', 'b', 'c']:\r\n new_item = Choice()\r\n new_item.choice_type = i\r\n new_item.choice_content = '这是第{q_num}题的{c_type}选项'.format(c_type=i, q_num=question.question_num)\r\n 
new_item.question = question\r\n new_item.save()\r\n print('录入了测试答案')\r\n\r\n\r\ndef input_MBTI_questions():\r\n with open('预处理数据/MBTI题目与选项.txt', 'r', encoding='UTF-8') as f:\r\n question_object = None\r\n split_tag = 'B'\r\n question_num = 1\r\n for i, line in enumerate(f.readlines()):\r\n if i % 2 == 0:\r\n new_item = QuestionBank()\r\n new_item.bank_name = 'MBTI'\r\n new_item.question_num = question_num\r\n new_item.question_name = line.replace('\\n', '')\r\n new_item.save()\r\n question_object = new_item\r\n question_num += 1\r\n else:\r\n choices = line.replace('\\n', '').split(split_tag)\r\n a_item = Choice()\r\n a_item.choice_type = choices[0][0]\r\n a_item.choice_content = choices[0][1:]\r\n a_item.question = question_object\r\n a_item.save()\r\n b_item = Choice()\r\n b_item.choice_type = split_tag\r\n b_item.choice_content = choices[1]\r\n b_item.question = question_object\r\n b_item.save()\r\n print('录入成功')\r\n\r\n\r\ndef input_career_anchor_questions():\r\n with open('预处理数据/职业锚题目与选项.txt', 'r', encoding='UTF-8') as f:\r\n question_num = 1\r\n for line in f.readlines():\r\n new_item = QuestionBank()\r\n new_item.bank_name = 'career_anchor'\r\n new_item.question_num = question_num\r\n new_item.question_name = line.replace('\\n', '')\r\n new_item.save()\r\n question_num += 1\r\n for i, value in enumerate(['从不', '偶尔', '有时', '经常', '频繁', '总是']):\r\n choice_item = Choice()\r\n choice_item.choice_type = '{}'.format(i + 1)\r\n choice_item.choice_content = value\r\n choice_item.question = new_item\r\n choice_item.save()\r\n print('录入成功')\r\n\r\n\r\ndef input_anwser_type():\r\n with open('预处理数据/MBTI答案类型.txt', 'r', encoding='UTF-8') as f:\r\n count_num = 1\r\n for index, line in enumerate(f.readlines()):\r\n questions = QuestionBank.objects.get(bank_name='MBTI', question_num=count_num)\r\n if index % 2 == 0:\r\n new_item = MBTIAnwserType()\r\n new_item.choice = questions.choice_set.get(choice_type='A')\r\n new_item.anwser_type = line.replace('\\n', '')\r\n new_item.save()\r\n # print('{}{}'.format(questions.choice_set.get(choice_type='A'), line.replace('\\n', '')))\r\n else:\r\n new_item = MBTIAnwserType()\r\n new_item.choice = questions.choice_set.get(choice_type='B')\r\n new_item.anwser_type = line.replace('\\n', '')\r\n new_item.save()\r\n count_num += 1\r\n # print('{}{}'.format(questions.choice_set.get(choice_type='B'), line.replace('\\n', '')))\r\n print('录入成功')\r\n\r\n\r\n\r\ndef input_holland_data():\r\n is_part = False\r\n is_title = False\r\n is_type = False\r\n # is_item = False\r\n item_type = None\r\n # part_num = None\r\n with open('预处理数据/霍兰德题目.txt', 'r', encoding='UTF-8') as f:\r\n holland_data = None\r\n item_num = 0\r\n for line in f.readlines():\r\n line_content = line.replace('\\n', '')\r\n if line_content == 'part':\r\n is_part = True\r\n holland_data = HollandData()\r\n continue\r\n if is_part:\r\n holland_data.part_num = line_content\r\n is_part = False\r\n continue\r\n if line_content == 'title':\r\n is_title = True\r\n continue\r\n if is_title:\r\n holland_data.part_title = line_content\r\n is_title = False\r\n holland_data.save()\r\n continue\r\n if line_content == 'type':\r\n is_type = True\r\n continue\r\n if is_type:\r\n item_type = line_content\r\n is_type = False\r\n # is_item = True\r\n continue\r\n item_num += 1\r\n holland_data_item = HollandDataItem()\r\n holland_data_item.item_num = item_num\r\n holland_data_item.part_type = item_type\r\n holland_data_item.content = line_content\r\n holland_data_item.part = holland_data\r\n holland_data_item.save()\r\n 
print('录入成功')\r\n\r\n\r\ndef input_new_holland_title():\r\n with open('预处理数据/新霍兰德测试题目.txt', 'r', encoding='UTF-8') as f:\r\n for i, line in enumerate(f.readlines()):\r\n holland_data_item = NewHolland()\r\n holland_data_item.title_num = i + 1\r\n holland_data_item.title = line.replace('\\n', '')\r\n holland_data_item.save()\r\n print('录入成功')\r\n\r\n\r\ndef input_condition():\r\n type_dic = dict()\r\n type_dic['传统型'] = ((7, 19, 29, 39, 41, 51, 57), (5, 18, 40))\r\n type_dic['现实型'] = ((2, 13, 22, 36, 43), (14, 23, 44, 47, 48))\r\n type_dic['研究型'] = ((6, 8, 20, 30, 31, 42), (21, 55, 56, 58))\r\n type_dic['企业型'] = ((11, 24, 28, 35, 38, 46, 60), (3, 16, 25))\r\n type_dic['社会型'] = ((26, 37, 52, 59), (1, 12, 15, 27, 45, 53))\r\n type_dic['艺术型'] = ((4, 9, 10, 17, 33, 34, 49, 50, 54), (32, ))\r\n\r\n for key, value in type_dic.items():\r\n for true_num in value[0]:\r\n new_item = NewHollandTitleNumType()\r\n new_item.new_holland = NewHolland.objects.get(title_num=true_num)\r\n new_item.new_holland_type = NewHollandType.objects.get(item_name=key)\r\n new_item.save()\r\n for false_num in value[1]:\r\n new_item = NewHollandTitleNumType()\r\n new_item.new_holland = NewHolland.objects.get(title_num=true_num)\r\n new_item.new_holland_type = NewHollandType.objects.get(item_name=key)\r\n new_item.score_condition = False\r\n new_item.save()\r\n print('成功录入一类')\r\n print('录入成功')\r\n","sub_path":"questionnaire/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"11424207","text":"from convokit.transformer import Transformer\nfrom convokit.model import Corpus\nfrom nltk.stem import SnowballStemmer\nfrom empath import Empath\nstemmer = SnowballStemmer('english')\nlexicon = Empath()\n\nclass EmoTracker(Transformer):\n \"\"\"\n Extracts information about politicization\n\n \"\"\"\n def __init__(self):\n self.ATTR_NAME = \"emotion_tranformer\"\n self.categories=[\"sadness\", \"violence\", \"rage\", \"pain\", \"anger\", \"love\", \"politics\"]\n\n def transform(self, corpus: Corpus):\n \"\"\"Adds metadata about politicization to each utterance.\n\n :param corpus: the corpus to compute features for.\n :type corpus: Corpus\n \"\"\"\n assert 'stem_tokens' in next(corpus.iter_utterances()).meta\n counter = 1\n for utt in corpus.iter_utterances():\n if utt.meta['valid']:\n utt.meta['analysis'] = lexicon.analyze(utt.text,categories=self.categories)\n for k in utt.meta['analysis'].keys():\n if utt.meta['analysis'][k] != 0.0:\n utt.meta['analysis'][k] = 1\n else:\n utt.meta['analysis'] = None\n\n counter = counter + 1\n if counter % 10000 == 0:\n print(\"processed \", counter, \"utterances \")\n return corpus","sub_path":"convokit/emotion/emotion.py","file_name":"emotion.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"613445089","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/flame/manage.py\n# Compiled at: 2018-06-26 10:52:49\n# Size of source mod 2**32: 8563 bytes\nfrom flame.util import utils\nimport os, sys, shutil, tarfile, json, pickle, pathlib\n\ndef set_model_repository(path=None):\n \"\"\"\n Set the model repository path.\n This is the dir where flame is going to create and load models\n \"\"\"\n 
utils.set_model_repository(path)\n\n\ndef action_new(model):\n \"\"\"\n Create a new model tree, using the given name.\n This creates the development version \"dev\",\n copying inside default child classes\n \"\"\"\n if not model:\n return (False, 'empty model label')\n else:\n ndir = utils.model_tree_path(model)\n if os.path.isdir(ndir):\n return (False, 'This endpoint already exists')\n os.mkdir(ndir)\n ndir += '/dev'\n os.mkdir(ndir)\n wkd = os.path.dirname(os.path.abspath(__file__))\n children_names = ['apply', 'idata', 'odata', 'learn']\n for cname in children_names:\n shutil.copy(wkd + '/children/' + cname + '_child.py', ndir + '/' + cname + '_child.py')\n\n shutil.copy(wkd + '/children/parameters.yaml', ndir)\n return 'new endpoint ' + model + ' created'\n\n\ndef action_kill(model):\n \"\"\"\n removes the model tree described by the argument\n \"\"\"\n if not model:\n return (False, 'empty model label')\n else:\n ndir = utils.model_tree_path(model)\n if not os.path.isdir(ndir):\n return (False, 'model not found')\n shutil.rmtree(ndir, ignore_errors=True)\n return (\n True, 'model ' + model + ' removed')\n\n\ndef action_publish(model):\n \"\"\"\n clone the development \"dev\" version as a new model version,\n assigning a sequential version number\n \"\"\"\n if not model:\n return (False, 'empty model label')\n else:\n bdir = utils.model_tree_path(model)\n if not os.path.isdir(bdir):\n return (False, 'model not found')\n v = None\n v = [int(x[-6:]) for x in os.listdir(bdir) if x.startswith('ver')]\n if not v:\n max_version = 0\n else:\n max_version = max(v)\n new_dir = bdir + '/ver%0.6d' % (max_version + 1)\n if os.path.isdir(new_dir):\n return (False, 'version already exists')\n shutil.copytree(bdir + '/dev', new_dir)\n return (\n True, 'development version published as version ' + str(max_version + 1))\n\n\ndef action_remove(model, version):\n \"\"\"\n Remove the version indicated as argument from the model tree indicated\n as argument\n \"\"\"\n if not model:\n return (False, 'empty model label')\n else:\n if version == 0:\n return (False, 'development version cannot be removed')\n rdir = utils.model_path(model, version)\n if not os.path.isdir(rdir):\n return (False, 'version not found')\n shutil.rmtree(rdir, ignore_errors=True)\n return (\n True, 'version ' + str(version) + ' of model ' + model + ' removed')\n\n\ndef action_list(model):\n \"\"\"\n Lists available models (if no argument is provided)\n and model versions (if \"model\" is provided as argument)\n \"\"\"\n if not model:\n rdir = utils.model_repository_path()\n print(rdir)\n num_models = 0\n for x in os.listdir(rdir):\n num_models += 1\n print(x)\n\n return (\n True, str(num_models) + ' models found in the repository')\n else:\n bdir = utils.model_tree_path(model)\n num_versions = 0\n for x in os.listdir(bdir):\n if x.startswith('ver'):\n num_versions += 1\n print(model, ':', x)\n\n return (\n True, 'model ' + model + ' has ' + str(num_versions) + ' published versions')\n\n\ndef action_import(model):\n \"\"\"\n Creates a new model tree from a tarbal file with the name \"model.tgz\"\n \"\"\"\n if not model:\n return (False, 'empty model label')\n else:\n base_model = os.path.basename(model)\n endpoint = os.path.splitext(base_model)[0]\n ext = os.path.splitext(base_model)[1]\n bdir = utils.model_tree_path(endpoint)\n if os.path.isdir(bdir):\n return (False, 'endpoint already exists')\n if ext != '.tgz':\n importfile = os.path.abspath(model + '.tgz')\n else:\n importfile = model\n print(importfile)\n if not 
os.path.isfile(importfile):\n return (\n False, 'importing package ' + importfile + ' not found')\n try:\n os.mkdir(bdir)\n except:\n return (\n False, 'error creating directory ' + bdir)\n else:\n with tarfile.open(importfile, 'r:gz') as (tar):\n tar.extractall(bdir)\n return (\n True, 'endpoint ' + endpoint + ' imported OK')\n\n\ndef action_export(model):\n \"\"\"\n Exports the whole model tree indicated in the argument as a single\n tarball file with the same name.\n \"\"\"\n if not model:\n return (False, 'empty model label')\n else:\n current_path = os.getcwd()\n exportfile = current_path + '/' + model + '.tgz'\n bdir = utils.model_tree_path(model)\n if not os.path.isdir(bdir):\n return (False, 'endpoint directory not found')\n os.chdir(bdir)\n itemend = os.listdir()\n itemend.sort()\n with tarfile.open(exportfile, 'w:gz') as (tar):\n for iversion in itemend:\n if not os.path.isdir(iversion):\n pass\n else:\n tar.add(iversion)\n\n os.chdir(current_path)\n return (\n True, 'endpoint ' + model + ' exported as ' + model + '.tgz')\n\n\ndef action_refactoring(file):\n \"\"\"\n NOT IMPLEMENTED,\n call to import externally generated models (eg. in KNIME or R)\n \"\"\"\n print('refactoring')\n return (True, 'OK')\n\n\ndef action_dir():\n \"\"\"\n Returns a JSON with the list of models and versions\n \"\"\"\n models_path = pathlib.Path(utils.model_repository_path())\n dirs = [x for x in models_path.iterdir() if x.is_dir()]\n model_dirs = [str(x) for x in dirs if list(x.glob('dev'))]\n results = []\n for imodel in model_dirs:\n versions = [\n {'text': 'dev'}]\n for iversion in os.listdir(utils.model_tree_path(imodel)):\n if iversion.startswith('ver'):\n versions.append({'text': iversion})\n\n results.append({'text':imodel, 'nodes':versions})\n\n return json.dumps(results)\n\n\ndef action_info(model, version=None, output='text'):\n \"\"\"\n Returns a text or JSON with info for a given model and version\n \"\"\"\n if model is None:\n return (False, 'empty model label')\n else:\n if version == None:\n return (False, 'no version provided')\n else:\n rdir = utils.model_path(model, version)\n if not os.path.isfile(os.path.join(rdir, 'info.pkl')):\n return (False, 'info not found')\n with open(os.path.join(rdir, 'info.pkl'), 'rb') as (handle):\n results = pickle.load(handle)\n results += pickle.load(handle)\n if output == 'text':\n for val in results:\n if len(val) < 3:\n print(val)\n else:\n print(val[0], ' (', val[1], ') : ', val[2])\n\n return (True, 'model informed OK')\n new_results = []\n for i in results:\n if 'numpy.int64' in str(type(i[2])):\n try:\n v = int(i[2])\n except:\n v = None\n\n new_results.append((i[0], i[1], v))\n else:\n if 'numpy.float64' in str(type(i[2])):\n try:\n v = float(i[2])\n except:\n v = None\n\n new_results.append((i[0], i[1], v))\n else:\n new_results.append(i)\n\n return (\n True, json.dumps(new_results))","sub_path":"pycfiles/flame-0.1-py3.6/manage.cpython-36.py","file_name":"manage.cpython-36.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"181997489","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport struct\nfrom hashlib import sha1\nfrom nine import basestring, str\n\nfrom warnings import warn\nwarn('Use bag.file_existence_manager instead of FileIdManager.',\n DeprecationWarning)\n\n\nclass FileIdManager(object):\n \"\"\"Creates 'file IDs' (hashcodes for file contents), stores these\n 
IDs in a binary file, allows user code to add_file_id(), and\n can answer whether is_id_known (whether a certain file ID\n has already been recorded).\n\n Only file content and length are considered; file names are\n irrelevant.\n \"\"\"\n recordlength = 24 # bytes\n\n def __init__(self, path):\n # Open the dictionary file for updates (mode must be str, not bytes, on Python 3)\n self.f = open(path, \"ab+\")\n\n def close(self):\n self.f.close()\n\n def get_id_for(self, content, closefile=True):\n if not isinstance(content, basestring):\n fc = content\n content = fc.read()\n if closefile:\n fc.close()\n if len(content) == 0:\n return b\"\\0\" * self.recordlength\n else:\n h = sha1(content).digest() # 20 bytes for the hash\n s = struct.pack(\"i\", len(content)) # 04 bytes for the length\n return s + h # 24 bytes total\n\n def is_id_known(self, file_id):\n self.validate_id(file_id)\n self.f.seek(0)\n s = self.f.read(self.recordlength)\n while s: # read() returns bytes, so compare truthiness rather than against \"\"\n if file_id == s:\n return True\n s = self.f.read(self.recordlength)\n return False\n\n def validate_id(self, file_id):\n length = len(file_id)\n if length != self.recordlength:\n raise RuntimeError(\"file_id size incorrect: \" + str(length))\n\n def add_file_id(self, file_id):\n self.validate_id(file_id)\n self.f.seek(0, 2) # move to end of file\n self.f.write(file_id)\n self.f.flush()\n\n def process(self, content, closefile=True):\n \"\"\"Example implementation (see source code of this method).\"\"\"\n id = self.get_id_for(content, closefile)\n b = self.is_id_known(id)\n if not b:\n self.add_file_id(id)\n return b\n","sub_path":"bag/file_id_manager.py","file_name":"file_id_manager.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"38690297","text":"# Only passes within the time limit when submitted with PyPy\nimport sys\ninput = sys.stdin.readline\n\nn, k = map(int, input().strip().split())\nhs = list(map(int, input().strip().split()))\n\ndp = [0] * n\n\nfor i in range(1, n):\n min_cost = 1e19\n for j in range(1, min(k + 1, i + 1)):\n cost = dp[i - j] + abs(hs[i] - hs[i - j])\n min_cost = min(cost, min_cost)\n dp[i] = min_cost\n\nprint(dp[n - 1])\n","sub_path":"Python_codes/p03161/s616483638.py","file_name":"s616483638.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"470947662","text":"import imp\nimport numpy as np\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Dropout, Input, Conv2D\nfrom tensorflow.python.keras.backend import dropout\nfrom icecream import ic\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nfrom tensorflow.keras.utils import to_categorical\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\nx_train = x_train.reshape(60000, 28*28).astype('float32')/255\nx_test = x_test.reshape(10000, 28*28).astype('float32')/255\n\n#2 model\ndef build_model(drop=0.5, optimizer='adam'):\n inputs = Input(shape=(28*28), name='input')\n x = Dense(512, activation='relu', name='hidden1')(inputs)\n x = Dropout(drop)(x)\n x = Dense(256, activation='relu', name='hidden2')(x)\n x = Dropout(drop)(x)\n x = Dense(128, activation='relu', name='hidden3')(x)\n x = Dropout(drop)(x)\n outputs = Dense(10, activation='softmax', name='outputs')(x)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(optimizer=optimizer, metrics=['acc'], loss='categorical_crossentropy')\n\n return model\n\ndef create_hyperparameter():\n 
batches = [10,20,30,40,50]\n optimizers = ['rmsprop', 'adam', 'adadelta']\n dropout = [0.1, 0.2, 0.3]\n return {\"batch_size\": batches, \"optimizer\" : optimizers, \"drop\": dropout}\n\nhyperparameters = create_hyperparameter()\n# ic(hyperparameters)\n# model2 = build_model()\n\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nmodel2 = KerasClassifier(build_fn=build_model, verbose=1) # 텐서플로를 사이킷런에 wrapping\n\nmodel = RandomizedSearchCV(model2, hyperparameters, cv=5) # 서치 모델에 텐서플로 모델 입력안됨 -> 텐서플로모델을 사이킷런으로 wrapping\n\nmodel.fit(x_train, y_train, verbose=1, epochs=3, validation_split=0.2) # 파라미터가 우선순위로 적용됨\n\nbe = model.best_estimator_\nbp = model.best_params_\nbs = model.best_score_\n\nprint(\"best_estimator:\", be)\nprint(\"best_params: \", bp)\nprint(\"best_score\", bs)\nacc = model.score(x_test,y_test)\nic(acc)\n\n","sub_path":"keras2/keras64_1_hyperParameter.py","file_name":"keras64_1_hyperParameter.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"608572543","text":"# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nimport site\n\nsite.addsitedir(\n \".venv/lib/python3.10/site-packages\"\n) # adds the packages of the virtual environment (here: poetry)\n\nimport numpy as np\n\nprint(f\"version of numpy: {np.__version__}\") # just a little test\n\n\nfrom . import addon\nfrom .version import __version__\n\n\nbl_info = {\n \"name\": \"fur_pattern_generator\",\n \"author\": \"RobertHue\",\n \"description\": \"\",\n \"blender\": (2, 80, 0),\n \"version\": (0, 2, 0),\n \"location\": \"\",\n \"warning\": \"\",\n \"category\": \"Generic\",\n}\n\nprint(\n f\"\"\"\n\t__file__={__file__:<35}\n\t__name__={__name__:<20}\n\t__package__={__package__!s:<20}\n\t\"\"\"\n)\n\n\ndef register():\n addon.register()\n\n\ndef unregister():\n addon.unregister()\n","sub_path":"fpg/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"603845008","text":"from babel_global import *\nimport config, urllib\n\ncommand_name = \"distance\"\ncommand_triggers = [\"distance\"]\ncommand_description = \"Gives the distance between two zip codes.\"\ncommand_syntax = \"#distance zip1,zip2\"\ncommand_access_level = config.RANK_ACCESS_MEMBER\n\ndef process_command(globals, command_header, output_function):\n args = command_header['args']\n if args.find(\",\") < 0 or len(args.split(\",\")) != 2:\n output_function(command_header, \"Invalid syntax. Usage: #distance zip1, zip2\")\n else:\n zip1 = args.split(\",\")[0].strip()\n zip2 = args.split(\",\")[1].strip()\n if len(zip1) != 5 or (not isInt(zip1)) or len(zip2) != 5 or (not isInt(zip2)):\n output_function(command_header, \"Invalid syntax. 
 You must provide two valid zip codes (5 digits)\")\n else:\n zip_distance = getZipDistance(int(zip1), int(zip2))\n if zip_distance != -1:\n output_function(command_header, \"The distance between those two locations is %s miles.\" % zip_distance)\n else:\n output_function(command_header, \"I am unable to determine how far apart those two locations are.\")\n\n","sub_path":"modules/commands/babelcommand_distance.py","file_name":"babelcommand_distance.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"317717110","text":"# 5-9. No users: add an if statement to hello_admin.py that\n# checks that the list of users is not empty.\n# • If the list is empty, print the message: «We need to find some users!»\n# • Remove all usernames from the list and make sure the program\n# prints the correct message.\n# users_list = [\"Andrew\", \"JOHHNAS\", \"AdMiN\", \"FrenchPress\", \"Dreamer1cc\", \"XIII\"]\nusers_list = []\nif users_list: \n for user in users_list:\n if user.lower() == \"admin\":\n print(\"Hello admin, would you like to see a status report?\")\n else:\n print(\"Hello \" + user.title() + \", thank you for logging in again\")\nelse:\n print(\"We need to find some users!\")","sub_path":"Chapter 5/5-9.py","file_name":"5-9.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"654073239","text":"from collections import deque\nimport sys\nfrom copy import deepcopy\nsys.setrecursionlimit(100000)\n\nclass Graph:\n def __init__(self, g_input=None):\n self.g = {}\n if isinstance(g_input, str):\n self.read_txt(g_input)\n else:\n self.g = g_input\n self.closed = []\n # self.open = deque()\n self.t = 0 # finishing time\n self.max_vertex = self.get_max_vertex() # source of dfs\n self.ft = {} # finishing time of each vertex\n self.scc_size = [] # scc size\n\n def read_txt(self, txt_name):\n \"\"\"\n read txt and store it in an adjacency dictionary\n \"\"\"\n with open(txt_name) as f:\n line = f.readline()\n while line and line != \"\\n\":\n str_list = line.split(' ')[:-1]\n new_list = list(map(int, str_list))\n if new_list[1] not in self.g.keys():\n self.g[new_list[1]] = []\n if new_list[0] not in self.g.keys():\n self.g[new_list[0]] = [new_list[1]]\n else:\n self.g[new_list[0]].append(new_list[1])\n\n line = f.readline()\n\n def get_r_graph(self):\n \"\"\"\n get a direction-reversed Graph\n :return: direction-reversed Graph\n \"\"\"\n r_graph = {}\n for k, v_list in self.g.items():\n for v in v_list:\n if v not in r_graph.keys():\n r_graph[v] = []\n r_graph[v].append(k)\n return Graph(r_graph)\n\n def get_max_vertex(self):\n \"\"\"\n get maximal vertex\n :return: max vertex\n \"\"\"\n return max(self.g.keys())\n\n def dfs(self, s):\n \"\"\"\n depth first search\n :param s: source of expansion in int\n \"\"\"\n self.closed.append(s)\n if s in self.g.keys(): # todo correct?\n for n in self.g[s]:\n if n not in self.closed:\n self.dfs(n)\n self.t += 1\n self.ft[s] = self.t\n\n def dfs_loop(self, step=1):\n \"\"\"\n the main depth first search loop\n \"\"\"\n s = self.max_vertex\n while True:\n last_size = len(self.closed)\n self.dfs(s)\n # in graph but not in closed\n ret = list(set(self.g.keys()).difference(set(self.closed)))\n this_size = len(self.closed)\n if step == 2:\n self.scc_size.append(this_size - last_size)\n if not ret:\n break\n s = max(ret)\n\n # def dfs_stack(self, step=1):\n #
\"\"\"\n # the main depth first search based on stack\n # \"\"\"\n #\n # s = self.max_vertex\n # open_stack = deque([s])\n # while True:\n # last_size = len(self.closed)\n # while open_stack:\n # r = open_stack.pop()\n # if r in self.g.keys():\n # for n in self.g[s]:\n # if n not in self.closed:\n # open_stack.append(n)\n # else:\n # self.t += 1\n # self.ft[r] = self.t\n # # in graph but not in closed\n # ret = list(set(self.g.keys()).difference(set(self.closed)))\n # this_size = len(self.closed)\n # if step == 2:\n # self.scc_size.append(this_size - last_size)\n # if not ret:\n # break\n # s = max(ret)\n # open_stack = deque([s])\n\n def get_graph_from_mapping(self, map_dict):\n \"\"\"\n\n :param map_dict: mapping dictiionary\n :return: a mapped Graph\n \"\"\"\n new_g = {}\n for key, v_list in self.g.items():\n new_g[map_dict[key]] = [map_dict[v] for v in v_list]\n return Graph(new_g)\n\n\n# process:\n# create a graph (adjecent list)\n# create a reverse graph (copy from the first one)\n# DFS\n\nif __name__ == \"__main__\":\n # test\n g = Graph(\"jason_smko_last.txt\")\n print(g)\n g_ = g.get_r_graph()\n g_.dfs_loop()\n print(g_.ft)\n g_ft = g.get_graph_from_mapping(g_.ft)\n print(g_ft.g)\n del g_, g\n g_ft.dfs_loop(step=2)\n print(g_ft.scc_size)\n\n # # real data\n # g = Graph(\"SCC.txt\")\n # g_ = g.get_r_graph()\n # print(\"get reversed graph\")\n # del g\n # g_.dfs_loop()\n # print(\"first pass okay!\")\n # ft = deepcopy(g_.ft)\n # g = Graph(\"SCC.txt\")\n # g_ft = g.get_graph_from_mapping(ft)\n # del g_, g\n # g_ft.dfs_loop(step=2)\n # print(\"second pass okay!\")\n # print(g_ft.scc_size)\n\n\n #Sumary:\n # my code is correct is small test cases but memory meets an overflow. And it runs rather slowly.\n # To be improved:\n # Using stack for DFS instead of recursive call of functions, recursive calls are too many > 1000\n # using negative edge to record reversed graph in one dict instead of creating a new reversed graph\n # using set() to track visited nodes and finished nodes instead of list, cause the index is not important and membership test is faster for sets\n\n","sub_path":"Part_1/Homework_4/scc.py","file_name":"scc.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"190727307","text":"\"\"\"Tools for hadoop.\n\"\"\"\n\n# Author: Donald Cheung \n\nfrom .. import utils\nfrom .. 
 import runner\n\nimport subprocess\nimport time\nimport sys\nimport re\nimport signal\nimport os\n\nclass Hadoop:\n \"\"\"Provide a set of useful methods for operating on hadoop tasks.\n\n Parameters\n ----------\n hadoop_path: str\n The hadoop environment directory; it should contain the bin path.\n\n \"\"\"\n def __init__(self, hadoop_path = None):\n self.__hadoop_path = hadoop_path\n self.__streaming_job = None\n self.__streaming_killing_cmd = None\n self.__streaming_job_id = None\n\n def set_hadoop_path(self, hadoop_path):\n \"\"\"Set hadoop's environment directory.\n\n Parameters\n ----------\n hadoop_path: str\n The hadoop environment directory; it should contain the bin path.\n\n \"\"\"\n if os.path.isdir(hadoop_path) and os.path.isfile(\"%s/bin/hadoop\" % hadoop_path):\n self.__hadoop_path = hadoop_path\n else:\n raise ValueError(\"Hadoop path [%s] is illegal\" % hadoop_path)\n\n def fetch_env(self):\n \"\"\"Get the path of the hadoop environment\"\"\"\n stdout_value, stderr_value = subprocess.Popen('which hadoop', \\\n shell=True, \\\n stdout=subprocess.PIPE, \\\n stderr=subprocess.PIPE).communicate()\n if len(stdout_value) > 0:\n return os.path.dirname(os.path.dirname(stdout_value))\n return None\n\n def streaming_cmd(self, input_path,\n output_path,\n mapper,\n reducer,\n jobname,\n files = None,\n map_task_num = 743,\n map_capacity = 743,\n reduce_task_num = 743,\n reduce_capacity = 743,\n memory_limit = 1200,\n map_output_separator = '\\t',\n map_output_key_fields = 1,\n key_fields_for_partition = 1,\n partitioner = \"org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner\",\n multiple_output = False,\n separator = False,\n priority = 'NORMAL'):\n \"\"\"Generate a hadoop streaming command.\n\n Parameters\n ----------\n input_path: str\n The streaming task's input path. If it contains multiple paths,\n separate them with ','.\n\n output_path: str\n The streaming task's output path.\n\n mapper: str\n The streaming task's mapper command.\n\n reducer: str\n The streaming task's reducer command. If there is no reducer, set it to None.\n\n jobname: str\n The streaming task's jobname. Prefer programming-language-style identifiers,\n since it will be used to create log files on disk.\n\n files: str\n Files to be uploaded to the hadoop cluster; multiple files should be separated by ','.\n\n \"\"\"\n if self.__hadoop_path is None:\n self.__hadoop_path = self.fetch_env()\n if self.__hadoop_path is None:\n raise ValueError(\"Not setting the hadoop's environment path yet\")\n\n cmd = '%s/bin/hadoop streaming ' % self.__hadoop_path\n cmd += ' -D mapred.job.name=\"%s\" ' % jobname\n cmd += ' -D mapred.map.tasks=%d ' % map_task_num\n cmd += ' -D mapred.reduce.tasks=%d ' % reduce_task_num\n cmd += ' -D mapred.job.map.capacity=%d ' % map_capacity\n cmd += ' -D mapred.job.reduce.capacity=%d ' % reduce_capacity\n cmd += ' -D stream.memory.limit=%d ' % memory_limit\n cmd += ' -D map.output.key.field.separator=\"%s\" ' % map_output_separator\n cmd += ' -D num.key.fields.for.partition=%s ' % key_fields_for_partition\n cmd += ' -D stream.num.map.output.key.fields=%d ' % map_output_key_fields\n cmd += ' -D mapred.job.priority=%s ' % priority\n\n if separator:\n cmd += ' -D mapred.textoutputformat.ignoreseparator=true '\n\n cmd += ' -partitioner %s ' % partitioner\n if multiple_output:\n cmd += ' -outputformat org.apache.hadoop.mapred.lib.SuffixMultipleTextOutputFormat '\n\n if files is not None and len(files.strip()) != 0:\n cmd += ' '.join([' -file %s ' % fname for fname in files.split(',')])\n\n cmd += ' -mapper \"%s\" ' % mapper.replace(\"\\\"\", \"\\\\\\\"\")\n if reducer is None:\n cmd += ' -reducer \"NONE\" '\n else:\n cmd += ' -reducer \"%s\" ' % reducer.replace(\"\\\"\", \"\\\\\\\"\")\n cmd += ' '.join([' -input %s ' % fin for fin in input_path.split(',')])\n cmd += ' -output %s ' % output_path\n cmd += ' -cacheArchive /share/python2.7.tar.gz#python '\n return cmd\n\n def streaming_join_cmd(self, left_input_path,\n left_fields_num,\n left_key_list,\n right_input_path,\n right_fields_num,\n right_key_list,\n output_path,\n left_value_list = None,\n right_value_list = None,\n method = \"left\",\n jobname = \"join_data\",\n map_task_num = 743,\n map_capacity = 743,\n reduce_task_num = 743,\n reduce_capacity = 743,\n memory_limit = 1200):\n \"\"\"Generate a hadoop streaming command for joining two types of files.\n\n Parameters\n ----------\n left_input_path: string\n The join task's left input path. If it contains multiple paths,\n separate them with ','.\n\n left_fields_num: integer\n The left input files' number of fields.\n\n left_key_list: string\n The left input's key list, used to join the right input.\n\n right_input_path: string\n The join task's right input path. If it contains multiple paths,\n separate them with ','.\n\n right_fields_num: integer\n The right input files' number of fields.\n\n right_key_list: string\n The right input's key list, used to join the left input.\n\n output_path: string\n The streaming task's output path.\n\n left_value_list: string\n The output fields of the left input. If None, output all of the fields.\n If it contains multiple fields, separate them with ','.\n\n right_value_list: string\n The output fields of the right input. If None, output all of the fields.\n If it contains multiple fields, separate them with ','.\n\n method: string\n The join method; \"left\", \"right\" and \"inner\" are currently supported.\n\n jobname: str\n The streaming task's jobname. Prefer programming-language-style identifiers,\n since it will be used to create log files on disk.\n \"\"\"\n left_key_list = self.__join_fields_list(left_key_list, left_fields_num)\n left_value_list = self.__join_fields_list(left_value_list, left_fields_num)\n right_key_list = self.__join_fields_list(right_key_list, right_fields_num)\n right_value_list = self.__join_fields_list(right_value_list, right_fields_num)\n\n left_input_pattern = \"\\|\".join([\"\\(.*\\)\\(%s\\)\" % path.replace('*', '.*') \\\n for path in left_input_path.split(',')])\n right_input_pattern = \"\\|\".join([\"\\(.*\\)\\(%s\\)\" % path.replace('*', '.*') \\\n for path in right_input_path.split(',')])\n\n map_cmd = \"python/bin/python _join_mapred.py \"\n map_cmd += \" -e mapper -m %s \" % (method)\n map_cmd += \" --left_input_pattern \\'%s\\' \" % left_input_pattern\n map_cmd += \" --left_key_list %s \" % (\",\".join(map(lambda k: str(k), left_key_list)))\n map_cmd += \" --left_value_list %s \" % (\",\".join(map(lambda k: str(k), left_value_list)))\n map_cmd += \" --left_fields_num %d \" % left_fields_num\n\n map_cmd += \" --right_input_pattern \\'%s\\' \" % right_input_pattern\n map_cmd += \" --right_key_list %s \" % (\",\".join(map(lambda k: str(k), right_key_list)))\n map_cmd += \" --right_value_list %s \" % (\",\".join(map(lambda k: str(k), right_value_list)))\n map_cmd += \" --right_fields_num %d \" % right_fields_num\n\n reduce_cmd = \"python/bin/python _join_mapred.py \"\n reduce_cmd += \" -e reducer -m %s \" % (method)\n reduce_cmd += \" --left_value_num %d \" % len(left_value_list)\n reduce_cmd += \" --right_value_num %d \" % len(right_value_list)\n\n join_mapred_file = \"%s/_join_mapred.py\" % os.path.abspath(os.path.dirname(__file__))\n join_cmd = self.streaming_cmd(input_path = \"%s,%s\" % (left_input_path, right_input_path),\n output_path = output_path,\n mapper = map_cmd,\n reducer = reduce_cmd,\n jobname = jobname,\n files = join_mapred_file,\n map_task_num = map_task_num,\n reduce_task_num = reduce_task_num,\n map_capacity = map_capacity,\n reduce_capacity = reduce_capacity,\n key_fields_for_partition = 1,\n map_output_key_fields = 2,\n memory_limit = memory_limit)\n return join_cmd\n\n def run_streaming(self, streaming_cmd, clear_output = False, terminal = True):\n pre_sigint_handler = signal.signal(signal.SIGINT, self.__streaming_signal_handler)\n pre_sigterm_handler = signal.signal(signal.SIGTERM, self.__streaming_signal_handler)\n\n if clear_output:\n output_path = streaming_cmd.split(\" -output \")[-1].strip().split(' ')[0]\n self.remove_path(output_path)\n\n search_res = re.compile(r'.*-D mapred.job.name=\"(\\w*)\".*').search(streaming_cmd)\n if search_res is None:\n jobname = \"None\"\n else:\n jobname = search_res.groups()[0]\n\n self.__streaming_job = runner.task_runner.Job(jobname = jobname,\n command = streaming_cmd)\n self.__streaming_job.start()\n while self.__streaming_job.stderr is None:\n time.sleep(0.1)\n\n while True:\n line = self.__streaming_job.stderr.readline()\n if len(line) == 0:\n break\n\n if \"INFO mapred.JobClient: Running job\" in line:\n self.__streaming_job_id = line.strip().split(\" \")[-1]\n\n if \"-kill\" in line:\n self.__streaming_killing_cmd = line.strip().split('mapred.JobClient:')[-1].strip()\n if terminal:\n sys.stderr.write(line)\n\n if \"Streaming Job Failed!\" in line:\n signal.signal(signal.SIGINT, pre_sigint_handler)\n signal.signal(signal.SIGTERM, pre_sigterm_handler)\n return -1\n\n while self.__streaming_job.is_alive():\n time.sleep(0.1)\n\n
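# the streaming job has finished at this point; hand SIGINT/SIGTERM back to\n # the handlers that were installed before run_streaming took them over\n 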
signal.signal(signal.SIGINT, pre_sigint_handler)\n signal.signal(signal.SIGTERM, pre_sigterm_handler)\n if self.__streaming_job.returncode != 0:\n return -1\n return 0\n\n def remove_path(self, path):\n if self.__hadoop_path is None:\n self.__hadoop_path = self.fetch_env()\n if self.__hadoop_path is None:\n raise ValueError(\"Not setting the hadoop's environment path yet\")\n\n remove_cmd = \"%s/bin/hadoop fs -rmr %s\" % (self.__hadoop_path, path)\n stdout_value, stderr_value = subprocess.Popen(remove_cmd,\n shell=True, \\\n stdout=subprocess.PIPE, \\\n stderr=subprocess.PIPE).communicate()\n if len(stderr_value) > 0:\n return False\n return True\n\n def list_path(self, path, pattern = r'.*', line_sep = '\\n'):\n if self.__hadoop_path is None:\n self.__hadoop_path = self.fetch_env()\n if self.__hadoop_path is None:\n raise ValueError(\"Not setting the hadoop's environment path yet\")\n\n file_list = list()\n if type(path) in (list, set, dict):\n for p in path:\n file_list.extend(self.list_path(p, pattern))\n return file_list\n\n if type(path) != str:\n return list()\n list_cmd = \"%s/bin/hadoop fs -ls %s\" % (self.__hadoop_path, path)\n stdout_value, stderr_value = subprocess.Popen(list_cmd,\n shell=True, \\\n stdout=subprocess.PIPE, \\\n stderr=subprocess.PIPE).communicate()\n pattern_inst = re.compile('(%s)(/){0,1}(%s)' % (path.replace('*', '.*'), pattern))\n file_list = list()\n for line in stdout_value.split(line_sep):\n line = line.strip()\n last_space = line.rfind(' /')\n if last_space == -1 or last_space >= len(line):\n continue\n\n fname = line[last_space + 1:]\n if re.match(pattern_inst, fname):\n file_list.append(fname)\n\n if len(stderr_value) > 0:\n sys.stderr.write(\"[%s] %s\\n\" % (utils.shell.color_str(\"ERROR\", \"red\"), stderr_value))\n return file_list\n\n def distcp(self, src, dest, src_userpwd = None,\n dest_userpwd = None,\n map_capacity = 500,\n map_speed = 1000000):\n if self.__hadoop_path is None:\n self.__hadoop_path = self.fetch_env()\n if self.__hadoop_path is None:\n raise ValueError(\"Not setting the hadoop's environment path yet\")\n\n distcp_cmd = \"%s/bin/hadoop distcp\" % self.__hadoop_path\n distcp_cmd += \" -D mapred.job.map.capacity=%d \" % map_capacity\n distcp_cmd += \" -D distcp.map.speed.kb=%d \" % map_speed\n\n if src_userpwd != None:\n distcp_cmd += \" -su %s \" % src_userpwd\n\n if dest_userpwd != None:\n distcp_cmd += \" -du %s \" % dest_userpwd\n distcp_cmd += \" %s %s \" % (src, dest)\n\n distcp_process = subprocess.Popen(distcp_cmd, shell=True, \\\n stdout=subprocess.PIPE, \\\n stderr=subprocess.PIPE)\n stdout_value, stderr_value = distcp_process.communicate()\n if distcp_process.returncode != 0:\n sys.stderr.write(\"[%s] %s\\n\" % (utils.shell.color_str(\"ERROR\", \"red\"), stderr_value))\n return False\n return True\n\n def fetch_content(self, path, output_file = subprocess.PIPE, error_file = subprocess.PIPE):\n if self.__hadoop_path is None:\n self.__hadoop_path = self.fetch_env()\n if self.__hadoop_path is None:\n raise ValueError(\"Not setting the hadoop's environment path yet\")\n\n cat_cmd = \"%s/bin/hadoop fs -cat %s\" % (self.__hadoop_path, path)\n stdout_value, stderr_value = subprocess.Popen(cat_cmd,\n shell = True, \\\n stdout = output_file, \\\n stderr = error_file).communicate()\n return stdout_value, stderr_value\n\n def __streaming_signal_handler(self, sig_num, stack):\n sys.stderr.write(\"\\nSignal [%d] received. 
Streaming job begins to terminate.\\n\" % sig_num)\n if self.__streaming_job is not None and self.__streaming_job.is_alive():\n self.__streaming_job.suicide()\n\n if self.__streaming_killing_cmd is not None:\n stdout_value, stderr_value = subprocess.Popen(self.__streaming_killing_cmd, \\\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n # output on stderr means the kill command failed (same convention as remove_path/list_path)\n if len(stderr_value) > 0:\n sys.stderr.write(\"Terminate hadoop job [%s] failed\\n\" % self.__streaming_job_id)\n else:\n sys.stderr.write(\"Terminate hadoop job [%s] succeeded\\n\" % self.__streaming_job_id)\n sys.exit()\n\n def __join_fields_list(self, key_list, fields_num):\n if key_list is None:\n return range(0, fields_num)\n\n fields_list = key_list[:]\n if type(key_list) == str:\n fields_list = [int(k) for k in key_list.split(',')]\n return filter(lambda col: 0 <= col < fields_num, fields_list)\n\n","sub_path":"jpyutils/distributed/hadoop.py","file_name":"hadoop.py","file_ext":"py","file_size_in_byte":17366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"35011862","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE',\n 'icompose_project.settings')\n\nimport django\nimport pytz\ndjango.setup()\nfrom icompose.models import *\nfrom django.contrib.auth.models import User\nimport datetime\nfrom django.template.defaultfilters import slugify\nfrom django.core.files import File\n#from rango.models import Category, Page\n\ndef populate():\n\n # Dictionaries for populating data\n\n users = [\n {\n \"username\": \"bob\",\n \"email\": \"bob@example.com\",\n \"about\": \"Hello, my name is Bob!\",\n \"picture\": \"example/green.png\",\n },\n {\n \"username\": \"dan5\",\n \"email\": \"dan5@example.com\",\n \"about\": \"Hello, my name is Dan!\",\n \"picture\": None,\n },\n {\n \"username\": \"mastercomposer\",\n \"email\": \"mastercomposer@example.com\",\n \"about\": \"I am a composer who likes to upload different types of original music for fun!\",\n \"picture\": \"example/pink.png\",\n },\n {\n \"username\": \"michael\",\n \"email\": \"michael@example.com\",\n \"about\": \"Hello, my name is Michael!\",\n \"picture\": None,\n },\n {\n \"username\": \"musiclover5\",\n \"email\": \"musiclover5@example.com\",\n \"about\": \"I LOVE MUSIC\",\n \"picture\": \"example/red.png\",\n },\n {\n \"username\": \"musiclover6\",\n \"email\": \"musiclover6@example.com\",\n \"about\": \"I LOVE MUSIC <3\",\n \"picture\": None,\n },\n {\n \"username\": \"sam7\",\n \"email\": \"sam7@example.com\",\n \"about\": \"I'm Sam! 
I love music!\",\n \"picture\": \"example/darkblue.png\",\n },\n {\n \"username\": \"user612\",\n \"email\": \"user612@example.com\",\n \"about\": \"\",\n \"picture\": None,\n },\n ]\n\n genres = [\n {\n \"name\": \"Other\",\n },\n {\n \"name\": \"Jazz\",\n },\n {\n \"name\": \"Classic\",\n },\n {\n \"name\": \"Pop\",\n },\n {\n \"name\": \"Rock\",\n },\n ]\n\n songs = [\n {\n \"name\": \"Epic Composition\",\n \"username\": \"mastercomposer\",\n \"plays\": 270,\n \"genre_slug\": \"rock\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/darkblue.png\",\n },\n {\n \"name\": \"The song with no name\",\n \"username\": \"mastercomposer\",\n \"plays\": 106,\n \"genre_slug\": \"pop\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/green.png\",\n },\n {\n \"name\": \"Funky Song\",\n \"username\": \"mastercomposer\",\n \"plays\": 102,\n \"genre_slug\": \"classic\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/lightblue.png\",\n },\n {\n \"name\": \"Birthday Song\",\n \"username\": \"mastercomposer\",\n \"plays\": 56,\n \"genre_slug\": \"jazz\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/orange.png\",\n },\n {\n \"name\": \"Not so Epic Composition\",\n \"username\": \"mastercomposer\",\n \"plays\": 13,\n \"genre_slug\": \"other\",\n \"file\": \"example/example.mp3\",\n \"picture\": None,\n },\n {\n \"name\": \"Bob's Song\",\n \"username\": \"bob\",\n \"plays\": 210,\n \"genre_slug\": \"jazz\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/pink.png\",\n },\n {\n \"name\": \"Bob's Song Two\",\n \"username\": \"bob\",\n \"plays\": 203,\n \"genre_slug\": \"rock\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/purple.png\",\n },\n {\n \"name\": \"Slow Random\",\n \"username\": \"michael\",\n \"plays\": 157,\n \"genre_slug\": \"other\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/red.png\",\n },\n {\n \"name\": \"Cool Song\",\n \"username\": \"michael\",\n \"plays\": 116,\n \"genre_slug\": \"rock\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/yellow.png\",\n },\n {\n \"name\": \"Classic Song\",\n \"username\": \"michael\",\n \"plays\": 10,\n \"genre_slug\": \"classic\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/orange.png\",\n },\n {\n \"name\": \"Jazz Song\",\n \"username\": \"musiclover5\",\n \"plays\": 19,\n \"genre_slug\": \"classic\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/pink.png\",\n },\n {\n \"name\": \"Other Song\",\n \"username\": \"musiclover6\",\n \"plays\": 20,\n \"genre_slug\": \"classic\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/purple.png\",\n },\n {\n \"name\": \"Pop Song\",\n \"username\": \"sam7\",\n \"plays\": 1,\n \"genre_slug\": \"classic\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/red.png\",\n },\n {\n \"name\": \"Rock Song\",\n \"username\": \"user612\",\n \"plays\": 16,\n \"genre_slug\": \"classic\",\n \"file\": \"example/example.mp3\",\n \"picture\": \"example/yellow.png\",\n },\n ]\n\n comments = [\n {\n \"username\": \"sam7\",\n \"song_slug\": slugify(\"Epic Composition\"),\n \"date_commented\": datetime.datetime(2017, 0o02, 22, 8, 3, tzinfo=pytz.UTC),\n \"comment\": \"First!\",\n },\n {\n \"username\": \"musiclover5\",\n \"song_slug\": slugify(\"Epic Composition\"),\n \"date_commented\": datetime.datetime(2017, 0o02, 21, 2, 23, tzinfo=pytz.UTC),\n \"comment\": \"A bit too slow for my taste...\",\n },\n {\n \"username\": \"user612\",\n \"song_slug\": 
slugify(\"Epic Composition\"),\n \"date_commented\": datetime.datetime(2017, 0o02, 21, 20, 8, tzinfo=pytz.UTC),\n \"comment\": \"Best thing I have listed to for quite some time!\",\n },\n {\n \"username\": \"dan5\",\n \"song_slug\": slugify(\"Epic Composition\"),\n \"date_commented\": datetime.datetime(2017, 0o02, 15, 16, 34, tzinfo=pytz.UTC),\n \"comment\": \"I love it!\",\n },\n {\n \"username\": \"musiclover6\",\n \"song_slug\": slugify(\"Epic Composition\"),\n \"date_commented\": datetime.datetime(2017, 0o02, 10, 12, 4, tzinfo=pytz.UTC),\n \"comment\": \"Great sound!\",\n },\n ]\n\n ratings = [\n {\n \"username\": \"sam7\",\n \"song_slug\": slugify(\"Epic Composition\"),\n \"value\": 10,\n },\n {\n \"username\": \"musiclover5\",\n \"song_slug\": slugify(\"Epic Composition\"),\n \"value\": 8,\n },\n {\n \"username\": \"musiclover6\",\n \"song_slug\": slugify(\"Epic Composition\"),\n \"value\": 9,\n },\n {\n \"username\": \"musiclover6\",\n \"song_slug\": slugify(\"Bob's Song\"),\n \"value\": 7,\n },\n {\n \"username\": \"Bob's Song Two\",\n \"song_slug\": slugify(\"Bob's Song\"),\n \"value\": 7,\n },\n ]\n\n # Add every user\n for user in users:\n print(\"User: \" + user[\"username\"])\n add_user(user[\"username\"], user[\"email\"], user[\"about\"], user[\"picture\"])\n\n # Add every user\n for genre in genres:\n print(\"Genre: \" + genre[\"name\"])\n add_genre(genre[\"name\"])\n\n # Add every song\n for song in songs:\n print(\"Song: \" + song[\"name\"])\n add_song(song[\"name\"], song[\"username\"], song[\"plays\"], song[\"genre_slug\"], song[\"file\"], song[\"picture\"])\n\n # Add every comment\n for comment in comments:\n print(\"Comment: On \" + comment[\"song_slug\"] + \" by \" + comment[\"username\"])\n add_comment(comment[\"username\"], comment[\"song_slug\"], comment[\"date_commented\"], comment[\"comment\"])\n\n # Add every rating\n for rating in ratings:\n print(\"Rating: Of \" + str(rating[\"value\"]) + \" on \" + rating[\"song_slug\"] + \" by \" + rating[\"username\"])\n add_rating(rating[\"username\"], rating[\"song_slug\"], rating[\"value\"])\n\n# Adds user to user and userprofile models in database\ndef add_user(username, email, about, picture):\n u = User.objects.get_or_create(username=username)[0]\n u.email = email\n u.save()\n up = UserProfile.objects.get_or_create(user=u)[0]\n\n # If song has a picture, open song picture and put in into django model\n if (picture):\n user_picture_open = open(picture, 'r')\n user_picture = File(user_picture_open)\n up.picture = user_picture\n\n up.about = about\n up.save()\n return u\n\n# Adds genre to database\ndef add_genre(name):\n g = Genre.objects.get_or_create(name=name)\n\n# Adds song to database\ndef add_song(name, username, plays, genre_slug, file, picture):\n u = User.objects.get_or_create(username=username)[0]\n up = UserProfile.objects.get_or_create(user=u)[0]\n g = Genre.objects.get_or_create(slug=genre_slug)[0]\n s = Song.objects.get_or_create(name=name, user=up)[0]\n\n # Open song file and put it into django model\n song_file_open = open(file, 'r')\n song_file = File(song_file_open)\n s.file = song_file\n\n # If song has a picture, open song picture and put in into django model\n if(picture):\n song_picture_open = open(picture, 'r')\n song_picture = File(song_picture_open)\n s.picture = song_picture\n\n s.plays = plays\n s.genre = g\n s.save()\n\n# Adds comment to database\ndef add_comment(username, song_slug, date_commented, comment):\n u = User.objects.get_or_create(username=username)[0]\n up = 
UserProfile.objects.get_or_create(user=u)[0]\n s = Song.objects.get(slug=song_slug)\n c = Comment.objects.get_or_create(user=up, song=s, dateCommented=date_commented, comment=comment)\n\n# Adds rating to database\ndef add_rating(username, song_slug, value):\n u = User.objects.get_or_create(username=username)[0]\n up = UserProfile.objects.get_or_create(user=u)[0]\n s = Song.objects.get_or_create(slug=song_slug)[0]\n r = Rating.objects.get_or_create(user=up, song=s, value=value)[0]\n\n# Start execution here!\nif __name__ == '__main__':\n print(\"Starting population script...\")\n populate()\n","sub_path":"population_script.py","file_name":"population_script.py","file_ext":"py","file_size_in_byte":10658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"609809063","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# author: Ago\nimport os,sys\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\n\nif __name__ == '__main__':\n from core import managgement\n server = managgement.ManagementTool(sys.argv)\n server.execute()","sub_path":"20170913/server/bin/LuffyServer.py","file_name":"LuffyServer.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"582273664","text":"# Copyright 2015 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Command-line interface for the Mob* Monitor.\"\"\"\n\nfrom __future__ import print_function\n\nfrom chromite.lib import commandline\nfrom chromite.lib import remote_access\nfrom chromite.mobmonitor.rpc import rpc\n\n\nclass MobMonCli(object):\n \"\"\"Provides command-line functionality for using the Mob* Monitor.\"\"\"\n\n def __init__(self, host='localhost', port=9999):\n self.host = host\n self.port = remote_access.NormalizePort(port)\n\n def ExecuteRequest(self, request, service, action):\n \"\"\"Execute the request if an appropriate RPC function is defined.\n\n Args:\n request: The name of the RPC.\n service: The name of the service involved in the RPC.\n action: The action to be performed.\n \"\"\"\n rpcexec = rpc.RpcExecutor(self.host, self.port)\n\n if not hasattr(rpcexec, request):\n raise rpc.RpcError('The request \"%s\" is not recognized.' 
% request)\n\n if 'GetServiceList' == request:\n return rpcexec.GetServiceList()\n\n if 'GetStatus' == request:\n return rpcexec.GetStatus(service=service)\n\n if 'RepairService' == request:\n return rpcexec.RepairService(service=service, action=action)\n\n\ndef ParseArguments(argv):\n parser = commandline.ArgumentParser()\n parser.add_argument('request', choices=rpc.RPC_LIST)\n parser.add_argument('-s', '--service', help='The service to act upon')\n parser.add_argument('-a', '--action', help='The action to execute')\n parser.add_argument('--host', default='localhost',\n help='The hostname of the Mob* Monitor.')\n parser.add_argument('-p', '--port', type=int, default=9999,\n help='The Mob* Monitor port.')\n\n return parser.parse_args(argv)\n\n\ndef main(argv):\n \"\"\"Command line interface for the Mob* Monitor.\n\n The basic syntax is:\n mobmon [args]\n mobmon --help\n \"\"\"\n options = ParseArguments(argv)\n\n cli = MobMonCli(options.host, options.port)\n result = cli.ExecuteRequest(options.request, options.service,\n options.action)\n\n print(result)\n","sub_path":"third_party/chromite/scripts/mobmoncli.py","file_name":"mobmoncli.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"55992071","text":"\"\"\"\nTwitch Application\n\"\"\"\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nimport urllib2\nfrom django.http import HttpResponseRedirect\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom twitch.models import Favorites\nimport json as simplejson\nimport re\nfrom twitter.twitterauth import TwitterAuth\nfrom youtube import YouTube\n\ndef index(request):\n \"\"\"\n index view for twitch\n \"\"\"\n featured_game = get_featured_game()\n twitterauth = TwitterAuth()\n twitterauth.set_access_token()\n tweets = twitterauth.get_tweets(featured_game)\n user = request.user\n if user.is_authenticated():\n username = user.username\n logged_in = True\n search_li = \"\"\n if request.method == 'GET' and 'error' in request.GET:\n get_error = request.GET['error']\n if get_error:\n return HttpResponseRedirect(\"/twitch/\")\n\n top_stream_id, featured_li = get_featured_info()\n\n if request.method == 'GET' and 'search' in request.GET:\n search_q = request.GET['search']\n if not (search_q == \"\"):\n if re.match(\"^[A-Za-z0-9_ ]*$\", search_q):\n search_li = search_q\n top_stream_id, featured_li = get_game_info(search_q)\n\n if request.method == 'GET' and 'sort' in request.GET:\n sort_str = request.GET['sort']\n featured_li = sort_list(featured_li, sort_str)\n \n if request.method == 'GET' and 'favorite' in request.GET:\n favorite_id = request.GET['favorite']\n addrem_to_user_favorites(favorite_id, username)\n\n if request.method == 'GET' and 'watch' in request.GET:\n tmp_id = request.GET['watch']\n if re.match(\"^[A-Za-z0-9_ ]*$\", tmp_id):\n top_stream_id = request.GET['watch']\n context = { 'username': username, \n 'logged_in': logged_in, \n 'favLi': get_user_favorites(username), \n 'favLiLenBool': \\\n len(get_user_favorites(username)) >= 2, \n 'featuredLi': featured_li, \n 'searchLi': search_li, \n 'streamID': top_stream_id,\n 'featuredGame': featured_game,\n 'tweets': tweets, }\n return render(request, 'twitch/index.html', context)\n else:\n return redirect('/accounts/login')\n\n\ndef get_featured_info():\n \"\"\"\n get the featured info from api\n \"\"\"\n url = 'https://api.twitch.tv/kraken/streams/featured?'+\\\n 
'limit=35&offset=0&client_id=jqsd54ktgrxu0x05kh6w2lt4l3zkfck'\n json_contents = urllib2.urlopen(url)\n dict_obj = simplejson.load(json_contents)\n return_li = []\n for stream_num in range(len(dict_obj[\"featured\"])):\n return_li.append( (stream_num, \\\n dict_obj[\"featured\"][stream_num][\"stream\"][\"channel\"][\"display_name\"], \\\n dict_obj[\"featured\"][stream_num][\"stream\"][\"channel\"][\"status\"], \\\n dict_obj[\"featured\"][stream_num][\"stream\"][\"viewers\"], \\\n dict_obj[\"featured\"][stream_num][\"stream\"][\"channel\"][\"game\"], \\\n str(dict_obj[\"featured\"][stream_num][\"stream\"][\"channel\"][\"url\"]).replace(\"http://www.twitch.tv/\", \"\")) )\n return str(return_li[0][5]), return_li\n\ndef get_featured_game():\n \"\"\"return featured game being show\"\"\"\n url = 'https://api.twitch.tv/kraken/streams/featured?'+\\\n 'limit=35&offset=0&client_id=jqsd54ktgrxu0x05kh6w2lt4l3zkfck'\n json_contents = urllib2.urlopen(url)\n dict_obj = simplejson.load(json_contents)\n return dict_obj[\"featured\"][0][\"stream\"][\"channel\"][\"game\"]\n\ndef sort_list(featured_li, sort_str):\n \"\"\"\n sorts the list\n \"\"\"\n #cap sensitive\n sort_li = [\"1\", \"2\", \"3\", \"4\"]\n if sort_str in sort_li:\n if sort_str == \"3\":\n featured_li.sort(key=lambda x: x[int(sort_str)], reverse=True)\n else: \n featured_li.sort(key=lambda x: x[int(sort_str)] if x[int(sort_str)]\\\n is None else x[int(sort_str)].lower())\n return featured_li\n\ndef get_game_info(search_q):\n \"\"\"\n get the game info from api\n \"\"\"\n url = 'https://api.twitch.tv/kraken/search/streams?q='+str(search_q).replace(\" \", \"%20\")+\\\n '&type=suggest&client_id=jqsd54ktgrxu0x05kh6w2lt4l3zkfck'\n json_contents = urllib2.urlopen(url)\n dict_obj = simplejson.load(json_contents)\n return_li = []\n for stream_num in range(len(dict_obj[\"streams\"])):\n return_li.append( (stream_num, \\\n dict_obj[\"streams\"][stream_num][\"channel\"][\"display_name\"], \\\n dict_obj[\"streams\"][stream_num][\"channel\"][\"status\"], \\\n dict_obj[\"streams\"][stream_num][\"viewers\"], \\\n dict_obj[\"streams\"][stream_num][\"channel\"][\"game\"], \\\n str(dict_obj[\"streams\"][stream_num][\"channel\"][\"url\"]).replace(\"http://www.twitch.tv/\", \"\")) )\n ret_url = \"\"\n if return_li:\n ret_url = str(return_li[0][5])\n return ret_url, return_li\n\ndef addrem_to_user_favorites(favorite_id, username):\n \"\"\"\n add and or remove the user favorite channels\n \"\"\"\n usr = User.objects.get(username=username)\n try:\n fav_li = usr.favorites.favoriteStreams.split(', ')\n if favorite_id not in fav_li:\n fav_li.append(favorite_id)\n else:\n fav_li.remove(favorite_id)\n usr.favorites.favoriteStreams = \", \".join(str(x) for x in fav_li)\n usr.favorites.save()\n except ObjectDoesNotExist:\n usr = User.objects.get(username=username)\n usr.favorites, created = Favorites.objects.get_or_create(user=usr, \\\n favoriteStreams=\"@\")\n addrem_to_user_favorites(favorite_id, username)\n\ndef get_user_favorites(username):\n \"\"\"\n get the user favorite channels\n \"\"\"\n fav_li = []\n try:\n usr = User.objects.get(username=username)\n fav_li = usr.favorites.favoriteStreams.split(', ')\n except ObjectDoesNotExist:\n pass\n return fav_li\n\ndef youtube(request):\n \"\"\"get the youtube page with popular games\"\"\"\n logged_in = False\n user = request.user\n if user.is_authenticated():\n logged_in = True\n youtube = YouTube()\n popular_games = youtube.get_popular_games()\n context = {'popular_games': popular_games,\n 'logged_in': logged_in, }\n 
return render(request, 'youtube/index.html', context)\n","sub_path":"amateur_sports_announcers/twitch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"537065071","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport tkinter as tk\nfrom tkinter import ttk, filedialog, messagebox\nimport threading\nimport queue\nfrom urllib.request import urlopen, Request\nfrom urllib.parse import quote, quote_plus, urlparse\nfrom hashlib import md5\nimport os\nfrom enum import Enum\nfrom html.parser import HTMLParser\nfrom PIL import Image\nfrom io import BytesIO, StringIO\n\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\n\ndef url_image_name(url):\n parts = urlparse(url)\n _, filename = os.path.split(parts.path)\n return os.path.splitext(filename)\n\n\ndef url_quote(url):\n parts = urlparse(url)\n quoted = [parts.scheme, quote(parts.netloc), quote(parts.path), quote(parts.params),\n quote_plus(parts.query, safe='='), quote_plus(parts.fragment)]\n result = '{}://{}{}'.format(quoted[0], quoted[1], quoted[2])\n if len(quoted) > 3 and len(quoted[3]) > 0:\n result = '{};{}'.format(result, quoted[3]) # params\n if len(quoted) > 4 and len(quoted[4]) > 0:\n result = '{}?{}'.format(result, quoted[4]) # query\n if len(quoted) > 5 and len(quoted[5]) > 0:\n result = '{}#{}'.format(result, quoted[5]) # fragment\n return result\n\n\ndef batch_jobs(template: str, from_: int, to_: int, pattern: str):\n for i in range(from_, to_ + 1):\n order = '{order:{width}}'.format(order=i, width=pattern).strip()\n entity = template.replace('(*)', order)\n yield entity, i\n yield None, None\n\n\nclass WebPageHandler(HTMLParser):\n domain_name = None\n def __init__(self, *a, **kw):\n super().__init__(*a, **kw)\n self._images = []\n\n def feed_file(self, web_page_file):\n self._images[:] = []\n web_page_file.seek(0)\n self.feed(web_page_file.read())\n self.close()\n return self._images\n\n @staticmethod\n def attrs_has_attr(attrs, attr):\n for i in attrs:\n if i == attr:\n return True\n return False\n\n @staticmethod\n def attrs_has_name(attrs, name):\n for i in attrs:\n if i[0] == name:\n return True\n return False\n\n @staticmethod\n def attrs_get_value(attrs, name):\n for i in attrs:\n if i[0] == name:\n return i[1]\n return None\n\n\nclass HachiRawHandler(WebPageHandler):\n def __init__(self, *a, **kw):\n super().__init__(*a, **kw)\n self._chapter_pages_found = False\n self._chapter_page_found = False\n self._image_found = False\n self.domain_name = 'hachiraw.com'\n\n def handle_starttag(self, tag, attrs):\n if not self._chapter_pages_found:\n if tag == 'div' and self.attrs_has_attr(attrs, ('class', 'chapter-pages')):\n self._chapter_pages_found = True\n elif not self._chapter_page_found:\n if tag == 'div' and self.attrs_has_attr(attrs, ('class', 'chapter-page')):\n self._chapter_page_found = True\n elif tag == 'img' and self.attrs_has_name(attrs, 'src'):\n url = self.attrs_get_value(attrs, 'src')\n self._images.append(url.strip())\n\n def handle_endtag(self, tag):\n if self._chapter_pages_found and self._chapter_page_found:\n if tag == 'div':\n self._chapter_page_found = False\n\n\nclass ParallelParadiseOnlineHandler(WebPageHandler):\n def __init__(self, *a, **kw):\n super().__init__(*a, **kw)\n self.domain_name = 'www.parallelparadise.online'\n self._reading_content_found = False\n self._page_break_found = False\n\n def handle_starttag(self, tag, attrs):\n if 
not self._reading_content_found:\n if tag == 'div' and self.attrs_has_attr(attrs, ('class', 'reading-content')):\n self._reading_content_found = True\n elif not self._page_break_found:\n if tag == 'div' and self.attrs_has_attr(attrs, ('class', 'page-break ')):\n self._page_break_found = True\n elif tag == 'img' and self.attrs_has_attr(attrs, ('class', 'wp-manga-chapter-img')):\n url = self.attrs_get_value(attrs, 'src')\n self._images.append(url.strip())\n\n def handle_endtag(self, tag):\n if self._reading_content_found and self._page_break_found:\n if tag == 'div':\n self._page_break_found = False\n\n\nclass DownloadStatus(Enum):\n Unknown = 0\n Failed = 1\n Downloaded = 2\n Successful = 3\n\n\nclass ThreadDownloader(threading.Thread):\n def __init__(self, url, dst, timeout=3, retry=1, callback=None):\n threading.Thread.__init__(self)\n self._url = url # source URL\n self._dst = dst # destination file path\n self._timeout = timeout\n self._retry = retry\n self._cb = callback\n self._status = DownloadStatus.Unknown\n self._content = None\n\n def run(self):\n user_agent = {'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50'}\n req = Request(self._url, headers=user_agent, unverifiable=True)\n for i in range(0, self._retry):\n try:\n ifo = urlopen(req, timeout=self._timeout)\n self._content = ifo.read()\n ifo.close()\n break\n except Exception as e:\n print('%s: %s' % (self._url, e))\n # download failed\n if self._content is None:\n self._status = DownloadStatus.Failed\n if self._cb is not None:\n self._cb(self)\n return\n # downloaded successfully; invoke the callback once\n self._status = DownloadStatus.Downloaded\n if self._cb is not None:\n self._cb(self)\n # invoke the callback again once the local file is written\n if not self.file_exists(self._content):\n with open(self._dst, 'wb') as ofo:\n ofo.write(self._content)\n self._status = DownloadStatus.Successful\n if self._cb is not None:\n self._cb(self)\n\n def file_exists(self, content):\n \"\"\"\n check whether self._dst already exists on local disk with identical content.\n @param content: content of the remote resource.\n @return: True if it exists and matches\n \"\"\"\n if not os.path.exists(self._dst):\n return False\n if not os.path.isfile(self._dst):\n return False\n if os.path.getsize(self._dst) != len(content):\n return False\n with open(self._dst, 'rb') as ofo:\n old = ofo.read()\n digest1 = md5(old).digest()\n digest2 = md5(content).digest()\n return digest1 == digest2\n\n def is_successful(self):\n return self._status == DownloadStatus.Successful\n\n def is_downloaded(self):\n return self._status == DownloadStatus.Downloaded\n\n def is_failed(self):\n return self._status == DownloadStatus.Failed\n\n def file_path(self):\n return self._dst\n\n\nclass MainWnd(tk.Frame):\n WND_TITLE = 'Manga Crawler'\n WEB_PAGE = 'index.html'\n\n def __init__(self, master, *a, **kw):\n tk.Frame.__init__(self, master, *a, **kw)\n #\n frame = tk.Frame(self)\n frame.pack(side=tk.TOP, expand=tk.NO, fill=tk.BOTH)\n lbl = tk.Label(frame, text='URL:')\n lbl.pack(side=tk.LEFT)\n lbl.bind('', lambda _: self._url.set(''))\n self._url = tk.StringVar()\n tk.Entry(frame, textvariable=self._url).pack(side=tk.LEFT, expand=tk.YES, fill=tk.X)\n btn = tk.Button(frame, text='Download', command=self.onclick_analyze_url) # download m3u8 file (index file)\n btn.pack(side=tk.LEFT)\n tk.Button(frame, text='Paste from CB', command=self.paste_from_clipboard).pack(side=tk.LEFT)\n #\n frame = tk.Frame(self)\n frame.pack(side=tk.TOP, expand=tk.NO, fill=tk.BOTH)\n tk.Label(frame, text='Dir:').pack(side=tk.LEFT)\n 
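# destination directory; the downloaded web page and images are saved here\n 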
self._tmp_dir = tk.StringVar()\n tk.Entry(frame, textvariable=self._tmp_dir).pack(side=tk.LEFT, expand=tk.YES, fill=tk.X)\n tk.Button(frame, text='Save to Dir', command=self.onclick_browse_dir).pack(side=tk.LEFT)\n #\n frame = tk.Frame(self)\n frame.pack(side=tk.TOP, expand=tk.NO, fill=tk.BOTH)\n tk.Label(frame, text='Wild Card Pattern:').pack(side=tk.LEFT)\n self._wildcard_pattern = tk.StringVar(value='04')\n tk.Entry(frame, textvariable=self._wildcard_pattern, width=4).pack(side=tk.LEFT, expand=tk.NO)\n tk.Label(frame, text='Range:').pack(side=tk.LEFT)\n self._wildcard_from = tk.IntVar(value=1)\n tk.Spinbox(frame, textvariable=self._wildcard_from, from_=1, to=999, width=3).pack(side=tk.LEFT)\n tk.Label(frame, text='-->').pack(side=tk.LEFT)\n self._wildcard_to = tk.IntVar(value=99)\n tk.Spinbox(frame, textvariable=self._wildcard_to, from_=1, to=999, width=3).pack(side=tk.LEFT)\n #\n frame = tk.Frame(self)\n frame.pack(side=tk.TOP, expand=tk.NO, fill=tk.BOTH)\n tk.Label(frame, text='Hanlder:').pack(side=tk.LEFT)\n self._handlers = dict()\n for handler in [HachiRawHandler(), ParallelParadiseOnlineHandler()]:\n self._handlers[handler.domain_name] = handler\n self._active_handler = tk.StringVar()\n tk.OptionMenu(frame, self._active_handler, *self._handlers.keys()).pack(side=tk.LEFT)\n #\n group = tk.LabelFrame(self, text='Links')\n group.pack(side=tk.TOP, expand=tk.YES, fill=tk.BOTH)\n # row 1 -- step 2\n frame = tk.Frame(group, padx=5, pady=5)\n frame.pack(side=tk.TOP, expand=tk.YES, fill=tk.BOTH)\n self._links = ttk.Treeview(frame, selectmode=tk.EXTENDED, show='headings', columns=('sn', 'url', 'state'))\n self._links.pack(side=tk.LEFT, expand=tk.YES, fill=tk.BOTH)\n yscroll = tk.Scrollbar(frame, orient=tk.VERTICAL, command=self._links.yview)\n yscroll.pack(side=tk.RIGHT, expand=tk.NO, fill=tk.Y)\n self._links['yscrollcommand'] = yscroll.set\n xscroll = tk.Scrollbar(group, orient=tk.HORIZONTAL, command=self._links.xview)\n xscroll.pack(side=tk.TOP, expand=tk.NO, fill=tk.X)\n self._links['xscrollcommand'] = xscroll.set\n self._links.bind('', self.onkey_tree_delete)\n self._links.bind('<1>', self.onkey_tree_click)\n self._links.heading('sn', text='SN')\n self._links.heading('url', text='URL') # '#0' column is icon and it's hidden here\n self._links.heading('state', text='State') # we only need two columns: '#1' and '#2'\n self._links.column('sn', width=40, stretch=False)\n self._links.column('state', width=50, stretch=False, anchor=tk.CENTER)\n #\n frame = tk.Frame(group)\n frame.pack(side=tk.TOP, expand=tk.NO, fill=tk.BOTH)\n tk.Label(frame, text='Progress: ').pack(side=tk.LEFT)\n self._progress = tk.IntVar()\n self._progressbar = ttk.Progressbar(frame, mode='determinate', variable=self._progress)\n self._progressbar.pack(side=tk.LEFT, expand=tk.YES, fill=tk.X)\n #\n frame = tk.Frame(self)\n frame.pack(side=tk.TOP, expand=tk.NO, fill=tk.BOTH)\n tk.Button(frame, text='Read Local Web Page', command=self.load_local_web_page).pack(side=tk.LEFT)\n tk.Label(frame, text='Threads:').pack(side=tk.LEFT)\n self._job_num = tk.IntVar()\n tk.Spinbox(frame, textvariable=self._job_num, from_=1, to=10, width=2).pack(side=tk.LEFT)\n tk.Label(frame, text='Timeout:').pack(side=tk.LEFT)\n self._timeout = tk.IntVar()\n tk.Spinbox(frame, textvariable=self._timeout, from_=3, to=9, width=2).pack(side=tk.LEFT)\n tk.Label(frame, text='Retry:').pack(side=tk.LEFT)\n self._retry = tk.IntVar()\n tk.Spinbox(frame, textvariable=self._retry, from_=1, to=5, width=2).pack(side=tk.LEFT)\n self._btn = tk.Button(frame, 
text='Download Images', command=self.onclick_download_images)\n self._btn.pack(side=tk.LEFT)\n #\n self._auto = tk.BooleanVar(value=True)\n tk.Checkbutton(frame, text='Auto', variable=self._auto).pack(side=tk.RIGHT)\n\n self._job_queue = queue.Queue()\n self._downloaders = []\n self._running = False\n self._url_generator = None\n self._dir_generator = None\n self._save_dir = None\n\n def onclick_analyze_url(self):\n url = self._url.get().strip()\n dst = self._tmp_dir.get().strip()\n if len(url) == 0 or len(dst) == 0:\n return\n wildcard_url = '(*)' in url\n wildcard_dst = '(*)' in dst\n if any([wildcard_url, wildcard_dst]) and not all([wildcard_url, wildcard_dst]):\n messagebox.askquestion(MainWnd.WND_TITLE, 'wild card patterns do not match')\n return\n if wildcard_url:\n self._url_generator = batch_jobs(url, self._wildcard_from.get(), self._wildcard_to.get(), self._wildcard_pattern.get())\n url, current = next(self._url_generator)\n if url is None:\n return\n if wildcard_dst:\n self._dir_generator = batch_jobs(dst, self._wildcard_from.get(), self._wildcard_to.get(), self._wildcard_pattern.get())\n dst, current = next(self._dir_generator)\n if dst is None:\n return\n else:\n self._wildcard_from.set(current)\n if not os.path.exists(dst):\n os.mkdir(dst)\n url = url_quote(url)\n self._save_dir = dst\n job = ThreadDownloader(url=url, dst=os.path.join(self._save_dir, MainWnd.WEB_PAGE),\n timeout=self._timeout.get(), retry=self._retry.get(),\n callback=self.on_web_page_downloaded)\n job.start()\n\n def on_web_page_downloaded(self, job: ThreadDownloader):\n if job.is_failed():\n messagebox.showerror(MainWnd.WND_TITLE, 'Failed to download web page:\\n%s' % job._url)\n return\n if job.is_successful():\n return\n # parse from an in-memory file to speed things up\n parse_result = urlparse(job._url)\n if parse_result.netloc not in self._handlers:\n return\n self._active_handler.set(parse_result.netloc)\n content = job._content.decode(encoding='utf8')\n with StringIO(content) as file:\n self.load_links(file, parse_result.netloc)\n\n def load_local_web_page(self):\n dst = self._tmp_dir.get().strip()\n if len(dst) == 0 or '(*)' in dst:\n return\n web_page = os.path.join(dst.strip(), MainWnd.WEB_PAGE)\n if not os.path.exists(web_page):\n return\n active_handler = self._active_handler.get()\n if len(active_handler) == 0:\n messagebox.askquestion(MainWnd.WND_TITLE, 'Web content cannot be parsed without a handler specified')\n return\n self._save_dir = dst.strip()\n with open(web_page) as file:\n self.load_links(file, active_handler)\n\n def load_links(self, file_obj, active_handler):\n links = self._handlers[active_handler].feed_file(file_obj)\n self._links.delete(*self._links.get_children())\n for i, url in enumerate(links, start=1):\n iid = 'I%04d' % i\n self._links.insert('', tk.END, iid=iid, values=(i, url, ''))\n if self._auto.get():\n self.enqueue_all_jobs()\n\n def paste_from_clipboard(self):\n url = self.clipboard_get().strip()\n if len(url) > 0:\n self._url.set(url)\n\n def onclick_browse_dir(self):\n a_dir = filedialog.askdirectory()\n if a_dir == '':\n return\n self._tmp_dir.set(a_dir)\n\n def onkey_tree_delete(self, _):\n # keep treeview items intact when downloading, because jobs are in queue.\n if self._running:\n return\n #\n selected = self._links.selection()\n num = len(selected)\n if num == 0:\n return\n if num == 1:\n msg = 'Are you sure to delete\\n\\n%s\\n\\n?' % selected[0]\n else:\n msg = 'Are you sure to delete %d jobs?'
 % num\n if not messagebox.askokcancel(MainWnd.WND_TITLE, msg):\n return\n self._links.delete(*selected)\n\n def onkey_tree_click(self, _):\n selected = self._links.selection()\n if len(selected) == 0:\n return\n _, url, _ = self._links.item(selected[0], 'values')\n self.clipboard_clear()\n self.clipboard_append(url)\n\n def onclick_download_images(self):\n if self._running:\n self._running = False # global signal to stop running jobs\n self._btn.config(text='Download')\n for i in self._downloaders:\n i.join()\n self._downloaders[:] = []\n return\n #\n self.enqueue_all_jobs()\n\n def enqueue_all_jobs(self):\n urls = self._links.get_children()\n if len(urls) == 0:\n return\n #\n self._running = True\n self._btn.config(text='Cancel')\n #\n self.clear_queue()\n for i in urls:\n self._job_queue.put(i)\n self._links.set(i, column='state', value='')\n #\n self._progress.set(0)\n self._progressbar.config(maximum=self._job_queue.qsize())\n #\n self.after(100, self.update_progress)\n\n def clear_queue(self):\n self._downloaders[:] = []\n while not self._job_queue.empty():\n self._job_queue.get()\n\n def update_progress(self):\n \"\"\"\n update UI\n \"\"\"\n if not self._running:\n return\n\n finished = 0\n for i in self._downloaders[:]:\n if i.is_alive():\n continue\n # visualize task state: completion or failure\n if i.is_successful():\n self._links.delete(i.iid)\n else:\n self._links.set(i.iid, column='state', value='X')\n self._downloaders.remove(i)\n finished += 1\n if finished > 0:\n self._progress.set(self._progress.get() + finished)\n #\n free_slots = max(self._job_num.get() - len(self._downloaders), 0)\n to_be_added = min(free_slots, self._job_queue.qsize())\n for i in range(0, to_be_added):\n iid = self._job_queue.get()\n # [ Important Point about ttk.Treeview ]\n # no matter what type it was when inserted into 'values',\n # it is of type str when retrieved.\n #\n # In short, be careful with 'sn' below in this app.\n sn, url, state = self._links.item(iid, 'values')\n _, ext = url_image_name(url)\n url = url_quote(url)\n dst = os.path.join(self._save_dir, 'img_%04d%s' % (int(sn), ext))\n job = ThreadDownloader(url, dst, self._timeout.get(), self._retry.get(), self.on_image_downloaded)\n job.iid = iid # attach a temporary attribute\n self._downloaders.append(job)\n job.start()\n self._links.set(iid, column='state', value='...')\n #\n if len(self._downloaders) > 0:\n self.after(100, self.update_progress)\n return\n # all jobs finished downloading; the web page is no longer needed\n if len(self._links.get_children()) == 0:\n os.remove(os.path.join(self._save_dir, MainWnd.WEB_PAGE))\n # if we are not continuing, notify the user\n if not self.can_automate_next():\n self.after(100, self.notify_finish)\n return\n # try the next link\n url, current = next(self._url_generator)\n dst, current = next(self._dir_generator)\n if url is None or dst is None:\n self._url_generator = None\n self._dir_generator = None\n self.after(100, self.notify_finish)\n return\n # download the next link\n if not os.path.exists(dst):\n os.mkdir(dst)\n self._wildcard_from.set(current)\n url = url_quote(url)\n self._save_dir = dst\n job = ThreadDownloader(url=url, dst=os.path.join(dst, MainWnd.WEB_PAGE),\n timeout=self._timeout.get(), retry=self._retry.get(),\n callback=self.on_web_page_downloaded)\n job.start()\n\n def notify_finish(self):\n self._running = False\n self._btn.config(text='Download')\n messagebox.showinfo(MainWnd.WND_TITLE, 'All segments are downloaded.')\n\n def on_choose_handler(self, _):\n pass\n\n def on_image_downloaded(self, job):\n assert isinstance(job, ThreadDownloader)\n if not
 job.is_downloaded():\n return\n filepath, filename = os.path.split(job._dst)\n basename, ext = os.path.splitext(filename)\n if ext != '.webp':\n return\n # my manga reader does not support the webp format, so convert to JPEG\n job._dst = os.path.join(filepath, '{}.jpg'.format(basename))\n with Image.open(BytesIO(job._content)) as img:\n jpg_data = BytesIO()\n img.convert('RGB').save(jpg_data, format='JPEG')\n job._content = jpg_data.getvalue()\n\n def can_automate_next(self):\n if self._auto.get() is False:\n return False\n if len(self._links.get_children()) > 0:\n return False\n if self._url_generator is None or self._dir_generator is None:\n return False\n return True\n\n\ndef main():\n try:\n root = tk.Tk()\n root.title(MainWnd.WND_TITLE)\n MainWnd(root).pack(fill=tk.BOTH, expand=tk.YES, padx=5, pady=5)\n root.mainloop()\n except Exception as e:\n print(e)\n\nif __name__ == '__main__':\n main()\n","sub_path":"MangaCrawler.py","file_name":"MangaCrawler.py","file_ext":"py","file_size_in_byte":21660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"285810029","text":"import ipdb\nimport numpy as np\nEPISODE = 100\nNUM_TESTS = 100\nTRAINOPT = ['random', 'initial']\ntrainopt = 'random'\nassert trainopt in TRAINOPT\ngame_name = 'CartPole-v0'\n\nprint(\"What is the episode size?(default==20)\")\nep = input()\nEPISODE = int(ep)\nprint(\"Episode is \", EPISODE)\n\ndef get_test_record_title(game_name, episode, trainOpt, num_tests=100):\n title = '{}_EPI{}_{}_#Test{}'.format(game_name, episode, trainOpt, num_tests)\n return title\n\ncsv_name= get_test_record_title(game_name, EPISODE, trainopt, num_tests=NUM_TESTS) + '.csv'\n\nwith open(csv_name, 'r') as rf:\n lines = [line for line in rf]\n rewards = []\n reward = []\n isNew = False\n for l in lines:\n if l == '\\n':\n rewards.append(reward)\n isNew = True\n continue\n if isNew:\n reward = []\n isNew = False\n reward.append(float(l))\n else:\n reward.append(float(l))\n \n rewards = np.asarray(rewards)\n rewardsT = rewards\n\nnew_csv_name = get_test_record_title(game_name, EPISODE, trainopt, num_tests=NUM_TESTS) + '_plot.csv'\n\n\nwith open(new_csv_name, 'w') as wf:\n \n for i in range(rewardsT.shape[0]):\n for j in range(rewardsT.shape[1]):\n wf.write(\"{}\".format(rewardsT[i][j]))\n if j != rewardsT.shape[1]-1:\n wf.write(\",\")\n wf.write('\\n')\n","sub_path":"utils/reward_plotter.py","file_name":"reward_plotter.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"49137429","text":"import nltk\nimport random\nfrom nltk.classify.scikitlearn import SklearnClassifier\nimport pickle\n\nfrom sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\n\nfrom nltk.classify import ClassifierI\nfrom statistics import mode\n\n\nclass VoteClassifier(ClassifierI):\n def __init__(self, *classifiers):\n self._classifiers = classifiers\n\n def classify(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n return mode(votes)\n\n def confidence(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n\n choice_votes = votes.count(mode(votes))\n conf = choice_votes / len(votes)\n return conf\n\n\nshort_pos = open(\"short_reviews/positive.txt\", \"r\").read()\nshort_neg = open(\"short_reviews/negative.txt\", \"r\").read()\n\nall_words = 
[]\n\ndocuments = []\n\nallowed_word_types = [\"J\"]\n# J = adjective, V = verb, R = adverb\n\n# specifying types of words to use\nfor p in short_pos.split('\\n'):\n documents.append((p, \"pos\"))\n words = nltk.word_tokenize(p)\n pos = nltk.pos_tag(words) # part of speech tagging\n for w in pos:\n if w[1][0] in allowed_word_types:\n all_words.append(w[0].lower())\n\nfor p in short_neg.split('\\n'):\n documents.append((p, \"neg\"))\n words = nltk.word_tokenize(p)\n pos = nltk.pos_tag(words) # part of speech tagging\n for w in pos:\n if w[1][0] in allowed_word_types:\n all_words.append(w[0].lower())\n\ndocuments_f = open(\"pickled_models/documents.pickle\", \"rb\")\ndocuments = pickle.load(documents_f)\ndocuments_f.close()\n\nword_features_load = open(\"pickled_models/word_features.pickle\", \"rb\")\nword_features = pickle.load(word_features_load)\nword_features_load.close()\n\n\ndef find_features(document):\n words = nltk.word_tokenize(document)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n return features\n\n\nfeature_sets_f = open(\"pickled_models/feature_sets.pickle\", \"rb\")\nfeature_sets = pickle.load(feature_sets_f)\nfeature_sets_f.close()\n\nrandom.shuffle(feature_sets)\n\ntraining_set = feature_sets[10000:]\ntesting_set = feature_sets[:10000]\n\n# load in pickled models\nopen_file = open(\"pickled_models/naivebayes.pickle\", \"rb\")\nclassifier = pickle.load(open_file)\nopen_file.close()\n\nopen_file = open(\"pickled_models/MultinomialNB.pickle\", \"rb\")\nMNB_classifier = pickle.load(open_file)\nopen_file.close()\n\nopen_file = open(\"pickled_models/BernoulliNB.pickle\", \"rb\")\nBernoulliNB_classifier = pickle.load(open_file)\nopen_file.close()\n\nopen_file = open(\"pickled_models/LogisticRegression.pickle\", \"rb\")\nLogisticRegression_classifier = pickle.load(open_file)\nopen_file.close()\n\nopen_file = open(\"pickled_models/SGD.pickle\", \"rb\")\nSGD_classifier = pickle.load(open_file)\nopen_file.close()\n\nopen_file = open(\"pickled_models/LinearSVC.pickle\", \"rb\")\nLinearSVC_classifier = pickle.load(open_file)\nopen_file.close()\n\nopen_file = open(\"pickled_models/NuSVC.pickle\", \"rb\")\nNuSVC_classifier = pickle.load(open_file)\nopen_file.close()\n\nvoted_classifier = VoteClassifier(classifier,\n LinearSVC_classifier,\n MNB_classifier,\n BernoulliNB_classifier,\n LogisticRegression_classifier)\n\n\ndef sentiment(text):\n feats = find_features(text)\n return voted_classifier.classify(feats), voted_classifier.confidence(feats)\n\n","sub_path":"nlp_basics/sentiment_mod.py","file_name":"sentiment_mod.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"149974284","text":"from django.urls import path\nfrom .views import fbv,cbv,authenticate,generic_cbv\n\nurlpatterns=[\n path('tasks/',fbv.task_list),\n path('task_detail//',fbv.task_detail),\n path('ctasks/',cbv.TaskListView.as_view()),\n path('ctask_detail//',cbv.TaskDetailView.as_view()),\n path('login/',authenticate.login),\n path('register/',authenticate.register),\n path('gtasks/',generic_cbv.TaskGenericListView.as_view())\n]","sub_path":"Week_11/todo/api3/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"193754908","text":"class Solution:\n\n # @param A, a list of integers\n # @return an integer\n\n # cannot be used on arrays that have duplicate items.\n def 
firstMissingPositive1(self, A):\n        minp, maxp, np, total = None, None, 0, 0\n        for p in A:\n            if p > 0:\n                if minp is None or minp > p:\n                    minp = p\n                if maxp is None or maxp < p:\n                    maxp = p\n                np, total = np + 1, total + p\n        if minp is None or minp > 1:\n            return 1\n        n = maxp - minp + 1 - np\n        if not n:\n            return maxp + 1\n        while n > 1:\n            miss = ((maxp - minp + 1) * (maxp + minp) >> 1) - total\n            maxp, np, total = (miss - (n * (n - 1) >> 1)) // n, 0, 0\n            for p in A:\n                if 0 < p <= maxp:\n                    np, total = np + 1, total + p\n            n = maxp - minp + 1 - np\n        return ((maxp - minp + 1) * (maxp + minp) >> 1) - total\n\n    def firstMissingPositive(self, A):\n        for i in range(len(A)):\n            while 0 < A[i] <= len(A) and A[i] != A[A[i] - 1]:\n                A[A[i] - 1], A[i] = A[i], A[A[i] - 1]\n        for i, p in enumerate(A):\n            if p != i + 1:\n                return i + 1\n        return len(A) + 1\n\n    def first_missing_positive(self, A):\n        if not A:\n            return 1\n        num = 0\n        for p in A:\n            if p > 0:\n                num |= (1 << (p - 1))\n        i = 0\n        while num & (1 << i):\n            i += 1\n        return i + 1\n\n\nif __name__ == '__main__':\n    fmp = Solution()\n    print(fmp.firstMissingPositive([10, 4, 16, 54, 17, -7, 21, 15, 25, 31, 61,\n                                    1, 6, 12, 21, 46, 16, 56, 54, 12, 23, 20,\n                                    38, 63, 2, 27, 35, 11, 13, 47, 13, 11, 61,\n                                    39, 0, 14, 42, 8, 16, 54, 50, 12, -10, 43,\n                                    11, -1, 24, 38, -10, 13, 60, 0, 44, 11,\n                                    50, 33, 48, 20, 31, -4, 2, 54, -6, 51, 6]))\n","sub_path":"leetcode/python/findMissingPositive.py","file_name":"findMissingPositive.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"356029440","text":"\"\"\"Euler problem 26, repeating decimal fraction.\"\"\"\nfrom math import log, floor, ceil\n\nlog10 = lambda x: log(x, 10)\n\ndef divide(num):\n    \"\"\"Look for patterns in decimal fraction.\"\"\"\n    rem = 1\n    mods = {1}\n    while True:\n        while rem < num:\n            rem *= 10\n        rem = rem % num\n        if not rem:\n            return 0\n        if rem in mods:\n            return len(mods)\n        mods.add(rem)\n\ndef test_divide():\n    \"\"\"Test against known case\"\"\"\n    assert divide(7) == 6\n    assert divide(3) == 1\n\nif __name__ == \"__main__\":\n    print(max([(n, divide(n)) for n in range(1, 1001)], key=lambda x: x[1]))\n","sub_path":"euler/euler26.py","file_name":"euler26.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"334341169","text":"# -*- coding: utf-8 -*-\nimport unittest\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\nimport datetime\nimport time\nimport Image\n\nfrom common import AllegroClient, scaleImage, base64ToImg, img2base64\n\nclass AllegroClientTest(unittest.TestCase):\n    def setUp(self):\n        self._client = AllegroClient(\".\")\n\n    def tearDown(self):\n        pass\n    \n    def test_convertToFields(self):\n        self._fieldAssert(str(\"test\"), \"fvalue-string\")\n        self._fieldAssert(int(1), \"fvalue-int\")\n        self._fieldAssert(float(1.0), \"fvalue-float\")\n        self._fieldAssert(True, \"fvalue-boolean\")\n        d = datetime.datetime.now()\n        self._fieldAssert(d, \"fvalue-datetime\", time.mktime(d.timetuple()))\n\n    def _fieldAssert(self, value, field_name, expected=None):\n        field = self._client._convertToFieldsValues(1, value)\n        if expected == None:\n            expected = value\n        self.assertEqual(getattr(field, field_name), expected, \"wrong assignment! 
- \" + field_name)\n\n def test_translate(self):\n paramsMap = {'title' : 'test title', 'description' : 'test description'}\n\n val = self._client._translate('#country', paramsMap)\n self.assertEqual(val, 228, 'country!')\n val = self._client._translate('#title', paramsMap)\n self.assertEqual(val, 'test title', 'title!')\n val = self._client._translate('#description', paramsMap)\n self.assertEqual(val, 'test description', 'description!')\n \n def test_scaleImage(self):\n filename = \"./sample-img.jpg\"\n scaleImage(filename)\n \n def test_base64(self):\n filename = \"./sample-img.jpg\"\n in_img = Image.open(filename)\n img = base64ToImg(img2base64(in_img))\n img.save(\"tmp.jpg\")\n","sub_path":"allegro/commonTest.py","file_name":"commonTest.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"11016432","text":"\n\nimport re\nimport os\nimport typing\nimport time\nimport datetime\nimport json\nimport sys\nimport shutil\n\nimport jk_typing\nimport jk_utils\nimport jk_mounting\nimport jk_logging\nfrom jk_testing import Assert\nimport jk_json\n\nfrom .constants import *\nfrom .TargetDirectoryStrategy_StaticDir import TargetDirectoryStrategy_StaticDir\nfrom .AbstractBackupConnector import AbstractBackupConnector\nfrom .ThaniyaBackupContext import ThaniyaBackupContext\nfrom .ThaniyaIO import ThaniyaIO\nfrom .ProcessingFallThroughError import ProcessingFallThroughError\nfrom .ProcessingContext import ProcessingContext\nfrom .AbstractTargetDirectoryStrategy import AbstractTargetDirectoryStrategy\nfrom .ThaniyaBackupStats import ThaniyaBackupStats\nfrom .tasks.AbstractThaniyaTask import AbstractThaniyaTask\nfrom .BD2 import BD2\nfrom .ThaniyaClientCfg import ThaniyaClientCfg\n\n\n\n\n\n\nclass ThaniyaBackupDriver(object):\n\n\t################################################################################################################################\n\t## Constructor Method\n\t################################################################################################################################\n\n\t#\n\t# Constructor method.\n\t#\n\t# @param\tAbstractBackupConnector backupConnector\t\t\t\tAn object that is used to connect to a backup repository/backup server later.\n\t# @param\tdict backupConnectorParameters\t\t\t\t\t\tA dictionary that holds various parameters required to connect to the backup repository/backup server.\n\t# @param\tAbstractTargetDirectoryStrategy targetDirStrategy\tA strategy that decides about which target directory to use exactly.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __init__(self,\n\t\tbackupConnector:AbstractBackupConnector,\n\t\tbackupConnectorParameters:dict,\n\t\ttargetDirStrategy:typing.Union[AbstractTargetDirectoryStrategy,None] = None,\n\t\tcfg:ThaniyaClientCfg = None,\n\t\t):\n\n\t\tself.__targetDirStrategy = None\t\t\t# AbstractTargetDirectoryStrategy\n\t\tself.__backupConnector = None\t\t\t# AbstractBackupConnector\n\t\tself.__backupConnectorParameters = None\t# dict\n\n\t\tself.__setTargetDirStrategy(targetDirStrategy)\n\t\tself.__setConnector(backupConnector, backupConnectorParameters)\n\n\t\tif cfg is None:\n\t\t\tcfg = ThaniyaClientCfg.load()\n\t\tself.__cfg = cfg\n\t#\n\n\t################################################################################################################################\n\t## Low Level Helper 
Methods\n\t################################################################################################################################\n\n\t#\n\t# This method is invoked by __init__().\n\t#\n\tdef __setTargetDirStrategy(self, targetDirStrategy:typing.Union[AbstractTargetDirectoryStrategy,None]):\n\t\tif targetDirStrategy is None:\n\t\t\ttargetDirStrategy = TargetDirectoryStrategy_StaticDir()\n\t\telse:\n\t\t\tassert isinstance(targetDirStrategy, AbstractTargetDirectoryStrategy)\n\n\t\tself.__targetDirStrategy = targetDirStrategy\n\t#\n\n\t#\n\t# This method is invoked by __init__().\n\t#\n\tdef __setConnector(self, backupConnector:AbstractBackupConnector, backupConnectorParameters:dict = None):\n\t\tassert isinstance(backupConnector, AbstractBackupConnector)\n\n\t\tif backupConnectorParameters is None:\n\t\t\tbackupConnectorParameters = {}\n\t\telse:\n\t\t\tassert isinstance(backupConnectorParameters, dict)\n\n\t\tif backupConnector.needsToBeRoot:\n\t\t\tif os.geteuid() != 0:\n\t\t\t\traise Exception(\"Need to be root to use backup connector \" + repr(backupConnector.__class__.__name__) + \"!\")\n\n\t\tself.__backupConnector = backupConnector\n\t\tself.__backupConnectorParameters = backupConnectorParameters\n\t#\n\n\t################################################################################################################################\n\n\t#\n\t# This method is invoked by __perform_deinitialize().\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __writeLogToFiles(self,\n\t\tbufferLogger:jk_logging.BufferLogger,\n\t\teffectiveTargetDirPath:str,\n\t\tfileMode:typing.Union[int,str,jk_utils.ChModValue,None],\n\t\tlog:jk_logging.AbstractLogger,\n\t\t):\n\n\t\t# TODO: use safe writing mechanisms provided by jk_utils\n\n\t\ttextFilePath = os.path.join(effectiveTargetDirPath, PLAINTEXT_LOG_FILE_NAME)\n\t\ttextFilePathTemp = textFilePath + \".tmp\"\n\t\tjsonFilePath = os.path.join(effectiveTargetDirPath, JSON_LOG_FILE_NAME)\n\t\tjsonFilePathTemp = jsonFilePath + \".tmp\"\n\n\t\tjsonLogData = bufferLogger.getDataAsPrettyJSON()\n\n\t\tlog.notice(\"Writing to: \" + textFilePath)\n\t\tlog.notice(\"Writing to: \" + jsonFilePath)\n\n\t\tbAppendToExistingFile = False\n\t\tlogMsgFormatter = None\n\n\t\t# ----\n\n\t\twith open(jsonFilePathTemp, \"w\") as f:\n\t\t\tjson.dump(jsonLogData, f, indent=\"\\t\")\n\t\tif fileMode is not None:\n\t\t\tos.chmod(jsonFilePathTemp, fileMode.toInt())\n\t\tos.rename(jsonFilePathTemp, jsonFilePath)\n\n\t\t# ----\n\n\t\tfileLogger = jk_logging.FileLogger.create(\n\t\t\ttextFilePathTemp,\n\t\t\t\"none\",\n\t\t\tbAppendToExistingFile,\n\t\t\tFalse,\n\t\t\tfileMode,\n\t\t\tlogMsgFormatter,\n\n\t\t)\n\t\tbufferLogger.forwardTo(fileLogger)\n\t\tfileLogger.close()\n\t\tif fileMode is not None:\n\t\t\tos.chmod(textFilePathTemp, fileMode.toInt())\n\t\tos.rename(textFilePathTemp, textFilePath)\n\t#\n\n\t#\n\t# This method is invoked by testConnector() and performBackup().\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __checkBackupIdentifierE(self, backupIdentifier:str):\n\t\tif re.match(\"^[a-zA-Z_\\.\\-+\\(\\)\\[\\]\\{\\}]+$\", backupIdentifier):\n\t\t\treturn\n\t\traise Exception(\"Invalid backup identifier specified: \" + repr(backupIdentifier))\n\t#\n\n\t#\n\t# This method is invoked by __perform_initialize().\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef __buildAndCheckEffectiveTargetDirPath(self, ctx:ThaniyaBackupContext, bd2:BD2, bSimulate:bool, bAllowOverwriteOldBackup:bool = True) -> str:\n\t\t# select the target directory where we will store the 
data. the variable \"effectiveTargetDirPath\"\n\t\t# will receive the directory selected by the target directory strategy. we will write data there.\n\n\t\twith ctx.descend(\"Selecting target directory\") as ctx2:\n\t\t\tsTmp = self.__targetDirStrategy.selectEffectiveTargetDirectory(bd2)\n\t\t\tassert isinstance(sTmp, str)\n\t\t\tif sTmp:\n\t\t\t\tassert sTmp[0] not in [ \"/\", \"\\\\\", \".\" ]\n\t\t\t\tassert not os.path.isabs(sTmp)\n\t\t\t\teffectiveTargetDirPath = os.path.join(self.__backupConnector.baseTargetDirPath, sTmp)\n\t\t\telse:\n\t\t\t\teffectiveTargetDirPath = self.__backupConnector.baseTargetDirPath\n\t\t\tctx.log.notice(\"Selected target directory: \" + repr(effectiveTargetDirPath))\n\t\t\t\n\t\t\t# verify that we have the correct directory: the \"effectiveTargetDirPath\" must be located somewhere within\n\t\t\t# the mounted directory tree.\n\n\t\t\tif effectiveTargetDirPath.endswith(\"/\"):\n\t\t\t\teffectiveTargetDirPath2 = effectiveTargetDirPath\n\t\t\telse:\n\t\t\t\teffectiveTargetDirPath2 = effectiveTargetDirPath + \"/\"\n\t\t\tassert effectiveTargetDirPath2.startswith(self.__backupConnector.baseTargetDirPath2)\n\n\t\t\t# check that the target directory fits our requirements: it must be empty.\n\n\t\t\tif os.path.isdir(effectiveTargetDirPath):\n\t\t\t\tbIsEmpty, contentEntries = ThaniyaIO.checkIfDirIsEmpty(ctx2, effectiveTargetDirPath)\n\t\t\t\tif not bIsEmpty:\n\t\t\t\t\tif STATS_JSON_FILE_NAME in contentEntries:\n\t\t\t\t\t\t# target directory already seems to contain a backup \n\t\t\t\t\t\tctx2.log.warn(\"Target directory already seems to contain a backup: \" + effectiveTargetDirPath2)\n\t\t\t\t\t\tif not bSimulate:\n\t\t\t\t\t\t\tif bAllowOverwriteOldBackup:\n\t\t\t\t\t\t\t\tctx2.log.warn(\"Overwriting this backup.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\traise Exception(\"Target directory already seems to contain a backup: \" + effectiveTargetDirPath2)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"Backup directory contains various non-backup files or directories!\")\n\n\t\t# ----\n\n\t\treturn effectiveTargetDirPath\n\t#\n\n\t################################################################################################################################\n\t## High Level Helper Methods\n\t################################################################################################################################\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __perform_calcDiskSpaceRequired(self, bd2:BD2, backupTasks:typing.List[AbstractThaniyaTask]) -> int:\n\t\twith ProcessingContext(\n\t\t\ttext=\"Calculating disk space required\",\n\t\t\tbd2=bd2,\n\t\t\tbMeasureDuration=True,\n\t\t\tstatsDurationKey=\"d0_calcDiskSpace\"\n\t\t) as ctx:\n\t\t\tnExpectedBytesToWrite = 0\n\t\t\tfor job in backupTasks:\n\t\t\t\t#assert isinstance(job, AbstractThaniyaTask)\n\t\t\t\tAssert.isInstance(job, AbstractThaniyaTask)\n\n\t\t\t\tnestedCtx = ctx.descend(job.logMessageCalculateSpaceRequired)\n\t\t\t\twith nestedCtx.log as nestedLog:\n\t\t\t\t\tnExpectedBytesToWrite += job.calculateSpaceRequired(nestedCtx)\n\n\t\t\tctx.log.info(\"Estimated total size of backup: \" + jk_utils.formatBytes(nExpectedBytesToWrite))\n\n\t\t\tbd2.statsContainer.setValue(\"expectedBytesToWrite\", nExpectedBytesToWrite)\n\n\t\t\t# ----\n\n\t\t\tctx.log.notice(\"Done.\")\n\n\t\treturn nExpectedBytesToWrite\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __perform_initialize(self, bd2:BD2, nExpectedBytesToWrite:typing.Union[int, None]):\n\t\twith ProcessingContext(\n\t\t\ttext=\"Connecting to 
backup repository and preparing backup\",\n\t\t\tbd2=bd2,\n\t\t\tbMeasureDuration=True,\n\t\t\tstatsDurationKey=\"d1_connectAndPrepare\"\n\t\t) as ctx:\n\t\t\t# mount the remote file system\n\n\t\t\tif nExpectedBytesToWrite is None:\n\t\t\t\tnExpectedBytesToWrite = 1024\n\n\t\t\twith ctx.descend(\"Initializing connection ...\") as ctx2:\n\t\t\t\tself.__backupConnector.initialize(ctx2, nExpectedBytesToWrite, self.__backupConnectorParameters)\n\n\t\t\tif self.__backupConnector.performsMountUnmount:\n\t\t\t\t# connector performs mounting and unmounting\n\t\t\t\tassert self.__backupConnector.mountDirPath is not None\n\t\t\t\t# remember mount path\n\t\t\t\tbd2.mountDirPath = self.__backupConnector.mountDirPath\n\t\t\telse:\n\t\t\t\t# no mounting -> mount directory should be None\n\t\t\t\tassert self.__backupConnector.mountDirPath is None\n\n\t\t\tassert self.__backupConnector.baseTargetDirPath is not None\n\t\t\tbd2.baseTargetDirPath = self.__backupConnector.baseTargetDirPath\n\n\t\t\tif not self.__backupConnector.isReady:\n\t\t\t\traise Exception(\"Backup connector unexpectedly not ready for writing!\")\n\n\t\t\t# select the target directory where we will store the data. the variable \"effectiveTargetDirPath\"\n\t\t\t# will receive the directory selected by the target directory strategy. we will write data there.\n\t\t\t# verify that we have the correct directory: the \"effectiveTargetDirPath\" must be located somewhere within\n\t\t\t# the mounted directory tree.\n\t\t\t# check that the target directory fits our requirements: it must be empty.\n\n\t\t\tbd2.effectiveTargetDirPath = self.__buildAndCheckEffectiveTargetDirPath(ctx, bd2, True)\n\n\t\t\t# ensure that the directory exists\n\n\t\t\tThaniyaIO.ensureDirExists(ctx, bd2.effectiveTargetDirPath, jk_utils.ChModValue(\"rwx------\"))\n\n\t\t\t# now we are ready. 
but before we begin doing something let's write the backup stats first.\n\n\t\t\tfilePath = os.path.join(bd2.effectiveTargetDirPath, STATS_JSON_FILE_NAME)\n\t\t\tctx.log.notice(\"Writing to: \" + filePath)\n\t\t\tbd2.statsContainer.writeToFile(filePath)\n\n\t\t\t# ----\n\n\t\t\tctx.log.notice(\"Done.\")\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __perform_backup(self, bd2:BD2, backupTasks:typing.List[AbstractThaniyaTask]):\n\n\t\t# NOTE: we need to access this context later as it calculates the duration and we need this information separately to log it.\n\t\tprocessingContext = ProcessingContext(\n\t\t\ttext=\"Writing the backup data\",\n\t\t\tbd2=bd2,\n\t\t\tbMeasureDuration=True,\n\t\t\tstatsDurationKey=\"d2_backup\"\n\t\t)\n\n\t\twith processingContext as ctx:\n\n\t\t\tfor job in backupTasks:\n\t\t\t\tAssert.isInstance(job, AbstractThaniyaTask)\n\n\t\t\t\twith ctx.descend(job.logMessagePerformBackup) as nestedCtx:\n\t\t\t\t\tjob.performBackup(nestedCtx)\n\n\t\t\tctx.log.notice(\"All backup tasks completed.\")\n\n\t\t\t# calculate statistics\n\n\t\t\twith ctx.log.descend(\"Calculating size of backup performed ...\") as nestedLog:\n\t\t\t\tnTotalBytesWritten = jk_utils.fsutils.getFolderSize(bd2.effectiveTargetDirPath)\n\n\t\t\tfDuration = processingContext.duration\n\t\t\tif (nTotalBytesWritten > 0) and (fDuration > 0):\n\t\t\t\tfAvgWritingSpeed = nTotalBytesWritten/fDuration\n\t\t\t\tsAvgWritingSpeed = jk_utils.formatBytesPerSecond(fAvgWritingSpeed)\n\t\t\telse:\n\t\t\t\tfAvgWritingSpeed = None\n\t\t\t\tsAvgWritingSpeed = \"n/a\"\n\n\t\t\tctx.log.info(\"Total bytes written: \" + jk_utils.formatBytes(nTotalBytesWritten))\n\t\t\tctx.log.info(\"Average writing speed: \" + sAvgWritingSpeed)\n\n\t\t\tbd2.statsContainer.setValue(\"totalBytesWritten\", nTotalBytesWritten)\n\t\t\tbd2.statsContainer.setValue(\"avgWritingSpeed\", fAvgWritingSpeed)\n\n\t\t\t# ----\n\n\t\t\tctx.log.notice(\"Done.\")\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __perform_finalizeBackup(self, bd2:BD2):\n\t\twith ProcessingContext(\n\t\t\ttext=\"Finalizing backup\",\n\t\t\tbd2=bd2,\n\t\t\tbMeasureDuration=False,\n\t\t\tstatsDurationKey=None\n\t\t) as ctx:\n\n\t\t\t# detecting errors\n\n\t\t\tbHasError = bd2.hasError\n\t\t\tbHasWarning = bd2.hasWarning\n\n\t\t\t# storing final data\n\n\t\t\tbd2.statsContainer.setValue(\"tEnd\", time.time())\n\t\t\tbd2.statsContainer.setValue(\"bSuccess\", not bHasError)\n\n\t\t\t# writing final status log message\n\n\t\t\tif bHasError:\n\t\t\t\tctx.log.error(\"Backup terminated with errors.\")\n\t\t\telse:\n\t\t\t\tif bHasWarning:\n\t\t\t\t\tctx.log.warning(\"There were warnings!\")\n\t\t\t\tctx.log.success(\"Backup successfully completed.\")\n\n\t\t\t# let's try to write the backup stats before termination.\n\n\t\t\tif bd2.effectiveTargetDirPath is not None:\n\t\t\t\twith ctx.descend(\"Writing stats ...\") as ctx2:\n\t\t\t\t\tfilePath = os.path.join(bd2.effectiveTargetDirPath, STATS_JSON_FILE_NAME)\n\t\t\t\t\tctx2.log.notice(\"Writing to: \" + filePath)\n\t\t\t\t\tbd2.statsContainer.writeToFile(filePath)\n\n\t\t\t# let's try to write the backup log before termination.\n\n\t\t\tif bd2.effectiveTargetDirPath is not None:\n\t\t\t\twith ctx.descend(\"Writing log ...\") as ctx2:\n\t\t\t\t\tself.__writeLogToFiles(bd2.blog, bd2.effectiveTargetDirPath, None, ctx2.log)\n\n\t\t\t# ----\n\n\t\t\tctx.log.notice(\"Done.\")\n\t#\n\n\t@jk_typing.checkFunctionSignature()\n\tdef __perform_deinitialize(self, bd2:BD2):\n\t\twith ProcessingContext(\n\t\t\ttext=\"Disconnecting and 
cleaning up\",\n\t\t\tbd2=bd2,\n\t\t\tbMeasureDuration=False,\n\t\t\tstatsDurationKey=None\n\t\t) as ctx:\n\t\t\tmountDirPath = None\n\t\t\tmounter = None\n\t\t\tif self.__backupConnector.performsMountUnmount:\n\t\t\t\tmountDirPath = self.__backupConnector.mountDirPath\n\t\t\t\tmounter = jk_mounting.Mounter()\n\t\t\t\tassert mounter.isMounted(mountDirPath)\n\n\t\t\t# terminate connection\n\n\t\t\twith ctx.descend(\"Terminating connection ...\") as ctx2:\n\t\t\t\tself.__backupConnector.deinitialize(ctx2)\n\n\t\t\t# verify that a mounted directory has been unmounted as expected\n\n\t\t\tif bd2.mountDirPath:\n\t\t\t\tmounter.refresh()\n\t\t\t\tif mounter.isMounted(bd2.mountDirPath):\n\t\t\t\t\tctx.log.error(\"DEINITIALIZATION FAILED! Directory is still mounted: \" + bd2.mountDirPath)\n\t\t\t\t\tctx.log.error(\"This is a bug! Please contact bugs@binary-overflow.de and report this bug!\")\n\n\t\t\t#try:\n\t\t\t\t#if self.__backupConnector.performsMountUnmount:\n\t\t\t\t#\tThaniyaIO.removeEmptyDir(ctx, self.__mountDirPath)\n\t\t\t#except Exception as ee:\n\t\t\t#\tbError = True\n\n\t\t\t# ----\n\n\t\t\tctx.log.notice(\"Done.\")\n\t#\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\t#\n\t# Perform a test of the connector.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef testConnector(self,\n\t\tbackupIdentifier:str\n\t\t) -> bool:\n\n\t\tself.__checkBackupIdentifierE(backupIdentifier)\n\n\t\tmainLog = jk_logging.ConsoleLogger.create(logMsgFormatter=jk_logging.COLOR_LOG_MESSAGE_FORMATTER)\n\n\t\twith BD2(self.__cfg, backupIdentifier, mainLog) as bd2:\n\n\t\t\ttry:\n\n\t\t\t\tself.__perform_initialize(bd2, None)\n\t\t\t\treturn True\n\n\t\t\tfinally:\n\t\t\t\tself.__perform_deinitialize(bd2)\n\t#\n\n\t#\n\t# Perform a backup.\n\t#\n\t@jk_typing.checkFunctionSignature()\n\tdef performBackup(self,\n\t\tbackupIdentifier:str,\n\t\tbackupTasks:typing.List[AbstractThaniyaTask],\n\t\t) -> bool:\n\n\t\tself.__checkBackupIdentifierE(backupIdentifier)\n\n\t\tmainLog = jk_logging.ConsoleLogger.create(logMsgFormatter=jk_logging.COLOR_LOG_MESSAGE_FORMATTER)\n\n\t\twith BD2(self.__cfg, backupIdentifier, mainLog) as bd2:\n\t\t\tnExpectedBytesToWrite = self.__perform_calcDiskSpaceRequired(bd2, backupTasks)\n\t\t\tassert nExpectedBytesToWrite >= 0\n\n\t\t\ttry:\n\n\t\t\t\tself.__perform_initialize(bd2, nExpectedBytesToWrite)\n\t\t\t\tself.__perform_backup(bd2, backupTasks)\n\t\t\t\tself.__perform_finalizeBackup(bd2)\n\t\t\t\treturn True\n\n\t\t\tfinally:\n\t\t\t\tself.__perform_deinitialize(bd2)\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"thaniya_client/src/thaniya_client/ThaniyaBackupDriver.py","file_name":"ThaniyaBackupDriver.py","file_ext":"py","file_size_in_byte":16273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"374095561","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/nodeshot/networking/services/models/url.py\n# Compiled at: 2014-05-08 09:12:23\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom nodeshot.core.base.models import BaseDate\nfrom choices import APPLICATION_PROTOCOLS, 
TRANSPORT_PROTOCOLS\n\nclass Url(BaseDate):\n service = models.ForeignKey('services.Service', verbose_name=_('service'))\n transport = models.CharField(_('transport protocol'), max_length=5, choices=TRANSPORT_PROTOCOLS, default=TRANSPORT_PROTOCOLS[1][0])\n application = models.CharField(_('application protocol'), max_length=20, choices=APPLICATION_PROTOCOLS)\n ip = models.ForeignKey('net.Ip', verbose_name=_('ip address'))\n port = models.IntegerField(_('port'), blank=True, null=True)\n path = models.CharField(_('path'), max_length=50, blank=True)\n domain = models.CharField(_('domain'), max_length=50, blank=True)\n\n class Meta:\n app_label = 'services'\n db_table = 'service_urls'\n verbose_name = _('url')\n verbose_name_plural = _('urls')\n\n def __unicode__(self):\n value = ''\n if self.application:\n value = '%s://' % self.application\n if self.domain:\n value += self.domain\n else:\n if self.ip.protocol == 'ipv4':\n encaps = '%s'\n else:\n encaps = '[%s]'\n value += encaps % self.ip.address\n if self.port:\n value += ':%s' % self.port\n return value","sub_path":"pycfiles/nodeshot-1.0.pre-alpha.linux-x86_64.tar/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"56431897","text":"import copy\nimport inspect\nfrom collections import OrderedDict\ntry:\n from torch.nn import Sequential as tSeq\n from pipeline.component.nn.backend.torch import optim, init, nn\n from pipeline.component.nn.backend.torch import operation\n from pipeline.component.nn.backend.torch.base import Sequential, get_torch_instance\n from pipeline.component.nn.backend.torch.cust import CustModel, CustLoss\n from pipeline.component.nn.backend.torch.interactive import InteractiveLayer\nexcept ImportError:\n pass\n\n\ndef recover_layer_from_dict(nn_define, nn_dict):\n\n init_param_dict = copy.deepcopy(nn_define)\n if 'layer' in nn_define:\n class_name = nn_define['layer']\n init_param_dict.pop('layer')\n elif 'op' in nn_define:\n class_name = nn_define['op']\n init_param_dict.pop('op')\n else:\n raise ValueError(\n 'no layer or operation info found in nn define, please check your layer config and make'\n 'sure they are correct for pytorch backend')\n\n if 'initializer' in init_param_dict:\n init_param_dict.pop('initializer')\n\n # find corresponding class\n if class_name == CustModel.__name__:\n nn_layer_class = CustModel\n elif class_name == InteractiveLayer.__name__:\n nn_layer_class = InteractiveLayer\n else:\n nn_layer_class = nn_dict[class_name]\n\n # create layer or Module\n if nn_layer_class == CustModel: # converto to pytorch model\n layer: CustModel = CustModel(module_name=init_param_dict['module_name'],\n class_name=init_param_dict['class_name'],\n **init_param_dict['param'])\n layer = layer.get_pytorch_model()\n elif nn_layer_class == InteractiveLayer:\n layer: InteractiveLayer = InteractiveLayer(**init_param_dict)\n else:\n layer = get_torch_instance(nn_layer_class, init_param_dict)\n\n # initialize if there are configs\n if 'initializer' in nn_define:\n if 'weight' in nn_define['initializer']:\n init_para = nn_define['initializer']['weight']\n init_func = init.str_fate_torch_init_func_map[init_para['init_func']]\n init_func(layer, **init_para['param'])\n\n if 'bias' in nn_define['initializer']:\n init_para = nn_define['initializer']['bias']\n init_func = init.str_fate_torch_init_func_map[init_para['init_func']]\n init_func(layer, init='bias', **init_para['param'])\n\n return layer, 
class_name\n\n\ndef recover_sequential_from_dict(nn_define):\n nn_define_dict = nn_define\n nn_dict = dict(inspect.getmembers(nn))\n op_dict = dict(inspect.getmembers(operation))\n nn_dict.update(op_dict)\n\n class_name_list = []\n try:\n # submitted model have int prefixes, they make sure that layers are in\n # order\n add_dict = OrderedDict()\n keys = list(nn_define_dict.keys())\n keys = sorted(keys, key=lambda x: int(x.split('-')[0]))\n for k in keys:\n layer, class_name = recover_layer_from_dict(nn_define_dict[k], nn_dict)\n add_dict[k] = layer\n class_name_list.append(class_name)\n except BaseException:\n add_dict = OrderedDict()\n for k, v in nn_define_dict.items():\n layer, class_name = recover_layer_from_dict(v, nn_dict)\n add_dict[k] = layer\n class_name_list.append(class_name)\n\n if len(class_name_list) == 1 and class_name_list[0] == CustModel.__name__:\n # If there are only a CustModel, return the model only\n return list(add_dict.values())[0]\n else:\n return tSeq(add_dict)\n\n\ndef recover_optimizer_from_dict(define_dict):\n opt_dict = dict(inspect.getmembers(optim))\n from federatedml.util import LOGGER\n LOGGER.debug('define dict is {}'.format(define_dict))\n if 'optimizer' not in define_dict:\n raise ValueError('please specify optimizer type in the json config')\n opt_class = opt_dict[define_dict['optimizer']]\n param_dict = copy.deepcopy(define_dict)\n if 'optimizer' in param_dict:\n param_dict.pop('optimizer')\n if 'config_type' in param_dict:\n param_dict.pop('config_type')\n return opt_class(**param_dict)\n\n\ndef recover_loss_fn_from_dict(define_dict):\n loss_fn_dict = dict(inspect.getmembers(nn))\n if 'loss_fn' not in define_dict:\n raise ValueError('please specify loss function in the json config')\n param_dict = copy.deepcopy(define_dict)\n param_dict.pop('loss_fn')\n if define_dict['loss_fn'] == CustLoss.__name__:\n return CustLoss(loss_module_name=param_dict['loss_module_name'],\n class_name=param_dict['class_name'],\n **param_dict['param']).get_pytorch_model()\n else:\n return loss_fn_dict[define_dict['loss_fn']](**param_dict)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"python/fate_client/pipeline/component/nn/backend/torch/serialization.py","file_name":"serialization.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"638945031","text":"from typing import Dict, List\n\nfrom boa3.builtin import public\nfrom boa3.builtin.interop.iterator import Iterator\n\n\n@public\ndef concat_iterators(x: List[str], y: Dict[int, bool]) -> Iterator:\n it1 = Iterator(x)\n it2 = Iterator(y)\n\n return it1.concat(it2)\n\n\n@public\ndef concat_and_get_result(x: List[str], y: Dict[int, bool]) -> dict:\n new_map: dict = {}\n it = concat_iterators(x, y)\n\n while it.next():\n k = it.key\n v = it.value\n new_map[k] = v\n\n return new_map\n","sub_path":"boa3_test/test_sc/interop_test/iterator/IteratorConcatWithDefinedTypes.py","file_name":"IteratorConcatWithDefinedTypes.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"330336164","text":"from django.shortcuts import render, render_to_response, redirect\nfrom django.core.context_processors import csrf\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core import serializers\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Count\nfrom 
django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom news.forms import *\nfrom news.models import *\n\n# normal views\n\n@login_required\ndef news(request):\n news = News.objects.filter(status=True)\n paginator = Paginator(news, 3)\n page = request.GET.get('page')\n try:\n items = paginator.page(page)\n except PageNotAnInteger:\n items = paginator.page(1)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n\n batch = {'news':items}\n \n return render_to_response('news/news.html',batch,context_instance=RequestContext(request))\n\n@login_required\ndef my_news_items(request):\n news = News.objects.filter(created_by=request.user)\n paginator = Paginator(news, 6)\n page = request.GET.get('page')\n try:\n items = paginator.page(page)\n except PageNotAnInteger:\n items = paginator.page(1)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n\n batch = {'news':items}\n \n return render_to_response('news/news.html',batch,context_instance=RequestContext(request))\n\n@login_required\ndef news_item(request, news_id):\n item = News.objects.get(id=int(news_id))\n comments = Comment.objects.filter(news_item=item).filter(status=True)\n if request.method == 'POST':\n comment_form = NewsCommentForm(request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n new_comment.created_by = request.user\n new_comment.news_item = item\n new_comment.save()\n return HttpResponseRedirect('')\n else:\n comment_form = NewsCommentForm()\n \n batch = {'news_item':item,\n 'comments':comments,\n 'comment_form':comment_form}\n batch.update(csrf(request))\n\n return render_to_response('news/news_item.html',batch,context_instance=RequestContext(request))\n\n@login_required\ndef create_news(request):\n if request.method == 'POST':\n news_form = NewsCreateForm()\n if request.POST.get('save'):\n news_form = NewsCreateForm(request.POST)\n if news_form.is_valid():\n new_news = news_form.save(commit=False)\n new_news.created_by = request.user\n new_news.save()\n return HttpResponseRedirect(reverse('news'))\n elif request.POST.get('cancel'):\n pass\n else:\n news_form = NewsCreateForm()\n\n batch = {'news_form':news_form}\n batch.update(csrf(request))\n \n return render_to_response('news/create_news.html',batch,context_instance=RequestContext(request))\n\n@login_required\ndef edit_news(request, n_id):\n instance = News.objects.get(id=int(n_id))\n if request.method == 'POST':\n news_form = NewsCreateForm(instance=instance)\n if request.POST.get('save'):\n news_form = NewsCreateForm(request.POST, instance=instance)\n if news_form.is_valid():\n new_news = news_form.save(commit=False)\n new_news.save()\n return HttpResponseRedirect(reverse('news'))\n elif request.POST.get('cancel'):\n pass\n else:\n news_form = NewsCreateForm(instance=instance)\n\n batch = {'news_form':news_form}\n batch.update(csrf(request))\n \n return render_to_response('news/create_news.html',batch,context_instance=RequestContext(request))\n\n@login_required\ndef delete_comment(request, comment_id):\n comment = Comment.objects.get(id=int(comment_id))\n comment.status = False\n comment.save()\n messages.success(request, 'Comment Deleted.')\n return redirect('news_item', comment.news_item.id)\n","sub_path":"news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} 
+{"seq_id":"216754198","text":"\"\"\"\r\nFiles\r\n\"\"\"\r\n\r\nimport os\r\nimport time\r\nfrom pathlib import Path\r\n\r\nfilePath = str(Path(os.path.dirname(__file__)).parent.joinpath(\"_Temp\", \"files.txt\"))\r\n\r\nfileOut = open(filePath, \"w+\")\r\nfileOut.write(\"{}\\n\\n\".format(\"_\" * 40))\r\nfileOut.write(\"Backup created: \")\r\nfileOut.write(time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"\\n\")\r\nfileOut.write(\"{}\\n\\n\".format(\"_\" * 40))\r\n\r\nfileIn = open(os.path.realpath(__file__), \"r\")\r\nfor line in fileIn:\r\n print(line)\r\n fileOut.write(line) \r\nfileIn.close()\r\nfileOut.close()\r\n","sub_path":"9_Files/D_files.py","file_name":"D_files.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"125690475","text":"import Preprocessing as pre\r\nimport TermWeighting as termW\r\nfrom Output import Output\r\n\r\nsource = open(\"source.txt\", \"r\")\r\nsource = source.read()\r\noutput = Output()\r\n\r\ndocuments = pre.tokenization(source)\r\noutput.write_pre(documents, \"tokenization\")\r\n\r\ndocuments = pre.filtering(documents)\r\noutput.write_pre(documents, \"filtering\")\r\n\r\ndocuments = pre.stemming(documents)\r\noutput.write_pre(documents, \"stemming\")\r\n\r\nterms = pre.termFromDocuments(documents)\r\n\r\noutput.column_number = 0\r\n\r\nbinaryWeight = termW.binaryTermWeighting(terms, documents)\r\noutput.write_term_weight(terms, binaryWeight, \"Binary term frequency\")\r\n\r\nrawWeight = termW.rawTermWeighting(terms, documents)\r\noutput.write_term_weight(terms, rawWeight, \"Raw term frequency\")\r\n\r\nlogWeight = termW.logTermWeighting(terms, documents)\r\noutput.write_term_weight(terms, logWeight, \"Log term frequency\")\r\n\r\ndf = termW.documentFrequency(terms, documents)\r\noutput.write_doc_frequency(df, \"Document frequencies\")\r\n\r\nidf = termW.inverseDocumentFrequency(df, documents)\r\noutput.write_doc_frequency(idf, \"Inverse Document Frequencies\")\r\n\r\ntf_idf = termW.tf_idf(logWeight, idf)\r\noutput.write_term_weight(terms, tf_idf, \"tf * idf\")\r\n\r\noutput.save(\"result.xls\")","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"33166198","text":"#!/usr/bin/env python3\n# -*- coding:utf8 -*-\n\n#Load necessary libraries for the Datasus class\nimport ftplib as ftp\nimport re, os, platform\nimport pandas as pd\nfrom tqdm import tqdm\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom functools import lru_cache\n\nclass Datasus:\n \n #Define the main structure of the class\n\n def __init__(self, banco=None, PAGINA = 'ftp.datasus.gov.br', \\\n PUBLICO = '/dissemin/publicos'):\n\n \"\"\"\n To start the class user must provide at least one valid name \n present in the datasus (e.g to append others repo we just need\n to add a new PAGINA)\n \"\"\"\n\n self.log = {}\n self.log['Data'], self.log['Horario'], self.log['Tamanho'], \\\n self.log['Nome'], self.log['Ano'], \\\n self.log['Endereco'] = [], [], [], [], [], []\n\n self.__log = []\n try:\n\n self.__pagina = ftp.FTP(PAGINA)\n self.__pagina.login()\n self.__pagina.cwd(PUBLICO)\n self.__banco = banco\n\n\n '''\n Aqui o modulo \"os\" foi utilizado para que o interpretador\n entenda o caminho como parte do diretorio ao invés de uma\n string com contra barra\n '''\n\n self.__dir = {\n 'win_data':\n [os.path.expanduser('~\\\\Meus Documentos\\\\files_csv\\\\'),\n 
os.path.expanduser('~\\\\Meus Documentos\\\\files_db\\\\')],\n 'linux_data':\n [os.path.expanduser('~/Documentos/files_csv/'),\n os.path.expanduser('~/Documentos/files_db/')]}\n except:\n pass\n\n def load_files(self, p_bar=False):\n self.__p_bar = p_bar\n\n \"\"\"\n This function load files present in the current directory.\n Right now this took many time to run since it will 'read' all \n files in a given repository\n \"\"\"\n try:\n self.__pagina.cwd(self.__banco)\n self.__pagina.dir(self.__log.append)\n self.__list_data(self.__log)\n\n except ftp.error_perm:\n print ('diretorio invalido')\n\n def __list_data(self, lista):\n\n \"\"\"\n Structure the information avaliable in the repo.\n \"\"\"\n if self.__p_bar == True:\n for i in tqdm(lista):\n if i.split()[3].endswith(('.dbc','.DBC','.DBF','.dbf')):\n self.log['Data'].append(i.split()[0]),\n self.log['Horario'].append(i.split()[1]),\n self.log['Tamanho'].append(i.split()[2]),\n self.log['Nome'].append(i.split()[3]),\n self.log['Endereco'].append(self.__pagina.pwd())\n if re.search(r\"\\d+\",i.split()[3]):\n self.log['Ano'].append(re.findall(r\"\\d+\",\\\n i.split()[3])[0])\n else:\n self.log[\"Ano\"].append(None)\n\n elif i.split()[3].endswith(('.dbc','.DBC','.DBF', \\\n '.dbf')) == False:\n try:\n self.__log = []\n self.__pagina.cwd(i.split()[3])\n self.__pagina.dir(self.__log.append)\n self.__list_data(self.__log)\n self.__pagina.cwd('..')\n except:\n pass\n else:\n break\n\n elif self.__p_bar == False:\n for i in lista:\n if i.split()[3].endswith(('.dbc','.DBC','.DBF','.dbf')):\n self.log['Data'].append(i.split()[0]),\n self.log['Horario'].append(i.split()[1]),\n self.log['Tamanho'].append(i.split()[2]),\n self.log['Nome'].append(i.split()[3]),\n self.log['Endereco'].append(self.__pagina.pwd())\n if re.search(r\"\\d+\",i.split()[3]):\n self.log['Ano'].append(re.findall(r\"\\d+\", \\\n i.split()[3])[0])\n else:\n self.log[\"Ano\"].append(None)\n\n elif i.split()[3].endswith(('.dbc','.DBC','.DBF', \\\n '.dbf')) == False:\n\n try:\n self.__log = []\n self.__pagina.cwd(i.split()[3])\n self.__pagina.dir(self.__log.append)\n self.__list_data(self.__log)\n self.__pagina.cwd('..')\n except:\n pass\n else:\n break\n\n def write_file(self, path):\n self.__dir_onSystem()\n\n \"\"\"\n A function to write \n \"\"\"\n\n try:\n file_csv = pd.DataFrame.from_dict(self.log)\n if platform.system().lower() == 'linux':\n file_csv.to_csv(self.__dir['linux_data'][0] + path + \".csv\",\\\n index = False)\n else:\n file_csv.to_csv(self.__dir['win_data'][0] + \\\n path + \".csv\", index = False)\n except:\n print(\"No file to write\")\n\n def __dir_onSystem(self):\n '''\n Função que identifica o sistema e posteriormente cria os\n diretórios de acordo com a estrutura do sistema encontrado\n '''\n if platform.system().lower() == 'linux':\n self.__sysLinux()\n\n else:\n self.__sysWindows()\n\n def __sysLinux(self):\n '''\n Função que cria o diretorio no sistema linux\n '''\n try:\n os.mkdir(self.__dir['linux_data'][0])\n os.mkdir(self.__dir['linux_data'][1])\n except:\n pass\n\n def __sysWindows(self):\n '''\n Função que cria o diretorio no sistema windows\n '''\n try:\n os.mkdir(self.__dir['win_data'][0])\n os.mkdir(self.__dir['win_data'][1])\n except:\n pass\n\n def download(self, *args):\n self.__dir_onSystem()\n '''\n Verifica e realiza download para as pastas setadas anteriormente\n \n ainda estou pensando nos argumentos extras :@\n '''\n\n if platform.system().lower() == 'linux':\n self.__verify_and_download(self.__dir['linux_data'][1])\n\n else:\n 
self.__verify_and_download(self.__dir['win_data'][1])\n\n def __verify_and_download(self, diretorio):\n for i,j in zip(self.log['Nome'], self.log['Endereco']):\n #print (f'{i}\\t\\t{j}')\n if os.path.isfile(diretorio + f'{i}'):\n print (f'O arquivo {i} ja existe')\n\n else:\n self.__pagina.cwd(j)\n self.__pagina.retrbinary('RETR ' + i \\\n ,open(diretorio + i, 'wb').write)\n","sub_path":"pydbsus.py","file_name":"pydbsus.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"585133036","text":"\"\"\"zgrab2 scanner http\"\"\"\n\n# -*- coding:utf-8 -*-\n\nimport json\nimport os\nimport traceback\n\nfrom datacontract.iscoutdataset import IscoutTask\n\nfrom .....clientdatafeedback.scoutdatafeedback import PortInfo, SiteInfo\nfrom ..zgrab2parser import Zgrab2ParserHttp, Zgrab2ParserTls\nfrom .zgrab2scannerbase import Zgrab2ScannerBase\n\n\nclass Zgrab2ScannerHttp(Zgrab2ScannerBase):\n \"\"\"zgrab2 http scanner\"\"\"\n\n def __init__(self, zgrab_path: str):\n Zgrab2ScannerBase.__init__(self, \"zgrab2http\")\n self._parser_http: Zgrab2ParserHttp = Zgrab2ParserHttp()\n self._parser_tls: Zgrab2ParserTls = Zgrab2ParserTls()\n\n def get_banner_http(\n self,\n task: IscoutTask,\n level,\n portinfo: PortInfo,\n *args,\n zgrab2path: str = \"zgrab2\",\n sudo: bool = False,\n timeout: float = 600,\n ) -> iter:\n \"\"\"scan http services and get the banner\"\"\"\n hostfi = None\n outfi = None\n try:\n port: int = portinfo._port\n if not isinstance(port, int) or port < 0 or port > 65535:\n raise Exception(\"Invalid port: {}\".format(port))\n\n hosts: list = []\n for d in portinfo.domains:\n if not d in hosts:\n hosts.append(d)\n if len(hosts) < 1:\n # scan ip is not good, only scan them when\n # no domain is available\n hosts.append(portinfo._host)\n for h in portinfo.hostnames:\n if not h in hosts:\n hosts.append(h)\n\n hostfi = self._write_hosts_to_file(task, hosts)\n if hostfi is None:\n return\n\n outfi = self._scan_http(\n task,\n level,\n hostfi,\n port,\n *args,\n zgrab2path=zgrab2path,\n sudo=sudo,\n timeout=timeout,\n )\n if outfi is None or not os.path.isfile(outfi):\n return\n\n self._parse_result(task, level, portinfo, outfi)\n\n except Exception:\n self._logger.error(\"Scan http error: {}\".format(traceback.format_exc()))\n finally:\n if not hostfi is None and os.path.isfile(hostfi):\n os.remove(hostfi)\n if not outfi is None and os.path.isfile(outfi):\n os.remove(outfi)\n\n #################################\n # scan\n\n def _scan_http(\n self,\n task: IscoutTask,\n level,\n host_file: str,\n port: int,\n *args,\n zgrab2path: str = \"zgrab2\",\n sudo: bool = False,\n timeout: float = 600,\n ) -> str:\n \"\"\"scan the ips or domains, and write the output files to specified output directory.\n host_file: the full path of a file with list of ['1.1.1.1','www.xxx.com'] in the file per line\n port: '80' or '443'\n outfi: result file path\n \"\"\"\n outfi: str = None\n exitcode = None\n try:\n enhanced_args = []\n\n # add hosts and ports to args\n enhanced_args.append(\"http\")\n enhanced_args.append(\"--port=%s\" % port)\n\n # zgrab2 http 192.168.40.114 --port=8020 --endpoint='/' --heartbleed\n # --extended-master-secret --extended-random --max-redirects=2\n # --session-ticket --follow-localhost-redirects --retry-https --timeout=30\n # --user-agent=\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36\"\n # -f ./a.list -o ./a.json\n\n if not 
\"--endpoint=\" in args:\n enhanced_args.append(\"--endpoint='/'\")\n if not \"--heartbleed\" in args:\n enhanced_args.append(\"--heartbleed\")\n if not \"--extended-master-secret\" in args:\n enhanced_args.append(\"--extended-master-secret\")\n if not \"--extended-random\" in args:\n enhanced_args.append(\"--extended-random\")\n if not \"--max-redirects=\" in args:\n enhanced_args.append(\"--max-redirects=2\")\n if not \"--session-ticket\" in args:\n enhanced_args.append(\"--session-ticket\")\n if not \"--retry-https\" in args:\n enhanced_args.append(\"--retry-https\")\n if not \"--timeout=\" in args:\n enhanced_args.append(\"--timeout=30\")\n if not \"--user-agent=\" in args:\n enhanced_args.append(\n '--user-agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\"'\n )\n\n enhanced_args.extend(args)\n\n if not \"--input-file=\" in args or \"-f\" in args:\n enhanced_args.append(\"-f %s\" % host_file) # input file\n\n outfi = os.path.join(self._tmpdir, \"{}_{}.http\".format(task.batchid, port))\n if not \"--output-file=\" in args or \"-o\" in args:\n # here must use -o, use '--output-file' will cause exception 'No such file or directory'\n # this may be a bug\n enhanced_args.append(\"-o %s\" % outfi) # output file\n\n outdir = os.path.dirname(outfi)\n if not os.path.exists(outdir) or not os.path.isdir(outdir):\n os.makedirs(outdir)\n\n curr_process = None\n try:\n\n curr_process = self._run_process(\n zgrab2path, *enhanced_args, rootDir=outdir, sudo=sudo\n )\n stdout, stderr = curr_process.communicate(timeout=timeout)\n exitcode = curr_process.wait(timeout=timeout)\n if not stdout is None:\n self._logger.trace(stdout)\n if not stderr is None:\n self._logger.trace(stderr)\n if exitcode != 0:\n raise Exception(\"Scan HTTP error: %s\\n%s\" % (stdout, stderr))\n self._logger.info(\n \"Scan HTTP exitcode={}\\ntaskid:{}\\nbatchid:{}\\nport:{}\".format(\n str(exitcode), task.taskid, task.batchid, port\n )\n )\n finally:\n if not curr_process is None:\n curr_process.kill()\n except Exception:\n if not outfi is None and os.path.isfile(outfi):\n os.remove(outfi)\n outfi = None\n self._logger.info(\n \"Scan HTTP exitcode={}\\ntaskid:{}\\nbatchid:{}\\nport:{}\".format(\n str(exitcode), task.taskid, task.batchid, port\n )\n )\n\n return outfi\n\n #################################\n # parse\n\n def _parse_result(self, task: IscoutTask, level: int, portinfo: PortInfo, outfi):\n \"\"\"parse http infor and ssl info\"\"\"\n try:\n\n if not os.path.isfile(outfi):\n self._logger.error(\n \"Resultfi not exists:\\ntaskid:{}\\nbatchid:{}\\nresultfi:{}\".format(\n task.taskid, task.batchid, outfi\n )\n )\n return\n\n # its' one json object per line\n linenum = 1\n with open(outfi, mode=\"r\") as fs:\n while True:\n try:\n line = fs.readline()\n if line is None or line == \"\":\n break\n\n sj = json.loads(line)\n if sj is None:\n continue\n\n # self._parser_http._parse_http(sj, portinfo)\n self._parse_http(task, sj, portinfo)\n\n # do not parse ssl certificate here,\n # cuz already got tls information\n # self._parser_tls._parse_cert(sj, portinfo)\n # self._parse_tls(task, sj, portinfo)\n\n except Exception:\n self._logger.error(\n \"Parse one http banner json line error:\\ntaskid:{}\\nbatchid:{}\\nresultfi:{}\\nlinenum:{}\\nerror:{}\".format(\n task.taskid,\n task.batchid,\n outfi,\n linenum,\n traceback.format_exc(),\n )\n )\n finally:\n linenum += 1\n except Exception:\n self._logger.error(\n \"Parse http result 
error:\\ntaskid:{}\\nbatchid:{}\\nresultfi:{}\".format(\n                    task.taskid, task.batchid, outfi\n                )\n            )\n\n    def _parse_http(self, task: IscoutTask, sj, portinfo: PortInfo):\n        \"\"\"parse site(http) info\"\"\"\n        try:\n            self._parser_http._parse_http(sj, portinfo)\n        except Exception:\n            self._logger.error(\n                \"Parse http site result error:\\ntaskid:{}\\nbatchid:{}\\nerror:{}\".format(\n                    task.taskid, task.batchid, traceback.format_exc()\n                )\n            )\n\n    def _parse_tls(self, task: IscoutTask, sj, portinfo: PortInfo):\n        \"\"\"parse tls certificate info\"\"\"\n        try:\n            if not sj.__contains__(\"data\") or not sj[\"data\"].__contains__(\"http\"):\n                return\n            if sj[\"data\"][\"http\"][\"status\"] != \"success\":\n                return\n\n            sjresp = sj[\"data\"][\"http\"][\"result\"][\"response\"]\n\n            if not sjresp.__contains__(\"request\") or not sjresp[\"request\"].__contains__(\n                \"tls_log\"\n            ):\n                return\n\n            sjtls = sjresp[\"request\"][\"tls_log\"]\n            sjhandshake = sjtls.get(\"handshake_log\")\n            if sjhandshake is None or len(sjhandshake) < 1:\n                return\n\n            self._parser_tls._parse_cert(sjhandshake, portinfo)\n        except Exception:\n            self._logger.error(\n                \"Parse http tls result error:\\ntaskid:{}\\nbatchid:{}\\nerror:{}\".format(\n                    task.taskid, task.batchid, traceback.format_exc()\n                )\n            )\n","sub_path":"savecode/threeyears/idownclient/scout/plugin/zgrab2/zgrab2scanner/zgrab2scannerhttp.py","file_name":"zgrab2scannerhttp.py","file_ext":"py","file_size_in_byte":10258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"73275562","text":"import wx\n\n\nclass SetupSchoolDetails(wx.Panel):\n\n    def __init__(self, parent):\n        wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.Size(533, 374),\n                          style=wx.TAB_TRAVERSAL)\n\n        self.parent = parent\n\n        container = wx.BoxSizer(wx.VERTICAL)\n\n        self.school_dets_label = wx.StaticText(self, wx.ID_ANY, u\"School Details\", wx.DefaultPosition, wx.DefaultSize,\n                                               wx.ALIGN_CENTRE)\n        self.school_dets_label.Wrap(-1)\n        self.school_dets_label.SetFont(wx.Font(14, 70, 90, 92, False, wx.EmptyString))\n\n        container.Add(self.school_dets_label, 0, wx.ALL | wx.EXPAND, 30)\n\n        self.navigation_disclaimer = wx.StaticText( self, wx.ID_ANY, u\"(Please use buttons to navigate)\", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTRE )\n        self.navigation_disclaimer.Wrap(-1)\n\n        container.Add(self.navigation_disclaimer, 0, wx.ALL | wx.EXPAND, 5)\n\n        content_Sizer = wx.BoxSizer(wx.HORIZONTAL)\n\n        content_Sizer.AddSpacer((0, 0), 1, wx.EXPAND, 5)\n\n        formSizer = wx.BoxSizer(wx.VERTICAL)\n\n        formSizer.AddSpacer((0, 0), 1, wx.EXPAND, 5)\n\n        sbSizer15 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, wx.EmptyString), wx.VERTICAL)\n\n        self.school_name_label = wx.StaticText(sbSizer15.GetStaticBox(), wx.ID_ANY, u\"School Name\", wx.DefaultPosition,\n                                               wx.DefaultSize, 0)\n        self.school_name_label.Wrap(-1)\n        sbSizer15.Add(self.school_name_label, 0, wx.ALL, 10)\n\n        self.school_name = wx.TextCtrl(sbSizer15.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition,\n                                       wx.DefaultSize, 0)\n        sbSizer15.Add(self.school_name, 0, wx.ALL | wx.EXPAND, 10)\n\n        self.no_of_subjects_label = wx.StaticText(sbSizer15.GetStaticBox(), wx.ID_ANY,\n                                                  u\"Number of subjects done by lower forms\", wx.DefaultPosition,\n                                                  wx.DefaultSize, 0)\n        self.no_of_subjects_label.Wrap(-1)\n        sbSizer15.Add(self.no_of_subjects_label, 0, wx.ALL, 10)\n\n        self.no_of_subjects = wx.TextCtrl(sbSizer15.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition,\n                                          wx.DefaultSize, 0)\n        
sbSizer15.Add(self.no_of_subjects, 0, wx.ALL | wx.EXPAND, 10)\n\n formSizer.Add(sbSizer15, 1, wx.EXPAND, 5)\n\n formSizer.AddSpacer((0, 0), 5, wx.EXPAND, 5)\n\n content_Sizer.Add(formSizer, 3, wx.EXPAND, 5)\n\n content_Sizer.AddSpacer((0, 0), 1, wx.EXPAND, 5)\n\n container.Add(content_Sizer, 1, wx.EXPAND, 5)\n\n self.SetSizer(container)\n self.Layout()\n\n def __del__(self):\n pass\n","sub_path":"initialization/SetupSchoolDetails.py","file_name":"SetupSchoolDetails.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"65190545","text":"from mcoLH import *\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n# Specify the states\nstates = [2**x for x in range(2,9)]\n\n# number of states\nr = len(states)\n\n# training size\nL = 10000\n# test size \n\nT = [500,10**3,10**4,10**5,10**6]\nb = len(T)\n# number of simulation\nconstviol = []\nsparsity_ratio = []\nnumsim = np.zeros(r)\nerr = np.zeros((r,b))\nerr_fixed = np.zeros((r,b))\nl = 0\nfor i in states:\n k = 0\n naive_vec = np.zeros(b)\n fixed_vec = np.zeros(b)\n for j in T:\n M, _ =createrandomDTMC(i-1,broken=False)\n ts = sampleDTMC(M,L,1)\n ts_test = sampleDTMC(M,j,1)\n ts[L] = i-1\n ts_test[j] = i - 1\n _, LTerr_test,_ = test_naiveDTMC(ts,ts_test,wantST=False)\n _, LTerr_test_fixed,constrviol,_,sparsity= test_fixedDTMC(ts,ts_test,wantST=False)\n \n naive_vec[k] = LTerr_test\n fixed_vec[k] = LTerr_test_fixed\n k += 1\n err[l,] = naive_vec\n err_fixed[l,] = fixed_vec\n l += 1\n\nind = np.array([500,1000,10000,100000,1000000]) \nind1 = ind - 1 \n\ndat = pd.DataFrame(err,index=states,columns=ind)\ndat = dat.rename_axis('# of states').rename_axis('DTMC simulation',axis='columns')\ndat_fixed = pd.DataFrame(err_fixed,index=states,columns=ind)\ndat_fixed = dat_fixed.rename_axis('# of states').rename_axis('DTMC simulation',axis='columns')\nprint(dat.to_latex(column_format='lccccc'))\nprint(dat_fixed.to_latex(column_format='lccccc'))\n\n\n\n","sub_path":"hsb/longts_dtmc.py","file_name":"longts_dtmc.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"523901428","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import int\nfrom builtins import str\nfrom future import standard_library\nstandard_library.install_aliases()\n\nimport json\nimport traceback\n\nfrom flask import request\nfrom flask_restx import Namespace, Resource, fields, inputs\n\nfrom hysds.celery import app as celery_app\nfrom hysds.task_worker import do_submit_task\nimport hysds_commons.job_utils\nfrom hysds_commons.action_utils import check_passthrough_query\n\nfrom mozart import app, mozart_es\nimport mozart.lib.queue_utils\n\n\nJOB_NS = \"job\"\njob_ns = Namespace(JOB_NS, description=\"Mozart job operations\")\n\nQUEUE_NS = \"queue\"\nqueue_ns = Namespace(QUEUE_NS, description=\"Mozart queue operations\")\n\nON_DEMAND_NS = \"on-demand\"\non_demand_ns = Namespace(ON_DEMAND_NS, description=\"For retrieving and submitting on-demand jobs for mozart\")\n\nHYSDS_IOS_INDEX = app.config['HYSDS_IOS_INDEX']\nJOB_SPECS_INDEX = app.config['JOB_SPECS_INDEX']\nJOB_STATUS_INDEX = app.config['JOB_STATUS_INDEX']\nCONTAINERS_INDEX = app.config['CONTAINERS_INDEX']\n\n\n@job_ns.route('/list', endpoint='job-list')\n@job_ns.doc(responses={200: 
\"Success\", 500: \"Query execution failed\"},\n description=\"Get list of submitted job IDs.\")\nclass GetJobs(Resource):\n \"\"\"Get list of job IDs.\"\"\"\n\n resp_model = job_ns.model('Jobs Listing Response(JSON)', {\n 'success': fields.Boolean(required=True, description=\"if 'false' encountered exception; \"\n \"otherwise no errors occurred\"),\n 'message': fields.String(required=True, description=\"message describing success or failure\"),\n 'result': fields.List(fields.String, required=True, description=\"list of job IDs\")\n })\n parser = job_ns.parser()\n parser.add_argument('page_size', type=str, help=\"Job Listing Pagination Size\")\n parser.add_argument('offset', type=str, help=\"Job Listing Pagination Offset\")\n\n @job_ns.marshal_with(resp_model)\n def get(self):\n \"\"\"Paginated list submitted jobs\"\"\"\n jobs = mozart_es.query(index=JOB_STATUS_INDEX, _source=False)\n return {\n 'success': True,\n 'message': \"\",\n 'result': sorted([job[\"_id\"] for job in jobs])\n }\n\n\n@job_ns.route('/user/', endpoint='user-jobs')\n@job_ns.doc(responses={200: \"Success\", 500: \"Query execution failed\"}, description=\"Get list of user submitted job IDs\")\nclass UserJobs(Resource):\n parser = job_ns.parser()\n parser.add_argument('offset', type=int, help=\"Job Listing Pagination Offset\", default=0, required=False)\n parser.add_argument('page_size', type=int, help=\"Job Listing Pagination Size\", default=250, required=False)\n parser.add_argument('type', type=str, help=\"job type + version (ie. topsapp:v1.0)\", required=False)\n parser.add_argument('tag', type=str, help=\"user defined job tag\", required=False)\n parser.add_argument('queue', type=str, help=\"submitted job queue\", required=False)\n parser.add_argument('priority', type=int, help=\"job priority, 0-9\", required=False)\n parser.add_argument('start_time', type=str, help=\"start time of @timestamp field\", required=False)\n parser.add_argument('end_time', type=str, help=\"start time of @timestamp field\", required=False)\n parser.add_argument('status', type=str, help=\"job status, ie. 
job-queued, job-started, job-completed, \"\n \"job-failed\", required=False)\n\n @job_ns.expect(parser)\n def get(self, user):\n \"\"\"\n return user submitted jobs from ElasticSearch (sorted by @timestamp desc)\n \"\"\"\n offset = request.args.get('offset')\n page_size = request.args.get('page_size')\n job_type = request.args.get('job_type')\n tag = request.args.get('tag')\n queue = request.args.get('queue')\n priority = request.args.get('priority')\n start_time = request.args.get('start_time')\n end_time = request.args.get('end_time')\n status = request.args.get('status')\n\n if offset:\n try:\n offset = int(offset)\n except (ValueError, TypeError):\n return {'success': False, 'message': 'offset must be an int'}, 400\n if page_size:\n try:\n page_size = int(page_size)\n if page_size > 250:\n page_size = 250\n except (ValueError, TypeError):\n return {'success': False, 'message': 'page_size must be an int'}, 400\n if priority:\n try:\n priority = int(priority)\n if priority > 9:\n priority = 9\n except (ValueError, TypeError):\n return {'success': False, 'message': 'priority must be an int'}, 400\n\n query = {\n \"sort\": [\n {\"@timestamp\": {\"order\": \"desc\"}}\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"term\": {\n \"job.username\": user\n }\n }\n ]\n }\n }\n }\n\n if offset:\n query['from'] = offset\n if page_size:\n query['size'] = page_size\n if tag:\n query['query']['bool']['must'].append({\"term\": {\"tags.keyword\": tag}})\n if job_type:\n query['query']['bool']['must'].append({\"term\": {\"job.type\": job_type}})\n if priority:\n query['query']['bool']['must'].append({\"term\": {\"job.priority\": priority}})\n if status:\n query['query']['bool']['must'].append({\"term\": {\"status\": status}})\n if queue:\n query['query']['bool']['must'].append({\"term\": {\"job.job_info.job_queue\": queue}})\n if start_time is not None or end_time is not None:\n datetime_filter = {'range': {'@timestamp': {}}}\n if start_time:\n if start_time.isdigit():\n start_time = int(start_time)\n datetime_filter['range']['@timestamp']['gte'] = start_time\n if end_time:\n if end_time.isdigit():\n end_time = int(end_time)\n datetime_filter['range']['@timestamp']['lte'] = end_time\n query['query']['bool']['must'].append(datetime_filter)\n\n try:\n res = mozart_es.search(index=JOB_STATUS_INDEX, body=query, _source=['tags'])\n except Exception as e:\n return {'success': False, 'message': str(e), 'result': []}, 400\n return {\n 'success': True,\n 'result': [{\n 'id': doc['_id'],\n 'tags': doc['_source']['tags']\n } for doc in res['hits']['hits']]\n }\n\n\n@job_ns.route('/submit', endpoint='job-submit')\n@job_ns.doc(responses={200: \"Success\", 400: \"Invalid parameters\", 500: \"Job submission failed\"},\n description=\"Submit job for execution in HySDS.\")\nclass SubmitJob(Resource):\n \"\"\"Submit job for execution in HySDS.\"\"\"\n\n resp_model = job_ns.model('SubmitJobResponse', {\n 'success': fields.Boolean(required=True, description=\"if 'false' encountered exception; \"\n \"otherwise no errors occurred\"),\n 'message': fields.String(required=True, description=\"message describing success or failure\"),\n 'result': fields.String(required=True, description=\"HySDS job ID\"),\n 'tags': fields.Raw(required=True, description='Submitted job tag')\n })\n\n parser = job_ns.parser()\n parser.add_argument('type', required=True, type=str, help=\"a job type from jobspec/list\")\n parser.add_argument('queue', required=True, type=str, help=\"Job queue from /queue/list e.g. 
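The `UserJobs.get` handler above builds its Elasticsearch query by appending `term` clauses to a `bool.must` list. The same assembly can be pulled into a standalone helper (a sketch for illustration, not part of Mozart) so the query shape is testable without a cluster:

```python
def build_user_jobs_query(user, tag=None, job_type=None, status=None):
    """Mirror of the filter assembly in UserJobs.get, minus pagination."""
    query = {
        "sort": [{"@timestamp": {"order": "desc"}}],
        "query": {"bool": {"must": [{"term": {"job.username": user}}]}},
    }
    must = query["query"]["bool"]["must"]
    if tag:
        must.append({"term": {"tags.keyword": tag}})
    if job_type:
        must.append({"term": {"job.type": job_type}})
    if status:
        must.append({"term": {"status": status}})
    return query

print(build_user_jobs_query("ops", tag="nightly")["query"]["bool"]["must"])
```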
grfn-job_worker-small\")\n parser.add_argument('priority', type=int, help='Job priority in the range of 0 to 9')\n parser.add_argument('tags', type=str, help='JSON list of tags, e.g. [\"dumby\", \"test_job\"]')\n parser.add_argument('name', type=str, help='base job name override; defaults to job type')\n parser.add_argument('payload_hash', type=str, help='user-generated payload hash')\n parser.add_argument('username', type=str, help='user to submit job')\n parser.add_argument('enable_dedup', type=bool, help='flag to enable/disable job dedup')\n parser.add_argument('soft_time_limit', type=str, help='soft time limit for job execution')\n parser.add_argument('time_limit', type=str, help='hard time limit for job execution')\n parser.add_argument('disk_usage', type=str, help='disk usage for PGE (KB, MB, GB, etc)')\n parser.add_argument('params', type=str,\n help=\"\"\"JSON job context, e.g. {\n \"entity_id\": \"LC80101172015002LGN00\",\n \"min_lat\": -79.09923,\n \"max_lon\": -125.09297,\n \"id\": \"dumby-product-20161114180506209624\",\n \"acq_time\": \"2015-01-02T15:49:05.571384\",\n \"min_sleep\": 1,\n \"max_lat\": -77.7544,\n \"min_lon\": -139.66082,\n \"max_sleep\": 10\n }\"\"\")\n\n @job_ns.marshal_with(resp_model)\n @job_ns.expect(parser, validate=True)\n def post(self):\n \"\"\"Submits a job to run inside HySDS\"\"\"\n job_type = request.form.get('type', request.args.get('type', None))\n job_queue = request.form.get('queue', request.args.get('queue', None))\n\n priority = int(request.form.get('priority', request.args.get('priority', 0)))\n tags = request.form.get('tags', request.args.get('tags', None))\n\n username = request.form.get('username', request.args.get('username', None))\n\n job_name = request.form.get('name', request.args.get('name', None))\n\n payload_hash = request.form.get('payload_hash', request.args.get('payload_hash', None))\n enable_dedup = str(request.form.get('enable_dedup', request.args.get('enable_dedup', \"true\")))\n\n soft_time_limit = request.form.get('soft_time_limit', request.args.get('soft_time_limit', None))\n time_limit = request.form.get('time_limit', request.args.get('time_limit', None))\n disk_usage = request.form.get('disk_usage', request.args.get('disk_usage', None))\n\n try:\n if enable_dedup.strip().lower() == \"true\":\n enable_dedup = True\n elif enable_dedup.strip().lower() == \"false\":\n enable_dedup = False\n else:\n raise Exception(\"Invalid value for param 'enable_dedup': {0}\".format(enable_dedup))\n\n if soft_time_limit is not None:\n soft_time_limit = int(soft_time_limit)\n if soft_time_limit < 1:\n return {\n 'success': False,\n 'message': \"soft_time_limit must be greater than 0\"\n }, 400\n if time_limit is not None:\n time_limit = int(time_limit)\n if time_limit < 1:\n return {\n 'success': False,\n 'message': \"time_limit must be greater than 0\"\n }, 400\n\n try:\n if tags is not None:\n tags = json.loads(tags)\n except Exception as e:\n app.logger.error(str(e))\n raise Exception(\"Failed to parse input tags. '{0}' is malformed\".format(tags))\n\n params = request.form.get('params', request.args.get('params', \"{}\"))\n app.logger.warning(params)\n try:\n if params is not None:\n params = json.loads(params)\n except Exception as e:\n app.logger.error(str(e))\n raise Exception(\"Failed to parse input params. 
'{0}' is malformed\".format(params))\n\n app.logger.warning(job_type)\n app.logger.warning(job_queue)\n job_json = hysds_commons.job_utils.resolve_hysds_job(job_type, job_queue, priority, tags, params,\n username=username, job_name=job_name,\n payload_hash=payload_hash, enable_dedup=enable_dedup,\n soft_time_limit=soft_time_limit, time_limit=time_limit,\n disk_usage=disk_usage)\n ident = hysds_commons.job_utils.submit_hysds_job(job_json)\n except Exception as e:\n message = \"Failed to submit job. {0}:{1}\".format(type(e), str(e))\n app.logger.error(message)\n return {'success': False, 'message': message}, 500\n return {\n 'success': True,\n 'message': '',\n 'result': ident,\n 'tags': tags\n }\n\n\n@queue_ns.route('/list', endpoint='queue-list')\n@queue_ns.doc(responses={200: \"Success\", 500: \"Queue listing failed\"},\n description=\"Get list of available job queues and return as JSON.\")\nclass GetQueueNames(Resource):\n \"\"\"Get list of job queues and return as JSON.\"\"\"\n\n resp_model = queue_ns.model('Queue Listing Response(JSON)', {\n 'success': fields.Boolean(required=True, description=\"if 'false' encountered exception; \"\n \"otherwise no errors occurred\"),\n 'message': fields.String(required=True, description=\"message describing success or failure\"),\n 'result': fields.Raw(required=True, description=\"queue response\")\n })\n parser = queue_ns.parser()\n parser.add_argument('id', type=str, help=\"Job Type Specification ID\")\n\n @queue_ns.expect(parser)\n @queue_ns.marshal_with(resp_model)\n def get(self):\n \"\"\"Gets a listing of non-celery queues handling jobs.\"\"\"\n try:\n ident = request.form.get('id', request.args.get('id', None))\n queues = mozart.lib.queue_utils.get_queue_names(ident)\n app.logger.warn(\"Queues: \" + str(queues))\n except Exception as e:\n message = \"Failed to list job queues. 
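Seen from the client side, `SubmitJob.post` accepts form fields with `tags` and `params` encoded as JSON strings. A hypothetical `requests` call follows; the base URL, job type, and queue name are placeholders rather than values defined in this file:

```python
import json
import requests

payload = {
    "type": "hello_world:v1.0",            # placeholder job type
    "queue": "factotum-job_worker-small",  # placeholder queue name
    "priority": 5,
    "tags": json.dumps(["demo"]),
    "params": json.dumps({"message": "hi"}),
}
# The API mount point is deployment-specific; localhost is assumed here.
resp = requests.post("http://localhost:8888/job/submit", data=payload)
print(resp.json().get("result"))  # HySDS job ID on success
```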
{0}:{1}\".format(type(e), str(e))\n app.logger.warning(message)\n app.logger.warning(traceback.format_exc(e))\n return {'success': False, 'message': message}, 500\n return {\n 'success': True,\n 'message': \"\",\n 'result': queues\n }\n\n\n@job_ns.route('/status', endpoint='job-status')\n@job_ns.doc(responses={200: \"Success\", 500: \"Query execution failed\"}, description=\"Get status of job by ID.\")\nclass GetJobStatus(Resource):\n \"\"\"Get status of job ID.\"\"\"\n\n resp_model = job_ns.model('Job Status Response(JSON)', {\n 'success': fields.Boolean(required=True, description=\"if 'false' encountered exception; \"\n \"otherwise no errors occurred\"),\n 'message': fields.String(required=True, description=\"message describing success or failure\"),\n 'status': fields.String(required=True,\n enum=['job-queued', 'job-started', 'job-failed',\n 'job-completed', 'job-offline', 'job-revoked'],\n description='job status')\n })\n\n parser = job_ns.parser()\n parser.add_argument('id', required=True, type=str, help=\"Job ID\")\n\n @job_ns.expect(parser)\n @job_ns.marshal_with(resp_model)\n def get(self):\n \"\"\"Gets the status of a submitted job based on job id\"\"\"\n _id = request.form.get('id', request.args.get('id', None))\n if _id is None:\n return {'success': False, 'message': 'id not supplied'}, 400\n\n job_status = mozart_es.search_by_id(index=JOB_STATUS_INDEX, id=_id, ignore=404, _source=['status'])\n if job_status['found'] is False:\n return {\n 'success': False,\n 'message': 'job status not found: %s' % _id\n }, 404\n\n return {\n 'success': True,\n 'message': \"\",\n 'status': job_status['_source']['status']\n }\n\n\n@job_ns.route('/info', endpoint='job-info')\n@job_ns.doc(responses={200: \"Success\", 500: \"Query execution failed\"},\n description=\"Gets the complete info for a job.\")\nclass GetJobInfo(Resource):\n \"\"\"Get info of job IDs.\"\"\"\n\n resp_model = job_ns.model('Job Info Response(JSON)', {\n 'success': fields.Boolean(required=True, description=\"if 'false' encountered exception; \"\n \"otherwise no errors occurred\"),\n 'message': fields.String(required=True, description=\"message describing success or failure\"),\n 'result': fields.Raw(required=True, description=\"Job Info Object\")\n })\n parser = job_ns.parser()\n parser.add_argument('id', type=str, required=True, help=\"Job ID\")\n\n @job_ns.expect(parser)\n @job_ns.marshal_with(resp_model)\n def get(self):\n \"\"\"Get complete info for submitted job based on id\"\"\"\n _id = request.form.get('id', request.args.get('id', None))\n if _id is None:\n return {\n 'success': False,\n 'message': 'id must be supplied (as query param or url param)'\n }, 400\n\n info = mozart_es.search_by_id(index=JOB_STATUS_INDEX, id=_id, ignore=404)\n if info['found'] is False:\n return {\n 'success': False,\n 'message': 'job info not found: %s' % _id\n }, 404\n\n return {\n 'success': True,\n 'message': \"\",\n 'result': info['_source']\n }\n\n\n@job_ns.route('/products/<_id>', endpoint='products')\n@job_ns.doc(responses={200: \"Success\", 500: \"Query execution failed\"},\n description=\"Gets products staged for a job\")\nclass ProductsStaged(Resource):\n def get(self, _id):\n doc_fields = ['status', 'job.job_info.metrics.products_staged']\n prod = mozart_es.search_by_id(index=JOB_STATUS_INDEX, id=_id, _source_includes=doc_fields, ignore=404)\n app.logger.info('fetch products staged for %s' % _id)\n\n if prod['found'] is False:\n return {\n 'success': False,\n 'message': 'Job id not found: %s' % _id\n }, 404\n\n doc = 
prod['_source']\n status = doc['status']\n if status not in {'job-failed', 'job-completed'}:\n return {\n 'success': False,\n 'message': 'job has not been completed'\n }\n try:\n return {\n 'success': True,\n 'message': status,\n 'results': doc['job']['job_info']['metrics']['products_staged']\n }\n except (KeyError, Exception):\n app.logger.warning('%s does not have any products_staged' % _id)\n return {\n 'success': True,\n 'message': status,\n 'results': []\n }\n\n\n@on_demand_ns.route('', endpoint='on-demand')\n@on_demand_ns.doc(responses={200: \"Success\", 500: \"Execution failed\"}, description=\"Retrieve/submit on demand jobs\")\nclass OnDemandJobs(Resource):\n \"\"\"On Demand Jobs API. (jobs retrieval and job submission)\"\"\"\n\n resp_model = on_demand_ns.model('JsonResponse', {\n 'success': fields.Boolean(required=True, description=\"if request was successful\"),\n 'message': fields.String(required=True, description=\"message describing success or failure\")\n })\n\n parser = on_demand_ns.parser()\n parser.add_argument('tag', type=str, location=\"form\", required=True, help='job tag')\n parser.add_argument('job_type', type=str, location=\"form\", required=True, help='job spec name')\n parser.add_argument('hysds_io', type=str, location=\"form\", required=True, help='hysds io name')\n parser.add_argument('queue', type=str, location=\"form\", required=True, help='queue')\n parser.add_argument('priority', type=int, location=\"form\", required=True, help='RabbitMQ job priority (0-9)')\n parser.add_argument('query_string', type=str, location=\"form\", required=True, help='elasticsearch query')\n parser.add_argument('kwargs', type=str, location=\"form\", required=True, help='keyword arguments for PGE')\n parser.add_argument('time_limit', type=int, location=\"form\", help='time limit for PGE job')\n parser.add_argument('soft_time_limit', type=int, location=\"form\", help='soft time limit for PGE job')\n parser.add_argument('disk_usage', type=str, location=\"form\", help='memory usage required for jon (KB, MB, GB)')\n parser.add_argument('enable_dedup', type=inputs.boolean, location=\"form\", help='enable job de-duplication')\n\n def get(self):\n \"\"\"List available on demand jobs\"\"\"\n query = {\n \"_source\": [\"id\", \"job-specification\", \"label\", \"job-version\"],\n \"sort\": [{\"label.keyword\": {\"order\": \"asc\"}}],\n \"query\": {\n \"exists\": {\n \"field\": \"job-specification\"\n }\n }\n }\n\n documents = mozart_es.query(index=HYSDS_IOS_INDEX, body=query)\n documents = [{\n 'hysds_io': row['_source']['id'],\n 'job_spec': row['_source']['job-specification'],\n 'version': row['_source']['job-version'],\n 'label': row['_source']['label']\n } for row in documents]\n\n return {\n 'success': True,\n 'result': documents\n }\n\n @on_demand_ns.expect(parser)\n def post(self):\n \"\"\"\n submits on demand job\n :return: submit job id\n \"\"\"\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n tag = request_data.get('tags')\n job_type = request_data.get('job_type')\n hysds_io = request_data.get('hysds_io')\n queue = request_data.get('queue')\n priority = int(request_data.get('priority', 0))\n query_string = request_data.get('query')\n kwargs = request_data.get('kwargs', '{}')\n time_limit = request_data.get('time_limit')\n soft_time_limit = request_data.get('soft_time_limit')\n disk_usage = request_data.get('disk_usage')\n enable_dedup = request_data.get('enable_dedup')\n if enable_dedup is not None:\n try:\n enable_dedup = 
inputs.boolean(enable_dedup)\n except ValueError as e:\n return {\n 'success': False,\n 'message': str(e)\n }, 400\n\n try:\n query = json.loads(query_string)\n query_string = json.dumps(query)\n except (ValueError, TypeError, Exception) as e:\n app.logger.error(e)\n return {\n 'success': False,\n 'message': 'invalid JSON query'\n }, 400\n\n if tag is None or job_type is None or hysds_io is None or queue is None or query_string is None:\n return {\n 'success': False,\n 'message': 'missing field: [tags, job_type, hysds_io, queue, query]'\n }, 400\n\n doc = mozart_es.get_by_id(index=HYSDS_IOS_INDEX, id=hysds_io, ignore=404)\n if doc['found'] is False:\n app.logger.error('failed to fetch %s, not found in hysds_ios' % hysds_io)\n return {\n 'success': False,\n 'message': '%s not found' % hysds_io\n }, 404\n\n params = doc['_source']['params']\n is_passthrough_query = check_passthrough_query(params)\n\n rule = {\n 'username': 'ops',\n 'workflow': hysds_io,\n 'priority': priority,\n 'enabled': True,\n 'job_type': job_type,\n 'rule_name': tag,\n 'kwargs': kwargs,\n 'query_string': query_string,\n 'query': query,\n 'passthru_query': is_passthrough_query,\n 'query_all': False,\n 'queue': queue\n }\n\n if time_limit and isinstance(time_limit, int):\n if time_limit <= 0 or time_limit > 86400 * 7:\n return {\n 'success': False,\n 'message': 'time_limit must be between 0 and 604800 (sec)'\n }, 400\n else:\n rule['time_limit'] = time_limit\n\n if soft_time_limit and isinstance(soft_time_limit, int):\n if soft_time_limit <= 0 or soft_time_limit > 86400 * 7:\n return {\n 'success': False,\n 'message': 'soft_time_limit must be between 0 and 604800 (sec)'\n }, 400\n else:\n rule['soft_time_limit'] = soft_time_limit\n\n if disk_usage:\n rule['disk_usage'] = disk_usage\n if enable_dedup is not None:\n rule['enable_dedup'] = enable_dedup\n\n payload = {\n 'type': 'job_iterator',\n 'function': 'hysds_commons.job_iterator.iterate',\n 'args': [\"figaro\", rule],\n }\n\n on_demand_job_queue = celery_app.conf['ON_DEMAND_JOB_QUEUE']\n celery_task = do_submit_task(payload, on_demand_job_queue)\n\n return {\n 'success': True,\n 'message': 'task submitted successfully',\n 'result': celery_task.id\n }\n\n\n@on_demand_ns.route('/job-params', endpoint='job-params')\n@on_demand_ns.doc(responses={200: \"Success\", 500: \"Execution failed\"},\n description=\"Retrieve on job params for specific jobs\")\nclass JobParams(Resource):\n \"\"\"Job Params API.\"\"\"\n\n resp_model = on_demand_ns.model('JsonResponse', {\n 'success': fields.Boolean(required=True, description=\"if request was processed successfully\"),\n 'message': fields.String(required=True, description=\"message describing success or failure\"),\n 'objectid': fields.String(required=True, description=\"ID of indexed dataset\"),\n 'index': fields.String(required=True, description=\"dataset index name\"),\n })\n\n parser = on_demand_ns.parser()\n parser.add_argument('job_type', type=str, required=True, help='job tag')\n\n @on_demand_ns.expect(parser)\n def get(self):\n job_type = request.args.get('job_type')\n if not job_type:\n return {'success': False, 'message': 'job_type not provided'}, 400\n\n query = {\n \"query\": {\n \"term\": {\"job-specification.keyword\": job_type}\n }\n }\n hysds_io = mozart_es.search(index=HYSDS_IOS_INDEX, body=query)\n\n if hysds_io['hits']['total']['value'] == 0:\n return {\n 'success': False,\n 'message': '%s not found in hysds_ios' % job_type\n }, 404\n\n hysds_io = hysds_io['hits']['hits'][0]\n job_params = 
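Both limit checks in `OnDemandJobs.post` reject values outside `(0, 86400 * 7]`. Factored out as a standalone validator (same bounds and error message as the handler above, but a sketch rather than project code):

```python
MAX_LIMIT = 86400 * 7  # one week, in seconds

def validate_time_limit(value, name="time_limit"):
    """Accept None or an int in (0, MAX_LIMIT]; raise otherwise."""
    if value is None:
        return None
    if not isinstance(value, int) or value <= 0 or value > MAX_LIMIT:
        raise ValueError(f"{name} must be between 0 and {MAX_LIMIT} (sec)")
    return value

print(validate_time_limit(3600))   # 3600
# validate_time_limit(10 ** 7)     # would raise ValueError
```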
hysds_io['_source']['params']\n job_params = list(filter(lambda x: x['from'] == 'submitter', job_params))\n\n job_spec = mozart_es.get_by_id(index=JOB_SPECS_INDEX, id=job_type, ignore=404)\n if job_spec.get('found', False) is False:\n return {\n 'success': False,\n 'message': '%s not found in job_specs' % job_type\n }, 404\n\n return {\n 'success': True,\n 'submission_type': hysds_io['_source'].get('submission_type'),\n 'hysds_io': hysds_io['_source']['id'],\n 'params': job_params,\n 'time_limit': job_spec['_source']['time_limit'],\n 'soft_time_limit': job_spec['_source']['soft_time_limit'],\n 'disk_usage': job_spec['_source']['disk_usage'],\n 'enable_dedup': hysds_io['_source'].get('enable_dedup', True)\n }\n","sub_path":"mozart/services/api_v01/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":27966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"173113469","text":"from django.shortcuts import redirect, render\nfrom .forms import AdvertiseForm\nfrom .models import Advertise\nfrom django.contrib import auth, messages\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\ndef advertise(request):\n if request.method == \"POST\":\n form = AdvertiseForm(request.POST)\n if form.is_valid():\n advertises = Advertise()\n advertises.user = request.user\n advertises.adv_heading = form.cleaned_data['adv_heading']\n advertises.adv_descriptions = form.cleaned_data['adv_descriptions']\n advertises.adv_conclude = form.cleaned_data['adv_conclude']\n advertises.adv_start_date = form.cleaned_data['adv_start_date']\n advertises.adv_end_date = form.cleaned_data['adv_end_date']\n advertises.adv_category = form.cleaned_data['adv_category']\n advertises.adv_images = form.cleaned_data['adv_images']\n advertises.save()\n else:\n return redirect('advertise')\n else:\n form = AdvertiseForm()\n\n context = {\n 'form':form\n }\n return render(request, 'advertise/my_advertise.html', context)\n\n\n@login_required(login_url='login')\ndef advertise_detail(request, category_slug, advertise_slug):\n try:\n adv = Advertise.objects.get(adv_category__slug=category_slug, slug=advertise_slug)\n except Exception as e:\n raise e\n context = {\n 'adv':adv\n }\n return render(request, 'advertise/my_advertise_detail.html', context)\n","sub_path":"advertise/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"337640277","text":"import re\n\n\nlistaPalavrasReservadas = ['int', 'float', 'if', 'while', 'for', 'union', 'struct', 'define', 'function']\nlistaSeparadores = [' ', '\\n', '[', ']', '(', ')', ';']\nresultado = []\n\n\ndef verificaPR(token):\n if token in listaPalavrasReservadas:\n return True\n return False\n\ndef verificaID(token):\n if re.fullmatch('[a-zA-Z]+([0-9a-zA-Z_-])*', token):\n return True\n return False\ndef verificaInt(token):\n if re.fullmatch('[0-9]+', token):\n return True\n return False\ndef verificaFloat(token):\n if (re.fullmatch('[0-9]+\\.[0-9]+', token)):\n return True\n return False\ndef verificaString(token):\n if len(token) > 0:\n if token[0] == \"'\" and token[-1] == \"'\":\n return True\n elif token[0] == '\"' and token[-1] == '\"':\n return True\n return False\n\n\ndef verificaToken(token):\n if verificaPR(token):\n return ('Palavra Reservada', token)\n if verificaID(token):\n return ('Identificador', token)\n if verificaInt(token):\n return ('Int', token)\n if 
verificaFloat(token):\n return ('Float', token)\n if verificaString(token):\n return ('String', token)\n\n\ndef procuraComentarioMultiLine(entrada):\n for indice, letra in enumerate(entrada):\n if letra == '/' and indice+1 0\n pred += 1\n target += 1\n pred = pred * (target > 0)\n\n inter = pred * (pred == target)\n (area_inter, _) = np.histogram(inter, bins=num_class, range=(1, num_class))\n (area_pred, _) = np.histogram(pred, bins=num_class, range=(1, num_class))\n (area_target, _) = np.histogram(target, bins=num_class, range=(1, num_class))\n area_union = area_pred + area_target - area_inter\n\n return (area_inter, area_union)\n\n\nif __name__ == '__main__':\n model = torch.load(voc_checkpoint_dir)\n model.to(device).eval()\n val_data = VOCSegmentation('data/', image_set='val', h=None, w=None)\n\n val_loader = DataLoader(val_data, batch_size=1, shuffle=False)\n\n iou_values = {}\n\n with torch.no_grad():\n for idx, (data, target) in enumerate(val_loader):\n data = data.to(device)\n output = model(data)\n\n output_predictions = output.argmax(1).detach().cpu()\n intersect, union = intersect_and_union(output_predictions, target, num_classes)\n union[union == 0] = 1\n iou = intersect/union\n avg_iou = np.mean(iou)\n iou_values[avg_iou] = idx\n\n sorted_values = sorted(iou_values, reverse=display_best)\n\n sample_data = val_data[iou_values[sorted_values[0]]]\n sample = sample_data[0].to(device)\n sample_output = sample_data[1]\n with torch.no_grad():\n output = model(sample.unsqueeze(0))[0]\n output_predictions = output.argmax(0)\n\n # create a color pallette, selecting a color for each class\n palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n colors = torch.as_tensor([i for i in range(21)])[:, None] * palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n\n # plot the semantic segmentation predictions of 21 classes in each color\n r = Image.fromarray(output_predictions.byte().cpu().numpy())\n r.putpalette(colors)\n\n s = Image.fromarray(sample_output.byte().cpu().numpy())\n s.putpalette(colors)\n\n plt.subplot(3, 3, 1)\n plt.title('Original')\n plt.imshow(sample.detach().cpu().transpose(0, 1).transpose(1, 2).long())\n plt.subplot(3, 3, 2)\n plt.title('Prediction')\n plt.imshow(r)\n plt.subplot(3, 3, 3)\n plt.title('Target')\n plt.imshow(s)\n\n sample_data = val_data[iou_values[sorted_values[1]]]\n sample = sample_data[0].to(device)\n sample_output = sample_data[1]\n with torch.no_grad():\n output = model(sample.unsqueeze(0))[0]\n output_predictions = output.argmax(0)\n\n r = Image.fromarray(output_predictions.byte().cpu().numpy())\n r.putpalette(colors)\n\n s = Image.fromarray(sample_output.byte().cpu().numpy())\n s.putpalette(colors)\n\n plt.subplot(3, 3, 4)\n plt.title('Original')\n plt.imshow(sample.detach().cpu().transpose(0, 1).transpose(1, 2).long())\n plt.subplot(3, 3, 5)\n plt.title('Prediction')\n plt.imshow(r)\n plt.subplot(3, 3, 6)\n plt.title('Target')\n plt.imshow(s)\n\n sample_data = val_data[iou_values[sorted_values[2]]]\n sample = sample_data[0].to(device)\n sample_output = sample_data[1]\n with torch.no_grad():\n output = model(sample.unsqueeze(0))[0]\n output_predictions = output.argmax(0)\n\n r = Image.fromarray(output_predictions.byte().cpu().numpy())\n r.putpalette(colors)\n\n s = Image.fromarray(sample_output.byte().cpu().numpy())\n s.putpalette(colors)\n\n plt.subplot(3, 3, 7)\n plt.title('Original')\n plt.imshow(sample.detach().cpu().transpose(0, 1).transpose(1, 2).long())\n plt.subplot(3, 3, 8)\n plt.title('Prediction')\n plt.imshow(r)\n 
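The histogram-based intersect-and-union recipe above can be sanity-checked on synthetic labels: when the prediction equals the target, every class should come out at IoU 1.0. A self-contained sketch of the same arithmetic (labels already shifted into 1..num_class, as the function arranges internally):

```python
import numpy as np

num_class = 3
target = np.array([1, 1, 2, 2, 3, 3])
pred = target.copy()  # perfect prediction

inter = pred * (pred == target)
area_inter, _ = np.histogram(inter, bins=num_class, range=(1, num_class))
area_pred, _ = np.histogram(pred, bins=num_class, range=(1, num_class))
area_target, _ = np.histogram(target, bins=num_class, range=(1, num_class))
union = area_pred + area_target - area_inter
union[union == 0] = 1
print(area_inter / union)  # -> [1. 1. 1.]
```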
plt.subplot(3, 3, 9)\n plt.title('Target')\n plt.imshow(s)\n\n plt.show()","sub_path":"visualisevoc.py","file_name":"visualisevoc.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"622829679","text":"import argparse\nimport logging\nimport math\nimport os\nimport time\n\nimport torch\nimport torch.nn as nn\n\nimport Optim\nfrom utils import DataUtility\nfrom models import LSTNet_pytorch # used in eval\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\nDEFAULT_MODEL_PARAMETER_FILE = 'model/model.pt'\n\n\ndef load_checkpoint(filename=DEFAULT_MODEL_PARAMETER_FILE):\n # Note: Input model & optimizer should be pre-defined. This routine only updates their states.\n start_epoch = 0\n losslogger = None\n if os.path.isfile(filename):\n logger.debug(\"=> loading checkpoint '{}'\".format(filename))\n checkpoint = torch.load(filename)\n start_epoch = checkpoint.get('epoch', 0)\n model = checkpoint.get('model')\n model.load_state_dict(checkpoint['state_dict'])\n optim = checkpoint.get('optim')\n losslogger = checkpoint.get('losslogger')\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(filename, checkpoint['epoch']))\n else:\n logger.debug(\"=> no checkpoint found at '{}'\".format(filename))\n\n return model, optim, start_epoch, losslogger\n\n# --data data/exchange_rate.txt --save save/exchange_rate.pt --hidCNN 50 --hidRNN 50 --L1Loss False --output_fun None\n\ndef evaluate(data, X, Y, model, evaluateL2, evaluateL1, batch_size):\n model.eval()\n total_loss = 0\n total_loss_l1 = 0\n n_samples = 0\n predict = None\n test = None\n\n for X, Y in data.get_batches(X, Y, batch_size, False):\n output = model(X)\n if predict is None:\n predict = output\n test = Y\n else:\n predict = torch.cat((predict, output))\n test = torch.cat((test, Y))\n\n scale = data.scale.expand(output.size(0), data.m)\n total_loss += evaluateL2(output * scale, Y * scale).item()\n total_loss_l1 += evaluateL1(output * scale, Y * scale).item()\n n_samples += (output.size(0) * data.m)\n rse = math.sqrt(total_loss / n_samples) / data.rse\n rae = (total_loss_l1 / n_samples) / data.rae\n\n predict = predict.data.cpu().numpy()\n Ytest = test.data.cpu().numpy()\n sigma_p = (predict).std(axis=0)\n sigma_g = (Ytest).std(axis=0)\n mean_p = predict.mean(axis=0)\n mean_g = Ytest.mean(axis=0)\n index = (sigma_g != 0)\n correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis=0) / (sigma_p * sigma_g + 0.000000000000001)\n correlation = (correlation[index]).mean()\n return rse, rae, correlation\n\n\ndef train(data, X, Y, model, criterion, optim, batch_size):\n model.train()\n total_loss = 0\n n_samples = 0\n for X, Y in data.get_batches(X, Y, batch_size, True):\n model.zero_grad()\n output = model(X)\n scale = data.scale.expand(output.size(0), data.m)\n loss = criterion(output * scale, Y * scale)\n loss.backward()\n grad_norm = optim.step()\n total_loss += loss.item()\n n_samples += (output.size(0) * data.m)\n return total_loss / n_samples\n\n\n# --data data/exchange_rate.txt --save save/exchange_rate.pt --hidCNN 50 --hidRNN 50 --L1Loss False --output_fun None\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PyTorch Time series forecasting')\n parser.add_argument('--data', type=str, required=True,\n help='location of the data file')\n parser.add_argument('--model', type=str, default='LSTNet',\n help='')\n parser.add_argument('--hidCNN', type=int, default=100,\n help='number 
of CNN hidden units')\n parser.add_argument('--hidRNN', type=int, default=100,\n help='number of RNN hidden units')\n parser.add_argument('--window', type=int, default=24 * 7,\n help='window size')\n parser.add_argument('--CNN_kernel', type=int, default=6,\n help='the kernel size of the CNN layers')\n parser.add_argument('--highway_window', type=int, default=24,\n help='The window size of the highway component')\n parser.add_argument('--clip', type=float, default=10.,\n help='gradient clipping')\n parser.add_argument('--epochs', type=int, default=100,\n help='upper epoch limit')\n parser.add_argument('--batch_size', type=int, default=128, metavar='N',\n help='batch size')\n parser.add_argument('--dropout', type=float, default=0.2,\n help='dropout applied to layers (0 = no dropout)')\n parser.add_argument('--seed', type=int, default=54321,\n help='random seed')\n parser.add_argument('--gpu', type=int, default=None)\n parser.add_argument('--log_interval', type=int, default=2000, metavar='N',\n help='report interval')\n parser.add_argument('--load', type=str, default=None,\n help='path to load the initial model')\n parser.add_argument('--save', type=str, default=DEFAULT_MODEL_PARAMETER_FILE,\n help='path to save the final model')\n parser.add_argument('--cuda', type=str, default=True)\n parser.add_argument('--optim', type=str, default='adam')\n parser.add_argument('--lr', type=float, default=0.001)\n parser.add_argument('--horizon', type=int, default=12)\n parser.add_argument('--skip', type=float, default=24)\n parser.add_argument('--hidSkip', type=int, default=5)\n parser.add_argument('--L1Loss', type=bool, default=True)\n parser.add_argument('--normalize', type=int, default=2)\n parser.add_argument('--output_fun', type=str, default='sigmoid')\n args = parser.parse_args()\n logger.debug(\"=> Starting..... 
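`Optim.Optim` is imported from a local module that is not shown in this file, so its behavior here is an assumption: a common shape for such a wrapper clips gradients before delegating to a torch optimizer, which would also explain the `grad_norm = optim.step()` call in `train`. A minimal sketch under that assumption:

```python
import torch
import torch.nn as nn

class ClippingOptim:
    """Assumed shape of an Optim-style wrapper: clip, then step."""

    def __init__(self, params, lr, clip):
        self.params = list(params)
        self.clip = clip
        self.inner = torch.optim.Adam(self.params, lr=lr)

    def step(self):
        grad_norm = nn.utils.clip_grad_norm_(self.params, self.clip)
        self.inner.step()
        return grad_norm

    def zero_grad(self):
        self.inner.zero_grad()
```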
'\")\n\n args.cuda = args.gpu is not None\n if args.cuda:\n torch.cuda.set_device(args.gpu)\n # Set the random seed manually for reproducibility.\n torch.manual_seed(args.seed)\n if torch.cuda.is_available():\n if not args.cuda:\n logger.debug(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n else:\n torch.cuda.manual_seed(args.seed)\n\n Data = DataUtility(args.data, 0.6, 0.2, args.cuda, args.horizon, args.window, args.normalize)\n logger.debug(Data.rse)\n filename_to_save_model = args.save\n filename_to_load_model = args.load\n start_epoch = 0\n if filename_to_load_model and os.path.isfile(filename_to_load_model):\n model, optim, start_epoch, _ = load_checkpoint(filename=filename_to_load_model)\n else:\n model = eval(args.model).Model(args, Data)\n optim = Optim.Optim(\n model.parameters(), args.optim, args.lr, args.clip,\n )\n\n if args.cuda:\n model.cuda()\n\n nParams = sum([p.nelement() for p in model.parameters()])\n logger.debug('* number of parameters: %d' % nParams)\n\n if args.L1Loss:\n criterion = nn.L1Loss(size_average=False)\n else:\n criterion = nn.MSELoss(size_average=False)\n evaluateL2 = nn.MSELoss(size_average=False)\n evaluateL1 = nn.L1Loss(size_average=False)\n if args.cuda:\n criterion = criterion.cuda()\n evaluateL1 = evaluateL1.cuda()\n evaluateL2 = evaluateL2.cuda()\n\n best_val = 10000000\n\n # At any point you can hit Ctrl + C to break out of training early.\n try:\n logger.debug('begin training')\n for epoch in range(start_epoch, args.epochs + 1):\n epoch_start_time = time.time()\n train_loss = train(Data, Data.train[0], Data.train[1], model, criterion, optim, args.batch_size)\n val_loss, val_rae, val_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, evaluateL2, evaluateL1,\n args.batch_size)\n logger.debug(\n '| end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae {:5.4f} | valid corr {:5.4f}'.format(\n epoch, (time.time() - epoch_start_time), train_loss, val_loss, val_rae, val_corr))\n # Save the model if the validation loss is the best we've seen so far.\n\n if val_loss < best_val:\n with open(filename_to_save_model, 'wb') as f:\n checkpoint = {'model': model,\n 'state_dict': model.state_dict(),\n 'optim': optim,\n 'epoch': epoch + 1}\n\n torch.save(checkpoint, f)\n logger.info(\"Saving checkpoint with val_loss:{:5.4f} < best_val:{:5.4f}\".format(val_loss, best_val))\n\n best_val = val_loss\n if epoch % 5 == 0:\n test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2,\n evaluateL1, args.batch_size)\n logger.info(\n \"test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}\".format(test_acc, test_rae, test_corr))\n\n except KeyboardInterrupt:\n logger.debug('-' * 89)\n logger.debug('Exiting from training early')\n\n\n test_acc, test_rae, test_corr = evaluate(Data, Data.test[0], Data.test[1], model, evaluateL2, evaluateL1,\n args.batch_size)\n logger.info(\"test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f}\".format(test_acc, test_rae, test_corr))\n","sub_path":"LSTNet_pytorch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"177912722","text":"'''\nApply a Cesar cipher of 7 to the 'secret' variable.\n\np.s.: http://www.montypython.net/scripts/dentist.php\n\n'''\n\nsecret = \"I hear the gooseberries are doing well this year, and so are the mangoes.\"\ncipher = 7\nabc = 'abcdefghijklmnopqrstuvwxyz'\nlength = 
len(secret)\n\nfor x in range(0, length):\n char = secret[x].lower()\n if char in abc:\n letter = abc.find(char)\n cesar = (letter - cipher) % len(abc)\n print(abc[cesar], end='')\n else:\n # keep spaces and punctuation unchanged instead of indexing with find()'s -1\n print(char, end='')\n\n\n","sub_path":"week_02/labs/04_conditionals_loops/Exercise_11.py","file_name":"Exercise_11.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"590576822","text":"import pygame\n\n\nclass UtilitySprite(pygame.sprite.Sprite):\n def __init__(self, width, height, x_pos, y_pos):\n super().__init__()\n self.image = pygame.Surface([width, height])\n self.rect = self.image.get_rect()\n self.rect.move_ip(x_pos, y_pos)\n\n def move(self, _dx, _dy):\n self.rect = self.rect.move(_dx, _dy)\n\n\nclass AnchoredBlocks(pygame.sprite.Group):\n def __init__(self):\n super().__init__()\n\n\nclass BlockGroup(pygame.sprite.Group):\n def __init__(self, block_size, color, starting_pos, rotation_definitions):\n super().__init__()\n self.b0 = SingleBlock(color, block_size, block_size * starting_pos[0][0], block_size * starting_pos[0][1])\n self.b1 = SingleBlock(color, block_size, block_size * starting_pos[1][0], block_size * starting_pos[1][1])\n self.b2 = SingleBlock(color, block_size, block_size * starting_pos[2][0], block_size * starting_pos[2][1])\n self.b3 = SingleBlock(color, block_size, block_size * starting_pos[3][0], block_size * starting_pos[3][1])\n self.add(self.b0, self.b1, self.b2, self.b3)\n\n self.block_size = block_size\n self.rotations = 0\n self.rotation_definitions = rotation_definitions\n\n def rotate(self):\n rotation_command = self.rotation_definitions[self.rotations % 4]\n self.b0.move(self.block_size * rotation_command[0][0], self.block_size * rotation_command[0][1])\n self.b1.move(self.block_size * rotation_command[1][0], self.block_size * rotation_command[1][1])\n self.b2.move(self.block_size * rotation_command[2][0], self.block_size * rotation_command[2][1])\n self.b3.move(self.block_size * rotation_command[3][0], self.block_size * rotation_command[3][1])\n\n self.rotations += 1\n\n def move(self, _dx, _dy):\n for block in self.sprites():\n block.move(_dx, _dy)\n\n\nclass SingleBlock(pygame.sprite.Sprite):\n def __init__(self, color, width, x, y):\n super().__init__()\n\n self.block_size = width\n self.color = color\n self.image = pygame.Surface([width, width])\n self.image.fill(color)\n\n screen = pygame.display.get_surface()\n self.area = screen.get_rect()\n\n self.rect = self.image.get_rect()\n self.rect.move_ip(x, y)\n\n def move(self, _dx, _dy):\n self.rect = self.rect.move(_dx, _dy)\n\n\nclass LongBarPiece(BlockGroup):\n def __init__(self, block_size):\n\n rotation_definition = {\n 0: [(2, 2), (1, 1), (0, 0), (-1, -1)],\n 1: [(-2, -2), (-1, -1), (0, 0), (1, 1)],\n 2: [(2, 2), (1, 1), (0, 0), (-1, -1)],\n 3: [(-2, -2), (-1, -1), (0, 0), (1, 1)],\n }\n super().__init__(block_size, (255, 0, 0), ((5, 0), (5, 1), (5, 2), (5, 3)), rotation_definition)\n\n\nclass SquarePiece(BlockGroup):\n def __init__(self, block_size):\n\n rotation_definition = {\n 0: [(0, 0), (0, 0), (0, 0), (0, 0)],\n 1: [(0, 0), (0, 0), (0, 0), (0, 0)],\n 2: [(0, 0), (0, 0), (0, 0), (0, 0)],\n 3: [(0, 0), (0, 0), (0, 0), (0, 0)]\n }\n super().__init__(block_size, (255, 255, 0), ((5, 0), (6, 0), (5, 1), (6, 1)), rotation_definition)\n\n\nclass LeftElPiece(BlockGroup):\n def __init__(self, block_size):\n\n rotation_definition = {\n 0: [(1, 1), (0, 0), (-1, -1), (0, -2)],\n 1: [(-1, 1), (0, 0), (1, -1), (2, 0)],\n 
2: [(-1, -1), (0, 0), (1, 1), (0, 2)],\n 3: [(1, -1), (0, 0), (-1, 1), (-2, 0)],\n }\n super().__init__(block_size, (0, 255, 0), ((6, 0), (6, 1), (6, 2), (5, 2)), rotation_definition)\n\n\nclass RightElPiece(BlockGroup):\n def __init__(self, block_size):\n\n rotation_definition = {\n 0: [(1, 1), (0, 0), (-1, -1), (-2, 0)],\n 1: [(-1, 1), (0, 0), (1, -1), (0, -2)],\n 2: [(-1, -1), (0, 0), (1, 1), (2, 0)],\n 3: [(1, -1), (0, 0), (-1, 1), (0, 2)],\n }\n super().__init__(block_size, (0, 0, 255), ((5, 0), (5, 1), (5, 2), (6, 2)), rotation_definition)\n\n\nclass TeePiece(BlockGroup):\n def __init__(self, block_size):\n\n rotation_definition = {\n 0: [(1, -1), (0, 0), (-1, 1), (-1, -1)],\n 1: [(1, 1), (0, 0), (-1, -1), (1, -1)],\n 2: [(-1, 1), (0, 0), (1, -1), (1, 1)],\n 3: [(-1, -1), (0, 0), (1, 1), (-1, 1)],\n }\n super().__init__(block_size, (0, 255, 255), ((4, 0), (5, 0), (6, 0), (5, 1)), rotation_definition)\n","sub_path":"Applications/Tetris/pieces.py","file_name":"pieces.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"61497094","text":"import binascii\nimport logging\nfrom bluepy.btle import BTLEException\nfrom miflora.miflora_poller import BYTEORDER\nfrom miflora import miflora_poller as mi\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef _str2bytearray(hex_string):\n return binascii.unhexlify(hex_string.replace(' ', ''))\n\nVALUE_DEVICE_INFO = _str2bytearray(\"62 1d 32 2e 38 2e 36\")\nVALUE_MEASUREMENT = _str2bytearray(\"ce 00 00 35 00 00 00 1c c8 00 02 3c 00 fb 34 9b\")\nVALUE_NO_DATA = (0).to_bytes(16, BYTEORDER)\nVALUE_DEVICE_TIME = (36000).to_bytes(32, BYTEORDER)\n\n\nclass MockPeripheral:\n def __init__(self, history_items=10, **kwargs):\n self.history_items = history_items\n self.cache = {\n 0x03: b'Test Flower Care',\n 0x35: VALUE_NO_DATA,\n 0x38: VALUE_DEVICE_INFO,\n 0x41: VALUE_DEVICE_TIME,\n 0x3c: VALUE_NO_DATA\n }\n self._read_log = []\n self._connected = False\n\n @property\n def _helper(self):\n return 1 if self._connected else None\n\n def readCharacteristic(self, handle):\n if self._connected is False:\n raise BTLEException(BTLEException.INTERNAL_ERROR, \"Helper not started (did you call connect()?)\")\n\n response = self.cache.get(handle, b'\\x00\\x00')\n\n self._read_log.append((handle, response))\n LOGGER.debug(\"read(0x{:x}) value: %a\".format(handle, [x for x in response]))\n return response\n\n def writeCharacteristic(self, handle, payload, withResponse=False):\n LOGGER.debug(\"write(0x{:x}) payload: %a\".format(handle, payload))\n if self._connected is False:\n raise BTLEException(BTLEException.INTERNAL_ERROR, \"Helper not started (did you call connect()?)\")\n\n if handle == mi.handle_measurement_control and payload[0] == 0xa0:\n self.cache[mi.handle_measurement_read] = VALUE_MEASUREMENT\n\n if handle == mi.handle_history_control:\n if payload == mi.cmd_history_read_init:\n self.cache[mi.handle_history_read] = (self.history_items).to_bytes(2, BYTEORDER) + \\\n (0).to_bytes(14, BYTEORDER)\n\n elif payload[0] == int.from_bytes(b'\\xa1', BYTEORDER):\n address = int.from_bytes(payload[1:3], byteorder=BYTEORDER)\n self.cache[mi.handle_history_read] = \\\n (3600*(self.history_items - address)).to_bytes(4, BYTEORDER) + \\\n int(20.6*10).to_bytes(2, BYTEORDER) + b'\\x00' + \\\n (1066).to_bytes(3, BYTEORDER) + b'\\x00' + (57).to_bytes(1, BYTEORDER) + \\\n (123).to_bytes(2, BYTEORDER) + b'\\x00\\x00'\n\n elif payload == mi.cmd_history_read_success:\n 
self.cache[mi.handle_history_read] = VALUE_NO_DATA\n self.history_items = 0\n\n elif payload == mi.cmd_history_read_failed:\n self.cache[mi.handle_history_read] = VALUE_NO_DATA\n\n return {'resp': 'wr'} if withResponse else None\n\n def connect(self, *args, **kwargs):\n self._connected = True\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._connected = False\n","sub_path":"miflora/tests/mock_peripheral.py","file_name":"mock_peripheral.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"324852033","text":"# Airburg Emo Plus Plugin\n#\n# Author: wolfg1969, 2017\n#\n\"\"\"\n\n \n \n \n \n \n \n \n \n\n\"\"\"\nimport Domoticz\nfrom datetime import datetime, timedelta\nfrom emo_plus import EmoPlus\n\nicons = {\n \"airburgemoplusbatterylevelfull\": \"batterylevelfull icons.zip\",\n \"airburgemoplusbatterylevelok\": \"batterylevelok icons.zip\",\n \"airburgemoplusbatterylevellow\": \"batterylevellow icons.zip\",\n \"airburgemoplusbatterylevelempty\": \"batterylevelempty icons.zip\",\n \"airburgemoplusblue\": \"emoblue icons.zip\",\n \"airburgemoplusgreen\": \"emogreen icons.zip\",\n \"airburgemoyellow\": \"emoyellow icons.zip\",\n \"airburgemoorange\": \"emoorange icons.zip\",\n \"airburgemored\": \"emored icons.zip\",\n \"airburgemopurple\": \"emopurple icons.zip\",\n \"airburgemoblack\": \"emoblack icons.zip\"\n}\n\nclass EmoPlusPlugin:\n \n def __init__(self):\n \n self.debug = False\n \n self.emoDevice = None\n \n self.nextupdate = datetime.now()\n self.warmupcounter = 0\n self.startwarmup = False\n \n self.pollinterval = 30 # default polling interval in minutes\n\n def onStart(self):\n \n if Parameters[\"Mode6\"] == \"Debug\":\n self.debug = True\n Domoticz.Debugging(1)\n else:\n Domoticz.Debugging(0)\n \n # load custom battery images\n Domoticz.Debug('icons=%s,Images=%s' % (icons, Images))\n for key, value in icons.items():\n if key not in Images:\n Domoticz.Image(Filename=value).Create()\n Domoticz.Debug(\"Added icon: \" + key + \" from file \" + value)\n Domoticz.Debug('Images=%s' % Images.keys())\n Domoticz.Debug(\"Number of icons loaded = \" + str(len(Images)))\n for image in Images:\n Domoticz.Debug(\"Icon \" + str(Images[image].ID) + \" \" + Images[image].Name)\n \n # check polling interval parameter\n try:\n temp = int(Parameters[\"Mode1\"])\n except:\n Domoticz.Error(\"Invalid polling interval parameter\")\n else:\n if temp < 30:\n temp = 30 # minimum polling interval\n Domoticz.Error(\"Specified polling interval too short: changed to 30 minutes\")\n elif temp > 1440:\n temp = 1440 # maximum polling interval is 1 day\n Domoticz.Error(\"Specified polling interval too long: changed to 1440 minutes (24 hours)\")\n self.pollinterval = temp\n Domoticz.Log(\"Using polling interval of {} minutes\".format(str(self.pollinterval)))\n \n if (len(Devices) == 0):\n Domoticz.Device(Name=\"PM2.5 Count\", Unit=1, TypeName=\"Custom\").Create()\n Domoticz.Device(Name=\"PM2.5 Density\", Unit=2, TypeName=\"Custom\", Options={\"Custom\": \"1; ug/m3\"}).Create()\n Domoticz.Device(Name=\"Battery Level\", Unit=3, TypeName=\"Custom\", Options={\"Custom\": \"1;%\"}).Create()\n Domoticz.Log(\"Devices created.\")\n \n DumpConfigToLog()\n Domoticz.Log(\"Plugin is started.\")\n\n def onStop(self):\n Domoticz.Log(\"Plugin is stopping.\")\n Domoticz.Debugging(0)\n\n def onHeartbeat(self):\n try:\n now = datetime.now()\n \n if now >= self.nextupdate:\n self.nextupdate = now + 
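`MockPeripheral` can stand in for real BLE hardware in tests: writing a payload whose first byte is `0xa0` to the measurement-control handle loads `VALUE_MEASUREMENT` into the cache, exactly as the branch above implements. A short usage sketch (handle constants come from `miflora.miflora_poller`; the payload value is an assumed wake-up command):

```python
from miflora import miflora_poller as mi

mock = MockPeripheral()
mock.connect()
# First payload byte 0xa0 triggers caching of VALUE_MEASUREMENT
mock.writeCharacteristic(mi.handle_measurement_control, b'\xa0\x1f')
print(mock.readCharacteristic(mi.handle_measurement_read))
```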
timedelta(minutes=self.pollinterval)\n self.startwarmup = True\n \n self.warmUp()\n self.getBatteryLevel()\n \n if self.startwarmup:\n self.warmupcounter += 1\n \n if self.warmupcounter > 3: # 3 * default heartbeat = 30 sec\n \n self.startwarmup = False\n self.warmupcounter = 0\n \n self.readValue()\n except RuntimeError as e:\n Domoticz.Log('EmoPlus: %s (error: %s)' % (Parameters[\"Address\"], str(e)))\n \n \n def warmUp(self):\n Domoticz.Log('warmUp')\n \n try:\n if self.emoDevice is None:\n self.emoDevice = EmoPlus(Parameters[\"Address\"])\n \n self.emoDevice.connect()\n self.emoDevice.warm_up()\n \n except RuntimeError as e:\n emo = None\n raise e\n \n return True\n \n def getBatteryLevel(self):\n \n Domoticz.Log('getBatteryLevel')\n \n if 3 in Devices and self.emoDevice is not None and self.emoDevice.connected:\n \n levelBatt = self.emoDevice.get_battery_level()\n if levelBatt >= 75:\n icon = \"airburgemoplusbatterylevelfull\"\n elif levelBatt >= 50:\n icon = \"airburgemoplusbatterylevelok\"\n elif levelBatt >= 25:\n icon = \"airburgemoplusbatterylevellow\"\n else:\n icon = \"airburgemoplusbatterylevelempty\"\n \n Domoticz.Debug('icon=%s,Images=%s' % (icon, Images))\n \n try:\n if icon in Images:\n Devices[3].Update(nValue=0, sValue=str(levelBatt), Image=Images[icon].ID)\n else:\n Domoticz.Debug(\"icon not found: %s in %s\" % (icon, Images))\n Devices[3].Update(nValue=0, sValue=str(levelBatt))\n except:\n Domoticz.Error(\"Failed to update device unit \" + str(3))\n \n def readValue(self):\n \n Domoticz.Log('read value')\n \n if self.emoDevice is not None and self.emoDevice.connected:\n try:\n (count, density) = self.emoDevice.get_haze_value()\n \n if density >= 0 and density < 36:\n icon = \"airburgemoplusblue\"\n elif density >= 36 and density < 76:\n icon = \"airburgemoplusgreen\"\n elif density >= 76 and density < 116:\n icon = \"airburgemoplusyellow\"\n elif density >=116 and density < 151:\n icon = \"airburgemoplusorange\" \n elif density >=151 and density < 251:\n icon = \"airburgemoplusred\" \n elif density >=251 and density < 351:\n icon = \"airburgemopluspurple\"\n else:\n icon = \"airburgemoplusblack\"\n \n Domoticz.Debug('icon=%s,Images=%s' % (icon, Images))\n \n Domoticz.Log('count = %d' % count)\n if 1 in Devices:\n if icon in Images:\n Devices[1].Update(nValue=0, sValue='%d' % count, Image=Images[icon].ID)\n else:\n Domoticz.Debug(\"icon not found: %s in %s\" % (icon, Images))\n Devices[1].Update(nValue=0, sValue='%d' % count)\n \n Domoticz.Log('density = %d' % density)\n if 2 in Devices:\n if icon in Images:\n Devices[2].Update(nValue=0, sValue='%d' % density, Image=Images[icon].ID)\n else:\n Domoticz.Debug(\"icon not found: %s in %s\" % (icon, Images))\n Devices[2].Update(nValue=0, sValue='%d' % density)\n \n finally:\n self.emoDevice.disconnect()\n self.emoDevice = None\n\n\nglobal _plugin\n_plugin = EmoPlusPlugin()\n\ndef onStart():\n global _plugin\n _plugin.onStart()\n\ndef onStop():\n global _plugin\n _plugin.onStop()\n\ndef onHeartbeat():\n global _plugin\n _plugin.onHeartbeat()\n\n# Generic helper functions\ndef DumpConfigToLog():\n for x in Parameters:\n if Parameters[x] != \"\":\n Domoticz.Debug( \"'\" + x + \"':'\" + str(Parameters[x]) + \"'\")\n Domoticz.Debug(\"Device count: \" + str(len(Devices)))\n for x in Devices:\n Domoticz.Debug(\"Device: \" + str(x) + \" - \" + str(Devices[x]))\n Domoticz.Debug(\"Device ID: '\" + str(Devices[x].ID) + \"'\")\n Domoticz.Debug(\"Device Name: '\" + Devices[x].Name + \"'\")\n Domoticz.Debug(\"Device nValue: \" + 
str(Devices[x].nValue))\n Domoticz.Debug(\"Device sValue: '\" + Devices[x].sValue + \"'\")\n Domoticz.Debug(\"Device LastLevel: \" + str(Devices[x].LastLevel))\n return\n","sub_path":"plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"126978758","text":"import pygame,sys\nfrom pygame.locals import *\npygame.font.init()\nmyfont = pygame.font.SysFont('Comic Sans MS', 30)\ncolor = (0,40,40)\nheight = 600\nwidth = 800\nscreen = pygame.display.set_mode((width,height))\npygame.display.set_caption('Air Hockey')\nscreen.fill(color)\nclock = pygame.time.Clock()\n\npaddle_color = (255,215,0)\npaddle1 = 300\npaddle2 = 300\nball_x = 400\nball_y = 300\ndirection_x = 5\ndirection_y = -5\nscore1,score2 = 0,0\n\ndef collision(ball_x,ball_y,direction_x,direction_y,paddle1,paddle2,width,height):\n if (ball_x == 30 and ball_y+10 >= paddle1 and ball_y <= paddle1+50):\n direction_x *= -1\n elif (ball_x == width-30 and ball_y >= paddle2 and ball_y <= paddle2+50):\n direction_x *= -1\n if (ball_y <= 0 or ball_y >= height):\n direction_y *= -1\n return direction_x,direction_y\n\nwhile True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n screen.fill(color)\n pygame.draw.rect(screen,paddle_color,pygame.Rect(20,paddle1,10,50))\n pygame.draw.rect(screen,paddle_color,pygame.Rect(width-30,paddle2,10,50))\n pygame.draw.rect(screen,(255,255,255),pygame.Rect(width//2 ,0,2,height))\n pygame.draw.circle(screen, (255,255,255), (ball_x,ball_y) , 10)\n pressed = pygame.key.get_pressed()\n if (pressed[pygame.K_UP] and paddle1 >= 10):\n paddle1 -= 6\n if (pressed[pygame.K_DOWN] and paddle1 <= height-60):\n paddle1 += 6\n\n score_1 = myfont.render(str(score1), False, (255, 255, 0))\n score_2 = myfont.render(str(score2), False, (255, 255, 0))\n direction_x,direction_y = collision(ball_x,ball_y,direction_x,direction_y,paddle1,paddle2,width,height)\n ball_x += direction_x\n ball_y += direction_y\n if (ball_x >= 300 and ball_y - paddle2 >= 0 and paddle2 <= 540):\n paddle2 += 6\n if (ball_x >= 300 and ball_y - paddle2 <= 0 and paddle2 >= 10):\n paddle2 -= 6\n if (ball_x >= width):\n score1 += 1\n paddle1,paddle2 = 300,300\n ball_x = width//2\n ball_y = height//2\n if (ball_x <= 0):\n score2 += 1\n paddle1,paddle2 = 300,300\n ball_x = width//2\n ball_y = height//2\n screen.blit(score_1,(350,300))\n screen.blit(score_2,(450,300))\n clock.tick(60)\n pygame.display.update()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"510401931","text":"from contextlib import contextmanager\n\nimport sqlalchemy\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy_utils.functions import drop_database, create_database, database_exists\n\nfrom ..config import DB_URL\n\n\nengine = sqlalchemy.create_engine(DB_URL, client_encoding='utf8', use_batch_mode=True)\n\n\nBase = declarative_base()\n\n\n@contextmanager\ndef session_ctx():\n session = Session(bind=engine)\n try:\n yield session\n session.commit()\n except Exception as e:\n session.rollback()\n raise e\n finally:\n session.close()\n\n\ndef drop():\n if database_exists(engine.url):\n drop_database(engine.url)\n\n\ndef create():\n if not database_exists(engine.url):\n create_database(engine.url)\n\n # INFO: we need to `load` to Base the 
Models\n from . import models # noqa\n\n Base.metadata.create_all(engine)\n\n\ndef recreate():\n drop()\n create()\n","sub_path":"patton/dal/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"308228384","text":"from sklearn.linear_model import LogisticRegression\nfrom sklearn import datasets\nimport numpy as np\n\niris = datasets.load_iris()\nX = iris.data[:, [2, 3]]\ny = iris.target\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)\n\nfrom sklearn.preprocessing import StandardScaler\n\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\nX_combined_std=np.vstack((X_train_std,X_test_std))\ny_combined=np.hstack((y_train,y_test))\n\n\nlr=LogisticRegression(C=1000.0,random_state=1)\nlr.fit(X_train_std,y_train)\nprint(f\"celnosc algorytmu: {np.round(lr.score(X_test_std,y_test)*100,2)}%\")\n\nfrom plots import plot_decision_regions\nimport matplotlib.pyplot as plt\nplot_decision_regions(X_combined_std,y_combined,classifier=lr,test_idx=range(105,150))\nplt.xlabel('dlugosc platka [standaryzowana]')\nplt.ylabel('szerokosc platka [standaryzowana]')\nplt.legend(loc='upper left')\nplt.show()","sub_path":"Regresja_Liniowa_sklearn.py","file_name":"Regresja_Liniowa_sklearn.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"618102566","text":"import sys\nsys.path.append(\"./\")\n\nfrom brainrender.Utils.data_io import connected_to_internet\n\nimport requests\nimport time\n\n\ndef request(url):\n\t\"\"\"\n\tSends a request to a url\n\n\t:param url: \n\n\t\"\"\"\n\tif not connected_to_internet():\n\t\traise ConnectionError(\"You need to have an internet connection to send requests.\")\n\tresponse = requests.get(url)\n\tif response.ok:\n\t\treturn response\n\telse:\n\t\texception_string = 'URL request failed: {}'.format(response.reason)\n\traise ValueError(exception_string)\n","sub_path":"brainrender/Utils/webqueries.py","file_name":"webqueries.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"573006923","text":"from bounce import run_processes, Ball\r\n\r\n\r\nif __name__ == '__main__':\r\n balls_for_queue = [Ball('Red ball', 3),\r\n Ball('Blue ball', 1),\r\n Ball('Yellow ball', 2),\r\n Ball('Green ball', 3)]\r\n process_args = [('Bob', 3), ('Lexa', 1), ('Mike', 5)]\r\n run_processes(balls_for_queue, process_args)\r\n print('---')\r\n # run with only one process\r\n run_processes(balls_for_queue, [('Bob', 1)])\r\n print('---')\r\n # run with only one ball\r\n run_processes([Ball('Red ball', 5)], process_args)\r\n","sub_path":"Test/python-sprint05/s05t11_bounce_main.py","file_name":"s05t11_bounce_main.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"55497310","text":"import graphene\n\nfrom django.contrib.auth.models import User\n\nfrom graphene_django_extras import (\n DjangoListObjectType,\n DjangoObjectType,\n DjangoFilterListField,\n DjangoSerializerMutation,\n)\nfrom graphene_django_extras.paginations import LimitOffsetGraphqlPagination\n\nimport graphql_jwt\n\n\nfrom graphql_demo.users import serializers as 
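The `session_ctx` context manager above commits on success and rolls back on any exception, so callers wrap one unit of work per `with` block. A minimal usage sketch; the helper and any model passed to it are hypothetical, only `session_ctx` itself is defined in this file:

```python
from patton.dal.database import session_ctx

def count_rows(model):
    """Count rows of any Base-mapped model inside a managed session."""
    with session_ctx() as session:
        return session.query(model).count()
```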
users_serializers\n\n\nclass UserType(DjangoObjectType):\n class Meta:\n model = User\n description = \" Type definition for a single user \"\n filter_fields = {\n \"id\": [\"exact\"],\n \"first_name\": [\"icontains\", \"iexact\"],\n \"last_name\": [\"icontains\", \"iexact\"],\n \"username\": [\"icontains\", \"iexact\"],\n \"email\": [\"icontains\", \"iexact\"],\n }\n\n\nclass UserListType(DjangoListObjectType):\n class Meta:\n description = \" Type definition for user list \"\n model = User\n # ordering can be: string, tuple or list\n pagination = LimitOffsetGraphqlPagination(\n default_limit=25, ordering=\"-username\"\n )\n\n\nclass UserSerializerMutation(DjangoSerializerMutation):\n \"\"\"\n DjangoSerializerMutation auto implement Create, Delete and Update functions\n \"\"\"\n\n class Meta:\n description = \" DRF serializer based Mutation for Users \"\n serializer_class = users_serializers.UserSerializer\n\n\nclass Queries(graphene.ObjectType):\n all_users = DjangoFilterListField(UserType)\n user = UserListType.RetrieveField(\n description=\"User List with pagination and filtering\"\n )\n\n\nclass Mutations(graphene.ObjectType):\n create_user = UserSerializerMutation.CreateField()\n update_user = UserSerializerMutation.UpdateField()\n delete_user = UserSerializerMutation.DeleteField()\n # JWT authentication\n token_auth = graphql_jwt.ObtainJSONWebToken.Field()\n verify_token = graphql_jwt.Verify.Field()\n refresh_token = graphql_jwt.Refresh.Field()\n","sub_path":"backend/graphql_demo/users/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"121991204","text":"#== == == == == == == == == == == == == == == == == == == == == == == == ==\r\n# Description\r\n# ===================================================\r\n\r\n\"\"\"\r\nFile Name : SignupPage.py\r\n\r\nPurpose : The sign up window configuration\r\n\r\n\"\"\"\r\n# ===================================================\r\n# Imports\r\n# ===================================================\r\nimport Tkinter as tk\r\nimport tkFileDialog\r\nfrom hashlib import sha256\r\nimport threading\r\n\r\n# ===================================================\r\n# Constants\r\n# ===================================================\r\nBACKGROUND = \"Images\\Signuppage.gif\"\r\n\r\n# ===================================================\r\n# Classes\r\n# ===================================================\r\n\r\n\r\nclass SignupPage(tk.Frame):\r\n\r\n def __init__(self, master, controller, user):\r\n \"\"\"\r\n Enter Statement: the constructor gets upper frame, controller, and user objects\r\n Exit Statement: the function init and create SignupPage object\r\n \"\"\"\r\n tk.Frame.__init__(self, master)\r\n\r\n #Init the master to the upper frame, and get the controller pointer\r\n self.__master = master\r\n self.__controller = controller\r\n\r\n #Pointer to user object, In order to setup his keys\r\n self.__user_pointer = user\r\n\r\n # Make Sure That The Master Has Been Rendered\r\n self.__master.update()\r\n\r\n #Set background of the login page\r\n self.__img = tk.PhotoImage(file=BACKGROUND)\r\n self.__background = tk.Label(self, image=self.__img, bd=0)\r\n self.__background.pack()\r\n\r\n #Create keys for the user, at signup\r\n self.__DataKeys = self.__user_pointer.create_keys()\r\n\r\n #Create the export button\r\n self.__export_wallet = tk.Button(self, width=10,bg='#202020', fg=\"white\", font=(\"Segoe UI\", 12), height=2,\r\n 
text='Export', command=self.exportfile).place(x=172, y=220)\r\n\r\n #Display wallet address\r\n tk.Label(self, text=SignupPage.get_hash_value(self.__DataKeys[0]),\r\n font=(\"Segoe UI\", 8)).place(x=40, y=150)\r\n\r\n tk.Label(self, text=\"Your wallet address:\", font=(\"Segoe UI\", 10)).place(x=40, y=125)\r\n\r\n @staticmethod\r\n def get_hash_value(init_txt):\r\n \"\"\"\r\n Enter Statement: the function gets String\r\n Exit Statement: the function returns his hash value (SHA-256)\r\n Return type: String\r\n \"\"\"\r\n return sha256(init_txt).hexdigest()\r\n\r\n def exportfile(self):\r\n \"\"\"\r\n Enter Statement: the function doesn't get parameters\r\n Exit Statement: the function export the file with the details , to the path which the user choose.\r\n Return type: None\r\n \"\"\"\r\n\r\n #Get the chosen path\r\n file_path = tkFileDialog.asksaveasfile(defaultextension=\".wallet\",\r\n filetypes=((\"Wallet file\", \"*.wallet\"), (\"All Files\", \"*.*\")))\r\n\r\n if file_path is not None:\r\n\r\n #Save the crypto keys in file\r\n f = open(file_path.name, \"w\")\r\n f.write(self.__DataKeys[0])\r\n f.write(\"\\n~%%BREAK~\\n\")\r\n f.write(self.__DataKeys[1])\r\n f.close()\r\n\r\n #Display the main window\r\n self.__controller.show_page(0)\r\n threading.Thread(target=self.__user_pointer.setup, args=(self.__master, self.__controller, True)).start()\r\n\r\n else:\r\n self.__controller.show_page(1)\r\n\r\n","sub_path":"Blockchain Files/SignupPage.py","file_name":"SignupPage.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"336345921","text":"# def std_weight(height, gender):\n# if gender == \"수\":\n# return height*height*22\n# else:\n# return height*height*21\n\n# myweight = int(input(\"몇근\"))\n# height = int(input(\"길이\"))\n# gender = input(\"암수구분\")\n# weight = std_weight(height/100, gender)\n# weight = round(weight, 2)\n# print(\"키\", height, \"표준체중\", weight)\n# print(\"감량해야 할 kg\", round(myweight - weight,2))\n\n'''\n함수정의 정수들을 전달하여\n1) 최대값을 구하는 max()함수\n2) 최소값을 구하는 min()함수\n3) 양수이면 1, 음수이면 -1, 0이면 0 sign()함수 정의\n4) 단을 전달하여 구구단 출력함수\n'''\n\nscore = map(int, input().split())\n\ndef max(*score) :\n ma = -999\n for a in score :\n if a > ma :\n ma = a\n return ma\n\ndef min(*score) :\n mi = 999\n for a in score :\n if a < mi :\n mi = a\n return mi\n\ndef sign(a):\n if a>0 :\n return 1\n elif a < 0 :\n return -1\n else :\n return 0\n\ndef gugudan(a) :\n for b in range(1, 10):\n print(a,b, \"=\" ,a*b )\n","sub_path":"python_source/수업/210513.py","file_name":"210513.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"191941361","text":"#!/usr/bin/env python3\n\n# 蒲公英上传测试包自动化操作,使用前请对 __userKey 和 __apiKey 的值进行修改,这两个值可以在蒲公英后台看到\n# 1. 将安装包上传到蒲公英上;\n# 2. 上传成功后给测试人员发送消息通知(目前支持发送钉钉群聊机器人消息);\n\nimport os, sys, json, hashlib\nfrom . 
import dingTalk\n\n__userKey = '11779c9856169abea9f9783fb75fdcef' # 蒲公英的 User Key\n__apiKey = 'fab462ab6a583bea5dec868e30119976' # 蒲公英的 API Key\n__rootDir = os.environ['HOME'] + '/Desktop/file/' # 安装包根目录,脚本会扫描并上传此路径下的全部安装包\n\n# 将安装包上传到蒲公英,上传成功后发送通知消息\ndef __upload(filePath):\n    url = 'https://www.pgyer.com/apiv1/app/upload'\n    cmd = 'curl -F \"file=@%s\" -F \"uKey=%s\" -F \"_api_key=%s\" %s' % (filePath, __userKey, __apiKey, url)\n    f = os.popen(cmd, 'r')\n    res = f.read()\n    f.close()\n    dic = json.loads(res)\n    print('安装包上传结果: ', dic)\n    if dic['code'] == 0:\n        print('安装包上传成功')\n        __sendMessage(dic['data'])\n        return True\n    else:\n        print('安装包上传失败,', dic['message'])\n        return False\n\n# 上传成功后,发送通知消息\ndef __sendMessage(data):\n    __sendDingTalkMessage(data)\n\n# 发送钉钉机器人消息\ndef __sendDingTalkMessage(data):\n    appName = data['appName']\n    title = \"【%s】 有新版本啦😄\" % (appName)\n    url = 'https://www.pgyer.com/' + data['appShortcutUrl']\n    contents = \"## %s \\n\" % (title)\n    contents += \"## 下载请认准最新版本 \\n\"\n    contents += \"#### APP版本: %s (build:%s) \\n\" % (data['appVersion'], data['appVersionNo'])\n    contents += \"#### APP平台: iOS \\n\"\n    contents += \"#### 安装包大小: %.2f M\\n\" % (float(data['appFileSize']) / 1024 / 1024)\n    contents += \"![APP下载地址二维码](%s) \\n\\n\" % (data['appQRCodeURL'])\n    contents += \"#### 更新时间: %s \\n\" % (data['appUpdated'])\n    contents += \"**友情提醒用手机的同学:** \\n\\n 点击二维码,可以长按识别😊 \\n\\n\"\n    contents += \"[点击这里,直接打开下载页](%s)\" % (url)\n\n    dingTalk.sendMarkdown(title, contents)\n\n# 递归扫描指定目录\ndef __scanDir():\n    rootDic = __rootDir\n    result = []\n    for top, _, nondirs in os.walk(rootDic):\n        for item in nondirs:\n            suffix = item.split('.')[-1]\n            if (suffix != 'ipa' and suffix != 'apk'):\n                continue\n            path = os.path.join(top, item)\n            result.append(path)\n    return result\n\n# 获取指定路径文件的md5值\ndef __md5(filepath):\n    if not os.path.isfile(filepath):\n        return None\n    tHash = hashlib.md5()\n    with open(filepath, 'rb') as f:\n        while True:\n            b = f.read(8096)\n            if not b:\n                break\n            tHash.update(b)\n    return tHash.hexdigest()\n\n# 过滤扫描到的文件中已经上传过的\ndef __filterUpload(paths):\n    rootDir = os.environ['HOME'] + '/.sx/cache/upload/'\n    if not os.path.exists(rootDir):\n        os.makedirs(rootDir)\n    result = []\n    for path in paths:\n        md5 = __md5(path)\n        if not md5:\n            continue\n        p = rootDir + md5 + '.sx'\n        if not os.path.exists(p):\n            result.append({\n                'path': path,\n                'cachepath': p,\n                'md5': md5\n            })\n    return result\n\ndef pgy():\n    paths = __scanDir()\n    paths = __filterUpload(paths)\n    \n    for info in paths:\n        path = info['path']\n        print('开始上传: ', path)\n        if __upload(path):\n            with open(info['cachepath'], 'w+') as f:\n                f.write(info['path'])","sub_path":"src/pgy.py","file_name":"pgy.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"261373870","text":"from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.http import HttpResponseRedirect, Http404\nfrom advertisingcategory.models import Advertisingcategory\nimport datetime\n\n\n@method_decorator(login_required, name='dispatch')\nclass IndexView(ListView):\n    model = Advertisingcategory\n    template_name = 'advertisingcategory/index.html'\n    context_object_name = 'data_list'\n\n    def get_queryset(self):\n        return Advertisingcategory.objects.all().filter(isdeleted=0).order_by('-pk')\n\n\n@method_decorator(login_required, 
name='dispatch')\nclass DetailView(DetailView):\n model = Advertisingcategory\n template_name = 'advertisingcategory/detail.html'\n\n\n@method_decorator(login_required, name='dispatch')\nclass CreateView(CreateView):\n model = Advertisingcategory\n template_name = 'advertisingcategory/create.html'\n fields = ['code', 'description']\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('advertisingcategory.add_advertisingcategory'):\n raise Http404\n return super(CreateView, self).dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.enterby = self.request.user\n self.object.modifyby = self.request.user\n self.object.save()\n return HttpResponseRedirect('/advertisingcategory')\n\n\n@method_decorator(login_required, name='dispatch')\nclass UpdateView(UpdateView):\n model = Advertisingcategory\n template_name = 'advertisingcategory/edit.html'\n fields = ['code', 'description']\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('advertisingcategory.change_advertisingcategory'):\n raise Http404\n return super(UpdateView, self).dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.modifyby = self.request.user\n self.object.modifydate = datetime.datetime.now()\n self.object.save()\n return HttpResponseRedirect('/advertisingcategory')\n\n\n@method_decorator(login_required, name='dispatch')\nclass DeleteView(DeleteView):\n model = Advertisingcategory\n template_name = 'advertisingcategory/delete.html'\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.has_perm('advertisingcategory.delete_advertisingcategory'):\n raise Http404\n return super(DeleteView, self).dispatch(request, *args, **kwargs)\n\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.modifyby = self.request.user\n self.object.modifydate = datetime.datetime.now()\n self.object.isdeleted = 1\n self.object.status = 'I'\n self.object.save()\n return HttpResponseRedirect('/advertisingcategory')\n","sub_path":"financial/advertisingcategory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"346082820","text":"from django.shortcuts import render\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.urls import reverse\nfrom django import forms\nfrom django.http import JsonResponse\nimport json\n\nfrom .models import User, Song, Book, Song_Book\n\n# Get the list of all the songs\ndef index(request):\n\n songs = Song.objects.all()\n return render(request, 'database/index.html', {\n \"songs\":songs\n })\n\n# The Login View\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n # Redirecto to a success page.\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n # return an 'invalid login' error message\n return render(request, \"database/login.html\", {\n \"message\": \"Invalid username and/or password. 
\"\n })\n else:\n return render(request, \"database/login.html\")\n\n# The Logout function\ndef logout_view(request):\n logout(request)\n # redirect to a success page\n return HttpResponseRedirect(reverse(\"index\"))\n\n# The register view\ndef register_view(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"database/register.html\", {\n \"message\":\"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"database/register.html\", {\n \"message\":\"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n\n else:\n return render(request, \"database/register.html\")\n\n# View a single song\ndef song_view(request, id):\n\n song = Song.objects.get(id=id)\n favourite = False\n\n # If user is logged in, return the favourite status in JSON payload\n if request.user.is_authenticated:\n user = User.objects.get(id=request.user.id)\n favourite = user.favourites.filter(id=id).exists()\n\n return render(request, \"database/song.html\", {\n \"song\":song,\n \"form\":SongForm(),\n \"favourite\":favourite\n })\n\n# View favourites\n@login_required\ndef favourites_view(request):\n\n # Get the user object\n user = User.objects.get(id=request.user.id)\n\n # Get all the song and relevant info\n favourites = user.favourites.all()\n\n return render(request, \"database/favourites.html\", {\n \"favourites\":favourites,\n })\n\n# Edit a song\n@login_required\ndef edit(request, id):\n if request.method==\"POST\":\n # Take in the data the user submitted and save it as form\n form = SongForm(request.POST)\n\n # Check if form is valid\n if form.is_valid():\n\n # Isolate the data from the 'cleaned' version\n title = form.cleaned_data[\"title\"]\n author = form.cleaned_data[\"author\"]\n composer = form.cleaned_data[\"composer\"]\n key = form.cleaned_data[\"key\"]\n meter = form.cleaned_data[\"meter\"]\n year = form.cleaned_data[\"year\"]\n year = int(year) if year != None else None\n content = form.cleaned_data[\"content\"]\n\n try:\n song = Song.objects.get(id=id)\n song.title = title\n song.author = author\n song.composer = composer\n song.key = key\n song.meter = meter\n song.year = year\n song.content = content\n\n song.save()\n\n return HttpResponseRedirect(reverse(\"song\", args=(song.id,)))\n except Exception as e:\n print(e)\n print(\"caught exception\")\n return HttpResponse(e)\n \n # If form is invalid\n print(\"Form is invalid\")\n return render(request, \"database/add.html\", {\n \"id\":id,\n \"form\":form,\n \"route\":\"edit\",\n \"title\":\"Edit Song\"\n })\n \n # GET request\n else:\n song = Song.objects.get(id=id)\n\n form = SongForm(initial={\n \"title\": song.title,\n \"author\":song.author,\n \"composer\":song.composer,\n \"meter\":song.meter,\n \"key\":song.key,\n \"year\":song.year,\n \"content\":song.content\n })\n \n return render(request, \"database/add.html\",{ \n \"id\":id,\n \"form\":form,\n \"route\":\"edit\",\n \"title\":\"Edit Song\"\n })\n\n# Add a new song\n@login_required\ndef add(request):\n\n if request.method==\"POST\":\n # Take in the data the user submitted and save it as form\n form = SongForm(request.POST)\n\n # Check if form is valid\n if 
form.is_valid():\n\n # Isolate the data from the 'cleaned' version\n title = form.cleaned_data[\"title\"]\n author = form.cleaned_data[\"author\"]\n composer = form.cleaned_data[\"composer\"]\n key = form.cleaned_data[\"key\"]\n meter = form.cleaned_data[\"meter\"]\n year = form.cleaned_data[\"year\"]\n year = int(year) if year != None else None\n content = form.cleaned_data[\"content\"]\n\n try:\n song = Song.objects.create(\n title=title, \n author=author, \n composer=composer, \n key=key, \n meter=meter,\n year=year,\n content=content)\n song.save()\n\n # Link back to the page\n return HttpResponseRedirect(reverse(\"song\", args=(song.id,)))\n except Exception as e:\n print(e)\n return HttpResponse(e)\n \n # If form is invalid\n return render(request, \"database/add.html\", {\n \"form\":form,\n \"title\": \"Add Song\",\n })\n\n return render(request, \"database/add.html\", {\n \"form\":SongForm(),\n \"title\": \"Add Song\",\n })\n\n# JSON Response to get all the songs\ndef fetch_songs(request):\n\n # Return a list of songs\n return JsonResponse({\n \"success\":\"Success\",\n \"song_list\":[s.mini_serialize() for s in Song.objects.all()]\n })\n\n# Add a new book\n@login_required\ndef add_book(request):\n\n if request.method==\"POST\":\n # Take in the data the user submitted and save it as form\n form = BookForm(request.POST)\n\n # Check if form is valid\n if form.is_valid():\n # Isolate the data from the 'cleaned' version\n title = form.cleaned_data[\"title\"]\n year = form.cleaned_data[\"year\"]\n\n try:\n book = Book.objects.create(\n title=title,\n year=year\n )\n book.save()\n return HttpResponseRedirect(reverse(\"index\"))\n\n except Exception as e:\n print(e)\n return HttpResponse(e)\n\n # If form is invalid\n return render(request, \"database/add.html\", {\n \"form\":form,\n \"title\": \"New Book\",\n \"route\": \"add_book\"\n })\n\n return render(request, \"database/add.html\", {\n \"title\": \"New Book\",\n \"form\":BookForm(),\n \"route\": \"add_book\"\n })\n\n# View a book with all it's songs\ndef book_view(request, id):\n\n book = Book.objects.get(id=id)\n\n # Get all the song and relevant info\n songsinbook = Song_Book.objects.filter(book=book).select_related()\n\n return render(request, \"database/book.html\", {\n \"songsinbook\":songsinbook,\n \"book\":book,\n })\n\n# View all the books\ndef books_view(request):\n\n books = Book.objects.all()\n\n return render(request, \"database/books.html\", {\n \"books\":books\n })\n\n# Edit a book\n@login_required\ndef book_edit(request, id):\n\n book = Book.objects.get(id=id)\n # Get all the song and relevant info\n songsinbook = Song_Book.objects.filter(book=book).select_related()\n\n return render(request, \"database/book_add.html\", {\n \"songsinbook\":songsinbook,\n \"songs\":Song.objects.all(),\n \"book\":book\n })\n\n# JSON Request to put a song in a book\n@login_required\n@csrf_exempt\ndef song_to_book(request):\n if request.method != \"POST\":\n print(request)\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n\n # Get new post data\n data = json.loads(request.body)\n\n # Get the book to change\n book = Book.objects.get(id=data.get(\"book_id\"))\n\n # Get the song to change\n song = Song.objects.get(id=data.get(\"song_id\"))\n\n # Add or remove the song from the book\n try:\n if data.get(\"method\") == \"add\":\n book.songs.add(song)\n elif data.get(\"method\") == \"remove\":\n book.songs.remove(song)\n book.save()\n\n except Exception as e:\n return JsonResponse({\n \"fail\":e\n })\n\n # Return 
JsonResponse of new book list\n return JsonResponse({\n \"success\":\"Adding song to book\",\n \"songs\":[book.mini_serialize() for book in book.songs.all()],\n \"song\":song.mini_serialize()\n })\n\n# Delete a book\n@login_required\n@csrf_exempt\ndef book_delete(request, id):\n book = Book.objects.get(id=id).delete()\n\n return HttpResponseRedirect(reverse(\"books\"))\n\n# JSON Request to favourite a book\n@login_required\n@csrf_exempt\ndef favourite(request):\n if request.method != \"POST\":\n print(request)\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n\n # Get new post data\n data = json.loads(request.body)\n\n # Get the user\n user = User.objects.get(id=request.user.id)\n\n # Get the song\n song = Song.objects.get(id = int(data.get('song_id')))\n\n # Add or remove the song from the user's favourites\n try:\n if data.get(\"method\") == \"favourite\":\n user.favourites.add(song)\n elif data.get(\"method\") == \"unfavourite\":\n user.favourites.remove(song)\n user.save()\n\n except Exception as e:\n return JsonResponse({\n \"fail\":str(e)\n })\n\n # Return JsonResponse of new favourites list\n return JsonResponse({\n \"success\":\"Success\",\n \"id\": data.get(\"song_id\"),\n \"favourite\": user.favourites.filter(id=song.id).exists(),\n \"favourites_list\": [song.mini_serialize() for song in user.favourites.all()]\n })\n\n# JSON request to grab the sidebar data\ndef sidebar(request):\n\n favourites = False\n # Only send favourites data if user is logged in\n if request.user.is_authenticated:\n user = User.objects.get(id=request.user.id)\n favourites = [song.mini_serialize() for song in user.favourites.all()]\n\n return JsonResponse({\n \"books\":[book.serialize() for book in Book.objects.all()],\n \"favourites\": favourites\n })\n\n\n \n# DJANGO Form for Book Class\nclass BookForm(forms.Form):\n title = forms.CharField(label='Title', max_length=100, widget=forms.TextInput(attrs={'class': 'col-md-6 form-control'}))\n year = forms.IntegerField(label='Year', required=False, widget=forms.TextInput(attrs={'class':'col-md-6 form-control'}))\n\n# DJANGO Form for Song Class\nclass SongForm(forms.Form):\n title = forms.CharField(label='Title', max_length=100, widget=forms.TextInput(attrs={'class': 'col-md-6 form-control'}))\n author = forms.CharField(label='Author', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'col-md-6 form-control'}))\n composer = forms.CharField(label='Composer', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'col-md-6 form-control'}))\n meter = forms.CharField(label='Meter', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'col-md-6 form-control'}))\n key = forms.CharField(label='Key', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'col-md-6 form-control'}))\n year = forms.IntegerField(label='Year', required=False, widget=forms.TextInput(attrs={'class':'col-md-6 form-control'}))\n content = forms.CharField(label='Content', max_length=1024, widget=forms.Textarea(attrs={'class':'form-control'}))\n\n\n\n ","sub_path":"database/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"449895013","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can 
redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import osv,fields\nfrom openerp import SUPERUSER_ID, api\nimport os\nimport datetime\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass rhwl_colorectal_sample_info(osv.osv):\n _name = \"rhwl.colorectal.sample.info\"\n\n # 实验数据\n ANALYSIS_PATH = \"/data/odoo/remote/colorectal/forAnalyse/\"\n # 分析结果\n RESULT_PATH = \"/data/odoo/remote/colorectal/forAnalyse/result\"\n # 样本数据\n DATA_PATH = \"/data/odoo/remote/colorectal/data/\"\n # 样本信息\n SAMPLE_PATH = \"/data/odoo/remote/colorectal/sample/\"\n # 样本报告\n REPORT_PATH = \"/data/odoo/remote/colorectal/report/\"\n\n STATE_SELECT_LIST = [\n (\"cancel\", u\"检测取消\"),\n (\"reuse\", u\"需重采\"),\n (\"draft\", u\"草稿\"),\n (\"confirm\", u\"确认\"),\n ('dna_except', u'DNA质检不合格'),\n ('dna_ok', u\"DNA质检合格\"),\n ('ok', u'分析结果接收'),\n ('report', u'生成报告中'),\n ('report_done', u\"报告已生成\"),\n (\"result_done\", u\"风险报告确认\"),\n (\"deliver\", u\"印刷厂已接收\"),\n (\"report_send\", u\"已发出\"),\n ('done', u'客户已收货')\n ]\n\n TEST_RESULT = [\n (\"0\", u\"阴性\"),\n (\"1\", u\"阳性\"),\n (\"KRAS ref no Amp\", \"KRAS ref no Amp\"),\n (\"MET ref no Amp\", \"MET ref no Amp\")\n ]\n\n _columns = {\n \"name\": fields.char(u\"样本编号\", required=True, size=15, copy=False),\n \"batch_no\": fields.char(u\"批次\"),\n \"pdf_file\": fields.char(u\"检测报告\", size=100),\n \"cust_name\": fields.char(u\"姓名\", size=50),\n \"sex\": fields.selection([('M', u\"男\"), ('F', u\"女\")], u\"性别\"),\n \"medical_card_no\": fields.char(u\"诊疗卡号\", size=20),\n \"identity\": fields.char(u\"身份证号\", size=18),\n \"sampling_date\": fields.date(u\"采样日期\"),\n \"state_id\": fields.many2one(\"res.country.state\", string=u'省'),\n \"city_id\": fields.many2one(\"res.country.state.city\", string=u'市', domain=\"[('state_id','=',state_id)]\"),\n \"area_id\": fields.many2one(\"res.country.state.city.area\", string=u\"区/县\", domain=\"[('city_id','=',city_id)]\"),\n \"address\": fields.char(u\"联系\", size=100),\n \"mobile\": fields.char(u\"手机号码\", size=15),\n \"height\": fields.integer(u\"身高cm\"),\n \"weight\": fields.float(u\"体重kg\"),\n \"age\": fields.integer(u\"年龄\"),\n \"hospital\": fields.many2one('res.partner', string=u'送检机构', domain=\"[('is_company', '=', True), ('customer', '=', True)]\", required=True),\n \"receiv_date\": fields.date(u\"收样日期\"),\n \"is_reuse\": fields.boolean(u\"重采\"),\n \"is_free\": fields.boolean(u\"免费\"),\n \"state\": fields.selection(STATE_SELECT_LIST, u\"状态\"),\n \"note\": fields.text(u\"备注\"),\n \"contant_user\": fields.char(u\"送样人\", size=10),\n \"to_publish\": fields.boolean(u\"位点/报告可对外发布\"),\n \"express_comp\": fields.char(u\"快递公司\", size=10),\n \"express_no\": fields.char(u\"快递单号\", size=15),\n \"u8_order_no\": fields.char(u\"U8订单号\", size=20),\n \"u8_send_no\": fields.char(u\"U8送货单号\", size=20),\n \"sample_type\": fields.selection([('1', u\"粪便\")], u\"样本类型\"),\n \"qcdesc\": fields.selection([('0', u\"不合格\"), 
('1', u\"合格\")], u\"质检结果\"),\n\n \"test_result\": fields.selection(TEST_RESULT, u\"检测结果\"),\n\n \"q1_0\": fields.boolean(u\"大便习惯发生改变\"),\n \"q1_1\": fields.boolean(u\"腹泻\"),\n \"q1_2\": fields.boolean(u\"便秘\"),\n \"q1_3\": fields.boolean(u\"腹痛\"),\n \"q1_4\": fields.boolean(u\"便血\"),\n \"q1_5\": fields.boolean(u\"粘液便\"),\n \"q1_6\": fields.boolean(u\"其他\"),\n \"q1_7\": fields.char(u\"其他\", size=40),\n\n \"risk_q1_0\": fields.boolean(u\"无\"),\n \"risk_q1_1\": fields.boolean(u\"有\"),\n\n \"risk_q2_0\": fields.boolean(u\"无\"),\n \"risk_q2_1\": fields.boolean(u\"有\"),\n \"risk_q2_age\": fields.integer(u\"发病年龄\",),\n \"risk_q2_state_0\": fields.boolean(u\"否\"),\n \"risk_q2_state_1\": fields.boolean(u\"是\"),\n\n \"risk_q3_0\": fields.boolean(u\"无\"),\n \"risk_q3_1\": fields.boolean(u\"有\"),\n \"risk_q3_age\": fields.integer(u\"发病年龄\", ),\n \"risk_q3_state_0\": fields.boolean(u\"否\"),\n \"risk_q3_state_1\": fields.boolean(u\"是\"),\n\n \"risk_q4_0\": fields.boolean(u\"无\"),\n \"risk_q4_1\": fields.boolean(u\"有\"),\n \"risk_q4_age\": fields.integer(u\"发病年龄\", ),\n \"risk_q4_state_0\": fields.boolean(u\"否\"),\n \"risk_q4_state_1\": fields.boolean(u\"是\"),\n\n \"active\": fields.boolean(\"Active\"),\n \"log_line\": fields.one2many(\"rhwl.colorectal.sample.info.log\", \"log_line\", string=u\"操作日志\"),\n }\n\n _sql_constraints = [\n ('rhwl_colorectal_sample_info_name_uniq', 'unique(name)', u'样本编号不能重复!'),\n ]\n\n _defaults = {\n \"active\": True,\n \"state\": \"draft\",\n }\n\n @api.onchange(\"risk_q1_0\")\n def onchange_risk_q1_0(self):\n if self.risk_q1_0:\n self.risk_q1_1 = False\n\n @api.onchange(\"risk_q1_1\")\n def onchange_risk_q1_1(self):\n if self.risk_q1_1:\n self.risk_q1_0 = False\n\n @api.onchange(\"risk_q2_0\")\n def onchange_risk_q2_0(self):\n if self.risk_q2_0:\n self.risk_q2_1 = False\n\n @api.onchange(\"risk_q2_1\")\n def onchange_risk_q2_1(self):\n if self.risk_q2_1:\n self.risk_q2_0 = False\n\n @api.onchange(\"risk_q3_0\")\n def onchange_risk_q3_0(self):\n if self.risk_q3_0:\n self.risk_q3_1 = False\n\n @api.onchange(\"risk_q3_1\")\n def onchange_risk_q3_1(self):\n if self.risk_q3_1:\n self.risk_q3_0 = False\n\n @api.onchange(\"risk_q4_0\")\n def onchange_risk_q4_0(self):\n if self.risk_q4_0:\n self.risk_q4_1 = False\n\n @api.onchange(\"risk_q4_1\")\n def onchange_risk_q4_1(self):\n if self.risk_q4_1:\n self.risk_q4_0 = False\n\n def create(self, cr, uid, val, context=None):\n val[\"log_line\"] = [[0, 0, {\"note\": u\"资料新增\", \"data\": \"create\"}]]\n res = super(rhwl_colorectal_sample_info, self).create(cr, uid, val, context=context)\n return res\n\n def write(self, cr, uid, id, val, context=None):\n if not context:\n context = {}\n if val.has_key(\"state\"):\n sel = dict(fields.selection.reify(cr, uid, self, self._columns['state'], context=context))\n val[\"log_line\"] = [[0, 0, {\"note\": u\"状态变更为:\" + sel.get(val.get(\"state\")), \"data\": val.get(\"state\"),\"user_id\":context.get(\"user_id\",uid)}]]\n res = super(rhwl_colorectal_sample_info, self).write(cr, uid, id, val, context=context)\n return res\n\n def get_selection_columns(self, cr, uid, column_name, reverse=False, context=None):\n sel = dict(fields.selection.reify(cr, uid, self, self._columns[column_name], context=context))\n if reverse:\n sel = {value:key for key, value in sel.items()}\n return sel\n\n def action_view_pdf(self, cr, uid, ids, context=None):\n pass\n\n def action_state_cancel(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {\"state\": \"cancel\"}, context=context)\n\n def 
action_state_draft(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {\"state\": \"draft\"}, context=context)\n\n def action_state_confirm(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {\"state\": \"confirm\"}, context=context)\n\n def action_state_reuse(self, cr, uid, ids, context=None):\n if isinstance(ids, (long, int)):\n ids = [ids]\n for i in ids:\n self.pool.get(\"rhwl.colorectal.sample.info.reuse\").create(cr, uid, {\"name\": i})\n self.write(cr, uid, i, {\"state\": \"reuse\"})\n\n # 抓取生信分析结果\n def get_analysis_result_from_server(self, cr, uid, context=None):\n\n # 测试路径\n # module_path = os.path.split(os.path.split(__file__)[0])[0]\n # target_path = os.path.join(module_path, \"upload\" + os.sep)\n # self.RESULT_PATH = target_path\n\n for f in os.listdir(self.RESULT_PATH):\n if os.path.isdir(os.path.join(self.RESULT_PATH, f)): continue\n\n with open(os.path.join(self.RESULT_PATH, f), \"r\") as analysis_file:\n s = analysis_file.readlines()\n file_list = [y.split(\"\\r\") for y in [x.rstrip(\"\\r\") for x in s]]\n for list in file_list[0]:\n val = list.split(\"\\t\")\n ids = self.search(cr, SUPERUSER_ID, [(\"name\", \"=\", val[0])])\n if ids and val[1]:\n self.write(cr, SUPERUSER_ID, ids, {\"test_result\": str(val[1]).rstrip(), \"state\": \"ok\"})\n os.remove(os.path.join(self.RESULT_PATH, f))\n\n # 导出样本信息到服务器\n def output_sample_data_to_server(self, cr, uid, context=None):\n ids = self.search(cr, uid, [(\"state\", \"=\", \"ok\"), (\"qcdesc\", \"=\", \"1\")], context=context)\n if not ids:\n return\n\n # 测试路径\n # module_path = os.path.split(os.path.split(__file__)[0])[0]\n # target_path = os.path.join(module_path, \"upload\" + os.sep)\n # self.SAMPLE_PATH = self.DATA_PATH = target_path\n\n for sample_obj in self.browse(cr, uid, ids, context=context):\n sample_type = self.get_selection_columns(cr, uid, 'sample_type', context=context).get(sample_obj.sample_type, \"\").encode(\"utf-8\")\n hospital = sample_obj.hospital.name.split('(')[0].split(u'(')[0] if sample_obj.hospital.name else \"\"\n factors1 = 1 if any([sample_obj.q1_0, sample_obj.q1_1, sample_obj.q1_2, sample_obj.q1_3, sample_obj.q1_4, sample_obj.q1_5]) else 0\n\n sample_info = {\n \"barcode\": sample_obj.name.encode(\"utf-8\"),\n \"org\": hospital,\n \"name\": sample_obj.cust_name if sample_obj.cust_name else \"\",\n \"age\": sample_obj.age if sample_obj.age else \"\",\n \"gender\": sample_obj.sex if sample_obj.sex else \"\",\n \"sampleType\": sample_type,\n \"qcdesc\": u\"合格\",\n \"height\": sample_obj.height if sample_obj.height else \"\",\n \"weight\": sample_obj.weight if sample_obj.weight else \"\",\n \"factors1\": factors1,\n \"factors2\": 1 if sample_obj.risk_q1_1 else 0,\n \"factors3\": 1 if sample_obj.risk_q2_1 else 0,\n \"factors4\": 1 if sample_obj.risk_q3_1 else 0,\n \"factors5\": 1 if sample_obj.risk_q4_1 else 0,\n }\n data_info = {\n \"result\": sample_obj.test_result if sample_obj.test_result else \"\",\n }\n\n sample_info = sorted(sample_info.iteritems(), key=lambda x: x[0], reverse=False)\n data_info = sorted(data_info.iteritems(), key=lambda x: x[0], reverse=False)\n\n sample_file = os.path.join(self.SAMPLE_PATH, \"colorectal_\" + sample_obj.name) + \"_\" + datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") + \".csv\"\n data_file = os.path.join(self.DATA_PATH, sample_obj.name) + \".txt\"\n\n # 文件按列方式写\n with open(sample_file, \"w+\") as fp_sample, open(data_file, \"w+\") as fp_data:\n for info in sample_info:\n fp_sample.write(\"%s\\t%s\\n\" % (info[0], info[1]))\n 
fp_sample.close()\n os.system(\"chmod 777 \" + sample_file)\n for info in data_info:\n fp_data.write(\"%s\\t%s\\n\" % (info[0], info[1]))\n fp_data.close()\n os.system(\"chmod 777 \" + data_file)\n\n val = {\"state\": \"report\", \"log\": [[0, 0, {\"note\": u\"生成报告中\", \"data\": \"report\"}]]}\n self.write(cr, SUPERUSER_ID, sample_obj.id, val, context=context)\n\n def create_sale_order(self, cr, uid, ids, context=None):\n pass\n\n\nclass rhwl_colorectal_sample_info_log(osv.osv):\n _name = \"rhwl.colorectal.sample.info.log\"\n _order = \"date desc\"\n _columns = {\n \"log_line\": fields.many2one(\"rhwl.colorectal.sample.info\", \"Parent ID\", select=True),\n \"date\": fields.datetime(u\"时间\"),\n \"user_id\": fields.many2one(\"res.users\", u\"操作人员\"),\n \"note\": fields.text(u\"作业说明\"),\n \"data\": fields.char(\"Data\")\n }\n\n _defaults = {\n \"date\": fields.datetime.now,\n \"user_id\": lambda obj, cr, uid, context: uid,\n }\n\n\n\nclass rhwl_colorectal_sample_info_reuse(osv.osv):\n _name = \"rhwl.colorectal.sample.info.reuse\"\n _inherit = ['ir.needaction_mixin']\n\n _order = \"id desc\"\n\n _columns = {\n \"name\": fields.many2one(\"rhwl.colorectal.sample.info\", u\"样本单号\", required=True, ondelete=\"restrict\"),\n \"cust_name\": fields.related('name', 'cust_name', type='char', string=u'客户姓名', readonly=1),\n \"receiv_date\": fields.related('name', 'receiv_date', type='char', string=u'收样日期', readonly=1),\n \"mobile\": fields.related('name', 'mobile', type='char', string=u'手机号码', readonly=1),\n \"hospital\": fields.related('name', 'hospital', relation=\"res.partner\", type='many2one', string=u'送检机构', readonly=1, store=True),\n \"new_name\": fields.many2one(\"rhwl.easy.genes.new\", u\"新样本编号\"),\n \"notice_user\": fields.many2one(\"res.users\", u\"通知人员\"),\n \"notice_date\": fields.date(u\"通知日期\"),\n \"reuse_note\": fields.char(u\"重采原因\", size=200),\n \"note\": fields.text(u\"客户说明及备注\"),\n \"state\": fields.selection([(\"draft\", u\"未通知\"), (\"done\", u\"已通知\"), (\"re_done\", u\"重复通知\"), (\"cancel\", u\"客户放弃\"), (\"reuse\", u\"已重采血\")], u\"状态\"),\n\n }\n _sql_constraints = [\n ('rhwl_colorectal_sample_info_reuse_name_uniq', 'unique(name)', u'样品编号不能重复!'),\n ]\n _defaults = {\n \"state\": lambda obj, cr, uid, context: \"draft\",\n }\n\n def _needaction_domain_get(self, cr, uid, context=None):\n return [('state', '=', 'draft')]\n\n def action_state_done(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state': 'done', 'notice_user': uid}, context=context)\n\n def action_state_cancel(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state': 'cancel', 'notice_user': uid}, context=context)\n\n def action_state_reuse(self, cr, uid, ids, context=None):\n self.write(cr, uid, ids, {'state': 'reuse'}, context=context)\n\n","sub_path":"data/odoo/8.0/rhwl_tumor_detection/models/rhwl_colorectal.py","file_name":"rhwl_colorectal.py","file_ext":"py","file_size_in_byte":15543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"517246442","text":"import numpy as np\nimport pandas as pd\nfrom functions import viscoPlastic2D\nimport sys\n\nworkdir = '/home/miguel/Documents/tese/ViscoPlastic-ML/2D/dataset/results/'\nprint ('Chaboche Constitutive Viscoplasticity Model')\n\n# trials = ['xy']\ntrials = ['xx', 'yy', 'xy']\n\n# number of time points\nn = int(sys.argv[1])\n# Time points\nt = np.linspace(0, int(sys.argv[2]), n)\n# initial conditions - inelastic strain / X / R\nz0 = [0, 0, 0, 0, 0, 0, 50.0, 0]\n#\nEmaxs = []\nfor k in 
range(len(sys.argv)):\n if (len(sys.argv) - k) == 3:\n break\n Emaxs.append(float(sys.argv[k+3]))\n\nfor Emax in Emaxs:\n for trial in trials:\n print ('Trial: ', trial, 'Emax: ', Emax)\n # Define material parameters for viscoplastic behaviour\n # E, v, R1, k, K, a, b, c, n, time_points, trial\n # # Steel 316 20C\n # model = viscoPlastic2D(200000.0, 0.3, 436.0, 80.0, 85.2, 93.57, 21.3, 843, 4.55, n, trial)\n model = viscoPlastic2D(5000.0, 0.3, 500.0, 0.0, 50.0, 7500.0,\n 0.6, 100.0, 3.0, n, trial, Emax)\n\n # Solve Chaboche's 1D model with given material parameters\n model.solve(n, z0, t)\n # Calculate Elastic strain\n model.Ee = model.ET - model.Ei\n # Save Results to csv file\n df = pd.DataFrame({\"ET11\": model.ET[:, 0], \"ET22\": model.ET[:, 1], \"ET12\": model.ET[:, 2],\n \"Ei11\": model.Ei[:, 0], \"Ei22\": model.Ei[:, 1], \"Ei12\": model.Ei[:, 2],\n \"dEi11\": model.dEi[:, 0], \"dEi22\": model.dEi[:, 1], \"dEi12\": model.dEi[:, 2],\n \"X11\": model.X[:, 0], \"X22\": model.X[:, 1], \"X12\": model.X[:, 2],\n \"dX11\": model.dX[:, 0], \"dX22\": model.dX[:, 1], \"dX12\": model.dX[:, 2],\n \"pStrain\": model.p, \"R\": model.R, \"dpStrain\": model.dp, \"dR\": model.dR,\n \"S11\": model.stress[:, 0], \"S22\": model.stress[:, 1], \"S12\": model.stress[:, 2],\n \"Ee11\": model.Ee[:, 0], \"Ee22\": model.Ee[:, 1], \"Ee12\": model.Ee[:, 2],\n \"Time\": t})\n\n df.to_csv(workdir + \"data_\" + str(Emax) +\"_\" + trial + \".csv\",\n float_format='%.5f', index=False)\n # break\n\nprint ('Dataset Generated')\n","sub_path":"2D/dataset/src/viscoplasticity_model.py","file_name":"viscoplasticity_model.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"466810584","text":"\"\"\"\nThis module defines base classes for the IC cities. The classes are:\nCity: Handles input and output files, compression, and access to data base\nDeconvolutionCity: A City that performs deconvolution of the PMT RWFs\nCalibratedCity: A DeconvolutionCity that perform the calibrated sum of the\n PMTs and computes the calibrated signals in the SiPM plane.\nPmapCity: A CalibratedCity that computes S1, S2 and S2Si that togehter\n constitute a PMAP.\nSensorResponseCity: A city that describes sensor response\n\nAuthors: J.J. Gomez-Cadenas and J. Generowicz.\nFeburary, 2017.\n\"\"\"\n\nimport sys\nfrom textwrap import dedent\n\nimport numpy as np\n\nfrom .. core.configure import print_configuration\nfrom .. core.exceptions import NoInputFiles\nfrom .. core.exceptions import NoOutputFile\nfrom .. core.system_of_units_c import units\nfrom .. core import fit_functions as fitf\n\nfrom .. 
database import load_db\n\nfrom ..reco import peak_functions_c as cpf\nfrom ..reco import peak_functions as pf\nfrom ..reco import pmaps_functions as pmp\nfrom ..reco import pmap_io as pio\nfrom ..reco import tbl_functions as tbf\nfrom ..reco import wfm_functions as wfm\nfrom ..reco.dst_io import PointLikeEvent\nfrom ..reco.nh5 import DECONV_PARAM\nfrom ..reco.corrections import Correction\nfrom ..reco.corrections import Fcorrection\n\nfrom ..sierpe import blr\nfrom ..sierpe import fee as FE\n\nif sys.version_info >= (3,5):\n # Exec to avoid syntax errors in older Pythons\n exec(\"\"\"def merge_two_dicts(a,b):\n return {**a, **b}\"\"\")\nelse:\n def merge_two_dicts(a,b):\n c = a.copy()\n c.update(b)\n return c\n\n\nclass City:\n \"\"\"Base class for all cities.\n An IC city consumes data stored in the input_files and produce new data\n which is stored in the output_file. In addition to setting input and\n output files, the base class sets the print frequency and accesses\n the data base, storing as attributed several calibration coefficients\n\n \"\"\"\n\n def __init__(self,\n run_number = 0,\n files_in = None,\n file_out = None,\n compression = 'ZLIB4',\n nprint = 10000):\n\n self.run_number = run_number\n self.nprint = nprint # default print frequency\n self.input_files = files_in\n self.output_file = file_out\n self.compression = compression\n # access data base\n DataPMT = load_db.DataPMT (run_number)\n DataSiPM = load_db.DataSiPM(run_number)\n self.det_geo = load_db.DetectorGeo()\n\n # This is JCK-1: text reveals symmetry!\n self.xs = DataSiPM.X.values\n self.ys = DataSiPM.Y.values\n self.pmt_active = np.nonzero(DataPMT.Active.values)[0].tolist()\n self.adc_to_pes = abs(DataPMT.adc_to_pes.values).astype(np.double)\n self.sipm_adc_to_pes = DataSiPM.adc_to_pes.values .astype(np.double)\n self.coeff_c = DataPMT.coeff_c.values .astype(np.double)\n self.coeff_blr = DataPMT.coeff_blr.values .astype(np.double)\n self.noise_rms = DataPMT.noise_rms.values .astype(np.double)\n\n @property\n def monte_carlo(self):\n return self.run_number <= 0\n\n def check_files(self):\n if not self.input_files:\n raise NoInputFiles('input file list is empty, must set before running')\n if not self.output_file:\n raise NoOutputFile('must set output file before running')\n\n def set_print(self, nprint=1000):\n \"\"\"Print frequency.\"\"\"\n self.nprint = nprint\n\n def set_input_files(self, input_files):\n \"\"\"Set the input files.\"\"\"\n self.input_files = input_files\n\n def set_output_file(self, output_file):\n \"\"\"Set the input files.\"\"\"\n self.output_file = output_file\n\n def set_compression(self, compression):\n \"\"\"Set the input files.\"\"\"\n self.compression = compression\n\n def conditional_print(self, evt, n_events_tot):\n if n_events_tot % self.nprint == 0:\n print('event in file = {}, total = {}'\n .format(evt, n_events_tot))\n\n def display_IO_info(self, nmax):\n print(\"\"\"\n {} will run a max of {} events\n Input Files = {}\n Output File = {}\n \"\"\".format(self.__class__.__name__,\n nmax, self.input_files, self.output_file))\n\n def print_configuration(self, sp):\n print_configuration({\"# PMT\" : sp.NPMT,\n \"PMT WL\" : sp.PMTWL,\n \"# SiPM\" : sp.NSIPM,\n \"SIPM WL\" : sp.SIPMWL})\n\n\nclass SensorResponseCity(City):\n \"\"\"A SensorResponseCity city extends the City base class adding the\n response (Monte Carlo simulation) of the energy plane and\n tracking plane sensors (PMTs and SiPMs).\n \"\"\"\n\n def __init__(self,\n run_number = 0,\n files_in = None,\n file_out = None,\n 
compression = 'ZLIB4',\n nprint = 10000,\n # Parameters added at this level\n sipm_noise_cut = 3 * units.pes,\n first_evt = 0):\n\n City.__init__(self,\n run_number = run_number,\n files_in = files_in,\n file_out = file_out,\n compression = compression,\n nprint = nprint)\n\n self.sipm_noise_cut = sipm_noise_cut\n self.first_evt = first_evt\n\n def set_sipm_noise_cut(self, noise_cut=3.0):\n \"\"\"Sets the SiPM noise cut (in PES)\"\"\"\n self.sipm_noise_cut = noise_cut\n\n def simulate_sipm_response(self, event, sipmrd,\n sipms_noise_sampler):\n \"\"\"Add noise with the NoiseSampler class and return\n the noisy waveform (in pes).\"\"\"\n # add noise (in PES) to true waveform\n dataSiPM = sipmrd[event] + sipms_noise_sampler.Sample()\n # return total signal in adc counts\n return wfm.to_adc(dataSiPM, self.sipm_adc_to_pes)\n\n def simulate_pmt_response(self, event, pmtrd):\n \"\"\" Full simulation of the energy plane response\n Input:\n 1) extensible array pmtrd\n 2) event_number\n\n returns:\n array of raw waveforms (RWF) obtained by convoluting pmtrd with the PMT\n front end electronics (LPF, HPF filters)\n array of BLR waveforms (only decimation)\n \"\"\"\n # Single Photoelectron class\n spe = FE.SPE()\n # FEE, with noise PMT\n fee = FE.FEE(noise_FEEPMB_rms=FE.NOISE_I, noise_DAQ_rms=FE.NOISE_DAQ)\n NPMT = pmtrd.shape[1]\n RWF = []\n BLRX = []\n\n for pmt in range(NPMT):\n # normalize calibration constants from DB to MC value\n cc = self.adc_to_pes[pmt] / FE.ADC_TO_PES\n # signal_i in current units\n signal_i = FE.spe_pulse_from_vector(spe, pmtrd[event, pmt])\n # Decimate (DAQ decimation)\n signal_d = FE.daq_decimator(FE.f_mc, FE.f_sample, signal_i)\n # Effect of FEE and transform to adc counts\n signal_fee = FE.signal_v_fee(fee, signal_d, pmt) * FE.v_to_adc()\n # add noise daq\n signal_daq = cc * FE.noise_adc(fee, signal_fee)\n # signal blr is just pure MC decimated by adc in adc counts\n signal_blr = cc * FE.signal_v_lpf(fee, signal_d) * FE.v_to_adc()\n # raw waveform stored with negative sign and offset\n RWF.append(FE.OFFSET - signal_daq)\n # blr waveform stored with positive sign and no offset\n BLRX.append(signal_blr)\n return np.array(RWF), np.array(BLRX)\n\n\n def store_FEE_table(self):\n \"\"\"Store the parameters of the EP FEE simulation.\"\"\"\n row = self.fee_table.row\n row[\"OFFSET\"] = FE.OFFSET\n row[\"CEILING\"] = FE.CEILING\n row[\"PMT_GAIN\"] = FE.PMT_GAIN\n row[\"FEE_GAIN\"] = FE.FEE_GAIN\n row[\"R1\"] = FE.R1\n row[\"C1\"] = FE.C1\n row[\"C2\"] = FE.C2\n row[\"ZIN\"] = FE.Zin\n row[\"DAQ_GAIN\"] = FE.DAQ_GAIN\n row[\"NBITS\"] = FE.NBITS\n row[\"LSB\"] = FE.LSB\n row[\"NOISE_I\"] = FE.NOISE_I\n row[\"NOISE_DAQ\"] = FE.NOISE_DAQ\n row[\"t_sample\"] = FE.t_sample\n row[\"f_sample\"] = FE.f_sample\n row[\"f_mc\"] = FE.f_mc\n row[\"f_LPF1\"] = FE.f_LPF1\n row[\"f_LPF2\"] = FE.f_LPF2\n row[\"coeff_c\"] = self.coeff_c\n row[\"coeff_blr\"] = self.coeff_blr\n row[\"adc_to_pes\"] = self.adc_to_pes\n row[\"pmt_noise_rms\"] = self.noise_rms\n row.append()\n self.fee_table.flush()\n\n @property\n def FE_t_sample(self):\n return FE.t_sample\n\n\nclass DeconvolutionCity(City):\n \"\"\"A Deconvolution city extends the City base class adding the\n deconvolution step, which transforms RWF into CWF.\n The parameters of the deconvolution are the number of samples\n used to compute the baseline (n_baseline) and the threshold to\n thr_trigger in the rising signal (thr_trigger)\n \"\"\"\n\n def __init__(self,\n run_number = 0,\n files_in = None,\n file_out = None,\n compression = 'ZLIB4',\n 
nprint = 10000,\n # Parameters added at this level\n n_baseline = 28000,\n thr_trigger = 5 * units.adc,\n acum_discharge_length = 5000):\n\n City.__init__(self,\n run_number = run_number,\n files_in = files_in,\n file_out = file_out,\n compression = compression,\n nprint = nprint)\n\n # BLR parameters\n self.n_baseline = n_baseline\n self.thr_trigger = thr_trigger\n self.acum_discharge_length = acum_discharge_length\n\n def write_deconv_params(self, ofile):\n group = ofile.create_group(ofile.root, \"DeconvParams\")\n\n table = ofile.create_table(group,\n \"DeconvParams\",\n DECONV_PARAM,\n \"deconvolution parameters\",\n tbf.filters(self.compression))\n\n row = table.row\n row[\"N_BASELINE\"] = self.n_baseline\n row[\"THR_TRIGGER\"] = self.thr_trigger\n row[\"ACUM_DISCHARGE_LENGTH\"] = self.acum_discharge_length\n table.flush()\n\n def set_blr(self, n_baseline, thr_trigger):\n \"\"\"Set the parameters of the Base Line Restoration (BLR)\"\"\"\n self.n_baseline = n_baseline\n self.thr_trigger = thr_trigger\n\n def deconv_pmt(self, RWF):\n \"\"\"Deconvolve the RWF of the PMTs\"\"\"\n return blr.deconv_pmt(RWF,\n self.coeff_c,\n self.coeff_blr,\n pmt_active = self.pmt_active,\n n_baseline = self.n_baseline,\n thr_trigger = self.thr_trigger,\n acum_discharge_length = self.acum_discharge_length)\n\n\nclass CalibratedCity(DeconvolutionCity):\n \"\"\"A calibrated city extends a DeconvCity, performing two actions.\n 1. Compute the calibrated sum of PMTs, in two flavours:\n a) csum: PMTs waveforms are equalized to photoelectrons (pes) and\n added\n b) csum_mau: waveforms are equalized to photoelectrons;\n compute a MAU that follows baseline and add PMT samples above\n MAU + threshold\n 2. Compute the calibrated signal in the SiPMs:\n a) equalize to pes;\n b) compute a MAU that follows baseline and keep samples above\n MAU + threshold.\n \"\"\"\n\n def __init__(self,\n run_number = 0,\n files_in = None,\n file_out = None,\n compression = 'ZLIB4',\n nprint = 10000,\n n_baseline = 28000,\n thr_trigger = 5 * units.adc,\n acum_discharge_length = 5000,\n # Parameters added at this level\n n_MAU = 100,\n thr_MAU = 3.0 * units.adc,\n thr_csum_s1 = 0.2 * units.pes,\n thr_csum_s2 = 1.0 * units.pes,\n n_MAU_sipm = 100,\n thr_sipm = 5.0 * units.pes):\n\n DeconvolutionCity.__init__(self,\n run_number = run_number,\n files_in = files_in,\n file_out = file_out,\n compression = compression,\n nprint = nprint,\n n_baseline = n_baseline,\n thr_trigger = thr_trigger,\n acum_discharge_length = acum_discharge_length)\n\n # Parameters of the PMT csum.\n self.n_MAU = n_MAU\n self.thr_MAU = thr_MAU\n self.thr_csum_s1 = thr_csum_s1\n self.thr_csum_s2 = thr_csum_s2\n\n # Parameters of the SiPM signal\n self.n_MAU_sipm = n_MAU_sipm\n self. thr_sipm = thr_sipm\n\n def set_csum(self,\n n_MAU = 100,\n thr_MAU = 3 * units.adc,\n thr_csum_s1 = 0.2 * units.pes,\n thr_csum_s2 = 1.0 * units.pes):\n \"\"\"Set CSUM parameters.\"\"\"\n self. n_MAU = n_MAU\n self.thr_MAU = thr_MAU\n self.thr_csum_s1 = thr_csum_s1\n self.thr_csum_s2 = thr_csum_s2\n\n def set_sipm(self, n_MAU_sipm=100, thr_sipm=5*units.pes):\n \"\"\"Cutoff for SiPMs.\"\"\"\n self. thr_sipm = thr_sipm\n self.n_MAU_sipm = n_MAU_sipm\n\n def calibrated_pmt_sum(self, CWF):\n \"\"\"Return the csum and csum_mau calibrated sums.\"\"\"\n return cpf.calibrated_pmt_sum(CWF,\n self.adc_to_pes,\n pmt_active = self.pmt_active,\n n_MAU = self. 
n_MAU ,\n thr_MAU = self.thr_MAU )\n\n def csum_zs(self, csum, threshold):\n \"\"\"Zero Suppression over csum\"\"\"\n return cpf.wfzs(csum, threshold=threshold)\n\n def calibrated_signal_sipm(self, SiRWF):\n \"\"\"Return the calibrated signal in the SiPMs.\"\"\"\n return cpf.signal_sipm(SiRWF,\n self.sipm_adc_to_pes,\n thr = self. thr_sipm,\n n_MAU = self.n_MAU_sipm)\n\n\nclass PmapCity(CalibratedCity):\n \"\"\"A PMAP city extends a CalibratedCity, computing the S1, S2 and S2Si\n objects that togehter constitute a PMAP.\n\n \"\"\"\n\n def __init__(self,\n run_number = 0,\n files_in = None,\n file_out = None,\n compression = 'ZLIB4',\n nprint = 10000,\n n_baseline = 28000,\n thr_trigger = 5 * units.adc,\n acum_discharge_length = 5000,\n n_MAU = 100,\n thr_MAU = 3.0 * units.adc,\n thr_csum_s1 = 0.2 * units.adc,\n thr_csum_s2 = 1.0 * units.adc,\n n_MAU_sipm = 100,\n thr_sipm = 5.0 * units.pes,\n # Parameters added at this level\n s1_params = None,\n s2_params = None,\n thr_sipm_s2 = 30 * units.pes):\n\n CalibratedCity.__init__(self,\n run_number = run_number,\n files_in = files_in,\n file_out = file_out,\n compression = compression,\n nprint = nprint,\n n_baseline = n_baseline,\n thr_trigger = thr_trigger,\n acum_discharge_length = acum_discharge_length,\n n_MAU = n_MAU,\n thr_MAU = thr_MAU,\n thr_csum_s1 = thr_csum_s1,\n thr_csum_s2 = thr_csum_s2,\n n_MAU_sipm = n_MAU_sipm,\n thr_sipm = thr_sipm)\n\n self.s1_params = s1_params\n self.s2_params = s2_params\n self.thr_sipm_s2 = thr_sipm_s2\n\n def set_pmap_params(self,\n s1_params,\n s2_params,\n thr_sipm_s2 = 30 * units.pes):\n \"\"\"Parameters for PMAP searches.\"\"\"\n self.s1_params = s1_params\n self.s2_params = s2_params\n self.thr_sipm_s2 = thr_sipm_s2\n\n def find_S12(self, s1_ene, s1_indx, s2_ene, s2_indx):\n \"\"\"Return S1 and S2.\"\"\"\n S1 = cpf.find_S12(s1_ene,\n s1_indx,\n **self.s1_params._asdict())\n\n S2 = cpf.find_S12(s2_ene,\n s2_indx,\n **self.s2_params._asdict())\n return S1, S2\n\n def correct_S1_ene(self, S1, csum):\n return cpf.correct_S1_ene(S1, csum)\n\n def find_S2Si(self, S2, sipmzs):\n \"\"\"Return S2Si.\"\"\"\n SIPM = cpf.select_sipm(sipmzs)\n S2Si = pf.sipm_s2_dict(SIPM, S2, thr = self.thr_sipm_s2)\n return pio.S2Si(S2Si)\n\n\nclass S12SelectorCity:\n def __init__(self,\n drift_v = 1 * units.mm/units.mus,\n\n S1_Nmin = 0,\n S1_Nmax = 1000,\n S1_Emin = 0,\n S1_Emax = np.inf,\n S1_Lmin = 0,\n S1_Lmax = np.inf,\n S1_Hmin = 0,\n S1_Hmax = np.inf,\n S1_Ethr = 0,\n\n S2_Nmin = 0,\n S2_Nmax = 1000,\n S2_Emin = 0,\n S2_Emax = np.inf,\n S2_Lmin = 0,\n S2_Lmax = np.inf,\n S2_Hmin = 0,\n S2_Hmax = np.inf,\n S2_NSIPMmin = 1,\n S2_NSIPMmax = np.inf,\n S2_Ethr = 0):\n\n self.drift_v = drift_v\n\n self.S1_Nmin = S1_Nmin\n self.S1_Nmax = S1_Nmax\n self.S1_Emin = S1_Emin\n self.S1_Emax = S1_Emax\n self.S1_Lmin = S1_Lmin\n self.S1_Lmax = S1_Lmax\n self.S1_Hmin = S1_Hmin\n self.S1_Hmax = S1_Hmax\n self.S1_Ethr = S1_Ethr\n\n self.S2_Nmin = S2_Nmin\n self.S2_Nmax = S2_Nmax\n self.S2_Emin = S2_Emin\n self.S2_Emax = S2_Emax\n self.S2_Lmin = S2_Lmin\n self.S2_Lmax = S2_Lmax\n self.S2_Hmin = S2_Hmin\n self.S2_Hmax = S2_Hmax\n self.S2_NSIPMmin = S2_NSIPMmin\n self.S2_NSIPMmax = S2_NSIPMmax\n self.S2_Ethr = S2_Ethr\n\n def select_S1(self, s1s):\n return pf.select_peaks(s1s,\n self.S1_Emin, self.S1_Emax,\n self.S1_Lmin, self.S1_Lmax,\n self.S1_Hmin, self.S1_Hmax,\n self.S1_Ethr)\n\n def select_S2(self, s2s, sis):\n s2s = pf.select_peaks(s2s,\n self.S2_Emin, self.S2_Emax,\n self.S2_Lmin, self.S2_Lmax,\n self.S2_Hmin, self.S2_Hmax,\n 
self.S2_Ethr)\n sis = pf.select_Si(sis,\n self.S2_NSIPMmin, self.S2_NSIPMmax)\n\n valid_peaks = set(s2s) & set(sis)\n s2s = {peak_no: peak for peak_no, peak in s2s.items() if peak_no in valid_peaks}\n sis = {peak_no: peak for peak_no, peak in sis.items() if peak_no in valid_peaks}\n return s2s, sis\n\n def select_event(self, evt_number, evt_time, S1, S2, Si):\n evt = PointLikeEvent()\n evt.event = evt_number\n evt.time = evt_time * 1e-3 # s\n\n S1 = self.select_S1(S1)\n S2, Si = self.select_S2(S2, Si)\n\n if (not self.S1_Nmin <= len(S1) <= self.S1_Nmax or\n not self.S2_Nmin <= len(S2) <= self.S2_Nmax):\n return None\n\n evt.nS1 = len(S1)\n for peak_no, (t, e) in sorted(S1.items()):\n evt.S1w.append(pmp.width(t))\n evt.S1h.append(np.max(e))\n evt.S1e.append(np.sum(e))\n evt.S1t.append(t[np.argmax(e)])\n\n evt.nS2 = len(S2)\n for peak_no, (t, e) in sorted(S2.items()):\n s2time = t[np.argmax(e)]\n\n evt.S2w.append(pmp.width(t, to_mus=True))\n evt.S2h.append(np.max(e))\n evt.S2e.append(np.sum(e))\n evt.S2t.append(s2time)\n\n IDs, Qs = pmp.integrate_charge(Si[peak_no])\n xsipms = self.xs[IDs]\n ysipms = self.ys[IDs]\n x = np.average(xsipms, weights=Qs)\n y = np.average(ysipms, weights=Qs)\n q = np.sum (Qs)\n\n evt.Nsipm.append(len(IDs))\n evt.S2q .append(q)\n\n evt.X .append(x)\n evt.Y .append(y)\n\n evt.Xrms .append((np.sum(Qs * (xsipms-x)**2) / (q - 1))**0.5)\n evt.Yrms .append((np.sum(Qs * (ysipms-y)**2) / (q - 1))**0.5)\n\n evt.R .append((x**2 + y**2)**0.5)\n evt.Phi .append(np.arctan2(y, x))\n\n dt = s2time - evt.S1t[0] if len(evt.S1t) > 0 else -1e3\n dt *= units.ns / units.mus\n evt.DT .append(dt)\n evt.Z .append(dt * units.mus * self.drift_v)\n\n return evt\n\n\nclass MapCity(City):\n def __init__(self,\n lifetime ,\n\n xbins = 100,\n xmin = None,\n xmax = None,\n\n ybins = 100,\n ymin = None,\n ymax = None):\n\n self._lifetimes = [lifetime] if not np.shape(lifetime) else lifetime\n self._lifetime_corrections = tuple(map(self._create_fcorrection, self._lifetimes))\n\n xmin = self.det_geo.XMIN[0] if xmin is None else xmin\n xmax = self.det_geo.XMAX[0] if xmax is None else xmax\n ymin = self.det_geo.YMIN[0] if ymin is None else ymin\n ymax = self.det_geo.YMAX[0] if ymax is None else ymax\n\n self._xbins = xbins\n self._ybins = ybins\n self._xrange = xmin, xmax\n self._yrange = ymin, ymax\n\n def xy_correction(self, X, Y, E):\n xs, ys, es, us = \\\n fitf.profileXY(X, Y, E, self._xbins, self._ybins, self._xrange, self._yrange)\n\n norm_index = xs.size//2, ys.size//2\n return Correction((xs, ys), es, us, norm_strategy=\"index\", index=norm_index)\n\n def xy_statistics(self, X, Y):\n return np.histogram2d(X, Y, (self._xbins, self._ybins), (self._xrange, self._yrange))\n\n def _create_fcorrection(self, LT):\n return Fcorrection(lambda x, lt: fitf.expo(x, 1, -lt),\n lambda x, lt: x / LT**2 * fitf.expo(x, 1, -lt),\n (LT,))\n","sub_path":"invisible_cities/cities/base_cities.py","file_name":"base_cities.py","file_ext":"py","file_size_in_byte":24448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"484928177","text":"def oddEven():\n for value in range(1, 2000):\n if (value % 2 == 0):\n print(str(value) + \" is an even number\")\n else:\n print(str(value) + \" is an odd number\")\n\nsetArray = [1,2,3,4,5]\n\ndef multiply(arr, multiple):\n for item in range(0, len(arr)):\n arr[item] = arr[item] * multiple;\n \n return arr;\n\ndef layered_multiples():\n newArr = [];\n too_many_arrays = [];\n too_many_arrays = multiply(setArray, 2);\n for item in 
too_many_arrays:\n playArr = [];\n for bit in range(0, item):\n playArr.append(1)\n newArr.append(playArr);\n print(newArr);\n\n\n","sub_path":"Python/funFuncs.py","file_name":"funFuncs.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"512076431","text":"##############\n# Author: Ben Joris\n# Created: August 25th, 2020\n# Purpose: Separate contigs and write to Pyani input directory for conjugative systems extracted from genomes\n # Not part of main analyses\n##############\n\nimport glob\ncurfh=\"\"\nfor file in glob.glob(\"/Volumes/data/gutDB/paper_script_testing/plasmids/output/*.DNA.fa\"):\n with open(file) as oldfh:\n for line in oldfh:\n if \">\" in line:\n curfh=line[1:].strip()\n if \"/\" in curfh:\n curfh=curfh.replace(\"/\",\"_\")\n if \",\" in curfh:\n curfh=curfh.replace(\",\",\"_\")\n with open(\"../ani_input/\"+curfh+\".fa\",\"a\") as newfh:\n newfh.write(line)\n","sub_path":"bin/separate_genome_conj_ani.py","file_name":"separate_genome_conj_ani.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"363657121","text":"import os\nimport sys\nimport numpy as np \nfrom numpy import array, dot, diag\nfrom randmat import RandomMatrix\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nmatplotlib.rcParams['font.family'] = \"serif\"\n\n\nclass Analyzer:\n\n\tdef __init__(self, m_list, sigma_min_bounds):\n\n\t\tself.m_list = m_list\n\t\tself.bounds = sigma_min_bounds\n\t\t#self.plot_eigenvalues(100)\n\t\t#self.plot_norm_rho()\n\t\tself.print_sigma_min()\n\n\tdef random_samples(self, N, m):\n\n\t\t# construct lists of N random matrices with zeros below kth subdiagonal\n\t\tfull_randmats = [RandomMatrix(m,-m+1) for i in range(N)]\n\t\thess_randmats = [RandomMatrix(m,-1) for i in range(N)]\n\t\ttri_randmats = [RandomMatrix(m,0) for i in range(N)]\n\n\t\t# construct matrices of eigenvalues\n\t\tfull_eig_real = np.zeros((N,m))\n\t\tfull_eig_imag = np.zeros((N,m))\n\t\thess_eig_real = np.zeros((N,m))\n\t\thess_eig_imag = np.zeros((N,m))\n\t\ttri_eig_real = np.zeros((N,m))\n\t\ttri_eig_imag = np.zeros((N,m))\n\n\t\tfor i in range(N):\n\t\t\tfull_eig_real[i] = full_randmats[i].eigvals.real\n\t\t\tfull_eig_imag[i] = full_randmats[i].eigvals.imag\n\t\t\thess_eig_real[i] = hess_randmats[i].eigvals.real\n\t\t\thess_eig_imag[i] = hess_randmats[i].eigvals.imag\n\t\t\ttri_eig_real[i] = tri_randmats[i].eigvals.real\n\t\t\ttri_eig_imag[i] = tri_randmats[i].eigvals.imag\n\n\t\t# reshape into vectors\n\t\tfull_eig_real.reshape(-1)\n\t\tfull_eig_imag.reshape(-1)\n\t\thess_eig_real.reshape(-1)\n\t\thess_eig_imag.reshape(-1)\n\t\ttri_eig_real.reshape(-1)\n\t\ttri_eig_imag.reshape(-1)\n\n\t\t# store average norm for each batch of samples\n\t\tfull_norm = [full_randmats[i].norm for i in range(N)]\n\t\thess_norm = [hess_randmats[i].norm for i in range(N)]\n\t\ttri_norm = [tri_randmats[i].norm for i in range(N)]\n\t\tfull_norm_avg = np.mean(full_norm)\n\t\tfull_norm_std = np.std(full_norm)\n\t\thess_norm_avg = np.mean(hess_norm)\n\t\thess_norm_std = np.std(hess_norm)\n\t\ttri_norm_avg = np.mean(tri_norm)\n\t\ttri_norm_std = np.std(tri_norm)\n\n\t\t# store average spectral radius for each batch of samples\n\t\tfull_rho = [full_randmats[i].rho for i in range(N)]\n\t\thess_rho = [hess_randmats[i].rho for i in range(N)]\n\t\ttri_rho = [tri_randmats[i].rho for i in range(N)]\n\t\tfull_rho_avg = 
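The separate_genome_conj_ani.py script above reopens its output file in append mode once per input line, which is correct but slow on large FASTA files. A sketch that keeps a single handle open per record, with the same sanitising of '/' and ',' in headers (the pattern and output-directory arguments are placeholders):

import glob
import os

def split_fasta(pattern, out_dir):
    for path in glob.glob(pattern):
        out = None
        with open(path) as fh:
            for line in fh:
                if line.startswith('>'):
                    if out:
                        out.close()
                    name = line[1:].strip().replace('/', '_').replace(',', '_')
                    out = open(os.path.join(out_dir, name + '.fa'), 'w')
                if out:
                    out.write(line)
        if out:
            out.close()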
np.mean(full_rho)\n\t\tfull_rho_std = np.std(full_rho)\n\t\thess_rho_avg = np.mean(hess_rho)\n\t\thess_rho_std = np.std(hess_rho)\n\t\ttri_rho_avg = np.mean(tri_rho)\n\t\ttri_rho_std = np.std(tri_rho)\n\n\t\t# store average sigma_min for each batch of samples\n\t\tfull_sigma_min = [full_randmats[i].sigma_min for i in range(N)]\n\t\thess_sigma_min = [hess_randmats[i].sigma_min for i in range(N)]\n\t\ttri_sigma_min = [tri_randmats[i].sigma_min for i in range(N)]\n\t\tfull_sigma_min_avg = np.mean(full_sigma_min)\n\t\tfull_sigma_min_std = np.std(full_sigma_min)\n\t\thess_sigma_min_avg = np.mean(hess_sigma_min)\n\t\thess_sigma_min_std = np.std(hess_sigma_min)\n\t\ttri_sigma_min_avg = np.mean(tri_sigma_min)\n\t\ttri_sigma_min_std = np.std(tri_sigma_min)\n\n\t\t# get proportions of matrices with sigma_min <= sigma_min_bound\n\t\tratios = np.zeros((3,len(self.bounds)))\n\t\tfor bound in self.bounds:\n\n\t\t\tif bound > 0.0:\n\n\t\t\t\tj = self.bounds.index(bound)\n\n\t\t\t\tfull_ratio = 0.0\n\t\t\t\thess_ratio = 0.0\n\t\t\t\ttri_ratio = 0.0\n\n\t\t\t\tfor i in range(N):\n\n\t\t\t\t\tif full_randmats[i].sigma_min <= bound:\n\t\t\t\t\t\tratios[0,j] += 1.0\n\t\t\t\t\tif hess_randmats[i].sigma_min <= bound:\n\t\t\t\t\t\tratios[1,j] += 1.0\n\t\t\t\t\tif tri_randmats[i].sigma_min <= bound:\n\t\t\t\t\t\tratios[2,j] += 1.0\n\n\t\t\t\tratios[0,j] = ratios[0,j]/N\n\t\t\t\tratios[1,j] = ratios[1,j]/N\n\t\t\t\tratios[2,j] = ratios[2,j]/N\n\n\t\t'''\n\t\tprint(\"{:>4s}{:<3d}\".format(\"m = \",m))\n\t\tprint(\"{:<7s}{:^10s}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}\".format(\" \",\"full\",full_rho_avg,full_rho_std,full_norm_avg,full_norm_std,full_sigma_min_avg,full_sigma_min_std))\n\t\tprint(\"{:<7s}{:^10s}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}\".format(\" \",\"hess\",hess_rho_avg,hess_rho_std,hess_norm_avg,hess_norm_std,hess_sigma_min_avg,hess_sigma_min_std))\n\t\tprint(\"{:<7s}{:^10s}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}{:^15.7f}\".format(\" \",\"tri\",tri_rho_avg,tri_rho_std,tri_norm_avg,tri_norm_std,tri_sigma_min_avg,tri_sigma_min_std))\n\t\t'''\n\n\t\treturn full_eig_real, full_eig_imag, hess_eig_real, hess_eig_imag, tri_eig_real, tri_eig_imag, \\\n\t\t\t full_norm_avg, hess_norm_avg, tri_norm_avg, \\\n\t\t\t full_rho_avg, hess_rho_avg, tri_rho_avg, \\\n\t\t\t full_sigma_min_avg, hess_sigma_min_avg, tri_sigma_min_avg,\\\n\t\t\t full_norm_std, hess_norm_std, tri_norm_std, \\\n\t\t\t full_rho_std, hess_rho_std, tri_rho_std, \\\n\t\t\t full_sigma_min_std, hess_sigma_min_std, tri_sigma_min_std, \\\n\t\t\t ratios\n\n\n\tdef plot_eigenvalues(self, N):\n\n\t\tM = len(self.m_list)\n\t\t#print(\"\\n{:<7s}{:^10s}{:^15s}{:^15s}{:^15s}{:^15s}{:^15s}{:^15s}\".format(\" \",\"matrix\",\"avg rho\",\"std rho\",\"avg norm\",\"std norm\",\"avg sigma_min\",\"std sigma_min\"))\n\n\t\t# get colors for each m\n\t\tcolors = cm.rainbow(np.linspace(0.0, 1.0, M))\n\n\t\t# make plot\n\t\tfig, ax = plt.subplots(3, M, sharex=True, sharey=True)\n\t\tmatplotlib.rcParams['axes.unicode_minus'] = False\n\t\txlabels = [' ',-1,' ',0,' ',1,' ']\n\t\tylabels = [' ','-i',' ',0,' ','i',' ']\n\t\txymin, xymax = -1.5, 1.5\n\n\t\t# overlay eigenvalues for various m\n\t\tfor m in self.m_list:\n\n\t\t\t# get index of m\n\t\t\tj = self.m_list.index(m)\n\n\t\t\t# take N samples eigenvalues of mxm full, hessially triangular, and triangular random matrices\n\t\t\tdata = self.random_samples(N, m)\n\t\t\tavg_rhos = data[9:12]\n\n\t\t\t# title\n\t\t\tax[0,j].set_title('m = '+str(m),fontsize=12)\n\n\t\t\t# superimpose 
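random_samples above gathers ensemble statistics from a RandomMatrix class whose definition is not included here; numpy's triu reproduces its banding directly (k = -m+1 keeps the full matrix, k = -1 an upper-Hessenberg one, k = 0 an upper-triangular one). A self-contained sketch of the sigma_min statistics, assuming Gaussian entries scaled by 1/sqrt(m):

import numpy as np

def sigma_min_stats(m, k, N=100, bound=0.1, seed=0):
    rng = np.random.default_rng(seed)
    sigmas = np.empty(N)
    for i in range(N):
        A = np.triu(rng.standard_normal((m, m)) / np.sqrt(m), k)
        sigmas[i] = np.linalg.svd(A, compute_uv=False)[-1]   # smallest singular value
    return sigmas.mean(), sigmas.std(), np.mean(sigmas <= bound)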
eigenvalues N samples and spectral radii\n\t\t\tfor i in range(3):\n\n\t\t\t\tax[i,j].set_xlim([xymin,xymax])\n\t\t\t\tax[i,j].set_ylim([xymin,xymax])\n\t\t\t\tax[i,j].set_xticklabels(xlabels)\n\t\t\t\tax[i,j].set_yticklabels(ylabels)\n\t\t\t\tax[i,j].tick_params(axis='both', labelsize=9, direction='inout', length=4.0)\n\t\t\t\tax[i,j].scatter(data[2*i], data[2*i+1], c=colors[j], marker='o', s=2.0, edgecolor='none', alpha=0.7)\n\n\t\t\t\trho = plt.Circle((0,0), avg_rhos[i], edgecolor='k', fill=False, linewidth=0.7)\n\t\t\t\tax[i,j].add_patch(rho)\n\t\t\t\tax[i,j].text(1.35,-1.35,r'$\\overline{\\rho}=$'+\"{:.3f}\".format(avg_rhos[i]), fontsize=9, ha='right')\n\t\t\t\tax[i,j].set_aspect('equal')\n\t\t\t\tax[i,j].set_adjustable('box-forced')\n\n\t\tax[0,0].set_ylabel('full',fontsize=12)\n\t\tax[1,0].set_ylabel('upper-Hessenberg',fontsize=12)\n\t\tax[2,0].set_ylabel('upper-triangular',fontsize=12)\n\t\tfig.suptitle('Eigenvalues of '+str(N)+' Random Matrices', fontsize=16, va='top')\n\t\tplt.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n\t\t# save and open\n\t\tfigname = 'eigenvalues_N'+str(N)+'.png'\n\t\tplt.savefig(figname, format='png')\n\t\tos.system('okular '+figname)\n\t\tplt.clf()\n\t\t\t\n\n\n\tdef plot_norm_rho(self):\n\n\t\t# define a finer mesh of m\n\t\tm_list = [i for i in range(self.m_list[0],self.m_list[-1]+2,2)]\n\t\tM = len(m_list)\n\t\tN = 200\n\t\t#print(\"\\n{:<7s}{:^10s}{:^15s}{:^15s}{:^15s}{:^15s}{:^15s}{:^15s}\".format(\" \",\"matrix\",\"avg rho\",\"std rho\",\"avg norm\",\"std norm\",\"avg sigma_min\",\"std sigma_min\"))\n\n\t\t# store stats\n\t\tavg_norms = np.zeros((3,M))\n\t\tavg_rhos = np.zeros((3,M))\n\t\tstd_norms = np.zeros((3,M))\n\t\tstd_rhos = np.zeros((3,M))\n\n\t\tfor m in m_list:\n\n\t\t\t# get index of m\n\t\t\tj = m_list.index(m)\n\n\t\t\t# take many samples\n\t\t\tdata = self.random_samples(N, m)\n\n\t\t\t# extract stats\n\t\t\tavg_norms[:,j] = data[6:9]\n\t\t\tavg_rhos[:,j] = data[9:12]\n\t\t\tstd_norms[:,j] = data[15:18]\n\t\t\tstd_rhos[:,j] = data[18:21]\n\n\n\t\t# plot stats\n\t\tplt.figure(figsize=(8,6))\n\t\tplt.xlim([m_list[0],m_list[-1]])\n\n\t\tplt.plot(m_list, avg_norms[0], c='b', label=r'full $\\overline{\\| A \\|}_2$', lw=1.5)\n\t\tplt.fill_between(m_list, avg_norms[0]-std_norms[0], avg_norms[0]+std_norms[0], facecolor='b', alpha=0.3)\n\t\tplt.plot(m_list, avg_norms[1], c='r', label=r'Hessenberg $\\overline{\\| A \\|}_2$', lw=1.5)\n\t\tplt.fill_between(m_list, avg_norms[1]-std_norms[1], avg_norms[1]+std_norms[1], facecolor='r', alpha=0.3)\n\t\tplt.plot(m_list, avg_norms[2], c='g', label=r'triangular $\\overline{\\| A \\|}_2$', lw=1.5)\n\t\tplt.fill_between(m_list, avg_norms[2]-std_norms[2], avg_norms[2]+std_norms[2], facecolor='g', alpha=0.3)\n\n\t\tplt.plot(m_list, avg_rhos[0], c='b', linestyle='dashed', label=r'full $\\overline{\\rho(A)}$', lw=1.5)\n\t\tplt.fill_between(m_list, avg_rhos[0]-std_rhos[0], avg_rhos[0]+std_rhos[0], facecolor='b', alpha=0.3)\n\t\tplt.plot(m_list, avg_rhos[1], c='r', linestyle='dashed', label=r'Hessenberg $\\overline{\\rho(A)}$', lw=1.5)\n\t\tplt.fill_between(m_list, avg_rhos[1]-std_rhos[1], avg_rhos[1]+std_rhos[1], facecolor='r', alpha=0.3)\n\t\tplt.plot(m_list, avg_rhos[2], c='g', linestyle='dashed', label=r'triangular $\\overline{\\rho(A)}$', lw=1.5)\n\t\tplt.fill_between(m_list, avg_rhos[2]-std_rhos[2], avg_rhos[2]+std_rhos[2], facecolor='g', alpha=0.3)\n\n\t\tplt.xlabel(r'dimension $m$',fontsize=12)\n\t\tplt.legend(loc='upper left', shadow=True, fontsize=12)\n\t\tplt.title(r'Norms and Spectral Radii of '+str(N)+' 
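plot_eigenvalues superimposes the spectra of many samples in the complex plane and circles the mean spectral radius, exactly as the rho patch above does. Stripped of the 3-by-M subplot grid, the pattern reduces to the following matplotlib sketch (same 1/sqrt(m) scaling assumption as above):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
m, N = 32, 100
eigs = np.array([np.linalg.eigvals(rng.standard_normal((m, m)) / np.sqrt(m))
                 for _ in range(N)])            # shape (N, m), complex
rho = np.abs(eigs).max(axis=1).mean()           # mean spectral radius

fig, ax = plt.subplots()
ax.scatter(eigs.real.ravel(), eigs.imag.ravel(), s=2, alpha=0.5)
ax.add_patch(plt.Circle((0, 0), rho, fill=False, color='k'))
ax.set_aspect('equal')
plt.savefig('eigenvalues.png')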
Random Matrices')\n\n\t\t# save and open\n\t\tfigname = 'norm_rho_N'+str(N)+'.png'\n\t\tplt.savefig(figname, format='png')\n\t\tos.system('okular '+figname)\n\t\tplt.clf()\n\n\n\tdef print_sigma_min(self):\n\n\t\t# define a finer mesh of m\n\t\tM = len(self.m_list)\n\t\tB = len(self.bounds)\n\t\tN = 100\n\t\t#print(\"\\n{:<7s}{:^10s}{:^15s}{:^15s}{:^15s}{:^15s}{:^15s}{:^15s}\".format(\" \",\"matrix\",\"avg rho\",\"std rho\",\"avg norm\",\"std norm\",\"avg sigma_min\",\"std sigma_min\"))\n\n\t\tavg_sigma_min = np.zeros((3,M))\n\t\tstd_sigma_min = np.zeros((3,M))\n\t\tratios = np.zeros((M,3,B))\n\n\t\tfor m in self.m_list:\n\n\t\t\t# get index of m\n\t\t\tj = self.m_list.index(m)\n\n\t\t\t# extract stats\n\t\t\tdata = self.random_samples(N,m)\n\n\t\t\tavg_sigma_min[:,j] = data[12:15]\n\t\t\tstd_sigma_min[:,j] = data[21:24]\n\t\t\tratios[j,:,:] = data[-1]\n\n\t\t# print\n\t\tprint(\"Proportions of matrices with sigma_min <= bound\")\n\t\tfor i in range(M):\n\n\t\t\tprint(\"{:>4s}{:<3d}\".format(\"m = \",self.m_list[i]))\n\n\t\t\tfor k in range(B): \n\n\t\t\t\tprint(\"{:>7s}{:<7s}{:<7.3f}\".format(\" \",\"bound = \",self.bounds[k]))\n\n\t\t\t\tprint(\"{:<21s}{:>5s}{:>14.5f}\".format(\" \",\"full\",ratios[i,0,k]))\n\t\t\t\tprint(\"{:<21s}{:>5s}{:>14.5f}\".format(\" \",\"hess\",ratios[i,1,k]))\n\t\t\t\tprint(\"{:<21s}{:>5s}{:>14.5f}\".format(\" \",\"tri\",ratios[i,2,k]))\n\n\t\t","sub_path":"homework5/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":9884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"105609520","text":"\nimport numpy as np\nimport random\n\n# boardcalcul = [[\"0\" for x in range(9)] for y in range(9)]\nsudokuboard = list()\ndic_rc99 = dict()\ndic_rc99control = ()\n\n\nclass CreateBoard:\n def openfileCreateSboard():\n global sudokuboard\n fichier = open('sudoku1.txt', 'r')\n for line in fichier:\n linelist = []\n for chiffre in line:\n if chiffre != \"\\n\":\n if chiffre == \"_\":\n chiffre = 0\n linelist.append(int(chiffre))\n sudokuboard.append(linelist)\n fichier.close()\n return sudokuboard\n\n def dictcreater():\n global sudokuboard\n dic_rc99 = {(rx, cx): c for rx, r in enumerate(sudokuboard)\n for cx, c in enumerate(r)}\n for key in dic_rc99:\n if dic_rc99[key] == 0:\n dic_rc99[key] = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n else:\n continue\n\n return dic_rc99\n\n def regioncreate():\n global sudokuboard # for create a matrix with regions of sudoku\n r00 = []\n r01 = []\n r02 = []\n r10 = []\n r11 = []\n r12 = []\n r20 = []\n r21 = []\n r22 = []\n regionlist = [[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]]\n for row in range(9):\n for col in range(9):\n # for append all elemenets of sudoku to the matrix of sudoku regions\n rowlist = sudokuboard[row]\n element = rowlist[col]\n rowi = row//3\n coli = col//3\n if rowi == 0 and coli == 0:\n r00.append(element)\n elif rowi == 0 and coli == 1:\n r01.append(element)\n elif rowi == 0 and coli == 2:\n r02.append(element)\n elif rowi == 1 and coli == 0:\n r10.append(element)\n elif rowi == 1 and coli == 1:\n r11.append(element)\n elif rowi == 1 and coli == 2:\n r12.append(element)\n elif rowi == 2 and coli == 0:\n r20.append(element)\n elif rowi == 2 and coli == 1:\n r21.append(element)\n elif rowi == 2 and coli == 2:\n r22.append(element)\n else:\n continue\n return regionlist\n\n\ndef zerocounter(sudokuboard):\n # for count all of the zeros in the sudokuboard\n cnt = 0\n for row in sudokuboard:\n for chiffre in row:\n if chiffre == 0:\n cnt = cnt+1\n # 
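regioncreate above routes every cell through a nine-branch if/elif ladder on (row//3, col//3); the same mapping collapses to the single index 3*(row//3) + col//3. Sketch:

def region_lists(board):
    # Return the nine 3x3 regions of a 9x9 board as flat lists.
    regions = [[] for _ in range(9)]
    for row in range(9):
        for col in range(9):
            regions[3 * (row // 3) + col // 3].append(board[row][col])
    return regions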
for calculate how many for loops in the research function\n cnt = (cnt//9)+1\n print(cnt)\n return cnt\n\n\ndef researchMatrix0s():\n global sudokuboard\n sudokuboard = CreateBoard.openfileCreateSboard()\n dic_rc99 = CreateBoard.dictcreater()\n cnt = zerocounter(sudokuboard)\n print(cnt)\n for i in range(25):\n for (row, col) in dic_rc99:\n if sudokuboard[row][col] == 0:\n rowi = row//3\n coli = col//3\n liste = dic_rc99[(row, col)]\n for i in range(3):\n # i dont know why but it works with 3, maybe because of 3*3=9 region or 3 tip control: row, colomn, region :-)\n # I converted the sudoku board to array because of its easy to control each row and column with numpy\n sudokuboardarr = np.array(sudokuboard)\n sudokuboardTarr = np.transpose(sudokuboardarr)\n regionlist = CreateBoard.regioncreate()\n # Maybe i dont need it, but i`m ametor i will learn how can i do with numpy arrays\n sudokuboardrowlist = sudokuboardarr[row].tolist()\n sudokuboardTcolumlist = sudokuboardTarr[col].tolist()\n for chiffre in liste:\n if chiffre in sudokuboardrowlist:\n liste.remove(chiffre)\n for chiffre in liste:\n if chiffre in sudokuboardTcolumlist:\n liste.remove(chiffre)\n for chiffre in liste:\n if chiffre in regionlist[rowi][coli]:\n liste.remove(chiffre)\n if len(liste) == 1:\n dic_rc99[(row, col)] = liste[0]\n sudokuboard[row][col] = liste[0]\n\n # sudokuboardControl = openfileCreateSboard()\n for line in sudokuboard:\n print(line)\n # cnt = zerocounter(sudokuboard)\n # print(cnt)\n # sudokuboardControl = sudokuboard\n # probabiltylst = []\n # probabilty = []\n # while True:\n # def randomsudoku():\n # for (row, col) in dic_rc99:\n # if type(dic_rc99[(row, col)]) is int:\n # continue\n # else:\n # p = random.choice(dic_rc99[(row, col)])\n # sudokuboardControl[row][col] = p\n # probabilty.append(''.join(p))\n\n # if probabilty[0] in probabiltylst:\n # randomsudoku()\n # else:\n # probabiltylst.append(probabilty)\n\n # sudokuboardarr = np.array(sudokuboardControl)\n # sudokuboardTarr = np.transpose(sudokuboardarr)\n # regionlist = CreateBoard.regioncreate()\n # liste = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n # def searchmistake():\n # for i in range(len(liste)):\n # counter = 0\n # # i dont know why but it works with 3, maybe because of 3*3=9 region or 3 tip control: row, colomn, region :-)\n # # I converted the sudoku board to array because of its easy to control each row and column with numpy\n # sudokuboardarr = np.array(sudokuboardControl)\n # sudokuboardTarr = np.transpose(sudokuboardarr)\n # # regionlist = CreateBoard.regioncreate()\n # # Maybe i dont need it, but i`m ametor i will learn how can i do with numpy arrays\n # sudokuboardrowlist = sudokuboardarr[i].tolist()\n # # sudokuboardTcolumlist = sudokuboardTarr[col].tolist()\n # for chiffre in liste:\n # if sudokuboardrowlist.count(chiffre) > 0:\n # print(chiffre, sudokuboardarr[i],\n # sudokuboardrowlist.count(chiffre))\n # randomsudoku()\n # else:\n # continue\n # for chiffre in liste:\n # if sudokuboardTarr.count(chiffre) > 0:\n # print(chiffre, sudokuboardTarr[i],\n # sudokuboardTarr.count(chiffre))\n # randomsudoku()\n # else:\n # continue\n\n # searchmistake()\n # for chiffre in liste:\n # if chiffre in sudokuboardTcolumlist:\n # liste.remove(chiffre)\n # for chiffre in liste:\n # if chiffre in regionlist[rowi][coli]:\n # liste.remove(chiffre)\n # if len(liste) == 1:\n # dic_rc99[(row, col)] = liste[0]\n # sudokuboard[row][col] = liste[0]\n\n for (row, col) in dic_rc99:\n print((row, col), dic_rc99[(row, 
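The elimination loop in researchMatrix0s calls liste.remove() while iterating over liste, which skips the element that slides into the removed slot; that is probably why the inner pass has to repeat three times. Rebuilding the candidate list in one pass avoids the problem (sketch, reusing the region indexing shown above):

def prune_candidates(board, regions, row, col, candidates):
    # Drop candidates already present in the cell's row, column or 3x3 region.
    used = set(board[row])
    used |= {board[r][col] for r in range(9)}
    used |= set(regions[3 * (row // 3) + col // 3])
    return [v for v in candidates if v not in used]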
col)])\n\n\nresearchMatrix0s()\n","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":7400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"396622293","text":"#!/bin/bash python\n# -*- coding:utf-8 -*-\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/en/latest/distributing.html\nhttps://github.com/froest2012/sysexec\n\"\"\"\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='xiuc',\n # Versions should comply with PEP440. For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n version='1.0.0',\n\n description='A project to extend XML-RPC of supervisor',\n long_description=long_description,\n\n # The project's main homepage.\n url='https://github.com/froest2012/sysexec',\n\n # Author details\n author='xiuc001',\n author_email='972994718@qq.com',\n\n # 选择证书类型\n # 这个属性不懂可以看这里:http://docs.python-guide.org/en/latest/writing/license/\n # 可以选择哪些证书类型:https://opensource.org/licenses/alphabetical\n license='MIT',\n\n # 这个属性请看这个网址:https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n # 此项目成熟度,Alpha测试版本, Beta测试版, 还是稳定版\n 'Development Status :: 5 - Production/Stable',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Topic :: Software Development :: Build Tools',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: MIT License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n ],\n\n # What does your project relate to?\n keywords='xiuc supervisor xml-rpc extend system command execute',\n\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=['tests']),\n\n # Alternatively, if you want to distribute just a my_module.py, uncomment\n # this:\n # py_modules=[\"my_module\"],\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=['supervisor >= 3.0'],\n\n # List additional groups of dependencies here (e.g. development\n # dependencies). You can install these using the following syntax,\n # for example:\n # $ pip install -e .[dev,test]\n extras_require={\n # 'dev': ['check-manifest'],\n # 'test': ['setuptools'],\n },\n\n # If there are data files included in your packages that need to be\n # installed, specify them here. 
If using Python 2.6 or less, then these\n # have to be included in MANIFEST.in as well.\n package_data={\n },\n\n # Although 'package_data' is the preferred approach, in some case you may\n # need to place data files outside of your packages. See:\n # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa\n # In this case, 'data_file' will be installed into '/my_data'\n # data_files=[('my_data', ['data/data_file'])],\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. Entry points provide cross-platform support and allow\n # pip to create the appropriate form of executable for the target platform.\n # entry_points={\n # 'console_scripts': [\n # 'xiuc=xiuc:main',\n # ],\n # },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"325760707","text":"from tkinter import *\r\nfrom tkinter import ttk,font,messagebox\r\nimport tkinter as tk\r\nclass Aplicacion():\r\n __ventana=None\r\n __peso=None\r\n __altura=None\r\n __re1=None\r\n __re2=None\r\n def __init__(self) -> None:\r\n '''VENTANA'''\r\n self.__ventana=Tk()\r\n ancho_ventana = 500\r\n alto_ventana = 300\r\n\r\n x_ventana = self.__ventana.winfo_screenwidth() // 2 - ancho_ventana // 2\r\n y_ventana = self.__ventana.winfo_screenheight() // 2 - alto_ventana // 2\r\n\r\n posicion = str(ancho_ventana) + \"x\" + str(alto_ventana) + \"+\" + str(x_ventana) + \"+\" + str(y_ventana)\r\n \r\n self.__ventana.geometry(posicion)\r\n self.__ventana.resizable(0,0)\r\n self.__ventana.title('\\t\\tCalculadora de IMC')\r\n '''FUENTES'''\r\n self.fuente1= font.Font(family='Arial', size=10,weight='bold')\r\n self.fuente2 = font.Font(family='Arial', size=10)\r\n self.fuente3 = font.Font(family='Arial', size=17)\r\n '''TEXTOS'''\r\n self.marco=tk.Frame(self.__ventana,bg='white') #FONDO BLANCO\r\n self.titulo=tk.Label(self.__ventana,text='Calculadora de IMC',font=self.fuente1,bg='light gray')\r\n self.alturaLbl=ttk.Label(self.__ventana,text='Altura: ',padding=(5,5),background='white')\r\n self.pesoLbl=ttk.Label(self.__ventana,text='Peso: ',padding=(5,5),background='white')\r\n self.cmLbl=tk.Label(self.__ventana,text='cm',bg='light gray')\r\n self.kgLbl=tk.Label(self.__ventana,text='kg',bg='light gray')\r\n self.__altura=StringVar()\r\n self.__peso=StringVar()\r\n self.__re1=StringVar()\r\n self.__re2=StringVar()\r\n '''ENTRYS'''\r\n self.entryA=ttk.Entry(self.__ventana,textvariable=self.__altura)\r\n self.entryP=ttk.Entry(self.__ventana,textvariable=self.__peso)\r\n '''SEPARADORES'''\r\n self.separ1 = ttk.Separator(self.__ventana, orient=HORIZONTAL)\r\n self.separ2 = ttk.Separator(self.__ventana, orient=HORIZONTAL)\r\n self.separ3 = ttk.Separator(self.__ventana, orient=HORIZONTAL)\r\n '''BOTONES'''\r\n self.boton1 =tk.Button(self.__ventana,text='Calcular',font=self.fuente1,bg='lime green',border=0.25,fg='white',command=self.calcular)\r\n self.boton2 =tk.Button(self.__ventana,text='Limpiar',font=self.fuente1,bg='lime green',border=0.25,fg='white',command=self.limpiar)\r\n '''RECUADRO RESULTADOS'''\r\n self.resultados=tk.Frame(self.__ventana,bg='white')\r\n self.resultado1=tk.Label(self.resultados,bg='white',fg='white',text='Tu Indice de Masa Corporal (IMC) es',font=self.fuente2)\r\n self.resultado2=tk.Label(self.resultados,bg='white',fg='white',textvariable=self.__re1,font=self.fuente1)\r\n 
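The geometry arithmetic at the top of Aplicacion.__init__ centres the window on the screen; factored out, it works for any Tk toplevel (sketch):

import tkinter as tk

def center_window(win, width, height):
    # Size the window and centre it on the screen.
    x = win.winfo_screenwidth() // 2 - width // 2
    y = win.winfo_screenheight() // 2 - height // 2
    win.geometry('{}x{}+{}+{}'.format(width, height, x, y))

root = tk.Tk()
center_window(root, 500, 300)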
self.resultado3=tk.Label(self.resultados,bg='white',fg='white',textvariable=self.__re2,font=self.fuente3)\r\n \r\n '''POSICION TEXTOS'''\r\n self.alturaLbl.place(x=0,y=60)\r\n self.pesoLbl.place(x=0,y=130)\r\n self.cmLbl.place(x=465,y=62,height=25,width=30)\r\n self.kgLbl.place(x=465,y=132,height=25,width=30)\r\n self.titulo.place(relwidth=1,height=40,anchor=tk.N + tk.W)\r\n self.marco.place(x=0,y=0,width=500,height=300)\r\n\r\n '''POSICION ENTRYS'''\r\n self.entryA.place(x=50,y=62,height=25,width=415)\r\n self.entryP.place(x=50,y=132,height=25,width=415)\r\n '''POSICION SEPARADORES'''\r\n self.separ1.place(relx=0.5,y=50,anchor=tk.N,bordermode=OUTSIDE, relwidth=0.95)\r\n self.separ2.place(relx=0.5,y=125,anchor=tk.N,bordermode=OUTSIDE, relwidth=0.95)\r\n self.separ3.place(relx=0.5,y=170,anchor=tk.N,bordermode=OUTSIDE, relwidth=0.95)\r\n '''POSICION BOTONES'''\r\n self.boton1.place(relx=0.25,y=175,width=175,height=30,anchor=tk.N)\r\n self.boton2.place(relx=0.75,y=175,width=175,height=30,anchor=tk.N)\r\n '''POSICION RESULTADOS'''\r\n self.resultados.place(relx=0.5,y=215,anchor=tk.N,relwidth=0.61,height=70)\r\n self.resultado1.place(x=2,y=10)\r\n self.resultado2.place(relx=0.9999999999,y=10,anchor=tk.N + tk.E)\r\n self.resultado3.place(relx=0.5,rely=0.95,anchor=tk.S)\r\n self.__ventana.mainloop()\r\n \r\n def calcular(self, *args):\r\n if self.entryA.get()!='' and self.entryP.get()!='':\r\n try:\r\n imc=float(float(self.entryP.get()) / (((float(self.entryA.get()))/100)**2))\r\n self.resultados['bg']='#D0EBC7' #COLOREA EL RECUADRO YA QUE ESTABA EN BLANCO\r\n self.resultado1['bg']='#D0EBC7' #COLOREA LAS FUENTES YA QUE ESTABA EN BLANCO\r\n self.resultado1['fg']='#41763D'\r\n self.resultado2['bg']='#D0EBC7'\r\n self.resultado2['fg']='#41763D'\r\n self.__re1.set('{:.2f} Kg/m2'.format(imc))\r\n if imc<18.5:\r\n self.__re2.set('Peso inferior al normal')\r\n elif imc>=18.5 and imc<25:\r\n self.__re2.set('Peso Normal')\r\n elif imc>=25.0 and imc<30:\r\n self.__re2.set('Peso superior al normal')\r\n elif imc>=30:\r\n self.__re2.set('Obesidad')\r\n self.resultado3['bg']='#D0EBC7'\r\n self.resultado3['fg']='#41763D'\r\n except ValueError:\r\n messagebox.showerror(title='Error de tipo',\r\n message='Debe ingresar un valor numérico')\r\n self.__peso.set('')\r\n self.__altura.set('')\r\n self.entryA.focus()\r\n else:\r\n messagebox.showerror(title='Error de tipo',\r\n message='Debe completar los campos')\r\n self.__peso.set('')\r\n self.__altura.set('')\r\n self.entryA.focus()\r\n\r\n def limpiar(self):\r\n self.resultados['bg']='white'\r\n self.resultado1['bg']='white'\r\n self.resultado1['fg']='white'\r\n self.resultado2['bg']='white'\r\n self.resultado2['fg']='white'\r\n self.resultado3['bg']='white'\r\n self.resultado3['fg']='white'\r\n self.__re1.set('')\r\n self.__re2.set('')\r\n self.__peso.set('')\r\n self.__altura.set('')\r\n self.entryA.focus()\r\n self.entryA.focus()\r\n\r\n","sub_path":"classAplicacion.py","file_name":"classAplicacion.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"451349986","text":"import chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport numpy as np\nfrom abc import abstractmethod, ABCMeta\n\nimport models\n\ndef build_predictor(model_type='ata', **kwargs):\n\n # set model\n model_type = model_type.lower()\n\n if model_type == 'ata':\n model = models.Ata(**kwargs)\n elif model_type == 'alex':\n model = models.Alex(**kwargs)\n elif model_type == 
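calcular above converts the height from cm to m, computes IMC = peso / altura**2, and buckets the value at the 18.5 / 25 / 30 cut points. As a pure function, separated from the Tkinter plumbing, that is:

def imc_category(peso_kg, altura_cm):
    imc = peso_kg / (altura_cm / 100) ** 2
    if imc < 18.5:
        label = 'Peso inferior al normal'
    elif imc < 25:
        label = 'Peso Normal'
    elif imc < 30:
        label = 'Peso superior al normal'
    else:
        label = 'Obesidad'
    return imc, label

print(imc_category(70, 175))   # roughly (22.86, 'Peso Normal')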
'resnet50':\n model = models.ResNet50(**kwargs)\n elif model_type == 'vgg':\n model = models.VGG(**kwargs)\n else:\n raise NotImplementedError()\n\n return model\n\nclass Predictor(chainer.Chain):\n __metaclass__ = ABCMeta\n\n def __init__(self,\n in_channels=1,\n n_out=1):\n\n super().__init__()\n\n self.in_channels = in_channels\n self.n_out = n_out\n\n @abstractmethod\n def forward(self, x, **kwargs):\n '''\n Args:\n x (chainer.Variable): input image.\n kwargs: Optional arguments will be contained.\n Return:\n o (chainer.Variable): output.\n '''\n raise NotImplementedError()\n\n def freeze_layers(self, startwith, verbose=False):\n for l in self.children():\n if l.name.startswith(startwith):\n l = getattr(self, l.name)\n l.disable_update()\n if verbose==True:\n print(l.name, 'disable_update')\n\n def __call__(self, x):\n o = self.forward(x)\n return o\n","sub_path":"models/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"131995080","text":"\"\"\"Schedule observations during an observing night.\n\nThis module supercedes desisurvey.old.schedule.\n\"\"\"\nfrom __future__ import print_function, division\n\nimport os.path\n\nimport numpy as np\n\nimport astropy.io.fits\nimport astropy.units as u\n\nimport desiutil.log\n\nimport desisurvey.config\nimport desisurvey.utils\nimport desisurvey.etc\nimport desisurvey.tiles\nimport desisurvey.ephem\n\n\nclass Scheduler(object):\n \"\"\"Create a new next-tile scheduler.\n\n Design hour angles are read from the output of ``surveyinit`` using\n :func:`desisurvey.plan.load_design_hourangle`, by default.\n\n The only internal state needed by the scheduler is the list of\n accumulated SNR2 fractions per tile, which can be restored\n from a file created using :meth:`save`.\n\n A newly created or restored scheduler must be configured with\n calls to :meth:`update_tiles` (to tile availablity and priority)\n and :meth:`init_night` (to precompute data for a night's observing)\n before tiles can be selected.\n\n Use :meth:`next_tile` to select the next tile to observe during\n a night. If the tile is observed, the internal state must be\n updated with a call to :meth:`update_snr`.\n\n Parameters\n ----------\n restore : str or None\n Restore internal state from the snapshot saved to this filename,\n or initialize a new scheduler when None. Use :meth:`save` to\n save a snapshot to be restored later. 
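build_predictor above dispatches on the lowercased model name through an if/elif chain; a dict registry keeps the same behaviour while letting the error message list the valid names (sketch against the same models module this file imports):

import models   # the repository's own models package

_MODELS = {
    'ata': models.Ata,
    'alex': models.Alex,
    'resnet50': models.ResNet50,
    'vgg': models.VGG,
}

def build_predictor(model_type='ata', **kwargs):
    try:
        return _MODELS[model_type.lower()](**kwargs)
    except KeyError:
        raise NotImplementedError(
            'unknown model {!r}; choose from {}'.format(model_type, sorted(_MODELS)))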
Filename is relative to\n the configured output path unless an absolute path is\n provided.\n design_hourangles : array or None\n 1D array of design hour angles to use in degrees, or use\n :func:`desisurvey.plan.load_design_hourangle` when None.\n \"\"\"\n def __init__(self, restore=None, design_hourangle=None, bgs_footprint=None):\n self.log = desiutil.log.get_logger()\n # Load our configuration.\n config = desisurvey.config.Configuration()\n self.min_snr2frac = config.min_snr2_fraction()\n GRAY = desisurvey.config.Configuration().programs.GRAY\n self.max_prod = GRAY.max_moon_illumination_altitude_product().to(u.deg).value\n self.max_frac = GRAY.max_moon_illumination()\n self.threshold_alt = self.max_prod / self.max_frac\n self.max_airmass = desisurvey.utils.cos_zenith_to_airmass(np.sin(config.min_altitude()))\n self.max_ha = config.max_hour_angle().to(u.deg).value\n # Load static tile info.\n self.tiles = desisurvey.tiles.get_tiles(bgs_footprint=bgs_footprint)\n ntiles = self.tiles.ntiles\n # Check hourangles.\n if design_hourangle is None:\n self.design_hourangle = desisurvey.plan.load_design_hourangle(bgs_footprint=bgs_footprint)\n else:\n self.design_hourangle = np.asarray(design_hourangle)\n if self.design_hourangle.shape != (self.tiles.ntiles,):\n raise ValueError('Array design_hourangle has wrong shape.')\n # Initialize snr2frac, which is our only internal state.\n if restore is not None:\n # Restore the snr2frac array for a survey in progress.\n fullname = config.get_path(restore)\n if not os.path.exists(fullname):\n raise RuntimeError('Cannot restore scheduler from non-existent \"{}\".'.format(fullname))\n with astropy.io.fits.open(fullname, memmap=False) as hdus:\n self.snr2frac = hdus[0].data.copy()\n if self.snr2frac.shape != (ntiles,):\n raise ValueError('Invalid snr2frac array shape.')\n self.log.debug('Restored scheduler snapshot from \"{}\".'.format(fullname))\n else:\n # Initialize for a new survey.\n self.snr2frac = np.zeros(ntiles, float)\n # Initialize arrays derived from snr2frac.\n # Note that indexing of completed_by_pass uses tiles.pass_index, which is not necessarily\n # the same as range(tiles.npasses).\n self.completed = (self.snr2frac >= self.min_snr2frac)\n self.completed_by_pass = np.zeros(self.tiles.npasses, np.int32)\n for passnum in self.tiles.passes:\n idx = self.tiles.pass_index[passnum]\n self.completed_by_pass[idx] = np.count_nonzero(self.completed[self.tiles.passnum == passnum])\n # Allocate memory for internal arrays.\n self.exposure_factor = np.zeros(ntiles)\n self.hourangle = np.zeros(ntiles)\n self.airmass = np.zeros(ntiles)\n self.in_night_pool = np.zeros(ntiles, bool)\n self.tile_sel = np.zeros(ntiles, bool)\n self.LST = 0.\n self.night = None\n # Load the ephemerides to use.\n self.ephem = desisurvey.ephem.get_ephem()\n # Initialize tile availability and priority.\n # No tiles will be scheduled until these are updated using update_tiles().\n self.tile_available = np.zeros(self.tiles.ntiles, bool)\n self.tile_planned = np.zeros(self.tiles.ntiles, bool)\n self.tile_priority = np.zeros(self.tiles.ntiles, float)\n # Lookup avoidance cone angles.\n self.avoid_bodies = {}\n for body in config.avoid_bodies.keys:\n self.avoid_bodies[body] = getattr(config.avoid_bodies, body)().to(u.deg).value\n\n def save(self, name):\n \"\"\"Save a snapshot of our current state that can be restored.\n\n The only internal state required to restore a Scheduler is the array\n of snr2frac values per tile.\n\n The snapshot file size is about 130Kb.\n\n Parameters\n 
----------\n name : str\n Name of FITS file where the snapshot will be saved. The file will\n be saved under our configuration's output path unless name is\n already an absolute path. Pass the same name to the constructor's\n ``restore`` argument to restore this snapshot.\n \"\"\"\n config = desisurvey.config.Configuration()\n fullname = config.get_path(name)\n hdr = astropy.io.fits.Header()\n # Record the last night this scheduler was initialized for.\n hdr['NIGHT'] = self.night.isoformat() if self.night else ''\n # Record the number of completed tiles.\n hdr['NDONE'] = self.completed_by_pass.sum()\n # Save a copy of our snr2frac array.\n astropy.io.fits.PrimaryHDU(self.snr2frac, header=hdr).writeto(fullname+'.tmp', overwrite=True)\n os.rename(fullname+'.tmp', fullname)\n self.log.debug('Saved scheduler snapshot to \"{}\".'.format(fullname))\n\n def update_tiles(self, tile_available, tile_priority):\n \"\"\"Update tile availability and priority.\n\n A valid update must have some tiles available with priority > 0.\n Once a tile has been \"planned\", i.e., assigned priority > 0, it can\n not be later un-planned, i.e., assigned zero priority.\n\n Parameters\n ----------\n tile_available : array\n 1D array of booleans to indicate which tiles have had fibers assigned\n and so are available to schedule.\n tile_priority : array\n 1D array of per-tile priority values >= 0 used to implement survey strategy.\n\n Returns\n -------\n tuple\n Tuple (new_available, new_planned) of 1D arrays of tile indices that\n identify any tiles are newly available or \"planned\" (assigned priority > 0).\n \"\"\"\n new_available = tile_available & ~self.tile_available\n new_unavailable = ~tile_available & self.tile_available\n if np.any(new_unavailable):\n raise RuntimeError('Some previously available tiles now unavailable.')\n self.tile_available[new_available] = True\n\n assert np.all(self.tile_priority >= 0)\n if np.any(tile_priority < 0):\n raise ValueError('All tile priorities must be >= 0.')\n tile_planned = tile_priority > 0\n new_planned = tile_planned & ~self.tile_planned\n new_unplanned = ~tile_planned & self.tile_planned\n if np.any(new_unplanned):\n raise RuntimeError('Some previously planned tiles now have zero priority.')\n self.tile_planned[new_planned] = True\n # Tile priorities can change after they become > 0, so copy all planned priorities,\n # not just the newly planned priorities.\n self.tile_priority[self.tile_planned] = tile_priority[self.tile_planned]\n # Precompute log(priority) since that is what we use for scheduling.\n # Ignore harmless warnings about log(0) = -inf.\n with np.errstate(divide='ignore'):\n self.log_priority = np.log(self.tile_priority)\n\n if not np.any(self.tile_available & self.tile_planned):\n raise ValueError('No available tiles with priority > 0 to schedule.')\n new_available, new_planned = np.where(new_available)[0], np.where(new_planned)[0]\n self.log.debug('{} new available tiles, {} new planned tiles.'.format(\n len(new_available), len(new_planned)))\n return new_available, new_planned\n\n def init_night(self, night, use_twilight=False):\n \"\"\"Initialize scheduling for the specified night.\n\n Must be called before calls to :meth:`next_tile` and\n :meth:`update_tile` during the night.\n\n The pool of available tiles during the night consists of those that:\n\n - Have fibers assigned.\n - Have non-zero priority (aka \"planned\").\n - Have not already reached their target SNR (aka \"completed\").\n - Are not too close to a planet during this night.\n\n Tile 
availability and priority is assumed fixed during the night.\n When the moon is up, tiles are also vetoed if they are too\n close to the moon. The angles that define \"too close\" to a\n planet or the moon are specified in config.avoid_bodies.\n\n Parameters\n ----------\n night : str\n Date on the evening this night starts in the format YYYY-MM-DD.\n use_twilight : bool\n Include twilight when calculating the scheduled program changes\n during this night when True.\n verbose : bool\n Generate verbose logging output when True.\n \"\"\"\n self.log.debug('Initializing scheduler for {}'.format(night))\n if self.tile_available is None or self.tile_priority is None:\n raise RuntimeError('Must call update_tiles() before init_night().')\n self.night = night\n self.night_ephem = self.ephem.get_night(night)\n midnight = self.night_ephem['noon'] + 0.5\n # Lookup the program for this night.\n self.night_programs, self.night_changes = self.ephem.get_night_program(\n night, include_twilight=use_twilight)\n self.log.debug(' Program: {}'.format(self.night_programs))\n self.log.debug(' Changes: {}'.format(np.round(24 * (self.night_changes - midnight), 3)))\n # Initialize linear interpolation of MJD -> LST in degrees during this night.\n self.MJD0, MJD1 = self.night_ephem['brightdusk'], self.night_ephem['brightdawn']\n self.LST0, LST1 = [\n self.night_ephem['brightdusk_LST'], self.night_ephem['brightdawn_LST']]\n self.dLST = (LST1 - self.LST0) / (MJD1 - self.MJD0)\n # Initialize tracking of the program through the night.\n self.night_index = 0\n # Remember the last tile observed this night.\n self.last_idx = None\n # Initialize the pool of tiles that could be observed this night.\n self.in_night_pool[:] = ~self.completed & self.tile_planned & self.tile_available\n # Check if any tiles cannot be observed because they are too close to a planet this night.\n poolRA = self.tiles.tileRA[self.in_night_pool]\n poolDEC = self.tiles.tileDEC[self.in_night_pool]\n avoid_idx = []\n for body in self.avoid_bodies:\n if body == 'moon':\n continue\n # Get body (RA,DEC) at midnight.\n bodyDEC, bodyRA = desisurvey.ephem.get_object_interpolator(\n self.night_ephem, body, altaz=False)(midnight)\n too_close = desisurvey.utils.separation_matrix(\n [bodyRA], [bodyDEC], poolRA, poolDEC, self.avoid_bodies[body])[0]\n if np.any(too_close):\n idx = np.where(self.in_night_pool)[0][too_close]\n tileIDs = self.tiles.tileID[idx]\n self.log.debug(' Tiles within {} deg of {}: {}.'.format(\n self.avoid_bodies[body], body, ','.join([str(ID) for ID in tileIDs])))\n avoid_idx.extend(idx)\n self.in_night_pool[avoid_idx] = False\n # Initialize moon tracking during this night.\n self.moon_DECRA = desisurvey.ephem.get_object_interpolator(self.night_ephem, 'moon', altaz=False)\n self.moon_ALTAZ = desisurvey.ephem.get_object_interpolator(self.night_ephem, 'moon', altaz=True)\n # Initialize sun tracking during this night.\n self.sun_DECRA = desisurvey.ephem.get_object_interpolator(self.night_ephem, 'sun', altaz=False) \n self.sun_ALTAZ = desisurvey.ephem.get_object_interpolator(self.night_ephem, 'sun', altaz=True) \n\n def next_tile(self, mjd_now, ETC, seeing, transp, skylevel, HA_sigma=15.,\n greediness=0., use_brightsky=False, program=None):\n \"\"\"Select the next tile to observe.\n\n The :meth:`init_night` method must be called before calling this\n method during a night.\n\n The (log) score for each observable tile is calculated as:\n\n .. 
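init_night above reduces every later sidereal-time lookup to one multiply-add by fitting a line through the dusk and dawn ephemerides. The interpolation pattern, with illustrative numbers in place of the real brightdusk/brightdawn values:

def make_lst(mjd0, lst0, mjd1, lst1):
    # Linear interpolator from MJD to local sidereal time in degrees.
    dlst = (lst1 - lst0) / (mjd1 - mjd0)
    return lambda mjd: (lst0 + dlst * (mjd - mjd0)) % 360

lst = make_lst(58000.1, 310.0, 58000.6, 130.0 + 360.0)  # dawn LST unwrapped past dusk
hour_angle = lst(58000.35) - 45.0                       # LST minus tile RA, degrees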
math::\n\n -(1 - g)\\\\,\\\\frac{1}{2} \\\\left( \\\\frac{\\\\text{HA} - \\\\text{HA}_0}{\\\\sigma_{\\\\text{HA}}}\n \\\\right)^2 - g \\\\log \\\\frac{t_\\\\text{exp}}{t_\\\\text{nom}} + \\log P\n\n where :math:`\\\\text{HA}` and :math:`\\\\text{HA}_0` are the current and design\n hour angles, respectively, :math:`g` is the ``greediness`` parameter below,\n and :math:`P` are the tile priorities used to implement survey strategy\n and updated via :meth:`update_tiles`.\n\n Parameters\n ----------\n mjd_now : float\n Time when the decision is being made.\n ETC : :class:`desisurvey.etc.ExposureTimeCalculator`\n Object with methods ``could_complete()`` and ``weather_factor()``.\n Normally an instance of :class:`desisurvey.etc.ExposureTimeCalculator`.\n seeing : float\n Estimate of current atmospherid seeing in arcseconds.\n transp : float\n Estimate of current atmospheric transparency in the range 0-1.\n HA_sigma : float\n RMS in degrees for the Gaussian penalty applied to tiles observed\n away from their design hour angle.\n greediness : float\n Parameter that controls the balance between observing at the design\n hour angle and observing tiles with the small exposure-time factor.\n Set this value to zero to only consider hour angle or to one to\n only consider isntantaneous efficiency. The meaning of intermediate\n values will depend on the value of ``HA_sigma`` and how exposure\n factors are calculated. Refer to the equation above for details.\n Must be between 0 and 1.\n use_brightsky : bool \n If True use improved bright sky model in next_tile selection to\n calculate exposure factor for bright sky. If False, exposure factor\n does not include bright sky model.\n program : string\n PROGRAM of tile to select. Default of None selects the appropriate\n PROGRAM given current moon/twilight conditions. Forcing a particular\n program leads PROGEND to be infinity.\n\n Returns\n -------\n tuple\n Tuple (TILEID,PASSNUM,SNR2FRAC,EXPFAC,AIRMASS,PROGRAM,PROGEND)\n giving the ID and associated properties of the selected tile.\n When no tile is observable, only the last two tuple fields\n will be valid, and this method should be called again after\n some dead-time delay. 
The tuple fields are:\n\n - TILEID: ID of the tile to observe.\n - PASSNUM: pass number of the tile to observe.\n - SNR2FRAC: fractional SNR2 already accumulated for the selected tile.\n - EXPFAC: initial exposure-time factor for the selected tile.\n - AIRMASS: initial airmass of the selected tile.\n - PROGRAM: scheduled program at ``mjd_now``, which might be\n different from the program of the selected (TILEID, PASSNUM).\n - PROGEND: MJD timestamp when the scheduled program ends.\n \"\"\"\n if self.night is None:\n raise ValueError('Must call init_night() before next_tile().')\n if greediness < 0 or greediness > 1:\n raise ValueError('Expected greediness between 0 and 1.')\n # Which program are we in?\n self.night_index = 0 # not so bad to recompute this?\n while ((self.night_index + 1 < len(self.night_changes)) and\n (mjd_now >= self.night_changes[self.night_index + 1])):\n self.night_index += 1\n if program is None:\n program = self.night_programs[self.night_index]\n # How much time remaining in this program?\n mjd_program_end = self.night_changes[self.night_index + 1]\n else:\n mjd_program_end = self.night_changes[-1] # end of night?\n t_remaining = mjd_program_end - mjd_now\n # Select available tiles in this program.\n self.tile_sel = self.tiles.program_mask[program] & self.in_night_pool\n if not np.any(self.tile_sel):\n # No tiles available to observe tonight in this program.\n return None, None, None, None, None, program, mjd_program_end\n # Calculate the local apparent sidereal time in degrees.\n self.LST = self.LST0 + self.dLST * (mjd_now - self.MJD0)\n # Calculate the hour angle of each available tile in degrees.\n #######################################################\n ### should be offset to estimated exposure midpoint ###\n #######################################################\n self.hourangle[:] = 0.\n self.hourangle[self.tile_sel] = self.LST - self.tiles.tileRA[self.tile_sel]\n # Calculate the airmass of each available tile.\n self.airmass[:] = self.max_airmass\n self.airmass[self.tile_sel] = self.tiles.airmass(\n self.hourangle[self.tile_sel], self.tile_sel)\n self.tile_sel &= self.airmass < self.max_airmass\n absha = np.abs(((self.hourangle + 180) % 360)-180)\n self.tile_sel &= (absha < self.max_ha)\n if not np.any(self.tile_sel):\n # No tiles left to observe after airmass cut.\n return None, None, None, None, None, program, mjd_program_end\n\n # Is the moon up?\n if mjd_now > self.night_ephem['moonrise'] and mjd_now < self.night_ephem['moonset']:\n moon_is_up = True\n # calculate the moon (RA,DEC).\n moonDEC, moonRA = self.moon_DECRA(mjd_now)\n # Identify tiles that are too close to the moon to observe now.\n too_close = desisurvey.utils.separation_matrix(\n [moonRA], [moonDEC],\n self.tiles.tileRA[self.tile_sel], self.tiles.tileDEC[self.tile_sel],\n self.avoid_bodies['moon'])[0]\n idx = np.where(self.tile_sel)[0][too_close]\n self.tile_sel[idx] = False\n if not np.any(self.tile_sel):\n # No tiles left to observe after moon avoidance veto.\n return None, None, None, None, None, program, mjd_program_end\n else:\n moon_is_up = False\n\n # Estimate exposure factors for all available tiles.\n self.exposure_factor[:] = 1e8\n self.exposure_factor[self.tile_sel] = self.tiles.dust_factor[self.tile_sel]\n if use_brightsky and program == 'BRIGHT': \n self.exposure_factor[self.tile_sel] *= \\\n self.update_exposure_factor(mjd_now, self.tiles.tileID[self.tile_sel])\n else: \n self.exposure_factor[self.tile_sel] *= \\\n 
desisurvey.etc.airmass_exposure_factor(self.airmass[self.tile_sel])\n # Apply global weather factors that are the same for all tiles.\n self.exposure_factor[self.tile_sel] /= ETC.weather_factor(seeing, transp)\n\n if not np.any(self.tile_sel):\n return None, None, None, None, None, program, mjd_program_end\n # Calculate (the log of a) Gaussian multiplicative penalty for\n # observing tiles away from their design hour angle.\n dHA = self.hourangle[self.tile_sel] - self.design_hourangle[self.tile_sel]\n dHA[dHA >= 180.] -= 360\n dHA[dHA < -180] += 360\n assert np.all((dHA >= -180) & (dHA < 180))\n # Calculate a score that combines dHA and instantaneous efficiency.\n log_score = (\n -0.5 * (dHA / HA_sigma) ** 2 * (1 - greediness) +\n -np.log(self.exposure_factor[self.tile_sel]) * greediness)\n # Add tile priorities.\n log_score += self.log_priority[self.tile_sel]\n # Select the tile with the highest (log) score.\n idx = np.where(self.tile_sel)[0][np.argmax(log_score)]\n # Return info about the selected tile and scheduled program.\n return (self.tiles.tileID[idx], self.tiles.passnum[idx],\n self.snr2frac[idx], self.exposure_factor[idx],\n self.airmass[idx], program, mjd_program_end)\n\n def update_snr(self, tileID, snr2frac):\n \"\"\"Update SNR for one tile.\n\n A tile whose update ``snr2frac`` exceeds the ``min_snr2frac``\n configuration parameter will be considered completed, and\n not scheduled for future observing.\n\n Parameters\n ----------\n tileID : int\n ID of the tile to update.\n snr2frac : float\n New value of the fractional SNR2 accumulated for this tile, including\n all previous exposures.\n \"\"\"\n idx = self.tiles.index(tileID)\n self.snr2frac[idx] = snr2frac\n if self.snr2frac[idx] >= self.min_snr2frac:\n self.in_night_pool[idx] = False\n self.completed[idx] = True\n passidx = self.tiles.pass_index[self.tiles.passnum[idx]]\n self.completed_by_pass[passidx] += 1\n # Remember the last tile observed this night.\n self.last_idx = idx\n\n def survey_completed(self):\n \"\"\"Test if all tiles have been completed.\n \"\"\"\n return self.completed_by_pass.sum() == self.tiles.ntiles\n \n def update_exposure_factor(self, mjd, tileid, return_obs_cond=False): \n \"\"\" get updated exposure factor on this night given mjd, and tile ID.\n \"\"\"\n # get tile index \n idx = [] \n for _id in np.atleast_1d(tileid): \n idx.append(np.where(self.tiles.tileID == _id)[0])\n idx = np.array(idx).flatten() \n assert len(idx) > 0 \n\n # (RA,DEC) of the moon and sun at mjd\n moonDEC, moonRA = self.moon_DECRA(mjd)\n moonALT, moonAZ = self.moon_ALTAZ(mjd) \n sunDEC, sunRA = self.sun_DECRA(mjd)\n sunALT, sunAZ = self.sun_ALTAZ(mjd) \n\n # moon illumination \n moonILL = self.night_ephem['moon_illum_frac']\n\n # calculate moon and sun separation \n moonSEP = desisurvey.utils.separation_matrix(\n [moonRA], [moonDEC],\n self.tiles.tileRA[idx], self.tiles.tileDEC[idx])\n sunSEP = desisurvey.utils.separation_matrix(\n [sunRA], [sunDEC],\n self.tiles.tileRA[idx], self.tiles.tileDEC[idx])\n\n fexp = desisurvey.etc.bright_exposure_factor(\n self.airmass[idx], moonILL, moonSEP, moonALT, sunSEP, sunALT)\n if not return_obs_cond: \n return fexp\n else: \n return fexp, moonILL, moonSEP, moonALT, sunSEP, sunALT\n\n def get_observing_conditions(self, mjd, tileid): \n \"\"\" get observing conditions this night given mjd, and tile ID.\n \"\"\"\n # get tile index \n idx = [] \n for _id in np.atleast_1d(tileid): \n idx.append(np.where(self.tiles.tileID == _id)[0])\n idx = np.array(idx).flatten() \n assert len(idx) > 0 \n\n # 
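The selection step above wraps dHA into [-180, 180) and blends the hour-angle penalty with the log exposure factor under the greediness weight. Condensed into one function (sketch; exp_factor and log_priority are per-tile arrays):

import numpy as np

def pick_tile(ha, design_ha, exp_factor, log_priority, g=0.0, ha_sigma=15.0):
    dha = (ha - design_ha + 180.0) % 360.0 - 180.0   # wrap to [-180, 180)
    log_score = (-0.5 * (dha / ha_sigma) ** 2 * (1.0 - g)
                 - np.log(exp_factor) * g
                 + log_priority)
    return np.argmax(log_score)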
(RA,DEC) of the moon and sun at mjd\n moonDEC, moonRA = self.moon_DECRA(mjd)\n moonALT, moonAZ = self.moon_ALTAZ(mjd) \n sunDEC, sunRA = self.sun_DECRA(mjd)\n sunALT, sunAZ = self.sun_ALTAZ(mjd) \n\n # moon illumination \n moonILL = self.night_ephem['moon_illum_frac']\n\n # calculate moon and sun separation \n moonSEP = desisurvey.utils.separation_matrix(\n [moonRA], [moonDEC],\n self.tiles.tileRA[idx], self.tiles.tileDEC[idx])\n sunSEP = desisurvey.utils.separation_matrix(\n [sunRA], [sunDEC],\n self.tiles.tileRA[idx], self.tiles.tileDEC[idx])\n\n return moonILL, moonSEP, moonALT, sunSEP, sunALT\n\n","sub_path":"py/desisurvey/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":24933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"64901975","text":"import numpy as np\nimport timeit\n\n\n# jacobi method\ndef jacobi(systemMatrix, errorRate, maxIteration, showTime=False):\n startTime = timeit.default_timer()\n systemLength = len(systemMatrix)\n error = 0\n index = 0\n oldXValues = np.zeros(systemLength)\n xValues = np.zeros(systemLength)\n systemMatrix = convertToValidMatrix(systemMatrix)\n while index < maxIteration:\n if index > 900:\n a = 2\n\n for i in range(systemLength):\n t = 1 / systemMatrix[i][i]\n sumValue = 0\n for j in range(systemLength):\n if(j != i):\n sumValue += -(systemMatrix[i][j]) * oldXValues[j]\n\n t *= (sumValue + systemMatrix[i][systemLength])\n xValues[i] = t\n\n error = calculateErrorRate(xValues, oldXValues)\n\n if(error < errorRate):\n if(showTime == True):\n print(\"time to execute: \" +\n str((timeit.default_timer() - startTime)) + \" seconds\")\n\n return xValues\n\n for i in range(systemLength):\n oldXValues[i] = xValues[i]\n\n index += 1\n\n raise Exception('limited')\n\n\n# calculate error rate to jacobi end execution\ndef calculateErrorRate(xValues, oldXValues):\n length = len(xValues)\n biggestDiffValue = 0.0\n for i in range(length):\n diff = abs(xValues[i] - oldXValues[i])\n if(i == 0 or biggestDiffValue < diff):\n biggestDiffValue = diff\n\n return biggestDiffValue\n\n#convert a matrix to valid matrix in jacobi algorithm\ndef convertToValidMatrix(systemMatrix):\n systemLength = len(systemMatrix)\n for i in range(systemLength):\n if(systemMatrix[i][i] == 0):\n change = False\n for j in range(i, systemLength):\n if(systemMatrix[j][j] != 0 and systemMatrix[j][i] != 0):\n systemMatrix = switchEquations(systemMatrix, i, j)\n change = True\n break\n\n if(change == False):\n for j in range(i):\n if(systemMatrix[j][j] != 0 and systemMatrix[j][i] != 0):\n systemMatrix = switchEquations(systemMatrix, i, j)\n break\n\n return systemMatrix\n\n# switch 2 lines of matrix\ndef switchEquations(matrix, lineA, lineB):\n length = len(matrix[lineA])\n for i in range(length):\n oldValue = matrix[lineA][i]\n matrix[lineA][i] = matrix[lineB][i]\n matrix[lineB][i] = oldValue\n\n return matrix\n\n\nprint(\"exercise 1\")\nmatrix = [\n [2.0, 1.0, 2.0],\n [1.0, -2.0, -2.0],\n]\n\nprint(\"test ex 1:\")\nprint(jacobi(matrix, 0.000001, 200, True))\n\n\nprint(\"exercise 2\")\ntry:\n matrix2 = [\n [15, 5, -5, 30],\n [4, 10, 1, 23],\n [2, -2, 8, -10]\n ]\n\n print(\"test ex 2:\")\n print(jacobi(matrix2, 0.1, 10, True))\nexcept Exception as ex:\n print(\"error in solution\")\n\nprint(\"exercise 3\")\nprint(\"a)\")\ntry:\n matrix3a = [\n [1.0, -1.0, 3.0, 2.0],\n [3.0, -3.0, 1.0, -1.0],\n [1.0, 1.0, 0.0, 3.0]\n ]\n print(jacobi(matrix3a, 0.001, 200, True))\nexcept Exception as ex:\n print(\"error in 
solution\")\n\n\nprint(\"b)\")\ntry:\n matrix3b = [\n [2.0, -1.5, 3.0, 1.0],\n [-1.0, 0.0, 2.0, 3.0],\n [4.0, -4.5, 5.0, 1.0]\n ]\n print(jacobi(matrix3b, 0.001, 200, True))\nexcept Exception as ex:\n print(\"error in solution\")\n\n\nprint(\"c)\")\ntry:\n matrix3c = [\n [2.0, 0.0, 0.0, 0.0, 3.0],\n [1.0, 1.5, 0.0, 0.0, 4.5],\n [0.0, -3.0, 0.5, 0.0, -6.6],\n [2.0, -2.0, 1.0, 1.0, 0.8]\n ]\n print(jacobi(matrix3c, 0.001, 200, True))\nexcept Exception as ex:\n print(\"error in solution\")\n\n\nprint(\"d)\")\ntry:\n matrix3d = [\n [1.0, 1.0, 0.0, 1.0, 2.0],\n [2.0, 1.0, -1.0, 1.0, 1.0],\n [4.0, -1.0, -2.0, 2.0, 0.0],\n [3.0, -1.0, -1.0, 2.0, -3.0]\n ]\n print(jacobi(matrix3d, 0.001, 200, True))\nexcept Exception as ex:\n print(\"error in solution\")\n","sub_path":"algorithms/jacobi_method.py","file_name":"jacobi_method.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"403740295","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport re\r\nimport scws\r\nimport sys\r\nimport csv\r\nimport time\r\nfrom elasticsearch import Elasticsearch\r\nsys.path.append('../../')\r\nfrom parameter import r_path as abs_path\r\nfrom global_utils import *\r\nfrom global_config import *\r\n\r\n#for test\r\nRUN_TYPE = 0 #0 mark run for test; 1 mark run for operation\r\nR_BEGIN_TIME = '2013-09-01'\r\nDAY = 24*3600\r\nTIME_STR = '20161127'\r\n\r\ndef ts2datetime(ts):\r\n return time.strftime('%Y-%m-%d', time.localtime(ts))\r\n\r\ndef datetime2ts(date):\r\n return int(time.mktime(time.strptime(date, '%Y-%m-%d')))\r\n\r\n#use to get retweet/be_retweet/comment/be_comment db_number\r\ndef get_db_num(timestamp):\r\n date = ts2datetime(timestamp)\r\n date_ts = datetime2ts(date)\r\n r_beigin_ts = datetime2ts(R_BEGIN_TIME)\r\n db_number = ((date_ts - r_beigin_ts) / (DAY*7)) % 2 + 1\r\n #run_type\r\n if RUN_TYPE == 0:\r\n db_number = 1\r\n return db_number\r\n\r\nes_bci = Elasticsearch(user_profile_host, timeout = 600)\r\n\r\n##对微博文本进行预处理\r\n\r\ndef cut_filter(text):\r\n pattern_list = [r'\\(分享自 .*\\)', r'http://\\w*']\r\n for i in pattern_list:\r\n p = re.compile(i)\r\n text = p.sub('', text)\r\n return text\r\n\r\ndef re_cut(w_text):#根据一些规则把无关内容过滤掉\r\n \r\n w_text = cut_filter(w_text)\r\n w_text = re.sub(r'[a-zA-z]','',w_text)\r\n a1 = re.compile(r'\\[.*?\\]' )\r\n w_text = a1.sub('',w_text)\r\n a1 = re.compile(r'回复' )\r\n w_text = a1.sub('',w_text)\r\n a1 = re.compile(r'\\@.*?\\:' )\r\n w_text = a1.sub('',w_text)\r\n a1 = re.compile(r'\\@.*?\\s' )\r\n w_text = a1.sub('',w_text)\r\n if w_text == u'转发微博':\r\n w_text = ''\r\n\r\n return w_text\r\n\r\n##微博文本预处理结束\r\n\r\n## 加载分词工具\r\n\r\nSCWS_ENCODING = 'utf-8'\r\nSCWS_RULES = '/usr/local/scws/etc/rules.utf8.ini'\r\nCHS_DICT_PATH = '/usr/local/scws/etc/dict.utf8.xdb'\r\nCHT_DICT_PATH = '/usr/local/scws/etc/dict_cht.utf8.xdb'\r\nIGNORE_PUNCTUATION = 1\r\nABSOLUTE_DICT_PATH = os.path.abspath(os.path.join(abs_path, './dict'))\r\nCUSTOM_DICT_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'userdic.txt')\r\nEXTRA_STOPWORD_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'stopword.txt')\r\nEXTRA_EMOTIONWORD_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'emotionlist.txt')\r\nEXTRA_ONE_WORD_WHITE_LIST_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'one_word_white_list.txt')\r\nEXTRA_BLACK_LIST_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'black.txt')\r\n\r\ncx_dict = ['an','Ng','n','nr','ns','nt','nz','vn','@']#关键词词性词典\r\n\r\ndef load_one_words():\r\n one_words = [line.strip('\\r\\n') for line in 
open(EXTRA_EMOTIONWORD_PATH)]\r\n return one_words\r\n\r\ndef load_black_words():\r\n one_words = [line.strip('\\r\\n') for line in open(EXTRA_BLACK_LIST_PATH)]\r\n return one_words\r\n\r\nsingle_word_whitelist = set(load_one_words())\r\nblack_word = set(load_black_words())\r\n\r\ndef load_scws():\r\n s = scws.Scws()\r\n s.set_charset(SCWS_ENCODING)\r\n\r\n s.set_dict(CHS_DICT_PATH, scws.XDICT_MEM)\r\n s.add_dict(CHT_DICT_PATH, scws.XDICT_MEM)\r\n s.add_dict(CUSTOM_DICT_PATH, scws.XDICT_TXT)\r\n\r\n # split every stopword into single characters, then filter out single characters, which effectively removes the stopwords\r\n s.add_dict(EXTRA_STOPWORD_PATH, scws.XDICT_TXT)\r\n # i.e. segment emoticons using the emoticon dictionary; strip them from the results afterwards when necessary\r\n s.add_dict(EXTRA_EMOTIONWORD_PATH, scws.XDICT_TXT)\r\n\r\n s.set_rules(SCWS_RULES)\r\n s.set_ignore(IGNORE_PUNCTUATION)\r\n return s\r\n\r\nSW = load_scws()\r\n\r\ndef cut_des(text):\r\n\r\n des_dict = ['nr','ns','nt','nz']\r\n tks = [token[0] for token\r\n in SW.participle(text)\r\n if token[1] in des_dict]\r\n\r\n return set(tks)\r\n\r\n## end of loading the word segmentation tool\r\n\r\n## load the weight of each event type\r\nEVENT_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'event_type.csv')\r\n\r\ndef load_event_type():\r\n reader = csv.reader(open(EVENT_PATH, 'rb'))\r\n event_type = dict()\r\n for e_t,weight in reader:\r\n event_type[e_t] = float(weight)\r\n return event_type\r\n\r\nevent_type_dict = load_event_type()\r\n\r\n## end of loading event type weights\r\n\r\ninteraction_count = 100\r\nN_GRAM = 5 # length of the word co-occurrence window\r\nWORD_N = 30 # number of keywords to extract\r\nTOPIC_N = 10 # number of LDA topics\r\nMAX_COUNT = 500 # maximum number of words (topic pagerank)\r\nCOUNT_RATE = 0.1 # ratio that limits the number of interactions\r\ninter_sta = 4 # minimum number of interactions\r\nevent_sta = 0.1 # minimum number of overlapping words\r\nMAX_I = 1.3 # percentage for the maximum influence value\r\nMIN_I = 0.7 # percentage for the minimum influence value\r\n\r\n# verification types\r\npeo_list = [-1,0,200,220,400]\r\norg_list = [1,2,3,4,5,6,7,8]\r\n\r\n# person similarity thresholds\r\npeo_sta = 0.3\r\neve_sta = 0.3\r\ncom_sta = 3\r\n\r\n# event similarity threshold\r\ncom_sta_eve = 2\r\n","sub_path":"knowledge/cron/API_user_portrait/config_relation.py","file_name":"config_relation.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"384247098","text":"from aiohttp import web\nfrom .db import DataBase\n\nasync def getKeyword(request):\n \"\"\"\n API endpoint to return information about a specific keyword.\n \"\"\"\n db = request.app['db']\n keyword = request.match_info.get('keyword')\n return web.json_response(db.getKeywordDetails(keyword))\n\nasync def getKeywords(request):\n \"\"\"\n API endpoint to return all known keywords.\n \"\"\"\n db = request.app['db']\n\n return web.json_response(db.getKeywords())\n\n\nif __name__==\"__main__\":\n try:\n # initialize server; register the static /keywords route before the\n # dynamic /{keyword} route so it is not shadowed\n app = web.Application()\n app.add_routes([\n web.get('/keywords', getKeywords),\n web.get('/{keyword}', getKeyword)\n ])\n\n # initialize database\n db = DataBase()\n app['db'] = db\n\n # run server\n web.run_app(app)\n except KeyboardInterrupt:\n db.close()","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"93808751","text":"import logging\n\nfrom speedling import conf\nfrom speedling import facility\nfrom speedling import gitutils\nfrom speedling import tasks\nfrom speedling import usrgrp\nfrom speedling import util\n\nLOG = logging.getLogger(__name__)\nsp = 'sl-'\n\n\ndef task_cinder_steps(self):\n c_srv = set(self.services.keys())\n self.wait_for_components(self.sql)\n\n schema_node_candidate = self.hosts_with_service('cinder-api')\n schema_node = util.rand_pick(schema_node_candidate)\n
db sync\" cinder'\n self.call_do(schema_node, facility.do_retrycmd_after_content, c_args=(sync_cmd, ))\n\n # start services\n self.call_do(self.hosts_with_any_service(c_srv), self.do_local_cinder_service_start)\n self.wait_for_components(self.messaging, self.keystone)\n facility.task_wants(self.keystone.final_task, self.messaging.final_task,\n *set.union(*(s['component'].get_waits_for_cinder_task() for s in self.backends)))\n\n\nclass Cinder(facility.OpenStack):\n origin_repo = 'https://github.com/openstack/cinder.git'\n deploy_source = 'src'\n deploy_source_options = {'src', 'pkg'},\n services = {'cinder-api': {'deploy_mode': 'standalone',\n 'unit_name': {'src': sp + 'c-api',\n 'pkg': 'openstack-cinder-api'}},\n 'cinder-volume': {'deploy_mode': 'standalone',\n 'unit_name': {'src': sp + 'c-vol',\n 'pkg': 'openstack-cinder-volume'}},\n 'cinder-scheduler': {'deploy_mode': 'standalone',\n 'unit_name': {'src': sp + 'c-sch',\n 'pkg': 'openstack-cinder-scheduler'}},\n 'cinder-backup': {'deploy_mode': 'standalone',\n 'unit_name': {'src': sp + 'c-bak',\n 'pkg': 'openstack-cinder-backup'}}}\n\n def __init__(self, *args, **kwargs):\n super(Cinder, self).__init__(*args, **kwargs)\n self.final_task = self.bound_to_instance(task_cinder_steps)\n self.peer_info = {}\n self.upload_image_registry = {}\n self.sql = self.dependencies[\"sql\"]\n self.backends = self.dependencies[\"backends\"]\n self.haproxy = self.dependencies[\"loadbalancer\"]\n self.keystone = self.dependencies[\"keystone\"]\n self.messaging = self.dependencies[\"messaging\"]\n\n # multi backend config with one backend, named 'ceph'\n def etc_cinder_cinder_conf(self):\n gconf = conf.get_global_config()\n return {\n 'DEFAULT': {'debug': True,\n 'glance_api_version': 2,\n 'enabled_backends': 'ceph',\n 'default_volume_type': 'ceph',\n 'backup_swift_url': 'http://' + conf.get_vip('public')['domain_name'] + ':8080/v1/AUTH_',\n 'transport_url': self.messaging.transport_url()},\n 'database': {'connection': self.sql.db_url('cinder')},\n 'keystone_authtoken': self.keystone.authtoken_section('cinder'),\n 'oslo_concurrency': {'lock_path': '$state_path/lock'},\n 'ceph': {'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',\n 'rbd_pool': 'volumes',\n 'rbd_user': 'cinder',\n 'rbd_ceph_conf': '/etc/ceph/ceph.conf',\n 'volume_backend_name': 'ceph',\n 'rbd_secret_uuid': gconf['cinder_ceph_libvirt_secret_uuid']}}\n\n def etccfg_content(self):\n super(Cinder, self).etccfg_content()\n c_srv = set(self.services.keys())\n usrgrp.group('cinder', 165)\n usrgrp.user('cinder', 'cinder')\n util.base_service_dirs('cinder')\n comp = self\n self.file_path('/var/lib/cinder/lock',\n owner='cinder', group='cinder')\n\n self.file_ini('/etc/cinder/cinder.conf', self.etc_cinder_cinder_conf(),\n owner='cinder', group='cinder')\n cinder_git_dir = gitutils.component_git_dir(comp)\n\n self.file_install('/etc/cinder/api-paste.ini',\n '/'.join((cinder_git_dir,\n 'etc/cinder/api-paste.ini')),\n mode=0o644, owner='cinder', group='cinder')\n self.file_install('/etc/cinder/resource_filters.json',\n '/'.join((cinder_git_dir,\n 'etc/cinder/resource_filters.json')),\n mode=0o644,\n owner='cinder', group='cinder')\n services = self.filter_node_enabled_services(c_srv)\n if comp.deploy_source == 'src':\n co_srv = comp.services\n util.unit_file(co_srv['cinder-scheduler']['unit_name']['src'],\n '/usr/local/bin/cinder-scheduler',\n 'cinder')\n util.unit_file(co_srv['cinder-api']['unit_name']['src'],\n '/usr/local/bin/cinder-api',\n 'cinder')\n 
\n util.unit_file(co_srv['cinder-volume']['unit_name']['src'],\n '/usr/local/bin/cinder-volume',\n 'cinder')\n util.unit_file(co_srv['cinder-backup']['unit_name']['src'],\n '/usr/local/bin/cinder-backup',\n 'cinder')\n # TODO handle bin dir\n if 'cinder-volume' in services or 'cinder-backup' in services:\n self.file_plain('/etc/sudoers.d/cinder', \"\"\"Defaults:cinder !requiretty\ncinder ALL = (root) NOPASSWD: /usr/local/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *\ncinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *\n\"\"\")\n self.file_path('/etc/cinder/rootwrap.d',\n owner='cinder', group='cinder')\n self.file_install('/etc/cinder/rootwrap.d/volume.filters',\n '/'.join((cinder_git_dir,\n 'etc/cinder/rootwrap.d/volume.filters')),\n mode=0o444)\n self.file_install('/etc/cinder/rootwrap.conf',\n '/'.join((cinder_git_dir,\n 'etc/cinder/rootwrap.conf')),\n mode=0o444)\n\n def do_local_cinder_service_start(cname):\n self = facility.get_component(cname)\n tasks.local_os_service_start_by_component(self)\n\n def get_node_packages(self):\n pkgs = super(Cinder, self).get_node_packages()\n pkgs.update({'lvm2', 'util-cli\\\\qemu-img'})\n if self.deploy_source == 'pkg':\n pkgs.update({'openstack-cinder'})\n return pkgs\n\n def compose(self):\n # it can consider the full inventory and config to influence the facility's registered\n # resources\n super(Cinder, self).compose()\n url_base = \"http://\" + conf.get_vip('public')['domain_name']\n dr = conf.get_default_region()\n\n self.keystone.register_endpoint_tri(region=dr,\n name='cinder',\n etype='volume',\n description='OpenStack Volume Service',\n url_base=url_base + ':8776/v1/$(tenant_id)s')\n self.keystone.register_endpoint_tri(region=dr,\n name='cinderv2',\n etype='volumev2',\n description='OpenStack Volume Service',\n url_base=url_base + ':8776/v2/$(tenant_id)s')\n self.keystone.register_endpoint_tri(region=dr,\n name='cinderv3',\n etype='volumev3',\n description='OpenStack Volume Service',\n url_base=url_base + ':8776/v3/$(tenant_id)s')\n self.keystone.register_service_admin_user('cinder')\n comp = self\n cins = self.hosts_with_any_service(set(comp.services.keys()))\n self.sql.register_user_with_schemas('cinder', ['cinder'])\n self.sql.populate_peer(cins, ['client'])\n self.messaging.populate_peer(cins)\n util.bless_with_principal(cins, [(self.keystone.name, 'cinder@default'),\n (self.messaging.name, 'openstack'),\n (self.sql.name, 'cinder')])\n","sub_path":"slos/cinder.py","file_name":"cinder.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"260221824","text":"import argparse\nimport copy\nimport csv\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\n\n\nclass InputError(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return repr(self.msg)\n\n\nclass PreferenceSchedule():\n\n def __init__(self, candidates, prefs):\n # check whether the candidates list consists of only strings\n if not all(map(lambda x: type(x) == str, candidates)):\n raise InputError('Candidate must be a string')\n\n # check the validity of the preferences\n for pref in prefs:\n # check whether the number of candidates in the preference schedule\n # is valid\n if len(pref) != len(candidates):\n raise InputError('Invalid preference schedule')\n\n # check whether the candidates in the preference schedule are unique\n if len(set(pref)) != len(candidates):\n raise InputError('Invalid preference schedule')\n\n # check whether the candidates in the preference schedule are also
\n # in the candidates list\n for candidate in pref:\n if candidate not in candidates:\n raise InputError('Invalid preference schedule')\n\n self.prefs = prefs\n\n def original(self):\n '''Returns the original preference schedule as a printable string'''\n\n res = ''\n for i in range(len(self.prefs)):\n res += 'Voter {}: '.format(i+1) + ', '.join(self.prefs[i]) + '\\n'\n\n return res[:-1]\n\n def detailed(self):\n '''Returns the detailed preference schedule as a printable string'''\n\n # count the number of occurrences of each preference\n prefs = [tuple(p) for p in self.prefs]\n counts = {}\n while prefs:\n pref = prefs.pop(0)\n count = 1\n while pref in prefs:\n prefs.remove(pref)\n count += 1\n counts[pref] = count\n\n res = ''\n for pref in counts:\n res += str(counts[pref]) + ' Voters: ' + ', '.join(pref) + '\\n'\n\n return res[:-1]\n\n\nclass Aggregator():\n\n def __init__(self, file):\n df = pd.read_csv(file, header=None)\n prefs = df.to_numpy()\n prefs[0] = prefs[0].astype(int)\n prefs = np.transpose(prefs)\n repeat = list(prefs[:, 0])\n prefs = np.delete(prefs, 0, axis=1)\n candidates = np.unique(np.sort(prefs[0], axis=None)).tolist()\n prefs = np.repeat(prefs, repeats=repeat, axis=0).tolist()\n self.candidates = candidates\n self.pref_schedule = PreferenceSchedule(candidates, prefs)\n\n def __str__(self):\n res = ''\n res += 'Preference Schedule:\\n'\n res += self.pref_schedule.original() + '\\n\\n'\n res += 'Detailed Preference Schedule:\\n'\n res += self.pref_schedule.detailed() + '\\n'\n\n return res\n\n def plurality(self):\n '''Prints who wins by the plurality method'''\n\n counts = {}\n for pref in self.pref_schedule.prefs:\n highest = pref[0]\n if highest in counts:\n counts[highest] += 1\n else:\n counts[highest] = 1\n\n print('The numbers of votes for each candidate:', counts)\n print('The Plurality winner(s) is(are)', find_winner(counts))\n\n def condorcet(self):\n '''Prints who wins by the Condorcet method'''\n\n points = {candidate: 0 for candidate in self.candidates}\n candidates = list(self.candidates)\n for candidate in candidates[:]:\n candidates.remove(candidate)\n for rival in candidates:\n candidate_points = 0\n for pref in self.pref_schedule.prefs:\n if pref.index(candidate) < pref.index(rival):\n candidate_points += 1\n else:\n candidate_points -= 1\n if candidate_points > 0:\n points[candidate] += 1\n else:\n points[rival] += 1\n\n print('The Condorcet winner(s) is(are)', find_winner(points))\n\n def borda(self):\n '''Prints who wins by the Borda count'''\n\n counts = {}\n candidates = list(self.pref_schedule.prefs[0])\n for candidate in candidates:\n counts[candidate] = 0\n\n max_point = len(candidates)\n for pref in self.pref_schedule.prefs:\n for i in range(len(pref)):\n counts[pref[i]] += max_point - i\n\n print('Borda scores:', counts)\n print('The Borda winner(s) is(are)', find_winner(counts))\n\n def copeland(self):\n '''Prints who wins by the Copeland’s Rule'''\n\n points = {candidate: 0 for candidate in self.candidates}\n candidates = list(self.candidates)\n for candidate in candidates[:]:\n candidates.remove(candidate)\n for rival in candidates:\n candidate_points = 0\n for pref in self.pref_schedule.prefs:\n if pref.index(candidate) < pref.index(rival):\n candidate_points += 1\n else:\n candidate_points -= 1\n if candidate_points > 0:\n points[candidate] += 1\n else:\n points[rival] += 1\n\n print('Copeland points:', points)\n print('The Copeland winner(s) is(are)', find_winner(points))\n\n def runoff(self):\n '''Prints who wins by the Instant runoff'''\n\n num_round = 1\n candidates = self.candidates[:]\n prefs = copy.deepcopy(self.pref_schedule.prefs)\n\n while len(candidates) >= 2:\n counts = {}\n for pref in prefs:\n highest = pref[0]\n if highest in counts:\n counts[highest] += 1\n else:\n counts[highest] = 1\n print('The numbers of votes for each candidate (round {}):'.format(\n num_round), counts)\n\n lowest_votes = min(counts.values())\n for candidate in counts:\n if counts[candidate] == lowest_votes:\n candidates.remove(candidate)\n for pref in prefs:\n pref.remove(candidate)\n\n num_round += 1\n\n print('The Runoff winner(s) is(are)', find_winner(counts))\n\n\ndef find_winner(aggregated_result):\n max_point = 0\n for point in aggregated_result.values():\n if point > max_point:\n max_point = point\n\n winner = [] # winner can be many, so use a list here\n for candidate in aggregated_result.keys():\n if aggregated_result[candidate] == max_point:\n winner.append(candidate)\n\n return winner\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'csv', help='a csv file containing preferences', type=str)\n parser.add_argument('-m', '--method', type=str,\n help='specify a winner selection method by a name')\n args = parser.parse_args()\n\n aggr = Aggregator(args.csv)\n\n if args.method:\n method = args.method\n try:\n if method == 'plurality':\n print('Plurality method\\n')\n print(aggr)\n aggr.plurality()\n elif method == 'condorcet':\n print('Condorcet method\\n')\n print(aggr)\n aggr.condorcet()\n elif method == 'borda':\n print('Borda count\\n')\n print(aggr)\n aggr.borda()\n elif method == 'copeland':\n print('Copeland’s Rule\\n')\n print(aggr)\n aggr.copeland()\n elif method == 'runoff':\n print('Instant runoff method\\n')\n print(aggr)\n aggr.runoff()\n else:\n raise InputError('Invalid method name')\n except InputError as e:\n print('Error:', e.msg)\n sys.exit()\n else:\n # examine all winner selection methods\n print(aggr)\n print('Plurality method:')\n aggr.plurality()\n print('\\nCondorcet method:')\n aggr.condorcet()\n print('\\nBorda count:')\n aggr.borda()\n print('\\nCopeland’s Rule:')\n aggr.copeland()\n print('\\nInstant runoff method:')\n aggr.runoff()\n","sub_path":"social_choice.py","file_name":"social_choice.py","file_ext":"py","file_size_in_byte":8691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"528232627","text":"import os.path\nfrom lxml import etree\nfrom prog.document import Document\n\n\ndef getTraits(file_path):\n file_path_array = file_path.split(\"/\")\n filename = file_path_array[-1].split(\".\")[0]\n file_info = filename.split(\"_\")\n if (len(file_info) != 4):\n raise NameError('Wrong file name')\n doc_id = file_info[0]\n lang = file_info[1]\n traits = {'age': file_info[2], 'gender': file_info[3]}\n return doc_id, lang, traits\n\n\ndef getContent(file_path):\n with open(file_path, 'r', encoding=\"cp1252\") as content_file:\n content = content_file.read()\n\n parser = etree.XMLParser(recover=True)\n root = etree.fromstring(content, parser=parser)\n posts = []\n for child in root.iter('conversation'):\n posts.append(child.text.strip())\n return posts\n\n\ndef getObjectRepresentation(file_path):\n if (os.path.exists(file_path)):\n doc_id, lang, traits = 
getTraits(file_path)\n contents = getContent(file_path)\n fileRepresentations = list(\n map(lambda c: Document(c, doc_id, traits, lang), contents))\n return fileRepresentations\n else:\n raise NameError('File does not exist')\n","sub_path":"prog/readingFiles.py","file_name":"readingFiles.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"115936682","text":"import random\r\n\r\nclass Egg:\r\n\r\n def __init__(self, critical_floor):\r\n self.critical_floor = critical_floor\r\n\r\n def drop(self, floor):\r\n if (floor >= self.critical_floor):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\nclass FloorAlgorithm:\r\n\r\n def __init__(self, floors, critical_floor):\r\n self.egg = Egg(critical_floor)\r\n self.floors = floors\r\n self.rec_comparisons = 0\r\n self.iter_comparisons = 0\r\n\r\n def run_iterative(self):\r\n\r\n for i in range(0, self.floors):\r\n self.iter_comparisons = self.iter_comparisons + 1\r\n if (self.egg.drop(i)):\r\n return i\r\n \r\n\r\n def run_rec(self):\r\n return self.run_recursive(self.egg, self.floors / 2, self.floors)\r\n\r\n\r\n def run_recursive(self, egg, current_floor, total_floors):\r\n self.rec_comparisons = self.rec_comparisons + 1\r\n if (current_floor == 0 or (egg.drop(current_floor) and not egg.drop(current_floor-1))):\r\n return int(current_floor) # assuming at least one floor will break the egg\r\n elif (egg.drop(current_floor)):\r\n return self.run_recursive(egg, current_floor - int(current_floor / 2), current_floor)\r\n else:\r\n return self.run_recursive(egg, current_floor + int((total_floors - current_floor) / 2), total_floors)\r\n\r\n def print_history(self):\r\n\r\n actual_critical_floor = self.egg.critical_floor\r\n rec_critical_floor = self.run_rec()\r\n iter_critical_floor = self.run_iterative()\r\n\r\n print(\"Results of Egg Comparison:\\n\")\r\n print(f\"Actual Critical Floor: {actual_critical_floor}\")\r\n print(\"---\\nIterative Egg:\")\r\n print(f\"Critical Floor Returned: {iter_critical_floor}\")\r\n print(f\"Number of Comparisons: {self.iter_comparisons}\")\r\n print(\"---\\nRecursive Egg:\")\r\n print(f\"Critical Floor Returned: {rec_critical_floor}\")\r\n print(f\"Number of Comparisons: {self.rec_comparisons}\")\r\n \r\n \r\n\r\nfloors = int(input(\"Please enter the number of floors in the building: \\n\"))\r\n\r\n\r\nfloor_algo = FloorAlgorithm(floors, random.randrange(floors))\r\n\r\nfloor_algo.print_history()\r\n","sub_path":"egg.py","file_name":"egg.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"370682139","text":"#python3.4.1 is required\n#xively and aiocoap libary is requiered\nimport asyncio\nimport socket\nimport xively\nimport datetime\nimport requests\nimport aiocoap\nimport aiocoap.proxy\nimport sys\n\n\ndef log(msg):\n f = open(\"log.txt\", \"a+\")\n time = datetime.datetime.now()\n f.write(str(time) + \"\\n\")\n f.write(msg + \"\\n\\n\\n\")\n f.close()\n\n\ndef update(channel, val):\n api = xively.XivelyAPIClient(API_KEY)\n feed = api.feeds.get(FEED_ID)\n now = datetime.datetime.utcnow()\n feed.datastreams = [\n xively.Datastream(id=channel, current_value=val, at=now,)\n ]\n try:\n feed.update()\n except (requests.HTTPError, requests.ConnectError) as e:\n log(\"Error while update \\n Error({0}): {1}\".format(e.errno, e.strerror))\n\n@asyncio.coroutine\ndef main(fileName):\n try:\n file = open(fileName)\n except:\n 
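# note: the bare except only logs the failure; 'file' stays unbound, so the\n # 'for line in file' loop below raises NameError when the open fails\n 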
log(\"Error while opening file: \" + fileName + \"\\n\")\n\n for line in file:\n url, channel = line.strip(\"\\n\").split(\" \")\n\n endpoint = yield from aiocoap.Endpoint.create_client_endpoint()\n request = aiocoap.Message(code=aiocoap.GET)\n\n try:\n request.set_request_uri(url)\n except:\n log(\"Wrong URL: \" + url)\n\n if not request.opt.uri_host:\n log(\"Request URLs need to be absolute.\")\n\n try:\n interface = endpoint\n requester = interface.request(request)\n except:\n log(\"Time out\")\n\n try:\n response_data = yield from requester.response\n except socket.gaierror as e:\n log(\"Name resolution error:\" + e)\n\n if response_data.code.is_successful():\n tmp = float(str(response_data.payload)[2:-1])\n update(channel, tmp)\n sys.stdout.buffer.flush()\n else:\n log(\"Response data unsuccessful\")\n if response_data.payload:\n log(response_data.payload.decode('utf-8'))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n API_KEY = \"b9ZQ5Aopk6alRiCAtc5YAW0cY1QFPDf8r740lCELQgkvDUqW\"\n FEED_ID = \"1946998623\"\n\n try:\n asyncio.get_event_loop().run_until_complete(main(sys.argv[1]))\n except :\n log(\"Request time out\")\n","sub_path":"coapClient.py","file_name":"coapClient.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"634913205","text":"from djangocms_text_ckeditor.widgets import TextEditorWidget\nfrom crispy_forms.layout import Layout, Fieldset\nfrom crispy_forms.bootstrap import Field\nfrom django.forms import CharField\nfrom django.shortcuts import redirect, render, get_object_or_404\n\nfrom apps.newsletter.models import Newsletter\nfrom areas.hb_admin.forms import BaseEditForm\nfrom areas.hb_admin.models import ReturnUrl\nfrom areas.hb_admin.views import BaseAdminView\n\n\n#\n# newsletter edit view\n#\nclass NewsletterEditView(BaseAdminView):\n template_name = 'hb_admin/edit_form.html'\n template_vars = None\n form = None\n return_url = None\n pk = None\n\n #\n # Load everything needed for the view\n #\n def load_view(self, request, *args, **kwargs):\n\n # get the vars\n self.pk = kwargs.get('pk', None)\n\n # save the return url\n self.return_url = ReturnUrl(request, ReturnUrl.HB_ADMIN.NEWSLETTER_EDIT)\n self.return_url.push()\n\n # load the form\n if self.pk is None:\n\n # add\n if request.method == 'POST':\n self.form = EditForm(\n request.POST or None,\n return_url=self.return_url.previous()\n )\n else:\n mdl = Newsletter()\n self.form = EditForm(\n instance=mdl,\n return_url=self.return_url.previous()\n )\n else:\n # edit\n mdl = get_object_or_404(Newsletter, pk=self.pk)\n\n # setup the form\n self.form = EditForm(\n request.POST or None,\n instance=mdl,\n return_url=self.return_url.previous()\n )\n\n # set the template vars\n self.template_vars = {\n 'form': self.form\n }\n\n #\n # GET\n #\n def get(self, request, *args, **kwargs):\n\n # load the view\n self.load_view(request, *args, **kwargs)\n\n return render(request, self.template_name, self.template_vars)\n\n #\n # POST\n #\n def post(self, request, *args, **kwargs):\n\n # load the view\n self.load_view(request, *args, **kwargs)\n\n # if form valid\n if self.form.is_valid():\n\n # save form\n self.form.save()\n\n # Redirect to previous page\n url = self.return_url.previous()\n return redirect(url)\n\n return render(request, self.template_name, self.template_vars)\n\n\n#\n# Add/Edit Form\n#\nclass EditForm(BaseEditForm):\n title = CharField(required=True)\n\n def __init__(self, *args, **kwargs):\n return_url = 
kwargs.pop('return_url')\n super(EditForm, self).__init__(*args, **kwargs)\n\n self.helper.cancel_url = return_url\n self.update_col_size(0)\n self.helper.layout = Layout(\n Fieldset(\n '{} Newsletter'.format('Add' if self.instance.pk is None else 'Edit'),\n # Field('published_date', template='_shared/fields/datetime.html'),\n 'published_date',\n Field('title', css_class=\"control-lg\"),\n 'main_content',\n 'summary_content',\n Field('hidden', template='_shared/fields/checkbox.html'),\n )\n )\n\n def clean(self):\n return self.cleaned_data\n\n class Meta:\n model = Newsletter\n fields = [\n 'hidden', 'main_content', 'published_date', 'summary_content', 'title'\n ]\n widgets = {\n 'main_content': TextEditorWidget(),\n 'summary_content': TextEditorWidget(),\n }","sub_path":"areas/hb_admin/newsletter/newsletters/newsletter_edit.py","file_name":"newsletter_edit.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"248362240","text":"def decode(digit):\n number = len(digit)\n count = [0] * (number+1)\n count[0] = 1\n count[1] = 0 if digit[0] == '0' else 1\n\n for i in range(2, number+1):\n count[i] = 0\n\n if digit[i-1] > '0':\n count[i] = count[i-1]\n\n if digit[i-2] == '1' or (digit[i-2] == '2' and digit[i-1] < '7'):\n count[i] += count[i-2]\n\n return count[number]\n\n\nprint(decode(digit=input()))\n","sub_path":"leetcode_decode_ways.py","file_name":"leetcode_decode_ways.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"276715683","text":"#!/usr/bin/python\nimport os\nimport random\nimport subprocess\n\n# User Variables\nwallpaper_dir = \"/home/daniel/Pictures/\" # Wallpaper directory\n\nclass Wall:\n def __init__(self, identifier):\n self.identifier = identifier\n\n def set_wallpaper(self, new_wall):\n script = 'xfconf-query -c xfce4-desktop -p ' + str(self.identifier) + ' -s \"' + new_wall + '\" 2> /dev/null'\n\n proc = subprocess.Popen(script, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n\ndef getWallpaper():\n wallpaper_list = os.listdir(wallpaper_dir)\n wallpaper_new = random.randrange(0, len(wallpaper_list))\n\n return(wallpaper_dir+wallpaper_list[wallpaper_new])\n\ndef getScreens():\n # Get a list of connected displays\n list_monitors = 'xfconf-query -c xfce4-desktop -p /backdrop -l|egrep -e \"screen.*/monitor.*/last-image$\"' # Modified from Variety script. 
Thanks Peter Levi!\n xconf = subprocess.Popen(list_monitors, stdout=subprocess.PIPE, shell=True) # Runs list_monitors as a bash script\n (out, err) = xconf.communicate() # Returns the output of list_monitors\n screens = out.split('\\n') # Splits multi-line result into a list\n screens.pop(-1) # Removes the extra entry created from the final endline\n return(screens)\n\nwalls = []\nfor screen in getScreens():\n walls.append(Wall(screen)) # Create an instance of Wall for each screen\n\nfor wall in walls:\n wall.set_wallpaper(getWallpaper()) # Set each Wall to a random wallpaper\n","sub_path":"xfce4-multiwall.py","file_name":"xfce4-multiwall.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"489648435","text":"import wx\nimport wx.grid\n#from dodatki import Dodatki\nfrom .dodatki import DodatkiView\nfrom model.oferta import OfertaService\nREMOVE_EL_MESSAGE = \"Czy na pewno chcesz usunąć element\"\nREMOVE_EL_CAPTION = \"Usuń Element\"\nclass ElementView(wx.Panel):\n\n def __init__(self,data_representation = None,show_remove_button=True, parent=None,*args, **kw):\n super(ElementView, self).__init__(parent=parent ,*args, **kw)\n self.data = data_representation\n self.show_remove_button = show_remove_button\n self._id = None\n self.dodatki = list()\n self.inputElements = []\n\n #Dialog\n self.remove_offer_dlg = wx.MessageDialog(self.Parent, REMOVE_EL_MESSAGE,REMOVE_EL_CAPTION ,wx.YES_NO)\n self.remove_offer_dlg.SetYesNoLabels(\"&Tak\", \"&Nie\")\n\n # def Sizers\n self.btn_sizer = wx.BoxSizer(wx.HORIZONTAL)\n self.data_sizer = wx.GridSizer(2)\n self.main_sizer = wx.BoxSizer(wx.VERTICAL)\n self.dodatki_sizer = wx.GridSizer(1)\n\n #\"Dodaj Dodatek\" and \"Usuń Element\" btn Create\n self.btn_add = wx.Button(parent=self,label=\"Dodaj Dodatek\",size=(-1,50))\n self.btn_add.Bind(wx.EVT_BUTTON,self.AddDodatek)\n self.btn_sizer.Add(self.btn_add,1, wx.EXPAND)\n if self.show_remove_button:\n self.btn_rm = wx.Button(parent=self,label=\"Usuń Element\",size=(-1,50))\n self.btn_rm.Bind(wx.EVT_BUTTON,self.Remove)\n self.btn_sizer.Add(self.btn_rm,1, wx.EXPAND) \n \n #Set Size Sizers\n self.data_sizer.SetMinSize(self.Parent.GetSize()[0]-20,(self.data_sizer.GetSize()[1]))\n self.dodatki_sizer.SetMinSize(self.Parent.GetSize()[0]-20,(self.dodatki_sizer.GetSize()[1]))\n self.btn_sizer.SetMinSize(self.Parent.GetSize()[0]-20,(self.btn_sizer.GetSize()[1]))\n #Append subSizers to main sizer\n self.main_sizer.Add(self.data_sizer,0,wx.EXPAND)\n self.main_sizer.Add(self.dodatki_sizer,0,wx.EXPAND)\n self.main_sizer.Add(self.btn_sizer,0,wx.EXPAND)\n self.SetSizerAndFit(self.main_sizer)\n self.UpdateLayout()\n\n \n \n #----------------------------------------------------------------------\n def Remove(self,event=None):\n if self.remove_offer_dlg.ShowModal() == wx.ID_YES: # Show Remove dialog \n p = self.Parent \n OfertaService().RemoveOfferElement(self.data)\n self.Destroy()\n p.Layout() #Resize offer panel after remove element\n\n #----------------------------------------------------------------------\n def AddDodatek(self,event= None, dodatek=None): \n if dodatek != None:\n dod = DodatkiView(parent=self,value1=dodatek.value1,cena=dodatek.cenaZL2,value2=dodatek.value2, UpdateLayout = self.UpdateLayout)\n else:\n dod = DodatkiView(parent=self, UpdateLayout= self.UpdateLayout)\n self.inputElements.append(dod.value1)\n self.inputElements.append(dod.value2)\n self.inputElements.append(dod.cenaZL2)\n self.dodatki.append(dod)\n 
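# the new DodatkiView is tracked in self.dodatki/self.inputElements above and handed\n # to dodatki_sizer below, keeping the data model and the layout in sync\n 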
self.dodatki_sizer.Add(dod,1, wx.EXPAND)\n self.UpdateLayout()\n\n #----------------------------------------------------------------------\n def UpdateLayout(self): \n \"\"\"\n \n Resize Element and Element children \n\n \"\"\" \n self.main_sizer.SetMinSize(self.GetSize()[0],-1)\n self.main_sizer.SetSizeHints(self)\n self.dodatki_sizer.Layout() \n self.Parent.FitInside()\n def GetData(self):\n pass\n\n \n ","sub_path":"view/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"600932684","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nModules to compute the matching cost and solve the corresponding LSAP.\n\"\"\"\nimport torch\nfrom scipy.optimize import linear_sum_assignment\nfrom torch import nn\n\nfrom util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou\n\n\nclass HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\" Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n # get the size of first two output dimensions, see Params\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n # outputs[\"pred_logits\"] gets reshaped to [batch_size * num_queries, num_classes],\n # then apply softmax on the num_classes dimension\n out_prob = outputs[\"pred_logits\"].flatten(0, 1).softmax(-1) \n # outputs[\"pred_boxes\"] gets reshaped to [batch_size * num_queries, 4]\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 
1) \n\n # Also concat the target labels and boxes\n # tgt_ids is a long list of labels for each bounding box, of shape [batch_size * num_target_boxes, 1]\n tgt_ids = torch.cat([img[\"labels\"] for img in targets])\n # tgt_bbox is a long list of boxes for each bounding box, of shape [batch_size * num_target_boxes, 4]\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n # Select the class probability at num_classes[tgt_ids]\n # Higher out_prob gets lower cost\n # out_prob is a long list of class vector probabilities for each bounding box\n # cost_class shape: [batch_size * num_queries, len(tgt_ids)]\n # TODO: need intuitive, not wasting computation and memory, since only 1 out of bs will be used.\n #cost_class = -out_prob[:, tgt_ids]\n \n \n list_img_pred_probs = [probs for probs in outputs[\"pred_logits\"]]\n list_img_target_classes = [img[\"labels\"] for img in targets]\n list_img_cost_class = [\n self.cost_class * -queries_probs[:, target_classes]\n for queries_probs, target_classes in zip(list_img_pred_probs, list_img_target_classes)] \n\n # Compute the L1 cost between boxes for each value in the bounding box\n list_img_pred_bboxes = [bboxes for bboxes in outputs[\"pred_boxes\"]]\n list_img_target_bboxes = [img[\"boxes\"] for img in targets]\n list_img_cost_bboxes = [\n self.cost_bbox * torch.cdist(pred_bboxes, target_bboxes, p=1)\n for pred_bboxes, target_bboxes in zip(list_img_pred_bboxes, list_img_target_bboxes)]\n #cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n list_img_cost_giou = [\n self.cost_giou * -generalized_box_iou(box_cxcywh_to_xyxy(pred_bboxes), box_cxcywh_to_xyxy(target_bboxes))\n for pred_bboxes, target_bboxes in zip(list_img_pred_bboxes, list_img_target_bboxes)]\n #cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n \n # Final cost matrix\n list_img_cost_matrix = [\n cost_class + cost_bboxes + cost_giou\n for cost_class, cost_bboxes, cost_giou in zip(list_img_cost_class, list_img_cost_bboxes, list_img_cost_giou)]\n # Final cost matrix\n # C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n # reshape the cost of each bounding box back into shape [batch_size, num_queries, total_num_targets_in_img_batch]\n # C = C.view(bs, num_queries, -1).cpu()\n \n # get best matching indices\n list_img_matching_query_target = [linear_sum_assignment(cost_matrix) for cost_matrix in list_img_cost_matrix]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in list_img_matching_query_target]\n\n # get the number of boxes of each image in the ground truth\n # num_targets_in_each_img = [len(img[\"boxes\"]) for img in targets]\n \n # we need a list of cost matrix of each img of shape [num_queries, targets[i]]\n \n # C.shape is [bs, num_queries, sum(num_targets_in_each_img)] and c.shape is [bs, num_queries, num_targets_in_imgs[i]]\n # c[i].shape is [num_queries, sizes[i]]\n # indices is a list of tuples (row_ind, col_ind) of length batch_size, each representing one image\n # where row_ind is a list ids of length num_targets, col_ind is a list ids of length num_targets\n # indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(num_targets_in_imgs, -1))]\n \n # convert numpy array into torch tensor\n 
# return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n\n\ndef build_matcher(args):\n return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)\n","sub_path":"models/efficient_matcher.py","file_name":"efficient_matcher.py","file_ext":"py","file_size_in_byte":7375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"455559556","text":"from flask import Flask, render_template, request\nfrom clients import WikiPlacesRecognition, PageNotFound\n\n\napp = Flask(__name__)\nwiki_places_recognition = WikiPlacesRecognition.from_api_token_path(\"mapbox_key.txt\")\n\n\n@app.route(\"/form\")\ndef form():\n return render_template(\"form.html\")\n\n\n@app.route(\"/data\", methods=[\"POST\", \"GET\"])\ndef data():\n if request.method == \"GET\":\n return render_template(\"form.html\")\n if request.method == \"POST\":\n try:\n wiki_places = wiki_places_recognition(request.form[\"wiki_page_url\"])\n except PageNotFound:\n return render_template(\"form.html\")\n api_key = wiki_places_recognition.get_api_key()\n form_data = {\n \"wiki_page_url\": request.form[\"wiki_page_url\"],\n \"wiki_places\": wiki_places,\n \"api_key\": api_key,\n }\n return render_template(\"data.html\", form_data=form_data)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"localhost\", port=5000, debug=False)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"141422447","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom configs._base_.schedules.schedule_1x import *\n\nfrom alpharotate.utils.pretrain_zoo import PretrainModelZoo\nfrom configs._base_.models.retinanet_r50_fpn import *\nfrom configs._base_.datasets.dota_detection import *\nfrom configs._base_.schedules.schedule_1x import *\n\n# schedule\nBATCH_SIZE = 1\nGPU_GROUP = \"0\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSAVE_WEIGHTS_INTE = 20673\nDECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE\nMAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH\nWARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)\n\n# dataset\nDATASET_NAME = 'DOTATrain'\n\n# model\n# backbone\npretrain_zoo = PretrainModelZoo()\nPRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\n\n# bbox head\nANCHOR_SCALES = [2 ** 0]\nANCHOR_RATIOS = [1.]\n\n# loss\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0 / 5.0\nREG_LOSS_MODE = None\n\nVERSION = 'RetinaNet_DOTA_ATSS_1x_20210901'\n\n\"\"\"\nRetinaNet-H + atss\nFLOPs: 468318345; Trainable params: 32080916\n\n\"\"\"\n\n\n\n","sub_path":"configs/DOTA/atss/cfgs_res50_dota_atss_v3.py","file_name":"cfgs_res50_dota_atss_v3.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"507106382","text":"import time,os\nimport glob\n\nFFMPEG_PATH = 'F:\\\\ffmpeg-20190802-11f99a9-win64-static\\\\bin\\\\ffmpeg.exe'\nVIDEO_DIR = 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\homework\\\\'\n\n#录制视频\ndef recording():\n #输出的文件名,确定输出路径\n outputfile=VIDEO_DIR+time.strftime('%Y%m%d_%H%M%S', time.localtime())+'.mp4'\n #运行参数\n settings= '-y -rtbufsize 100M -f gdigrab -framerate 20 -draw_mouse 1 -i desktop -c:v libx264 -r 20 -crf 35 -pix_fmt yuv420p 
-fs 100M \"%s\"'%outputfile\n #cmd命令\n recordCmd=FFMPEG_PATH+\" \"+settings\n print(recordCmd)\n #执行录制命令\n os.system(recordCmd)\n#合并视频\ndef merging():\n os.chdir(VIDEO_DIR)\n filelist=glob.glob(VIDEO_DIR + '*.mp4')\n # 遍历去除路径,返回文件名\n filelist=[os.path.basename(one) for one in filelist]\n if filelist:\n print('\\n目录中有这些视频文件:')\n else:\n print('\\n目录中没有视频文件')\n return\n idx=1\n for one in filelist:\n print('%s:%s'%(idx,one))\n idx+=1\n print('\\n请选择要合并视频的视频文件序号(格式 1,2,3,4) :', end=' ')\n mergesource=input(\"\")\n videoidx=mergesource.split(',')\n videofilename=[ filelist[int(one.strip())-1] for one in videoidx]\n #打印出需合并的文件名\n print(videofilename)\n with open('concat.txt','w',encoding='utf8')as f:\n for one in videofilename:\n f.write(\"file \"+one+\"\\n\")\n mergecmd=FFMPEG_PATH + ' -f concat -i concat.txt -codec copy out.mp4'\n #执行合并命令\n os.system(mergecmd)\n\nwhile True:\n print('\\n请选择您要做的操作:1-录制视频,2-合并视频 :', end=' ')\n choice = input('')\n if choice==\"1\":\n recording()\n elif choice==\"2\":\n merging()\n else:\n print(\"操作无效,请重输!\")\n\n\n\n\n\n\n","sub_path":"SONGQIN/pythonUP/hw02外部程序调用.py","file_name":"hw02外部程序调用.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"189229212","text":"from sqlalchemy import Column, DateTime, func\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\ndb_uri = 'mysql+mysqldb://root:123@127.0.0.1:3306/test?charset=utf8'\nengine = create_engine(db_uri, convert_unicode=True)\ndb_session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=engine))\nBase = declarative_base()\nBase.query = db_session.query_property()\n\n\nclass MyBase(Base):\n __abstract__ = True\n \n create_time = Column(\n DateTime,\n default=func.current_timestamp(),\n )\n update_time = Column(\n DateTime,\n default=func.current_timestamp(),\n onupdate=func.current_timestamp(),\n )\n\n\ndef init_db():\n #Base.metadata.drop_all(bind=engine)\n \n import app.example.models\n \n Base.metadata.create_all(bind=engine)\n","sub_path":"python/flask_skeleton/app/common/sqlalchemyutil.py","file_name":"sqlalchemyutil.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"585972004","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom six import iteritems\nfrom ansible import errors\n\n\ndef is_member_of(value, membership):\n \"\"\"\n Return value if value is in given list, else raise error\n\n This form was inspired from http://www.dasblinkenlichten.com/creating-ansible-filter-plugins/\n and https://projectme10.wordpress.com/2016/01/17/how-to-write-an-ansible-filter-dynamically-configuring-interface-descriptions-in-a-multivendor-environment/\n \"\"\"\n if isinstance(membership, list):\n if value in membership:\n return value\n else:\n raise errors.AnsibleFilterError(\"Variable value is invalid!\")\n else:\n raise errors.AnsibleFilterError(\"Provided membership list is not a list!\")\n\n\ndef dot1x_interfaces(configuration, type='dot1x'):\n \"\"\"\n Return list of dot1x enabled interfaces.\n\n Default will return all dot1x interfaces, use type to get a subset.\n \"\"\"\n # Redo this!\n dot1x_filter = set(['dot1x', 'dot1x_mab', 'dot1x_parking'])\n if type not in dot1x_filter:\n raise errors.AnsibleFilterError(\"Invalid type provided. 
Valid types: dot1x, dot1x_mab, dot1x_parking\")\n\n interface_list = []\n for iface, iface_config in iteritems(configuration['interfaces']):\n if type == 'dot1x':\n if len(dot1x_filter.intersection(set(iface_config))) > 0:\n interface_list.append(iface)\n else:\n if type in iface_config:\n interface_list.append(iface)\n\n return interface_list\n\n\nclass FilterModule(object):\n def filters(self):\n return {\n 'is_member_of': is_member_of,\n 'dot1x_interfaces': dot1x_interfaces\n }\n","sub_path":"filter_plugins/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"437753406","text":"# Implements gradient descent based off of an L2 regularized least squares objective\nimport numpy\n\ndef gradient_descent(inputs,\n labels,\n step_size,\n epochs,\n gradient,\n **gradient_hyperparameters):\n \"\"\"\n Preconditions:\n inputs and labels are numpy arrays\n inputs.shape[0] == labels.shape[0]\n inputs.shape = (N,d)\n labels.shape = (N,1)\n step_size > 0\n epochs > 0\n gradient is a user provided implementation of the gradient of your objective\n gradient has a signature of gradient(inputs, labels, weights, **gradient_hyperparameters)\n Returns:\n The computed weight matrix\n \"\"\"\n weights = numpy.zeros((inputs.shape[1], 1))\n for _ in range(epochs):\n weights = weights - step_size * gradient(inputs, labels, weights, **gradient_hyperparameters)\n return weights","sub_path":"src/algorithms/gradient_descent/gradient_descent.py","file_name":"gradient_descent.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"49847817","text":"import os\nfrom zipfile import ZipFile\n\n\ndef human_read_format(a):\n b = int(a)\n if b < 1024:\n return str(b) + \"Б\"\n else:\n b = round(b / 1024)\n if b < 1024:\n return str(b) + \"КБ\"\n else:\n b = round(b / 1024)\n if b < 1024:\n return str(b) + \"МБ\"\n else:\n b = round(b / 1024)\n return str(b) + \"ГБ\"\n\n\nwith ZipFile('input.zip') as myzip:\n a = (myzip.namelist())\n info = myzip.infolist()\n for i in range(len(a)):\n b = 0\n c = 0\n d = 0\n for u in range(i):\n if a[u] in a[i]:\n b += 2\n c = len(a[u])\n if info[i].orig_filename[-1].isalnum():\n print(b * ' ' + info[i].orig_filename[c:] + ' ' + human_read_format(os.path.getsize(info[i].orig_filename)))\n else:\n print(b * ' ' + info[i].orig_filename[c:-1] + ' ')\n","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"497669507","text":"# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport time\nimport argparse\nimport collections\nimport numpy as np\nimport multiprocessing\n\nimport paddle\nimport paddle.fluid as fluid\n\nfrom utils.placeholder import Placeholder\nfrom utils.init import init_pretraining_params, init_checkpoint\nfrom utils.configure import ArgumentGroup, print_arguments, JsonConfig\n\nfrom model import mlm_net\nfrom model import mrqa_net\n\nfrom optimizer.optimization import optimization\nfrom model.bert_model import ModelBERT\nfrom reader.mrqa_reader import DataProcessor, write_predictions\nfrom reader.mrqa_distill_reader import DataProcessorDistill \nfrom reader.mlm_reader import DataReader\nfrom reader.joint_reader import create_reader\n\n\nparser = argparse.ArgumentParser(__doc__)\nmodel_g = ArgumentGroup(parser, \"model\", \"model configuration and paths.\")\nmodel_g.add_arg(\"bert_config_path\", str, None, \"Path to the json file for bert model config.\")\nmodel_g.add_arg(\"init_checkpoint\", str, None, \"Init checkpoint to resume training from.\")\nmodel_g.add_arg(\"init_pretraining_params\", str, None,\n \"Init pre-training params which preforms fine-tuning from. If the \"\n \"arg 'init_checkpoint' has been set, this argument wouldn't be valid.\")\nmodel_g.add_arg(\"checkpoints\", str, \"checkpoints\", \"Path to save checkpoints.\")\n\ntrain_g = ArgumentGroup(parser, \"training\", \"training options.\")\ntrain_g.add_arg(\"epoch\", int, 3, \"Number of epoches for fine-tuning.\")\ntrain_g.add_arg(\"learning_rate\", float, 5e-5, \"Learning rate used to train with warmup.\")\ntrain_g.add_arg(\"lr_scheduler\", str, \"linear_warmup_decay\",\n \"scheduler of learning rate.\", choices=['linear_warmup_decay', 'noam_decay'])\ntrain_g.add_arg(\"weight_decay\", float, 0.01, \"Weight decay rate for L2 regularizer.\")\ntrain_g.add_arg(\"use_ema\", bool, True, \"Whether to use ema.\")\ntrain_g.add_arg(\"ema_decay\", float, 0.9999, \"Decay rate for expoential moving average.\")\ntrain_g.add_arg(\"warmup_proportion\", float, 0.1,\n \"Proportion of training steps to perform linear learning rate warmup for.\")\ntrain_g.add_arg(\"save_steps\", int, 1000, \"The steps interval to save checkpoints.\")\ntrain_g.add_arg(\"sample_rate\", float, 0.02, \"train samples num.\")\ntrain_g.add_arg(\"use_fp16\", bool, False, \"Whether to use fp16 mixed precision training.\")\ntrain_g.add_arg(\"mix_ratio\", float, 0.4, \"batch mix ratio for masked language model task\")\ntrain_g.add_arg(\"loss_scaling\", float, 1.0,\n \"Loss scaling factor for mixed precision training, only valid when use_fp16 is enabled.\")\n\ntrain_g.add_arg(\"do_distill\", bool, False, \"do distillation\")\n\nlog_g = ArgumentGroup(parser, \"logging\", \"logging related.\")\nlog_g.add_arg(\"skip_steps\", int, 10, \"The steps interval to print loss.\")\nlog_g.add_arg(\"verbose\", bool, False, \"Whether to output verbose log.\")\n\ndata_g = ArgumentGroup(parser, \"data\", \"Data paths, vocab paths and data processing 
options\")\ndata_g.add_arg(\"train_file\", str, None, \"json data for training.\")\ndata_g.add_arg(\"mlm_path\", str, None, \"data for masked language model training.\")\ndata_g.add_arg(\"predict_file\", str, None, \"json data for predictions.\")\ndata_g.add_arg(\"vocab_path\", str, None, \"Vocabulary path.\")\ndata_g.add_arg(\"with_negative\", bool, False,\n \"If true, the examples contain some that do not have an answer.\")\ndata_g.add_arg(\"max_seq_len\", int, 512, \"Number of words of the longest seqence.\")\ndata_g.add_arg(\"max_query_length\", int, 64, \"Max query length.\")\ndata_g.add_arg(\"max_answer_length\", int, 30, \"Max answer length.\")\ndata_g.add_arg(\"batch_size\", int, 12,\n \"Total examples' number in batch for training. see also --in_tokens.\")\ndata_g.add_arg(\"in_tokens\", bool, False,\n \"If set, the batch size will be the maximum number of tokens in one batch. \"\n \"Otherwise, it will be the maximum number of examples in one batch.\")\ndata_g.add_arg(\"do_lower_case\", bool, True,\n \"Whether to lower case the input text. Should be True for uncased models and False for cased models.\")\ndata_g.add_arg(\"doc_stride\", int, 128,\n \"When splitting up a long document into chunks, how much stride to take between chunks.\")\ndata_g.add_arg(\"n_best_size\", int, 20,\n \"The total number of n-best predictions to generate in the nbest_predictions.json output file.\")\ndata_g.add_arg(\"null_score_diff_threshold\", float, 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\")\ndata_g.add_arg(\"random_seed\", int, 0, \"Random seed.\")\n\nrun_type_g = ArgumentGroup(parser, \"run_type\", \"running type options.\")\nrun_type_g.add_arg(\"use_cuda\", bool, True, \"If set, use GPU for training.\")\nrun_type_g.add_arg(\"use_fast_executor\", bool, False,\n \"If set, use fast parallel executor (in experiment).\")\nrun_type_g.add_arg(\"num_iteration_per_drop_scope\", int, 1,\n \"Ihe iteration intervals to clean up temporary variables.\")\nrun_type_g.add_arg(\"do_train\", bool, True, \"Whether to perform training.\")\nrun_type_g.add_arg(\"do_predict\", bool, True, \"Whether to perform prediction.\")\n\nargs = parser.parse_args()\n\n\nmax_seq_len = args.max_seq_len\n\nif args.do_distill: \n input_shape = [\n ([1, 1], 'int64'),\n ([-1, max_seq_len, 1], 'int64'), # src_ids\n ([-1, max_seq_len, 1], 'int64'), # pos_ids\n ([-1, max_seq_len, 1], 'int64'), # sent_ids\n ([-1, max_seq_len, 1], 'float32'), # input_mask\n ([-1, max_seq_len, 1], 'float32'), # start_logits_truth\n ([-1, max_seq_len, 1], 'float32'), # end_logits_truth\n ([-1, 1], 'int64'), # start label\n ([-1, 1], 'int64'), # end label\n ([-1, 1], 'int64'), # masked label\n ([-1, 1], 'int64')] # masked pos\nelse: \n input_shape = [\n ([1, 1], 'int64'),\n ([-1, max_seq_len, 1], 'int64'),\n ([-1, max_seq_len, 1], 'int64'),\n ([-1, max_seq_len, 1], 'int64'),\n ([-1, max_seq_len, 1], 'float32'),\n ([-1, 1], 'int64'), # start label\n ([-1, 1], 'int64'), # end label\n ([-1, 1], 'int64'), # masked label\n ([-1, 1], 'int64')] # masked pos\n\n# yapf: enable.\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef predict(test_exe, test_program, test_pyreader, fetch_list, processor, prefix=''):\n if not os.path.exists(args.checkpoints):\n os.makedirs(args.checkpoints)\n output_prediction_file = os.path.join(args.checkpoints, prefix + \"predictions.json\")\n output_nbest_file = os.path.join(args.checkpoints, prefix + \"nbest_predictions.json\")\n 
output_null_log_odds_file = os.path.join(args.checkpoints, prefix + \"null_odds.json\")\n\n test_pyreader.start()\n all_results = []\n time_begin = time.time()\n while True:\n try:\n np_unique_ids, np_start_logits, np_end_logits, np_num_seqs = test_exe.run(\n fetch_list=fetch_list, program=test_program)\n for idx in range(np_unique_ids.shape[0]):\n if np_unique_ids[idx] < 0:\n continue\n if len(all_results) % 1000 == 0:\n print(\"Processing example: %d\" % len(all_results))\n unique_id = int(np_unique_ids[idx])\n start_logits = [float(x) for x in np_start_logits[idx].flat]\n end_logits = [float(x) for x in np_end_logits[idx].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n time_end = time.time()\n\n features = processor.get_features(\n processor.predict_examples, is_training=False)\n write_predictions(processor.predict_examples, features, all_results,\n args.n_best_size, args.max_answer_length,\n args.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file,\n args.with_negative,\n args.null_score_diff_threshold, args.verbose)\n\n\ndef train(args):\n\n if not (args.do_train or args.do_predict):\n raise ValueError(\"For args `do_train` and `do_predict`, at \"\n \"least one of them must be True.\")\n\n if args.use_cuda:\n place = fluid.CUDAPlace(0)\n dev_count = fluid.core.get_cuda_device_count()\n else:\n place = fluid.CPUPlace()\n dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))\n exe = fluid.Executor(place)\n\n startup_prog = fluid.default_startup_program()\n\n if args.random_seed is not None:\n startup_prog.random_seed = args.random_seed\n\n if args.do_train: \n if args.do_distill: \n train_processor = DataProcessorDistill()\n mrc_train_generator = train_processor.data_generator(\n data_file=args.train_file,\n batch_size=args.batch_size,\n max_len=args.max_seq_len,\n in_tokens=False,\n dev_count=dev_count,\n epochs=args.epoch,\n shuffle=True)\n else: \n train_processor = DataProcessor(\n vocab_path=args.vocab_path,\n do_lower_case=args.do_lower_case,\n max_seq_length=args.max_seq_len,\n in_tokens=args.in_tokens,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length)\n\n mrc_train_generator = train_processor.data_generator(\n data_path=args.train_file,\n batch_size=args.batch_size,\n max_len=args.max_seq_len,\n phase='train',\n shuffle=True,\n dev_count=dev_count,\n with_negative=args.with_negative,\n epoch=args.epoch)\n\n bert_conf = JsonConfig(args.bert_config_path)\n \n data_reader = DataReader(\n args.mlm_path,\n vocab_path=args.vocab_path,\n batch_size=args.batch_size,\n in_tokens=args.in_tokens,\n voc_size=bert_conf['vocab_size'],\n shuffle_files=False,\n epoch=args.epoch,\n max_seq_len=args.max_seq_len,\n is_test=False)\n mlm_train_generator = data_reader.data_generator()\n gens = [\n (mrc_train_generator, 1.0),\n (mlm_train_generator, args.mix_ratio)\n ]\n # create joint pyreader\n joint_generator, train_pyreader, model_inputs = \\\n create_reader(\"train_reader\", input_shape, True, args.do_distill, \n gens)\n train_pyreader.decorate_tensor_provider(joint_generator)\n\n task_id = model_inputs[0]\n if args.do_distill: \n bert_inputs = model_inputs[1:5]\n mrc_inputs = model_inputs[1:9]\n mlm_inputs = model_inputs[9:11]\n else: \n bert_inputs = model_inputs[1:5]\n mrc_inputs = model_inputs[1:7]\n mlm_inputs = model_inputs[7:9]\n \n # create model\n train_bert_model = ModelBERT(\n 
conf={\"bert_conf_file\": args.bert_config_path},\n is_training=True)\n train_create_bert = train_bert_model.create_model(args, bert_inputs)\n\n build_strategy = fluid.BuildStrategy()\n if args.do_distill: \n num_train_examples = train_processor.num_examples\n print(\"runtime number of examples:\")\n print(num_train_examples)\n else: \n print(\"estimating runtime number of examples...\")\n num_train_examples = train_processor.estimate_runtime_examples(\n args.train_file, sample_rate=args.sample_rate)\n print(\"runtime number of examples:\")\n print(num_train_examples)\n\n if args.in_tokens:\n max_train_steps = args.epoch * num_train_examples // (\n args.batch_size // args.max_seq_len) // dev_count\n else:\n max_train_steps = args.epoch * num_train_examples // (\n args.batch_size) // dev_count\n max_train_steps = int(max_train_steps * (1 + args.mix_ratio))\n warmup_steps = int(max_train_steps * args.warmup_proportion)\n print(\"Device count: %d\" % dev_count)\n print(\"Num train examples: %d\" % num_train_examples)\n print(\"Max train steps: %d\" % max_train_steps)\n print(\"Num warmup steps: %d\" % warmup_steps)\n\n train_program = fluid.default_main_program()\n with fluid.program_guard(train_program, startup_prog):\n with fluid.unique_name.guard():\n train_create_bert()\n mlm_output_tensors = mlm_net.create_model(\n mlm_inputs, base_model=train_bert_model, is_training=True, args=args\n )\n mrc_output_tensors = mrqa_net.create_model(\n mrc_inputs, base_model=train_bert_model, is_training=True, args=args\n )\n task_one_hot = fluid.layers.one_hot(task_id, 2)\n mrc_loss = mrqa_net.compute_loss(mrc_output_tensors, args)\n if args.do_distill: \n distill_loss = mrqa_net.compute_distill_loss(mrc_output_tensors, args)\n mrc_loss = mrc_loss + distill_loss\n num_seqs = mrc_output_tensors['num_seqs']\n mlm_loss = mlm_net.compute_loss(mlm_output_tensors)\n num_seqs = mlm_output_tensors['num_seqs']\n all_loss = fluid.layers.concat([mrc_loss, mlm_loss], axis=0)\n loss = fluid.layers.reduce_sum(task_one_hot * all_loss)\n \n scheduled_lr = optimization(\n loss=loss,\n warmup_steps=warmup_steps,\n num_train_steps=max_train_steps,\n learning_rate=args.learning_rate,\n train_program=train_program,\n startup_prog=startup_prog,\n weight_decay=args.weight_decay,\n scheduler=args.lr_scheduler,\n use_fp16=args.use_fp16,\n loss_scaling=args.loss_scaling)\n\n loss.persistable = True\n num_seqs.persistable = True\n\n ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)\n ema.update()\n\n train_compiled_program = fluid.CompiledProgram(train_program).with_data_parallel(\n loss_name=loss.name, build_strategy=build_strategy)\n\n if args.verbose:\n if args.in_tokens:\n lower_mem, upper_mem, unit = fluid.contrib.memory_usage(\n program=train_program,\n batch_size=args.batch_size // args.max_seq_len)\n else:\n lower_mem, upper_mem, unit = fluid.contrib.memory_usage(\n program=train_program, batch_size=args.batch_size)\n print(\"Theoretical memory usage in training: %.3f - %.3f %s\" %\n (lower_mem, upper_mem, unit))\n\n if args.do_predict:\n predict_processor = DataProcessor(\n vocab_path=args.vocab_path,\n do_lower_case=args.do_lower_case,\n max_seq_length=args.max_seq_len,\n in_tokens=args.in_tokens,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length)\n mrc_test_generator = predict_processor.data_generator(\n data_path=args.predict_file,\n batch_size=args.batch_size,\n max_len=args.max_seq_len,\n phase='predict',\n shuffle=False,\n dev_count=dev_count,\n epoch=1)\n\n test_input_shape = [\n 
([-1, max_seq_len, 1], 'int64'),\n ([-1, max_seq_len, 1], 'int64'),\n ([-1, max_seq_len, 1], 'int64'),\n ([-1, max_seq_len, 1], 'float32'),\n ([-1, 1], 'int64')]\n build_strategy = fluid.BuildStrategy()\n test_prog = fluid.Program()\n with fluid.program_guard(test_prog, startup_prog):\n with fluid.unique_name.guard():\n placeholder = Placeholder(test_input_shape)\n test_pyreader, model_inputs = placeholder.build(\n capacity=100, reader_name=\"test_reader\")\n\n test_pyreader.decorate_tensor_provider(mrc_test_generator)\n\n # create model\n bert_inputs = model_inputs[0:4]\n mrc_inputs = model_inputs\n test_bert_model = ModelBERT(\n conf={\"bert_conf_file\": args.bert_config_path},\n is_training=False)\n test_create_bert = test_bert_model.create_model(args, bert_inputs)\n\n test_create_bert()\n mrc_output_tensors = mrqa_net.create_model(\n mrc_inputs, base_model=test_bert_model, is_training=False, args=args\n )\n unique_ids = mrc_output_tensors['unique_id']\n start_logits = mrc_output_tensors['start_logits']\n end_logits = mrc_output_tensors['end_logits']\n num_seqs = mrc_output_tensors['num_seqs']\n\n if 'ema' not in dir():\n ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)\n\n unique_ids.persistable = True\n start_logits.persistable = True\n end_logits.persistable = True\n num_seqs.persistable = True\n\n test_prog = test_prog.clone(for_test=True)\n test_compiled_program = fluid.CompiledProgram(test_prog).with_data_parallel(\n build_strategy=build_strategy)\n\n exe.run(startup_prog)\n\n if args.do_train:\n if args.init_checkpoint and args.init_pretraining_params:\n print(\n \"WARNING: args 'init_checkpoint' and 'init_pretraining_params' \"\n \"both are set! Only arg 'init_checkpoint' is made valid.\")\n if args.init_checkpoint:\n init_checkpoint(\n exe,\n args.init_checkpoint,\n main_program=startup_prog,\n use_fp16=args.use_fp16)\n elif args.init_pretraining_params:\n init_pretraining_params(\n exe,\n args.init_pretraining_params,\n main_program=startup_prog,\n use_fp16=args.use_fp16)\n elif args.do_predict:\n if not args.init_checkpoint:\n raise ValueError(\"args 'init_checkpoint' should be set if \"\n \"only doing prediction!\")\n init_checkpoint(\n exe,\n args.init_checkpoint,\n main_program=startup_prog,\n use_fp16=args.use_fp16)\n\n if args.do_train:\n train_pyreader.start()\n\n steps = 0\n total_cost, total_num_seqs = [], []\n time_begin = time.time()\n while True:\n try:\n steps += 1\n if steps % args.skip_steps == 0:\n if warmup_steps <= 0:\n fetch_list = [loss.name, num_seqs.name]\n else:\n fetch_list = [\n loss.name, scheduled_lr.name, num_seqs.name\n ]\n else:\n fetch_list = []\n\n outputs = exe.run(train_compiled_program, fetch_list=fetch_list)\n\n if steps % args.skip_steps == 0:\n if warmup_steps <= 0:\n np_loss, np_num_seqs = outputs\n else:\n np_loss, np_lr, np_num_seqs = outputs\n total_cost.extend(np_loss * np_num_seqs)\n total_num_seqs.extend(np_num_seqs)\n\n if args.verbose:\n verbose = \"train pyreader queue size: %d, \" % train_pyreader.queue.size(\n )\n verbose += \"learning rate: %f\" % (\n np_lr[0]\n if warmup_steps > 0 else args.learning_rate)\n print(verbose)\n\n time_end = time.time()\n used_time = time_end - time_begin\n print(\"progress: %d/%d, step: %d, loss: %f\" % (steps, max_train_steps, steps, np.sum(total_cost) / np.sum(total_num_seqs)))\n \n total_cost, total_num_seqs = [], []\n time_begin = time.time()\n\n if steps % args.save_steps == 0:\n save_path = os.path.join(args.checkpoints,\n \"step_\" + str(steps))\n 
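# Checkpointing note: fluid.io.save_persistables (the very next call) dumps
# every persistable variable of train_program -- model weights, optimizer
# moments and the EMA shadow variables -- into checkpoints/step_<N>, so an
# interrupted run can typically be resumed by pointing --init_checkpoint at
# the newest step_* directory.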
fluid.io.save_persistables(exe, save_path, train_program)\n if steps == max_train_steps:\n save_path = os.path.join(args.checkpoints,\n \"step_\" + str(steps) + \"_final\")\n fluid.io.save_persistables(exe, save_path, train_program)\n break\n except paddle.fluid.core.EOFException as err:\n save_path = os.path.join(args.checkpoints,\n \"step_\" + str(steps) + \"_final\")\n fluid.io.save_persistables(exe, save_path, train_program)\n train_pyreader.reset()\n break\n\n if args.do_predict:\n if args.use_ema:\n with ema.apply(exe):\n predict(exe, test_compiled_program, test_pyreader, [\n unique_ids.name, start_logits.name, end_logits.name, num_seqs.name\n ], predict_processor, prefix='ema_')\n else:\n predict(exe, test_compiled_program, test_pyreader, [\n unique_ids.name, start_logits.name, end_logits.name, num_seqs.name\n ], predict_processor)\n\n\nif __name__ == '__main__':\n print_arguments(args)\n train(args)\n","sub_path":"PaddleNLP/Research/MRQA2019-D-NET/knowledge_distillation/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":22658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"162401736","text":"import keras_craft\n\n\ndetector = keras_craft.Detector(\"generic-english\")\n\n\ndef predictor(image_paths, batch_size=2):\n all_boxes = detector.detect(image_paths, batch_size=batch_size)\n\n all_boxes = [[box.tolist() for box in boxes] for boxes in all_boxes]\n\n return all_boxes\n\n\nif __name__ == \"__main__\":\n import json\n import pickle\n import base64\n\n example = [\"example.png\"]\n\n print(json.dumps(predictor(example)))\n\n example = {\n file_name: base64.b64encode(open(file_name, \"rb\").read()).decode(\"utf-8\")\n for file_name in example\n }\n\n pickle.dump(example, open(\"example.pkl\", \"wb\"), protocol=2)\n","sub_path":"recipes/craft_text_detection/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"90187158","text":"import sys, os\nimport glob\nimport subprocess\n\nfile_list = glob.glob('..' + os.path.sep + '..' + os.path.sep + 'source' +\n os.path.sep + '*' + os.path.sep + '*' + sys.argv[1] + os.path.sep + 'main.tex')\n\nwith open(\"runs\" + os.path.sep + \"run1_\" + sys.argv[1] + \".log\", 'w+') as log:\n for index, item in enumerate(file_list):\n log.write(str(index) + \":\" + item + \"\\n\")\n#\n# for mainfile in [file_list[0]]:\nerr_list = []\nto_do = file_list[int(sys.argv[2]):]\nfor index, mainfile in enumerate(to_do):\n with open(mainfile[:-8] + \"run1.log\", 'w+') as log:\n print(str(index + int(sys.argv[2])) + \":\" + mainfile)\n try:\n subprocess.call([\"perl\", \"latexml\", \"--destination=\" + mainfile[:-8] + \"run1.xml\", mainfile],\n timeout=120, stdout=log, stderr=subprocess.STDOUT)\n except Exception as err:\n print(\"error with\" + mainfile + \": \")\n print(err)\n err_list.append(mainfile)\n\nif err_list:\n with open(\"runs\" + os.path.sep + \"errrun1_\" + sys.argv[1] + \".log\", 'w+') as log:\n for index, item in enumerate(err_list):\n log.write(str(index + int(sys.argv[2])) + \":\" + item + \"\\n\")\n","sub_path":"run_latexML.py","file_name":"run_latexML.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"269587067","text":"# Exercise 2: Given a string, build a dictionary with the count of each character of the word. 
Use the pseudocode and the example below as a guide.\n#\n# For each char in the string:\n# - If the char is not in the dictionary, add it with the value 1;\n#\n# - If it is, increment the value.\n#\n# Example:\n#\n# str = \"bbbbaaaacccaaaaaaddddddddccccccc\"\n# output: {'b': 4, 'a': 10, 'c': 10, 'd': 8}\n#\n# str = \"coxinha\"\n# output: {'c': 1, 'o': 1, 'x': 1, 'i': 1, 'n': 1, 'h': 1, 'a': 1}\n# Explanation: no letter repeats in \"coxinha\" :)\n\n\ndef count_char(string):\n counter = dict()\n\n for char in string:\n if char in counter:\n counter[char] += 1\n else:\n counter[char] = 1\n\n return counter\n","sub_path":"4.Computer-Science/exercises/Unit_36/Day_3/class-exercises/using_dict/exercise_two.py","file_name":"exercise_two.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"495460363","text":"import datetime\nimport logging\nimport math\n\nfrom pajbot.managers import HandlerManager\nfrom pajbot.models.command import Command\nfrom pajbot.modules import BaseModule\nfrom pajbot.modules import ModuleSetting\n\nlog = logging.getLogger(__name__)\n\n\nclass PaidTimeoutModule(BaseModule):\n\n ID = __name__.split('.')[-1]\n NAME = 'Paid Timeout'\n DESCRIPTION = 'Allows user to time out other users with points'\n CATEGORY = 'Feature'\n SETTINGS = [\n ModuleSetting(\n key='command_name',\n label='Command name (i.e. $timeout)',\n type='text',\n required=True,\n placeholder='Command name (no !)',\n default='timeout',\n constraints={\n 'min_str_len': 2,\n 'max_str_len': 15,\n }),\n ModuleSetting(\n key='timeout_length',\n label='Timeout length',\n type='number',\n required=True,\n placeholder='Timeout length in seconds',\n default=60,\n constraints={\n 'min_value': 1,\n 'max_value': 3600,\n }),\n ModuleSetting(\n key='cost',\n label='Point cost',\n type='number',\n required=True,\n placeholder='Point cost',\n default=400,\n constraints={\n 'min_value': 1,\n 'max_value': 10000,\n }),\n ModuleSetting(\n key='bypass_level',\n label='Level to bypass module',\n type='number',\n required=True,\n placeholder='',\n default=500,\n constraints={\n 'min_value': 100,\n 'max_value': 1000,\n }),\n ]\n\n def paid_timeout(self, **options):\n message = options['message']\n bot = options['bot']\n source = options['source']\n\n _time = self.settings['timeout_length']\n _cost = self.settings['cost']\n\n if message is None or len(message) == 0:\n return False\n\n username = message.split(' ')[0]\n if len(username) < 2:\n return False\n\n victim = bot.users.find(username)\n if victim is None:\n bot.whisper(source.username, 'This user does not exist FailFish')\n return False\n\n if victim.last_active is None or (datetime.datetime.now() - victim.last_active).total_seconds() > 10 * 60:\n bot.whisper(source.username, 'This user has not been active in chat within the last 10 minutes.')\n return False\n\n \"\"\"\n if victim == source:\n bot.whisper(source.username, 'You can\\'t timeout yourself FailFish')\n return False\n \"\"\"\n\n if victim.moderator is True:\n bot.whisper(source.username, 'This person has mod privileges, timeouting this person is not worth it.')\n return False\n\n if victim.level >= self.settings['bypass_level']:\n bot.whisper(source.username, 'This person\\'s user level is too high, you can\\'t timeout this person.')\n return False\n\n now = datetime.datetime.now()\n if victim.timed_out is True and victim.timeout_end > now:\n victim.timeout_end += datetime.timedelta(seconds=_time)\n 
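# The victim is already serving a timeout here: the stored end time was just
# pushed back by _time seconds, and the whisper/_timeout calls below re-issue
# a single timeout for the full remaining duration, because a new Twitch
# timeout replaces (rather than stacks with) the previous one.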
bot.whisper(victim.username, '{victim.username}, you were timed out for an additional {time} seconds by {source.username}'.format(\n victim=victim,\n source=source,\n time=_time))\n bot.whisper(source.username, 'You just used {0} points to time out {1} for an additional {2} seconds.'.format(_cost, username, _time))\n num_seconds = int((victim.timeout_end - now).total_seconds())\n bot._timeout(username, num_seconds)\n else:\n bot.whisper(source.username, 'You just used {0} points to time out {1} for {2} seconds.'.format(_cost, username, _time))\n bot.whisper(username, '{0} just timed you out for {1} seconds. /w {2} !$unbanme to unban yourself for points forsenMoney'.format(source.username, _time, bot.nickname))\n bot._timeout(username, _time)\n victim.timed_out = True\n victim.timeout_start = now\n victim.timeout_end = now + datetime.timedelta(seconds=_time)\n\n payload = {'user': source.username, 'victim': victim.username}\n bot.websocket_manager.emit('timeout', payload)\n HandlerManager.trigger('on_paid_timeout',\n source, victim, _cost,\n stop_on_false=False)\n\n def load_commands(self, **options):\n self.commands[self.settings['command_name'].lower().replace('!', '').replace(' ', '')] = Command.raw_command(self.paid_timeout, cost=self.settings['cost'])\n\n\nclass PaidTimeoutDiscountModule(BaseModule):\n\n ID = 'paidtimeoutdiscount'\n NAME = 'Paid Timeout Discount'\n DESCRIPTION = 'Allows user to time out other users with points'\n CATEGORY = 'Feature'\n PARENT_MODULE = PaidTimeoutModule\n # No settings to add yet. Would like to have the message customizable\n # would also like to have the discounts customizable\n SETTINGS = []\n\n def on_paid_timeout(self, source, victim, cost):\n log.info('PAID TIMEOUT OCCURRED')\n # Discounts here!\n discounts = {\n 'trump_sub': (0.5, 'Trump (50%)'),\n 'massan_sub': (0.45, 'Massan (55%)'),\n 'athene_sub': (0.45, 'Athene (55%)'),\n 'nostam_sub': (0.4, 'Nostam (60%)'),\n 'reynad_sub': (0.8, 'Reynad (20%)'),\n 'forsen_sub': (0.95, 'Forsen (5%)'),\n }\n\n added_discount = 1.0\n whisper_msg = []\n for tag, data in discounts.items():\n discount, text = data\n if tag in victim.tags:\n whisper_msg.append(text)\n added_discount *= discount\n\n if len(whisper_msg) > 0:\n actual_discount = 1.0 - added_discount\n refund = math.trunc(cost * actual_discount)\n if refund > 0:\n source.points += refund\n self.bot.whisper(source.username, 'You have been refunded {refund} points courtesy of TheMysil, because the user you timed out matched the following discounts: {discount_str}'.format(refund=refund, discount_str=', '.join(whisper_msg)))\n\n def enable(self, bot):\n HandlerManager.add_handler('on_paid_timeout', self.on_paid_timeout)\n self.bot = bot\n\n def disable(self, bot):\n HandlerManager.remove_handler('on_paid_timeout', self.on_paid_timeout)\n","sub_path":"pajbot/modules/paidtimeout.py","file_name":"paidtimeout.py","file_ext":"py","file_size_in_byte":6800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"374882172","text":"from scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.selector import HtmlXPathSelector\nfrom openrecipes.items import RecipeItem\n\n\nclass BellalimentocrawlSpider(CrawlSpider):\n\n name = \"www.bellalimento.com\"\n allowed_domains = [\"www.bellalimento.com\"]\n start_urls = [\n \"http://www.bellalimento.com/\",\n ]\n\n # a tuple of Rules that are used to extract links from the HTML page\n rules = (\n 
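# A Rule with no callback (the /category/ rule below) is only used to discover
# and follow links; the dated-permalink rule (/YYYY/MM/DD/) hands matching
# pages to parse_item for extraction.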
Rule(SgmlLinkExtractor(allow=('/category/.+'))),\n Rule(SgmlLinkExtractor(allow=('/\\d\\d\\d\\d/\\d\\d/\\d\\d/')),callback='parse_item'),\n )\n\n def parse_item(self, response):\n hxs = HtmlXPathSelector(response)\n\n base_path = \"\"\"//div[@id=\"zlrecipe-container\"]\"\"\"\n\n recipes_scopes = hxs.select(base_path)\n\n name_path = '//div[@id=\"zlrecipe-title\"]/text()'\n ingredients_path = '//ul[@id=\"zlrecipe-ingredients-list\"]/li[@class=\"ingredient\"]'\n\n recipes = []\n for r_scope in recipes_scopes:\n item = RecipeItem()\n item['name'] = r_scope.select(name_path).extract()\n name = item['name']\n image_path = '//img[contains(@title, \"' + name[0] + '\")]/@src'\n item['image'] = r_scope.select(image_path).extract()[0]\n item['url'] = response.url\n\n ingredient_scopes = r_scope.select(ingredients_path)\n ingredients = []\n for i_scope in ingredient_scopes:\n ingredient_item = i_scope.select('text()').extract()\n ingredients.append(\"%s\" % ingredient_item)\n item['ingredients'] = ingredients\n\n recipes.append(item)\n\n return recipes\n","sub_path":"scrapy_proj/openrecipes/spiders/bellalimento_spider.py","file_name":"bellalimento_spider.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"384028153","text":"s = input()\npairs = eval(input())\nn = len(s)\n# print(n)\nparent = [-1] * n\n\n\ndef find(s):\n if parent[s] != -1:\n parent[s] = find(parent[s])\n return parent[s]\n return s\n\n\ndef union(p, s):\n parent[s] = p\n\n\npairNum = len(pairs)\nfor i in range(pairNum):\n p1 = find(pairs[i][0])\n p2 = find(pairs[i][1])\n if p1 != p2:\n if p1 < p2:\n union(p1, p2)\n else:\n union(p2, p1)\n\ndicts = {}\nfor i in range(n):\n if parent[i] == -1:\n temp = {i: {i: s[i]}}\n dicts.update(temp)\n else:\n prt = find(i)\n pr = dicts.get(prt)\n # print(pr)\n tempPair = {i: s[i]}\n pr.update(tempPair)\n\nn2 = len(dicts)\nchars = [' ']*n\nfor i in range(n2):\n idx = list(dicts[i].keys())\n idx.sort()\n n3 = len(idx)\n vals = sorted(dicts[i].values())\n for j in range(n3):\n chars[idx[j]] = vals[j]\nres = ''.join(chars)\nprint(res)","sub_path":"Code/CodeRecords/2718/60683/239211.py","file_name":"239211.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"407990545","text":"\nclass Measured_Value(object):\n \n NO_MEASUREMENT = ['NULL', [], 0.0]\n \n def __init__(self, sensor_key = '', data = [], deltaT = 0.0):\n self.sensor_key = sensor_key # Identifies which sensor the measurement originated from\n self.data = data # holds measurement values from a given sensor\n self.deltaT = deltaT # Time difference from last measurement\n \n def __str__(self): # string representation of Measured_Value for output purposes\n output = self.sensor_key + ': ['\n \n for value in self.data:\n output += str(value) + ', '\n \n output += str(self.deltaT)\n output += ']'\n return output\n \n","sub_path":"measured_value.py","file_name":"measured_value.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"61627194","text":"import bpy\nimport addon_utils\nimport numpy as np\nimport pathlib\nimport glob\n\nimport src.blender.object as o # noqa: E402\nimport src.util.parser as p # noqa: E402\n\n\nclass Blender:\n \"\"\"\n Interacts with Blender.\n \"\"\"\n\n # Empty constructor\n # This class uses Blender\n # built-in functions.\n def 
__init__(self):\n self.camera = None\n\n def clear_scene(self, except_objects):\n \"\"\"\n Clears the scene, except for the specified objects, so that there are no\n side effects from previous operations.\n :param except_objects: list of object name strings not to be removed\n :return: None\n \"\"\"\n self.select_objects(except_objects)\n bpy.ops.object.delete()\n\n def setup_camera(self, coordinates, rotation):\n \"\"\"\n Places camera at the given coordinates.\n :param coordinates: list with 3 entries specifying x,y,z coordinates.\n :param rotation: list with 3 entries specifying rotation.\n :return: None\n \"\"\"\n cam = bpy.context.scene.camera\n # Move the camera to specific coordinates\n cam.location = coordinates\n # Rotate the camera to specific orientation\n cam.rotation_euler = rotation\n self.camera = cam\n\n def setup_light(self, coordinates, energy, type):\n \"\"\"\n Places light at the given coordinates.\n :param coordinates: list with 3 entries specifying x,y,z coordinates.\n :param energy: integer specifying light strength.\n :param type: string specifying light type.\n :return: None\n \"\"\"\n light_data = bpy.data.lights['Light']\n light_object = bpy.data.objects['Light']\n light_data.energy = energy # 1000 by default\n light_data.type = type # in ['POINT', 'SUN', 'SPOT', 'HEMI', 'AREA']\n light_object.location = coordinates\n\n def setup_object(self, object):\n \"\"\"\n Imports the object from its path attribute.\n :param object: object to be set up.\n :return: None\n \"\"\"\n # Import the current object (of input material) to the scene\n bpy.ops.import_scene.obj(filepath=object.path)\n obj = bpy.context.selected_objects[0]\n self.set_object_location(obj, object.location)\n self.set_object_orientation(obj, object.orientation)\n # self.color_object(obj, object.color)\n return obj\n\n def reset_objects(self):\n \"\"\"\n Resets the objects above the plane.\n :return: None\n \"\"\"\n self.select_objects(['background', 'border_1', 'border_2', 'border_3', 'border_4'])\n random_object = o.Object(None, 'random', 'random', 'random', None)\n for obj in bpy.context.selected_objects:\n random_object.randomize_object(None)\n self.set_object_location(obj, random_object.location)\n self.set_object_orientation(obj, random_object.orientation)\n # self.color_object(obj, random_object.color)\n\n def setup_border_plane(self, axis, translation, name):\n \"\"\"\n Sets up an individual vertical border plane.\n :param axis: string specifying the rotation axis, e.g. 'X' or 'Y'.\n :param translation: tuple with 3 entries specifying the translation.\n :param name: string name for border.\n :return: None\n \"\"\"\n half_pi = 0.5 * np.pi\n bpy.ops.mesh.primitive_plane_add()\n bpy.ops.rigidbody.object_add(type='PASSIVE')\n bpy.ops.transform.rotate(value=half_pi, orient_axis=axis) # , axis=axis)\n bpy.ops.transform.translate(value=translation)\n bpy.context.active_object.name = name\n\n def setup_border_planes(self, x_size, y_size):\n \"\"\"\n Sets up vertical border planes.\n :param x_size: integer stating x_size.\n :param y_size: integer stating y_size.\n :return: None\n \"\"\"\n self.setup_border_plane('Y', (x_size / 2, 0, 0), 'border_1')\n self.setup_border_plane('Y', (-x_size / 2, 0, 0), 'border_2')\n self.setup_border_plane('X', (0, y_size / 2, 0), 'border_3')\n self.setup_border_plane('X', (0, -y_size / 2, 0), 'border_4')\n\n def setup_background_plane(self, filename, x_size, y_size, seed):\n \"\"\"\n Sets up the background plane with the given image.\n :param filename: string name of background to be added.\n :param 
x_size: integer for x_size.\n :param y_size: integer for y_size.\n :param seed: integer seed for reproducible randomness\n :return: None\n \"\"\"\n # Automatically enable 'Import-Export: Import images as Planes'\n # addon in Blender\n addon_utils.enable(\"io_import_images_as_planes\")\n filename = self.choose_background(filename, seed)\n bpy.ops.import_image.to_plane(\n shader='SHADELESS',\n files=[{'name': filename}])\n # Make plane passive rigid body so it can hold the bodies\n bpy.ops.rigidbody.object_add(type='PASSIVE')\n plane = bpy.context.active_object\n plane.name = 'background'\n # Scale background to fit in camera\n plane.dimensions = (x_size, y_size, 1)\n\n def choose_background(self, filename, seed):\n \"\"\"\n Method used for getting the requested background or a random one.\n :param filename: string name of file or 'random' for random\n :param seed: integer seed for reproducible randomness\n :return: string name of file\n \"\"\"\n if seed is not None:\n np.random.seed(seed)\n if filename == 'random':\n path = np.random.choice(glob.glob('/workdir/Backgrounds/*.jpg'))\n else:\n path = '/workdir/Backgrounds/' + filename\n return path\n\n def setup_crush_plane(self):\n \"\"\"\n Sets up the plane for crushing.\n This means adding a collision modifier.\n :return: None\n \"\"\"\n bpy.ops.mesh.primitive_plane_add(size=5, location=(0, 0, 0))\n bpy.ops.object.modifier_add(type='COLLISION')\n\n def set_object_location(self, obj, location):\n \"\"\"\n Places object at the given coordinates.\n :param obj: object to be moved.\n :param location: list with 3 entries specifying x,y,z coordinates.\n :return: None\n \"\"\"\n obj.location.x = location[0]\n obj.location.y = location[1]\n obj.location.z = location[2]\n\n def set_object_orientation(self, obj, orientation):\n \"\"\"\n Rotates object to the given orientation.\n :param obj: object to be rotated.\n :param orientation: list with 3 entries specifying rotation.\n :return: None\n \"\"\"\n obj.rotation_euler[0] = orientation[0]\n obj.rotation_euler[1] = orientation[1]\n obj.rotation_euler[2] = orientation[2]\n\n def select_objects(self, except_objects):\n \"\"\"\n Selects all mesh objects except for the names given.\n :param except_objects: list of object name strings to not select\n :return: None\n \"\"\"\n bpy.ops.object.select_by_type(type='MESH')\n for name in except_objects:\n if bpy.data.objects.get(name) is not None:\n bpy.data.objects[name].select_set(False)\n\n def activate_model(self, model):\n \"\"\"\n Activates the model.\n :param model: model to be activated\n :return: None\n \"\"\"\n bpy.context.view_layer.objects.active = model\n\n def setup_bodies(self):\n \"\"\"\n Sets up rigid bodies for simulating the fall.\n :return: None\n \"\"\"\n bpy.context.scene.rigidbody_world.steps_per_second = 300\n self.select_objects(['background', 'border_1', 'border_2', 'border_3', 'border_4'])\n objects = bpy.context.selected_objects\n for obj in objects:\n # Add rigid body to object\n self.activate_model(obj)\n bpy.ops.rigidbody.object_add()\n\n def simulate(self, frames):\n \"\"\"\n Simulates in Blender for a given number of frames.\n :param frames: integer amount of frames to simulate\n :return: None\n \"\"\"\n scene = bpy.data.scenes['Scene']\n scene.frame_set(0)\n for i in range(1, frames):\n scene.frame_set(i)\n\n def render(self, src_dir, output_location, name):\n \"\"\"\n Renders image in blender.\n :param src_dir: string for source directory.\n :param output_location: string for output folder.\n :param name: string for name of file.\n :return: None\n \"\"\"\n # Set render 
device to GPU instead of CPU\n self.set_render_device('GPU')\n\n bpy.context.scene.render.filepath = \\\n src_dir + '/' + output_location \\\n + name\n\n # Render image\n bpy.ops.render.render(write_still=True)\n print('Rendering done!')\n\n def set_render_output_parameters(self):\n \"\"\"\n Sets the render output parameters which it gets from configuration.yaml\n The parameters that will be changed are x and y resolution and percentage,\n max amount of ray tracing bounces, samples, tile size\n and set denoising to True.\n :return: None\n \"\"\"\n scene = bpy.context.scene\n configuration = p.Parser().parse_long_term_configuration(pathlib.Path(\n '/workdir' + r\"/configuration.yaml\"))\n scene.render.resolution_x = configuration['render']['res_width']\n scene.render.resolution_y = configuration['render']['res_height']\n scene.render.resolution_percentage = configuration['render']['res_percentage']\n scene.cycles.max_bounces = configuration['render']['max_bounces']\n scene.cycles.samples = configuration['render']['samples']\n scene.render.tile_x = configuration['render']['tile_x']\n scene.render.tile_y = configuration['render']['tile_y']\n scene.view_layers['View Layer'].cycles.use_denoising = True\n\n def set_render_device(self, device):\n \"\"\"\n Set render device.\n :param device: device to be used for rendering. Choice: [CPU,GPU].\n :return: None\n \"\"\"\n bpy.context.scene.cycles.device = device\n\n def color_object(self, obj, rgba):\n \"\"\"\n Set color of an object to RGB value.\n :param obj: object to be changed color of.\n :param rgba: list with 4 entries specifying rgb colors and the alpha value.\n :return: None\n \"\"\"\n mat = bpy.data.materials.new(\"Color\")\n mat.diffuse_color = (rgba[0], rgba[1], rgba[2], rgba[3])\n obj.active_material = mat\n\n def set_softbody(self):\n \"\"\"\n Adds the SoftBody modifier to the active object\n This is for the realistic deformation of the object\n :return: None\n \"\"\"\n bpy.ops.object.modifier_add(type='SOFT_BODY')\n bpy.context.object.modifiers[\"Softbody\"].settings.use_goal = False\n bpy.context.object.modifiers[\"Softbody\"].settings.plastic = 100\n bpy.context.object.modifiers[\"Softbody\"].settings.bend = 1000\n\n def setup_cage(self, target):\n \"\"\"\n Adds a cage to a target model.\n The cage is made from a sphere .\n It is made in the right form using the ShrinkWrap modifier.\n :param target: object for which the cage gets formed\n :return: None\n \"\"\"\n bpy.ops.mesh.primitive_uv_sphere_add(radius=5, location=(0, 0, 0))\n # Subdivide surface of sphere to make for better denting [OPTIONAL]\n bpy.ops.object.modifier_add(type='SUBSURF')\n self.apply_modifier(\"Subsurf\")\n bpy.ops.object.modifier_add(type='SHRINKWRAP')\n bpy.context.object.modifiers[\"Shrinkwrap\"].target = target\n bpy.context.object.modifiers[\"Shrinkwrap\"].offset = 0.05\n bpy.context.object.modifiers[\"Shrinkwrap\"].show_viewport = False\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=\"Shrinkwrap\")\n\n def set_mesh_deform(self, target):\n \"\"\"\n Adds the MeshDeform modifier to the active object.\n This binds all the vertices of the active object to the vertices of the (lower-poly) cage.\n :param target: the cage that the active object is mapped to\n :return: None\n \"\"\"\n bpy.ops.object.modifier_add(type='MESH_DEFORM')\n bpy.context.object.modifiers[\"MeshDeform\"].object = target\n bpy.context.object.modifiers[\"MeshDeform\"].precision = 3\n bpy.ops.object.meshdeform_bind(modifier=\"MeshDeform\")\n\n def apply_modifier(self, modifier):\n 
\"\"\"\n Applies a modifier of the active object.\n modifier can be one of the following enums:\n ['DATA_TRANSFER', 'MESH_CACHE', 'MESH_SEQUENCE_CACHE', 'NORMAL_EDIT',\n 'WEIGHTED_NORMAL', 'UV_PROJECT', 'UV_WARP', 'VERTEX_WEIGHT_EDIT', 'VERTEX_WEIGHT_MIX',\n 'VERTEX_WEIGHT_PROXIMITY', 'ARRAY', 'BEVEL', 'BOOLEAN', 'BUILD', 'DECIMATE', 'EDGE_SPLIT',\n 'MASK', 'MIRROR', 'MULTIRES', 'REMESH', 'SCREW', 'SKIN', 'SOLIDIFY', 'SUBSURF',\n 'TRIANGULATE', 'WELD', 'WIREFRAME', 'ARMATURE', 'CAST', 'CURVE', 'DISPLACE', 'HOOK',\n 'LAPLACIANDEFORM', 'LATTICE', 'MESH_DEFORM', 'SHRINKWRAP', 'SIMPLE_DEFORM', 'SMOOTH',\n 'CORRECTIVE_SMOOTH', 'LAPLACIANSMOOTH', 'SURFACE_DEFORM', 'WARP', 'WAVE', 'CLOTH',\n 'COLLISION', 'DYNAMIC_PAINT', 'EXPLODE', 'FLUID', 'OCEAN', 'PARTICLE_INSTANCE',\n 'PARTICLE_SYSTEM', 'SOFT_BODY', 'SURFACE']\n :param modifier: specifies which one to apply\n :return: None\n \"\"\"\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=modifier)\n\n def export_scene(self, filepath):\n \"\"\"\n Exports the current scene as an object.\n :param filepath: place where the scene is stored as well as the name of the object\n :return: None\n \"\"\"\n bpy.ops.export_scene.obj(filepath=filepath)\n\n def get_model(self, name):\n \"\"\"\n Returns the object by name.\n :param name: identifier of object\n :return: object with name\n \"\"\"\n return bpy.data.objects[name]\n\n def get_labeled_object_bounding_boxes(self, object_names, render_configuration):\n \"\"\"\n Gets the bounding boxes for the objects in the scene.\n :param object_names: list of object names\n :param render_configuration: render configuration\n :return: a list of tuples (object name, bounding box)\n \"\"\"\n bounding_boxes = []\n bpy.context.view_layer.update()\n for obj in bpy.context.scene.objects:\n if obj.name in object_names:\n bounding_box = self.camera_view_bounds_2d(bpy.context.scene, obj)\n bounding_boxes.append((obj.name.split('.')[0], bounding_box))\n return bounding_boxes\n\n def camera_view_bounds_2d(self, scene, obj): # noqa: CFQ001\n \"\"\"\n Taken from\n blender.stackexchange.com/questions/7198/save-the-2d-bounding-box-of-an-object-in-rendered-image-to-a-text-file\n\n Returns camera space bounding box of mesh object.\n\n Negative 'z' value means the point is behind the camera.\n\n Takes shift-x/y, lens angle and sensor size into account\n as well as perspective/ortho projections.\n\n :arg scene: Scene to use for frame size.\n :type scene: :class:`bpy.types.Scene`\n :arg obj: Untransformed Mesh.\n :type obj: :class:`bpy.types.Mesh´\n :return: a Box object (call its to_tuple() method to get x, y, width and height)\n :rtype: :class:`Box`\n \"\"\"\n cam_ob = bpy.context.scene.objects['Camera']\n mat = cam_ob.matrix_world.normalized().inverted()\n depsgraph = bpy.context.evaluated_depsgraph_get()\n mesh_eval = obj.evaluated_get(depsgraph)\n me = mesh_eval.to_mesh()\n me.transform(obj.matrix_world)\n me.transform(mat)\n\n camera = cam_ob.data\n frame = [-v for v in camera.view_frame(scene=scene)[:3]]\n camera_persp = camera.type != 'ORTHO'\n\n lx = []\n ly = []\n\n for v in me.vertices:\n co_local = v.co\n z = -co_local.z\n\n if camera_persp:\n if z == 0.0:\n lx.append(0.5)\n ly.append(0.5)\n # Does it make any sense to drop these?\n # if z <= 0.0:\n # continue\n else:\n frame = [(v / (v.z / z)) for v in frame]\n\n min_x, max_x = frame[1].x, frame[2].x\n min_y, max_y = frame[0].y, frame[1].y\n\n x = (co_local.x - min_x) / (max_x - min_x)\n y = (co_local.y - min_y) / (max_y - min_y)\n\n lx.append(x)\n ly.append(y)\n\n min_x 
= np.clip(min(lx), 0.0, 1.0)\n max_x = np.clip(max(lx), 0.0, 1.0)\n min_y = np.clip(min(ly), 0.0, 1.0)\n max_y = np.clip(max(ly), 0.0, 1.0)\n\n mesh_eval.to_mesh_clear()\n\n r = scene.render\n fac = r.resolution_percentage * 0.01\n dim_x = r.resolution_x * fac\n dim_y = r.resolution_y * fac\n # Sanity check\n if round((max_x - min_x) * dim_x) == 0 or round((max_y - min_y) * dim_y) == 0:\n return 0, 0, 0, 0\n\n return (\n round(min_x * dim_x), # X\n round(dim_y - max_y * dim_y), # Y\n round((max_x - min_x) * dim_x), # Width\n round((max_y - min_y) * dim_y) # Height\n )\n\n def get_object_names(self):\n \"\"\"\n Gets the names of the objects that are currently in the scene.\n :return: a list of object_names\n \"\"\"\n object_names = []\n for obj in bpy.context.scene.objects:\n if obj.name not in ['background', 'border_1', 'border_2',\n 'border_3', 'border_4', 'Camera', 'Light']:\n object_names.append(obj.name)\n return object_names\n","sub_path":"src/blender/blender.py","file_name":"blender.py","file_ext":"py","file_size_in_byte":17762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"524918272","text":"# -*- coding: utf-8 -*-\n\"\"\"\ncode from https://github.com/tensorflow/cleverhans/blob/master/examples/adversarial_patch/AdversarialPatch.ipynb\nand https://arxiv.org/abs/1712.09665\nThe code takes sample of images and a target class, makes adversarial pixel blocked patch for each of the image in\nsamples\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.saved_model import tag_constants\n\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nimport PIL.Image\nimport scipy\nimport time\nimport random\n\nimport keras\nfrom keras.applications import mobilenetv2\nfrom keras import backend as k_backend\nfrom keras.models import Model\nfrom keras.models import load_model\nfrom keras.layers import Input\n\n# Declaration of some global variables that are used throughtout the code.\n\nTARGET_LABEL = 5575 # garbage value, is the target label for which patch is to be created\nPATCH_BLOCK = -1 # for 8x8 blocks , if value =8\nPatch_Len = -1 # int(224/PATCH_BLOCK) = number of PATCH_BLOCK\n# PATCH_SHAPE = (Patch_Len,Patch_Len,3)#(299, 299, 3)\n# Ensemble of models\n\n\nMODEL_NAMES = ['name you want to give to your model'] # the name u want for ur model\n\n# Data augmentation\n# Empirically found that training with a very wide scale range works well\n# as a default\nSCALE_MIN = 0.3 # minimum size of patch for training\nSCALE_MAX = 1.5 # maximum size of patch for training\n\nIMAGE_HEIGHT = -1 # garbage value, height of image\nIMAGE_WIDTH = -1 # garbage value, width of image\nCH = -1 # garbage value# no. of channels in image =3 for rgb\nPATCH_SHAPE = (-1, -1, -1) # shape of patch\nBATCH_SIZE = -1 # batch size to be used for training\nLEARNING_RATE = 'learning rate to train the patch'\nMODEL_FILE_PATH = '' # location of model\nFILE_NAME = '' # name of model file, for example model.h5 or model.meta\nIMAGES = np.array((5, 3, 3, 3)) # 'a numpy array of images' # numpy array of Images, for 100 images of shape (32,32,3),\n# its shape will be (100,32,32,3)\nMAX_ROTATION = 0 # Max rotation to be used for training\nNUM_CLASSES = -1 # no. 
of classes, i.e labels for example 1000 for imagenet\nTARGET_ONEHOT = -1 # one-hot target labels\nimage_loader = -1 # an object for loading images and batch of images\nMM = -1 # a model object for training , inference etc\nNUM_IMAGES = -1 # Number of sample images to train on\nTEST_SCALE = -1 # The size of patch to put on returned images\nname_to_label = {} # A dictionary mapping from names to labels\n# Description is given in the string itself\nPreprocess_Func = 'a func to preprocess the input array'\nRev_Preprocess = 'a func to reverse preprocess from processed input back to normal input'\nINPUT_TENSOR_NAME = 'name of input tensor for ex \"inp\"'\nLOGITS_TENSOR_NAME = 'name of logits tensor for ex \"logits\"'\nRANGE_MODEL_INPUT = 'a tuple of floats. The minimum & maximum values of pre-processed input images fed to the model'\nPATCH_APPEARANCE = 'a string (circle or rectangle). By default circle. Indicates how the patch appears on images.'\n\n\ndef enlarge_patch(x):\n \"\"\"\n\n :param x: numpy array, a small patch\n :return: numpy array, a larger patch, by concatenating small patch pixels\n \"\"\"\n y = np.empty((0, IMAGE_HEIGHT, CH), dtype=np.float32)\n for i in range(0, Patch_Len):\n y1 = np.empty((PATCH_BLOCK, 0, CH), dtype=np.float32)\n for j in range(0, Patch_Len):\n y2 = np.array(x[i][j], dtype=np.float32)\n y2 = np.tile(y2, (PATCH_BLOCK, PATCH_BLOCK, 1)) # 8x8x1\n y1 = np.concatenate((y1, y2), axis=1)\n y = np.concatenate((y, y1), axis=0)\n return y\n\n\ndef save_transparent_png_patch(patch):\n \"\"\"\n\n :param patch: numpy array, small patch\n :return: numpy array, a large patch patch with transparent channel\n the function takes small patch, enlarges it, adds extra transparent channel and returns\n \"\"\"\n\n pat = Rev_Preprocess(patch)/np.max(patch)\n pat_big = enlarge_patch(pat)\n if PATCH_APPEARANCE.lower() == 'circle':\n mask = _circle_mask((IMAGE_HEIGHT, IMAGE_WIDTH, 4), PATCH_APPEARANCE) # 4 for the rgba channels in png\n plt.imsave('patch_big.png', pat_big)\n pat_big = plt.imread('patch_big.png')\n pat_transparent = mask * pat_big\n return pat_transparent\n else:\n return pat_big\n\n\nclass StubImageLoader:\n \"\"\"\n Class for loading images, have function get_images for traversing random images for training\n function get_batch_images is used to get batch of images for inference\n \"\"\"\n\n def __init__(self):\n self.images = []\n assert 2*IMAGES.shape[0] >= BATCH_SIZE, \"2*IMAGES.shape[0] should be >= BATCH_SIZE\"\n\n print(\"\\n\\n image shape\\n\\n\", IMAGES.shape)\n for i in range(IMAGES.shape[0]):\n self.images.append(IMAGES[i])\n\n last_batch_size = IMAGES.shape[0] % BATCH_SIZE\n extra_imgs = BATCH_SIZE - last_batch_size\n \n if last_batch_size > 0:\n for i in range(extra_imgs):\n self.images.append(IMAGES[i])\n\n def get_images(self):\n # index = np.random.choice(IMAGES.shape[0], BATCH_SIZE, replace=False)\n # return IMAGES[index]\n return random.sample(self.images, BATCH_SIZE)\n\n def get_batch_images(self, index):\n # return IMAGES[BATCH_SIZE * index:BATCH_SIZE * (index + 1)]\n return self.images[BATCH_SIZE * index:BATCH_SIZE * (index + 1)]\n\n\ndef _transform_vector(width, x_shift, y_shift, im_scale, rot_in_degrees):\n \"\"\"\n Return transform mapping, which when used for transforming an image, scales it, rotates it and\n shifts it to some random location.\n If one row of transforms is [a0, a1, a2, b0, b1, b2, c0, c1],\n then it maps the output point (x, y) to a transformed input point\n (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k),\n where k 
= c0 x + c1 y + 1.\n The transforms are inverted compared to the transform mapping input points to output points.\n \"\"\"\n\n rot = float(rot_in_degrees) / 90. * (math.pi / 2)\n\n # Standard rotation matrix\n # (use negative rot because tf.contrib.image.transform will do the inverse)\n rot_matrix = np.array(\n [[math.cos(-rot), -math.sin(-rot)],\n [math.sin(-rot), math.cos(-rot)]]\n )\n\n # Scale it\n # (use inverse scale because tf.contrib.image.transform will do the inverse)\n inv_scale = 1. / im_scale\n xform_matrix = rot_matrix * inv_scale\n a0, a1 = xform_matrix[0]\n b0, b1 = xform_matrix[1]\n\n # At this point, the image will have been rotated around the top left corner,\n # rather than around the center of the image.\n #\n # To fix this, we will see where the center of the image got sent by our transform,\n # and then undo that as part of the translation we apply.\n x_origin = float(width) / 2\n y_origin = float(width) / 2\n\n x_origin_shifted, y_origin_shifted = np.matmul(\n xform_matrix,\n np.array([x_origin, y_origin]),\n )\n\n x_origin_delta = x_origin - x_origin_shifted\n y_origin_delta = y_origin - y_origin_shifted\n\n # Combine our desired shifts with the rotation-induced undesirable shift\n a2 = x_origin_delta - (x_shift / (2 * im_scale))\n b2 = y_origin_delta - (y_shift / (2 * im_scale))\n\n # Return these values in the order that tf.contrib.image.transform expects\n return np.array([a0, a1, a2, b0, b1, b2, 0, 0]).astype(np.float32)\n\n\ndef _circle_mask(shape, patch_appearance, sharpness=40):\n \"\"\"Return a circular mask of a given shape\n given a shape for ex. (32,32,3) return a largets circle\n that fits the (32,32) square, i.e has all 1's in circular shape\n and 0's outside\n \"\"\"\n assert patch_appearance.lower() == 'circle' or patch_appearance.lower() == 'rectangle', \\\n \"The patch_appearance attribute must be set either to 'circle' or 'rectangle'\"\n if patch_appearance.lower() == 'circle':\n assert shape[0] == shape[1], \"circle_mask received a bad shape: \" + shape\n diameter = shape[0]\n x = np.linspace(-1, 1, diameter)\n y = np.linspace(-1, 1, diameter)\n xx, yy = np.meshgrid(x, y, sparse=True)\n z = (xx ** 2 + yy ** 2) ** sharpness\n mask = 1 - np.clip(z, -1, 1)\n mask = np.expand_dims(mask, axis=2)\n mask = np.broadcast_to(mask, shape).astype(np.float32)\n return mask\n else:\n mask = np.ones(shape)\n return mask.astype(np.float32)\n\n\ndef _gen_target_ys():\n \"\"\"\n generates one_hot vector for target class\n :return: target_class one_hot vector\n \"\"\"\n label = TARGET_LABEL\n y_one_hot = np.zeros(NUM_CLASSES)\n y_one_hot[label] = 1.0\n y_one_hot = np.tile(y_one_hot, (BATCH_SIZE, 1))\n return y_one_hot\n\n\nclass ModelContainer:\n \"\"\"Encapsulates an model, and methods for interacting with it.\n for example training and inferencing\n \"\"\"\n\n def __init__(self, model_name, verbose=True, peace_mask=None, peace_mask_overlay=0.0):\n # Peace Mask: None, \"Forward\", \"Backward\"\n self.model_name = model_name\n self.graph = tf.Graph()\n self.sess = tf.Session(graph=self.graph)\n self.peace_mask = peace_mask\n self.patch_shape = PATCH_SHAPE\n self._peace_mask_overlay = peace_mask_overlay\n self.load_model(verbose=verbose)\n\n def patch(self, new_patch=None):\n \"\"\"Retrieve or set the adversarial patch.\n\n new_patch: The new patch to set, or None to get current patch.\n\n Returns: Itself if it set a new patch, or the current patch.\"\"\"\n if new_patch is None:\n return self._run(self._clipped_patch)\n\n self._run(self._assign_patch, 
{self._patch_placeholder: new_patch})\n return self\n\n def reset_patch(self):\n \"\"\"Reset the adversarial patch to all zeros.\"\"\"\n self.patch(np.random.uniform(RANGE_MODEL_INPUT[0], RANGE_MODEL_INPUT[1], size=self.patch_shape).astype\n (np.float32))\n\n def train_step(self, images=None, target_ys=None, scale=(0.1, 1.0), dropout=None,\n patch_disguise=None, disguise_alpha=None):\n \"\"\"Train the model for one step.\n\n Args:\n images: A batch of images to train on, it loads one if not present.\n target_ys: Onehot target vector, defaults to TARGET_ONEHOT\n scale: Either a scalar value for the exact scale, or a (min, max) tuple for the scale range.\n dropout: consider dropout or not\n patch_disguise: A Disguised Patch, if not None, then learned patch should be close to patch_disguise\n disguise_alpha: Weightage given to disguised_patch\n Returns: Loss on the target ys.\"\"\"\n if images is None:\n images = image_loader.get_images()\n if target_ys is None:\n target_ys = TARGET_ONEHOT\n\n feed_dict = {self._image_input: images,\n self._target_ys: target_ys,\n self._learning_rate: LEARNING_RATE,\n }\n\n if patch_disguise is not None:\n if disguise_alpha is None:\n raise ValueError(\"You need disguise_alpha\")\n feed_dict[self.patch_disguise] = patch_disguise\n feed_dict[self.disguise_alpha] = disguise_alpha\n\n loss, _ = self._run([self._loss, self._train_op], feed_dict, scale=scale, dropout=dropout)\n return loss\n\n def inference_batch(self, index, images=None, target_ys=None, scale=None):\n \"\"\"Report loss and label probabilities, and patched images for a batch.\n\n Args:\n images: A batch of images to train on, it loads if not present.\n scale: Either a scalar value for the exact scale, or a (min, max) tuple for the scale range.\n index: Which target batch to pick for inference, for example 5th batch\n target_ys: The target_ys for loss calculation, TARGET_ONEHOT if not present.\"\"\"\n global BATCH_SIZE\n if images is None:\n images = image_loader.get_batch_images(index=index) # get_images()\n if target_ys is None:\n target_ys = TARGET_ONEHOT # have used extra :len(images)\n\n print(\"\\n\\n Inferencing \\n\\n\")\n\n # if BATCH_SIZE > len(images):\n # print(\"\\n\\n\\n\",BATCH_SIZE,len(images),\"\\n\\n\\n\")\n # BATCH_SIZE = len(images)\n # print(\"\\n\\n\\n\", BATCH_SIZE, len(images), \"\\n\\n\\n\")\n\n feed_dict = {self._image_input: images, self._target_ys: target_ys}\n\n loss_per_example, ps, ims = self._run([self._loss_per_example, self._probabilities, self._patched_input],\n feed_dict, scale=scale)\n return loss_per_example, ps, ims\n\n def load_model(self, verbose=True):\n \"\"\"\n calls make_model_and_ops which loads the model, make necessary changes for example adding\n patch to image inputs\n :return:\n \"\"\"\n\n # keras_mode = False\n patch = None\n self._make_model_and_ops(patch, verbose)\n # self._make_model_and_ops(keras_mode, patch, verbose)\n\n def _run(self, target, feed_dict=None, scale=None, dropout=None):\n \"\"\"\n generic function to run the session for obtaining the target value given\n the feed_dict\n :return: the target result from running the tensorflow session\n \"\"\"\n k_backend.set_session(self.sess)\n if feed_dict is None:\n feed_dict = {}\n feed_dict[self.learning_phase] = False\n\n if scale is not None:\n if isinstance(scale, (tuple, list)):\n scale_min, scale_max = scale\n else:\n scale_min, scale_max = (scale, scale)\n feed_dict[self.scale_min] = scale_min\n feed_dict[self.scale_max] = scale_max\n\n if dropout is not None:\n 
feed_dict[self.dropout] = dropout\n return self.sess.run(target, feed_dict=feed_dict)\n\n def _make_model_and_ops(self, patch_val, verbose):\n \"\"\"\n The main logic of the code that loads the model, modifies it to include\n patched images, defines the loss, training step\n \"\"\"\n start = time.time()\n k_backend.set_session(self.sess)\n with self.sess.graph.as_default():\n self.learning_phase = k_backend.learning_phase()\n\n image_shape = (IMAGE_HEIGHT, IMAGE_WIDTH, CH) # (299, 299, 3), change in other code also\n self._image_input = keras.layers.Input(shape=image_shape)\n\n self.scale_min = tf.placeholder_with_default(SCALE_MIN, [])\n self.scale_max = tf.placeholder_with_default(SCALE_MAX, [])\n self._scales = tf.random_uniform([BATCH_SIZE], minval=self.scale_min, maxval=self.scale_max)\n\n image_input = self._image_input\n self.patch_disguise = tf.placeholder_with_default(tf.zeros(self.patch_shape), shape=self.patch_shape)\n self.disguise_alpha = tf.placeholder_with_default(0.0, [])\n init = tf.constant(np.random.uniform(RANGE_MODEL_INPUT[0], RANGE_MODEL_INPUT[1], size=self.patch_shape).\n astype(np.float32))\n patch = tf.get_variable(\"patch\", initializer=init)\n self._patch_placeholder = tf.placeholder(dtype=tf.float32, shape=self.patch_shape)\n self._assign_patch = tf.assign(patch, self._patch_placeholder)\n # self._batch_size = tf.placeholder(dtype=tf.int32,shape=[])\n\n modified_patch = patch\n\n def clip_to_valid_image(x):\n return tf.clip_by_value(x, clip_value_min=RANGE_MODEL_INPUT[0], clip_value_max=RANGE_MODEL_INPUT[1])\n\n self._clipped_patch = clip_to_valid_image(modified_patch)\n\n self.dropout = tf.placeholder_with_default(1.0, [])\n patch_with_dropout = tf.nn.dropout(modified_patch, keep_prob=self.dropout)\n patched_input = clip_to_valid_image(self._random_overlay(image_input, patch_with_dropout, image_shape))\n # patched_input = self._random_overlay(image_input, patch_with_dropout, image_shape)\n self._patched_input = patched_input\n\n # Labels for our attack (e.g. 
always a toaster)\n self._target_ys = tf.placeholder(tf.float32, shape=(None, NUM_CLASSES))\n print(\"patched_input--\", patched_input)\n # Pre-softmax logits of our pretrained model\n # mnet = mobilenetv2.MobileNetV2(input_tensor=patched_input, weights=None, include_top=False, pooling='avg')\n # predictions = Dense(4,activation='softmax', use_bias=True,name='predictions')(mnet.output)\n # model = Model(inputs=mnet.input, outputs=predictions)\n # model.load_weights('mobilenet_rgb_stopint.h5')\n\n # keras tft#######\n if FILE_NAME[-3:] == '.h5':\n oldModel = load_model(MODEL_FILE_PATH + '/' + FILE_NAME)\n oldModel.layers.pop(0)\n pi = Input(tensor=patched_input, shape=(IMAGE_HEIGHT, IMAGE_WIDTH, CH))\n newOutputs = oldModel(patched_input)\n model = Model(pi, newOutputs)\n logits = model.outputs[0].op.inputs[0]\n self._probabilities = model.outputs[0]\n # tensorflow meta and ckpt model\n elif FILE_NAME[-5:] == '.meta':\n self.sess.run(tf.global_variables_initializer())\n saver = tf.train.import_meta_graph(MODEL_FILE_PATH + '/' + FILE_NAME,\n input_map={INPUT_TENSOR_NAME: patched_input})\n saver.restore(self.sess, tf.train.latest_checkpoint(MODEL_FILE_PATH))\n logits = tf.get_default_graph().get_tensor_by_name(LOGITS_TENSOR_NAME)\n self._probabilities = tf.nn.softmax(logits)\n elif FILE_NAME[-3:] == '.pb':\n self.sess.run(tf.global_variables_initializer())\n model = tf.saved_model.loader.load(sess=self.sess, export_dir=MODEL_FILE_PATH,\n tags=[tag_constants.SERVING],\n input_map={INPUT_TENSOR_NAME: patched_input})\n logits = self.sess.graph.get_tensor_by_name(LOGITS_TENSOR_NAME)\n self._probabilities = tf.nn.softmax(logits)\n else:\n print('\\n\\nBAD File Name, should end with .meta or .h5\\n\\n')\n exit()\n self._loss_per_example = tf.nn.softmax_cross_entropy_with_logits(\n labels=self._target_ys,\n logits=logits\n )\n\n self._target_loss = tf.reduce_mean(self._loss_per_example)\n\n self._patch_loss = tf.nn.l2_loss(patch - self.patch_disguise) * self.disguise_alpha\n\n self._loss = self._target_loss + self._patch_loss\n\n # Train our attack by only training on the patch variable\n self._learning_rate = tf.placeholder(tf.float32)\n self._train_op = tf.train.GradientDescentOptimizer(self._learning_rate) \\\n .minimize(self._loss, var_list=[patch])\n\n # self._probabilities = tf.nn.softmax(logits)#model.outputs[0]\n\n if patch_val is not None:\n self.patch(patch_val)\n else:\n self.reset_patch()\n\n elapsed = time.time() - start\n if verbose:\n print(\"Finished loading {}, took {:.0f}s\".format(self.model_name, elapsed))\n\n def _random_overlay(self, imgs, patch, image_shape):\n \"\"\"Augment images with random rotation, transformation.\n\n Image: BATCHx299x299x3\n concatenates the patch on some part of the image and returns the\n corresponding patched image\n :returns - the patched images\n\n \"\"\"\n # Add padding\n\n print('\\npatch is :', patch)\n\n def my_func(x):\n \"\"\"\n exactly same as enlarge_patch function\n :param x: numpy array, a small patch\n :return: numpy array, a larger patch, by concatenating small patch pixels\n \"\"\"\n y = np.empty((0, IMAGE_HEIGHT, CH), dtype=np.float32)\n for i in range(0, Patch_Len):\n y1 = np.empty((PATCH_BLOCK, 0, CH), dtype=np.float32)\n for j in range(0, Patch_Len):\n y2 = np.array(x[i][j], dtype=np.float32)\n y2 = np.tile(y2, (PATCH_BLOCK, PATCH_BLOCK, 1)) # 8x8x1\n y1 = np.concatenate((y1, y2), axis=1)\n y = np.concatenate((y, y1), axis=0)\n return y # [:224,:224,:]\n\n 
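# The helpers below work around tf.py_func having no gradient of its own:
# py_func() registers a custom backward function under a unique name with
# tf.RegisterGradient and swaps it in via gradient_override_map, while
# _extendpatchgrad sums the incoming gradient over each PATCH_BLOCK x
# PATCH_BLOCK tile to recover the gradient w.r.t. every small-patch pixel.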
##############################################################################################\n #############################################################################################\n def py_func(func, inp, tout, stateful=True, name=None, grad=None):\n \"\"\"\n overrides the gradient for the \"func=my_func\" with the custom gradient grad\n note that initial gradient w.r.t to \"func = my_func\" would not be defined\n \"\"\"\n\n rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))\n\n tf.RegisterGradient(rnd_name)(grad)\n g = tf.get_default_graph()\n with g.gradient_override_map({\"PyFunc\": rnd_name}):\n return tf.py_func(func, inp, tout, stateful=stateful, name=name)\n\n # Def custom function using my_func instead of usual tf operations:\n def extend_patch(x, name=None):\n with ops.op_scope([x], name, \"Extend_patch\") as name:\n ext_patch = py_func(my_func,\n [x],\n [tf.float32],\n name=name,\n grad=_extendpatchgrad) # <-- here's the call to the gradient\n return ext_patch[0]\n\n def _extendpatchgrad(op, grad):\n \"\"\"\n\n :param op: used for input, but we don't use it, since we don't need the input\n :param grad: the inflowing gradients w.r.t the large patch\n :return: the gradients w.r.t the small patch\n \"\"\"\n y = np.zeros((0, Patch_Len, CH), dtype=np.float32)\n y = tf.convert_to_tensor(y, dtype=tf.float32)\n for i in range(Patch_Len):\n y1 = np.zeros((1, 0, CH), dtype=np.float32)\n y1 = tf.convert_to_tensor(y1, tf.float32)\n\n for j in range(Patch_Len):\n sum = 0\n for i1 in range(PATCH_BLOCK * i, PATCH_BLOCK * i + PATCH_BLOCK):\n for i2 in range(PATCH_BLOCK * j, PATCH_BLOCK * j + PATCH_BLOCK):\n sum += grad[i1][i2]\n sum = tf.reshape(sum, [1, 1, CH])\n # sum = sum/(PATCH_BLOCK*PATCH_BLOCK)\n y1 = tf.concat([y1, sum], 1)\n y = tf.concat([y, y1], 0)\n return y\n\n patch_extend = extend_patch(patch) # tf.py_func(my_func, [patch], tf.float32)\n # print(patch_extend,patch_extend[0])\n patch_extend.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, CH])\n image_mask = _circle_mask(image_shape, PATCH_APPEARANCE)\n\n image_mask = tf.stack([image_mask] * BATCH_SIZE)\n padded_patch = tf.stack([patch_extend] * BATCH_SIZE)\n\n transform_vecs = []\n\n def _random_transformation(scale_min, scale_max, width):\n \"\"\"\n\n :param scale_min: minimum scale of image after transform\n :param scale_max: maximum scale of image after transform\n :param width: width of image\n :return: a transform vector which can be used to transform the given image\n to a image with size as random between scale_min and scale_max of original\n image, some random rotation and shifted to a random part of image\n such that image doen't come out of its boundary\n \"\"\"\n im_scale = np.random.uniform(low=scale_min, high=scale_max)\n\n padding_after_scaling = (1 - im_scale) * width\n x_delta = np.random.uniform(-padding_after_scaling, padding_after_scaling)\n y_delta = np.random.uniform(-padding_after_scaling, padding_after_scaling)\n\n rot = np.random.uniform(-MAX_ROTATION, MAX_ROTATION)\n\n return _transform_vector(width,\n x_shift=x_delta,\n y_shift=y_delta,\n im_scale=im_scale,\n rot_in_degrees=rot)\n\n for _ in range(BATCH_SIZE):\n # Shift and scale the patch for each image in the batch\n random_xform_vector = tf.py_func(_random_transformation, [self.scale_min, self.scale_max, image_shape[0]],\n tf.float32)\n random_xform_vector.set_shape([8])\n\n transform_vecs.append(random_xform_vector)\n\n image_mask = tf.contrib.image.transform(image_mask, transform_vecs, \"BILINEAR\")\n padded_patch = 
tf.contrib.image.transform(padded_patch, transform_vecs, \"BILINEAR\")\n\n inverted_mask = (1 - image_mask)\n return imgs * inverted_mask + padded_patch * image_mask\n\n\nclass MetaModel:\n \"\"\"\n A class for encapsulating model object, defines train and inference functions\n \"\"\"\n def __init__(self, verbose=True, peace_mask=None, peace_mask_overlay=0.0):\n self.nc = {m: ModelContainer(m, verbose=verbose, peace_mask=peace_mask, peace_mask_overlay=peace_mask_overlay)\n for m in MODEL_NAMES}\n self._patch = np.zeros(PATCH_SHAPE)\n self.patch_shape = PATCH_SHAPE\n\n def patch(self, new_patch=None):\n \"\"\"Retrieve or set the adversarial patch.\n\n new_patch: The new patch to set, or None to get current patch.\n\n Returns: Itself if it set a new patch, or the current patch.\"\"\"\n if new_patch is None:\n return self._patch\n\n self._patch = new_patch\n return self\n\n def reset_patch(self):\n \"\"\"Reset the adversarial patch to uniform dist.\"\"\"\n self.patch(np.random.uniform(RANGE_MODEL_INPUT[0], RANGE_MODEL_INPUT[1], size=self.patch_shape).\n astype(np.float32))\n\n def train_step(self, model=None, steps=1, images=None, target_ys=None, scale=None, **kwargs):\n \"\"\"Train the model for `steps` steps.\n\n Args:\n model: model name\n steps: number of iterations to train model\n images: A batch of images to train on, it loads one if not present.\n target_ys: Onehot target vector, defaults to TARGET_ONEHOT\n learning_rate: Learning rate for this train step.\n scale: Either a scalar value for the exact scale, or a (min, max) tuple for the scale range.\n\n Returns: Loss on the target ys.\"\"\"\n\n if model is not None:\n to_train = [self.nc[model]]\n else:\n to_train = self.nc.values()\n\n losses = []\n for mc in to_train:\n mc.patch(self.patch())\n for _ in range(steps):\n loss = mc.train_step(images, target_ys, scale=scale, **kwargs)\n losses.append(loss)\n self.patch(mc.patch())\n return np.mean(losses)\n\n def inference_batch(self, model, index, images=None, target_ys=None, scale=None):\n \"\"\"Report loss and label probabilities, and patched images for a batch.\n\n Args:\n model: name of the model\n index: Which target batch to pick for inference, for example 5th batch\n images: A batch of images to train on, it loads if not present.\n target_ys: The target_ys for loss calculation, TARGET_ONEHOT if not present.\n scale: Either a scalar value for the exact scale, or a (min, max) tuple for the scale range.\n \"\"\"\n\n mc = self.nc[model]\n mc.patch(self.patch())\n return mc.inference_batch(images, target_ys, scale=scale, index=index)\n\n\ndef report(model, n=400, scale=(0.1, 1.0)):\n \"\"\"Prints a report on how well the model is doing.\n\n\n Args:\n :param model: can be a ModelContainer instance, or a string. 
If it's a string, we\n lookup that model name in the MultiModel\n :param n: int, number of images for which to report the results\n :param scale: int or tuple, the fraction of image covered by patch\n \"\"\"\n\n n_batches = int(math.ceil(float(n) / BATCH_SIZE))\n \n patched_images = np.empty((0, IMAGE_HEIGHT, IMAGE_WIDTH, CH), dtype=np.float32)\n probabs = np.empty((0, NUM_CLASSES), dtype=np.float32) # 0,(4--> number of classes)\n\n for b in range(n_batches):\n if isinstance(model, str):\n loss_per_example, probs, patched_imgs = MM.inference_batch(model, scale=scale, index=b)\n else:\n loss_per_example, probs, patched_imgs = model.inference_batch(scale=scale, index=b)\n\n patched_images = np.concatenate((patched_images, patched_imgs), axis=0)\n probabs = np.concatenate((probabs, probs), axis=0)\n\n return patched_images[:n]\n\n\ndef train_models(steps):\n \"\"\"\n A function that calls train_step for number of epochs,\n essentially train the model\n :return: a numpy array containg patched images and another numpy array storing the patch\n \"\"\"\n model_targets = MODEL_NAMES # ['mobilenetv2']\n epochs = steps # As per mobile net model on traffic signs where we found patches to perform well\n print(\"Will run for epochs==>\", epochs)\n regular_training_model_to_patch = {}\n\n for m in model_targets:\n print(\"Training %s\" % m)\n model_con = MM.nc[m] # ModelContainer\n model_con.reset_patch()\n for i in range(epochs):\n\n loss = model_con.train_step(scale=(0.1, 1.0)) # 0.7,1.2\n if i % int(epochs / 10) == 0:\n print(\"[%s] loss: %s\" % (i, loss))\n\n regular_training_model_to_patch[m] = model_con.patch()\n\n patch_transparent = save_transparent_png_patch(model_con.patch())\n m = MM.nc[model_targets[0]]\n m.patch(regular_training_model_to_patch[model_targets[0]])\n return report(m, n=NUM_IMAGES, scale=TEST_SCALE), patch_transparent\n\n\ndef adv_patch(m, target_label_name, n_to_l, images, num_classes, model_file_path, file_name, input_tensor_name,\n logits_tensor_name, num_images, patch_block, range_model_input, patch_appearance, batch_size=4,\n learning_rate=5.0, scale_min=0.1, scale_max=1.0, max_rotation=22.5, model_name='model_to_test',\n test_scale=0.4):\n \"\"\"\n\n :param m: model object\n :param target_label_name: string, the target label for example, \"ostrich\"\n :param n_to_l: a dictionary mapping from name to labels\n :param images: a numpy array of images\n :param num_classes: int, number of labels/classes in model\n :param model_file_path: string, path where model is stored\n :param file_name: string, name of model file for example model.h5 or model.meta\n :param input_tensor_name: string, name of input tensor for example \"input:0\"\n :param logits_tensor_name: string, name of logits tensor for example \"logits:0\"\n :param num_images: int, number of images\n :param patch_block: int, block size of pixels with same value, for example if 16 the a grid of 16x16 pixels will\n have same value\n :param range_model_input: a tuple of floats.Range of values such that reverse preprocess gives valid original images\n :param patch_appearance: a string ('circle' or 'rectangle').By default 'circle'.Indicates how the patch appear on\n images.\n :param batch_size: int, batch_size to be used for training\n :param learning_rate: a float - learning rate for training the patch\n :param scale_min: float [0,1], minimum patch size to be used for training\n :param scale_max: float [0,1], maximum patch size to be used for training\n :param max_rotation: float, maximum rotation of patch to be used for 
training\n :param model_name: string, name u give to ur model\n :param test_scale: int or tuple, fraction of patch covering the image\n :return: a numpy array containg patched images and another numpy array storing the patch\n \"\"\"\n global TARGET_LABEL, PATCH_SHAPE, BATCH_SIZE, SCALE_MAX, SCALE_MIN, MAX_ROTATION, IMAGE_HEIGHT, \\\n IMAGE_WIDTH, CH, MODEL_NAMES, NUM_CLASSES, TARGET_ONEHOT\n global image_loader, MM, name_to_label, MODEL_FILE_PATH, NUM_IMAGES, TEST_SCALE, FILE_NAME, \\\n Preprocess_Func, PATCH_BLOCK, Patch_Len, PATCH_SHAPE, IMAGES, RANGE_MODEL_INPUT, PATCH_APPEARANCE\n global INPUT_TENSOR_NAME, LOGITS_TENSOR_NAME, Rev_Preprocess, LEARNING_RATE\n TARGET_LABEL = int(n_to_l[target_label_name])\n\n name_to_label = n_to_l\n # PATCH_SHAPE = (m.image_size_height, m.image_size_width, m.num_channels) # (299, 299, 3)\n BATCH_SIZE = batch_size\n LEARNING_RATE = learning_rate\n SCALE_MAX = scale_max\n SCALE_MIN = scale_min\n MAX_ROTATION = max_rotation\n IMAGE_HEIGHT = m.image_size_height\n IMAGE_WIDTH = m.image_size_width\n CH = m.num_channels\n INPUT_TENSOR_NAME = input_tensor_name\n LOGITS_TENSOR_NAME = logits_tensor_name\n IMAGES = images\n MODEL_NAMES = [model_name]\n PATCH_BLOCK = patch_block\n RANGE_MODEL_INPUT = range_model_input\n PATCH_APPEARANCE = patch_appearance\n Patch_Len = int(m.image_size_height / patch_block)\n PATCH_SHAPE = (Patch_Len, Patch_Len, CH)\n NUM_CLASSES = num_classes\n NUM_IMAGES = num_images\n TEST_SCALE = test_scale\n FILE_NAME = file_name\n MODEL_FILE_PATH = model_file_path\n Preprocess_Func = m.pre_process\n Rev_Preprocess = m.rev_preprocess\n TARGET_ONEHOT = _gen_target_ys()\n image_loader = StubImageLoader()\n MM = MetaModel()\n steps = np.max([int((num_images/batch_size)*4), 500])\n\n return train_models(steps)\n","sub_path":"tft/image/adversarial_patch.py","file_name":"adversarial_patch.py","file_ext":"py","file_size_in_byte":33841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"131911532","text":"# import necessary libraries\nfrom flask import (\n Flask,\n render_template,\n jsonify,\n request)\n\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///AAPL.sqlite\"\n\ndb = SQLAlchemy(app)\n\nclass DailyStockData(db.Model):\n __tablename__ = 'StockPrices'\n\n id = db.Column(db.Integer, primary_key=True)\n Date = db.Column(db.String(64))\n Open = db.Column(db.Float)\n High = db.Column(db.Float)\n Low = db.Column(db.Float)\n Close = db.Column(db.Float)\n Volume = db.Column(db.Integer)\n\n def __repr__(self):\n return '' % (self.id)\n\n\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/line\")\ndef test():\n results = db.session.query(\n DailyStockData.Date,\n DailyStockData.Open,\n DailyStockData.High,\n DailyStockData.Low,\n DailyStockData.Close,\n DailyStockData.Volume,\n ).all()\n\n daily_data = []\n for result in results:\n daily_data.append({\n \"Date\": result[0],\n \"Open\": result[1],\n \"High\": result[2],\n \"Low\": result[3],\n \"Close\": result[4],\n \"Volume\": result[5],\n })\n return jsonify(daily_data)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"Project_2/finance_tech_analysis/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"273762629","text":"import heapq\nfrom sys import stdin\n\nN = int(stdin.readline())\nmaxheap = 
[]\n\nfor i in range(N):\n x = int(stdin.readline())\n if len(maxheap) ==0:\n if x ==0:\n print(0)\n else:\n heapq.heappush(maxheap, -x)\n else:\n if x==0:\n print(-heapq.heappop(maxheap))\n else:\n heapq.heappush(maxheap, -x)\n \n ","sub_path":"11286.py","file_name":"11286.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"626085529","text":"#\n## Google Cloud function /articles\n# \n#\n# articles/?t=n fetch articles from topic N\n# articles/?w=name fetch articles written by 'name'\n# articles fetch all articles\n# \n# We return a JSON array of article descriptions\n#\n\nfrom google.cloud import bigquery\nimport pandas as pd \nimport flask\n\ndef hello_world(request):\n \"\"\"Responds to any HTTP request.\n Args:\n request (flask.Request): HTTP request object.\n Returns:\n The response text or any set of values that can be turned into a\n Response object using\n `make_response `.\n \"\"\"\n if request.args and 't' in request.args:\n response = flask.jsonify(get_topic(request.args.get('t')))\n elif request.args and 'w' in request.args:\n response = flask.jsonify(get_who(request.args.get('w')))\n elif request.args and 'm' in request.args:\n response = flask.jsonify(get_map(request.args.get('m')))\n elif request.args and 'c' in request.args:\n response = flask.jsonify(get_articles(request.args.get('c')))\n else:\n response = flask.jsonify(get_articles())\n response.headers.set('Access-Control-Allow-Origin', '*')\n response.headers.set('Access-Control-Allow-Methods', 'GET, POST')\n return response\n\ndef with_a(city = ''):\n q = \"\"\"\nwith a as \n(SELECT *\nFROM\n(\n SELECT *, ROW_NUMBER() OVER (PARTITION BY index ORDER BY date desc) rn\n FROM `octo-news.gdelt_sa.daily_map_feed` \n where lower(city) like lower('%$CITY%')\n) t\nWHERE rn = 1\norder by index)\n\"\"\"\n return q.replace('$CITY',city)\n\n\ndef get_topic(t):\n # BQ Query to get articles with topic t\n QUERY = with_a()\n QUERY += \"SELECT a.*,b.name as topic_text from a inner join \"\n QUERY += \"`octo-news.gdelt_sa.themes` as b on a.topic=b.index \"\n QUERY += \"where a.topic=\"+str(t)+\" order by index limit 50\"\n bq_client = bigquery.Client()\n query_job = bq_client.query(QUERY) # API request\n rows_df = query_job.result().to_dataframe() # Waits for query to finish\n return rows_df.to_dict(orient='records')\n\ndef get_who(w):\n # BQ Query to get articles authored by w\n QUERY = with_a()\n QUERY += \"SELECT a.*,b.name as topic_text from a inner join \"\n QUERY += \"`octo-news.gdelt_sa.themes` as b on a.topic=b.index where \"\n QUERY += \" a.who like '\"+str(w)+\"' order by index limit 50\"\n bq_client = bigquery.Client()\n query_job = bq_client.query(QUERY) # API request\n rows_df = query_job.result().to_dataframe() # Waits for query to finish\n return rows_df.to_dict(orient='records')\n\ndef get_articles(city=''):\n # BQ Query to get top 50 articles\n QUERY = with_a(city)\n QUERY += \"SELECT a.*,b.name as topic_text from a inner join \"\n QUERY += \"`octo-news.gdelt_sa.themes` as b on a.topic=b.index order by index limit 50\"\n bq_client = bigquery.Client()\n query_job = bq_client.query(QUERY) # API request\n rows_df = query_job.result().to_dataframe() # Waits for query to finish\n return rows_df.to_dict(orient='records')\n\ndef get_map(m):\n # BQ Query to get top 250 articles in city m, 'all' for all\n if m.lower() == 'all':\n m = ''\n QUERY = with_a(m)\n QUERY += \"SELECT a.*,b.name as topic_text from a inner join \"\n QUERY += 
\"`octo-news.gdelt_sa.themes` as b on a.topic=b.index \"\n QUERY += \"order by index limit 250\"\n bq_client = bigquery.Client()\n query_job = bq_client.query(QUERY) # API request\n rows_df = query_job.result().to_dataframe() # Waits for query to finish\n return rows_df.to_dict(orient='records')\n","sub_path":"src/server/articles.py","file_name":"articles.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"600808924","text":"\"\"\"\n1) Создать класс TrafficLight (светофор).\n● определить у него один атрибут color (цвет) и метод running (запуск);\n● атрибут реализовать как приватный;\n● в рамках метода реализовать переключение светофора в режимы: красный, жёлтый,\nзелёный;\n● продолжительность первого состояния (красный) составляет 7 секунд, второго\n(жёлтый) — 2 секунды, третьего (зелёный) — на ваше усмотрение;\n● переключение между режимами должно осуществляться только в указанном порядке\n(красный, жёлтый, зелёный);\n● проверить работу примера, создав экземпляр и вызвав описанный метод.\nЗадачу можно усложнить, реализовав проверку порядка режимов. При его нарушении\nвыводить соответствующее сообщение и завершать скрипт.\n\"\"\"\n\nfrom time import sleep\nfrom sys import exit\n\n\nclass TrafficLight:\n def __init__(self, traffic_light_number, first_color, second_color, third_color):\n self.first_color = first_color\n self.second_color = second_color\n self.third_colow = third_color\n self.traffic_light_number = traffic_light_number\n print(f\"Проверка работы светофора № {self.traffic_light_number}\")\n\n # colors = [\"Красный\", \"Жёлтый\", \"Зелёный\"]\n\n def running(self):\n colors = [self.first_color, self.second_color, self.third_colow]\n if colors != [\"Красный\", \"Жёлтый\", \"Зелёный\"]:\n print(\"Неверный порядок режимов работы светофора\")\n exit()\n for color in colors:\n print(f\"Цвет светофора: {color}\")\n if color == \"Красный\":\n sleep(7)\n elif color == \"Жёлтый\":\n sleep(2)\n elif color == \"Зелёный\":\n sleep(3)\n\n\nt1 = TrafficLight(1, \"Красный\", \"Жёлтый\", \"Зелёный\")\nt1.running()\nt2 = TrafficLight(2, \"Жёлтый\", \"Красный\", \"Зелёный\")\nt2.running()\n","sub_path":"lesson_6/exercise_1.py","file_name":"exercise_1.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"53751678","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom .states import state_list\nfrom scrapy.selector import Selector\nfrom scrapy.http import FormRequest\nimport re\nfrom os import path\nimport json\n\nclass JuniorPlayersSpider(scrapy.Spider):\n name = 'junior_players'\n allowed_domains = ['tennislink.usta.com']\n start_urls = ['http://tennislink.usta.com/']\n\n def parse(self, response):\n urls = [\"https://tennislink.usta.com/Tournaments/Schedule/SearchResults.aspx?typeofsubmit=&Action=2&Keywords=&TournamentID=&SectionDistrict=&City=&State={0}&Zip=&Month=13&StartDate={1}&EndDate={2}&Day=&Year=2019&Division=D1009&Category=&Surface=&OnlineEntry=&DrawsSheets=&UserTime=&Sanctioned=-1&AgeGroup=Y&SearchRadius=-1\",\n \"https://tennislink.usta.com/Tournaments/Schedule/SearchResults.aspx?typeofsubmit=&Action=2&Keywords=&TournamentID=&SectionDistrict=&City=&State={0}&Zip=&Month=13&StartDate={1}&EndDate={2}&Day=&Year=2019&Division=D1023&Category=&Surface=&OnlineEntry=&DrawsSheets=&UserTime=&Sanctioned=-1&AgeGroup=Y&SearchRadius=-1\",\n 
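# Note: the two URL templates below are identical to the two above except for the\n                       # Division code in the query string (D1011 and D1025 instead of D1009 and D1023).\n                       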
\"https://tennislink.usta.com/Tournaments/Schedule/SearchResults.aspx?typeofsubmit=&Action=2&Keywords=&TournamentID=&SectionDistrict=&City=&State={0}&Zip=&Month=13&StartDate={1}&EndDate={2}&Day=&Year=2019&Division=D1011&Category=&Surface=&OnlineEntry=&DrawsSheets=&UserTime=&Sanctioned=-1&AgeGroup=Y&SearchRadius=-1\",\n \"https://tennislink.usta.com/Tournaments/Schedule/SearchResults.aspx?typeofsubmit=&Action=2&Keywords=&TournamentID=&SectionDistrict=&City=&State={0}&Zip=&Month=13&StartDate={1}&EndDate={2}&Day=&Year=2019&Division=D1025&Category=&Surface=&OnlineEntry=&DrawsSheets=&UserTime=&Sanctioned=-1&AgeGroup=Y&SearchRadius=-1\"]\n\n for state in state_list:\n for url in urls:\n start_date = '04/01/2019'\n end_date = '06/01/2019'\n url = url.format(state, start_date, end_date)\n meta = {'searched_state': state,\n 'page': 1,\n 'year': '2019',\n 'age_group': 'Y',\n 'start': start_date,\n 'end': end_date,\n 'listing_url': url}\n yield scrapy.Request(url, callback=self.parse_listings, meta={'meta': meta})\n\n def get_event_url_and_id(self, raw_event_id):\n if raw_event_id:\n event_id = re.findall(r\"Go\\((\\d+)\\)\", raw_event_id[0])\n if event_id:\n event_id = event_id[0]\n url = \"https://tennislink.usta.com/Tournaments/TournamentHome/Tournament.aspx?T=\"+event_id\n return {'url': url, 'event_id': event_id}\n else:\n return {'url': None, 'event_id': None}\n\n def parse_listings(self, response):\n parser = Selector(response)\n tournament_listings = parser.xpath(\"//table[@id='ctl00_mainContent_dgTournaments']//tr\")\n page_ = parser.xpath(\"//span[@class='results_info']//text()\").extract()\n\n raw_results = self.clean(response.xpath(\"//span[@class='results_info']//text()\").extract())\n raw_total_no_of_tournaments = re.findall(r'(\\d+)\\s+tournaments', raw_results)\n\n if raw_total_no_of_tournaments:\n results = int((int(raw_total_no_of_tournaments[0])/20))+1\n else:\n results = 0\n\n for listing in tournament_listings[1:]:\n raw_event_id = listing.xpath(\".//td[2]/a/@href\").extract()\n \n id_n_url = self.get_event_url_and_id(raw_event_id)\n player_formdata={'ctl00$ScriptManager1': 'ctl00$mainContent$pnlTabUpdate|ctl00$mainContent$lnkbutSubmitApproval',\n '__EVENTTARGET': 'ctl00$mainContent$lnkbutSubmitApproval',\n '__EVENTARGUMENT':'' ,\n 'ctl00_ScriptManager1_HiddenField':'', \n '__VIEWSTATEGENERATOR': '61E3761A',\n 'q_player_record': 'on',\n 'ctl00$SocialMediaPanel$isExistSocialMedia': 'True',\n 'ctl00$mainContent$controlTabIndex': '0',\n 'ctl00$mainContent$hdncontrolTabValue': '0',\n 'ctl00$ucHB$hfPagename': 'tlink:tourn:search results:tournament home page',\n 'ctl00$ucHB$hfchannel': 'tlink',\n 'ctl00$ucHB$hfprop1': 'tlink:tourn',\n 'ctl00$ucHB$hfprop2': 'tlink:tourn:search results',\n 'ctl00$ucHB$hfprop3': 'tlink:tourn:search results',\n 'ctl00$ucHB$hfprop6': 'tournament home page',\n 'ctl00$ucHB$hfprop8': 'tennislink.usta.com',\n 'ctl00$ucHB$hfprop14': 'not logged in',\n 'ctl00$ucHB$hfprop43': 'Public',\n 'ctl00$ucHB$hfhier1': 'tlink:tourn:search results',\n 'ctl00$ucHB$hfprop58': 'any',\n '__ASYNCPOST': 'true'}\n \n player_headers = {'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n 'Cache-Control': 'no-cache',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Host': 'tennislink.usta.com',\n 'Origin': 'https://tennislink.usta.com',\n 'Referer': id_n_url['url'],\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/73.0.3683.103 Safari/537.36',\n 'X-MicrosoftAjax': 'Delta=true',\n 'X-Requested-With': 'XMLHttpRequest'}\n yield FormRequest(id_n_url['url'], callback=self.parse_players, headers=player_headers, formdata=player_formdata, meta=response.meta)\n\n view_state = response.xpath(\"//input[@name='__VIEWSTATE']/@value\").extract()\n if not response.meta.get(\"paginated\"):\n for i in range(results):\n form_data = {\"__EVENTTARGET\":\"dgTournaments:_ctl1:_ctl\"+str(i),\n \"ctl00_ScriptManager1_HiddenField\":\";;AjaxControlToolkit, Version=3.5.50731.0, Culture=neutral, PublicKeyToken=28f01b0e84b6d53e:en-US:ec0bb675-3ec6-4135-8b02-a5c5783f45f5:de1feab2:f9cec9bc:35576c48\",\n \"__VIEWSTATE\": view_state[0],\n \"__VIEWSTATEGENERATOR\":\"5C74730A\",\n \"q_player_record\":\"on\",\n \"ctl00$SocialMediaPanel$isExistSocialMedia\":\"False\",\n \"ctl00$mainContent$ddlResultMonths\":\"13\",\n \"ctl00$mainContent$ddlResultYears\":response.meta[\"meta\"][\"year\"],\n \"ctl00$mainContent$ddlOrderBy\":\"2\",\n \"ctl00$mainContent$ddlStartDateMonth\":'13',\n \"ctl00$mainContent$ddlStartDateYear\":\"2019\",\n 'ctl00$mainContent$start_dt': response.meta[\"meta\"]['start'],\n 'ctl00$mainContent$end_dt': response.meta[\"meta\"]['end'],\n \"ctl00$mainContent$ddlState\":response.meta[\"meta\"][\"searched_state\"],\n \"ctl00$mainContent$ddlSearchRadius\":\"-1\",\n \"ctl00$mainContent$rblTournamentsType\":\"-1\",\n \"hdnAction\":\"2\",\n \"hdnMonth\":\"13\",\n 'hdnStartDate':response.meta['meta']['start'],\n 'hdnEndDate': response.meta['meta']['end'],\n \"hdnYear\":response.meta['meta'][\"year\"],\n \"hdnState\":response.meta['meta'][\"searched_state\"],\n \"hdnSanctioned\":\"-1\",\n \"hdnSkillLevel_EntryLevel\":\"False\",\n \"hdnSkillLevel_Intermediate\":\"False\",\n \"hdnSkillLevel_Advanced\":\"False\",\n \"hdnSearchRadius\":\"-1\",\n \"ctl00$mainContent$hdnDateClick\":\"0\",\n \"hdnUserTime\":\"Thu Oct 04 2018\",\n \"ctl00$ucHB$hfPagename\":\"tlink:tourn:advanced search:search results\",\n \"ctl00$ucHB$hfchannel\":\"tlink\",\n \"ctl00$ucHB$hfprop1\":\"tlink:tourn\",\n \"ctl00$ucHB$hfprop2\":\"tlink:tourn:advanced search\",\n \"ctl00$ucHB$hfprop3\":\"tlink:tourn:advanced search\",\n \"ctl00$ucHB$hfprop6\":\"search results\",\n \"ctl00$ucHB$hfprop8\":\"tennislink.usta.com\",\n \"ctl00$ucHB$hfprop14\":\"not logged in\",\n \"ctl00$ucHB$hfprop43\":\"Public\",\n \"ctl00$ucHB$hfhier1\":\"tlink:tourn:advanced search\",\n \"ctl00$ucHB$hfprop58\":\"any\"}\n\n headers = {\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Accept-Encoding\":\"gzip, deflate, br\",\n \"Accept-Language\":\"en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7\",\n \"Cache-Control\":\"max-age=0\",\n \"Connection\":\"keep-alive\",\n \"Content-Type\":\"application/x-www-form-urlencoded\",\n \"Host\":\"tennislink.usta.com\",\n \"Origin\":\"https://tennislink.usta.com\",\n \"Upgrade-Insecure-Requests\":\"1\",\n \"User-Agent\":\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36\"\n }\n url = response.url\n response.meta.update({\"paginated\":True,\"page\":i})\n yield FormRequest(url,formdata=form_data, callback=self.parse_listings, headers=headers, meta=response.meta)\n\n def get_division(self, abbr, raw_division_name):\n \"\"\" Identifying divison from tournament abbreviations.\"\"\"\n try:\n matched = []\n for id, desc in abbr.items():\n if id in raw_division_name:\n matched.append(desc)\n return matched\n except Exception:\n import traceback\n 
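# Added note: print the full stack trace together with the raw division\n            # string so unexpected abbreviation formats are easy to spot in crawl logs.\n            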
print(traceback.format_exc(), raw_division_name)\n return None\n\n def get_abbreviations(self):\n \"\"\" Reading tournament abbreviations from \n https://tennislink.usta.com/Tournaments/Help/DivisionAbbreviation.aspx\n \"\"\"\n file_path = path.join(path.dirname(__file__), 'usta_abbreviations.json')\n abbr = json.load(open(file_path))\n return abbr\n\n def get_name(self, raw_name):\n if raw_name:\n splited_name = raw_name.split(\",\")\n first_name = splited_name[0]\n\n if len(splited_name) > 1:\n last_name = splited_name[-1].strip()\n middle_name = raw_name.replace(\n first_name, \"\").replace(last_name, \"\").strip(\",\").strip()\n else:\n last_name = None\n middle_name = None\n\n return {\"first_name\": last_name,\n \"middle_name\": middle_name,\n \"last_name\": first_name\n }\n\n def clean(self, text):\n if text:\n return ' ' .join(''.join(text).split()).strip()\n else:\n return None\n\n def parse_players(self, response):\n \"\"\" Parsing registered players.\"\"\"\n # with open(\"player-resp.html\", \"w\") as fp:\n # fp.write(response.text)\n players = []\n parser = Selector(response)\n player_listings = parser.xpath(\"//div[@class='CommonTable']//table//tr\")\n raw_selected_division = parser.xpath(\"//select[@name='ctl00$mainContent$ControlTabs7$cboEvents']//option[@selected='selected']//text()\")\n selected_division = self.clean(raw_selected_division.extract())\n abbreviations = self.get_abbreviations()\n\n for player in player_listings:\n player_name = self.clean(player.xpath(\".//td[1]//a/text()\").extract())\n player_city = self.clean(player.xpath(\".//td[2]//text()\").extract())\n events = self.clean(player.xpath(\".//td[3]//text()\").extract())\n name = self.get_name(player_name) if player_name else {}\n\n if player_name and player_city:\n divisions = self.get_division(abbreviations, events)\n for div in divisions:\n player_data = {\"player_full_name\": player_name,\n \"first_name\": name.get('first_name'),\n \"middle_name\": name.get('middle_name') or None,\n \"last_name\": name.get('last_name'),\n \"player_city\": player_city.split(',')[0].strip(),\n \"player_state\": player_city.split(',')[-1].strip(),\n \"event\": events,\n \"url\": response.url,\n \"division\": div,\n 'listing_url':response.meta['meta']['listing_url']}\n yield player_data\n","sub_path":"usta_junior_tournament_players/spiders/junior_players.py","file_name":"junior_players.py","file_ext":"py","file_size_in_byte":12765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"134711610","text":"#Valor de Pi\r\npi = 3.1415926\r\n#Imprime el valor de Pi\r\nprint(pi)\r\n#Determina el length de Pi y lo imprime. 
Tambien le resta 1, para no contar el estacio del punto (\".\")\r\nPi_Real=(len(str(pi)) - 1)\r\nprint(\"El length de Pi es:\", Pi_Real)\r\n#Inicializo el contador para el while\r\ni = 1;\r\nwhile i <= Pi_Real:\r\n #Utilizo el end = '' para indicarle al loop de while que tiene que imprimir en una sola linea\r\n print((\"%s %.{}f\".format(Pi_Real - i) % (\"*\" * i, pi)), end = ' ')\r\n i = i + 1;","sub_path":"Tarea 1/Tarea1.py","file_name":"Tarea1.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"111332066","text":"names = ['person', 'name', 'paul', 'sammy']\n\nfor name in names:\n print(f'Hello there {name}') # template string\n\nx = [i for i in range(5)] # same as commented below\n\n# for i in range(5):\n# x.append(i)\n\n# generator - much quicker to generate since it doesn't get stored in memory\n# takes longer to iterate over than something in memory however\ny = (i for i in range(5))\nprint(y)\nfor i in y:\n print(i)\n\nin_list = [2, 4, 5, 7774, 33324, 3565, 335, 5555]\n\n# returns true if divisible by 5\ndef div_by_fiv(num):\n return not num % 5\n\nxyz = (i for i in in_list if div_by_fiv(i))\n\nfor i in xyz:\n print(i)\n\n# enumerate\nex = ['one', 'two', 'blue', 'red']\nfor i, j in enumerate(ex):\n print(i, j)\n\ndi = dict(enumerate(ex))\n[print(i,j) for i, j in enumerate(di)]\n\n# more generators\ndef gen ():\n # generators don't return, they yield\n yield 'hey'\n yield 'aye'\n yield 'travel'\n\n[print(i) for i in gen()]\n\n# try to figure out combo, \ncorrect_combo = (4, 2, 7)\n\ndef combo_gen():\n for c1 in range(10):\n for c2 in range(10):\n for c3 in range(10):\n yield (c1, c2, c3)\n\nfor (c1, c2, c3) in combo_gen():\n print(c1, c2, c3)\n if (c1, c2, c3) == correct_combo:\n print(f'found combo {correct_combo}')\n break\n print(c1, c2, c3)\n","sub_path":"python-learning/intPyStrings.py","file_name":"intPyStrings.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"166329383","text":"# -*- coding: utf-8 -*-\n#Author:tik\n#CreateTime:2018-07-19\n#\n# db query use mysql. you should install pymysql first. 
you can run the command [pip install pymysql] to install pymysql module.\n#\nimport file_utils\nimport os\nimport os.path\nimport config_utils\nimport os\nimport os.path\nimport zipfile\nimport re\nimport subprocess\nimport platform\nimport codecs\nimport sys\nimport shutil\nimport time\nimport log_utils\nimport pymysql\n\n\n\nclass MySqlDB:\n\n def __init__(self, host='127.0.0.1', port=3306, user='root',pwd='000000', db='pdb'):\n self.host = host\n self.port = port\n self.user = user\n self.pwd = pwd\n self.db = db\n self.connection = None\n self.cursor = None\n\n def connect(self):\n try:\n self.connection = pymysql.connect(host=self.host, port=self.port, user=self.user, passwd=self.pwd, database=self.db, charset='utf8')\n self.cursor = self.connection.cursor(pymysql.cursors.DictCursor)\n except Exception as e:\n print(e)\n log_utils.error(\"connect db failed.\")\n return False\n\n \n return True\n\n\n def close(self):\n if self.connection and self.cursor:\n self.cursor.close()\n self.connection.close()\n\n\n def fetch_all(self, sql, params=None):\n\n suc = self.connect()\n\n if not suc:\n return None\n\n result = None\n\n try:\n if self.connection and self.cursor:\n self.cursor.execute(sql, params)\n result = self.cursor.fetchall()\n self.connection.commit()\n\n except Exception as e:\n print(e)\n log_utils.error(\"execute sql [\"+sql+\"] failed\")\n finally:\n self.close()\n\n return result\n\n\n def fetch_one(self, sql, params=None):\n\n suc = self.connect()\n\n if not suc:\n return None\n\n result = None\n\n try:\n if self.connection and self.cursor:\n self.cursor.execute(sql, params)\n result = self.cursor.fetchone()\n self.connection.commit()\n\n except Exception as e:\n print(e)\n log_utils.error(\"execute sql [\"+sql+\"] failed\")\n finally:\n self.close()\n\n return result \n","sub_path":"client/android/scripts/mysql_query.py","file_name":"mysql_query.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"290724853","text":"from user import User\n#child class\nclass Student(User):\n\tdef __init__(self):\n\t\tself.name = \"\"\n\t\tself.age = 0\n\t\tself.grade = \"\"\n\t\tself.__gender = \"\"\n\t\t#super().__init__()\n\tdef insert(self):\n\t\tprint(\"Enter Student Details:\")\n\t\tself.name = input(\"\\tName: \")\n\t\tself.age = input(\"\\tAge: \")\n\t\tself.grade = input(\"\\tGrade :\")\n\t\tself.__gender = input(\"\\tGender: \")\n\t\tself.set_username(input(\"\\tUsername: \"))\n\t\tself.set_password(input(\"\\tPassword: \"))\n\t\tself._role = input(\"Role: \")\n\tdef display(self):\n\t\tprint(\"Details:\")\n\t\tprint(f\"{self.name} | {self.age} | {self.grade} | {self.__gender} | {self.get_username()} | {self._role}\")\n\t\tself._data() #Should not use\n#\t\tself.__sample() #Not accessible\n\n\tdef xyz(self):\n\t\tsuper().display()\n","sub_path":"docs_and_codes/DayWiseFiles_ppt/18AugDay18/learn_python/OOPs/stud_inherit_abstract_polymorphism/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"182347968","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import poisson, norm, uniform, gamma, power_divergence, chi2\nfrom scipy.optimize import minimize\nfrom scipy.optimize.optimize import _minimize_neldermead\nimport emcee, corner, time\n\nclass IterativeFitter(object):\n\n '''\n A class for the ramp iterative fitter technique\n\n 
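Typical usage (a sketch; assumes an already constructed\n    ramp.RampMeasurement instance named rm):\n\n        fitter = IterativeFitter(rm)\n        error, n_iter, good_intervals, n_cr_iter = fitter.perform_fit()\n        fitter.goodness_of_fit(mode='G-test')\n\n    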
:RM:\n a ramp.RampMeasurement object\n\n :fitpars:\n a dictionary with all the fit parameters\n\n '''\n def __init__(self,RM,fitpars=None):\n\n self.RM = RM\n if fitpars is not None:\n self.fitpars = fitpars\n else:\n self.fitpars = {'one_iteration_method':'Nelder-Mead'}\n\n '''\n Initialize the \"hat\" values. These are the latent poisson variables \"actual number of electrons deposited in each interval\"\n The _new values are the auxiliary variables used to iterate over the _hat ones\n '''\n \n self.x_hat = np.floor(self.RM.noisy_counts*self.RM.gain)\n self.dt = np.zeros_like(self.x_hat)\n\n for i in range(1,self.dt.size):\n self.dt[i] = self.RM.RTS.group_times[i] - self.RM.RTS.group_times[i-1] \n\n \n for i in range(1,self.x_hat.size):\n if self.x_hat[i] < self.x_hat[i-1]:\n self.x_hat[i] = self.x_hat[i-1]\n\n self.x_hat = self.x_hat+np.mean(self.RM.noisy_counts*self.RM.gain)-np.mean(self.x_hat)\n\n electron_rates = (self.x_hat[1:]-self.x_hat[:-1])/self.dt[1:]\n self.mean_electron_rate = np.median(electron_rates) #set initially to median to avoid problems with CR in case of few reads\n\n self.x_new = np.copy(self.x_hat)\n \n\n\n '''\n Freeze the normal pdfs to compare \"true\" and measured counts and the poisson pdf to compare the\n mean and \"true\" electrons\n '''\n\n self.normal_distr = []\n self.poisson_distr = []\n for i in range(len(self.RM.noisy_counts)):\n self.normal_distr.append(norm(loc=(self.RM.noisy_counts[i]*self.RM.gain),scale=self.RM.RON_e))\n self.poisson_distr.append(poisson(mu=self.mean_electron_rate*self.dt[i]))\n\n def loglikelihood_all(self,x):\n '''\n Function that returns minus the log-likelihood of the measured counts, given an electron flux\n and the noise characteristics. Does all reads in one\n\n :x:\n a numpy array of length equal to the number of groups in the ramp\n\n '''\n\n xr = np.round(x).astype(np.int_)\n \n poisson_pmf = np.empty_like(x)\n gaussian_pdf = np.empty_like(x)\n \n for i in range(len(xr)):\n if i == 0:\n poisson_pmf[i] = 1.\n else:\n if xr[i] < xr[i-1]:\n return np.inf\n else:\n poisson_pmf[i] = self.poisson_distr[i].pmf(xr[i]-xr[i-1])\n gaussian_pdf[i] = self.normal_distr[i].pdf(xr[i])\n \n keep_grps = np.empty_like(xr,dtype=np.bool_)\n for i in range(len(xr)):\n if i == 0:\n intdw = i\n intup = i\n elif i == (len(xr)-1):\n intdw = i-1\n intup = i-1\n else:\n intdw = i-1\n intup = i\n \n if ((self.good_intervals[intdw] == True) | (self.good_intervals[intup] == True)):\n keep_grps[i] = True\n else:\n keep_grps[i] = False\n \n\n return -1.* ( np.sum(np.log(gaussian_pdf)[keep_grps]) + np.sum(np.log(poisson_pmf[1:])[self.good_intervals]) )\n\n\n def one_iteration(self,conv_attempts = 5):\n\n '''\n This function calls the scipy minimize routine, passing all the needed arguments to it.\n After minimize returns, one_iteration does some book keeping\n '''\n\n success,attempts = False, 0\n while success == False:\n attempts = attempts + 1\n \n if self.fitpars['one_iteration_method'] == 'Nelder-Mead':\n sim = np.empty((self.x_hat.size + 1, self.x_hat.size), dtype=self.x_hat.dtype)\n sim[0] = self.x_hat\n for k in range(self.x_hat.size):\n y = self.x_new + np.random.normal(scale=self.RM.RON_e,size=self.x_hat.size)\n for i in range(1,self.x_hat.size):\n if y[i] < y[i-1]:\n y[i] = y[i-1]\n sim[k+1] = y+np.mean(self.RM.noisy_counts*self.RM.gain)-np.mean(y)\n\n \n self.minimize_res = _minimize_neldermead(self.loglikelihood_all,self.x_hat,initial_simplex=sim)\n else:\n self.minimize_res = 
minimize(self.loglikelihood_all,self.x_hat,method=self.fitpars['one_iteration_method'])\n \n \n success = self.minimize_res['success']\n if success == True:\n self.x_new = np.round(self.minimize_res['x'])\n self.x_hat = np.copy(self.x_new)\n else:\n '''\n If the minimizer did not converge, restart from slightly different initial condistions\n '''\n self.x_hat = self.x_new + np.random.normal(scale=self.RM.RON_e,size=self.x_hat.size)\n for i in range(1,self.x_hat.size):\n if self.x_hat[i] < self.x_hat[i-1]:\n self.x_hat[i] = self.x_hat[i-1]\n self.x_hat = self.x_hat+np.mean(self.RM.noisy_counts*self.RM.gain)-np.mean(self.x_hat)\n\n\n if attempts >= conv_attempts:\n success = True\n self.x_new = np.round(self.minimize_res['x'])\n self.x_hat = np.copy(self.x_new)\n \n electron_rates = (self.x_new[1:]-self.x_new[:-1])/self.dt[1:]\n self.mean_electron_rate = np.mean(electron_rates[self.good_intervals])\n\n for i in range(len(self.RM.noisy_counts)):\n self.poisson_distr[i] = poisson(mu=self.mean_electron_rate*self.dt[i])\n\n\n\n\n def perform_fit(self,thr=None,maxCRiter=10,maxiter=20,CRthr=4.):\n\n '''\n Wrapper function to perform the up-the-ramp fit, test for cosmic rays, check convergence, issue error status\n\n :thr:\n Threshold for convergence\n\n :maxiter:\n Maximum number of iterations\n\n :CRthr:\n Threshold for CR hits flagging. Represents the number of standard deviations a single delta-group must differ from the expectation, in order to be flagged \n '''\n\n if thr is None:\n '''\n Two values of the flux that produce, on average, less than one count of difference within that ramp, cannot be distinguished,\n this is the ultimate threshold for convergence (hence the gain term in the squared sum below).\n To this we add the standard deviation of the effective noise: given the same mean flux, one cannot distinguish two measurements\n to better than this noise floor, hence the effRON_e term in the sum\n '''\n \n thr = np.sqrt(np.sum(np.square(np.array([self.RM.gain,self.RM.effRON_e])))) /self.RM.RTS.group_times[-1] \n #thr = self.RM.gain /self.RM.RTS.group_times[-1]\n \n\n\n old_mean_electron_rate = self.mean_electron_rate\n \n #Initial flagging of CR hits\n stddev = np.sqrt(self.mean_electron_rate*self.dt[1:]+2*np.square(self.RM.RON_e))\n diffs = self.RM.gain*(self.RM.noisy_counts[1:]-self.RM.noisy_counts[:-1]) - self.mean_electron_rate*self.dt[1:]\n self.good_intervals = np.fabs( diffs/stddev) < CRthr\n\n check_CRs = 1\n crloops_counter = 0\n\n while check_CRs:\n crloops_counter = crloops_counter + 1\n\n if (np.any(self.good_intervals) == True):\n check_conv = 1\n counter = 0\n error = 0\n else:\n counter = 0\n error = 2\n return error, counter, self.good_intervals, crloops_counter\n\n while check_conv:\n\n self.one_iteration()\n counter = counter+1\n if np.fabs(self.mean_electron_rate-old_mean_electron_rate) < thr:\n check_conv = 0\n if (counter > maxiter):\n error = 1\n return error, counter, self.good_intervals, crloops_counter\n \n old_mean_electron_rate = self.mean_electron_rate\n \n\n #test here for CR presence\n stddev = np.sqrt(self.mean_electron_rate*self.dt[1:]+2*np.square(self.RM.RON_e))\n diffs = self.RM.gain*(self.RM.noisy_counts[1:]-self.RM.noisy_counts[:-1]) - self.mean_electron_rate*self.dt[1:]\n new_good_intervals = np.fabs(diffs/stddev) < CRthr\n\n if np.array_equal(self.good_intervals,new_good_intervals):\n check_CRs = 0\n else: \n self.good_intervals = new_good_intervals\n electron_rates = (self.x_new[1:]-self.x_new[:-1])/self.dt[1:]\n self.mean_electron_rate = 
np.mean(electron_rates[self.good_intervals])\n \n \n if (crloops_counter > maxCRiter):\n error = 3\n return error, counter, self.good_intervals, crloops_counter\n\n \n return error, counter, self.good_intervals, crloops_counter\n\n\n\n\n def goodness_of_fit(self,mode='G-test'):\n\n '''\n Method to perform a goodness of fit test of the derived count rate.\n It compares the expected counts (from the final rate atimes the interval times) with the \"observed counts\" the latter are obtained\n not from the actual data, but from the poisson latent variable (i.e. before read noise).\n \n\n :mode: the type of test perfomed.\n\n Possible values are 'G-test', 'Pearson-chi-sq'\n\n G-test: (https://en.wikipedia.org/wiki/G-test)\n This is the default value.\n The G-test statistics, based on a likelihood ratio, is a better approximation to the chi-squared distribution\n than Pearson's chi-square, which fails for small number counts\n\n Pearson-chi-sq: (https://en.wikipedia.org/wiki/Pearson's_chi-squared_test)\n Pearsons' chi square is implemented and should give similar results for moderately large observed count rates\n \n Squared deviations: (https://en.wikipedia.org/wiki/Reduced_chi-squared_statistic)\n Use the variance of the counts plus the variance of the readnoise, summed together, as the\n denominator \n '''\n\n\n f_obs = (self.RM.noisy_counts[1:]-self.RM.noisy_counts[:-1])[self.good_intervals]\n f_exp = (self.mean_electron_rate * self.dt[1:]/self.RM.gain)[self.good_intervals]\n ddof = 1\n dof = np.sum(self.good_intervals) - 1 - ddof\n\n if mode == 'G-test':\n g,p = power_divergence(f_obs, f_exp=f_exp, ddof=ddof, lambda_='log-likelihood')\n\n elif mode == 'Pearson-chi-sq':\n g,p = power_divergence(f_obs, f_exp=f_exp, ddof=ddof, lambda_='pearson')\n\n elif mode == 'Squared-deviations':\n variance = (f_exp+2*np.square(self.RM.RON_e))/np.square(self.RM.gain)\n g = np.sum(np.square(f_obs-f_exp)/variance)\n p = chi2.sf(g,dof) \n\n\n else:\n print('Goodness of fit test type not supported')\n assert False\n \n self.gof_stat = g\n self.gof_pval = p\n\n def test_plot(self):\n '''\n Method to plot the fit results\n '''\n f,ax = plt.subplots(1,1,figsize=(10,5))\n ax.scatter(self.RM.RTS.group_times,self.RM.noisy_counts,label='Noisy Counts',s=100,marker='*')\n ax.scatter(self.RM.RTS.group_times,self.x_new/self.RM.gain,label='Convergence counts',s=25)\n ax.scatter(self.RM.RTS.group_times,self.RM.noisy_counts-self.RM.RON_effective/self.RM.gain,label='Noiseless Counts + \\n Bias + KTC + CRs',s=25)\n ax.scatter(self.RM.RTS.group_times,self.RM.noisy_counts-(self.RM.RON_effective-np.mean(self.RM.RON_effective))/self.RM.gain,label='Noiseless Counts + \\n Bias + KTC +\\n mean RON',s=25)\n \n ax.plot(self.RM.RTS.group_times,(self.x_new[0]+self.mean_electron_rate*self.RM.RTS.group_times)/self.RM.gain)\n\n for j,gi in enumerate(self.good_intervals):\n if ~gi:\n ax.axvline(0.5*(self.RM.RTS.group_times[j]+self.RM.RTS.group_times[j+1]),color='#bbbbbb',linestyle='--')\n \n ax.legend()\n f.tight_layout()\n\n\n","sub_path":"ramp_utils/fitter.py","file_name":"fitter.py","file_ext":"py","file_size_in_byte":12732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"196814343","text":"from knock52 import read_data, vectorize\nimport joblib\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import MultinomialNB\n\n# Search for the training algorithms and \n# 
parameters that achieves the best accuracy score on the validation data. \n# Then compute its accuracy score on the test data.\n\nif __name__ == \"__main__\":\n vectorizer = joblib.load('vectorizer.pkl')\n\n train = open('train.feature.txt')\n train_ftr, train_label = read_data(train)\n valid = open('valid.feature.txt')\n valid_ftr, valid_label = read_data(valid)\n test = open('test.feature.txt')\n test_ftr, test_label = read_data(test)\n\n x_train = vectorizer.transform(train_ftr)\n y_train = train_label\n x_valid = vectorizer.transform(valid_ftr)\n y_valid = valid_label\n x_test = vectorizer.transform(test_ftr)\n y_test = test_label\n \n # Multinomial NB\n nb_model = MultinomialNB()\n nb_model.fit(x_train,y_train)\n\n # Random Forest\n rf_model = RandomForestClassifier()\n rf_model.fit(x_train,y_train)\n\n # SVM\n svm_model = LinearSVC(multi_class = 'crammer_singer', class_weight='balanced')\n svm_model.fit(x_train,y_train)\n\n print('\\nValid:')\n print(nb_model.score(x_valid,y_valid))\n print(rf_model.score(x_valid,y_valid))\n print(svm_model.score(x_valid,y_valid))\n\n print('\\nTest:')\n print(nb_model.score(x_test, y_test))\n print(rf_model.score(x_test, y_test))\n print(svm_model.score(x_test, y_test))\n\n'''\nValid:\nLG 0.813008\nNB 0.646341\nRF 0.760162\nSVM 0.804878\n\nTest:\nLG 0.828571\nNB 0.604081\nRF 0.775510\nSVM 0.832653\n'''","sub_path":"oryza/chapter06/knock59.py","file_name":"knock59.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"229424381","text":"from json import dump as json_dump\nfrom time import sleep\nfrom urllib.request import HTTPError\nfrom tmdb_helper import get_movie, get_api_key\n\nif __name__ == \"__main__\":\n api_key = get_api_key()\n movies_start = int(input(\"Enter start movie ID: [1]\") or \"1\")\n movies_num = int(input(\"Enter number of movies: [1000]\") or \"1000\")\n queue = list(range(movies_start, movies_start + movies_num))\n movie_list = {}\n tries = 0\n\n for index, movie_id in enumerate(queue):\n try:\n print(\"Request movie with ID %d\" % movie_id)\n response = get_movie(movie_id, api_key=api_key)\n except HTTPError as error:\n if error.getcode() == 429:\n rateLimitRemaining = int(error.headers['X-RateLimit-Remaining'])\n print(\"Too many requests. Wait %d s.\" % (rateLimitRemaining + 1))\n sleep(rateLimitRemaining + 1)\n elif error.getcode() == 404:\n new_id = queue[-1] + 1\n print(\"Movie with ID %d not found. Delete this ID and append ID %d\" % (\n movie_id, new_id)\n )\n queue.append(new_id)\n else:\n tries += 1\n if tries > 5:\n raise error\n continue\n tries = 0\n movie_title = response[\"title\"]\n movie_list[movie_title] = response\n print(\"Movie \\\"%s\\\" was found. Remained %d movies\" % (\n movie_title, len(queue) - index - 1))\n print(\"Export DB to movies.json file\")\n with open('movies.json', 'w') as filmsFile:\n json_dump(movie_list, filmsFile)\n print(\"Task done! 
You can use your movie DB.\")\n","sub_path":"generate_moviedb.py","file_name":"generate_moviedb.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"140789119","text":"#!/usr/bin/env python3.7\n\"\"\"\n\n\"\"\"\n\nfrom __future__ import annotations\nfrom typing import List, Dict, Tuple, Union\nimport json\nfrom asyncio import get_event_loop, ensure_future, wait, FIRST_COMPLETED, sleep\nfrom websockets import serve\nfrom andes_addon.dime import Dime\nfrom andes_addon.pymatbridge import _Session\n\n\n\n# Patch Dime's decode function to not do special processing\n_Session.json_decode = lambda self, x: json.loads(x)\n_Session.json_encode = lambda self, x: json.dumps(x)\n\n\nS_CONS_NAME = 0\nS_CONS_TARGET = 1\nS_CONS_DATA = 2\n\nS_PROD_RECV = 0\nS_PROD_VALUE = 1\n\n\n_g_dimec: Dime = None\n_g_cons_state = S_CONS_NAME\n_g_cons_name = None\n_g_cons_target = None\n_g_cons_data = None\n_g_prod_state = S_PROD_RECV\n_g_prod_name = None\n_g_prod_value = None\n\n\nasync def consumer(message: str):\n\tglobal _g_cons_state\n\tglobal _g_cons_name\n\tglobal _g_cons_target\n\tglobal _g_cons_data\n\n\tprint(f'cons: {_g_cons_state}')\n\n\tif _g_cons_state == S_CONS_NAME:\n\t\t_g_cons_name = message\n\t\t_g_cons_state = S_CONS_TARGET\n\telif _g_cons_state == S_CONS_TARGET:\n\t\t_g_cons_target = message\n\t\t_g_cons_state = S_CONS_DATA\n\telif _g_cons_state == S_CONS_DATA:\n\t\t_g_cons_data = json.loads(message)\n\n\t\t_g_dimec.send_var(_g_cons_target, _g_cons_name, _g_cons_data)\n\t\t\n\t\t_g_cons_state = S_CONS_NAME\n\telse:\n\t\traise NotImplementedError\n\n\nasync def producer() -> str:\n\tglobal _g_prod_state\n\tglobal _g_prod_name\n\tglobal _g_prod_value\n\n\tprint(f'prod: {_g_prod_state}')\n\n\tif _g_prod_state == S_PROD_RECV:\n\t\twhile True:\n\t\t\tname = _g_dimec.sync()\n\t\t\tif not name:\n\t\t\t\tawait sleep(0.1)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tbreak\n\t\t\t\n\t\t_g_prod_name = name\n\t\t_g_prod_value = json.dumps(_g_dimec.workspace[name])\n\t\t_g_prod_state = S_PROD_VALUE\n\t\treturn _g_prod_name\n\n\telif _g_prod_state == S_PROD_VALUE:\n\t\t_g_prod_state = S_PROD_RECV\n\t\treturn _g_prod_value\n\telse:\n\t\traise NotImplementedError\n\n\nasync def consumer_handler(websocket, path):\n\tasync for message in websocket:\n\t\tawait consumer(message)\n\n\nasync def producer_handler(websocket, path):\n\twhile True:\n\t\tmessage = await producer()\n\t\tawait websocket.send(message)\n\n\nasync def handler(websocket, path):\n\tconsumer_task = ensure_future(consumer_handler(websocket, path))\n\tproducer_task = ensure_future(producer_handler(websocket, path))\n\tdone, pending = await wait([consumer_task, producer_task], return_when=FIRST_COMPLETED)\n\tfor task in pending:\n\t\ttask.cancel()\n\n\ndef main(bind, port, dhost, dport):\n\tprint(f'Connecting to dime on tcp://{dhost}:{dport}')\n\tdimec = Dime('geovis', f'tcp://{dhost}:{dport}')\n\tok = dimec.start()\n\tif not ok:\n\t\traise ValueError('Could not start dime client')\n\t\treturn\n\n\tglobal _g_dimec\n\t_g_dimec = dimec\n\n\tprint(f'Listening on {bind}:{port}')\n\tstart_server = serve(handler, bind, port)\n\t\n\tloop = get_event_loop()\n\tloop.run_until_complete(start_server)\n\tloop.run_forever()\n\n\ndef cli():\n\timport argparse\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--bind', default='')\n\tparser.add_argument('--port', type=int, default=8810)\n\tparser.add_argument('--dhost', 
default='127.0.0.1')\n\tparser.add_argument('--dport', default=8819)\n\targs = vars(parser.parse_args())\n\n\tmain(**args)\n\n\nif __name__ == '__main__':\n\tcli()\n","sub_path":"wsdime.py","file_name":"wsdime.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"485878439","text":"def maximo(x, y, z):\n ''' (int, int, int) -> int\n A função maximo que recebe 3 números inteiros como parâmetro e\n devolve o maior deles\n '''\n # x é o maior\n if x > y and x > z:\n resultado = x\n elif y > x and y > z:\n resultado = y\n else:\n resultado = z\n return resultado\n\n\n# =============================================================================\n# # Testes #\n# print(maximo(30, 14, 10)) # deve devolver 30\n# print(maximo(0, -1, 1)) # deve devolver 1\n# =============================================================================\n","sub_path":"PyCursoUSP - Parte 1/semana 5/maximo_3.py","file_name":"maximo_3.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"21191275","text":"import cv2\r\nimport numpy as np\r\nfrom time import time\r\nimport mediapipe as mp\r\nimport matplotlib.pyplot as plt\r\n\r\n# Initialize the mediapipe hands class.\r\nmp_hands = mp.solutions.hands\r\n\r\n# Set up the Hands function.\r\nhands = mp_hands.Hands(\r\n static_image_mode=True, max_num_hands=2, min_detection_confidence=0.8\r\n)\r\n\r\n# Initialize the mediapipe drawing class.\r\nmp_drawing = mp.solutions.drawing_utils\r\n\r\n\r\nhands_video = mp_hands.Hands(\r\n static_image_mode=False,\r\n max_num_hands=2,\r\n min_detection_confidence=0.8,\r\n min_tracking_confidence=0.5,\r\n)\r\n\r\n\r\ndef detectHandsLandmarks(image, hands, display=True):\r\n \"\"\"\r\n This function performs hands landmarks detection on an image.\r\n Args:\r\n image: The input image with prominent hand(s) whose landmarks needs to be detected.\r\n hands: The hands function required to perform the hands landmarks detection.\r\n display: A boolean value that is if set to true the function displays the original input image, and the output\r\n image with hands landmarks drawn and returns nothing.\r\n Returns:\r\n output_image: The input image with the detected hands landmarks drawn.\r\n results: The output of the hands landmarks detection on the input image.\r\n \"\"\"\r\n\r\n # Create a copy of the input image to draw landmarks on.\r\n output_image = image.copy()\r\n\r\n # Convert the image from BGR into RGB format.\r\n imgRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n\r\n # Perform the Hands Landmarks Detection.\r\n results = hands.process(imgRGB)\r\n\r\n # Check if landmarks are found.\r\n if results.multi_hand_landmarks:\r\n\r\n # Iterate over the found hands.\r\n for hand_landmarks in results.multi_hand_landmarks:\r\n\r\n # Draw the hand landmarks on the copy of the input image.\r\n mp_drawing.draw_landmarks(\r\n image=output_image,\r\n landmark_list=hand_landmarks,\r\n connections=mp_hands.HAND_CONNECTIONS,\r\n )\r\n\r\n # Check if the original input image and the output image are specified to be displayed.\r\n if display:\r\n\r\n # Display the original input image and the output image.\r\n plt.figure(figsize=[15, 15])\r\n plt.subplot(121)\r\n plt.imshow(image[:, :, ::-1])\r\n plt.title(\"Original Image\")\r\n plt.axis(\"off\")\r\n plt.subplot(122)\r\n plt.imshow(output_image[:, :, ::-1])\r\n plt.title(\"Output\")\r\n plt.axis(\"off\")\r\n\r\n # 
Otherwise\r\n else:\r\n\r\n # Return the output image and results of hands landmarks detection.\r\n return output_image, results\r\n\r\n\r\ndef getHandType(image, results, draw=True, display=True):\r\n \"\"\"\r\n This function performs hands type (left or right) classification on hands.\r\n Args:\r\n image: The image of the hands that needs to be classified, with the hands landmarks detection already performed.\r\n results: The output of the hands landmarks detection performed on the image in which hands types needs\r\n to be classified.\r\n draw: A boolean value that is if set to true the function writes the hand type label on the output image.\r\n display: A boolean value that is if set to true the function displays the output image and returns nothing.\r\n Returns:\r\n output_image: The image of the hands with the classified hand type label written if it was specified.\r\n hands_status: A dictionary containing classification info of both hands.\r\n \"\"\"\r\n\r\n # Create a copy of the input image to write hand type label on.\r\n output_image = image.copy()\r\n\r\n # Initialize a dictionary to store the classification info of both hands.\r\n hands_status = {\r\n \"Right\": False,\r\n \"Left\": False,\r\n \"Right_index\": None,\r\n \"Left_index\": None,\r\n }\r\n\r\n # Iterate over the found hands in the image.\r\n for hand_index, hand_info in enumerate(results.multi_handedness):\r\n\r\n # Retrieve the label of the found hand.\r\n hand_type = hand_info.classification[0].label\r\n\r\n # Update the status of the found hand.\r\n hands_status[hand_type] = True\r\n\r\n # Update the index of the found hand.\r\n hands_status[hand_type + \"_index\"] = hand_index\r\n\r\n # Check if the hand type label is specified to be written.\r\n if draw:\r\n\r\n # Write the hand type on the output image.\r\n cv2.putText(\r\n output_image,\r\n hand_type + \" Hand Detected\",\r\n (10, (hand_index + 1) * 30),\r\n cv2.FONT_HERSHEY_PLAIN,\r\n 2,\r\n (0, 255, 0),\r\n 2,\r\n )\r\n\r\n # Check if the output image is specified to be displayed.\r\n if display:\r\n\r\n # Display the output image.\r\n plt.figure(figsize=[10, 10])\r\n plt.imshow(output_image[:, :, ::-1])\r\n plt.title(\"Output Image\")\r\n plt.axis(\"off\")\r\n\r\n # Otherwise\r\n else:\r\n\r\n # Return the output image and the hands status dictionary that contains classification info.\r\n return output_image, hands_status\r\n\r\n\r\ndef drawBoundingBoxes(\r\n image, results, hand_status, padd_amount=10, draw=True, display=True\r\n):\r\n \"\"\"\r\n This function draws bounding boxes around the hands and write their classified types near them.\r\n Args:\r\n image: The image of the hands on which the bounding boxes around the hands needs to be drawn and the\r\n classified hands types labels needs to be written.\r\n results: The output of the hands landmarks detection performed on the image on which the bounding boxes needs\r\n to be drawn.\r\n hand_status: The dictionary containing the classification info of both hands.\r\n padd_amount: The value that specifies the space inside the bounding box between the hand and the box's borders.\r\n draw: A boolean value that is if set to true the function draws bounding boxes and write their classified\r\n types on the output image.\r\n display: A boolean value that is if set to true the function displays the output image and returns nothing.\r\n Returns:\r\n output_image: The image of the hands with the bounding boxes drawn and hands classified types written if it\r\n was specified.\r\n output_landmarks: The 
dictionary that stores both (left and right) hands landmarks as different elements.\r\n \"\"\"\r\n\r\n # Create a copy of the input image to draw bounding boxes on and write hands types labels.\r\n output_image = image.copy()\r\n\r\n # Initialize a dictionary to store both (left and right) hands landmarks as different elements.\r\n output_landmarks = {}\r\n\r\n # Get the height and width of the input image.\r\n height, width, _ = image.shape\r\n bounding_box = []\r\n # Iterate over the found hands.\r\n for hand_index, hand_landmarks in enumerate(results.multi_hand_landmarks):\r\n bbox = []\r\n # Initialize a list to store the detected landmarks of the hand.\r\n landmarks = []\r\n\r\n # Iterate over the detected landmarks of the hand.\r\n for landmark in hand_landmarks.landmark:\r\n\r\n # Append the landmark into the list.\r\n landmarks.append(\r\n (\r\n int(landmark.x * width),\r\n int(landmark.y * height),\r\n (landmark.z * width),\r\n )\r\n )\r\n\r\n # Get all the x-coordinate values from the found landmarks of the hand.\r\n x_coordinates = np.array(landmarks)[:, 0]\r\n\r\n # Get all the y-coordinate values from the found landmarks of the hand.\r\n y_coordinates = np.array(landmarks)[:, 1]\r\n\r\n # Get the bounding box coordinates for the hand with the specified padding.\r\n x1 = int(np.min(x_coordinates) - padd_amount)\r\n y1 = int(np.min(y_coordinates) - padd_amount)\r\n x2 = int(np.max(x_coordinates) + padd_amount)\r\n y2 = int(np.max(y_coordinates) + padd_amount)\r\n # print(x1,y1,x2,y2)\r\n bbox.append([x1, y1, x2, y2])\r\n # Initialize a variable to store the label of the hand.\r\n label = \"Unknown\"\r\n\r\n # Check if the hand we are iterating upon is the right one.\r\n if hand_status[\"Right_index\"] == hand_index:\r\n\r\n # Update the label and store the landmarks of the hand in the dictionary.\r\n label = \"Right Hand\"\r\n output_landmarks[\"Right\"] = landmarks\r\n\r\n # Check if the hand we are iterating upon is the left one.\r\n elif hand_status[\"Left_index\"] == hand_index:\r\n\r\n # Update the label and store the landmarks of the hand in the dictionary.\r\n label = \"Left Hand\"\r\n output_landmarks[\"Left\"] = landmarks\r\n\r\n # Check if the bounding box and the classified label is specified to be written.\r\n if draw:\r\n\r\n # Draw the bounding box around the hand on the output image.\r\n cv2.rectangle(\r\n output_image, (x1, y1), (x2, y2), (155, 0, 255), 3, cv2.LINE_8\r\n )\r\n\r\n # Write the classified label of the hand below the bounding box drawn.\r\n cv2.putText(\r\n output_image,\r\n label,\r\n (x1, y2 + 25),\r\n cv2.FONT_HERSHEY_COMPLEX,\r\n 0.7,\r\n (20, 255, 155),\r\n 1,\r\n cv2.LINE_AA,\r\n )\r\n bounding_box.append(bbox)\r\n # Check if the output image is specified to be displayed.\r\n if display:\r\n\r\n # Display the output image.\r\n plt.figure(figsize=[10, 10])\r\n plt.imshow(output_image[:, :, ::-1])\r\n plt.title(\"Output Image\")\r\n plt.axis(\"off\")\r\n\r\n # Otherwise\r\n else:\r\n\r\n # Return the output image and the landmarks dictionary.\r\n return output_image, output_landmarks, bounding_box\r\n\r\n\r\ndef hand_data(frame, bdraw=True):\r\n # Perform Hands landmarks detection.\r\n frame, results = detectHandsLandmarks(frame, hands_video, display=False)\r\n # Check if landmarks are found in the frame.\r\n\r\n if results.multi_hand_landmarks:\r\n lmList_all = []\r\n # Perform hand(s) type (left or right) classification.\r\n _, hands_status = getHandType(frame.copy(), results, draw=False, display=False)\r\n if bdraw == True:\r\n # Draw 
bounding boxes around the detected hands and write their classified types near them.\r\n frame, _, bbox = drawBoundingBoxes(\r\n frame, results, hands_status, display=False\r\n )\r\n\r\n for handLms in results.multi_hand_landmarks:\r\n lmList = []\r\n for id, lm in enumerate(handLms.landmark):\r\n # print(id, lm)\r\n h, w, c = frame.shape\r\n cx, cy = int(lm.x * w), int(lm.y * h)\r\n lmList.append([id, cx, cy])\r\n lmList_all.append(lmList)\r\n # bbox is only assigned when bdraw is true; fall back to a placeholder otherwise.\r\n if not bdraw:\r\n bbox = [\"NULL\"]\r\n return frame, lmList_all, bbox\r\n\r\n # No hands were found: return placeholders so callers can always unpack three values.\r\n return frame, [\"NULL\"], [\"NULL\"]\r\n\r\n\r\n# import cv2\r\n# from Hand_Detection import hand_data\r\n# cap=cv2.VideoCapture(0)\r\n# cap.set(3,1280)\r\n# cap.set(4,720)\r\n# while True:\r\n# success,img=cap.read()\r\n# img=cv2.flip(img,1)\r\n# img,lmList_all,bbox=hand_data(img,bdraw=True)\r\n# cv2.imshow(\"VIDEO\",img)\r\n# cv2.waitKey(1)\r\n","sub_path":"Hand_Detection.py","file_name":"Hand_Detection.py","file_ext":"py","file_size_in_byte":11486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"529520454","text":"import numpy as np\nimport tensorflow as tf\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\nfrom predict_3dpose import create_model\nimport data_utils\nimport cameras\n\nFLAGS = tf.app.flags.FLAGS\norder = [15, 12, 25, 26, 27, 17, 18, 19, 1, 2, 3, 6, 7, 8]\nSUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]\nbatch_size = 128\n\nclass Estimator_3D:\n    def __init__(self, use_gpu=False):\n        actions = data_utils.define_actions(FLAGS.action)\n        rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)\n        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(\n            actions, FLAGS.data_dir)\n        train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(\n            actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)\n\n        self.train_set_2d = train_set_2d\n        self.test_set_2d = test_set_2d\n        self.data_mean_2d = data_mean_2d\n        self.data_std_2d = data_std_2d\n        self.dim_to_use_2d = dim_to_use_2d\n        self.dim_to_ignore_2d = dim_to_ignore_2d\n        self.train_set_3d = train_set_3d\n        self.test_set_3d = test_set_3d\n        self.data_mean_3d = data_mean_3d\n        self.data_std_3d = data_std_3d\n        self.dim_to_use_3d = dim_to_use_3d\n        self.dim_to_ignore_3d = dim_to_ignore_3d\n\n        device_count = {\"GPU\": 1} if use_gpu else {\"GPU\": 0}\n        self.persistent_sess = tf.Session(config=tf.ConfigProto(device_count=device_count, allow_soft_placement=True))\n        with self.persistent_sess.as_default():\n            self.graph = tf.get_default_graph()\n            self.model = create_model(self.persistent_sess, actions, batch_size)\n\ndef get_3d_estimator(use_gpu=False):\n    return Estimator_3D(use_gpu)\n\ndef predict_3d(poses, estimator):\n    enc_in = np.zeros((1, 64))\n    enc_in[0] = [0 for i in range(64)]\n\n    with estimator.persistent_sess.as_default():\n        with estimator.graph.as_default():\n            _3d_predictions = []\n            for n, xy in enumerate(poses):\n                joints_array = np.zeros((1, 36))\n                joints_array[0] = [float(xy[o]) for o in range(36)]\n\n                _data = joints_array[0]\n                # mapping all body parts to 3d-pose-baseline format\n                for i in range(len(order)):\n                    for j in range(2):\n                        # create encoder input\n                        enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]\n                for j in range(2):\n                    # Hip\n                    enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] + enc_in[0][6 * 2 + j]) / 2\n 
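# NOTE (added comment): joints 0 (Hip), 14 (Neck/Nose) and 13 (Thorax) are absent from the\n                    # 14-joint detector output listed in 'order', so they appear to be synthesized here from\n                    # neighbouring keypoints before the 2D pose is normalized and fed to the model.\n 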
# Neck/Nose\n                    enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] + enc_in[0][12 * 2 + j]) / 2\n                    # Thorax\n                    enc_in[0][13 * 2 + j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 + j]\n\n                # set spine\n                spine_x = enc_in[0][24]\n                spine_y = enc_in[0][25]\n\n                enc_in = enc_in[:, estimator.dim_to_use_2d]\n                mu = estimator.data_mean_2d[estimator.dim_to_use_2d]\n                stddev = estimator.data_std_2d[estimator.dim_to_use_2d]\n                enc_in = np.divide((enc_in - mu), stddev)\n\n                # dropout keep probability; 1.0 disables dropout at inference time\n                dp = 1.0\n                dec_out = np.zeros((1, 48))\n                dec_out[0] = [0 for i in range(48)]\n                _, _, poses3d = estimator.model.step(estimator.persistent_sess, enc_in, dec_out, dp, isTraining=False)\n                all_poses_3d = []\n                enc_in = data_utils.unNormalizeData(enc_in, estimator.data_mean_2d, estimator.data_std_2d, estimator.dim_to_ignore_2d)\n                poses3d = data_utils.unNormalizeData(poses3d, estimator.data_mean_3d, estimator.data_std_3d, estimator.dim_to_ignore_3d)\n                all_poses_3d.append( poses3d )\n                enc_in, poses3d = map( np.vstack, [enc_in, all_poses_3d] )\n                subplot_idx, exidx = 1, 1\n                _max = 0\n                _min = 10000\n\n                # swap the Y and Z axes and track the min/max height for the flip below\n                for i in range(poses3d.shape[0]):\n                    for j in range(32):\n                        tmp = poses3d[i][j * 3 + 2]\n                        poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]\n                        poses3d[i][j * 3 + 1] = tmp\n                        if poses3d[i][j * 3 + 2] > _max:\n                            _max = poses3d[i][j * 3 + 2]\n                        if poses3d[i][j * 3 + 2] < _min:\n                            _min = poses3d[i][j * 3 + 2]\n\n                for i in range(poses3d.shape[0]):\n                    for j in range(32):\n                        poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min\n                        poses3d[i][j * 3] += (spine_x - 630)\n                        poses3d[i][j * 3 + 2] += (500 - spine_y)\n\n                # np.min(poses3d) is the score of the frame\n                if False:# FLAGS.cache_on_fail ;; TODO: add a rule to avoid inserting the keypoint\n                    if np.min(poses3d) < -1000:\n                        poses3d = before_pose\n\n                p3d = poses3d\n                x,y,z = [[] for _ in range(3)]\n                if poses3d is not None:\n                    to_export = poses3d.tolist()[0]\n                else:\n                    to_export = [0.0 for _ in range(96)]\n                for o in range(0, len(to_export), 3):\n                    x.append(to_export[o])\n                    y.append(to_export[o+1])\n                    z.append(to_export[o+2])\n\n                export_units = {}\n                for jnt_index, (_x, _y, _z) in enumerate(zip(x,y,z)):\n                    export_units[jnt_index] = [_x, _y, _z]\n                _3d_predictions.append(export_units)\n            return _3d_predictions\n","sub_path":"src/predict3d.py","file_name":"predict3d.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"561213549","text":"from tkinter import *\r\nroot = Tk()\r\nroot.title('Сумма чисел')\r\n\r\n\r\n\r\nEntryA = Entry(root, width=10, font='Arial 16')\r\nEntryB = Entry(root, width=10, font='Arial 16')\r\nEntryC = Entry(root, width=20, font='Arial 16')\r\n\r\n# place the first two entry fields to the right of the labels, in the second column (counting from zero)\r\nEntryA.grid(row=0, column=1, sticky=E)\r\nEntryB.grid(row=1, column=1, sticky=E)\r\n\r\n\r\n\r\n# the third text entry field spans the full width of its grid row\r\n# columnspan merges cells across columns; rowspan across rows\r\nEntryC.grid(row=3, columnspan=2)\r\n\r\n\r\n\r\ndef summa():\r\n    a = EntryA.get() # take the text from the first field\r\n    a = int(a) # convert it to an integer\r\n\r\n    b = EntryB.get() \r\n    b = int(b)\r\n\r\n    result = str(a + b) # convert the result to a string for output\r\n    EntryC.delete(0, END) # clear the text field completely\r\n    EntryC.insert(0, result) # insert the result at the beginning \r\n\r\n# place the button in the third row, second column \r\nbut = Button(root, text='Сложить!', command=summa)\r\nbut.grid(row=2, column=1, sticky=E)\r\n\r\ndef 
dvoich():\r\n\ta = EntryA.get()\r\n\ta = int(a)\r\n\r\n\tresult = (bin(a)[2:])\r\n\tEntryA.delete(0, END)\r\n\tEntryA.insert(0, result)\r\n\r\n\tb = EntryB.get()\r\n\tb = int(b)\r\n\r\n\tresult = (bin(b)[2:])\r\n\tEntryB.delete(0, END)\r\n\tEntryB.insert(0, result)\r\n\r\ndef vosmi():\r\n\ta = EntryA.get()\r\n\ta = int(a)\r\n\r\n\tresult = (oct(a)[2:])\r\n\tEntryA.delete(0, END)\r\n\tEntryA.insert(0, result)\r\n\r\n\tb = EntryB.get()\r\n\tb = int(b)\r\n\r\n\tresult = (oct(b)[2:])\r\n\tEntryB.delete(0, END)\r\n\tEntryB.insert(0, result)\r\n\r\ndef shesn():\r\n\ta = EntryA.get()\r\n\ta = int(a)\r\n\r\n\tresult = (hex(a)[2:])\r\n\tEntryA.delete(0, END)\r\n\tEntryA.insert(0, result)\r\n\r\n\tb = EntryB.get()\r\n\tb = int(b)\r\n\r\n\tresult = (hex(b)[2:])\r\n\tEntryB.delete(0, END)\r\n\tEntryB.insert(0, result)\r\n\r\nvar = IntVar()\r\nvar.set(0)\r\nRadiobutton(root, text=\"2-я система\", command=dvoich, variable=var, value=1).grid(row=0, sticky=W)\r\nRadiobutton(root, text=\"8-я система\", command=vosmi, variable=var, value=2).grid(row=1, sticky=W)\r\nRadiobutton(root, text=\"16-я система\", command=shesn, variable=var, value=3).grid(row=2, sticky=W)\r\n\r\n\r\nroot.mainloop()","sub_path":"system_programming/addition_numbers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"141293569","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 7 15:24:56 2018\n\n@author: kboosam\n\"\"\"\n'''\n@@ API TO CAPTURE THE DRIVING LICENSE DETAILS FROM GOOGLE VISION API\n\n'''\n# Importing libraries\n\n#import pandas as pd\nfrom flask import Flask, jsonify, request\nimport logging\nfrom flask_cors import CORS\n#import numpy as np\nfrom raven.contrib.flask import Sentry ## Sentry logging \n#import requests\nimport json\nimport http.client\n# Imports the Google Cloud client library\nfrom google.cloud import vision\nfrom google.cloud.vision import types\nimport io\nimport re, os\nfrom random import randint\nimport urllib.request as req\n\n\n##\n## FUNCTION TO CALL GOOGLE VISION API WITH THE DL IMAGE \n##\n\ndef DL_OCR_VISION(path):\n    \n    ret_text = '' ## initialize so the final return is safe even if the API call fails\n    try:\n        ## First download the file for Google Vision API call\n        img_loc = \"DL_tmp_\"+str(randint(100001, 199999))+\".jpg\"\n        req.urlretrieve(path, img_loc)\n        print('---> Image file downloaded at:', img_loc)\n        \n        client = vision.ImageAnnotatorClient()\n        ''' for remote image - it didn't work as google rejected accessing FB images\n        image = types.Image()\n        image.source.image_uri = path\n        '''\n        # THIS IS FOR LOCAL FILE \n        with io.open(img_loc, 'rb') as image_file:\n            content = image_file.read()\n        \n        image = types.Image(content=content)  \n        \n        response = client.text_detection(image=image)\n        texts = response.text_annotations\n        print('-------> Calling google vision API complete')\n        \n        ## Delete the downloaded image file\n        os.remove(img_loc)\n        \n        \n        #if response.error==:\n        #print('Texts:', texts)\n        for text in texts:\n            ret_text += text.description\n            #print(text , type(text))\n        \n        #ret_text.replace('\\n',' ') # replace new line characters\n        ret_text = ' '.join(ret_text.split())\n    except Exception as e:\n        print(e)\n        print('Error occurred while calling the google vision API - 105')\n    \n    return ret_text ## returns a string of all text from the driving license\n\n\n####\n### FUNCTION TO PARSE THE TEXTS returned from Vision API to a DL object\n####\n'''\nDL Object structure\n\n{\n DLN : ,\n DLN_valid: \n DOB : ,\n EXP_DT : ,\n address: {\n 
add_ln1: ,\n add_ln2: ,\n city: ,\n state: ,\n zip: \n },\n verified: \"valid address or not\"\n}\n'''\n\ndef parse_DL(full_text):\n    \n    print('full text - ', full_text)\n    ## Remove non-ascii characters that are inserted by google vision sometimes.\n    all_ascii = ''.join(char for char in full_text if ord(char) < 128)\n    \n    if full_text != all_ascii :\n        print('### ---- ### Non-ascii characters removed from text', all_ascii)\n        full_text = all_ascii\n    \n    state = ' ' ## Initialize\n    \n    if full_text.count('Texas') or full_text.count('TX') > 0 : state = 'TX'\n    \n    if full_text.count('Sunshine') > 0 and full_text.count('FL') : state='FL'\n    \n    if full_text.count('Jes') > 0 and full_text.count('White') : state = 'IL'\n    \n    if full_text.count('visitPA') > 0 : state='PA'\n    \n    if full_text.count('WISCON') > 0 : state='WI'\n    \n    if full_text.count('CALIF') > 0 : state='CA'\n    \n    if full_text.count('ALABAMA') > 0 : state='AL'\n    \n    if state in ['TX', 'PA', 'IL', 'WI']: \n        full_text = full_text.replace(' 1 ',' ') # replace FIELD LABELS\n        full_text = full_text.replace(' 2 ',' ') # replace FIELD LABELS\n        full_text = full_text.replace(' 8 ',' ') # replace FIELD LABELS\n        if state=='TX' : \n            full_text = full_text.replace(' 3 ',' ') \n            full_text = full_text.replace(' 4b ',' ') # replace FIELD LABELS\n        full_text = full_text.replace('\\n',' ')\n    else:\n        full_text = full_text.replace('\\n',' ') \n    \n    \n    #### Call Smarty Streets API to find address from text\n    try:\n        conn = http.client.HTTPSConnection(\"us-extract.api.smartystreets.com\")\n        \n        payload = full_text #send full text\n        \n        headers = {\n            'content-type': \"text/plain\",\n            'host': \"us-extract.api.smartystreets.com\",\n            'cache-control': \"no-cache\"\n            }\n        \n        conn.request(\"POST\", \"/?auth-id=eff0b523-c528-0292-6685-6ad2c5a6e92a&auth-token=V7pWleHG8yLUS8CC7NqQ\", payload, headers)\n        SSresp = conn.getresponse()\n        print('---->Call to SmartyStreets successful: ', SSresp)\n    except Exception as e: \n        print('###@@@@### Error occurred while calling the SmartyStreets API for address extraction')\n        print(e)\n        sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log\n    \n    \n    try:\n        SSresp = json.loads(SSresp.read())\n        print ('\\n\\n ---> Response from SmartyStreets', SSresp)\n        verified = SSresp['addresses'][0]['verified'] # address validity\n        if not verified : ## Checking if the address is valid\n            postal_address = {\n                \"add_ln1\":SSresp['addresses'][0]['text']\n                }\n            # when address is not valid we are just sending the identified address string in the line 1\n            print('Address on DL is invalid:', SSresp['addresses'][0]['text'] )\n        else:\n            #extract the address object\n            address = SSresp['addresses'][0]['api_output'][0]\n            \n            ## formulate address\n            postal_address = {\n                \"add_ln1\": address['delivery_line_1'],\n                \"add_ln2\": '',\n                \"city\": address['components']['city_name'],\n                \"state\": address['components']['state_abbreviation'],\n                \"zip\": address['components']['zipcode'] + '-' + address['components']['plus4_code']\n                }\n            \n            state = address['components']['state_abbreviation'] # get state code for all other work.\n        ### END OF IF ELSE STRUCTURE\n    except Exception as e:\n        print(e)\n        print('###@@@@### Error occurred while building address from SmartyStreets API response')\n        sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log\n    \n    ## make a continuous string without spaces by concatenating all individual texts from google\n    full_str = ''.join(full_text.split())\n    \n    print('---->Fetching DLN; Address state is:', 
state)\n    \n    # get DL number for IL\n    if state == 'IL':\n        # IL DLN format: X999-9999-9999\n        DLN = re.search('\\D\\d{3}-\\d{4}-\\d{4}', full_str).group(0)\n    \n    # get DL number for TX\n    if state == 'TX':\n        DLN = re.search('\\d{6}9', full_str).group(0)\n    \n    # get DL number for FL\n    if state == 'FL':\n        DLN = re.search('\\D\\d{3}-\\d{3}-\\d{2}-\\d{3}-\\d', full_str).group(0) # FL DLN is 17 digits\n    \n    # get DL number for PA\n    if state == 'PA':\n        DLN = re.search('DLN\\:\\d{8}', full_str).group(0)[4:] # PA DLN is 8 digits\n    # get DL number for WI\n    if state == 'WI':\n        DLN = re.search('\\D\\d{3}-\\d{4}-\\d{4}-\\d{2}', full_str).group(0) # WI DLN is 14 digits\n    \n    # get DL number for CA\n    if state == 'CA':\n        DLN = re.search('\\D\\d{7}', full_str).group(0) # CA DLN is 8 digits\n    \n    # get DL number for AL\n    if state == 'AL':\n        DLN = re.search('NO\\.\\d{7}', full_str).group(0)[3:] # AL DLN is 7 digits\n    \n    print('----> License Number: ', DLN)  \n    \n    \n    #### GET DOB and EXPIRY DATE\n    dtformat = True\n    DATES = re.findall('(\\\\d{1,2}/\\\\d{1,2}/\\\\d{4})', full_str) #date separator by slashes\n    if len(DATES) == 0: \n        dtformat = False\n        DATES = re.findall('(\\d{1,2}-\\d{1,2}-\\d{4})', full_str) # date separator as -\n        if len(DATES) == 0: raise Exception('dates not found on drivers license')\n    \n\t#remove duplicates from the dates. there are duplicates because full_text for some reason contains two copies\n    imp_DATES = []\n    for t_date in DATES:\n        if t_date not in imp_DATES:\n            imp_DATES.append(t_date)\n    \n    ###\n    ### TO CAPTURE Date of Birth and expiry date of the Driving license, SORT dates in ascending order\n    ### smallest date would be DOB and farthest date would be expiry date\n    ###\n    import datetime\n    DLN_valid = True\n    if dtformat : \n        imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m/%d/%Y'))\n        EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], \"%m/%d/%Y\")\n        DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## check if DL is still valid\n    else:\n        imp_DATES = sorted(imp_DATES, key=lambda x: datetime.datetime.strptime(x, '%m-%d-%Y'))\n        EXP_datetime = datetime.datetime.strptime(imp_DATES[-1], \"%m-%d-%Y\")\n        DLN_valid = False if EXP_datetime <= datetime.datetime.now() else True ## check if DL is still valid\n    \n    DOB = imp_DATES[0] ## oldest date will be DOB\n    EXP = imp_DATES[-1] ## Latest date will be Expiry date of DL\n    \n    print('----> DOB, EXPIRY: ', DOB, EXP)\n    \n    ret_obj = { \n        \"DLN\": DLN,\n        \"DLN_valid\": DLN_valid,\n        \"DL_State\": state,\n        \"DOB\": DOB,\n        \"EXP_DT\": EXP,\n        \"address\": postal_address,\n        \"verified\":verified\n        }\n    # end of else - Verified address\n    return ret_obj\n\n###\n#### function to build the response for CHATFUEL JSON API \n###\ndef build_resp(dlobj):\n    \n    try:\n        # build the Full response dictionary\n        if dlobj['DLN_valid'] :\n            if dlobj['verified']:### build success message, display details and show quick reply buttons\n                print(\"Good driving license \\n\")\n                resp_dict = {\n\t\t\t\t\t\t\t\"set_attributes\": {\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\"validDL\":\"YES\",\n\t\t\t\t\t\t\t\t\"validAddress\" : \"YES\",\n\t\t\t\t\t\t\t\t\"jsonAPIError\": \"NO\",\n\t\t\t\t\t\t\t\t\"DLN\" : dlobj['DLN'],\n\t\t\t\t\t\t\t\t\"DL_DOB\" : dlobj['DOB'],\n\t\t\t\t\t\t\t\t\"DL_EXP\":dlobj['EXP_DT'],\n\t\t\t\t\t\t\t\t\"DL_add_ln1\": dlobj['address']['add_ln1'],\n\t\t\t\t\t\t\t\t\"DL_add_ln2\": dlobj['address']['add_ln2'],\n\t\t\t\t\t\t\t\t\"DL_city\": dlobj['address']['city'],\n\t\t\t\t\t\t\t\t\"DL_state\": 
dlobj['address']['state'],\n\t\t\t\t\t\t\t\t\"DL_zip\": dlobj['address']['zip'] \n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"messages\": [\n\t\t\t\t\t\t\t\t\t\t\t{ \n\t\t\t\t\t\t\t\t\t\t\t\"text\": \"We have scanned the drivers license you provided. Please confirm the below details\" \n\t\t\t\t\t\t\t\t\t\t },\n\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\t\t\t\t\t\t{ \n\t\t\t\t\t\t\t\t\t\t\t\"text\": \"DL Number: \" + dlobj['DLN']\n\t\t\t\t\t\t\t\t\t\t },\n\t\t\t\t\t\t\t\t\t\t\t{ \n\t\t\t\t\t\t\t\t\t\t\t\"text\": \"Date of Birth: \" + dlobj['DOB']\n\t\t\t\t\t\t\t\t\t\t },\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t{ \n\t\t\t\t\t\t\t\t\t\t\t\"text\": \"DL Validity: \" + dlobj['EXP_DT']\n\t\t\t\t\t\t\t\t\t\t },\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t{ \n\t\t\t\t\t\t\t\t\t\t\t\"text\": \"Address: \" + dlobj['address']['add_ln1'] + ',\\n' + dlobj['address']['add_ln2'] + ',\\n' + dlobj['address']['city'] + ', ' + dlobj['address']['state'] + ' ' + dlobj['address']['zip'] \t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t } \n \n \t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t}\n else:\n \t\t\t\t### Address could not be verified...\n print(\"DL Address is not confirmed as valid \\n\")\n resp_dict = {\n\t\t\t\t\t\t\t\"set_attributes\": {\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\"validDL\":\"YES\",\n\t\t\t\t\t\t\t\t\"validAddress\" : \"NO\",\n \"jsonAPIError\": \"NO\"\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\"messages\": [\n\t\t\t\t\t\t\t\t\t\t {\n\t\t\t\t\t\t\t\t\t\t \"text\": \"Thanks for providing the DL image. \" \n\t\t\t\t\t\t\t\t\t\t },\n\t\t\t\t\t\t\t\t\t\t { \n\t\t\t\t\t\t\t\t\t\t \"text\": \"We could not validate the address. I will let our representative contact you within 24 hours, to process your request appropriately.\" \n\t\t\t\t\t\t\t\t\t\t }\n\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t}\n else:\n \t\t\t### DL Expired\n print(\"Driving license has expired!!! \\n\")\n resp_dict = {\n \t\t\t\t\t\t\"set_attributes\": {\n \t\t\t\t\t\t\t\n \t\t\t\t\t\t\t\"validDL\":\"NO\",\n \t\t\t\t\t\t\t\"validAddress\" : \"NO\" if not dlobj['verified'] else \"YES\",\n \"jsonAPIError\": \"NO\"\n \t\t\t\t\t\t},\n \t\t\t\t\t\t\n \t\t\t\t\t\t\"messages\": [\n \t\t\t\t\t\t\t\t\t {\n \t\t\t\t\t\t\t\t\t \"text\": \"Thanks for providing the DL image. \" \n \t\t\t\t\t\t\t\t\t },\n \t\t\t\t\t\t\t\t\t { \n \t\t\t\t\t\t\t\t\t \"text\": \"We observed an issue with the document provided. 
I will let our representative contact you within 24 hours, to process your request appropriately.\" \n \t\t\t\t\t\t\t\t\t }\n \t\t\t\t\t\t\t\t\t] \t\t\t\t\t\t\n \t\t\t\t\t\t}\n except Exception as e:\n print(e)\n print('###@@@@### Error occured by building response dictionary object')\n sentry.captureMessage(message=e, level=logging.FATAL)\n resp_dict = {\n \"set_attributes\": {\n \t\t\t\t\t\t\t\n \t\t\t\t\t\t\t\"jsonAPIError\": \"YES\"\n \t\t\t\t\t\t},\n \"messages\": [\n {\"text\": \"An error occurred while fetching the details for your drivers license - 104.\"}\n ]\n }\n \n return resp_dict;\n\n##### END OF FUNCTION - build_resp\n###################################################################\n\napp = Flask(__name__)\n#set sentry for logging the messages\nsentry = Sentry(app, dsn='https://e8ddaf32cc924aa295b846f4947a9332:5e52d48fe13a4d2c82babe6833c5f871@sentry.io/273115')\nCORS(app) ## cross origin resource whitelisting..\n\n## dl ocr api on flask\n@app.route('/dlocr_api', methods=['POST','GET'])\ndef get_DL():\n\n \"\"\"API Call\n Pandas dataframe (sent as a payload) from API Call\n \"\"\"\n #print(\"\\n\\n Started processing the GET request..\\n\")\n\n ##################\n # REQUEST STRCUTRE\n # imgurl\n ################# \n \n try: \n #req = request.json\n img_path = request.args.get('imgurl', type= str)\n \n print(\"##This is the request:\", request.args , '\\n\\n') \n \n #print(\"##This is the request JSON:\", str(request.get_json()), '\\n\\n')\n sentry.captureMessage(message='Started processing request- {}'.format(img_path), level=logging.INFO)\n \n except Exception as e:\n print(e)\n sentry.captureMessage(message=e, level=logging.FATAL)\n resp = {\n \"set_attributes\": {\n \t\t\t\t\t\t\t\n \t\t\t\t\t\t\t\"jsonAPIError\": \"YES\"\n \t\t\t\t\t\t},\n \n \"messages\": [\n {\"text\": \"An error occurred while fetching the DL image details for your vehicle - 102.\"},\n ]\n }\n\n try:\n #img_path = \"DL Tests\\illinois-DL.jpg\"\n # call google vision API\n DL_Text = DL_OCR_VISION(img_path)\n \n #parse to DL objects\n dlobj = parse_DL(DL_Text)\n print ('Parsed DL Info:', dlobj)\n #build response structure\n resp = build_resp(dlobj)\n #resp = dlobj\n #sentry.captureMessage(message='completed processing the DL OCR: {}'.format(dlobj['DLN']), level=logging.INFO)\n \n except Exception as e:\n print(e)\n sentry.captureMessage(message=e, level=logging.FATAL) #printing all exceptions to the log\n resp = {\n \"set_attributes\": {\n \t\t\t\t\t\t\t\n \t\t\t\t\t\t\t\"jsonAPIError\": \"YES\"\n \t\t\t\t\t\t},\n \"messages\": [\n {\"text\": \"An error occurred while fetching the details for your drivers license - 103.\"},\n ]\n }\n \n print (\"--- Response -->\", resp) \n return jsonify(resp)\n#### END OF function\n\n# main function\nif __name__ == '__main__':\n ## DISABLE CERITIFACATE VERIFICATION FOR SSL.. 
some issue in Capgemini network..\n    '''\n    try:\n        _create_unverified_https_context = ssl._create_unverified_context\n    except AttributeError:\n        # Legacy Python that doesn't verify HTTPS certificates by default\n        pass\n    else:\n        # Handle target environment that doesn't support HTTPS verification\n        ssl._create_default_https_context = _create_unverified_https_context\n    ''' \n    sentry.captureMessage('Started running API for DL OCR !!')\n    #app.run(debug= True)\n    app.run(debug=True,port=5100) # turn off debug for production deployment\n\n\n","sub_path":"dl_ocr_API.py","file_name":"dl_ocr_API.py","file_ext":"py","file_size_in_byte":16214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"625414636","text":"import numpy as np\nimport pandas as pd\nimport random\nimport math\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import normalized_mutual_info_score\nfrom collections import Counter\nfrom copy import deepcopy\nfrom scipy.stats import multivariate_normal\n\n\"\"\"\nsteps:\n1) Normalize data\n2) get initial centroids\n3) calc dist of every feature from each centroid\n4) recalculate centroids until the change is less than the tolerance\n\"\"\"\n\n\"\"\"\nSetting up all parameter values:\n\"\"\"\nK = 10\nmax_iterations = 100\ntolerance = 0.1\n\n###################\n\nfile_name = \"yeastData.csv\"\n#step 1 : get data\n#reading data in nd-Arrays\ndata = np.genfromtxt (file_name, delimiter=\",\")\n# data = np.genfromtxt ('ecoliData.csv', delimiter=\",\")\n\n#shuffle data\nnp.random.shuffle(data)\n\n# nd array of Features\nX = data[:, :-1].astype(np.float) #366 x 34\n\n#nd array of Labels\nY = data[:, -1].flatten() # 366 X 1\n#step 2: Normalizing data\n# scaler = StandardScaler()\n# scaler.fit(X_feature)\n# scaler.transform(X_feature) #now we have scaled X and is normalized\n\n#step 3: initialize centroids with random values\ndef getCentroids(K):\n    centroids = np.empty(shape=[K, X.shape[1]])\n    for i in range(K):\n        loc = random.randint(0,X.shape[0]-1)\n        centroids[i] = X[loc]\n\n    return centroids\n\ndef plotGraph(x,y, name):\n    plt.figure()\n    plt.ylabel('score')\n    plt.xlabel('K')\n    plt.plot(x, y)\n    plt.savefig(name + \".png\")\n\ndef hy_vals(Y):\n    hy = 0\n    total = len(Y)\n    counts = dict(Counter(Y))\n    for c in counts:\n        p = counts[c]/total\n\n        hy += (-1.0 *p) * math.log(p,2)\n\n    return hy\n\ndef hc_values(pi, Y):\n    hc = 0\n    total = len(Y)\n\n    # for i in range(z.shape[1]):\n    #     p = np.sum(z[:,i])/total\n    #     hc += (-1.0 *p) * math.log(p,2)\n\n    for i in range(len(pi)):\n        hc += (-1 * pi[i]) * math.log(pi[i], 2)\n\n    return hc\n\ndef hyc_values(z,Y, pi):\n    hyc = []\n    for j in range(len(pi)):\n        pc = pi[j]\n        # items in cluster j\n        items_in_cluster = []\n        for i in range(z.shape[0]):\n            if z[i,j] > 0:\n                items_in_cluster.append(Y[i])\n        cluster_size = len(items_in_cluster)\n        counts = dict(Counter(items_in_cluster))\n        sum_hy = 0\n        for c in counts:\n            py = counts[c]/cluster_size\n            sum_hy += py * math.log(py,2.0)\n        val = (-1.0 * pc) * sum_hy\n        hyc.append(val)\n\n    return np.sum(hyc)\n\n\ndef calc_nmi(z, Y, hy, pi):\n    \n    hc = hc_values(pi, Y)\n    \n    \n    hyc = hyc_values(z, Y, pi)\n    \n    I = hy - hyc\n\n    nmi_score = (2.0 * I)/(hy + hc)\n    return nmi_score\n\n\n\ndef expectation(X,centroids, covar, pi, K):\n\n    gamma = np.zeros(shape=[X.shape[0], K])\n\n    for n in range(X.shape[0]):\n        den = 0\n        for j in range(K):\n            \n            den += pi[j] * 
multivariate_normal.pdf(X[n], mean=centroids[j], cov=covar[j], allow_singular=True)\n        for k in range(K):\n            \n            num = pi[k] * multivariate_normal.pdf(X[n], mean=centroids[k], cov=covar[k], allow_singular=True)\n\n            gamma[n,k] = num / den\n\n    return gamma\n\ndef maximization(X,centroids, covar, pi, K, gamma):\n    Nk = []\n    for k in range(K):\n        nk = np.sum(gamma[:,k], axis=0)\n        Nk.append(nk)\n\n    for k in range(K):\n        # weighted mean of the data, normalized by the effective count Nk\n        mu_new = np.zeros(shape=[X.shape[1]])\n        for n in range(X.shape[0]):\n            mu_new += gamma[n,k] * X[n]\n        mu_new = mu_new / Nk[k]\n\n        # weighted covariance around the updated mean, also normalized by Nk\n        covar_new = np.zeros(shape=[X.shape[1],X.shape[1]])\n        for n in range(X.shape[0]):\n            val = (X[n] - mu_new).reshape(-1, 1)\n            covar_new += gamma[n,k] * np.dot(val, val.T)\n        covar_new = covar_new / Nk[k]\n\n        centroids[k] = mu_new\n        covar[k] = covar_new\n        pi[k] = Nk[k]/X.shape[0]\n\n    return centroids, covar, pi\n    \n\n\n    \nhy = hy_vals(Y)\nsse_list = []\nnmi_list = []\nfor temp in range(1,K):\n    k = temp + 1\n    # initialization\n    centroids = getCentroids(k)\n\n    \n    obj_old = 0\n    best_z = np.zeros(shape=[X.shape[0], k])\n    for iters in range(max_iterations):\n        J = 0\n        z = np.zeros(shape=[X.shape[0], k])\n        #create z matrix\n        dist = np.zeros(shape=[X.shape[0], k])\n        for i in range(X.shape[0]):\n            \n            for j in range(k):\n\n                dist[i,j] = np.linalg.norm(X[i] - centroids[j], axis = 0)\n            \n            min_index = np.argmin(dist[i])\n            z[i, min_index] = 1\n\n        c_old = deepcopy(centroids)\n        \n        for i in range(k):\n            count = 0\n            points = np.zeros(shape=[1, X.shape[1]])\n            for j in range(X.shape[0]): # start at 0 so the first sample is not skipped\n                if z[j,i] == 1:\n                    if count == 0:\n                        points = X[j]\n                    else:\n                        points = np.vstack((points, X[j]))\n                    count += 1\n            centroids[i] = np.mean(points, axis=0)\n        \n        \n        error = np.linalg.norm(c_old - centroids, axis=0)\n        best_z = z\n        if error.all() == 0:\n            break\n\n\n    # till here we ran k-means and we have got our initial centroids and Z matrix\n\n    #INITIAL SET UP\n    pi = []\n    covar = []\n    #initialize covariance:\n    for j in range(k):\n        p = 0\n        total = len(best_z[:,j])\n        points = np.zeros(shape=[1, X.shape[1]])\n        for i in range(X.shape[0]): # start at 0 so the first sample is not skipped\n            if best_z[i,j] == 1:\n                if p == 0:\n                    points = X[i]\n                else:\n                    points = np.vstack((points, X[i]))\n                p += 1\n\n        pi.append(p/total)\n        covar.append(np.cov(points.T))\n\n\n    # now we have initialized all the parameters\n    # gamma = np.zeros(shape=[X.shape[0], k])\n\n    #expectation step\n    best_gamma = np.zeros(shape=[X.shape[0], k])\n    ll_old = 0\n    for iters in range(25):\n        \n        gamma = expectation(X,centroids, covar, pi, k)\n        c_old = deepcopy(centroids)\n        centroids, covar, pi = maximization(X,centroids, covar, pi, k, gamma)\n        best_gamma = gamma # keep the latest responsibilities even if EM never converges below tolerance\n\n        # error = np.linalg.norm(c_old - centroids, axis=0)\n        # best_gamma = gamma\n        # if error.all() < tolerance:\n        #     break\n\n        ll_new = 0.0\n        for i in range(X.shape[0]):\n            s = 0\n            for j in range(k):\n                s += pi[j] * multivariate_normal(mean=centroids[j], cov=covar[j], allow_singular=True).pdf(X[i])\n            ll_new += np.log(s)\n        if np.abs(ll_new - ll_old) < tolerance:\n            break\n        ll_old = ll_new\n\n\n    \n    sse = 0\n    for j in range(k):\n        for i in range(X.shape[0]):\n            sse += best_gamma[i,j] * np.linalg.norm(X[i] - centroids[j], axis = 0)\n\n    nmi = calc_nmi(best_gamma, Y, hy, pi) \n    # nmi = sse\n    print(\"--------------------\")\n    print(\"iteration = \",k)\n    print(\"sse score = \", sse)\n    print(\"nmi score = \", nmi)\n    # print(k,sse)\n    sse_list.append(sse)\n    nmi_list.append(nmi)\n\n\nx_axis = range(2,K+1)\nplotGraph(x_axis, sse_list, \"gmm_sse_score_\" + file_name)\nplotGraph(x_axis, nmi_list, \"gmm_nmi_score_\" + file_name)\n 
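# NOTE (added comment): intended flow of the script above, as far as can be inferred from the\n# code: k-means seeds each mixture (means, mixing weights pi, covariances), EM refines them\n# until the log-likelihood change drops below 'tolerance', and SSE/NMI are recorded per K\n# for the two plots saved above.\n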
\n","sub_path":"Assignment5/gmm.py","file_name":"gmm.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"595104166","text":"from Quest.Event import Event\nfrom Quest.Condition import Condition\nfrom Quest.Conditions.EntityExists import EntityExists\nfrom Quest.Conditions.PlaceExists import PlaceExists\nfrom Quest.Conditions.CurrentPlayerDirectory import CurrentPlayerDirectory\nfrom Quest.Conditions.CurrentDialogueChoice import CurrentDialogueChoice\nfrom Quest.Events.CreateEvents import *\nfrom Quest.Events.RemoveEvents import *\nfrom Quest.Events.DialogueEvents import *\nfrom Quest.Events.MoveEntity import MoveEntity\nfrom Entity.Items.Common import *\nfrom Entity.Character import *\nfrom Entity.Characteristic import *\nfrom enum import Enum\n\nclass QuestStatus(Enum):\n UNAVAILABLE = 0 # Cannot be started\n AVAILABLE = 1 # Can be started (unlocked by other quests)\n STARTED = 2\n ENDED = 3\n\nclass Quest :\n def __init__(self, filename) :\n evalSeparator = '|'\n\n with open(\"Quests/\"+filename, encoding=\"UTF-8\") as file:\n self.status = QuestStatus.UNAVAILABLE\n self.onStart = []\n self.onResolve = []\n self.steps = []\n self.conditions = []\n self.next = []\n\n\n for line in file.readlines():\n parts = line.split(':')\n attr = parts.pop(0)\n left = (\":\".join(parts)).strip()\n \n if(attr == \"name\"):\n if(left != 'none'):\n self.name = left.split('\\n')[0]\n else:\n raise Exception\n continue\n if(attr == \"description\"):\n if(left != 'none'):\n self.description = left.split('\\n')[0]\n continue\n if(attr == \"onStart\"):\n if(left != 'none'):\n for event in left.split(evalSeparator):\n self.onStart.append(eval(event))\n if(attr == \"onResolve\"):\n if(left != 'none'):\n for event in left.split(evalSeparator):\n self.onResolve.append(eval(event))\n if(attr == \"steps\"):\n if(left != 'none'):\n for quest in left.split(evalSeparator):\n self.steps.append(quest)\n if(attr == \"conditions\"):\n if(left != 'none'):\n for condition in left.split(evalSeparator):\n self.conditions.append(condition)\n if(attr == \"next\"):\n if(left != 'none'):\n for quest in left.split(evalSeparator):\n self.next.append(quest)\n continue\n\n def setAvailable(self):\n self.status = QuestStatus.AVAILABLE\n\n def evalConditions(self):\n evals = []\n for condition in self.conditions:\n invert = False\n toEval = condition\n\n sp = condition.split(' ')\n check = sp[0]\n if(check == \"not\"):\n invert = True\n sp.pop(0)\n toEval = \"\".join(sp)\n\n cond = eval(toEval)\n met = cond.met()\n evals.append((not invert and met) or (invert and not met))\n \n for ev in evals:\n if not ev:\n return False\n return True\n\n def isResolved(self):\n if self.status == QuestStatus.ENDED:\n return True\n if not self.evalConditions():\n return False\n \n for step in self.steps:\n if not step.isResolved():\n return False\n\n return True\n\n def tryResolve(self):\n if not self.status == QuestStatus.ENDED and self.isResolved():\n nextQuests = self.resolve()\n return True,nextQuests\n return False,[]\n\n def start(self):\n if self.status == QuestStatus.AVAILABLE:\n self.status = QuestStatus.STARTED\n for event in self.onStart:\n event.do()\n\n def resolve(self):\n nextQuests = []\n\n if self.status == QuestStatus.STARTED:\n self.status = QuestStatus.ENDED\n\n print(u\"\\u001b[32mQuête complétée : \\u001b[0m\"+self.name)\n\n for event in self.onResolve:\n event.do()\n\n for quest in self.next:\n nextQuests.append(quest)\n \n return 
nextQuests\n\n    \n    \n","sub_path":"game/Quest/Quest.py","file_name":"Quest.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"416797820","text":"import os\nimport subprocess\n\nfrom pprint import pprint,pformat\n\nclass SnippyUtils:\n\n    def __init__(self):\n        self.callbackURL = os.environ['SDK_CALLBACK_URL']\n        pass  \n\n    def build_snippy_command(self, genome_file, output_dir):\n        outpath = \"/kb/module/work/tmp/\"\n        command = \"/kb/module/deps/snippy/bin/snippy --outdir \"+ output_dir +\" --ref \"+ genome_file + \" --R1 \"+ outpath + \"f.fastq --R2 \" + outpath + \"r.fastq\" \n        return command\n\n    def run_snippy_command(self, command):\n        os.system(command)\n\n    def deinterleave(self, fastq_file):\n        path = \"/kb/module/work/tmp/\"\n        fastq_1 = open(path + \"r.fastq\",'w')\n        fastq_2 = open(path + \"f.fastq\",'w')\n        [fastq_1.write(line) if (i % 8 < 4) else fastq_2.write(line) for i, line in enumerate(open(fastq_file))]\n        fastq_1.close()\n        fastq_2.close()\n        \n        #print(\"bash /kb/module/deps/deinterleave.sh > \"+ fastq_file + \" /kb/module/work/tmp/f.fastq /kb/module/work/tmp/r.fastq\")\n        #os.system(\"bash /kb/module/deps/deinterleave.sh > \"+ fastq_file + \" /kb/module/work/tmp/f.fastq /kb/module/work/tmp/r.fastq\")\n\n#su = SnippyUtils()\n#su.deinterleave(\"/home/manish/Desktop/VariationAnalyzer/test_local/workdir/tmp/7b7fa138-4fee-47ae-83e4-d23f7481dc8d.inter.fastq\")\n","sub_path":"lib/VariationAnalyzer/Utils/SnippyUtils.py","file_name":"SnippyUtils.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"500077905","text":"# The rice cake seller and the tiger\nfrom sys import stdin\n\ndef dfs(prev_ricecake_type, days):\n    if days == n:\n        for a in answer: print(a)\n        exit(0)\n\n    for i in range(1, sell_schedule[days][0] + 1):\n        ricecake_type = sell_schedule[days][i]\n\n        if not visited[days][i] and (prev_ricecake_type != ricecake_type):\n            visited[days][i] = True\n            answer.append(ricecake_type)\n            dfs(ricecake_type, days+1)\n            answer.pop()\n\nanswer = list()\n\nn = int(input())\nsell_schedule = [list(map(int, stdin.readline().split())) for _ in range(n)]\nvisited = [[False for _ in range(10)] for _ in range(1000)]\n\ndfs(0, 0)\n\nprint(-1)\n","sub_path":"python/algorithm/baekjoon16432_RiceCakeSellerAndTiger.py","file_name":"baekjoon16432_RiceCakeSellerAndTiger.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"14705098","text":"import numpy as np\nimport random\n\nclass WordCreator(object):\n    def __init__(self, filename=None, words=None):\n        \"\"\"\n        Initializes variables in WordCreator object\n        Creates Transition matrix and normalizes it\n        Takes string path (filename), or string (words)\n        \"\"\"\n\n        #Check whether input was text file or list of words\n        if filename:\n            self.txtFile = open(filename, 'r')\n            self.words = None\n        else:\n            self.txtFile = None\n            self.words = words\n\n        #Initialize attributes\n        self.transitions = np.zeros([256, 256])\n        self.sumList = []\n        self.minLetters = self.getMinLetters()\n        self.maxLetters = self.getMaxLetters()\n\n        #Run through training set and assign probabilities\n        self.setTransitions()\n\n    def getMaxLetters(self):\n        \"\"\"\n        Returns the max number of letters in a word in the text file\n        \"\"\"\n\n        currentMax = 0\n\n        #If reading from text file\n        if self.txtFile:\n            self.txtFile.seek(0) #Go to beginning of file\n            #Loop 
through file\n for word in self.txtFile:\n #Find longest word\n if len(word) >= currentMax:\n currentMax = len(word)\n else:\n currentMax = 15\n #Else read from word list\n # else:\n # #Loop through list\n # for word in self.words:\n # #Find longest word\n # if len(word) >= currentMax:\n # currentMax = len(word)\n # #Return length of longest word\n return currentMax\n\n def getMinLetters(self):\n \"\"\"\n Returns the minimum number of letters in a word in the text file\n \"\"\"\n\n currentMin = 10000 #Arbitrarily large starting minimum\n #If using text file\n if self.txtFile:\n #Go to beginning of text file\n self.txtFile.seek(0)\n #Loop through file\n for word in self.txtFile:\n #Find biggest word\n if len(word) <= currentMin:\n currentMin = len(word)\n #Else use word list\n # else:\n # #Loop through word list\n # for word in self.words:\n # #Find shortest word\n # if len(word) <= currentMin:\n # currentMin = len(word)\n #return shortest word length\n else:\n currentMin = 3\n\n return currentMin\n\n def setTransitions(self):\n \"\"\"\n Sets the transitions attribute\n Loops through the text file and finds probabilities that\n letters transition into other letters\n Index m, n represents the probability that m transitions to n\n Index 27 represents a space input\n \"\"\"\n #If using text file\n if self.txtFile:\n #Go to beginning of file\n self.txtFile.seek(0)\n #Loop through words\n for line in self.txtFile:\n line = line.lower()\n #Increment transition matrix ith letter, i+1th letter by 1\n for i in range(len(line)-1):\n xInd = ord(line[i])\n yInd = ord(line[i+1])\n if not(xInd > 255 or yInd > 255):\n self.transitions[xInd][yInd] += 1\n #Else use word list\n if self.words:\n #Loop through words\n for i in range(len(self.words)-1):\n #Increment transition matrix ith letter, i+1th letter by 1\n xInd = ord(self.words[i])\n yInd = ord(self.words[i+1])\n if not(xInd > 255 or yInd > 255):\n self.transitions[xInd][yInd] += 1\n #Normalize Matrix and get totals of each character\n self.normalizeTransitions()\n\n def normalizeTransitions(self):\n \"\"\"\n Adds up a row of transitions and divides it by the sum\n Adds sum to the list of sums (sumList)\n \"\"\"\n #Reinitialize list of character appearances\n self.sumList = []\n\n #Loop through each row of Transition Matrix\n for i in range(len(self.transitions)):\n #Normalize row and add sum to the list of character appearances\n self.transitions[i], summation = self.norm(self.transitions[i])\n self.sumList.append(summation)\n\n def norm(self, lst):\n \"\"\"\n Receives a list (lst) argument\n Normalizes the list\n Returns normalized list and sum of the original entities\n \"\"\"\n #Initialize current sum and make copy of the input list\n summation = 0\n tempList = self.copyList(lst)\n\n summation = self.getSum(lst)\n\n #Loop through list again, dividing each entry by the sum of all entries\n for i in range(len(tempList)):\n tempList[i] = tempList[i]/summation\n\n #Return normalized list, and the sum of the original values\n return tempList, summation\n\n def getSum(self, lst):\n \"\"\"\n Receives list of numbers\n Returns sum of the list\n \"\"\"\n #Initialize sum\n summation = 0\n #Loop through list, adding up values\n for i in lst:\n summation += i\n\n #Return final sum\n return summation\n\n def copyList(self, lst):\n \"\"\"\n Receives list (lst)\n Returns copy of list (copy)\n Exists so attributes don't get changed in methods unless\n otherwise specified\n \"\"\"\n\n #Initialize copy\n copy = []\n\n #Add all elements of lst to copy\n for i in 
lst:\n copy.append(i)\n\n #return copy\n return copy\n\n\n def getRanges(self, probList, summation):\n \"\"\"\n Turns list of probabilities of transition into ranges\n for random numbers.\n Receives probability list (probList) and\n sum of original values (summation)\n Returns list of selection ranges (ranges) and their indexes\n in the original list (indices)\n \"\"\"\n #Initialize method variables\n lastVal = 0\n tempList = self.copyList(probList)\n ranges = []\n indices = []\n\n #Loop through list of probabilities\n for i in range(len(tempList)):\n #If the current index is non-zero\n if tempList[i]:\n #Un-Normalize the value\n tempList[i] = tempList[i] * summation\n\n #Add the running sum to it\n ranges.append(lastVal + tempList[i])\n\n #Append it to the return list\n indices.append(i)\n\n #Add the value to un-normed value to the running sum\n lastVal += tempList[i]\n\n #Return the list of ranges and the original indices of the ranges\n return ranges, indices\n\n\n def getWeightedLetter(self, probList, inSum):\n \"\"\"\n Receives list of probabilities (probList) and sum of original entities\n of the list\n Generates random letter based on how the letter is weighted in the list\n returns that number\n \"\"\"\n print(inSum)\n #Get probability ranges and their indices\n ranges, indices = self.getRanges(probList, inSum)\n randLetterIndex = random.randint(1, inSum)\n lastVal = 0\n #Loop through values in ranges\n for i in range(len(ranges)):\n #If random integer is between the previous and current range\n if lastVal < randLetterIndex and randLetterIndex <= ranges[i]:\n #return the character at that index\n return chr(indices[i])\n #Reinitialize last value\n lastVal = ranges[i]\n\n\n def genWord(self, n):\n \"\"\"\n Generates random word based on transition list of length n\n Returns a word (word)\n \"\"\"\n #Initiealize method variables\n word = []\n wordLen = n\n\n #Create normalized list of sums and get the total characters\n normList, totalChar = self.norm(self.sumList)\n\n #Get a letter based on the frequency of letters in the set\n word.append(self.getWeightedLetter(normList, totalChar))\n\n #loop until you hit the end of the word length\n for i in range(wordLen-1):\n #append random character based on the previous character\n letterIndex = ord(word[i])\n flag = True\n subtractor = 1\n while flag:\n if self.sumList[letterIndex]:\n word.append(self.getWeightedLetter(self.transitions[letterIndex], self.sumList[letterIndex]))\n flag = False\n else:\n letterIndex = ord(word[i-subtractor])\n subtractor+=1\n\n\n\n #return word as a string\n return ''.join(word)\n\n def genRandWord(self):\n \"\"\"\n Generates random word based on transition list\n Returns a word (word)\n Same as genWord(), but with a random word length\n \"\"\"\n wordLen = random.randint(self.minLetters, self.maxLetters)\n word = []\n normList, totalChar = self.norm(self.sumList)\n word.append(self.getWeightedLetter(normList, totalChar))\n for i in range(wordLen-1):\n letterIndex = ord(word[i])\n word.append(self.getWeightedLetter(self.transitions[letterIndex], self.sumList[letterIndex]))\n return ''.join(word)\n\n def __main__(self):\n \"\"\"\n Main function. 
Generates random word\n \"\"\"\n\n print(\"Generated phrase is %s\" % self.genRandWord())\n","sub_path":"wordCreator.py","file_name":"wordCreator.py","file_ext":"py","file_size_in_byte":9615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"29441396","text":"import random\n\nclass Matrix:\n\n def __init__(self, rows, cols):\n self.rows = rows\n self.cols = cols\n self.data = []\n \n vector = []\n for i in range(0, rows):\n for j in range(0, cols):\n vector.append(0)\n self.data.append(vector)\n vector = []\n\n\n def add(self, n):\n if isinstance(n, Matrix):\n for i in range(0, self.rows):\n for j in range(0, self.cols):\n self.data[i][j] += n.data[i][j]\n else: \n for i in range(0, self.rows):\n for j in range(0, self.cols):\n self.data[i][j] += n\n \n\n def scalarMultiply(self, n):\n for i in range(0, self.rows):\n for j in range(0, self.cols):\n self.data[i][j] *= n\n\n \n def map(self, fn):\n # Apply a function to every element of matrix\n for i in range(0, self.rows):\n for j in range(0, self.cols):\n val = self.data[i][j]\n self.data[i][j] = fn(val)\n\n\n def randomize(self):\n for i in range(0, self.rows):\n for j in range(0, self.cols):\n self.data[i][j] = random.uniform(0, 1)\n\n\n @staticmethod\n def mapMatrix(matrix, fn):\n result = Matrix(matrix.rows, matrix.cols)\n for i in range(0, result.rows):\n for j in range(0, result.cols):\n val = matrix.data[i][j]\n result.data[i][j] = fn(val)\n return result\n \n @staticmethod\n def transpose(matrix):\n result = Matrix(matrix.cols, matrix.rows)\n for i in range(0, matrix.rows):\n for j in range(0, matrix.cols):\n result.data[j][i] = matrix.data[i][j]\n return result\n\n\n @staticmethod\n def subtract(a, b):\n # Return a new Matrix A-B\n result = Matrix(a.rows, a.cols)\n for i in range(0, result.rows):\n for j in range(0, result.cols):\n result.data[i][j] = a.data[i][j] - b.data[i][j]\n\n return result\n \n\n\n @staticmethod\n def fromArray(arr):\n m = Matrix(arr.__len__(), 1)\n for i in range(0, arr.__len__()):\n m.data[i][0] = arr[i]\n \n return m\n\n\n @staticmethod\n def toArray(matrix):\n if matrix.cols is 1:\n arr = []\n for i in range(0, matrix.data.__len__()):\n arr.append(matrix.data[i][0])\n return arr\n else:\n print(\"Too many cols, cannot convert\")\n return None\n\n\n @staticmethod\n def matrixMultiply(a, b):\n result = Matrix(a.rows, b.cols)\n for i in range(0, result.rows):\n for j in range(0, result.cols):\n sum = 0\n for k in range(0, a.cols):\n sum += a.data[i][k] * b.data[k][j]\n result.data[i][j] = sum\n return result\n\n\n def __str__(self):\n string = \"\"\n for array in self.data:\n string += (array.__str__() + \"\\n\")\n\n return string\n ","sub_path":"Neural_Networks/2-Multilayered_Perceptron/Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"154940187","text":"def reconstruct_trip(tickets):\n tix = {}\n route = ['']* (len(tickets)-1)\n for ticket in tickets:\n tix[ticket[0]] = ticket[1]\n if ticket[0] is None:\n route[0] = ticket[1]\n for i in range(1, len(route)):\n if route[i-1] in tix:\n route[i] = tix[route[i-1]]\n else:\n return []\n return route\n\n\ntickets = [\n ('PIT', 'ORD'),\n ('XNA', 'CID'),\n ('SFO', 'BHM'),\n ('FLG', 'XNA'),\n (None, 'LAX'), \n ('LAX', 'SFO'),\n ('CID', 'SLC'),\n ('ORD', None),\n ('SLC', 'PIT'),\n ('BHM', 'FLG'),\n]\nreconstruct_trip(tickets)\n\nif __name__ == '__main__':\n # You can write code 
here to test your implementation using the Python repl\n    pass\n","sub_path":"hash-tables/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"429212940","text":"import time\r\nimport serial\r\n\r\n\r\n\r\ndef convert_btw_lines(x, y):\r\n    x = (x*2)+1\r\n    y = (y*2)+1\r\n    xy = [x, y]\r\n    #print(xy)\r\n    return xy\r\n\r\n#types: type1 = movement in x, type2 = movement in y,\r\n#type3 = movement in both y and x\r\n\r\n\r\ndef determinate_type(coord1, coord2):\r\n\tif (coord1[0] != coord2[0] and coord1[1] != coord2[1]):\r\n\t\tty = 3\r\n\telif (coord1[0] != coord2[0] and coord1[1] == coord2[1]):\r\n\t\tty = 1\r\n\telif (coord1[0] == coord2[0] and coord1[1] != coord2[1]):\r\n\t\tty = 2\r\n\treturn ty\r\n\r\n\r\n#def wait_for_answer():\r\n#\tser = serial.Serial('/dev/ttyACM0', baudrate=115200) #Tried with and without the last 3 parameters, and also at 1Mbps, same happens.\r\n#\tser.flushInput()\r\n#\tser.flushOutput()\r\n#\tdata_raw = ser.readline()\r\n# \tprint(data_raw)\r\n\r\n\r\ndef make_path(crd1, crd2):\r\n\tt = determinate_type(crd1, crd2)\r\n\tcrd1 = convert_btw_lines(crd1[0], crd1[1])\r\n\tcrd2 = convert_btw_lines(crd2[0], crd2[1])\r\n\tif (t == 1):\r\n\t\tmove1 = crd1 #go fetch the piece where it sits\r\n\t\t#turn on the electromagnet\r\n\t\tmove2 = [crd1[0], crd1[1]+1] #move in between the lines\r\n\t\tmove3 = [crd2[0], crd2[1]+1] #move to the target x position while staying between lines\r\n\t\tmove4 = crd2 #center on the square\r\n\t\t#turn off the electromagnet\r\n\t\tmoves = [move1, move2, move3, move4]\r\n\telif (t == 2):\r\n\t\tmove1 = crd1 #go fetch the piece\r\n\t\t#turn on the magnet\r\n\t\tmove2 = [crd1[0]+1, crd1[1]] #move in between the lines\r\n\t\tmove3 = [crd2[0]+1, crd2[1]] #move to the target y position while staying between lines\r\n\t\tmove4 = crd2 #center on the square\r\n\t\t#turn off the magnet\r\n\t\tmoves = [move1, move2, move3, move4]\r\n\telse:\r\n\t\tmove1 = crd1 #go fetch the piece\r\n\t\t#turn on the magnet\r\n\t\tmove2 = [crd1[0], crd1[1]-1] #put it in between the lines\r\n\t\tmove3 = [crd2[0]-1, crd1[1]-1] #move to the destination x position, offset by 1 so it does not knock over pieces on the way down\r\n\t\tmove4 = [crd2[0]-1, crd2[1]] #descend between lines to the destination y position\r\n\t\tmove5 = crd2 #center on the square\r\n\t\t#turn off the magnet\r\n\t\tmoves = [move1, move2, move3, move4, move5]\r\n\treturn moves\r\n\r\ndef val_path(line, letra):\r\n\t\r\n\tindX = find_index_letra(line, 'X')\r\n\tindY = find_index_letra(line, 'Y')\r\n\tval = ''\r\n\tif (letra == 'X'):\r\n\t\ti = indX\r\n\t\twhile (i < indY):\r\n\t\t\tval = val+line[i]\r\n\t\t\ti += 1\r\n\t\t\t\r\n\telse:\r\n\t\ti = indY\r\n\t\twhile (i < len(line)):\r\n\t\t\tval = val+line[i]\r\n\t\t\ti += 1\r\n\treturn val\r\n\r\n\r\n\r\n\t\r\n\r\n\r\ndef add_magnet(mov):\r\n\tmov.insert(1, 1)\r\n\tmov.append(0)\r\n\treturn mov\r\n\r\ndef mayor(linexd):\r\n\tmay = False\r\n\tif (len(linexd) > 2):\r\n\t\tmay = True\r\n\treturn may\r\n\r\ndef micro_g_code(line, tam):\r\n\tg = \"G\"\r\n\tx = \"X\"\r\n\ty = \"Y\"\r\n\tesp = \" \"\r\n\r\n\tif (tam > 1):\r\n\t\tgLine = g+\"91\"+esp+x+str(line[0])+y+str(line[1])\r\n\t\t#if (len(str(line[0])) > 1):\r\n\t\t\t#xVal = str(line[0])\r\n\t\t\t#gline = g+\"91\"+esp+x+xVal+esp+y+yVal\r\n\t\t#\tgLine = g+\"91\"+esp+x+str(line[0])+y+str(line[1])\r\n\t\t#if (len(str(line[1])) > 1):\r\n\t\t#\tyVal = str(line[1])\r\n\telse:\r\n\t\tif (line == 1):\r\n\t\t\tgLine = \"M3\"\r\n\t\telse:\r\n\t\t\tgLine = \"M4\"\r\n\treturn gLine\r\n\r\ndef find_index_letra(line, letra):\r\n\tindex = 0\r\n\tfor i in line:\r\n\t\tif (i == letra):\r\n\t\t\tesp = index\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tindex += 1\r\n\treturn index\r\n\r\ndef find_value(line, 
letra):\r\n\t\r\n\tindX = find_index_letra(line, 'X')\r\n\tindY = find_index_letra(line, 'Y')\r\n\tval = ''\r\n\tif (letra == 'X'):\r\n\t\ti = indX\r\n\t\twhile (i < indY):\r\n\t\t\tval = val+line[i]\r\n\t\t\ti += 1\r\n\t\t\t\r\n\telse:\r\n\t\ti = indY\r\n\t\twhile (i < len(line)):\r\n\t\t\tval = val+line[i]\r\n\t\t\ti += 1\r\n\treturn val\r\n\r\n\r\n\r\n\r\n\r\ndef acort(lines, typ):\r\n\tallLines = []\r\n\r\n\tif (typ == 1):\r\n\t\tallLines.append(lines[0])\r\n\t\tallLines.append('G91 '+find_value(lines[1], 'Y'))\r\n\t\tallLines.append('G91 '+find_value(lines[2], 'X'))\r\n\t\tallLines.append('G91 '+find_value(lines[3], 'Y'))\r\n\telif (typ == 2):\r\n\t\tallLines.append(lines[0])\r\n\t\tallLines.append('G91 '+find_value(lines[1], 'X'))\r\n\t\tallLines.append('G91 '+find_value(lines[2], 'Y'))\r\n\t\tallLines.append('G91 '+find_value(lines[3], 'X'))\r\n\telse:\r\n\t\tallLines.append(lines[0])\r\n\t\tallLines.append('G91 '+find_value(lines[1], 'Y'))\r\n\t\tallLines.append('G91 '+find_value(lines[2], 'X'))\r\n\t\tallLines.append('G91 '+find_value(lines[3], 'Y'))\r\n\t\tallLines.append('G91 '+find_value(lines[4], 'X'))\r\n\treturn allLines\r\n\r\ndef add_gMagnet(tira_lineas):\r\n\ttira_lineas.insert(1, 'M4')\r\n\ttira_lineas.append('M3')\r\n\treturn tira_lineas\r\n\r\n\r\n\r\n#def acortacion(lineas, tipo):\r\n#\tlineasFinal = []\r\n#\r\n#\tif (tipo == 1):\r\n#\t\tlinea1 = micro_acorte(lineas[2], 'x')\r\n#\t\tlinea2 = micro_acorte(lineas[3], 'y')\r\n#\t\tlinea3 = micro_acorte(lineas[4], 'x')\r\n#\t\tlineasFinal.append(lineas[0])\r\n#\t\tlineasFinal.append(lineas[1])\r\n#\t\tlineasFinal.append(linea1)\r\n#\t\tlineasFinal.append(linea2)\r\n#\t\tlineasFinal.append(linea3)\r\n#\t\tlineasFinal.append(lineas[5])\r\n#\telif (tipo == 2):\r\n#\t\tlinea1 = micro_acorte(lineas[2], 'y')\r\n#\t\tlinea3 = micro_acorte(lineas[4], 'y')\r\n#\r\n#\t\tlineasFinal.append(lineas[0])\r\n#\t\tlineasFinal.append(lineas[1])\r\n#\t\tlineasFinal.append(linea1)\r\n#\t\tlineasFinal.append(linea2)\r\n#\t\tlineasFinal.append(linea3)\r\n#\tlinea2 = micro_acorte(lineas[3], 'y')\r\n#\t\tlinea3 = micro_acorte(lineas[4], 'x')\r\n#\t\tlinea4 = micro_acorte(lineas[5], 'y')\r\n#\r\n#\t\tlineasFinal.append(lineas[0])\r\n#\t\tlineasFinal.append(lineas[1])\r\n#\t\tlineasFinal.append(linea1)\r\n#\t\tlineasFinal.append(linea2)\r\n#\t\tlineasFinal.append(linea3)\r\n#\t\tlineasFinal.append(linea4)\r\n#\t\tlineasFinal.append(lineas[6])\r\n#\r\n#\treturn lineasFinal \r\n\r\n\r\ndef g_code_converter(m):\r\n\r\n\t#for i in len(m):\r\n\t#\tgLines.append(micro_g_code(m[i]))\r\n\t#return gLines\r\n\r\n\t#gLines = [\"g\"]\r\n\t#for i in len(m):\r\n\t#i = 0\r\n\t#while(i < len(m)):\r\n\t#\tgLines.append(micro_g_code(m[i]), len(m))\r\n\t#\ti += 1\r\n\t#return gLines\r\n\t#print(m)\r\n\t\r\n\t\r\n\tallLines = []\r\n\tfor i in m:\r\n\t\t\r\n\t\tallLines.append(micro_g_code(i, len(list(str(i)))))\r\n\treturn allLines\r\n\r\ndef send(val):\r\n\t'''\r\n\tports = list(serial.tools.list_ports.comports())\r\n\tfor p in ports:\r\n \t#print p\r\n\t\tif \"Arduino\" in p[1]:\r\n\t\t\tarduino = serial.Serial(p[0], baudrate=115200)\r\n\t\r\n\ttempN = 3\r\n\ttempF = 5\r\n\t'''\r\n\tarduino = serial.Serial('/dev/ttyACM0', baudrate=115200) # device path must be absolute ('/dev/...'), not 'dev/...'\r\n\t\r\n\r\n\tarduino.write(bytes(\"\\r\\n\\r\\n\", encoding='ascii'))\r\n\tarduino.write(bytes(\"M4\", encoding='ascii'))\r\n\r\n\tfor z in val:\r\n\t\t\r\n\t\tarduino.write(bytes(\"\\r\\n\\r\\n\", encoding='ascii'))\r\n\t\tarduino.write(bytes(str(z), encoding='ascii'))\r\n\t\tarduino.write(bytes(\"\\r\\n\\r\\n\", encoding='ascii'))\r\n\t\tprint(z)\r\n\t\ttime.sleep(3)\r\n\t\t#wait_for_answer()\r\n\t\ttime.sleep(.5)\r\n\r\ndef home(tira):\r\n\ttam = len(tira)\r\n\tultTira = tira[tam-1]\r\n\ty = find_value(ultTira, 'Y')\r\n\t#y = list(y.insert(1, '-'))\r\n\ty = y.replace('Y', '-')\r\n\tx = find_value(ultTira, 
'X')\n\t#x = list(x.insert(1, '-'))\n\tx = x.replace('X', '-')\n\tultTira = 'G91 '+'X'+x+'Y'+y\n\t#print(ultTira)\n\treturn ultTira\n\n\n\ndef resta(line1, line2, let):\n\ta = find_value(line1, let)\n\tb = find_value(line2, let)\n\ta = a.replace(let, \"\")\n\tb = b.replace(let, \"\")\n\tres = int(b) - int(a)\n\t#print(a, b)\n\t#print(len(a), len(b))\n\tprint(res)\n\treturn res \n\ndef mkPath2(path, tipo):\n\tprint(len(path))\n\tif (tipo == 1):\n\t\tm1 = resta(path[1], path[3], 'Y')\n\t\tm2 = resta(path[3], path[4], 'X')\n\t\tm3 = resta(path[4], path[5], 'Y')\n\t\tl1 = 'G91 Y'+str(m1)\n\t\tl2 = 'G91 X'+str(m2)\n\t\tl3 = 'G91 Y'+str(m3)\n\t\tfixedPath = [path[0], path[1], path[2], l1, l2, l3, path[6]]\n\telif (tipo == 2):\n\t\tm1 = resta(path[1], path[3], 'X')\n\t\tm2 = resta(path[3], path[4], 'Y')\n\t\tm3 = resta(path[4], path[5], 'X')\n\t\tl1 = 'G91 X'+str(m1)\n\t\tl2 = 'G91 Y'+str(m2)\n\t\tl3 = 'G91 X'+str(m3)\n\t\tfixedPath = [path[0], path[1], path[2], l1, l2, l3, path[6]]\n\telse:\n\t\tm1 = resta(path[1], path[3], 'Y')\n\t\tm2 = resta(path[3], path[4], 'X')\n\t\tm3 = resta(path[4], path[5], 'Y')\n\t\tm4 = resta(path[5], path[6], 'X')\n\t\tl1 = 'G91 Y'+str(m1)\n\t\tl2 = 'G91 X'+str(m2)\n\t\tl3 = 'G91 Y'+str(m3)\n\t\tl4 = 'G91 X'+str(m4)\n\t\tfixedPath = [path[0], path[1], path[2], l1, l2, l3, l4, path[7]]\n\treturn fixedPath\n\n#def goodPath(gcode, t):\n#\tif (t == 1):\n\n\ndef funcion_maxima(cor1, cor2):\n\ttipo = determinate_type(cor1, cor2)\n\t#print(\"move type:\",tipo)\n\tmoves = make_path(cor1, cor2)\n\t#print(moves)\n\t#magMoves = add_magnet(moves)\n\t#print(magMoves)\n\t#gline = micro_g_code(moves, 2)\n\t#gline2 = micro_g_code(magMoves[1], 1)\n\t#print(gline)\n\t#print(gline2)\n\tglines = g_code_converter(moves)\n\tprint(glines)\n\t#lineasCortadas = acortacion(glines, tipo)\n\t#print(lineasCortadas)\n\n\t#return lineasCortadas\n\t#send(lineasCortadas)\n\n\t#home()\n\ttod_lineas = glines\n\t#tod_lineas = acort(glines, tipo)\n\ttod_lineas = add_gMagnet(tod_lineas)\n\ttod_lineas.insert(0, \"G00 X0Y0\")\n\th = home(glines)\n\n\tprint(h)\n\ttod_lineas.append(h)\n\tprint(tod_lineas)\n\t#add_gMagnet(gl)\n\t#print(gl)\n\t#print(glines)\n\t#glines.insert(0, 'G91 X0Y0')\n\t#print(glines)\n\t#glines = glines.append(home(gl))\n\ttod_lineas = mkPath2(tod_lineas, tipo)\n\t#print(glines)\n\treturn tod_lineas\n\t#fLineas = mkPath2(tod_lineas, tipo)\n\t#send(tod_lineas)\n\t#return fLineas\n\t#print(tod_lineas)\n\t#print(find_value(caca, 'X'))\n\t#print(add_gMagnet(tod_lineas))\n\n\t#time.sleep(5)\n\n","sub_path":"ajedrez-2/functs.py","file_name":"functs.py","file_ext":"py","file_size_in_byte":8694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"126622793","text":"from .Logger import *\nimport subprocess\nimport json\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nimport threading\n\nclass Trigger:\n\n\t__name=\"\"\n\t__path=\"\"\n\t__occurence=None\n\t__time=\"\"\n\t__nextExecution=None\n\t__executed=False\n\n\tdef __init__(self, name, path, freq, resultObj):\n\t\tself.__name = name\n\t\tself.__path = path\n\t\tself.__time = freq.split(\" \")[0]\n\t\tself.__occurence = freq.split(\" \")[1]\n\t\t#To be enabled when model can return every object in HOME\n\t\tself.__resultObj = resultObj\n\t\tself.lg = Logger(\"syslog.LOG\", \"Trigger [\"+self.__name+\"]\")\n\t\tself.__nextExecution = datetime.now().strftime('%d/%m/%Y')\n\n\tdef run(self):\n\t\ttry:\n\t\t\tr =subprocess.check_output([\"python3\", 
self.__path]).decode()\n\t\t\tself.lg.info(\"json received: \"+r)\n\t\t\tjsonedResponse = json.loads(r)\n\t\t\tfor element in jsonedResponse[0]:\n\t\t\t\tsetattr(self.__resultObj, \"__\" + element, jsonedResponse[0][element])\n\t\t\tself.__resultObj.dump()\n\t\t\tself.__calculateNextExecution()\n\t\texcept subprocess.CalledProcessError as e:\n\t\t\tself.lg.error(\"thrown error: \" + repr(e))\n\n\tdef getNextExecution(self):\n\t\treturn self.__nextExecution\n\n\tdef __calculateNextExecution(self):\n\t\tif (self.__occurence == \"DAILY\"):\n\t\t\tself.__nextExecution = (datetime.now() + relativedelta(days=1)).strftime('%d/%m/%Y') \n\t\t\tself.lg.info(\"next execution calculated to: \" + self.__nextExecution)\n\t\t\tself.__executed = True\n\t\t\tt = threading.Timer(5.0, self.__resetExecutionState)\n\t\t\tt.start()\n\n\tdef __resetExecutionState(self):\n\t\t\tself.__executed = False\n\n\tdef hasBeenExecuted(self):\n\t\treturn self.__executed\n\n\tdef getName(self):\n\t\treturn self.__name\n","sub_path":"Scheduller/Trigger.py","file_name":"Trigger.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"494056087","text":"# Insertion sort: \n# Always maintains a sorted sublist with one item larger\n# Each new item is \"inserted\" into the correct sublist position\n# Best case: only one comparison needed without going in the while loop\n# O(n)\n# Worst case: O(n^2)\ndef insertionSort(numlist):\n for index in range(1, len(numlist)):\n currentVal = numlist[index]\n position = index\n \n # keeps track of current value until current value\n # is greater than the value at position, then exit while and insert\n while position > 0 and numlist[position-1] > currentVal:\n numlist[position] = numlist[position-1]\n position = position - 1\n \n numlist[position] = currentVal\n\nalist = [54,26,93,17,77,31,44,55,20]\ninsertionSort(alist)\nprint(alist)","sub_path":"Algorithms/Sorting/Insertion_Sort.py","file_name":"Insertion_Sort.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"297440271","text":"\"\"\"\nUtils for conversion\n\"\"\"\nimport os\nimport logging\nimport numpy as np\nimport cv2\nimport scipy.misc as misc\nimport math\nfrom PIL import Image\nfrom geometry import *\n\n\ndef setupLogging(prefix):\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n rootLogger = logging.getLogger()\n\n fileHandler = logging.FileHandler(\"{0}/{1}.log\".format('.', prefix))\n fileHandler.setFormatter(logFormatter)\n rootLogger.addHandler(fileHandler)\n\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logFormatter)\n rootLogger.addHandler(consoleHandler)\n\n rootLogger.setLevel(level=logging.INFO)\n logging.info(\"starting up\")\n\n\ndef rotateImg(img, angle, mask_in=None):\n if angle == 0:\n return img, mask_in\n\n # grab the dimensions of the image\n (h, w) = img.shape[:2]\n\n max_dim = int(max(h, w) * 2.0)\n\n # Get a blank array the max paste size\n if len(img.shape) > 2:\n buffer_roi = np.zeros([max_dim, max_dim, img.shape[2]], dtype=np.uint8)\n else:\n buffer_roi = np.zeros([max_dim, max_dim], dtype=np.uint8)\n\n if mask_in is not None:\n buffer_roi_mask = np.zeros([max_dim, max_dim], dtype=np.uint8)\n\n center_rotate_roi = int(max_dim / 2.0)\n paste_left = int(img.shape[1] / 2.0)\n paste_right = img.shape[1] - paste_left\n paste_top = 
int(img.shape[0] / 2.0)\n paste_bottom = img.shape[0] - paste_top\n\n # Copy the image into the center of this\n buffer_roi[(center_rotate_roi - paste_top):(center_rotate_roi + paste_bottom),\n (center_rotate_roi - paste_left):(center_rotate_roi + paste_right)] = img\n if mask_in is not None:\n buffer_roi_mask[(center_rotate_roi - paste_top):(center_rotate_roi + paste_bottom),\n (center_rotate_roi - paste_left):(center_rotate_roi + paste_right)] = mask_in\n\n # showAndWait('buffer_roi', buffer_roi)\n\n rotated = misc.imrotate(buffer_roi, angle)\n if mask_in is not None:\n rotated_mask = misc.imrotate(buffer_roi_mask, angle)\n\n if len(img.shape) > 2:\n paste_grey = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)\n else:\n paste_grey = rotated\n\n # showAndWait('paste_grey', paste_grey)\n # cv2.imwrite('/media/dcofer/Ubuntu_Data/drone_images/paste_grey.png', paste_grey)\n\n ret, rotated_mask_img = cv2.threshold(paste_grey, 5, 255, cv2.THRESH_BINARY)\n\n # showAndWait('mask', rotated_mask)\n # cv2.imwrite('/media/dcofer/Ubuntu_Data/drone_images/rotated_mask.png', rotated_mask)\n\n where = np.array(np.where(rotated_mask_img))\n # np.savetxt('/media/dcofer/Ubuntu_Data/drone_images/fuckhead.csv', np.transpose(where))\n\n x1, y1 = np.amin(where, axis=1)\n x2, y2 = np.amax(where, axis=1)\n\n out_image = rotated[x1:x2, y1:y2]\n if mask_in is not None:\n out_mask = rotated_mask[x1:x2, y1:y2]\n ret, out_mask = cv2.threshold(out_mask, 3, 255, cv2.THRESH_BINARY)\n else:\n out_mask = None\n\n # showAndWait('out_image', out_image)\n # cv2.imwrite('/media/dcofer/Ubuntu_Data/drone_images/out_image.png', out_image)\n\n # return the rotated image\n return out_image, out_mask\n\n\ndef generateMask(img):\n if len(img.shape) == 3:\n img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n else:\n img_grey = img\n\n ret, mask = cv2.threshold(img_grey, 5, 255, cv2.THRESH_BINARY)\n return mask\n\n\ndef showAndWait(name, img):\n cv2.imshow(name, img)\n cv2.waitKey(0)\n\n\ndef findFilesOfType(input_dir, endings):\n # Get the xml files in the directory\n files = os.listdir(input_dir)\n\n out_files = []\n for file in files:\n for ext in endings:\n if file.endswith(ext):\n out_files.append(input_dir + '/' + file)\n break\n\n ret_files = sorted(set(out_files))\n # print img_files\n\n return ret_files\n\n\ndef writeFileList(list, filename):\n with open(filename, 'w') as f:\n for item in list:\n #logging.debug(item)\n f.write(\"%s\\n\" % item)\n\n\ndef saveDetectNetLabelFile(label, list, filename):\n with open(filename, 'w') as f:\n for l in list:\n x_max = l['x'] + l['width']\n y_max = l['y'] + l['height']\n\n f.write(\"{} 0.0 0 0.0 {} {} {} {} 0.0 0.0 0.0 0.0 0.0 0.0 0.0\\n\".format(label, l['x'],\n l['y'], x_max, y_max))\n\n\ndef loadYoloLabels(label_file):\n\n label_data = []\n with open(label_file) as reader:\n line = reader.readline()\n labels = line.split(' ')\n\n width_2 = float(labels[3]) / 2.0\n height_2 = float(labels[4]) / 2.0\n\n left = float(labels[1]) - width_2\n top = float(labels[2]) - height_2\n right = left + float(labels[3])\n bottom = top + float(labels[4])\n\n new_labels = [left, top, right, bottom]\n\n label_data.append(new_labels)\n\n return label_data\n\n\ndef saveYoloLabelFile(label, list, filename, img_width, img_height):\n with open(filename, 'w') as f:\n for l in list:\n x_center = (l['x'] + float(l['width'])/2.0) / float(img_width)\n y_center = (l['y'] + float(l['height'])/2.0) / float(img_height)\n width = float(l['width']) / float(img_width)\n height = float(l['height']) / float(img_height)\n\n 
f.write(\"{} {:.6f} {:.6f} {:.6f} {:.6f}\\n\".format(label, x_center, y_center, width, height))\n\n\ndef rotate(origin, point, angle_deg):\n \"\"\"\n Rotate a point counterclockwise by a given angle around a given origin.\n\n The angle should be given in radians.\n \"\"\"\n angle = math.radians(angle_deg)\n\n ox, oy = origin\n px, py = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy\n\n\ndef bitget(byteval, idx):\n return ((byteval & (1 << idx)) != 0)\n\n\ndef color_map(N=256, normalized=False):\n cmap = []\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 7-j)\n c = c >> 3\n\n cmap.append(r)\n cmap.append(g)\n cmap.append(b)\n\n return cmap\n\n\ndef quantizetopalette(silf, palette, dither=False):\n \"\"\"Convert an RGB or L mode image to use a given P image's palette.\"\"\"\n\n silf.load()\n\n # use palette from reference image\n palette.load()\n if palette.mode != \"P\":\n raise ValueError(\"bad mode for palette image\")\n if silf.mode != \"RGB\" and silf.mode != \"L\":\n raise ValueError(\n \"only RGB or L mode images can be quantized to a palette\"\n )\n im = silf.im.convert(\"P\", 1 if dither else 0, palette.im)\n # the 0 above means turn OFF dithering\n return silf._makeself(im)\n\n\ndef savePascalColorMap(file_name):\n cm = color_map()\n\n # print cm\n cm_file = open(file_name, \"w\")\n color_idx = 0\n for c in cm:\n cm_file.write(str(c))\n\n color_idx = color_idx + 1\n if color_idx >= 3:\n color_idx = 0\n cm_file.write(\"\\n\")\n else:\n cm_file.write(\" \")\n\n cm_file.close()\n\n\ndef saveIndexImage(file_name, img):\n cmap = color_map()\n\n rgb_im = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n # pil_im = Image.open(\"/media/dcofer/Ubuntu_Data/train_data/orig_labels/P1040599_0_0.png\")\n pil_im = Image.fromarray(rgb_im)\n\n palimage = Image.new('P', pil_im.size)\n palimage.putpalette(cmap)\n newimage = quantizetopalette(pil_im, palimage, dither=False)\n\n newimage.save(file_name)\n # print(\"Saved mask file: \" + file_name)\n\ndef drawLabels(img_in, labels):\n\n img = img_in.copy()\n\n for l in labels:\n x_max = l['x'] + l['width']\n y_max = l['y'] + l['height']\n\n top_left = (int(l['x']), int(l['y']))\n top_right = (int(x_max), int(l['y']))\n bottom_right = (int(x_max), int(y_max))\n bottom_left =(int(l['x']), int(y_max))\n\n img = cv2.line(img, top_left, top_right, color=(0, 0, 255), thickness=3)\n img = cv2.line(img, top_right, bottom_right, color=(0, 0, 255), thickness=3)\n img = cv2.line(img, bottom_right, bottom_left, color=(0, 0, 255), thickness=3)\n img = cv2.line(img, bottom_left, top_left, color=(0, 0, 255), thickness=3)\n\n return img\n\n\ndef getYoloCoords(obj, img_width, img_height):\n coords = obj['relative_coordinates']\n conf = float(obj['confidence'])\n\n width = coords['width'] * img_width\n height = coords['height'] * img_height\n\n x_center = (coords['center_x'] * img_width)\n y_center = (coords['center_y'] * img_height)\n\n x_min = x_center - int(width / 2.0)\n y_min = y_center - int(height / 2.0)\n\n x_max = x_center + int(width / 2.0)\n y_max = y_center + int(height / 2.0)\n\n top_left = (int(x_min), int(y_min))\n top_right = (int(x_max), int(y_min))\n bottom_right = (int(x_max), int(y_max))\n bottom_left = (int(x_min), int(y_max))\n\n return top_left, top_right, bottom_left, bottom_right, width, height, conf\n\n\ndef 
drawYoloObjectLabels(img_in, labels):\n\n    img = img_in.copy()\n\n    for l in labels:\n        logging.info(l)\n\n        top_left, top_right, bottom_left, bottom_right, width, height, conf = getYoloCoords(l,\n                                                                                            img_in.shape[1],\n                                                                                            img_in.shape[0])\n        if conf > 0.25:\n            color = (0, 0, 255)\n        else:\n            color = (0, 255, 255)\n\n        img = cv2.line(img, top_left, top_right, color=color, thickness=3)\n        img = cv2.line(img, top_right, bottom_right, color=color, thickness=3)\n        img = cv2.line(img, bottom_right, bottom_left, color=color, thickness=3)\n        img = cv2.line(img, bottom_left, top_left, color=color, thickness=3)\n\n    return img\n\n\ndef overlapsYolo(annotations, yolo_labels, img_width, img_height):\n\n    overlaps_count = 0\n    for a in annotations:\n        a_rect = Rect(a['x'], a['y'], a['width'], a['height'])\n\n        for y in yolo_labels:\n            top_left, top_right, bottom_left, bottom_right, width, height, conf = getYoloCoords(y,\n                                                                                                img_width,\n                                                                                                img_height)\n\n            y_rect = Rect(top_left[0], top_left[1], width, height)\n\n            if y_rect.overlaps(a_rect):\n                overlaps_count += 1\n                break\n\n    if overlaps_count >= len(annotations):\n        return True\n    else:\n        return False\n\n\ndef randomFlipImage(img_in, flip_horizontal=True, flip_vertical=True,\n                    horiz_perc=50, vert_perc=10):\n    # default to the unflipped input so the name is bound on every path\n    flipped_canvas_img = img_in\n    if flip_horizontal:\n        flip_val = np.random.randint(0, 100)\n        if flip_val < horiz_perc:\n            logging.info(\"    flip_val: {}. Flipping image horizontal.\".format(flip_val))\n            flipped_canvas_img = np.fliplr(img_in)\n        else:\n            logging.info(\"    flip_val: {}. Leaving canvas horizontal unflipped\".format(flip_val))\n            flipped_canvas_img = img_in\n\n    if flip_vertical:\n        flip_val = np.random.randint(0, 100)\n        if flip_val < vert_perc:\n            logging.info(\"    flip_val: {}. Flipping image vertical.\".format(flip_val))\n            flipped_canvas_img = np.flipud(flipped_canvas_img)\n        else:\n            logging.info(\"    flip_val: {}. 
Leaving canvas unflipped vertical\".format(flip_val))\n            flipped_canvas_img = flipped_canvas_img\n\n    return flipped_canvas_img\n\n\ndef flipLabels(labels, paste_dim, vertical=False):\n\n    new_labels = []\n    for l in labels:\n        # new_labels.append([paste_width - l[2], l[1], paste_width - l[0], l[3]])\n\n        new_l = l.copy()\n\n        if not vertical:\n            new_l['x'] = paste_dim - (l['x'] + l['width'])\n        else:\n            new_l['y'] = paste_dim - (l['y'] + l['height'])\n\n        new_labels.append(new_l)\n\n    #new_labels.append([paste_width-l[2], l[1], paste_width-l[0], l[3]])\n\n    printLabelDims(new_labels)\n\n    return new_labels\n\ndef adjustLabels(labels, x, y):\n\n    new_labels = []\n    for l in labels:\n        new_l = l.copy()\n        new_l['x'] = x + l['x']\n        new_l['y'] = y + l['y']\n        new_labels.append(new_l)\n\n    printLabelDims(new_labels)\n\n    return new_labels\n\ndef scaleLabels(labels, ratio):\n\n    new_labels = []\n    for l in labels:\n        new_l = l.copy()\n        new_l['x'] = l['x'] * ratio\n        new_l['y'] = l['y'] * ratio\n        new_l['width'] = l['width'] * ratio\n        new_l['height'] = l['height'] * ratio\n        new_labels.append(new_l)\n\n    printLabelDims(new_labels)\n\n    return new_labels\n\ndef printLabelDims(labels):\n\n    if len(labels) > 0:\n        min_area = 99999999999\n        for l in labels:\n            logging.info(\"    x: {0:.2f}, y: {1:.2f}, w: {2:.2f}, h: {3:.2f}\".format(l['x'], l['y'], l['width'], l['height']))\n            area = l['width'] * l['height']\n            if area < min_area:\n                min_area = area\n\n        return min_area\n    else:\n        return 0.0","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"644515336","text":"import imaplib\nimport base64\nimport re\n# a = '=?gb18030?B?y+21wLeiyfq1xA==?='\n# print(base64.b64decode(a).decode('gb18030'))\nM = imaplib.IMAP4('imap.163.com')\nM.login('pythonld', 'ldldld')\nM.select()\ntyp, data = M.search(None, 'ALL')\nfor num in data[0].split():\n    typ, data = M.fetch(num, '(RFC822)')\n    aa = data[0][1].decode()\n    aa = re.search('Subject: (.*?)\\n', aa).group(1)\n    if aa.startswith('=?gb'):\n        aa = aa.split('?')[-2]\n        print(base64.b64decode(aa).decode('gb18030'), '\\n')\n    else:\n        print(aa)\n\n\nM.close()\nM.logout()\n","sub_path":"python接收发送email并解析中文demo/得到中文标题.py","file_name":"得到中文标题.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"192238878","text":"import unittest\nfrom eva.project import Project\n\nclass TestProject(unittest.TestCase):\n\n    path = '/Users/marcelofelix/Documents/workspace/vivareal-publishers/vr-publishers-admin'\n\n    def test_start(self):\n        p = Project(self.path) \n        self.assertEqual(len(p.composes), 2)\n        p.composes[0].start()\n\n    def test_stop(self):\n        p = Project(self.path)\n        p.composes[0].stop()\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"Eva/tests/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"241568385","text":"#-*- coding: utf-8 -*-\r\nimport base64\r\nimport logging\r\nimport threading\r\nimport sched\r\nimport time\r\nimport urllib\r\nimport tornado\r\nimport sys\r\n\r\nfrom common.invokeCommand import InvokeCommand\r\nfrom tornado.options import options\r\nfrom tornado.httpclient import AsyncHTTPClient, HTTPClient\r\nfrom tornado.httpclient import HTTPRequest\r\nfrom tornado.gen import Wait, Callback, engine\r\nfrom common.dba_opers 
import DBAOpers\r\nfrom common.utils.mail import send_email\r\nfrom common.configFileOpers import ConfigFileOpers\r\nfrom abc import ABCMeta, abstractmethod\r\nfrom common.zkOpers import ZkOpers\r\nfrom common.abstract_mysql_service_opers import Abstract_Mysql_Service_Opers\r\nfrom common.utils.threading_exception_queue import Threading_Exception_Queue\r\nfrom common.abstract_mysql_service_action_thread import Abstract_Mysql_Service_Action_Thread\r\n\r\n'''\r\nCreated on 2013-7-21\r\n\r\n@author: asus\r\n'''\r\n\r\nclass Node_Mysql_Service_Opers(Abstract_Mysql_Service_Opers):\r\n \r\n invokeCommand = InvokeCommand()\r\n \r\n def __init__(self):\r\n '''\r\n Constructor\r\n '''\r\n \r\n def retrieve_recover_position(self):\r\n result = self.invokeCommand.run_check_shell(options.retrieve_node_uuid_seqno_script)\r\n uuid = self.__find_special_value(result, \"uuid:\", 37)\r\n seqno = self.__find_special_value(result, \"seqno:\", 65535)\r\n \r\n dict = {}\r\n dict.setdefault(\"uuid\", uuid)\r\n dict.setdefault(\"seqno\", seqno) \r\n \r\n return dict\r\n \r\n def __find_special_value(self, result, key, value_length):\r\n key_start_pos = result.find(key)\r\n key_end_pos = key_start_pos+len(key)\r\n value = result[key_end_pos:key_end_pos+value_length]\r\n value = value.rstrip('\\n')\r\n return value\r\n \r\n \r\n def start(self, isNewCluster):\r\n isLock,lock = self.zkOper.lock_node_start_stop_action()\r\n \r\n node_start_action = Node_start_action(isNewCluster, lock)\r\n node_start_action.start()\r\n \r\n def stop(self):\r\n isLock,lock = self.zkOper.lock_node_start_stop_action()\r\n \r\n # Start a thread to run the events\r\n node_stop_action = Node_stop_action(lock)\r\n node_stop_action.start()\r\n \r\nclass Node_start_action(Abstract_Mysql_Service_Action_Thread):\r\n lock = None\r\n isNewCluster = False\r\n \r\n dba_opers = DBAOpers()\r\n \r\n def __init__(self, isNewCluster, lock):\r\n super(Node_start_action, self).__init__()\r\n self.lock = lock\r\n self.isNewCluster = isNewCluster\r\n \r\n def run(self):\r\n try:\r\n self._issue_start_action(self.isNewCluster, self.lock)\r\n except:\r\n self.threading_exception_queue.put(sys.exc_info())\r\n \r\n def _issue_start_action(self, isNewCluster, lock):\r\n dataNodeProKeyValue = self.confOpers.getValue(options.data_node_property, ['dataNodeIp'])\r\n data_node_ip = dataNodeProKeyValue['dataNodeIp']\r\n \r\n try:\r\n finished_flag = self.dba_opers.retrieve_wsrep_status()\r\n \r\n if not finished_flag:\r\n result = self.invokeCommand.remove_mysql_socket()\r\n result = self.invokeCommand.mysql_service_start(isNewCluster)\r\n \r\n finished_flag = self._check_start_status(data_node_ip)\r\n finally:\r\n self.zkOper.unLock_node_start_stop_action(lock)\r\n \r\n if finished_flag: \r\n self._send_email(data_node_ip, \" mysql service start operation finished\")\r\n \r\n \r\n def _check_start_status(self, data_node_ip):\r\n finished_flag = False\r\n \r\n sh_name = \"ps -ef | grep mysqld_safe | grep -iv grep | wc -l\"\r\n \r\n while not finished_flag:\r\n result = self.invokeCommand.run_check_shell(sh_name)\r\n \r\n if int(result) == 0:\r\n finished_flag = False\r\n break\r\n \r\n finished_flag = self.dba_opers.retrieve_wsrep_status()\r\n \r\n time.sleep(2)\r\n \r\n if finished_flag: \r\n self.zkOper.write_started_node(data_node_ip)\r\n \r\n return finished_flag\r\n \r\n \r\nclass Node_stop_action(Abstract_Mysql_Service_Action_Thread):\r\n lock = None\r\n \r\n def __init__(self, lock):\r\n super(Node_stop_action, self).__init__()\r\n self.lock = lock\r\n \r\n 
def run(self):\r\n try:\r\n self._issue_stop_action(self.lock)\r\n except:\r\n self.threading_exception_queue.put(sys.exc_info())\r\n \r\n def _issue_stop_action(self, lock):\r\n finished_flag = False\r\n \r\n dataNodeProKeyValue = self.confOpers.getValue(options.data_node_property, ['dataNodeIp'])\r\n data_node_ip = dataNodeProKeyValue['dataNodeIp']\r\n \r\n try:\r\n result = self.invokeCommand.mysql_service_stop()\r\n finished_flag = self._check_stop_status(data_node_ip)\r\n finally:\r\n self.zkOper.unLock_node_start_stop_action(lock)\r\n \r\n if finished_flag: \r\n self._send_email(data_node_ip, \" mysql service stop operation finished\")\r\n \r\n \r\n def _check_stop_status(self, data_node_ip):\r\n sh_name = \"ps -ef | grep mysqld_safe | grep -iv grep | wc -l\"\r\n \r\n finished_flag = False\r\n retry_count = 0\r\n \r\n while not finished_flag and retry_count <= 60:\r\n result = self.invokeCommand.run_check_shell(sh_name)\r\n \r\n if int(result) == 0:\r\n finished_flag = True\r\n \r\n retry_count = retry_count + 1\r\n \r\n time.sleep(2)\r\n \r\n if finished_flag: \r\n self.zkOper.remove_started_node(data_node_ip)\r\n \r\n return finished_flag\r\n \r\n","sub_path":"common/node_mysql_service_opers.py","file_name":"node_mysql_service_opers.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"5483016","text":"from __future__ import absolute_import\nfrom meta.types import *\nfrom meta.codegen.protocol import get_protocols\n\n\n#\n# __all__\n#\n\n__all__ = [\n \"get_c_sharp_data_type\", \"get_c_sharp_variable_declaration\", \n \"space\", \"get_name\"\n]\n\n\n#\n# name\n#\n\ndef name_primitive(t):\n return repr(t)\n\ndef name_optional(t):\n return \"Optional_\" + get_name(t.element_type())\n\ndef name_static_array(t):\n return \"StaticArray%s_%s\" % (t.size(), get_name(t.element_type()))\n\ndef name_tuple(t):\n result = \"Tuple%s\" % (len(t.element_types()),)\n for element_type in t.element_types():\n result += \"_\" + get_name(element_type)\n return result\n\ndef name_vector(t):\n return \"Vector_\" + get_name(t.element_type())\n\ndef name_set(t):\n return \"Set_\" + get_name(t.element_type())\n\ndef name_map(t):\n return \"Map_%s_%s\" % (get_name(t.key_type()), get_name(t.value_type()))\n\ndef name_named(t):\n return \"%s%s\" % (get_namespace(t), t.name())\n\n_name_map = {\n BooleanType: name_primitive,\n SignedIntegerType: name_primitive,\n UnsignedIntegerType: name_primitive,\n FloatType: name_primitive,\n DoubleType: name_primitive,\n BytesType: name_primitive,\n TextType: name_primitive,\n OptionalType: name_optional,\n StaticArrayType: name_static_array,\n TupleType: name_tuple,\n VectorType: name_vector,\n SetType: name_set,\n MapType: name_map,\n EnumType: name_named,\n RecordType: name_named,\n VariantType: name_named,\n VariantOption: name_named\n}\n\ndef get_name(t):\n f = _name_map[type(t)]\n result = f(t)\n return result\n\ndef get_namespace(t):\n namespace = \"\"\n if isinstance(t, VariantOption):\n t = t.variant_type()\n for protocol in get_protocols():\n if t in protocol.top_level_types():\n namespace = protocol.namespace()\n return namespace\n\n\n#\n# util functions\n#\n\ndef space(level):\n return \" \" * level\n\ndef get_c_sharp_data_type(symbol):\n value_types = (\n BooleanType, SignedIntegerType, UnsignedIntegerType, \n FloatType, DoubleType, BytesType, EnumType\n )\n datatype = None\n if isinstance(symbol, SignedIntegerType):\n if symbol.bits() == 8:\n datatype = \"SByte\"\n 
else:\n datatype = \"Int%s\" % (symbol.bits())\n elif isinstance(symbol, UnsignedIntegerType):\n if symbol.bits() == 8:\n datatype = \"Byte\"\n else:\n datatype = \"UInt%s\" % (symbol.bits())\n elif isinstance(symbol, BooleanType):\n datatype = \"bool\"\n elif isinstance(symbol, FloatType):\n datatype = \"Single\"\n elif isinstance(symbol, DoubleType):\n datatype = \"Double\"\n elif isinstance(symbol, BytesType):\n datatype = \"byte[]\"\n elif isinstance(symbol, TextType):\n datatype = \"string\"\n elif isinstance(symbol, StaticArrayType):\n datatype = get_c_sharp_data_type(symbol.element_type())\n datatype = \"List<%s>\" % (datatype)\n elif isinstance(symbol, TupleType):\n def inner(element_types):\n types = None\n i = 1\n nested_types = []\n for element_type in element_types:\n if i > 7:\n nested_types.append(element_type)\n else:\n if types is None:\n types = get_c_sharp_data_type(element_type)\n else:\n types = types + (\", %s\" % (get_c_sharp_data_type(element_type),))\n i = i + 1\n if i > 7:\n types = \"%s,Tuple<%s>\" % (types, inner(nested_types))\n return types\n datatype = (\"Tuple<%s>\" % (inner(symbol.element_types()),))\n elif isinstance(symbol, VectorType):\n datatype = get_c_sharp_data_type(symbol.element_type())\n datatype = \"List<%s>\" % (datatype)\n elif isinstance(symbol, SetType):\n datatype = (\"HashSet<%s>\" % (get_c_sharp_data_type(symbol.element_type()),))\n elif isinstance(symbol, MapType):\n key_type = get_c_sharp_data_type(symbol.key_type())\n value_type = get_c_sharp_data_type(symbol.value_type())\n datatype = (\"Dictionary<%s, %s>\" % (key_type, value_type))\n elif type(symbol) in (EnumType, RecordType, VariantType, VariantOption):\n datatype = (\"%s.%s\" % (get_namespace(symbol), symbol.name()))\n elif isinstance(symbol, OptionalType):\n datatype = get_c_sharp_data_type(symbol.element_type())\n if type(symbol.element_type()) in value_types \\\n and not isinstance(symbol.element_type(),BytesType):\n datatype = \"%s?\" % (datatype,)\n else:\n assert False\n return datatype\n\ndef get_c_sharp_variable_declaration(symbol, variable_name):\n scalar_types = (BooleanType, SignedIntegerType, UnsignedIntegerType, \n FloatType, DoubleType, BytesType, TextType, EnumType)\n if type(symbol) in scalar_types:\n datatype = get_c_sharp_data_type(symbol)\n return \"%s %s;\" % (datatype, variable_name)\n elif isinstance(symbol, OptionalType):\n if type(symbol.element_type()) in scalar_types:\n datatype = get_c_sharp_data_type(symbol)\n return \"%s %s;\" % (datatype, variable_name)\n else:\n return get_c_sharp_variable_declaration(symbol.element_type(), variable_name)\n else:\n datatype = get_c_sharp_data_type(symbol)\n if isinstance(symbol, TupleType):\n return \"%s %s;\" % (datatype, variable_name)\n else:\n return \"%s %s = new %s();\" % (datatype, variable_name, datatype)\n","sub_path":"Src-server/meta/codegen/c_sharp/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"549804801","text":"# coding=utf-8\n__author__ = 'landing'\n__data__ = '2019/3/11 14:52'\n\"\"\"\n京东网数据爬取\n\n\"\"\"\nfrom lxml import html\nimport requests\n\n\ndef spider(sn):\n url = \"https://search.jd.com/Search?keyword={0}\".format(sn)\n print(url)\n # 获取Html_data\n html_data = requests.get(url).text\n print(html_data)\n # 将文本类容转换成xpath对象\n selector = html.fromstring(html_data)\n # 找到列表的集合\n ul_list = selector.xpath('//div[@id=\"J_goodsList\"]/ul/li')\n print(len(ul_list))\n # 解析对于的类容 
标题 价格 购买链接\n for li in ul_list:\n print(li)\n\n\nif __name__ == \"__main__\":\n spider('Python编程 从入门到实践')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"isMe_pythonSP/bookMoney/jingdong.py","file_name":"jingdong.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"288981776","text":"from config import *\nimport pygame as pg\n\nclass Display_board():\n def __init__(self, screen):\n self.screen = screen\n self.font_num = pg.font.SysFont(\"comicsans\", NUMBER_SIZE) \n self.font_cdt = pg.font.SysFont(\"comicsans\", CANDIDATE_SIZE) \n\n def draw_val(self, val, x, y):\n text1 = self.font_num.render(str(val), 1, BLACK)\n self.screen.blit(text1, (x * BLOCK_SIZE + 15, y * BLOCK_SIZE + 15))\n\n def draw_cdt(self, val, x, y):\n text1 = self.font_cdt.render(str(val), 1, BLACK)\n self.screen.blit(text1, (x * BLOCK_SIZE + 1, y * BLOCK_SIZE + 1))\n\n def on_mouse_press(self, x, y, symbol, modifier):\n pass\n\n def draw(self, grid, cell):\n for i in range (9):\n for j in range (9):\n if grid[i][j] != 0:\n if type(grid[i][j]) != int:\n self.draw_candidates(grid, cell)\n else:\n text1 = self.font_num.render(str(grid[i][j]), 1, BLACK)\n self.screen.blit(text1, (TOP_LX + i * BLOCK_SIZE + 15, TOP_LY + j * BLOCK_SIZE + 14))\n\n size = BLOCK_SIZE\n for i in range(10):\n if i % 3 == 0:\n print(\"thick\")\n thick = 7\n else:\n print(\"thin\")\n thick = 1\n\n if (i + 2) % 3 == 0:\n print(\"increasing size: \", i)\n size +=7\n else:\n size = 0\n\n # thick Thick thin thin thick thin thin thick thin thin thick\n # TOP_LX -> TOP_RX & ( TOP_LY -> BS + 7 -> BS -> BS -> BS + 7 -> BS -> BS -> BS + 7 -> BS -> BS)\n print(\"line: \", i, size, \"block size: \", BLOCK_SIZE)\n print(\"Start: \", TOP_LX, TOP_LY + i * size, \"end: \", TOP_RX, TOP_RY + i * size)\n print(\"Start: \", TOP_LX + i * size, TOP_LY, \"end: \", BOT_LX + i * size, BOT_LY)\n pg.draw.line(self.screen, BLACK, (TOP_LX, \n TOP_LY + i * size), \n (TOP_RX + 21, \n TOP_RY + i * size), thick)\n pg.draw.line(self.screen, BLACK, (TOP_LX + i * size, \n TOP_LY), \n (BOT_LX + i * size, \n BOT_LY + 21), thick)\n\n # For candidate placement\n # if i % 3 == 0:\n # print(BLOCK_SIZE)\n # pg.draw.line(self.screen, BLACK, (cell[0], \n # cell[1] + i * (cell[2] / 9)), \n # ((cell[0] + cell[2]), \n # cell[1] + i * (cell[2] / 9)), 1)\n # pg.draw.line(self.screen, BLACK, (cell[0] + i * (cell[3] / 9), \n # cell[1]), \n # (cell[0] + i * (cell[3] / 9),\n # cell[1] + cell[3]), 1)\n\n def draw_candidates(self, grid, cell):\n new_line = 1\n iteration = 1\n indent = 15\n for number in grid[i][j]:\n if iteration % 3 == 1: # Checking if first in line: 1, 4, 7\n text1 = self.font_cdt.render(str(number), 1, BLACK)\n self.screen.blit(text1, (cell[0] + 3, cell[1] + ((new_line - 1) * indent) + 2))\n else:\n text1 = self.font_cdt.render(str(number), 1, BLACK)\n self.screen.blit(text1, (cell[0] + ((iteration - 1) * indent) + 3, cell[1] + ((new_line - 1) * indent) + 2))\n if iteration % 3 == 0: # checking if last in line: 3, 6\n new_line += 1\n iteration = 0\n\n iteration += 1\n\n\n def update(self, grid, row, col, blk):\n font_val = pg.font.SysFont(\"comicsans\", BOLD)\n\n if row != (-1, -1):\n # Remove old number\n text1 = self.font_num.render(str(grid[row[0]][row[1]]), 1, WHITE)\n self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 15, TOP_LY + row[1] * BLOCK_SIZE + 15))\n\n # Rewrite in bigger font\n text1 = 
font_val.render(str(grid[row[0]][row[1]]), 1, BLACK)\n self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 14, TOP_LY + row[1] * BLOCK_SIZE + 10))\n \n if col != (-1, -1):\n # Remove old number\n text1 = self.font_num.render(str(grid[col[0]][col[1]]), 1, WHITE)\n self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 15, TOP_LY + col[1] * BLOCK_SIZE + 15))\n\n # Rewrite in bigger font\n text1 = font_val.render(str(grid[col[0]][col[1]]), 1, BLACK)\n self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 14, TOP_LY + col[1] * BLOCK_SIZE + 10))\n\n if blk != (-1, -1):\n # Remove old number\n text1 = self.font_num.render(str(grid[blk[0]][blk[1]]), 1, WHITE)\n self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 15, TOP_LY + blk[1] * BLOCK_SIZE + 15))\n\n # Rewrite in bigger font\n text1 = font_val.render(str(grid[blk[0]][blk[1]]), 1, BLACK)\n self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 14, TOP_LY + blk[1] * BLOCK_SIZE + 10))\n\n def find_cell(self, x, y):\n # Only applies glow when a cell is selected\n if x == -1 and y == -1:\n return\n \n width = BLOCK_SIZE\n height = BLOCK_SIZE\n\n # Adjustment in size if bordering a thick line\n if x % 3 == 0: # If thick line on the left\n start_pos_x = TOP_LX + x * BLOCK_SIZE + 4\n width = BLOCK_SIZE - 4\n else:\n start_pos_x = TOP_LX + x * BLOCK_SIZE + 1\n\n if (x + 1) % 3 == 0: # If thick line on the right\n width = BLOCK_SIZE - 3.5\n\n if y % 3 == 0: # If thick line on the top\n start_pos_y = TOP_LY + y * BLOCK_SIZE + 4\n height = BLOCK_SIZE - 4\n else:\n start_pos_y = TOP_LY + y * BLOCK_SIZE + 1\n\n if (y + 1) % 3 == 0: # If thick line on the bottom\n height = BLOCK_SIZE - 3.5\n\n return (start_pos_x, start_pos_y, width, height)\n\n def blink(self, alpha, a_change):\n if a_change:\n alpha += BLINK_SPEED\n if alpha >= 175:\n a_change = False\n elif a_change == False:\n alpha += -BLINK_SPEED\n if alpha <= 30:\n a_change = True\n \n return (alpha, a_change)\n\n","sub_path":".history/display_board_20201107204444.py","file_name":"display_board_20201107204444.py","file_ext":"py","file_size_in_byte":6659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"223684327","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 7 19:12:18 2019\r\n\r\n@author: kingshuk02\r\n\"\"\"\r\n\r\n#%%\r\nimport random\r\ndef ball():\r\n ini_run=0\r\n a=True \r\n while a==True:\r\n b=int(input(\"enter one number from 1 to 6\" ))\r\n c=random.randrange(1,6)\r\n if b==c:\r\n print(\"Computer lost a wicket\")\r\n a=False\r\n print(\"computer made \",ini_run,\"runs\")\r\n else:\r\n ini_run+=c\r\n print(\"computer made \",ini_run,\"runs\")\r\n print(\"you need \",ini_run+1,\"runs to win\")\r\n d=True\r\n comp=0\r\n while d==True:\r\n e=int(input(\"enter one number from 1 to 6\"))\r\n f=random.randrange(1,6)\r\n if e==f:\r\n print(\"You lost one wicket\")\r\n print(\"you made \",comp,\"runs\")\r\n d=False\r\n else:\r\n comp+=e \r\n print(\"You made \",comp,\"runs\") \r\n \r\n \r\n if compini_run+1:\r\n print(\"you won the game\")\r\n \r\n \r\n \r\ndef runs():\r\n ini_run=0\r\n a=True \r\n while a==True:\r\n b=int(input(\"enter one number from 1 to 6\" ))\r\n c=random.randrange(1,6)\r\n if b==c:\r\n print(\"you lost a wicket\")\r\n a=False\r\n print(\"you made \",ini_run,\"runs\") \r\n else:\r\n ini_run+=b\r\n print(\"you made \",ini_run,\"runs\")\r\n print(\"computer needs \",ini_run+1,\"runs\")\r\n d=True\r\n comp=0\r\n while d==True:\r\n e=int(input(\"enter one number from 1 to 6\"))\r\n 
f=random.randrange(1,6)\r\n if e==f:\r\n print(\"Computer lost one wicket\")\r\n d=False\r\n print(\"computer made \",comp,\"runs\")\r\n else:\r\n comp+=f \r\n print(\"computer made \",comp,\"runs\") \r\n \r\n \r\n if comp>ini_run+1:\r\n print(\"you lost the game\")\r\n d=False\r\n if comp 0:\n IDSforRecommendation = []\n for i in range(min(len(currentSessionHistory),3)):\n IDSforRecommendation.append(currentSessionHistory[-i-1][0])\n newRecommendedIDs, newRecommendedCategories = \\\n check_product_category(get_product_recc(IDSforRecommendation, numberOfReccs=10), currentSessionHistory[0][1])\n # nech tam ty stare komponenty\n newReccProductsComponents = generate_products_recommended(newRecommendedIDs)\n currentSessionRecommendedCategories.update(newRecommendedCategories)\n\n currentSessionRecommendedCategories = list(currentSessionRecommendedCategories)\n print(\"doporucene kategorie pro uzivatele {} : {}\".format(inputUsername,currentSessionRecommendedCategories))\n\n # zobraz link na historii?\n if inputUsername is not None and inputUsername.isalnum():\n print(\"user {} logged in, display link to history\".format(inputUsername))\n historyLinkStyle = {'display': 'flex'}\n else:\n historyLinkStyle = {'display': 'none'}\n\n return currentSessionHistory, \\\n currentSessionRecommendedCategories, newReccProductsComponents, \\\n historyLinkStyle\n\n\n@app.callback(\n Output('categoryHeading', 'children'),\n Output('layoutCategories', 'style'),\n Output('layoutRecc', 'style'),\n Output('layoutHistory', 'style'),\n Output('reccCategoryList', 'children'),\n Output('historyDisplay', 'children'),\n Output('layoutProductsAll', 'style'),\n Output('productsRecAll', 'children'),\n [Input('url', 'pathname'),\n Input('categoriesRecommended', 'data')\n ],\n [State('categoriesRecommended', 'data'),\n State('usernameInput', 'value'),\n State('currentUserSessionHistory', 'data')]\n)\ndef switch_page(pathname, categoriesRecommendedListAsInput, categoriesRecommendedListAsState, inputUsername, userHistoryList):\n global RE_catID\n if pathname == '/':\n recommendToIDS = [x[0] for x in userHistoryList]\n return '', {'display': 'block'}, {'display': 'none'}, {'display': 'none'}, get_recc_category_links(categoriesRecommendedListAsState), '',{'display': 'block'}, generate_products_recommended_All(get_recc_products_all(recommendToIDS))\n elif pathname == '/history':\n return '', {'display': 'none'}, {'display': 'none'}, {'display': 'block'}, '', generate_products_from_history(inputUsername, userHistoryList), {'display': 'none'}, ''\n elif bool(re.search(RE_catID, pathname)):\n return get_category_name(re.findall(r'\\d+', pathname)[0]), {'display': 'none'}, {'display': 'block'}, {'display': 'none'}, '', '', {'display': 'none'}, ''\n else:\n return 'Čtyřistačtyři', {'display': 'none'}, {'display': 'none'}, {'display': 'block'}, '', layout404, {'display': 'none'}, ''\n\n\n@app.callback(\n Output('moreProductsContent', 'children'),\n [Input('loadMoreButton', 'n_clicks'),\n Input('url','pathname')\n ],\n [State('moreProductsContent', 'children'),\n State('url', 'pathname')]\n)\ndef load_more_products(clicks, currentPageURLAsInput, oldchildren, currentPageURLAsState):\n if (currentPageURLAsState != '/' and currentPageURLAsState != '/history'):\n return oldchildren + generate_products_from_category(5, re.findall(r'\\d+', currentPageURLAsState)[0])\n else:\n return []\n\n\n# nacti vsechno\nload_model()\nload_names()\nload_histories()\nload_categories()\n\nRE_catID = re.compile(r'products/\\d+')\n\n# priprav uvodni 
stranku\n\nlayoutcategories.children[3] = init_all_category_layout()\n\n# nastav rozvrzeni stranky a pridej CSS\napp.layout = layoutvse\n\n# spust webovku\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n","sub_path":"webapp/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":8604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"130146527","text":"# Copyright 2019 Antonio Medrano\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Antonio Medrano\n\nimport sys\nimport time\nimport numpy as np\nimport readDataFiles\nimport plot\n \n\ndef read_problem(file):\n global numSites\n global numDemands\n global sites\n \n try:\n if (file[-3:].lower() == \"dat\"):\n sites = readDataFiles.readDat(file)\n elif (file[-3:].lower() == \"tsp\"):\n sites = readDataFiles.readTSP(file)\n except IOError:\n print('Error reading file')\n raise\n \n numSites = sites.shape[0] \n numDemands = numSites\n \n plot.plotData(sites)\n \n print('%d locations' % numSites)\n print('Finished Reading File!')\n\n\ndef main(unused_argv):\n print('')\n #RunCBC_LSCPexampleCppStyleAPI(SD)\n #RunSCIP_LSCPexampleCppStyleAPI(SD)\n #RunBOP_LSCPexampleCppStyleAPI(SD)\n\n\n\"\"\" Main will take in 3 arguments: p-Facilities; ServiceDistance; Data to Use \"\"\"\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n file = '../data/' + sys.argv[1]\n print(\"Problem instance from: \", file)\n read_problem(file)\n main(None)\n else:\n print(\"Problem not executed!\")","sub_path":"gurobi/dataviz.py","file_name":"dataviz.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"565952919","text":"class Solution:\n def toLowerCase(self, str: str) -> str:\n string = []\n diff = ord('a')-ord('A')\n for i in str:\n if ord('A')<=ord(i)<=ord('Z'):\n string.append(chr(ord(i)+diff))\n else:\n string.append(i)\n return(''.join(string))","sub_path":"String/709. To lower case.py","file_name":"709. 
To lower case.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"204953294","text":"#Delay between updates (default = 20 (ms))\r\ndelay = 20\r\n#Width and height of the window (default = 450,450)\r\nwidth = 450\r\nheight = 450\r\n#Font size (optimal = 1.0 for windows, 1.5 for linux)\r\nfont_multiplier\t= 1.5\r\n#Highlight rows and columns that are being checked (default = False, only recommended with high delay)\r\nenable_hightlight = True\r\n#Highlight border size of the current spot that is being checked (default = 6)\r\nhightlight_border_size = 6\r\n#Enable visualization of the backtracking algorithm (default = True)\r\n#Without visualization, delay will be 0 and no updates will be shown until the sudoku is solved\r\nvisualize = True\r\n\r\n#Example board:\r\n#8...9.5.19.6531.2.........91..98.7..6.3...29...2......2...1...5.....8...3....6...\r\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"194027595","text":"from typing import Callable, Optional\n\n\nclass ListNode:\n def __init__(self, val: int) -> None:\n self.val = val\n self.next: Optional[ListNode] = None\n\n\ndef create_node_list(values: list[int]) -> Optional[ListNode]:\n \"\"\"Creates a ListNode out of a list of values\"\"\"\n if len(values) == 0:\n return None\n\n head = ListNode(values[0])\n\n last_node = head\n for value in values[1:]:\n node = ListNode(value)\n last_node.next = node\n last_node = node\n\n return head\n\n\ndef create_intersecting_lists(\n valuesA: list[int],\n valuesB: list[int],\n skipA: int,\n skipB: int,\n) -> tuple[Optional[ListNode], Optional[ListNode]]:\n \"\"\"Creates intersecting linked lists\"\"\"\n listA = create_node_list(valuesA)\n listB = create_node_list(valuesB)\n\n indexA = 0\n nodeA = listA\n intersecting_node: Optional[ListNode] = None\n while indexA < skipA:\n assert nodeA is not None\n nodeA = nodeA.next\n indexA += 1\n\n intersecting_node = nodeA\n\n indexB = 0\n nodeB = listB\n while indexB < skipB - 1:\n assert nodeB is not None\n nodeB = nodeB.next\n indexB += 1\n\n assert nodeB is not None\n nodeB.next = intersecting_node\n\n return listA, listB\n\n\ndef get_values(node: ListNode) -> list[int]:\n \"\"\"Returns the values in linked list\"\"\"\n values = [node.val]\n curr = node.next\n while curr is not None:\n values.append(curr.val)\n curr = curr.next\n\n return values\n\n\ndef get_length(ll: Optional[ListNode]) -> int:\n \"\"\"Returns length of linked list\"\"\"\n length = 0\n while ll is not None:\n ll = ll.next\n length += 1\n\n return length\n\n\nclass Solution:\n def getIntersectionNode(\n self,\n headA: Optional[ListNode],\n headB: Optional[ListNode],\n ) -> Optional[ListNode]:\n lenA = get_length(headA)\n lenB = get_length(headB)\n\n longer, shorter = (headA, headB) if lenA > lenB else (headB, headA)\n\n node1 = longer\n node2 = shorter\n for _ in range(abs(lenA - lenB)):\n assert node1 is not None\n node1 = node1.next\n\n while node1 is not None:\n assert node2 is not None\n\n if node1 == node2:\n return node1\n\n node1 = node1.next\n node2 = node2.next\n\n return None\n\n\ntests = [\n (\n (8, [4, 1, 8, 4, 5], [5, 6, 1, 8, 4, 5], 2, 3,),\n 8,\n ),\n (\n (2, [1, 9, 1, 2, 4], [3, 2, 4], 3, 1,),\n 2,\n ),\n (\n (0, [2, 6, 4], [1, 5], 3, 2,),\n None,\n ),\n]\n\n\ndef validator(\n getIntersectionNode: Callable[\n [Optional[ListNode], 
Optional[ListNode]],\n Optional[ListNode]\n ],\n inputs: tuple[int, list[int], list[int], int, int],\n expected: int\n) -> None:\n _, values1, values2, skipA, skipB = inputs\n node_list1, node_list2 = create_intersecting_lists(\n values1, values2, skipA, skipB,\n )\n intersecting_node = getIntersectionNode(node_list1, node_list2)\n node_value = intersecting_node.val if intersecting_node else None\n assert node_value == expected, (node_value, expected)\n","sub_path":"intersection_of_two_linked_lists.py","file_name":"intersection_of_two_linked_lists.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"501261419","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 15 00:23:02 2021\r\n\r\n@author: clu1\r\n\r\nHW #2: Mattress POS Prototype\r\nThis program will calculate the cost of a mattress\r\n\r\nI completed this work individually\r\n\"\"\"\r\ntax_rate = .0625\r\npromo_discount = .1\r\n\r\nprint(\"Welcome to the mattress price simulator!\")\r\n\r\n\r\n#Gets the mattress brand and stores it as Sealy or Simmons\r\n#Gets the size based on if brand is Sealy or Simmons\r\nmattress_brand = input('Please select the mattress brand (1 - Sealy, 2 - Simmons): ')\r\nwhile mattress_brand != \"1\" and mattress_brand != \"2\":\r\n mattress_brand = input(\"Please enter '1' or '2': \") \r\nif mattress_brand == \"1\":\r\n mattress_brand = 'Sealy'\r\n size = input(\"Please select the size (K = King, Q = Queen, T = Twin): \")\r\n #Makes string uppercase to accept both upper and lowercase\r\n size = size.capitalize()\r\n while size != \"K\" and size != \"Q\" and size != \"T\":\r\n size = input(\"Please enter 'K', 'Q', or 'T': \")\r\n size = size.capitalize()\r\nif mattress_brand == \"2\":\r\n mattress_brand = 'Simmons'\r\n size = input(\"Please select the size (K = King, Q = Queen, F = Full): \")\r\n #Makes string uppercase to accept both upper and lowercase\r\n size = size.capitalize()\r\n while size != \"K\" and size != \"Q\" and size != \"F\":\r\n size = input(\"Please enter 'K', 'Q', or 'F': \")\r\n size = size.capitalize()\r\nif size == \"K\":\r\n size = \"King\"\r\nif size == \"Q\":\r\n size = \"Queen\"\r\nif size == \"T\":\r\n size = \"Twin\"\r\nif size == \"F\":\r\n size = \"Full\"\r\n\r\n\r\ncomfort_level = input(\"Please enter the comfort level (M - Medium, F - Firm, E - Extra Firm): \")\r\ncomfort_level = comfort_level.capitalize()\r\nwhile comfort_level != \"M\" and comfort_level != \"F\" and comfort_level != \"E\":\r\n comfort_level = input(\"Please enter 'M', 'F', or 'E': \")\r\n comfort_level = comfort_level.capitalize()\r\nif comfort_level == \"M\":\r\n comfort_level = \"Medium\"\r\nif comfort_level == \"F\":\r\n comfort_level = \"Firm\"\r\nif comfort_level == \"E\":\r\n comfort_level = \"Extra Firm\"\r\n \r\nbox_springs = input(\"Do you like to have box springs? (Y - Yes, N - No): \")\r\nbox_springs = box_springs.capitalize()\r\nwhile box_springs != \"Y\" and box_springs != \"N\":\r\n box_springs = input(\"Please enter 'Y' or 'N': \")\r\n box_springs = box_springs.capitalize()\r\n\r\nshipping = input(\"Which shipping mode do you like? 
(S - Standard, N - Next Day): \")\r\nshipping = shipping.capitalize()\r\nwhile shipping != \"S\" and shipping != \"N\":\r\n shipping = input(\"Please enter 'S' or 'N': \")\r\n shipping = shipping.capitalize()\r\nif shipping == \"S\":\r\n shipping_price = 100\r\nelse:\r\n shipping_price = 300\r\n\r\npromotion = input(\"Promotion code: \")\r\npromotion = promotion.lower()\r\n\r\n#symbolic constants for mattress each option\r\nsealy_medium_king = 1800\r\nsealy_medium_queen = 1400\r\nsealy_medium_twin= 900\r\nsealy_firm_king= 2200\r\nsealy_firm_queen = 1800\r\nsealy_firm_twin = 1300\r\nsealy_extra_king = 2400\r\nsealy_extra_queen = 2000\r\nsealy_extra_twin = 1500\r\nsimmons_medium_king = 2000\r\nsimmons_medium_queen = 1400\r\nsimmons_medium_full = 1000\r\nsimmons_firm_king = 2500\r\nsimmons_firm_queen = 1900\r\nsimmons_firm_full = 1500\r\nsimmons_extra_king = 3000\r\nsimmons_extra_queen = 2400\r\nsimmons_extra_full = 2000\r\n\r\nprint(\"========== Order Summary ==========\")\r\nprint(mattress_brand + \", \" + size + \", \" + comfort_level + \": \")\r\n\r\n#prints the mattress price\r\nif mattress_brand == \"Sealy\":\r\n if comfort_level == \"Medium\":\r\n if size == \"King\":\r\n print(f\"Mattress: $ {sealy_medium_king:,.2f}\")\r\n mattress_price = sealy_medium_king\r\n if size == \"Queen\":\r\n print(f\"Mattress: $ {sealy_medium_queen:,.2f}\")\r\n mattress_price = sealy_medium_queen\r\n if size == \"Twin\":\r\n print(f\"Mattress: $ {sealy_medium_twin:,.2f}\")\r\n mattress_price = sealy_medium_twin\r\n if comfort_level == \"Firm\":\r\n if size == \"King\": \r\n print(f\"Mattress: $ {sealy_firm_king:,.2f}\")\r\n mattress_price = sealy_firm_king\r\n if size == \"Queen\":\r\n print(f\"Mattress: $ {sealy_firm_queen:,.2f}\")\r\n mattress_price = sealy_firm_queen\r\n if size == \"Twin\":\r\n print(f\"Mattress: $ {sealy_firm_twin:,.2f}\")\r\n mattress_price = sealy_firm_twin\r\n if comfort_level == \"Extra Firm\":\r\n if size == \"King\":\r\n print(f\"Mattress: $ {sealy_extra_king:,.2f}\")\r\n mattress_price = sealy_extra_king\r\n if size == \"Queen\":\r\n print(f\"Mattress: $ {sealy_extra_queen:,.2f}\")\r\n mattress_price = sealy_extra_queen\r\n if size == \"Twin\":\r\n print(f\"Mattress: $ {sealy_extra_twin:,.2f}\") \r\n mattress_price = sealy_extra_twin\r\nif mattress_brand == \"Simmons\":\r\n if comfort_level == \"Medium\":\r\n if size == \"King\":\r\n print(f\"Mattress: $ {simmons_medium_king:,.2f}\")\r\n mattress_price = simmons_medium_king\r\n if size == \"Queen\":\r\n print(f\"Mattress: $ {simmons_medium_queen:,.2f}\")\r\n mattress_price = simmons_medium_queen\r\n if size == \"Full\":\r\n print(f\"Mattress: $ {simmons_medium_full:,.2f}\")\r\n mattress_price = simmons_medium_full\r\n if comfort_level == \"Firm\":\r\n if size == \"King\":\r\n print(f\"Mattress: $ {simmons_firm_king:,.2f}\")\r\n mattress_price = simmons_firm_king\r\n if size == \"Queen\":\r\n print(f\"Mattress: $ {simmons_firm_queen:,.2f}\")\r\n mattress_price = simmons_firm_queen\r\n if size == \"Full\":\r\n print(f\"Mattress: $ {simmons_firm_full:,.2f}\")\r\n mattress_price = simmons_firm_full\r\n if comfort_level == \"Extra Firm\":\r\n if size == \"King\":\r\n print(f\"Mattress: $ {simmons_extra_king:,.2f}\")\r\n mattress_price = simmons_extra_king\r\n if size == \"Queen\":\r\n print(f\"Mattress: $ {simmons_extra_queen:,.2f}\")\r\n mattress_price = simmons_extra_queen\r\n if size == \"Full\":\r\n print(f\"Mattress: $ {simmons_extra_full:,.2f}\") \r\n mattress_price = simmons_extra_full\r\n\r\n\r\n#Prints box spring 
price\r\nif box_springs == \"Y\":\r\n if size == \"King\":\r\n box_springs = 400\r\n print(f\"Box springs: $ {box_springs:,.2f}\")\r\n if size == \"Queen\":\r\n box_springs = 300\r\n print(f\"Box springs: $ {box_springs:,.2f}\")\r\n if size == \"Full\":\r\n box_springs = 200\r\n print(f\"Box springs: $ {box_springs:,.2f}\") \r\n if size == \"Twin\":\r\n box_springs = 100\r\n print(f\"Box springs: $ {box_springs:,.2f}\")\r\nelse:\r\n box_springs = 0\r\n\r\n\r\nif promotion == \"sleep\":\r\n discount = (mattress_price + box_springs)*promo_discount*-1\r\n print(f\"Discount: $ {discount:,.2f}\")\r\nelse:\r\n discount = 0\r\n \r\nsubtotal = mattress_price + box_springs + discount\r\nprint(f\"Subtotal: $ {subtotal:,.2f}\")\r\n\r\nif shipping == 'N':\r\n print(f\"Next day shipping: $ {shipping_price:,.2f}\")\r\nelse:\r\n print(f\"Standard shipping: $ {shipping_price:,.2f}\")\r\n\r\ntax = subtotal*tax_rate\r\nprint(f\"Tax: $ {tax:,.2f}\")\r\n\r\nprint(\"-----------------------------------\")\r\ntotal = subtotal + shipping_price + tax\r\nprint(f\"Total: $ {total:,.2f}\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"614256202","text":"## 변수 선언 부분\nselect, answer, numStr, num1, num2, a, b, ch,i, k, guguLine, dan1, dan2 = 0, 0, \"\", 0, 0, 0, 0, \"\", 0, 0, \"\", 0, 0\n\n## 메인(main) 코드 부분\nwhile True :\n select=int(input(\"\\n1. 간단한 계산기 2. 수식 계산기 3. 두수 사이 합계 4. 구구단 출력기: \"))\n\n if select == 1 :\n a=int(input(\"첫번째 수를 입력하세요 : \"))\n ch=input(\"계산할 연산자를 입력하세요 : \")\n b=int(input(\"두번째 수를 입력하세요 : \"))\n\n if ch == \"+\" :\n print(\" %d + %d = %d 입니다. \" % (a, b, a + b))\n elif ch == \"-\" :\n print(\" %d - %d = %d 입니다. \" % (a, b, a - b))\n elif ch == \"*\" :\n print(\" %d * %d = %d 입니다. \" % (a, b, a * b))\n elif ch == \"/\" :\n print(\" %d / %d = %f 입니다. \" % (a, b, a / b)) \n elif ch == \"%\" :\n print(\" %d %% %d = %d 입니다. \" % (a, b, a % b))\n elif ch == \"//\" :\n print(\" %d // %d = %d 입니다. \" % (a, b, a // b))\n elif ch == \"**\" :\n print(\" %d ** %d = %d 입니다. \" % (a, b, a ** b)) \n else :\n print(\" 알 수 없는 연산자 입니다.\" ) \n elif select == 2 :\n numStr=input(\" *** 수식을 입력하세요 : \")\n answer = eval(numStr)\n print(\" %s 결과는 %5.1f 입니다. \" % (numStr, answer))\n elif select == 3 :\n num1=int(input(\" *** 첫번째 숫자를 입력하세요 : \"))\n num2=int(input(\" *** 두번째 숫자를 입력하세요 : \"))\n for i in range(num1, num2+1) :\n answer = answer + i\n print(\" %d+...+%d는 %d입니다. 
\" % (num1, num2, answer))\n elif select == 4 :\n dan1 = int(input(\"몇 단 부터?\"))\n dan2 = int(input(\"몇 단 까지?\"))\n\n for i in range(dan1, dan2+1) :\n guguLine = guguLine + (\" # %d단 # \" % i)\n\n print(guguLine)\n\n if dan2<10 :\n for i in range(2, 10) :\n guguLine=\"\"\n for k in range(dan1, dan2+1) :\n guguLine = guguLine + str(\"%4dX%2d=%4d\" % (k, i, k*i))\n print(guguLine)\n\n else :\n for i in range(2, dan2+1) :\n guguLine=\"\"\n for k in range(dan1, dan2+1) :\n guguLine = guguLine + str(\"%4dX%2d=%4d\" % (k, i, k*i))\n print(guguLine)\n\n\n else :\n print(\"1, 2, 3, 4중 하나만 입력해야 합니다.\")\n\n","sub_path":"컴퓨팅 사고력 향상을 위한 SW코딩/Calculator1.py","file_name":"Calculator1.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"551240895","text":"import codecs\nimport os\nfrom abc import abstractmethod, ABC\nfrom random import shuffle\nfrom typing import Generator\nimport numpy as np\nfrom tfaip.base.data.pipeline.definitions import PipelineMode, Sample\n\nfrom calamari_ocr.ocr.dataset.params import InputSample\n\n\nclass DataReader(ABC):\n def __init__(self, mode: PipelineMode, skip_invalid=False, remove_invalid=True, **kwargs):\n \"\"\" Dataset that stores a list of raw images and corresponding labels.\n\n Parameters\n ----------\n skip_invalid : bool\n skip invalid files instead of throwing an Exception\n remove_invalid : bool\n remove invalid files, thus dont count them to possible error on this data set\n \"\"\"\n self._samples = []\n self.loaded = False\n self.mode = mode\n self.auto_repeat = False\n\n self.skip_invalid = skip_invalid\n self.remove_invalid = remove_invalid\n\n self.n_folds = -1\n\n def populate_folds(self, n_folds):\n self.n_folds = n_folds\n\n sample_idx = list(range(len(self._samples)))\n shuffle(sample_idx)\n for i, idx in enumerate(sample_idx):\n self._samples[i]['fold_id'] = i % n_folds\n\n def __len__(self):\n \"\"\" Number of samples\n\n Returns\n -------\n int\n Number of samples\n \"\"\"\n return len(self._samples)\n\n def sample_by_id(self, id_) -> dict:\n return next(sample for sample in self._samples if sample['id'] == id_)\n\n def samples(self):\n \"\"\" List of all samples\n\n Returns\n -------\n list of dict\n List of all samples\n\n \"\"\"\n return self._samples\n\n def add_sample(self, sample):\n \"\"\" Add a sample\n\n Parameters\n ----------\n sample : dict\n The sample\n \"\"\"\n if not isinstance(sample, dict):\n raise Exception(\"A sample is expected to be a dictionary\")\n\n if \"id\" not in sample:\n raise Exception(\"A sample needs an id\")\n\n self.loaded = False\n if 'fold_id' not in sample:\n sample['fold_id'] = -1 # dummy fold ID\n self._samples.append(sample)\n\n def is_sample_valid(self, sample, line, text):\n if self.mode == PipelineMode.Prediction or self.mode == PipelineMode.Training or self.mode == PipelineMode.Evaluation:\n # skip invalid imanges (e. g. 
corrupted or empty files)\n if line is None or (line.size == 0 or np.amax(line) == np.amin(line)):\n return False\n\n return True\n\n def store_text(self, sentence, sample, output_dir, extension):\n output_dir = output_dir if output_dir else os.path.dirname(sample['image_path'])\n bn = sample.get('base_name', sample['id'])\n with codecs.open(os.path.join(output_dir, bn + extension), 'w', 'utf-8') as f:\n f.write(sentence)\n\n def store_extended_prediction(self, data, sample, output_dir, extension):\n bn = sample.get('base_name', sample['id'])\n if extension == \"pred\":\n with open(os.path.join(output_dir, bn + \".pred\"), 'wb') as f:\n f.write(data)\n elif extension == \"json\":\n with open(os.path.join(output_dir, bn + \".json\"), 'w') as f:\n f.write(data)\n else:\n raise Exception(\"Unknown prediction format.\")\n\n def prepare_store(self):\n pass\n\n def store(self, extension):\n # either store text or store (e. g. if all predictions must be written at the same time\n pass\n\n def generate(self, epochs=1) -> Generator[Sample, None, None]:\n if self.auto_repeat:\n epochs = -1\n\n while epochs != 0:\n epochs -= 1\n if self.mode == PipelineMode.Training:\n # no pred_and_eval bc it's shuffle\n shuffle(self._samples)\n for sample in self._generate_epoch(text_only=self.mode == PipelineMode.Targets):\n yield sample.to_input_target_sample()\n\n def _generate_epoch(self, text_only) -> Generator[InputSample, None, None]:\n for sample in self._sample_iterator():\n for raw_sample in self._load_sample(sample, text_only=text_only):\n assert isinstance(raw_sample, InputSample)\n yield raw_sample\n\n def _sample_iterator(self):\n return self._samples\n\n @abstractmethod\n def _load_sample(self, sample, text_only) -> Generator[InputSample, None, None]:\n raise NotImplementedError\n\n\n","sub_path":"calamari_ocr/ocr/dataset/datareader/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"325151110","text":"import time as t\r\nfrom os import path\r\n#\r\ndef createFile(dest):\r\n\r\n time_form=t.localtime(t.time())\r\n print(\"time_form : \",time_form)\r\n\r\n filename=\"%d_%d_%d.txt\"%(time_form[1],time_form[2],(time_form[0]%100))\r\n print(\"filename : \",filename)\r\n\r\n if not path.isfile(dest+filename):\r\n f=open(dest+filename, \"w\")\r\n f.write(\"\\n\"*30)\r\n f.close()\r\n\r\n\r\nif __name__==\"__main__\":\r\n destination=\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\\"\r\n createFile(destination)\r\n print(\"OK done!\")\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"339675695","text":"import csv\r\n\r\nclass DataPoint:\r\n def __init__(self, heelVoltage, ballVoltage, bigtoeVoltage, littletoeVoltage):\r\n self.heelVoltage = heelVoltage\r\n self.ballVoltage = ballVoltage\r\n self.bigtoeVoltage = bigtoeVoltage\r\n self.littletoeVoltage = littletoeVoltage\r\n\r\nclass DataFile:\r\n def __init__(self, filename):\r\n self.inputFile = filename\r\n self.midheelVoltage = 0 # average voltage for all data points (heel)\r\n self.midballVoltage = 0 # '' (ball)\r\n self.midbigVoltage = 0 # '' (big toe)\r\n self.midlittleVoltage = 0 # '' (little toe)\r\n self.dataPoints = [] # list of all data points\r\n self.sortedData = {} # dictionary to hold 6 lists of data points corresponding to each stage of the gait cycle\r\n \r\n def 
collectData(self):\r\n i = 0\r\n with open(self.inputFile, encoding='utf-8-sig') as inputData:\r\n reader = csv.reader(inputData)\r\n for line in reader:\r\n heelVoltage = float(line[0])\r\n ballVoltage = float(line[1])\r\n bigtoeVoltage = float(line[2])\r\n littletoeVoltage = float(line[3])\r\n self.dataPoints.append(\r\n DataPoint(heelVoltage, ballVoltage, bigtoeVoltage, littletoeVoltage)\r\n )\r\n self.midheelVoltage += heelVoltage\r\n self.midballVoltage += ballVoltage\r\n self.midbigVoltage += bigtoeVoltage\r\n self.midlittleVoltage += littletoeVoltage\r\n i += 1\r\n self.midheelVoltage /= i # dividing by number of data points to get average value for each parameter\r\n self.midballVoltage /= i\r\n self.midbigVoltage /= i\r\n self.midlittleVoltage /= i\r\n \r\n def sortData(self):\r\n initialContact = [] # lists of data points corresponding to each phase of the gait cycle\r\n loadingResponse = []\r\n midStance = []\r\n terminalStance = []\r\n preSwing = []\r\n swingPhase = []\r\n errors = 0\r\n for data in range(len(self.dataPoints)):\r\n if self.dataPoints[data].heelVoltage >= self.midheelVoltage and self.dataPoints[data].ballVoltage < self.midballVoltage and self.dataPoints[data].bigtoeVoltage < self.midbigVoltage:\r\n initialContact.append(self.dataPoints[data])\r\n \r\n elif self.dataPoints[data].heelVoltage >= self.midheelVoltage and self.dataPoints[data].ballVoltage >= self.midballVoltage and self.dataPoints[data].bigtoeVoltage < self.midbigVoltage:\r\n loadingResponse.append(self.dataPoints[data])\r\n \r\n elif self.dataPoints[data].heelVoltage >= self.midheelVoltage and self.dataPoints[data].ballVoltage >= self.midballVoltage and self.dataPoints[data].bigtoeVoltage >= self.midbigVoltage:\r\n midStance.append(self.dataPoints[data])\r\n \r\n elif self.dataPoints[data].heelVoltage < self.midheelVoltage and self.dataPoints[data].ballVoltage >= self.midballVoltage and self.dataPoints[data].bigtoeVoltage >= self.midbigVoltage:\r\n terminalStance.append(self.dataPoints[data])\r\n \r\n elif self.dataPoints[data].heelVoltage < self.midheelVoltage and self.dataPoints[data].ballVoltage < self.midballVoltage and self.dataPoints[data].bigtoeVoltage >= self.midbigVoltage:\r\n preSwing.append(self.dataPoints[data])\r\n \r\n elif self.dataPoints[data].heelVoltage < self.midheelVoltage and self.dataPoints[data].ballVoltage < self.midballVoltage and self.dataPoints[data].bigtoeVoltage < self.midbigVoltage:\r\n swingPhase.append(self.dataPoints[data])\r\n \r\n else:\r\n errors += 1\r\n\r\n iCAverage = [0, 0, 0, 0] # average voltage for each of the sensors (heel, ball, big toe, little toe) for\r\n lRAverage = [0, 0, 0, 0] # all data points in each stage of the gait cycle\r\n mSAverage = [0, 0, 0, 0]\r\n tSAverage = [0, 0, 0, 0]\r\n pSAverage = [0, 0, 0, 0]\r\n sPAverage = [0, 0, 0, 0]\r\n for i in range(len(initialContact)):\r\n iCAverage[0] += initialContact[i].heelVoltage\r\n iCAverage[1] += initialContact[i].ballVoltage\r\n iCAverage[2] += initialContact[i].bigtoeVoltage\r\n iCAverage[3] += initialContact[i].littletoeVoltage\r\n for i in range(len(loadingResponse)):\r\n lRAverage[0] += loadingResponse[i].heelVoltage\r\n lRAverage[1] += loadingResponse[i].ballVoltage\r\n lRAverage[2] += loadingResponse[i].bigtoeVoltage\r\n lRAverage[3] += loadingResponse[i].littletoeVoltage\r\n for i in range(len(midStance)):\r\n mSAverage[0] += midStance[i].heelVoltage\r\n mSAverage[1] += midStance[i].ballVoltage\r\n mSAverage[2] += midStance[i].bigtoeVoltage\r\n mSAverage[3] += 
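# The per-phase averaging in sortData accumulates four running sums per
# phase and divides by the sample count. The same reduction written once
# for any list of (heel, ball, bigtoe, littletoe) tuples; average_columns
# is a hypothetical helper, not defined in the file itself:
def average_columns(rows):
    n = len(rows)
    return [sum(col) / n for col in zip(*rows)] if n else [0.0, 0.0, 0.0, 0.0]

print(average_columns([(1.0, 2.0, 3.0, 4.0), (3.0, 2.0, 1.0, 0.0)]))
# -> [2.0, 2.0, 2.0, 2.0]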
midStance[i].littletoeVoltage\r\n for i in range(len(terminalStance)):\r\n tSAverage[0] += terminalStance[i].heelVoltage\r\n tSAverage[1] += terminalStance[i].ballVoltage\r\n tSAverage[2] += terminalStance[i].bigtoeVoltage\r\n tSAverage[3] += terminalStance[i].littletoeVoltage\r\n for i in range(len(preSwing)):\r\n pSAverage[0] += preSwing[i].heelVoltage\r\n pSAverage[1] += preSwing[i].ballVoltage\r\n pSAverage[2] += preSwing[i].bigtoeVoltage\r\n pSAverage[3] += preSwing[i].littletoeVoltage\r\n for i in range(len(swingPhase)):\r\n sPAverage[0] += swingPhase[i].heelVoltage\r\n sPAverage[1] += swingPhase[i].ballVoltage\r\n sPAverage[2] += swingPhase[i].bigtoeVoltage\r\n sPAverage[3] += swingPhase[i].littletoeVoltage\r\n iCAverage = [num / len(initialContact) for num in iCAverage] # dividing by number of data points in each stage of gait cycle\r\n lRAverage = [num / len(loadingResponse) for num in lRAverage] # to get average value for each parameter\r\n mSAverage = [num / len(midStance) for num in mSAverage]\r\n tSAverage = [num / len(terminalStance) for num in tSAverage]\r\n pSAverage = [num / len(preSwing) for num in pSAverage]\r\n sPAverage = [num / len(swingPhase) for num in sPAverage]\r\n self.sortedData['initialContact'] = iCAverage\r\n self.sortedData['loadingResponse'] = lRAverage\r\n self.sortedData['midStance'] = mSAverage\r\n self.sortedData['terminalStance'] = tSAverage\r\n self.sortedData['preSwing'] = pSAverage\r\n self.sortedData['swingPhase'] = sPAverage\r\n\r\n \r\n def exportData(self):\r\n return self.sortedData\r\n\r\nsmurf = DataFile(\"alldata.csv\")\r\nsmurf.collectData()\r\nsmurf.sortData()\r\nsmurf.exportData()\r\n","sub_path":"Python/smurf.py","file_name":"smurf.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"129229246","text":"from django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom grandchallenge.evaluation.views import (\n ConfigUpdate,\n EvaluationDetail,\n EvaluationList,\n EvaluationUpdate,\n LeaderboardDetail,\n LegacySubmissionCreate,\n MethodCreate,\n MethodDetail,\n MethodList,\n SubmissionCreate,\n SubmissionDetail,\n SubmissionList,\n)\n\napp_name = \"evaluation\"\n\nurlpatterns = [\n path(\"config/\", ConfigUpdate.as_view(), name=\"config-update\"),\n path(\"methods/\", MethodList.as_view(), name=\"method-list\"),\n path(\"methods/create/\", MethodCreate.as_view(), name=\"method-create\"),\n path(\"methods//\", MethodDetail.as_view(), name=\"method-detail\"),\n path(\"submissions/\", SubmissionList.as_view(), name=\"submission-list\"),\n path(\n \"submissions/create/\",\n SubmissionCreate.as_view(),\n name=\"submission-create\",\n ),\n path(\n \"submissions/create-legacy/\",\n LegacySubmissionCreate.as_view(),\n name=\"submission-create-legacy\",\n ),\n path(\n \"submissions//\",\n SubmissionDetail.as_view(),\n name=\"submission-detail\",\n ),\n path(\"leaderboard/\", LeaderboardDetail.as_view(), name=\"leaderboard\",),\n path(\"\", EvaluationList.as_view(), name=\"list\"),\n path(\"/\", EvaluationDetail.as_view(), name=\"detail\"),\n path(\"/update/\", EvaluationUpdate.as_view(), name=\"update\"),\n path(\n \"results/\", RedirectView.as_view(url=\"../leaderboard/\", permanent=True)\n ),\n]\n","sub_path":"app/grandchallenge/evaluation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"99672779","text":"# 
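# The urlpatterns above rely on Django's "<int:pk>" path converters. A toy,
# dependency-free sketch of that matching idea (match_path is hypothetical,
# not a Django API):
import re

def match_path(pattern, path):
    # turn "<int:pk>" segments into named numeric regex groups
    regex = re.sub(r"<int:(\w+)>", r"(?P<\1>[0-9]+)", pattern)
    m = re.fullmatch(regex, path)
    return {k: int(v) for k, v in m.groupdict().items()} if m else None

print(match_path("submissions/<int:pk>/", "submissions/42/"))   # {'pk': 42}
print(match_path("submissions/<int:pk>/", "submissions/abc/"))  # None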
-*- coding: utf-8 -*-\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nimport re\n\n# TODO: Add domain on mrp.production.product.line field product_id\n# Add domain on mrp.bom.line field product_id \n# Investigate bug on domain on purchase.order.line field product_id\n\n\n# class product_template(osv.Model):\n#\n# \"\"\"\n# store default_code so we can sort tree views\n# \"\"\"\n#\n# _inherit = \"product.template\"\n# _order = \"default_code\"\n#\n# _columns = {\n# 'default_code': fields.related('product_variant_ids', 'default_code', type='char', string='Internal Reference', store=True),\n# }\n\n\n# class product_product(osv.Model):\n#\n# \"\"\"\n# Enforce unique product code (default_code)\n# Enforce product code formatting\n# Require product code for Stockable products (type='product')\n# TODO: Add produce_ok to product.product\n# TODO: Only require product code on Stockable products when setting purchase_ok, sale_ok, or produce_ok\n# \"\"\"\n#\n# _inherit = \"product.product\"\n#\n# _sql_constraints = [ ('default_code_uniq', 'unique (default_code)', \"\"\"Product Code must be unique.\"\"\"), ]\n#\n# default_code_pattern = r'^((COPY\\.)?[_A-Z0-9-]+\\.[A-Z-][0-9])$'\n#\n# def _require_default_code(self, cr, uid, ids, context=None):\n# # verify that we have a product code before allowing procurements\n# for product in self.browse(cr, uid, ids, context=context):\n# # require default_code when flagging for sale, purchase, produce\n# if product.type=='product' and not product.default_code and (product.purchase_ok or product.sale_ok):\n# return False\n# return True\n#\n# def _validate_default_code(self, cr, uid, ids, context=None):\n# # require valid default_code, making allowance for copies\n# for product in self.browse(cr, uid, ids, context=context):\n# if product.default_code and not re.match(self.default_code_pattern, product.default_code):\n# return False\n# return True\n#\n# def _validate_default_code_copy(self, cr, uid, ids, context=None):\n# # require completely valid default_code before allowing procurements\n# for product in self.browse(cr, uid, ids, context=context):\n# pattern = r'^COPY\\..*$'\n# if product.default_code and re.match(pattern, product.default_code) and (product.purchase_ok or product.sale_ok):\n# return False\n# return True\n#\n# _constraints = [\n# (_require_default_code, 'Stockable product type requires a valid Reference code (default_code field).', ['type','default_code','purchase_ok','sale_ok']),\n# (_validate_default_code, \"Reference code (default_code) must match this format: r'\" + default_code_pattern + \"'\", ['default_code']),\n# (_validate_default_code_copy, 'For procurements, copied Reference codes are not allowed', ['type','default_code','purchase_ok','sale_ok']),\n# ]\n#\n# def copy(self, cr, uid, id, default=None, context=None):\n# if context is None:\n# context={}\n# product = self.read(cr, uid, id, ['default_code'], context=context)\n# if not default:\n# default = {}\n# default = default.copy()\n# default['default_code'] = _('COPY.') + product['default_code']\n#\n# # unset purchase, sale, produce okay flags\n# if product.type == 'product':\n# default['purchase_ok'] = False\n# default['sale_ok'] = False\n# #default['produce_ok'] = False\n#\n# return super(product_code_unique_product, self).copy(cr=cr, uid=uid, id=id, default=default, context=context)\n\n\n\nclass uom_categ_unique(osv.Model):\n\n \"\"\"Enforce unique UOM category name\"\"\"\n\n _inherit = 'product.uom.categ'\n\n _sql_constraints = [ ('name_uniq', 'unique 
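# The commented-out constraints above validate default_code against a
# pattern and treat a "COPY." prefix specially. The two checks in isolation,
# using the pattern quoted from the file:
import re

PATTERN = r'^((COPY\.)?[_A-Z0-9-]+\.[A-Z-][0-9])$'

def is_valid(code):
    return bool(re.match(PATTERN, code))

def is_copy(code):
    return bool(re.match(r'^COPY\..*$', code))

print(is_valid("ABC-1.X2"), is_copy("COPY.ABC-1.X2"))  # True True
print(is_valid("abc"))  # False: lowercase codes are rejected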
(name)', \"\"\"Category name must be unique.\"\"\"), ]\n\n def copy(self, cr, uid, id, default=None, context=None):\n if context is None:\n context={}\n\n product = self.read(cr, uid, id, ['name'], context=context)\n if not default:\n default = {}\n default = default.copy()\n default['name'] = product['name'] + _(' (copy)')\n\n return super(uom_categ_unique, self).copy(cr=cr, uid=uid, id=id, default=default, context=context)\n\n\nclass uom_unique(osv.Model):\n \n \"\"\"Enforce unique UOM name\"\"\"\n \n _inherit = 'product.uom'\n \n _sql_constraints = [ ('name_uniq', 'unique (name)', \"\"\"UOM Name must be unique.\"\"\"), ]\n\n def copy(self, cr, uid, id, default=None, context=None):\n if context is None:\n context={}\n\n product = self.read(cr, uid, id, ['name'], context=context)\n if not default:\n default = {}\n default = default.copy()\n default['name'] = product['name'] + _(' (copy)')\n\n return super(uom_unique, self).copy(cr=cr, uid=uid, id=id, default=default, context=context)\n\n\n\n\n","sub_path":"azi_product/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"280847851","text":"import cv2\n\no1 = cv2.imread('img6.jpg')\no2 = cv2.imread('img9.jpg')\no3 = cv2.imread('yuko2.jpg')\n\ngray1 = cv2.cvtColor(o1, cv2.COLOR_BGR2GRAY)\ngray2 = cv2.cvtColor(o2, cv2.COLOR_BGR2GRAY)\ngray3 = cv2.cvtColor(o3, cv2.COLOR_BGR2GRAY)\n\nret, binary1 = cv2.threshold(gray1, 127, 255, cv2.THRESH_BINARY)\nret, binary2 = cv2.threshold(gray2, 127, 255, cv2.THRESH_BINARY)\nret, binary3 = cv2.threshold(gray3, 127, 255, cv2.THRESH_BINARY)\n\ncontours1, hierarchy = cv2.findContours(binary1, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\ncontours2, hierarchy = cv2.findContours(binary2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\ncontours3, hierarchy = cv2.findContours(binary3, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\ncnt1 = contours1[0]\ncnt2 = contours2[0]\ncnt3 = contours3[0]\n\nret0 = cv2.matchShapes(cnt1, cnt1, 1, 0.0)\nret1 = cv2.matchShapes(cnt1, cnt2, 1, 0.0)\nret2 = cv2.matchShapes(cnt1, cnt3, 1, 0.0)\n\nprint('same picture matchShape = ', ret0)\nprint('similar picture matchShape = ', ret1)\nprint('different matchShape = ', ret2)\n\ncv2.imshow('o1', o1)\ncv2.imshow('o2', o2)\ncv2.imshow('o3', o3)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"ex12-10.py","file_name":"ex12-10.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"248138923","text":"#!/usr/bin/env python3\n\nfrom marathon_deploy.utils.common import parse_arguments, create_client\nimport os\nimport sys\nimport time\n\n# Hide urllib warnings for insecure certificates\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=InsecureRequestWarning)\n\n\ndef main():\n args = parse_arguments()\n\n if args.appid is None:\n print(\"appid must be defined\")\n sys.exit(1)\n\n client = create_client(args.marathon, args.user, args.password, args.https_verify)\n\n time.sleep(int(os.environ.get('INITIAL_SLEEP_DURATION', '20')))\n\n while True:\n my_app = client.get_app(args.appid)\n constraint = my_app.constraints and my_app.constraints[0]\n print(constraint, 'tasks_running:', my_app.tasks_running)\n if my_app.tasks_running >= 1 and my_app.tasks_healthy >= 1:\n print(\"APP deployed on Marathon\")\n break\n if my_app.tasks_staged 
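# Both copy() overrides above append " (copy)" to a name that must stay
# unique. A sketch of that naming rule; the while loop is an extra guard
# for repeated copies, which the originals leave to the SQL constraint:
def copy_name(name, existing_names):
    candidate = name + " (copy)"
    while candidate in existing_names:
        candidate += " (copy)"
    return candidate

print(copy_name("Unit(kg)", {"Unit(kg)", "Unit(kg) (copy)"}))
# -> Unit(kg) (copy) (copy)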
>= 1:\n print(\"APP in staging mode \")\n\n time.sleep(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"marathon_deploy/checkappdeploy.py","file_name":"checkappdeploy.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"291021762","text":"#!/usr/bin/env python\nimport os.path\nimport subprocess\nimport argparse\nimport shutil\n\n\ncl_parser = argparse.ArgumentParser()\ncl_parser.add_argument('node_num', type=int, help='Specify node number')\ncl_parser.add_argument('dns_address', help='Specify app\\'s DNS address')\ncl_parser.add_argument('region', help='Specify AWS region')\ncl_parser.add_argument('public_ip', help='Specify node public IP')\ncl_parser.add_argument('private_ip', help='Specify node private IP')\nargs = cl_parser.parse_args()\n\nos.chdir(os.path.abspath(os.path.dirname(__file__)))\n\nos.chdir('assets/certificates')\n\nwith file('master{0}-master.json'.format(args.node_num), 'wt') as f:\n f.write(\"\"\"{{\n \"CN\": \"master{0}.{1}\",\n \"hosts\": [\n \"{1}\",\n \"{2}\",\n \"{3}\",\n \"ip-{4}.{5}.compute.internal\",\n \"10.3.0.1\",\n \"127.0.0.1\",\n \"localhost\"\n ],\n \"key\": {{\n \"algo\": \"rsa\",\n \"size\": 2048\n }},\n \"names\": [\n {{\n \"C\": \"DE\",\n \"L\": \"Germany\",\n \"ST\": \"\"\n }}\n ]\n}}\n\"\"\".format(\n args.node_num, args.dns_address, args.public_ip, args.private_ip,\n args.private_ip.replace('.', '-'),\n args.region\n ))\n\nsubprocess.check_call(\n 'cfssl gencert -initca=true ca-csr.json | cfssljson -bare ca -',\n shell=True)\nsubprocess.check_call(\n 'cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json '\n '-profile=client-server master{0}-master.json | '\n 'cfssljson -bare master{0}-master-peer'.format(args.node_num),\n shell=True)\nsubprocess.check_call(\n 'cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json '\n '-profile=client-server master{0}-master.json | '\n 'cfssljson -bare master{0}-master-client'.format(args.node_num),\n shell=True)\n","sub_path":"terraform-aws/master/generate-certs.py","file_name":"generate-certs.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"72768524","text":"\"\"\"\n This module provides some helpful functions and variables to parse commands from prompt\n\"\"\"\n\nfrom links import CONST_EMAILS, SOCIAL_MEDIA, CODING_MEDIA, LINKEDIN, BLOGS, SCHOOL\n\n\n'''MAKE THE COMMANDS DICTIONARY CLASSWIDE OBJECT SO IT CAN BE ACCESSED BY OTHER MODULES'''\n\ncommands = {\n 'twitter': SOCIAL_MEDIA[0],\n 'instagram': SOCIAL_MEDIA[1],\n 'github': CODING_MEDIA[0],\n 'linkedin': LINKEDIN[0],\n 'kaggle': CODING_MEDIA[1],\n 'mail': CONST_EMAILS, # all emails opened\n 'mail0': CONST_EMAILS[0], # open bxsci email\n 'mail1': CONST_EMAILS[1], # open eps email\n 'mail2': CONST_EMAILS[2], # open personal gmail\n 'xkcd': BLOGS[0], # xkcd main\n 'blog': BLOGS[1], # xkcd blog\n 'school': SCHOOL[0], # bxsci main website\n 'sciencesurvey': SCHOOL[1] # the science survey online newspaper\n}\n\ninput_commands = {\n \"t\": 'twitter',\n \"twitter\": 'twitter',\n 'insta': 'instagram',\n 'instagram': 'instagram',\n 'i': 'instagram',\n \"g\": 'github',\n \"git\": 'github',\n \"github\": 'github',\n \"l\": 'linkedin',\n \"linkedin\": 'linkedin',\n \"k\": 'kaggle',\n \"kaggle\": 'kaggle',\n \"mail\": 'mail',\n \"mail0\": 'mail0',\n \"mail1\": 'mail1',\n \"mail2\": 'mail2',\n \"email\": 'mail',\n \"m\": 'mail',\n \"m0\": 
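# generate-certs.py above shells out with "cfssl ... | cfssljson ..." via
# shell=True. A sketch of the same pipeline built from two Popen objects
# instead (pipe is a hypothetical helper; echo/cat stand in for
# cfssl/cfssljson so the example runs anywhere with POSIX tools):
import subprocess

def pipe(cmd1, cmd2):
    p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
    p2 = subprocess.Popen(cmd2, stdin=p1.stdout)
    p1.stdout.close()  # let cmd2 see EOF once cmd1 exits
    rc2 = p2.wait()
    rc1 = p1.wait()
    if rc1 or rc2:
        raise subprocess.CalledProcessError(rc2 or rc1, cmd2)

pipe(["echo", "hello"], ["cat"])  # prints: hello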
'mail0',\n \"m1\": 'mail1',\n \"m2\": 'mail2',\n \"xkcd\": 'xkcd',\n \"x\": 'xkcd',\n \"xk\": 'xkcd',\n \"xkcd blog\": 'blog',\n \"x blog\": 'blog',\n \"xk blog\": 'blog',\n \"s\": 'school',\n \"school\": 'school',\n \"s news\": 'sciencesurvey',\n \"school news\": 'sciencesurvey',\n}\n\ndef command_to_link(comm):\n return commands[comm.lower()]\n\ndef convert_to_command(comm):\n return input_commands[comm.lower()]\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"295837898","text":"from selenium import webdriver\nfrom interfaceTest.config.main import *\nimport requests\nimport json\nimport ddt\nfrom interfaceTest.libext.HTMLTestRunner import *\nfrom interfaceTest.config.config import *\n\n\n@ddt.ddt\nclass test_intelligentFaultAnalysis(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls): # 类中最先执行\n cls.path = '{}/testOutput/interface-sheet.xls'.format(path_dir)\n # create_sheet(cls.path)\n cls.driver = webdriver.Chrome()\n\n cls.driver.maximize_window()\n cls.driver.get(interface_url + '/darams/doc.html')\n time.sleep(2)\n\n @classmethod\n def tearDownClass(cls):\n # cls.driver = driver# 类中最后执行\n cls.driver.quit()\n\n @ddt.file_data('{}/testInput/test_ramsEvaluate_input/test_1_login.json'.format(path_dir))\n def test_1_login(self, username, password):\n # \"\"\"登录接口\"\"\"\n try:\n # f = open('')\n\n driver = self.driver\n driver.refresh()\n\n time.sleep(2)\n # 点击搜索\n\n Method(driver).click('xpath', '//span[contains(text(),\"登录管理\")]')\n time.sleep(1)\n\n # 点击登录\n driver.find_element_by_xpath('//span[text()=\"登录\"]').click()\n time.sleep(1)\n # 获取登录接口的地址\n login_interface_url = driver.find_element_by_xpath('//span[text()=\"接口地址\"]/../code').text\n\n # 访问登录接口,拿到登录校验\n login_send = requests.post(interface_url + login_interface_url,\n data={'username': '{}'.format(username), 'password': '{}'.format(password)})\n login_response = json.loads(login_send.text)\n path = self.path\n url = interface_url + login_interface_url\n params = json.dumps({'username': '{}'.format(username), 'password': '{}'.format(password)})\n if login_send.status_code == 200:\n # 得到登录接口的返回\n if login_response['success'] is True:\n globals()[\"Authorization\"] = login_response['result']\n self.assertEqual(4, 4)\n else:\n globals()[\"Authorization\"] = {}\n\n if 'result' in login_response.keys():\n result = login_response['result']\n del login_response['result']\n\n write_sheet(path, '登录接口', url, params, json.dumps(login_response, ensure_ascii=False),\n json.dumps(result, ensure_ascii=False))\n else:\n write_sheet(path, '登录接口', url, params, json.dumps(login_response, ensure_ascii=False), '')\n self.assertEqual(3, 4)\n else:\n globals()[\"Authorization\"] = {}\n\n write_sheet(path, '登录接口', url, params, '登录接口返回:{}'.format(login_send.status_code), '')\n\n self.assertEqual(3, 4)\n except AssertionError:\n logger.error(AssertionError)\n self.assertEqual(3, 4)\n\n @ddt.file_data('{}/testInput/test_intelligentFaultAnalysis_input/test_2_intelligent.json'.format(path_dir))\n def test_2_intelligentComponentChain(self,trainNo,component):\n \"\"\"查询部件位置\"\"\"\n path = self.path\n if len(globals()[\"Authorization\"]) == 0:\n write_sheet(path, '查询部件位置接口', '', '', '用户未登录', '')\n self.assertEqual(3, 4)\n else:\n try:\n driver = self.driver\n driver.refresh()\n time.sleep(2)\n Method(driver).click('xpath', '//span[contains(text(),\"智能故障分析\")]')\n time.sleep(1)\n 
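# The tests above stash the login token in globals()["Authorization"] and
# replay it as a request header in every later call. The same hand-off
# reduced to its shape (hypothetical token and endpoint payload):
import json

session_state = {}

def login(response_text):
    body = json.loads(response_text)
    if body.get("success"):
        session_state["Authorization"] = body["result"]
    return bool(body.get("success"))

def auth_headers():
    token = session_state.get("Authorization", {})
    return {"Authorization": token.get("Authorization", ""),
            "Content-Type": "application/json;charset=UTF-8"}

login('{"success": true, "result": {"Authorization": "tok-123"}}')
print(auth_headers())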
Method(driver).circle_click('查询部件位置')\n\n # 获取接口的地址\n intelligentComponentChain_interface_url = driver.find_element_by_xpath('//span[text()=\"接口地址\"]/../code').text\n\n body = {\n \"trainNo\": \"{}\".format(trainNo), \"component\": \"{}\".format(component)\n }\n\n # 请求保存接口\n intelligentComponentChain_send = requests.post(interface_url + intelligentComponentChain_interface_url, headers={\n \"Authorization\": globals()[\"Authorization\"][\"Authorization\"],\n \"Content-Type\": \"application/json;charset=UTF-8\"},\n params=body)\n intelligentComponentChain_response = json.loads(intelligentComponentChain_send.text)\n url = interface_url + intelligentComponentChain_interface_url\n params = json.dumps(body, ensure_ascii=False)\n\n if intelligentComponentChain_send.status_code == 200:\n if intelligentComponentChain_response['success'] is True:\n self.assertEqual(4, 4)\n else:\n if 'result' in intelligentComponentChain_response.keys():\n result = intelligentComponentChain_response['result']\n del intelligentComponentChain_response['result']\n\n write_sheet(path, '列车 - 根据车型/车号范围查询列车信息接口', url, params,\n json.dumps(intelligentComponentChain_response, ensure_ascii=False), json.dumps(result))\n else:\n write_sheet(path, '列车 - 根据车型/车号范围查询列车信息接口', url, params,\n json.dumps(intelligentComponentChain_response, ensure_ascii=False), '')\n\n self.assertEqual(3, 4)\n else:\n write_sheet(path, '列车 - 根据车型/车号范围查询列车信息接口', url, params, '列车 - 根据车型/车号范围查询列车信息接口返回'.format(intelligentComponentChain_response.status_code), '')\n\n self.assertEqual(3, 4)\n except AssertionError:\n logger.error(AssertionError)\n self.assertEqual(3, 4)\n\n\n def test_3_intelligentGetModifiedData(self):\n \"\"\"获取模型数据\"\"\"\n path = self.path\n if len(globals()[\"Authorization\"]) == 0:\n write_sheet(path, '获取模型数据接口', '', '', '用户未登录', '')\n self.assertEqual(3, 4)\n else:\n try:\n\n # 获取接口的地址\n intelligentGetModifiedData_interface_url = '/darams/calculator/intelligent_fault_analysis/getModifiedData'\n\n body = {\n \"adaptation\": \"\",\n \"endDate\": \"\",\n \"pageNo\": 1,\n \"pageSize\": 10,\n \"startDate\": \"\"\n }\n # 请求保存接口\n intelligentGetModifiedData_send = requests.post(interface_url + intelligentGetModifiedData_interface_url,\n headers={\"Authorization\": globals()[\"Authorization\"][\n \"Authorization\"],\"Content-Type\": \"application/json;charset=UTF-8\"},\n data=json.dumps(body))\n intelligentGetModifiedData_response = json.loads(intelligentGetModifiedData_send.text)\n url = interface_url + intelligentGetModifiedData_interface_url\n params = json.dumps(body, ensure_ascii=False)\n\n if intelligentGetModifiedData_send.status_code == 200:\n if intelligentGetModifiedData_response['success'] is True:\n self.assertEqual(4, 4)\n else:\n if 'result' in intelligentGetModifiedData_response.keys():\n result = intelligentGetModifiedData_response['result']\n del intelligentGetModifiedData_response['result']\n\n write_sheet(path, '获取模型数据接口', url, params,\n json.dumps(intelligentGetModifiedData_response, ensure_ascii=False),\n json.dumps(result))\n else:\n write_sheet(path, '获取模型数据接口', url, params,\n json.dumps(intelligentGetModifiedData_response, ensure_ascii=False), '')\n\n self.assertEqual(3, 4)\n else:\n write_sheet(path, '获取模型数据接口', url, params,\n '获取模型数据接口返回'.format(intelligentGetModifiedData_response.status_code), '')\n\n self.assertEqual(3, 4)\n\n except AssertionError:\n logger.error(AssertionError)\n self.assertEqual(3, 4)\n\n def test_4_intelligentRecongition(self):\n \"\"\"智能故障识别\"\"\"\n path = self.path\n if 
len(globals()[\"Authorization\"]) == 0:\n write_sheet(path, '智能故障识别接口', '', '', '用户未登录', '')\n self.assertEqual(3, 4)\n else:\n try:\n driver = self.driver\n driver.refresh()\n time.sleep(2)\n Method(driver).click('xpath', '//span[contains(text(),\"智能故障分析\")]')\n time.sleep(1)\n Method(driver).circle_click('智能故障识别')\n\n # 获取接口的地址\n intelligentRecongition_interface_url = driver.find_element_by_xpath('//span[text()=\"接口地址\"]/../code').text\n\n body = {\n \"credit\": \"0.2\",\n \"faultBrief\": \"灯坏了\",\n \"trainNo\": \"0259\",\n \"trainType\": \"E22\"\n }\n\n # 请求保存接口\n intelligentRecongition_send = requests.post(interface_url + intelligentRecongition_interface_url,\n headers={\n \"Authorization\": globals()[\"Authorization\"][\n \"Authorization\"],\n \"Content-Type\": \"application/json;charset=UTF-8\"},\n data=json.dumps(body))\n intelligentRecongition_response = json.loads(intelligentRecongition_send.text)\n url = interface_url + intelligentRecongition_interface_url\n params = json.dumps(body, ensure_ascii=False)\n\n if intelligentRecongition_send.status_code == 200:\n if intelligentRecongition_response['success'] is True:\n self.assertEqual(4, 4)\n else:\n if 'result' in intelligentRecongition_response.keys():\n result = intelligentRecongition_response['result']\n del intelligentRecongition_response['result']\n\n write_sheet(path, '智能故障识别接口', url, params,\n json.dumps(intelligentRecongition_response, ensure_ascii=False),\n json.dumps(result))\n else:\n write_sheet(path, '智能故障识别接口', url, params,\n json.dumps(intelligentRecongition_response, ensure_ascii=False), '')\n\n self.assertEqual(3, 4)\n else:\n write_sheet(path, '智能故障识别接口', url, params,\n '智能故障识别接口返回'.format(intelligentRecongition_response.status_code), '')\n\n self.assertEqual(3, 4)\n except AssertionError:\n logger.error(AssertionError)\n self.assertEqual(3, 4)\n\n def test_5_intelligentTrain(self):\n \"\"\"列车信息列表\"\"\"\n path = self.path\n if len(globals()[\"Authorization\"]) == 0:\n write_sheet(path, '列车信息列表', '', '', '用户未登录', '')\n self.assertEqual(3, 4)\n else:\n try:\n driver = self.driver\n driver.refresh()\n time.sleep(2)\n Method(driver).click('xpath', '//span[contains(text(),\"列车信息\")]')\n time.sleep(1)\n Method(driver).circle_click('列车信息列表')\n\n # 获取接口的地址\n intelligentTrain_interface_url = driver.find_element_by_xpath(\n '//span[text()=\"接口地址\"]/../code').text\n\n body = {\n \"pageNo\": \"1\",\n \"pageSize\": \"5\",\n \"trainNo\": \"\",\n \"vehicleNo\": \"\"\n }\n\n # 请求保存接口\n intelligentTrain_send = requests.post(interface_url + intelligentTrain_interface_url,\n headers={\n \"Authorization\": globals()[\"Authorization\"][\n \"Authorization\"],\n \"Content-Type\": \"application/json;charset=UTF-8\"},\n data=json.dumps(body))\n intelligentTrain_response = json.loads(intelligentTrain_send.text)\n url = interface_url + intelligentTrain_interface_url\n params = json.dumps(body, ensure_ascii=False)\n\n if intelligentTrain_send.status_code == 200:\n if intelligentTrain_response['success'] is True:\n self.assertEqual(4, 4)\n else:\n if 'result' in intelligentTrain_response.keys():\n result = intelligentTrain_response['result']\n del intelligentTrain_response['result']\n\n write_sheet(path, '列车信息列表接口', url, params,\n json.dumps(intelligentTrain_response, ensure_ascii=False),\n json.dumps(result))\n else:\n write_sheet(path, '列车信息列表接口', url, params,\n json.dumps(intelligentTrain_response, ensure_ascii=False), '')\n\n self.assertEqual(3, 4)\n else:\n write_sheet(path, '列车信息列表接口', url, params,\n 
'列车信息列表接口返回'.format(intelligentTrain_response.status_code), '')\n\n self.assertEqual(3, 4)\n except AssertionError:\n logger.error(AssertionError)\n self.assertEqual(3, 4)\n\n\nif __name__ == '__main__':\n report = r\"{}/Report.html\".format(path_dir) # 定义测试报告的名称(日期+report.html,引用report_name函数实现)\n fp = open(report, 'wb')\n st = unittest.TestSuite()\n # st.addTest(test_ramsInterface('test_1_login'))\n # st.addTest(test_ramsInterface('test_2_ramsEvaluate'))\n # st.addTest(test_ramsInterface('test_3_getModelStatus'))\n # st.addTest(test_ramsInterface('test_4_ramsEvaluate_charts'))\n st.addTest(unittest.makeSuite(test_intelligentFaultAnalysis))\n # unittest.main()\n runner = HTMLTestRunner(stream=fp, verbosity=2, title='接口测试报告', description='测试结果如下: ')\n runner.run(st) # 执行测试\n\n fp.close() # 关闭文件流,将HTML内容写进测试报告文件","sub_path":"interfaceTest/testCase/test_intelligentFaultAnalysis.py","file_name":"test_intelligentFaultAnalysis.py","file_ext":"py","file_size_in_byte":16166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"453947341","text":"\"\"\"This parser processes the basketball-reference teams page.\nIt returns a list of dictionaries, one per team, in the format\n\n {\n 'name': '',\n 'abbreviation': '',\n 'status': '',\n 'url': ''\n }\n\nKey data is found using the CSS select statement\n\n `#all_teams_active tr.full_table th[data-stat=franch_name] a`\n\nwhich contains the relative hrefs in the format `/teams/ATL/`.\n\"\"\"\n\nfrom bs4 import BeautifulSoup\n\nfrom fantalytix_python_crawler.crawler.sports_reference.basketball\\\n .settings import BASE_URL\n\nfrom urllib.parse import urljoin\n\nimport re\n\nclass TeamsPageParser:\n\n TEAM_TAG = '#all_teams_active tr.full_table th[data-stat=franch_name] a'\n RE_TEAM_URL = re.compile(r'/teams/([A-Z]{3})/')\n\n def __init__(self, html, parser='html.parser'):\n self.data = []\n self.html = html\n self.parser = parser\n\n def handle_data(self):\n handler = BeautifulSoup(self.html, self.parser)\n team_links = handler.select(self.TEAM_TAG)\n for link in team_links:\n rel_href = link.get('href')\n try:\n abbreviation = self.RE_TEAM_URL.match(rel_href).group(1)\n except AttributeError:\n print(\"No relative href found. 
Is '{}'\"\n \" in the correct format?\".format(rel_href))\n pass\n else:\n self.data.append({\n 'name': link.text.lower(),\n 'abbreviation': abbreviation,\n 'status': 'active',\n 'url': urljoin(BASE_URL, rel_href)\n })\n\n def get_data(self):\n if len(self.data) == 0:\n self.handle_data()\n return self.data\n","sub_path":"src/fantalytix_python_crawler/crawler/sports_reference/basketball/teams_page_parser.py","file_name":"teams_page_parser.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"348516976","text":"import sys,os\nsys.path.append(\"./\")\nfrom baseCase import BaseTestCaseClass\n\nclass awkExClass(BaseTestCaseClass):\n def __init__(self):\n self.conf_path = \"./common_conf.conf\"\n self.utp_name = \"\"\n BaseTestCaseClass.__init__(self,self.conf_path,self.utp_name)\n\n def data_runner(self,dc,UT_path):\n dc_info = dc.split(\" \")\n dc_name = dc_info[0].strip()\n dc_input_UT = dc_info[1].strip()\n dc_input_expect = dc_info[2].strip()\n if dc_input_expect == \"awk\":\n dc_input_expect = dc_input_UT.replace(\"awk_ex\",\"awk\")\n dc_input_UT = dc_input_UT.replace(\"awk_ex\",UT_path)\n dc_output_UT = os.popen(dc_input_UT).read()\n dc_output_expect = os.popen(dc_input_expect).read()\n if dc_output_UT == dc_output_expect:\n data_result = 1\n else:\n data_result = 0\n return data_result,dc_name\n\nif __name__ == \"__main__\":\n awkExTest = awkExClass()\n awkExTest.set_utp_name(\"awk_ex\")\n awkExTest.run_me()\n","sub_path":"test_suite/case_example.py","file_name":"case_example.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"293948103","text":"# Time: O(n)\n# Space: O(1)\n\n# Given an array of integers with possible duplicates,\n# randomly output the index of a given target number.\n# You can assume that the given target number must exist in the array.\n#\n# Note:\n# The array size can be very large.\n# Solution that uses too much extra space will not pass the judge.\n#\n# Example:\n#\n# int[] nums = new int[] {1,2,3,3,3};\n# Solution solution = new Solution(nums);\n#\n# // pick(3) should return either index 2, 3, or 4 randomly.\n# Each index should have equal probability of returning.\n# solution.pick(3);\n#\n# // pick(1) should return 0. 
Since in the array only nums[0] is equal to 1.\n# solution.pick(1);\n\nfrom random import randint\n\nclass Solution(object):\n\n def __init__(self, nums):\n \"\"\"\n :type nums: List[int]\n \"\"\"\n self.__nums = nums\n\n def pick(self, target):\n \"\"\"\n :type target: int\n :rtype: int\n \"\"\"\n reservoir = -1\n n = 0\n for i in xrange(len(self.__nums)):\n if self.__nums[i] != target:\n continue\n reservoir = i if randint(1, n+1) == 1 else reservoir\n n += 1\n return reservoir\n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(nums)\n# param_1 = obj.pick(target)\n","sub_path":"LeetCode/github_leetcode/Python/random-pick-index.py","file_name":"random-pick-index.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"162362134","text":"def ip2num(ip):\n ips = [int(x) for x in ip.split('.')]\n return ips[0] << 24 | ips[1] << 16 | ips[2] << 8 | ips[3]\n\ndef num2ip(num):\n return '%s.%s.%s.%s' % ((num >> 24) & 0xff, (num >> 16) & 0xff, (num >> 8) & 0xff, (num & 0xff))\n\n\ndef gen_ip(ip):\n start, end = [ip2num(x) for x in ip.split('-')]\n return [num2ip(num) for num in range(start,end+1) if num & 0xff]\n\nif __name__ == '__main__':\n a_ip = '192.168.1.1'\n if '-' in a_ip:\n print(1)\n else:\n print(2)\n a_len = a_ip.split('-')\n print(len(a_len))\n print(a_ip.find('-')) # find() returns -1 instead of raising when '-' is absent\n # if a_ip.index('-'):\n # arr = gen_ip('192.168.1.1')\n # else:\n # arr =[a_ip]\n # if arr:\n # print(1)\n # else:\n # print(2)\n # print(len(arr))","sub_path":"week03/dealIpUtil.py","file_name":"dealIpUtil.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"133483070","text":"from ext.console import debug\n\n# Written by Hyungtae Kim, in Nov 2014\n# This enables the program to hold a specific spatial frequency\n# to display a functional response.\n\nclass InvalidMetaError(Exception): pass\nclass InvalidCursorError(Exception): pass\nclass SpatialFrequencyMeta(object):\n def __init__(self, condition, sfreq_cursor=0, tfreq_cursor=0):\n self.condition = condition\n self.tfrequencies = condition['temporal_frequencies']\n self.sfrequencies = condition['spatial_frequencies']\n self.orientations = condition['orientations']\n # flickerOn was not counted, so in this case we deduct only the blankOn parameter.\n self.n_conditions = condition['nConditions'] - int(condition['blankOn'])\n self.sfreq_cursor = sfreq_cursor\n self.tfreq_cursor = tfreq_cursor\n self.verify()\n\n def verify(self):\n if self.n_conditions != reduce(\n lambda n, y: n * len(y),\n [self.sfrequencies, self.orientations, self.tfrequencies],\n 1\n ):\n raise InvalidMetaError(\n 'Logical relationship among some variables '\n 'in conditions seems invalid. '\n 'The total number of conditions should equal '\n '(number of orientations) X (number of spatial frequencies) X (number of temporal frequencies). '\n 'The program may not work correctly. '\n 'Please notify Hyungtae Kim to solve this issue. '\n '(InvalidMetaError in `verify` at `model.sfreq_meta`)'\n )\n @property\n def has_blank(self):\n return bool(self.condition['blankOn'])\n @property\n def has_flicker(self):\n return bool(self.condition['flickerOn'])\n @property\n def blank_index(self):\n if self.has_blank:\n return self.n_conditions\n @property\n def flicker_index(self):\n if self.has_flicker:\n return self.n_conditions + int(self.has_blank)\n @property\n def 
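# ip2num/num2ip above pack dotted-quad addresses into 32-bit integers with
# shifts and masks. A quick round-trip check of that encoding against the
# stdlib as an oracle (ip2num restated here for a self-contained run):
import ipaddress

def ip2num(ip):
    a, b, c, d = (int(x) for x in ip.split("."))
    return a << 24 | b << 16 | c << 8 | d

for ip in ("192.168.1.1", "10.0.0.255"):
    assert ip2num(ip) == int(ipaddress.IPv4Address(ip))
print("round-trip ok")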
sequence(self):\n return self.condition['sequence']\n @property\n def sfreq_cursor(self):\n return self._sfreq_cursor\n @property\n def tfreq_cursor(self):\n return self._tfreq_cursor\n @property\n def cur_sfreq(self):\n return self.sfrequencies[self.sfreq_cursor]\n @property\n def cur_tfreq(self):\n return self.tfrequencies[self.tfreq_cursor]\n\n @sfreq_cursor.setter\n def sfreq_cursor(self, value):\n if value not in range(len(self.sfrequencies)):\n raise InvalidCursorError(\n 'Selected frequency is out of range.'\n 'The program may not work correctly.'\n 'Please notify Hyungtae Kim to solve this issue.'\n '(InvaildCursor in `@sfreq_cursor` at `model.sfreq_meta`)'\n )\n self._sfreq_cursor = value\n\n @tfreq_cursor.setter\n def tfreq_cursor(self, value):\n if value not in range(len(self.tfrequencies)):\n raise InvalidCursorError(\n 'Selected frequency is out of range.'\n 'The program may not work correctly.'\n 'Please notify Hyungtae Kim to solve this issue.'\n '(InvaildCursor in `@sfreq_cursor` at `model.sfreq_meta`)'\n )\n self._tfreq_cursor = value\n\n @property\n def initial_index(self):\n return self.sfreq_cursor * len(self.tfrequencies) + self.tfreq_cursor\n\n @property\n def conditions_by_chunk(self):\n n_o = len(self.orientations)\n n_t = len(self.tfrequencies)\n n_s = len(self.sfrequencies)\n return range(self.initial_index, n_o*n_s*n_t, n_s*n_t)\n # start = len(self.orientations) * self.sfreq_cursor #* self.tfreq_cursor\n # stop = len(self.orientations) * (self.sfreq_cursor + 1) #* (self.tfreq_cursor + 1)\n # return range(start, stop)\n @property\n def conditions_and_orientations(self):\n conds = self.conditions_by_chunk\n return zip(conds, self.orientations)\n\n def make_analysis_data(self, response):\n if response.blank:\n return dict(\n hasblank = True,\n blank = [rep.trace.mean() for rep in response.blank.reps],\n oris = [[rep.trace.mean() for rep in ori.reps] for ori in response.orientations]\n )\n else:\n return dict(\n hasblank = False,\n blank = [],\n oris = [[rep.trace.mean() for rep in ori.reps] for ori in response.orientations]\n )\n\n def make_export_data(self, response):\n blank = (\n 'blank', [rep.trace.mean() for rep in response.blank.reps]\n ) if response.blank else ()\n oris = [\n (ori.name, [rep.trace.mean() for rep in ori.reps])\n for ori in response.orientations\n ]\n return [blank] + oris\n","sub_path":"pacu/pacu/core/io/scanimage/legacy/cfaan/model/sfreq_meta.py","file_name":"sfreq_meta.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"405248670","text":"# encoding: utf8\n\nimport maya.api.OpenMaya as om2\nimport math\n\n\ndef maya_useNewAPI():\n pass\n\n\nclass SimpleNode(om2.MPxNode):\n node_name = 'simpleNode'\n node_id = om2.MTypeId(0x84002)\n a_arg = om2.MMatrix()\n a_amp = None\n a_val = None\n\n def __init__(self):\n om2.MPxNode.__init__(self)\n\n @staticmethod\n def creator():\n return SimpleNode()\n\n @staticmethod\n def initialize():\n num_attr = om2.MFnNumericAttribute()\n mtx_attr = om2.MFnMatrixAttribute()\n\n SimpleNode.a_arg = mtx_attr.create('matrixIn', 'mtxIn', om2.MFnNumericData.kFloat)\n mtx_attr.writable = True\n mtx_attr.storable = True\n om2.MPxNode.addAttribute(SimpleNode.a_arg)\n\n SimpleNode.a_amp = num_attr.create('amp', 'amp', om2.MFnNumericData.kFloat, 1.0)\n num_attr.storable = True\n om2.MPxNode.addAttribute(SimpleNode.a_amp)\n\n SimpleNode.a_val = num_attr.create('val', 'val', om2.MFnNumericData.kFloat, 0.0)\n num_attr.storable = 
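# conditions_by_chunk above picks one condition index per orientation,
# starting at initial_index and striding by n_sfreq * n_tfreq. The indexing
# rule on its own, with hypothetical sizes:
n_orientations, n_sfreq, n_tfreq = 4, 3, 2
sfreq_cursor, tfreq_cursor = 1, 0
initial = sfreq_cursor * n_tfreq + tfreq_cursor
stride = n_sfreq * n_tfreq
print(list(range(initial, n_orientations * stride, stride)))  # [2, 8, 14, 20]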
True\n num_attr.writable = True\n om2.MPxNode.addAttribute(SimpleNode.a_val)\n\n om2.MPxNode.attributeAffects(SimpleNode.a_arg, SimpleNode.a_val)\n om2.MPxNode.attributeAffects(SimpleNode.a_amp, SimpleNode.a_val)\n\n def compute(self, plug, data):\n # type: (om.MPlug, om.MDataBlock) -> None\n\n state = om2.MFnDependencyNode(self.thisMObject()).findPlug('nodeState', False).asInt()\n if state == 1:\n data.outputValue(SimpleNode.a_val).setFloat(data.inputValue(SimpleNode.a_arg).asFloat())\n return\n\n if plug == SimpleNode.a_val:\n arg = data.inputValue(SimpleNode.a_arg).asFloat()\n amp = data.inputValue(SimpleNode.a_amp).asFloat()\n\n val_handle = data.outputValue(SimpleNode.a_val) # type: om.MDataHandle\n val_handle.setFloat(amp * math.sin(arg))\n data.setClean(plug)\n\n\ndef initializePlugin(obj):\n fn_plugin = om2.MFnPlugin(obj, 'ilya radovilsky', '1.0')\n fn_plugin.registerNode(SimpleNode.node_name, SimpleNode.node_id, SimpleNode.creator, SimpleNode.initialize)\n\n\ndef uninitializePlugin(obj):\n fn_plugin = om2.MFnPlugin(obj)\n fn_plugin.deregisterNode(SimpleNode.node_id)\n","sub_path":"tests/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"130803458","text":"from common.graph import Edge, Graph\nfrom common.utils import create_matrix, print_matrix, copy_matrix\nfrom lab6.algoritms import MaximumFlowCalculator\n\n\nclass DestinationCalculator(object):\n\n def __init__(self, table):\n self.table = copy_matrix(table)\n\n @staticmethod\n def get_cost(table, x):\n return sum(table[i][j]*x[i][j] for i in xrange(0, len(table)) for j in xrange(0, len(table)))\n\n def calculator(self):\n iterations = 0\n\n while True:\n iterations += 1\n # print('\\n--------------- Iterations {} ---------------'.format(iterations))\n\n self.loop_table()\n # print('table = ')\n # print_matrix(self.table)\n\n max_flow, graph, L = self.calculate_max_flow()\n\n # print('\\nmax flow = {}'.format(max_flow))\n # print('L = {}'.format(L))\n # for edge in sorted(graph.edges, lambda x, y: x.start - y.start):\n # print('{} -> {}: Flow = {}'.format(edge.start, edge.end, edge.flow))\n\n if max_flow >= len(self.table):\n # print('\\nSTOP')\n return self.create_x(graph)\n\n self.modificate_table(graph, L)\n\n def create_x(self, graph):\n size =len(self.table)\n U = range(1, size + 1)\n U_star = range(size + 1, size*2 + 1)\n\n result = create_matrix(size, size, 0)\n\n for edge in graph.edges:\n if edge.start in U and edge.end in U_star and edge.flow == 1:\n result[edge.start - 1][edge.end - size - 1] = 1\n\n return result\n\n def loop_table(self):\n size = len(self.table)\n\n for i in xrange(0, size):\n min_element = min(self.table[i])\n for j in xrange(0, size):\n self.table[i][j] -= min_element\n\n for i in xrange(0, size):\n min_element = min(row[i] for row in self.table)\n for j in xrange(0, size):\n self.table[j][i] -= min_element\n\n def calculate_max_flow(self):\n edges = []\n size = len(self.table)\n\n for i in xrange(1, size + 1):\n edges.append(Edge(0, i, capacity=1, flow=0))\n edges.append(Edge(i + size, size*2 + 1, capacity=1, flow=0))\n\n for row in xrange(0, size):\n for col in xrange(0, size):\n if self.table[row][col] == 0:\n edges.append(Edge(row + 1, col + 1 + size, capacity=1000000000, flow=0))\n\n graph = Graph(size*2 + 2, edges)\n\n max_flow_calculator = MaximumFlowCalculator(graph)\n graph, max_flow = max_flow_calculator.calculate(0, size*2 + 1)\n\n return max_flow, graph, 
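# compute() above outputs amp * sin(arg) unless nodeState is 1
# (HasNoEffect), in which case the input is passed through. The same
# evaluation rule without any Maya dependency; note the node reads its
# input as a float, so this models the numeric path only:
import math

def evaluate(arg, amp, paused=False):
    return arg if paused else amp * math.sin(arg)

print(evaluate(math.pi / 2, 2.0))       # -> 2.0
print(evaluate(0.5, 2.0, paused=True))  # -> 0.5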
max_flow_calculator.L\n\n def modificate_table(self, graph, L):\n size = len(self.table)\n U_1 = []\n U_2 = []\n\n for i in xrange(1, size + 1):\n if i in L:\n U_1.append(i)\n if i + size in L:\n U_2.append(i)\n\n # print('\\nU1 = {}'.format(U_1))\n # print('U2 = {}'.format(U_2))\n\n a = None\n for i in U_1:\n for j in xrange(1, size + 1):\n if j not in U_2:\n if a is None or min(a, self.table[i-1][j-1]) != a:\n a = self.table[i-1][j-1]\n\n # print('Cmin = {}'.format(a))\n\n for row in U_1:\n for j in xrange(1, size + 1):\n self.table[row-1][j-1] -= a\n\n for col in U_2:\n for j in xrange(1, size + 1):\n self.table[j-1][col-1] += a\n","sub_path":"8 semester/SAIO/lab7/algoritms.py","file_name":"algoritms.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"566831486","text":"from source.storage.stores.artifact_store.types.quest.base_artifact import QuestBaseArtifact\n\n__all__ = [\n 'ModelSelectionSummaryArtifact'\n]\n\n\nclass ModelSelectionSummaryArtifact(QuestBaseArtifact):\n type = \"model_selection_results\"\n\n def __init__(self, customer, quest_id, query_id, query_role, best_merger_id, best_merger_name, best_scorer_name,\n best_model_params, best_model_loss, selected_thresholds, models_losses):\n super(ModelSelectionSummaryArtifact, self).__init__(customer, quest_id)\n self.__models_losses = models_losses\n self.__selected_thresholds = selected_thresholds\n self.__best_model_loss = best_model_loss\n self.__best_model_params = best_model_params\n self.__best_scorer_name = best_scorer_name\n self.__best_merger_name = best_merger_name\n self.__best_merger_id = best_merger_id\n self.__query_role = query_role\n self.__query_id = query_id\n\n def _to_dict(self):\n return {\n \"query_id\": self.__query_id,\n \"query_role\": self.__query_role,\n \"chosen_merger_id\": self.__best_merger_id,\n \"chosen_merger_name\": self.__best_merger_name,\n \"chosen_scorer_name\": self.__best_scorer_name,\n \"chosen_merger_params\": self.__best_model_params,\n \"chosen_model_loss\": self.__best_model_loss,\n \"selected_thresholds\": self.__selected_thresholds,\n \"models_losses\": self.__models_losses\n }\n","sub_path":"internal/source/storage/stores/artifact_store/types/quest/model_selector.py","file_name":"model_selector.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"189705805","text":"import os\nimport time\nimport matplotlib.pyplot as plt\nfrom scapy.all import *\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nfrom datetime import timedelta\n\n\npktByte=[]\npktTime=[]\nbytes = pd.Series(pktByte).astype(int)\ntimes = pd.to_datetime(pd.Series(pktTime).astype(str), errors='coerce')\ndf2 = pd.DataFrame({\"Bytes\": bytes, \"Times\":times})\ndf2 = df2.set_index('Times')\n\ndef plist():\n global df2\n pktBytes=[]\n pktTimes=[]\n packets = rdpcap('capt.pcap')\n #Read each packet and append to the lists.\n for pkt in packets:\n if IP in pkt:\n try:\n pktBytes.append(pkt[IP].len)\n pktTime=datetime.fromtimestamp(pkt.time)\n pktTimes.append(pktTime.strftime(\"%Y-%m-%d %H:%M:%S.%f\"))\n\n except:\n pass\n\n\n bytes = pd.Series(pktBytes).astype(int)\n times = pd.to_datetime(pd.Series(pktTimes).astype(str), errors='coerce')\n df = pd.DataFrame({\"Bytes\": bytes, \"Times\":times})\n df = df.set_index('Times')\n df=df.resample('1S').sum()\n df2 = df2.append(df, ignore_index=False)\n df2 = 
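# loop_table() above is the classic row-then-column reduction: subtract
# each row's minimum, then each column's minimum, leaving at least one zero
# in every row and column. The step in isolation:
def reduce_matrix(t):
    t = [row[:] for row in t]
    for row in t:
        m = min(row)
        for j in range(len(row)):
            row[j] -= m
    for j in range(len(t)):
        m = min(row[j] for row in t)
        for row in t:
            row[j] -= m
    return t

print(reduce_matrix([[4, 1, 3], [2, 0, 5], [3, 2, 2]]))
# -> [[2, 0, 2], [1, 0, 5], [0, 0, 0]]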
df2.drop_duplicates()\n \n #print(df2)\n DelTime(60)\n #print(\"\\n \\n AFTER\",df2)\n yData = df2['Bytes']\n xData = np.arange(len(df2.index))\n #xData = df2.index\n #print(xData)\n \n #print(\"Ydata: \", yData,'\\n',\"xData: \", xData)\n plt.cla()\n plt.ylabel(\"Bytes\")\n plt.xlabel(\"Time\")\n plt.title(\"Real Time Network Traffic\")\n plt.bar(xData, yData, width=0.9, bottom=None,align='edge', data=None)\n plt.pause(1)\n frame1 = plt.gca()\n frame1.axes.get_xaxis().set_visible(False)\n #ax = plt.subplot(111)\n #ax.bar(xData, yData, width=10)\n #ax.xaxis_time()\n\ndef DelTime(tim):\n global df2\n tim = timedelta(seconds=tim)\n for Times, row in df2.iterrows():\n NowTime = (datetime.now()).time()\n Packet_Time = str(Times)\n NowTime = time_minus(NowTime,tim)\n Packet_Time = datetime.strptime(Packet_Time, \"%Y-%m-%d %H:%M:%S\" ).time()\n #print(Packet_Time<(time_minus(NowTime,tim)))\n #print(\"Packet_Time= \",Packet_Time,\"<\",\" NowTime= \", NowTime)\n epochN = int(time.mktime(time.strptime(str(NowTime), \"%H:%M:%S\")))\n epochP = int(time.mktime(time.strptime(str(Packet_Time), \"%H:%M:%S\")))\n #print((epochP Select:\n \"\"\"Emit statement to set 'jwt.claims.' for each claim in claims dict\n and a 'role'\n \"\"\"\n # Setting local variables an not be done in prepared statement\n # since JWT claims are signed, literal binds should be ok\n role_key = \"role\"\n\n # Set all jwt.claims.*\n claims = [\n func.set_config(\n literal(\"jwt.claims.\").op(\"||\")(func.cast(claim_key, Text())),\n func.cast(str(claim_value), Text()),\n True,\n )\n for claim_key, claim_value in jwt_claims.items()\n ]\n # Set all role claim if exists from jwt\n if role_key in jwt_claims:\n claims.append(\n func.set_config(\n func.cast(role_key, Text()),\n func.cast(str(jwt_claims[role_key]), Text()),\n True,\n )\n )\n # Set default role from config if provided\n elif default_role is not None:\n claims.append(\n func.set_config(\n func.cast(role_key, Text()),\n func.cast(str(default_role), Text()),\n True,\n )\n )\n return select(claims)\n","sub_path":"src/nebulo/gql/resolve/resolvers/claims.py","file_name":"claims.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"633871482","text":"# coding:utf-8\n\nfrom odoo import models, fields, api\nfrom datetime import datetime, timedelta\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT\nfrom ..models.hrp_mqtt import module\nfrom hrp_const import time_to_client\nfrom hrp_queue import STATE\n\nimport traceback\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass HrpTreatmentProcess(models.Model):\n _name = 'hrp.treatment_process'\n _description = u'就医流程'\n\n partner_id = fields.Many2one('res.partner', '患者')\n visit_date = fields.Date('就诊日期')\n\n line_ids = fields.One2many('hrp.treatment_process_line', 'process_id', '流程明细')\n\n state = fields.Selection([('doing', '进行中'), ('done', '完成')], '状态', default='doing')\n\n _rec_name = 'partner_id'\n _order = 'visit_date desc'\n\n def get_process(self, data):\n \"\"\"获取就医流程\"\"\"\n m_partner = self.env['res.partner']\n treatment_process_line_obj = self.env['hrp.treatment_process_line']\n\n prev_partner_id = data.get('prev_partner_id')\n current_partner_id = data['current_partner_id']\n topic = data['topic']\n\n # 清除之前患者topic\n prev_partner = m_partner.search([('id', '=', prev_partner_id)])\n if prev_partner:\n prev_partner.topic = False\n\n partner = m_partner.search([('id', '=', 
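# The capture loop above buckets packet sizes into one-second bins with
# DataFrame.resample('1S').sum(). The same bucketing on synthetic data:
import pandas as pd

times = pd.to_datetime(["2024-01-01 00:00:00.2",
                        "2024-01-01 00:00:00.9",
                        "2024-01-01 00:00:01.5"])
df = pd.DataFrame({"Bytes": [100, 50, 200]}, index=times)
print(df.resample("1S").sum())  # second 0 -> 150 bytes, second 1 -> 200 bytes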
current_partner_id)])\n res = {}\n if not partner:\n return res\n # 记录当前用户topic\n partner.topic = topic\n\n today = (datetime.now() + timedelta(hours=8)).strftime(DEFAULT_SERVER_DATE_FORMAT)\n\n # 当天\n process = self.search([('partner_id', '=', current_partner_id), ('state', '=', 'doing'), ('visit_date', '=', today)], order='id desc', limit=1)\n if not process:\n # 以后\n process = self.search([('partner_id', '=', current_partner_id), ('state', '=', 'doing')], order='id desc', limit=1)\n if not process:\n return res\n\n res.update({\n 'partner_id': process.partner_id.id,\n 'visit_date': process.visit_date,\n 'treatment_details': []\n })\n\n # 查询子流程\n treatment_process_lines = treatment_process_line_obj.search([('process_id', '=', process.id)], order='update_time, id')\n\n for line in treatment_process_lines:\n line_data = line.get_line_data()\n res['treatment_details'].append(line_data)\n return res\n\n def update_process(self, queue):\n \"\"\"更新就医流程\"\"\"\n m_treatment_process_line = self.env['hrp.treatment_process_line']\n total_queue_obj = self.env['hrp.total_queue']\n clinic_item_category_obj = self.env['his.clinic_item_category']\n business_obj = self.env['hrp.business']\n\n if queue.state in [5]:\n # 退费\n return\n\n # 计算code\n code = '02'\n\n business = business_obj.search([('name', '=', queue.business)], limit=1)\n if business:\n if business.business_category == '1':\n # 门诊\n code = '02' if queue.stage == '1' else '06'\n elif business.business_category == '2':\n # 检验\n code = '04'\n elif business.business_category == '3':\n # 检查\n code = '05'\n elif business.business_category == '4':\n # 治疗\n code = '07'\n elif business.business_category == '6':\n # 发药\n code = '08'\n\n line = m_treatment_process_line.search([('code', '=', code),\n ('queue_id', '=', queue.id),\n ('queue_state', '=', queue.state),\n ('state', '=', 'doing')], limit=1)\n if line:\n if line.queue_state == 1:\n # 待诊\n # 修改更新时间\n line.write({\n 'update_time': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n })\n # 推送就医流程\n line.send_process()\n else:\n # 查询对应总队列\n total_queues = total_queue_obj.search([('queue_id', '=', queue.id)])\n\n # 查询检查项目\n clinic_items = []\n\n for total_queue in total_queues:\n if total_queue.origin_table != 'dispose':\n continue\n dispose = self.env['his.dispose'].search([('id', '=', total_queue.origin_id)])\n if not dispose:\n continue\n if dispose.item_id and dispose.item_id.name not in clinic_items:\n clinic_items.append(dispose.item_id.name)\n\n # 预约科室id\n reserve_department_id = False\n for clinic_item in clinic_items:\n clinic_item_category = clinic_item_category_obj.search([('name', '=', clinic_item)], limit=1)\n if not clinic_item_category:\n continue\n reserve_department_id = clinic_item_category.department_id.id\n\n # 将此队列其他状态完成\n treatment_process_lines = m_treatment_process_line.search([('queue_id', '=', queue.id), ('state', '=', 'doing')])\n treatment_process_lines.write({'state': 'done'})\n\n # 通知手机该流程状态改变\n for tpl in treatment_process_lines:\n tpl.send_process()\n\n # 顺序号\n order_num = False\n if queue.queue_dispatch_ids:\n order_num = queue.queue_dispatch_ids[0].order_num_str\n elif queue.appointment_number:\n order_num = queue.appointment_number_str\n\n # 创建新流程\n treatment_process_line = m_treatment_process_line.create_process({\n 'queue_id': queue.id,\n 'partner_id': queue.partner_id.id,\n 'name': dict(HrpTreatmentProcessLine.BUSINESS)[code],\n 'code': code,\n 'department_id': queue.department_id.id,\n 'employee_id': queue.employee_id.id,\n 'order_num': order_num,\n 
'queue_state': queue.state,\n 'location': queue.department_id.location,\n 'process_type': '1',\n 'clinic_item': ';'.join(clinic_items) if clinic_items else False,\n 'reserve_department_id': reserve_department_id\n })\n _logger.info('=====流程明细:id-%s被创建======' % treatment_process_line.id)\n # 提交,防止死锁\n self.env.cr.commit()\n\n\nclass HrpTreatmentProcessLine(models.Model):\n _name = 'hrp.treatment_process_line'\n _description = u'就医流程明细'\n _rec_name = 'partner_id'\n _order = 'id desc'\n\n BUSINESS = [('01', '挂号'), ('02', '初诊'), ('03', '缴费'), ('04', '检验'), ('05', '检查'), ('06', '回诊'), ('07', '治疗'), ('08', '取药')]\n\n process_id = fields.Many2one('hrp.treatment_process', '就医流程', ondelete='cascade')\n partner_id = fields.Many2one('res.partner', related='process_id.partner_id')\n \n name = fields.Char('流程名称')\n code = fields.Char('流程编码')\n queue_id = fields.Many2one('hrp.queue', '队列')\n business = fields.Char('业务', related='queue_id.business')\n queue_state = fields.Selection(STATE, '队列状态')\n department_id = fields.Many2one('hr.department', '科室')\n employee_id = fields.Many2one('hr.employee', '医生')\n order_num = fields.Char('顺序号')\n location = fields.Char('位置信息')\n process_type = fields.Selection([('1', '排队'), ('2', '不排队')], '流程类型')\n message = fields.Text('信息')\n state = fields.Selection([('doing', '进行中'), ('done', '完成')], '状态', default='doing')\n duration = fields.Char('就诊持续时间')\n clinic_item = fields.Char('检查项目')\n reserve_department_id = fields.Integer('预约科室内部ID')\n pay_time = fields.Char('缴费时间')\n receipt_no = fields.Char('挂号单')\n reserve_id = fields.Integer('预约记录ID')\n update_time = fields.Datetime('更新时间', default=lambda *a: datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT))\n\n\n\n @api.model\n def create(self, val):\n res = super(HrpTreatmentProcessLine, self).create(val)\n # 推送就医流程\n res.send_process()\n return res\n\n # @api.multi\n # def write(self, val):\n # res = super(HrpTreatmentProcessLine, self).write(val)\n # for line in self:\n # # 推送就医流程\n # line.send_process()\n # return res\n\n def create_process(self, val):\n \"\"\"创建就医流程\"\"\"\n m_treatment_process = self.env['hrp.treatment_process']\n\n partner_id = val.get('partner_id')\n if not partner_id:\n return\n\n # 当前日期\n date_now_str = (datetime.now() + timedelta(hours=8)).strftime(DEFAULT_SERVER_DATE_FORMAT)\n\n parent_process = m_treatment_process.search([('visit_date', '=', date_now_str), ('partner_id', '=', partner_id), ('state', '=', 'doing')], order='id desc', limit=1)\n if not parent_process:\n parent_process = m_treatment_process.create({\n 'partner_id': partner_id,\n 'visit_date': (datetime.now() + timedelta(hours=8)).strftime(DEFAULT_SERVER_DATE_FORMAT)\n })\n # 父记录\n val.update({'process_id': parent_process.id})\n res = self.create(val)\n return res\n\n def send_process(self):\n \"\"\"推送就医流程\"\"\"\n topic = self.partner_id.topic\n if not topic:\n return\n msg = {\n 'action': 'send_process',\n }\n data = {\n 'partner_id': self.partner_id.id,\n 'visit_date': self.process_id.visit_date,\n 'treatment_details': []\n }\n\n # 推送时间\n # send_time_str = (datetime.now() + timedelta(hours=8)).strftime('%H:%M')\n\n line_data = self.get_line_data()\n data['treatment_details'].append(line_data)\n\n msg['data'] = data\n\n if module.get('emqtt'):\n module['emqtt'].Emqtt.publish(topic, msg, 2)\n\n def get_line_data(self):\n \"\"\"包装line的数据\"\"\"\n m_queue = self.env['hrp.queue']\n\n line_data = {\n 'process_line_id': self.id,\n 'time': time_to_client(self.update_time, '%Y-%m-%d %H:%M'),\n 'name': self.name,\n 'code': self.code,\n 
\n    def get_line_data(self):\n        \"\"\"Package this line's data\"\"\"\n        m_queue = self.env['hrp.queue']\n\n        line_data = {\n            'process_line_id': self.id,\n            'time': time_to_client(self.update_time, '%Y-%m-%d %H:%M'),\n            'name': self.name,\n            'code': self.code,\n            'business': self.business,\n            'department': self.department_id.name,\n            'doctor': self.queue_id.employee_id.name if self.queue_id.employee_id else '',\n            'location': self.location,\n            'is_queue': True if self.queue_id else False,\n            'state': self.state,\n            'order_num': self.order_num or '',\n            'wait_count': 0,\n            'wait_minutes': 0,\n            'duration': self.duration,\n            'clinic_item': self.clinic_item,\n            'reserve_department_id': self.reserve_department_id,\n            'queue_state': dict(STATE)[self.queue_state] if self.queue_id else '',\n            'queue_state_code': self.queue_state if self.queue_id else 0,\n            'time_point': '',\n        }\n\n        if self.code in ['01']:\n            # registration\n            # message format: visit date|time point|appointment number|doctor name\n            try:\n                if self.message:\n                    msg_list = self.message.split('|')\n                    visit_date, time_point, appointment_number, doctor = msg_list[0], msg_list[1], msg_list[2], msg_list[3]\n                    line_data.update({\n                        'time_point': '%s %s' % (visit_date, time_point),\n                        'order_num': appointment_number,\n                        'doctor': doctor\n                    })\n            except Exception:\n                _logger.error(traceback.format_exc())\n\n        if self.process_type == '1' and self.queue_id:\n            # queueing message\n\n            if self.queue_id.queue_dispatch_ids:\n                queue_dispatch = self.queue_id.queue_dispatch_ids[0]\n\n                queue_data = m_queue.clean_queue(self.queue_id)\n\n                # compute the current number of people waiting\n                wait_count = m_queue.get_wait_count(queue_dispatch, queue_data)\n\n                # compute the expected waiting time\n                # average_wait_time = 3\n                average_wait_time = m_queue.compute_average_wait_time(self.queue_id.department_id.id)\n\n                if self.queue_id.state == 1:\n                    # in the waiting state, fill in the wait count and time\n                    line_data.update({\n                        'wait_count': wait_count,\n                        'wait_minutes': average_wait_time * wait_count,\n                    })\n\n                line_data.update({\n                    'queue_state': dict(STATE)[self.queue_state],\n                    'queue_state_code': self.queue_state,\n                    'duration': average_wait_time,\n                })\n        return line_data\n\n\n","sub_path":"hrp_queue/models/hrp_treatment_process.py","file_name":"hrp_treatment_process.py","file_ext":"py","file_size_in_byte":13340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}{"seq_id":"225508226","text":"from flask import jsonify, request, Blueprint\nimport ctypes\nfrom api.dllUtil import DllUtil\nfrom api.exception import MyException\nimport traceback\n\nDLL_PATH = \"MathLibrary.dll\"\n\nmathlibrary = DllUtil(DLL_PATH)\n\nmathlibAPI = Blueprint('mathlibAPI', __name__)\n\n@mathlibAPI.route('/add', methods=['POST'])\ndef add():\n    try:\n        if not request.json:\n            raise MyException(\"No parameter!\")\n\n        if not mathlibrary:\n            raise MyException(\"Not Windows platform\")\n\n        params = request.json['params']\n\n        add = mathlibrary.getFunc(1, ctypes.c_double, [ctypes.c_double, ctypes.c_double])\n        result = add(params[0], params[1])\n\n        return jsonify({\"result\" : result}), 200\n    except MyException as me:\n        return str(me), 400\n    except Exception as e:\n        print(e)\n        traceback.print_stack()\n        return str(e), 400\n\n@mathlibAPI.route('/hello/', methods=['GET'])\n@mathlibAPI.route('/hello/<name>', methods=['GET'])\ndef helloworld(name=None):\n    try:\n        if name is None:\n            raise MyException(\"No name!\")\n\n        if not mathlibrary:\n            raise MyException(\"Not Windows platform\")\n\n        params = name.encode('utf-8')\n        print(name)\n        helloworld = mathlibrary.getFunc(3, ctypes.c_char_p, [ctypes.c_char_p])\n        result = helloworld(params)\n        print(result)\n        return jsonify({\"result\": result.decode('UTF-8')}), 200\n    except MyException as me:\n        return str(me), 400\n    except Exception as e:\n        print(e)\n        traceback.print_stack()\n        return str(e), 
400\n\n","sub_path":"api/mathlib.py","file_name":"mathlib.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"563662043","text":"# File: RetrieveMBTADwellTimes.py\n# Author: Dharmesh Tarapore \nimport urllib.request\nfrom urllib.request import quote \nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\nimport xmltodict\n\nclass RetrieveMBTADwellTimes(dml.Algorithm):\n contributor = \"bemullen_dharmesh\"\n reads = []\n writes = [\"bemullen_dharmesh.mbta_red_dwells\", \"bemullen_dharmesh.mbta_green_dwells\"]\n\n @staticmethod\n def execute(trial = False):\n startTime = datetime.datetime.now()\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bemullen_dharmesh', 'bemullen_dharmesh')\n\n urls = {'mbta_red_dwells': 'http://datamechanics.io/data/bemullen_dharmesh/data/mbta_red_dwells.json',\n 'mbta_green_dwells': 'http://datamechanics.io/data/bemullen_dharmesh/data/mbta_green_dwells.json'}\n\n for (key, url) in urls.items():\n response = urllib.request.urlopen(url).read().decode(\"utf-8\")\n r = json.loads(response)[key]\n repo.dropCollection(key)\n repo.createCollection(key)\n repo['bemullen_dharmesh.' + key].insert_many(r)\n\n repo.logout()\n endTime = datetime.datetime.now()\n\n return {\"start\":startTime, \"end\":endTime}\n\n @staticmethod\n def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n '''\n Create the provenance document describing everything happening\n in this script. Each run of the script will generate a new\n document describing that invocation event.\n '''\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bemullen_dharmesh', 'bemullen_dharmesh')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in # format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in # format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n\n doc.add_namespace('bdpr', 'https://data.boston.gov/api/3/action/datastore_search_sql')\n doc.add_namespace('bdpm', 'https://data.boston.gov/datastore/odata3.0/')\n doc.add_namespace('bgis', 'https://bostonopendata-boston.opendata.arcgis.com/datasets/')\n doc.add_namespace('datp', 'http://datamechanics.io/data/bemullen_dharmesh/data/')\n\n this_script = doc.agent('alg:bemullen_dharmesh#RetrieveMBTADwellTimes', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n\n resource_mbta_red_dwells = doc.entity('datp:mbta_red_dwells',\n {'prov:label':'MBTA Red Line Dwell Values', prov.model.PROV_TYPE:'ont:DataResource',\n 'ont:Extension': 'json'})\n get_mbta_red_dwells = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime,\n {'prov:label': 'Get List of Time Spent waiting at each station (Red line)'})\n doc.wasAssociatedWith(get_mbta_red_dwells, this_script)\n doc.usage(get_mbta_red_dwells, resource_mbta_red_dwells, startTime, None,\n {prov.model.PROV_TYPE:'ont:Retrieval'})\n\n resource_mbta_green_dwells = doc.entity('datp:mbta_green_dwells',\n {'prov:label':'MBTA Green Line Dwell Values', prov.model.PROV_TYPE:'ont:DataResource',\n 'ont:Extension': 
'json'})\n get_mbta_green_dwells = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime,\n {'prov:label': 'Get List of Time Spent waiting at each station (Green line)'})\n doc.wasAssociatedWith(get_mbta_green_dwells, this_script)\n doc.usage(get_mbta_green_dwells, resource_mbta_green_dwells, startTime, None,\n {prov.model.PROV_TYPE:'ont:Retrieval'})\n \n\n mbta_red_dwells = doc.entity('dat:bemullen_dharmesh#mbta_red_dwells',\n {prov.model.PROV_LABEL:'MBTA Red Line Dwell Intervals',\n prov.model.PROV_TYPE:'ont:DataSet'\n })\n doc.wasAttributedTo(mbta_red_dwells, this_script)\n doc.wasGeneratedBy(mbta_red_dwells, get_mbta_red_dwells, endTime)\n doc.wasDerivedFrom(mbta_red_dwells, resource_mbta_red_dwells, get_mbta_red_dwells,\n get_mbta_red_dwells, get_mbta_red_dwells)\n\n mbta_green_dwells = doc.entity('dat:bemullen_dharmesh#mbta_green_dwells',\n {prov.model.PROV_LABEL:'MBTA Green Line Dwell Intervals',\n prov.model.PROV_TYPE:'ont:DataSet'\n })\n doc.wasAttributedTo(mbta_green_dwells, this_script)\n doc.wasGeneratedBy(mbta_green_dwells, get_mbta_green_dwells, endTime)\n doc.wasDerivedFrom(mbta_green_dwells, resource_mbta_green_dwells, get_mbta_green_dwells,\n get_mbta_green_dwells, get_mbta_green_dwells)\n\n repo.logout()\n \n return doc\n","sub_path":"bemullen_dharmesh/RetrieveMBTADwellTimes.py","file_name":"RetrieveMBTADwellTimes.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"410832027","text":"import os\nimport glob\nimport torch\nimport numpy as np\nimport pandas as pd\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom torch.utils.data import Dataset\n\ndataset_path = '/home/agaurav/Documents/Datasets/ILSVRC2012/'\n\n\ndef get_train_df():\n mat = loadmat(dataset_path + 'ILSVRC2012_devkit_t12/data/meta.mat')\n\n dict_wnid_to_label = dict()\n dict_label_to_wnid = dict()\n dict_image_label = {'image': [], 'label': []}\n\n # print(mat['synsets'].dtype)\n for element in mat['synsets']:\n label = element[0]['ILSVRC2012_ID'][0][0] - 1\n wnid = element[0]['WNID'][0]\n\n dict_wnid_to_label[wnid] = label\n dict_label_to_wnid[label] = wnid\n\n test_dir = dataset_path + 'ILSVRC2012_img_train/'\n for category in os.listdir(test_dir):\n cat_dir = test_dir + category\n for img_path in glob.glob(cat_dir + '/*.JPEG'):\n dict_image_label['image'].append(img_path)\n dict_image_label['label'].append(dict_wnid_to_label[category])\n\n return pd.DataFrame(dict_image_label)\n\n\nclass ILSVRC2012Dataset(Dataset):\n\n def __init__(self, df, transform=None):\n self.df = df\n self.transform = transform\n\n def __len__(self):\n return self.df.shape[0]\n\n def __getitem__(self, idx):\n image = torch.from_numpy(plt.imread(self.df['image'].values[idx]).astype('float32'))\n if len(image.shape) == 2:\n image = image.repeat(3, 1, 1)\n elif len(image.shape) == 3 and image.shape[2] == 3:\n image = image.permute(2, 0, 1)\n elif len(image.shape) == 3 and image.shape[2] == 4:\n image = image[:, :, 0:3]\n image = image.permute(2, 0, 1)\n else:\n print(image.shape, self.df['image'].values[idx])\n\n label = torch.tensor(self.df['label'].values[idx])\n if self.transform:\n image = self.transform(image)\n\n if image.shape != (3, 224, 224):\n print(image.shape, self.df['image'].values[idx])\n\n return image, label\n\n\ndef get_ilsvrc2012_train_dataset(transform=None):\n train_df = get_train_df()\n return ILSVRC2012Dataset(train_df, 
transform=transform)\n\n\n\n","sub_path":"Dataset/ilsvrc2012.py","file_name":"ilsvrc2012.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"605067257","text":"import tkinter as tk\r\nimport tkinter.messagebox as ms\r\n\r\n\r\nclass Application(tk.Frame):\r\n def __init__(self, master=None):\r\n super().__init__(master=master)\r\n self.master = master\r\n self.createLogin()\r\n\r\n def createLogin(self):\r\n def click_enter():\r\n ms.showinfo(\"欢迎登录\", f\"用户名:{self.name_value.get()}\\\r\n 密码:{self.sn_value.get()}\")\r\n\r\n def click_cancel():\r\n self.destroy()\r\n\r\n self.name_value = tk.StringVar()\r\n self.sn_value = tk.StringVar()\r\n self.name_label = tk.Label(self, text=\"姓名\")\r\n self.name_label.grid(row=0, column=0, sticky=\"E\")\r\n self.name_entry = tk.Entry(self, textvariable=self.name_value)\r\n self.name_entry.grid(row=0, column=1)\r\n self.phone_label = tk.Label(self, text=\"用户名为手机号\")\r\n self.phone_label.grid(row=0, column=2)\r\n self.sn_label = tk.Label(self, text=\"密码\")\r\n self.sn_label.grid(row=1, column=0)\r\n self.sn_entry = tk.Entry(self, textvariable=self.sn_value, show=\"*\")\r\n self.sn_entry.grid(row=1, column=1)\r\n self._enter_button = tk.Button(self, text=\"登录\", command=click_enter)\r\n self._enter_button.grid(row=2, column=1, sticky=\"EW\")\r\n self._cancel_button = tk.Button(self, text=\"取消\", command=click_cancel)\r\n self._cancel_button.grid(row=2, column=2, sticky=\"E\")\r\n\r\n\r\ndef main():\r\n master = tk.Tk()\r\n # master.geometry(\"800x600+100+100\")\r\n app = Application(master)\r\n app.pack()\r\n master.mainloop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"python400/Gui编程/grid_布局.py","file_name":"grid_布局.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"391670062","text":"from distutils.core import setup, Extension\n\nimport pybind11\nimport os\n\n\nEXTRA_COMPILE_ARGS = ['-std=c++11', '-fvisibility=hidden']\n\next = Extension(\n \"_ext\",\n sources=[\"bindings.cpp\"],\n include_dirs=[\n os.getcwd(),\n pybind11.get_include(),\n pybind11.get_include(user=True)\n ],\n library_dirs=[os.getcwd()],\n runtime_library_dirs=[os.getcwd()],\n libraries=[\"func\"],\n extra_compile_args=EXTRA_COMPILE_ARGS,\n undef_macros=[\"NDEBUG\"],\n)\n\n\nsetup(\n name=\"ext\",\n ext_modules=[ext]\n)\n","sub_path":"functional/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"545813170","text":"# ROMEO AND JULIET EXERCISE\n\n# open the file.\nmy_file = open( \"romeo.txt\", \"r\")\n\n# declare empty list\nlist_of_lines = []\n\n# loop over lines in file.\nfor current_line in my_file:\n\n\t# check that current line is not blank\n if current_line != '\\n':\n \t# add line to list\n \tlist_of_lines.append( current_line )\n \nmy_file.close()\n\n\n\n# count number of lines\nnumber_of_lines = len(list_of_lines)\n\n# COUNT NUMBER OF CHARACTERS INCLUDING SPACES AND PUNCTUATION. 
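# A hedged sketch of the same tallies with a context manager and
# generator expressions; it assumes the same romeo.txt file and is an
# illustration, not part of the original exercise. The word-frequency
# dictionary built further below could likewise be replaced by
# collections.Counter(word_list).
with open("romeo.txt") as f:
    lines = [line for line in f if line != "\n"]

number_of_lines = len(lines)
number_of_characters = sum(len(line) for line in lines)
number_of_words = sum(len(line.split()) for line in lines)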
ALSO COUNT WORDS\n# set word counter to 0\nnumber_of_words = 0\n# set character counter to 0\nnumber_of_characters = 0\n# loop over lines in script list\nfor line in list_of_lines:\n\t# add the number of characters in current line to cumulative total\n\tnumber_of_characters = number_of_characters + len(line)\n\t# add the number of words in current line to cumulative total\n\tnumber_of_words = number_of_words + len(line.split())\n\n\n# Strip punctuation and make lowercase\n# declare an empty list\nlist_of_lines_without_punctuation = []\n\n# loop over all lines\nfor line in list_of_lines:\n\tclean_line = line.lower()\n\tclean_line = clean_line.replace(\",\",\" \")\n\tclean_line = clean_line.replace(\".\",\" \")\n\tclean_line = clean_line.replace(\"!\",\" \")\n\tclean_line = clean_line.replace(\"?\",\" \")\n\tclean_line = clean_line.replace(\"-\",\" \")\n\tclean_line = clean_line.replace(\"[\",\" \")\n\tclean_line = clean_line.replace(\"]\",\" \")\n\tclean_line = clean_line.replace(\":\",\" \")\n\tclean_line = clean_line.replace(\";\",\" \")\n\tclean_line = clean_line.replace(\"*\",\" \")\n\tclean_line = clean_line.replace(\"(\",\" \")\n\tclean_line = clean_line.replace(\")\",\" \")\n\tclean_line = clean_line.replace(\"\\\"\",\" \")\n\tclean_line = clean_line.replace(\"\\n\",\"\")\n\n\t# Append cleaned current line to list of lines w/o punctuation\n\tlist_of_lines_without_punctuation.append (clean_line)\n\n# convert list of clean lines into list of words\n# declare an empty list for all the words\nword_list = []\n\n# loop over the list of lines w/o puncutation\nfor line in list_of_lines_without_punctuation:\n\t# add each word from current line to the cumulative list of all words\n\tword_list.extend (line.split())\n\n# Create dictionary of unique words\nword_dict = {}\n# loop over list of words w/o punctuation\nfor word in word_list:\n\t# add current word to dictionary if it's not already in the dictionary\n\tif word not in word_dict:\n\t\tword_dict[ word ] = 0\n\n# Insert counts into dictionary\nfor word in word_list:\n\tword_dict[word] += 1\n\n","sub_path":"exercises/exercise4.py","file_name":"exercise4.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"559099029","text":"\"\"\"Each Node holds a reference to its previous node\nas well as its next node in the List.\"\"\"\n\n\nclass Node:\n def __init__(self, value, prev=None, next=None):\n self.value = value\n self.prev = prev\n self.next = next\n\n def get_value(self):\n return self.value\n\n \"\"\"Wrap the given value in a Node and insert it\n after this node. Note that this node could already\n have a next node it is point to.\"\"\"\n\n def insert_after(self, value):\n current_next = self.next\n self.next = Node(value, self, current_next)\n if current_next:\n current_next.prev = self.next\n\n \"\"\"Wrap the given value in a Node and insert it\n before this node. Note that this node could already\n have a previous node it is point to.\"\"\"\n\n def insert_before(self, value):\n current_prev = self.prev\n self.prev = Node(value, current_prev, self)\n if current_prev:\n current_prev.next = self.prev\n\n \"\"\"Rearranges this Node's previous and next pointers\n accordingly, effectively deleting this Node.\"\"\"\n\n def delete(self):\n if self.prev:\n self.prev.next = self.next\n if self.next:\n self.next.prev = self.prev\n\n\n\"\"\"Our doubly-linked list class. 
It holds references to\nthe list's head and tail nodes.\"\"\"\n\n\nclass DoublyLinkedList:\n    def __init__(self, node=None):\n        self.head = node\n        self.tail = node\n        self.length = 1 if node is not None else 0\n\n    def __len__(self):\n        return self.length\n\n    \"\"\"Wraps the given value in a Node and inserts it\n    as the new head of the list. Don't forget to handle\n    the old head node's previous pointer accordingly.\"\"\"\n\n    def add_to_head(self, value):\n        self.length += 1\n        new_node = Node(value)\n        # if there is no head or tail\n        if self.head is None and self.tail is None:\n            self.head = new_node\n            self.tail = new_node\n        # if there is a head\n        else:\n            current_node = self.head\n            current_node.prev = new_node\n            new_node.next = current_node\n            self.head = new_node\n\n    \"\"\"Removes the List's current head node, making the\n    current head's next node the new head of the List.\n    Returns the value of the removed Node.\"\"\"\n\n    def remove_from_head(self):\n        # check for head\n        if self.head is None:\n            return None\n        # if there is only one node\n        if self.head.next is None:\n            head = self.head\n            self.head = None\n            self.tail = None\n            self.length = 0\n            return head.get_value()\n        else:\n            # return the removed head's value, not the new head node\n            old_head = self.head\n            new_head_node = self.head.next\n            new_head_node.prev = None\n            self.head = new_head_node\n            self.length -= 1\n            return old_head.get_value()\n\n    \"\"\"Wraps the given value in a Node and inserts it\n    as the new tail of the list. Don't forget to handle\n    the old tail node's next pointer accordingly.\"\"\"\n\n    def add_to_tail(self, value):\n        new_node = Node(value)\n        # check for head\n        if self.head is None and self.tail is None:\n            self.head = new_node\n            self.tail = new_node\n            self.length += 1\n        else:\n            current_node = self.tail\n            current_node.next = new_node\n            new_node.prev = current_node\n            self.tail = new_node\n            self.length += 1\n        return new_node.get_value()\n\n    def remove_from_tail(self):\n        # if there is nothing\n        if self.head is None and self.tail is None:\n            return None\n        # if there is one node\n        if self.head is self.tail:\n            head = self.head\n            self.head = None\n            self.tail = None\n            self.length = 0\n            return head.get_value()\n        # if there is more than one node\n        else:\n            # return the removed tail's value, not the new tail's\n            last_node = self.tail\n            new_last_node = last_node.prev\n            self.tail = new_last_node\n            new_last_node.next = None\n            self.length -= 1\n            return last_node.get_value()\n\n    def move_to_front(self, node):\n        # if there is nothing\n        if self.head is None and self.tail is None:\n            return None\n        # only one node\n        if self.head is self.tail:\n            return f\"this node is already at the front\"\n\n        # two or more nodes\n        current_node = self.head\n        next_node = current_node.next\n        while next_node:\n            # only two nodes\n            if self.length == 2 and next_node.get_value() == node.get_value():\n                next_node.next = current_node\n                next_node.prev = None\n                current_node.prev = next_node\n                current_node.next = None\n                self.head = next_node\n                return next_node.get_value()\n\n            # more than two\n            elif self.length > 2 and next_node.get_value() == node.get_value():\n                ahead_node = next_node.next\n                behind_node = next_node.prev\n                ahead_node.prev = behind_node\n                behind_node.next = ahead_node\n                next_node.next = current_node\n                current_node.prev = next_node\n                self.head = next_node\n                return next_node.get_value()\n\n            # advance, otherwise the loop never terminates\n            next_node = next_node.next\n\n    def move_to_end(self, node):\n        # if there is nothing\n        if self.head is None and self.tail is None:\n            return None\n        # only one node\n        if self.head is self.tail and self.head.get_value() == node.get_value():\n            return f\"this is the only Node\"\n\n        # two or more nodes\n        current_node = self.head\n        next_node = 
current_node.next\n last_node = self.tail\n\n # two nodes (last one)\n if last_node.get_value() == node.get_value():\n return f\"Node {node.get_value()} is already at the last position\"\n\n # two nodes (first one)\n if self.length == 2 and current_node.get_value() == node.get_value():\n current_node.prev = last_node\n current_node.next = None\n last_node.next = current_node\n last_node.pre = None\n self.head = last_node\n self.tail = current_node\n\n return current_node.get_value()\n\n # more than two\n while next_node:\n if next_node.get_value() == node.get_value():\n ahead_node = next_node.next\n behind_node = next_node.prev\n ahead_node.prev = behind_node\n behind_node.next = ahead_node\n next_node.prev = last_node\n next_node.next = None\n last_node.next = next_node\n self.tail = next_node\n return next_node.get_value()\n\n next_node = next_node.next\n\n \"\"\"Removes a node from the list and handles cases where\n the node was the head or the tail\"\"\"\n\n def delete(self, node):\n # if there is nothing\n if self.head is None and self.tail is None:\n return None\n # if theres is one Node and matches\n if self.head is self.tail:\n if self.head.get_value() == node.get_value():\n self.head = None\n self.tail = None\n self.length = 0\n return node.get_value()\n else:\n return f\"no such node\"\n # if there is many nodes and its a first match\n current_node = self.head\n next_node = current_node.next\n if current_node.get_value() == node.get_value():\n self.head = next_node\n next_node.prev = None\n self.length -= 1\n # if there is many nodes and its in a middle somewhere\n elif current_node.get_value() != node.get_value():\n single_node = self.head.next\n while single_node:\n if single_node.get_value() == node.get_value():\n ahead_node = single_node.next\n behind_node = single_node.prev\n ahead_node.prev = behind_node\n behind_node.next = ahead_node\n single_node.pre = None\n single_node.next = None\n self.length -= 1\n return single_node.get_value()\n\n \"\"\"Returns the highest value currently in the list\"\"\"\n\n def get_max(self):\n # if there is nothing\n if self.head is None and self.tail is None:\n return None\n max_value = self.head.get_value()\n current_node = self.head.next\n while current_node:\n if current_node.get_value() > max_value:\n max_value = current_node.get_value()\n current_node = current_node.next\n return max_value\n","sub_path":"doubly_linked_list/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":8772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"13063917","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom matplotlib.font_manager import FontProperties \nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nimport keys\nimport landmark_coords\n\ndef get_erangel_pos(landmark):\n try:\n return landmark_coords.erangel[landmark]\n except:\n return False \n\ndef get_miramar_pos(landmark):\n try:\n return landmark_coords.miramar[landmark]\n except:\n return False\n\ndef get_sanhok_pos(landmark):\n try:\n return landmark_coords.sanhok[landmark]\n except:\n return False\n \n\n# Constant \nSHIFT = 10000\n\n# GoogleSpreadSheet\nscope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(keys.json_keyfile, scope)\ngc = gspread.authorize(credentials)\n\n# For Print Japanese\nfont_path = 
'/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'\nfont_prop = FontProperties(fname=font_path)\nmatplotlib.rcParams['font.family'] = font_prop.get_name()\n\n# Variable \nstack = []\n\n# TeamList\nroster_sheet = gc.open_by_key(keys.spreadsheetkey).worksheet('roster')\nteams = roster_sheet.col_values(1)\n\n# LandmarkList\nlandmark_sheet = gc.open_by_key(keys.spreadsheetkey).worksheet('landmark')\nlandmarks = landmark_sheet.get_all_values()\ndel landmarks[0]\n\n\n# Erangel\nfig = plt.figure(figsize=(18.00, 18.00), frameon=False, dpi=100)\nax = fig.add_axes([0, 0, 1, 1])\nax.axis('off')\nimg = mpimg.imread('maps/Erangel_Main_High_Res.png')\nax.imshow(img, extent=[0, 819200, 0, 819200])\n\nfor team in teams:\n    for landmark in landmarks:\n        if team == landmark[0]:\n            if landmark[5] != '':\n                pos = get_erangel_pos(landmark[5])\n                if not pos:\n                    # dont match landmark.\n                    ax.text(1000, 1000 + (SHIFT*stack.count('no_landmark')), team + \"(\" + landmark[5] + \")\", fontsize=16, color='yellow', weight='heavy')\n                    stack.append('no_landmark')\n\n                else:\n                    ax.text(pos['x'], pos['y'] - (SHIFT*stack.count(landmark[5])), team, fontsize=16, color='yellow', weight='heavy')\n                    stack.append(landmark[5])\n\n            if landmark[6] != '':\n                pos = get_erangel_pos(landmark[6])\n                if not pos:\n                    # dont match landmark.\n                    ax.text(1000, 1000 + (SHIFT*stack.count('no_landmark')), team + \"(\" + landmark[6] + \")\", fontsize=16, color='yellow', weight='heavy')\n                    stack.append('no_landmark')\n\n                else:\n                    ax.text(pos['x'], pos['y'] - (SHIFT*stack.count(landmark[6])), \"(\" + team + \")\", fontsize=16, color='yellow', weight='heavy')\n                    stack.append(landmark[6])\n\n        else:\n            ax.text(1000, 1000 + (SHIFT*stack.count('no_landmark')), team, fontsize=16, color='yellow', weight='heavy')\n            stack.append('no_landmark')\n\n\nplt.savefig('erangel.png')\nstack.clear()\nfig.delaxes(ax)\n\n\n# Miramar\nax = fig.add_axes([0, 0, 1, 1])\nax.axis('off')\nimg = mpimg.imread('maps/Miramar_Main_High_Res.png')\nax.imshow(img, extent=[0, 819200, 0, 819200])\n\nfor team in teams:\n    for landmark in landmarks:\n        if team == landmark[0]:\n            if landmark[1] != '':\n                pos = get_miramar_pos(landmark[1])\n                if not pos:\n                    # dont match landmark.\n                    ax.text(1000, 1000 + (SHIFT*stack.count('no_landmark')), team + \"(\" + landmark[1] + \")\", fontsize=16, color='yellow', weight='heavy')\n                    stack.append('no_landmark')\n\n                else:\n                    ax.text(pos['x'], pos['y'] - (SHIFT*stack.count(landmark[1])), team, fontsize=16, color='yellow', weight='heavy')\n                    stack.append(landmark[1])\n\n            if landmark[2] != '':\n                pos = get_miramar_pos(landmark[2])\n                if not pos:\n                    # dont match landmark.\n                    ax.text(1000, 1000 + (SHIFT*stack.count('no_landmark')), team + \"(\" + landmark[2] + \")\", fontsize=16, color='yellow', weight='heavy')\n                    stack.append('no_landmark')\n\n                else:\n                    ax.text(pos['x'], pos['y'] - (SHIFT*stack.count(landmark[2])), \"(\" + team + \")\", fontsize=16, color='yellow', weight='heavy')\n                    stack.append(landmark[2])\n\n        else:\n            ax.text(1000, 1000 + (SHIFT*stack.count('no_landmark')), team, fontsize=16, color='yellow', weight='heavy')\n            stack.append('no_landmark')\n\n\nplt.savefig('miramar.png')\nstack.clear()\nfig.delaxes(ax)\n\n\n# Sanhok\nax = fig.add_axes([0, 0, 1, 1])\nax.axis('off')\nimg = mpimg.imread('maps/Sanhok_Main_High_Res.png')\nax.imshow(img, extent=[0, 409600, 0, 409600])\n\nfor team in teams:\n    for landmark in landmarks:\n        if team == landmark[0]:\n            if landmark[3] != '':\n                pos = get_sanhok_pos(landmark[3])\n                if not pos:\n                    # dont match landmark.\n                    ax.text(1000, 1000 + 
(SHIFT*stack.count('no_landmark')), team + \"(\" + landmark[3] + \")\", fontsize=16, color='yellow', weight='heavy')\n stack.append('no_landmark')\n\n else: \n ax.text(pos['x'], pos['y'] - (SHIFT*stack.count(landmark[3])), team, fontsize=16, color='yellow', weight='heavy')\n stack.append(landmark[3])\n\n if landmark[4] != '':\n pos = get_sanhok_pos(landmark[4])\n if not pos:\n # dont match landmark.\n ax.text(1000, 1000 + (SHIFT*stack.count('no_landmark')), team + \"(\" + landmark[4] + \")\", fontsize=16, color='yellow', weight='heavy')\n stack.append('no_landmark')\n\n else:\n ax.text(pos['x'], pos['y'] - (SHIFT*stack.count(landmark[4])), \"(\" + team + \")\", fontsize=16, color='yellow', weight='heavy')\n stack.append(landmark[4])\n \n else:\n ax.text(1000, 1000 + (SHIFT*stack.count('no_landmark')), team, fontsize=16, color='yellow', weight='heavy')\n stack.append('no_landmark')\n\n\nplt.savefig('sanhok.png')\n","sub_path":"print_landmark.py","file_name":"print_landmark.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"238456314","text":"import torch\nimport torch.nn.functional as F\nimport random\n# import copy\nimport time\nimport matplotlib.pyplot as plt\n# import numpy as np\n# from sklearn import cluster\nimport networkx as nx\n# from numba import jit, cuda\nfrom simulator.NN_correlation_matrix.MatrixReader import MatrixReader\nfrom simulator.NN_correlation_matrix.Reaction import Reaction\nfrom simulator.NN_correlation_matrix.Gan_NN_Matrix_simulator import Generator\nfrom simulator.NN_correlation_matrix.GeneratorStructureFactory import GeneratorStructureFactory\nfrom simulator.NN_correlation_matrix.ResultProcessor import ResultProcessor\nfrom simulator.NN_correlation_matrix.ResultSaver import ResultSaver\n\ngan_model_global = None #variable to make setting of model easier\n\n\ndef show_matrix(t):\n # img = img / 2 + 0.5 # unnormalize\n plt.title('correlation matrix')\n npimg = t.cpu().numpy()\n plt.imshow(npimg, cmap=\"Greys\")\n #plt.show()\n\n\ndef show_graph(t, threshold=0.9, pos_color=\"b\", neg_color=\"r\"):\n t = t.cpu()\n apos = torch.threshold(t, threshold, 0).numpy()\n gpos = nx.from_numpy_matrix(apos)\n\n aneg = torch.threshold(t.neg(), threshold, 0).numpy()\n gneg = nx.from_numpy_matrix(aneg)\n\n pos = nx.spring_layout(gpos)\n\n nx.draw_networkx(gpos, pos=pos, with_labels=False, node_size=2, edge_color=pos_color, node_color=\"black\", alpha=0.2)\n nx.draw_networkx(gneg, pos=pos, with_labels=False, node_size=2, edge_color=neg_color, node_color=\"black\", alpha=0.2)\n\n # nx.draw_networkx_edges(gneg,pos,\n # with_labels=False,\n # edge_color='r',\n # width=1.0,\n # alpha=0.1\n # )\n # nx.draw_networkx_edges(gpos,pos,\n # with_labels=False,\n # edge_color='b',\n # width=1.0,\n # alpha=0.2\n # )\n\n # plt.axis('off')\n plt.title('correlation network')\n #plt.show()\n\n\ndef print_reactions(reactions):\n original = (reactions.get_reactions_tensor() * 10).int()\n original = [repr(t) for t in list(original)]\n original.sort()\n for i in original:\n print(i)\n\n\ndef generate_reaction_def(sub_count, prod_count, metabolites, low=0, high=1):\n \"\"\"\n returns a tupple (substrate, product) of sub_count substrates randomly drawn from metabolites\n and prod_count products randomly drawn from metabolites\n ???such that substrates and products are mutually exclusive.???\n \"\"\"\n m = set(metabolites)\n sub = set(random.sample(m, sub_count))\n prod = set(random.sample(m, prod_count))\n 
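# As the "???" in the docstring above hints, sub and prod are sampled
# independently and can overlap. A sketch of a disjoint variant (the
# function name is mine, not part of the module): draw
# sub_count + prod_count distinct metabolites once, then split them.
def generate_disjoint_reaction_def(sub_count, prod_count, metabolites, low=0, high=1):
    m = list(metabolites)
    picked = random.sample(m, sub_count + prod_count)
    sub, prod = set(picked[:sub_count]), set(picked[sub_count:])
    return ([random.uniform(low, high) if i in sub else 0.0 for i in m],
            [random.uniform(low, high) if i in prod else 0.0 for i in m])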
return ([random.uniform(low, high) if i in sub else 0.0 for i in m],\n [random.uniform(low, high) if i in prod else 0.0 for i in m])\n\n\n\n\n\n# class MultiReaction(torch.nn.Module):\n# def __init__(self, rcount, metabolites, scount=2, pcount=2, step=0.0001, low=0.0, high=1.0, substrates=None,\n# products=None):\n# super(MultiReaction, self).__init__()\n# self._reactions = torch.nn.ModuleList()\n# for i in range(rcount):\n# if substrates == None:\n# sub, prod = generate_reaction_def(scount, pcount, metabolites, low, high)\n# else:\n# sub, prod = substrates, products\n# sub = torch.Tensor(sub).to(device) # so far there is no self.device in pytorch Modules\n# prod = torch.Tensor(prod).to(device) # so far there is no self.device in pytorch Modules\n# r = Reaction(sub, prod, step=step)\n# self._reactions.append(r)\n#\n# def forward(self, x):\n# # random.shuffle(self._reactions)\n# for m in self._reactions:\n# x = m(x)\n# return x\n#\n# def get_reactions_tensor(self):\n# rslt = [r.get_def_tensor().unsqueeze(dim=0) for r in self._reactions]\n# rslt = torch.cat(rslt, dim=0)\n# return rslt\n\nclass MultiReaction_new(torch.nn.Module):\n def __init__(self, rcount, metabolites, scount=2, pcount=2, step=0.0001, low=0.0, high=1.0, substrates=None,\n products=None):\n super(MultiReaction_new, self).__init__()\n self._reactions = torch.nn.ModuleList()\n global gan_model_global\n self._generator = Generator(m_count, reactions_count, gan_model_global)\n self._is_generated = True\n for i in range(rcount):\n if substrates == None:\n if i == 0:\n self._generator.clear_output_layer()\n #sub, prod = generate_reaction_def(scount, pcount, metabolites, low, high)\n sub, prod = self._generator.get_reactions_tensor(i)\n else:\n sub, prod = substrates[i], products[i]\n self._is_generated = False\n sub = torch.Tensor(sub).to(device) # so far there is no self.device in pytorch Modules\n prod = torch.Tensor(prod).to(device) # so far there is no self.device in pytorch Modules\n # sub = torch.as_tensor(sub).to(device) #uncommon this to use GPU. Common up\n # prod = torch.as_tensor(prod).to(device)\n r = Reaction(sub, prod, step=step)\n self._reactions.append(r)\n\n def forward(self, x):\n # random.shuffle(self._reactions)\n if self._is_generated:\n self._reactions = torch.nn.ModuleList()\n self._generator.clear_output_layer()\n for i in range(reactions_count):\n sub, prod = self._generator.get_reactions_tensor(i)\n sub = torch.Tensor(sub).to(device)\n prod = torch.Tensor(prod).to(device)\n # sub = torch.as_tensor(sub).to(device) #uncommon this to use GPU. 
Common up\n # prod = torch.as_tensor(prod).to(device)\n r = Reaction(sub, prod, step=step).to(device)\n self._reactions.append(r)\n\n for m in self._reactions:\n x = m(x)\n return x\n\n def get_reactions_tensor(self):\n rslt = [r.get_def_tensor().unsqueeze(dim=0) for r in self._reactions]\n rslt = torch.cat(rslt, dim=0)\n return rslt\n\n\ndef pearson_r(x, y):\n vx = x - torch.mean(x)\n vy = y - torch.mean(y)\n\n rx = torch.rsqrt(torch.sum(vx ** 2))\n ry = torch.rsqrt(torch.sum(vy ** 2))\n\n s = torch.sum(vx * vy)\n cost = s * rx * ry\n return cost\n\n\ndef correlation_matrix(b):\n vb = b - torch.mean(b, dim=0)\n mr = torch.rsqrt(torch.sum(vb ** 2, dim=0))\n mr = mr.unsqueeze(dim=0)\n corr = torch.mm(vb.t(), vb) * torch.mm(mr.t(), mr)\n #corr.requires_grad_() #uncommon to use GPU\n return corr\n\n# class Process(torch.nn.Module):\n# def __init__(self, rcount, metabolites, scount=2, pcount=2, low=0.0, high=1.0, substrates=None, products=None,\n# step=0.0001, iterations=100):\n# super(Process, self).__init__()\n# self._mr = MultiReaction(rcount, metabolites, scount=scount, pcount=pcount, step=step, low=low, high=high,\n# substrates=substrates, products=products)\n# self._iterations = iterations\n#\n# def forward(self, x):\n# iterations_for_sample = torch.randint(low=0, high=self._iterations, size=(minibatch_size,), device=device,\n# requires_grad=False)\n# for i in range(self._iterations):\n# doit = (iterations_for_sample > i).float()\n# doit = doit.unsqueeze(dim=1)\n# doit = torch.cat([doit] * m_count, dim=1)\n# y = self._mr(x)\n# x = y * doit + x * (1 - doit)\n#\n# c = correlation_matrix(x)\n# c = c - torch.eye(m_count).to(device)\n# return x, c\n#\n# def get_reactions_tensor(self):\n# return self._mr.get_reactions_tensor()\n\nclass Process_new(torch.nn.Module):\n def __init__(self, rcount, metabolites, scount=2, pcount=2, low=0.0, high=1.0, substrates=None, products=None,\n step=0.0001, iterations=100):\n super(Process_new, self).__init__()\n # self._mr = MultiReaction(rcount, metabolites, scount=scount, pcount=pcount, step=step, low=low, high=high,\n # substrates=substrates, products=products)\n\n #self._mr = Generator(m_count*2, None)\n self._mr = MultiReaction_new(rcount, metabolites, scount=scount, pcount=pcount, step=step, low=low, high=high,\n substrates=substrates, products=products)\n self._iterations = iterations\n\n def forward(self, x):\n iterations_for_sample = torch.randint(low=0, high=self._iterations, size=(minibatch_size,), device=device,\n requires_grad=False)\n for i in range(self._iterations):\n doit = (iterations_for_sample > i).float()\n doit = doit.unsqueeze(dim=1)\n doit = torch.cat([doit] * m_count, dim=1)\n y = self._mr(x)\n x = y * doit + x * (1 - doit)\n\n c = correlation_matrix(x)\n # c = c - torch.eye(m_count).to(device)\n return x, c\n\n def get_reactions_tensor(self):\n return self._mr.get_reactions_tensor()\n\n\n\n# import numpy as np\n# import sklearn.cluster as cluster\n\ndef compare_tensor_sets(s1,s2):\n \"\"\"\n s1 and s2 are lists of tensors of the same size and shape.\n the comparison is performed in a greedy manner where the frist element of s1 is compared to all elements of s2,\n the minimal MSE is recorded, and the respective element of s2 is removed. 
The second element of s1 is compared\n to the remaining elements of s2 and so on.\n @return the average of the minimal MSE values for all elements of s1.\n \"\"\"\n s1 = list(s1)\n s2 = list(s2)\n rcount = len(s1)\n assert(rcount==len(s2))\n rslt = 0.0\n while len(s1)>0:\n i = s1.pop()\n mses = torch.Tensor([F.mse_loss(i,j) for j in s2])\n j = mses.argmin()\n del s2[j]\n rslt+=mses[j]\n return rslt / rcount\n\nm_count = 76#10#20#50#200 #8 #size of metabolic profile\nreactions_count = 38#5#20#50#2 #50\ndataset_size = 1#20#50#10#1\nminibatch_size = 400#10#100 #number of random initial substrates\nsub_min = 2\nsub_max = 2.01\nepochs = 50#200\nstep = 0.5\niterations = 100\ns_count=3\np_count=3\nresult_saver = None #removed to function so results won't get restarted for no reason\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n#device = \"cpu\"\nprint(\"Device is \" + str(device))\n\n\ndef create_random_reactions(reactions_count, metabolic_count):\n substrates = []\n products = []\n for i in range(reactions_count):\n #substrate = random.sample(range(2), metabolic_count)\n substrate = [random.randrange(0, 2, 1) for i in range(metabolic_count)]\n #product = random.sample(range(2), metabolic_count)\n product = [random.randrange(0, 2, 1) for i in range(metabolic_count)]\n substrates.append(substrate)\n products.append(product)\n return substrates, products\n\n#function will create non overlapping reactions in the way that substrate in place i is 1 and product in the place i+1 is 1\ndef create_non_overlapping_reactions(reactions_count, metabolic_count):\n substrates = []\n products = []\n for i in range(reactions_count):\n #doing this if so there won't be overlapping in the last pair, so instead will add the same\n if i == reactions_count - 1:\n substrate = [0] * metabolic_count\n substrate[0] = 1\n product = [0] * metabolic_count\n product[1] = 1\n else:\n substrate = [0] * metabolic_count\n substrate[i] = 1\n product = [0] * metabolic_count\n product[i + 1] = 1\n\n substrates.append(substrate)\n products.append(product)\n return substrates, products\n\n#function will create non overlapping reactions in the way that substrate in place i is 1 and product in the place i+1 is 1\ndef create_non_overlapping_reactions_2(reactions_count, metabolic_count):\n substrates = []\n products = []\n for i in range(0,metabolic_count-1, 2):\n #doing this if so there won't be overlapping in the last pair, so instead will add the same\n if i == reactions_count - 1:\n substrate = [0] * metabolic_count\n substrate[0] = 1\n product = [0] * metabolic_count\n product[1] = 1\n else:\n substrate = [0] * metabolic_count\n substrate[i] = 1\n product = [0] * metabolic_count\n product[i + 1] = 1\n substrates.append(substrate)\n products.append(product)\n return substrates, products\n\n\ndef create_dataset_from_real_matrix():\n matrix_reader = MatrixReader()\n dataset = matrix_reader.read_matrices()\n\n global m_count\n m_count = matrix_reader.get_metabolites_count()\n global dataset_size\n dataset_size = matrix_reader.get_datasize()\n\n return dataset\n\n\ndef create_dataset(data_size):\n metabolites = range(m_count)\n\n # testing 1 reaction - no common in input/output\n # reactions = Process_new(reactions_count, metabolites,\n # # scount=s_count, pcount=p_count,\n # # low=1.0,high=1.0\n # substrates=[[1, 1, 1, 0, 0, 0, 0, 0]],\n # products=[[0, 0, 0, 1, 1, 1, 0, 0]],\n # step=step, iterations=iterations,\n # ).to(device)\n\n # testing 1 reaction - one common in input/output\n # reactions = 
Process_new(reactions_count, metabolites,\n # # scount=s_count, pcount=p_count,\n # # low=1.0,high=1.0\n # substrates= [[1, 1, 1, 0, 0, 0, 0, 0]],\n # products= [[0, 1, 0, 1, 1, 1, 0, 0]],\n # step=step, iterations=iterations,\n # ).to(device)\n\n #testing 2 reactions - no common in input/output\n # reactions = Process_new(reactions_count, metabolites,\n # # scount=s_count, pcount=p_count,\n # # low=1.0,high=1.0\n # substrates=[[1, 1, 1, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 1]],\n # products=[ [0, 0, 0, 1, 1, 1, 0, 0], [1, 0, 0, 1, 1, 1, 0, 0]],\n # step=step, iterations=iterations,\n # ).to(device)\n\n # testing x reactions - random reactions\n #subs, prods = create_random_reactions(reactions_count, m_count)\n #subs, prods = create_non_overlapping_reactions(reactions_count, m_count)\n subs, prods = create_non_overlapping_reactions_2(reactions_count, m_count)\n reactions = Process_new(reactions_count, metabolites,\n # scount=s_count, pcount=p_count,\n # low=1.0,high=1.0\n substrates=subs,\n products=prods,\n step=step, iterations=iterations,\n ).to(device)\n\n print_reactions(reactions)\n\n # creation of dataset(?)\n with torch.no_grad():\n dataset = []\n for i in range(data_size):\n batch_x = torch.Tensor(size=(minibatch_size, m_count)).to(device)\n batch_x.uniform_(sub_min, sub_max)\n print(batch_x)\n ym, yc = reactions(batch_x)\n print(ym)\n dataset.append((batch_x, yc))\n if i % 1 == 0:\n print(\"batch {}: {}\".format(i, yc.size()))\n print(yc.min())\n print(yc.max())\n show_matrix(yc)\n show_graph(yc, threshold=0.3)\n\n # r = ResultProcessor(m_count, 1250, sub_min, sub_max) all three lines beloew are for result proccesing above model\n # r.run_model_for_fake(1, reactions.get_reactions_tensor(), dataset[0][1])\n # r.convert_from_xlsx_to_cvs(1)\n return dataset, reactions\n\n#dataset, reactions = create_dataset(dataset_size) #This line will create the *same* dataset for all experiments\n\nclass Test():\n\n #@jit(target=\"cuda\")\n def run_tests(self, run_with_real_data):\n\n global result_saver\n result_saver = ResultSaver(minibatch_size, step, dataset_size, epochs, reactions_count)\n\n if run_with_real_data:\n dataset = create_dataset_from_real_matrix()\n generator_structure_factory = GeneratorStructureFactory(m_count*2*reactions_count)\n structure_list = generator_structure_factory.get_structure_list()\n for structure in structure_list:\n name = structure.get_name()\n #gan_model = structure.get_model()\n print(\"\\n****************************************\")\n print(\"Running Test With Structure: \" + name)\n print(\"**************************************** \\n \")\n if not run_with_real_data:\n self.run_one_test(structure)\n else:\n self.run_one_real_data_test(structure, dataset)\n print(\"\\n****************************************\")\n print(\"End of Test With Structure: \" + name)\n print(\"**************************************** \\n \")\n\n print(\"\\n****************************************\")\n print(\"Summary\")\n print(\"**************************************** \\n \")\n\n if not run_with_real_data:\n for structure in structure_list:\n name = structure.get_name()\n in_err_dict, out_err_dict = 0,0#structure.get_score()\n #gan_model = structure.get_model()\n str_print = \"Model: \" + name + \" \\n\"\n for i in range(reactions_count):\n str_print += '\\t Reaction ' + str(i+1) + \" - In Err: \" + in_err_dict[i] + \"%. 
Out Err: \" + out_err_dict[i] + \"% \\n\"\n print(str_print)\n\n #write_data_summary(structure_list)\n result_saver.write_data_summary(structure_list)\n\n #@jit(target=\"cuda\")\n def run_one_test(self, gan_structure):\n dataset, reactions = create_dataset(dataset_size) #this line will create the dateset for each experiment\n gan_model = gan_structure.get_model()\n global gan_model_global\n gan_model_global = gan_model\n metabolites = range(m_count)\n\n model = Process_new(reactions_count, metabolites, scount=m_count, pcount=m_count, step=step, iterations=iterations)\n model = model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n ##optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0, rho=0.9, eps=1e-06, weight_decay=1.0)\n ##optimizer = torch.optim.Adagrad(model.parameters(), lr=0.01, lr_decay=0, weight_decay=0, initial_accumulator_value=0)\n # optimizer = torch.optim.Adamax(model.parameters(), lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n # optimizer = torch.optim.ASGD(model.parameters(), lr=0.1, lambd=0.0001, alpha=0.75, t0=1000000.0, weight_decay=0.01)\n # optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, dampening=0, weight_decay=0, nesterov=False)\n\n ploss = 0.0\n rloss = 0.0\n with torch.no_grad():\n rt = reactions.get_reactions_tensor()\n r1 = model.get_reactions_tensor()\n T = time.time()\n loss_list = []\n for epoch in range(epochs):\n for x, y in dataset:\n rand_x = torch.Tensor(size=(minibatch_size, m_count))\n rand_x.uniform_(sub_min, sub_max)\n rand_x = rand_x.to(device)\n\n optimizer.zero_grad()\n\n #ym, yc = model(x)\n ym, yc = model(rand_x)\n loss = F.mse_loss(yc, y)\n #loss_list.append(loss) moved it down because when we had many datasets it didn't work - only works with one dataset\n loss.backward(retain_graph=True)\n optimizer.step()\n\n # print(y_pred)\n # print(y)\n # print(loss)\n\n with torch.no_grad():\n r2 = model.get_reactions_tensor()\n if (ploss > 0) and (loss.item() / ploss > 100):\n print(r1)\n print(r2)\n ploss += float(loss.item())\n rloss += float(compare_tensor_sets(rt, r2).item())\n r1 = r2\n\n if epoch % 1 == 0:\n T = time.time() - T\n ploss = ploss / 1 / dataset_size\n rloss = rloss / 1 / dataset_size\n info_string = \"{}\\t time:{:2.3g} \\t mse:{} \\t p-loss:{} \\t r-loss :{}\".format(epoch, T,loss, ploss, rloss)\n #write_date_epoch(gan_structure, loss, ploss, rloss,epoch,T)\n result_saver.write_data_epoch(gan_structure, loss, ploss, rloss, epoch, T)\n #loss_list.append(loss)\n loss_list.append(ploss)\n print(info_string)\n if ploss < (10 ** (-20)):\n print(\"stop\")\n break\n final_ploss = ploss\n ploss = 0.0\n rloss = 0.0\n T = time.time()\n\n if epoch == epochs-1: #last epoch\n #gan_structure.set_last_mse_loss(loss)\n gan_structure.set_last_mse_loss(final_ploss)\n #write_date_final_epoch(gan_structure, yc, y, loss_list)\n result_saver.write_date_final_epoch(gan_structure, yc, y, loss_list)\n result_saver.save_model(model, gan_structure)\n\n predicted = model.get_reactions_tensor()\n predicted = predicted.round().int()\n\n predict_for_cal = predicted #need this for my calculate_error function, the next line converts it for string for some reason\n predicted = [repr(t) for t in list(predicted)]\n predicted.sort()\n gan_structure.add_to_predicted_reactions_list(predict_for_cal)\n\n original = reactions.get_reactions_tensor().int()\n original_for_cal = original\n original = [repr(t) for t in list(original)]\n original.sort()\n 
gan_structure.add_to_original_reactions_list(original_for_cal)\n\n verdict = [x==y for x,y in zip(original, predicted)]\n print(verdict)\n\n for i in range(len(predicted)):\n print(\"reaction %d predicted: %r \"%(i,original[i]==predicted[i]))\n print(\"original:\")\n print(original[i])\n print(\"predicted:\")\n print(predicted[i])\n\n\n self.calculate_error(original_for_cal, predict_for_cal, gan_structure)\n result_saver.save_reactions(gan_structure)\n\n def calculate_error(self, original, predicted, gan_structure):\n calc_in_error_dict = {}\n calc_out_error_dict = {}\n for j in range(reactions_count):\n error_in_input = 0\n error_in_output = 0\n for i in range(m_count):\n if(original[j][0][i] != predicted[j][0][i]):\n error_in_input = error_in_input + 1\n\n if (original[j][1][i] != predicted[j][1][i]):\n error_in_output = error_in_output + 1\n\n input_error = str( (error_in_input/m_count) * 100)\n out_error = str((error_in_output / m_count) * 100)\n print(\"Reaction \" + str(j) + \" - Error in input: \" + input_error + \"%. Error in output: \" + out_error + \"%\")\n calc_in_error_dict[j] = input_error\n calc_out_error_dict[j] = out_error\n\n gan_structure.set_score(calc_in_error_dict, calc_out_error_dict)\n\n #create 2 random matrix and check the mse difference between them\n def two_random_matrix_test(self,iterations_num):\n global result_saver\n result_saver = ResultSaver(minibatch_size, step, dataset_size, epochs, reactions_count)\n sum = 0\n loss_list = []\n for i in range(iterations_num):\n dataset,_ = create_dataset(1)\n dataset2,_ = create_dataset(1)\n for x1, y1 in dataset:\n for x2, y2 in dataset2:\n loss = F.mse_loss(y1, y2)\n loss_item = loss.item()\n sum += loss_item\n loss_list.append(loss_item)\n print(\"loss is \" + str(loss_item))\n result_saver.save_two_random(y1,y2,i, loss_item)\n\n average = sum / iterations_num\n sum_squared_diff = 0\n for loss in loss_list:\n sum_squared_diff += (average - loss) ** 2\n st_dev = sum_squared_diff / iterations_num\n result_saver.save_two_random_final_stats(str(m_count), str(reactions_count), str(average), str(st_dev))\n print(\"Metabolices = \" + str(m_count) + \". 
Reactions = \" + str(reactions_count))\n print(\"Average loss is \" + str(average))\n print(\"Standard deviation loss is \" + str(st_dev))\n\n def export_to_classifier(self, model_name):\n result_processor = ResultProcessor(m_count, minibatch_size, sub_min, sub_max)\n model = torch.load(result_processor.get_saving_path() + \"\\\\Saved Models\\\\\" + model_name)\n model.eval()\n result_processor.set_model(model)\n matrices_num = 50\n result_processor.run_model(matrices_num)\n #result_processor.create_semi_random_negative_instances()\n result_processor.convert_from_xlsx_to_cvs(matrices_num)\n\n def run_one_real_data_test(self, gan_structure, dataset):\n gan_model = gan_structure.get_model()\n global gan_model_global\n gan_model_global = gan_model\n metabolites = range(m_count)\n\n model = Process_new(reactions_count, metabolites, scount=m_count, pcount=m_count, step=step, iterations=iterations)\n model = model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n\n ploss = 0.0\n rloss = 0.0\n T = time.time()\n loss_list = []\n dataset_repets = 5\n for epoch in range(epochs):\n for i in range(dataset_repets):\n for y in dataset:\n rand_x = torch.Tensor(size=(minibatch_size, m_count))\n rand_x.uniform_(sub_min, sub_max)\n rand_x = rand_x.to(device)\n\n optimizer.zero_grad()\n\n #ym, yc = model(x)\n ym, yc = model(rand_x)\n loss = F.mse_loss(yc, y)\n #loss_list.append(loss) moved it down because when we had many datasets it didn't work - only works with one dataset\n loss.backward(retain_graph=True)\n optimizer.step()\n\n with torch.no_grad():\n ploss += float(loss.item())\n\n if epoch % 1 == 0:\n T = time.time() - T\n ploss = ploss / 1 / dataset_size\n rloss = rloss / 1 / dataset_size\n info_string = \"{}\\t time:{:2.3g} \\t mse:{} \\t p-loss:{} \\t r-loss :{}\".format(epoch, T,loss, ploss, rloss)\n #write_date_epoch(gan_structure, loss, ploss, rloss,epoch,T)\n result_saver.write_data_epoch(gan_structure, loss, ploss, rloss, epoch, T)\n #loss_list.append(loss)\n loss_list.append(ploss)\n print(info_string)\n if ploss < (10 ** (-20)):\n print(\"stop\")\n break\n final_ploss = ploss\n ploss = 0.0\n rloss = 0.0\n T = time.time()\n\n if epoch == epochs-1: #last epoch\n #gan_structure.set_last_mse_loss(loss)\n gan_structure.set_last_mse_loss(final_ploss)\n #write_date_final_epoch(gan_structure, yc, y, loss_list)\n result_saver.write_date_final_epoch(gan_structure, yc, y, loss_list)\n result_saver.save_model(model, gan_structure)\n\n result_saver.save_reactions(gan_structure)\n\n\nif __name__ == \"__main__\":\n test = Test()\n test.run_tests(False)\n #test.run_tests(True)\n # test.two_random_matrix_test(100)\n # test.export_to_classifier(\"11.pt\")","sub_path":"simulator/NN_correlation_matrix/Reaction_NN_Matrix.py","file_name":"Reaction_NN_Matrix.py","file_ext":"py","file_size_in_byte":28273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"352616968","text":"import json\nimport falcon\nfrom graph_manager.schemas import load_schema\nfrom graph_manager.utils.validate import validate\nfrom graph_manager.utils.logs import app_logger\nfrom graph_manager.applib.graph_store import GraphStore\n\n\nclass GraphStatistics(object):\n \"\"\"Retrieve basic Graph Store statistics.\"\"\"\n\n def on_get(self, req, resp):\n \"\"\"Execution of the GET graph statistics request.\"\"\"\n fuseki = GraphStore()\n resp.data = json.dumps(fuseki._graph_statistics(), indent=1, sort_keys=True)\n resp.content_type = 'application/json'\n resp.status = 
falcon.HTTP_200\n app_logger.info('Finished operations on /graph/statistics GET Request.')\n\n\nclass GraphList(object):\n \"\"\"List named graphs in the graph store.\"\"\"\n\n def on_get(self, req, resp):\n \"\"\"Execution of the GET graph list request.\"\"\"\n fuseki = GraphStore()\n resp.data = json.dumps(fuseki._graph_list(), indent=1, sort_keys=True)\n resp.content_type = 'application/json'\n resp.status = falcon.HTTP_200\n app_logger.info('Finished operations on /graph/list GET Request.')\n\n\nclass GraphResource(object):\n \"\"\"Retrieve or delete named graph.\"\"\"\n\n def on_get(self, req, resp):\n \"\"\"Execution of the GET named graph request.\"\"\"\n graph_uri = req.get_param('uri')\n fuseki = GraphStore()\n response = fuseki._graph_retrieve(graph_uri)\n if response is not None:\n resp.data = str(response)\n resp.content_type = 'text/turtle'\n app_logger.info('Retrieved: {0}.'.format(graph_uri))\n resp.status = falcon.HTTP_200\n else:\n raise falcon.HTTPGone()\n\n def on_delete(self, req, resp):\n \"\"\"Execution of the DELETE named graph request.\"\"\"\n graph_uri = req.get_param('uri')\n fuseki = GraphStore()\n fuseki._drop_graph(graph_uri)\n resp.content_type = 'plain/text'\n app_logger.info('Deleted/DELETE graph with URI: {0}.'.format(graph_uri))\n resp.status = falcon.HTTP_200\n\n\n# TO DO: Look into LD PATCH\nclass GraphUpdate(object):\n \"\"\"Update Graph Store using a SPARQL Query.\"\"\"\n\n @validate(load_schema('update'))\n def on_post(self, req, resp, parsed):\n \"\"\"Execution of the POST update query request.\"\"\"\n fuseki = GraphStore()\n resp.data = json.dumps(fuseki._graph_add(parsed['targetGraph'], parsed['triples'], parsed[\"contentType\"]))\n resp.content_type = 'application/json'\n resp.status = falcon.HTTP_200\n app_logger.info('Finished operations on /graph/update POST Request.')\n\n\nclass GraphSPARQL(object):\n \"\"\"Execute SPARQL Query on Graph Store.\"\"\"\n\n @validate(load_schema('query'))\n def on_post(self, req, resp, parsed):\n \"\"\"Execution of the POST SPARQL query request.\"\"\"\n fuseki = GraphStore()\n data = fuseki._graph_sparql(parsed['targetGraph'], parsed['query'], parsed[\"contentType\"])\n resp.data = str(data)\n resp.content_type = parsed[\"contentType\"]\n resp.status = falcon.HTTP_200\n app_logger.info('Finished operations on /graph/query POST Request.')\n\n\nclass GraphSPARQLConstruct(object):\n \"\"\"Execute SPARQL Construct Query on Graph Store.\"\"\"\n\n @validate(load_schema('query'))\n def on_post(self, req, resp, parsed):\n \"\"\"Execution of the POST SPARQL query request.\"\"\"\n fuseki = GraphStore()\n data = fuseki._graph_construct(parsed['targetGraph'], parsed['query'], parsed[\"contentType\"])\n resp.data = str(data)\n resp.content_type = parsed[\"contentType\"]\n resp.status = falcon.HTTP_200\n app_logger.info('Finished operations on /graph/construct POST Request.')\n","sub_path":"src/graph_manager/api/graph_endpoint.py","file_name":"graph_endpoint.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"326766609","text":"def get_power_set(s):\n power_set=[[]]\n for elem in s:\n # iterate over the sub sets so far\n for sub_set in power_set:\n # add a new subset consisting of the subset at hand added elem\n power_set=power_set+[list(sub_set)+[elem]]\n return power_set\n\n\ndef powersetlist(s):\n r = [[]]\n\n for e in s:\n temp= [x+[e] for x in r]\n r += temp\n\n return r\n\n\ndef list_powerset(lst):\n # the power set of the 
empty set has one element, the empty set\n result = [[]]\n for x in lst:\n # for every additional element in our set\n # the power set consists of the subsets that don't\n # contain this element (just take the previous power set)\n # plus the subsets that do contain the element (use list\n # comprehension to add [x] onto everything in the\n # previous power set)\n result.extend([subset + [x] for subset in result])\n return result\n\n\n\nimport collections\ndef power_set(s):\n q = collections.deque()\n q.appendleft([])\n for elem in reversed(s):\n while True:\n subset = q.pop()\n q.appendleft([elem] + subset)\n q.appendleft(subset)\n if not subset: break\n return list(q)\n\n\ndef powerset1(s):\n x = len(s)\n res = []\n masks = [1 << i for i in range(x)]\n for i in range(1 << x):\n res = []\n #yield [ss for mask, ss in zip(masks, s) if i & mask]\n for mask, ss in zip(masks, s):\n if i & mask:\n res.append(ss)\n yield res\n\ndef powerset2(seq):\n #Returns all the subsets of this set. This is a generator.\n if len(seq) <= 1:\n #return [[], seq]\n yield seq\n yield []\n else:\n res =[]\n for item in powerset2(seq[1:]):\n #res.append(item)\n #res.append([seq[0]]+item)\n yield [seq[0]]+item\n yield item\n #return res\n\nmaxsum = 0\nfor set1 in powerset1([4, 5, 6, 7, 8, 9]):\n if sum(set1) > maxsum:\n maxsum = sum(set1)\nprint(maxsum)\n\n\nl = [14, 7, 35, 84]\nfor x in powerset2(l):\n if sum(x) % 2:\n print(x)\n\n#r = [x for x in powerset2(l)]\n#print(r)\nprint(powersetlist([0,1,2,3,4, 5, 6, 7, 8, 9]))\n\nimport time\ntSet = [_ for _ in range(1, 21)]\n\nstime = time.time()\ns=0\nfor s1 in powersetlist(tSet):\n s += sum(s1)\nprint(\"powersetlist : \",s, end=\" \")\netime = time.time()\nprint(etime-stime)\n\nstime = time.time()\ns=0\nfor s1 in list_powerset(tSet):\n s += sum(s1)\nprint(\"list_powerset : \",s, end=\" \")\netime = time.time()\nprint(etime-stime)\n\nstime = time.time()\ns=0\nfor s1 in power_set(tSet):\n s += sum(s1)\nprint(\"power_set : \",s, end=\" \")\netime = time.time()\nprint(etime-stime)\n\n\nstime = time.time()\ns = 0\nfor set1 in powerset1(tSet):\n s += sum(set1)\nprint(\"powerset1 : \",s, end=\" \")\netime = time.time()\nprint(etime-stime)\n\n\nstime = time.time()\ns = 0\nfor set1 in powerset2(tSet):\n # print(set1)\n s += sum(set1)\nprint(\"powerset2 : \",s, end=\" \")\netime = time.time()\nprint(etime-stime)\n","sub_path":"SWExpertAcademy/LEARN/00. 
Intro/Powerset(1).py","file_name":"Powerset(1).py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"330959177","text":"import cv2\n \npt1 = (0,0)\npt2 = (0,0)\nleftclick,rightclick = False, False\n\ndef callback(event,x,y,args,flags):\n \n global pt1,pt2,leftclick,rightclick\n \n if event == cv2.EVENT_LBUTTONDOWN:\n \n if leftclick and rightclick:\n leftclick = False\n rightclick = False\n pt1 = (0,0)\n pt2 = (0,0)\n \n if not leftclick :\n pt1= (x,y)\n leftclick = True\n\n elif not rightclick:\n pt2 = (x,y)\n rightclick = True\n\n \n \n\ncamera = cv2.VideoCapture(0)\n\ncv2.namedWindow('frame')\ncv2.setMouseCallback('frame',callback)\n\nwhile True:\n \n ret,frame = camera.read()\n \n if leftclick:\n cv2.circle(frame,pt1,4,(0,255,0),-1)\n if leftclick and rightclick:\n cv2.rectangle(frame, pt1, pt2, (0,0,255), 10)\n \n \n cv2.imshow('frame', frame)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\ncv2.destroyAllWindows()\ncamera.release()\n\n\n","sub_path":"cameraDraw.py","file_name":"cameraDraw.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"360528153","text":"from django.shortcuts import render,get_object_or_404\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom sign.models import Event,Guest\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\n\n\ndef index(request):\n #return HttpResponse(\"Hello, world. You're at the polls index.\")\n return render(request,\"index.html\")\n# Create your views here.\n\ndef login_action(request):\n\n if request.method == 'POST':\n username = request.POST.get('username','')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username,password=password)\n\n if user is not None:\n\n auth.login(request,user)\n #response.set_cookie('user',uname,3600)\n request.session['user']=username\n response = HttpResponseRedirect('/event_manage/')\n return response\n else:\n return render(request,'index.html',{'error':'username or password is wrong'})\n\n@login_required\ndef event_manage(request):\n event_list = Event.objects.all()\n #uuname = request.COOKIES.get('user','')\n uuname = request.session.get('user','')\n return render(request,'event_manage.html',{\"user\":uuname,\"events\":event_list})\n\n\n@login_required\ndef guest_manage(request):\n guest_list = Guest.objects.all()\n #uuname = request.COOKIES.get('user','')\n uuname = request.session.get('user','')\n paginator = Paginator(guest_list,2)\n page = request.GET.get('page')\n try:\n contacts = paginator.page(page)\n except PageNotAnInteger:\n contacts =paginator.page(1)\n except EmptyPage:\n contacts = paginator.page(paginator.num_pages)\n return render(request, 'guest_manage.html', {\"user\":uuname, \"guests\":contacts})\n\n@login_required\ndef search_name(request):\n #uuname = request.COOKIES.get('user','')\n uuname = request.session.get('user','')\n search_name = request.GET.get(\"name\",\"\")\n event_list = Event.objects.filter(name__contains=search_name)\n return render(request,'event_manage.html',{\"user\":uuname,\"events\":event_list})\n\n@login_required\ndef sign_index(request,eid):\n event = get_object_or_404(Event,id = eid)\n return render(request,'sign_index.html',{\"event\":event})\n\n@login_required\ndef logout(request):\n 
auth.logout(request)\n response = HttpResponseRedirect('/index/')\n return response\n\n\n@login_required\ndef sign_index_action(request,eid):\n event = get_object_or_404(Event,id = eid)\n phone = request.POST.get('phone','')\n print(phone)\n result = Guest.objects.filter(phone=phone)\n if not result:\n return render(request, 'sign_index.html',\n {'event': event, 'hint': 'phone error.'})\n\n result = Guest.objects.filter(phone=phone, event_id=eid)\n if not result:\n return render(request, 'sign_index.html',\n {'event': event, 'hint': 'event id or phone error.'})\n\n result = Guest.objects.get(event_id=eid, phone=phone)\n\n if result.sign:\n return render(request, 'sign_index.html',\n {'event': event, 'hint': \"user has sign in.\"})\n else:\n Guest.objects.filter(event_id=eid, phone=phone).update(sign='1')\n return render(request, 'sign_index.html', {'event': event, 'hint': 'sign in success!',\n 'user': result,\n 'guest': result\n })\n","sub_path":"sign/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"183849743","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# import tensorflow as tf\n# import tensorflow.contrib.rnn as rnn\n# import tensorflow.contrib.metrics as metrics\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras import optimizers\nfrom keras import backend as KBend\n\nfrom sklearn.metrics import mean_squared_error\nimport pandas\nimport math\n\nnp.random.seed(7)\n\n## x(t) = phi0 + phi1*x(t-1) + a(t)\ndef ar1(phi0, phi1, length=10):\n x = [1]\n for i in range(0,length-1):\n x.append(phi0 + phi1*x[i] + np.random.normal(0,1,1)[0])\n return x\n\n################################################################################################################################################\n################################################################################################################################################\n\nxsize = 400\nx = ar1(0.9,0.9,xsize)\nx = (x-np.ones(len(x))*min(x))/(max(x)-min(x))\nxtrain = x[0:xsize/2]\nxtest = x[xsize/2:xsize]\ngraph_label = 'AR(1)'\n\n################################################################################################################################################\n################################################################################################################################################\n\n# data = pandas.read_csv('data/daily-returns.csv', sep=r\"\\s+\", header=None)\n# data = data.values\n# data = np.reshape(data, data.shape[0])\n# data = (data-np.ones(len(data))*min(data))/(max(data)-min(data))\n# data = data/max(data)\n# xtrain = data[0:len(data)/2]\n# xtest = data[len(data)/2:len(data)]\n# graph_label = 'Bitcoin daily return'\n\n################################################################################################################################################\n################################################################################################################################################\n\ndef create_dataset_for_lstm(dataset, look_back=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back)]\n\t\tdataX.append(a)\n\t\tdataY.append([dataset[i + look_back]])\n\treturn np.array(dataX), np.array(dataY)\n\nlook_back = 10\ntrainX, trainY = create_dataset_for_lstm(xtrain, look_back)\ntrainX = np.reshape(trainX, 
(trainX.shape[0], 1, trainX.shape[1]))\n# trainY = np.reshape(trainY, (trainY.shape[0], 1, trainY.shape[1]))\ntestX, testY = create_dataset_for_lstm(xtest, look_back)\ntestX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n\n\n######## create and fit the LSTM network\nmodel = Sequential()\nmodel.add(LSTM(8, input_shape=(1, look_back)))\nmodel.add(Dense(1))\n\nsgd = optimizers.SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\nrmsprop = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\nadagrad = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)\ndef root_mean_squared_error(y_true, y_pred):\n\treturn KBend.sqrt(KBend.mean(KBend.square(y_pred - y_true), axis=-1))\n# model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])\nmodel.compile(loss='mean_squared_error', optimizer=adagrad, metrics=['accuracy'])\nhistory = model.fit(trainX, trainY, epochs=50, batch_size=1, verbose=2)\n\n\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)\n\n# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY, trainPredict))\nprint('Train Score: %.6f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY, testPredict))\nprint('Test Score: %.6f RMSE' % (testScore))\n\n## visualise training history\nplt.plot(history.history['loss'])\n# plt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\nplt.plot(np.array(range(trainX.shape[0])), np.reshape(trainPredict, trainPredict.shape[0]), label='LSTM on train')\nplt.plot(np.array(range(trainX.shape[0])), np.reshape(trainY, trainY.shape[0]), label=graph_label)\nplt.xlabel('t')\nplt.ylabel('x(t)')\nplt.legend()\nplt.show()\n\n# print np.reshape(testPredict, testPredict.shape[0]).shape\n# len(xtrain), len(xtrain)+len(xtest)) # range(200,398))\nplt.plot(np.array(range(testPredict.shape[0], testPredict.shape[0]+testPredict.shape[0])), np.reshape(testPredict, testPredict.shape[0]), label='LSTM on test')\nplt.plot(np.array(range(testPredict.shape[0], testPredict.shape[0]+testPredict.shape[0])), np.reshape(testY, testY.shape[0]), label=graph_label)\nplt.xlabel('t')\nplt.ylabel('x(t)')\nplt.legend()\nplt.show()","sub_path":"lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"319253529","text":"import django_tables2 as tables\n\nfrom .models import HMOBill\n\nclass HMOBillTable(tables.Table):\n\n selection = tables.CheckBoxColumn(accessor='pk')\n \n\n class Meta:\n\n model = HMOBill\n template_name = 'django_tables2/bootstrap.html'\n fields = ('selection', 'utility_date', 'approval_number', 'hmo.description', 'patient', 'doctor.last_name', 'bill_status')","sub_path":"HMO/billing/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"333375935","text":"from flask import render_template, request, jsonify\nfrom functools import cmp_to_key\nfrom app import app\nimport os\nimport json\nimport sqlite3\n\n\ndef multipleSearch(column, words):\n finalStr = ''\n for i in range(len(words)-1):\n finalStr+=column+' LIKE \"%'+words[i]+'%\" AND '\n finalStr+=column+' LIKE \"%'+words[-1]+'%\"'\n return finalStr\n\ndef sort(a, b):\n break1 = min(a.find(\" \"),a.find(\"-\")) if a.find(\"-\") != -1 else a.find(\" \")\n break2 = 
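# --- A sketch of what create_dataset_for_lstm in lstm.py above produces for a
# --- toy series (assumes that helper is in scope; the look_back value here is
# --- illustrative, not the script's setting).
import numpy as np

series = np.arange(8)                        # [0 1 2 3 4 5 6 7]
Xw, Yw = create_dataset_for_lstm(series, look_back=3)
print(Xw.shape, Yw.shape)                    # (4, 3) (4, 1)
print(Xw[0], Yw[0])                          # [0 1 2] [3]: window -> next value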
min(b.find(\" \"),b.find(\"-\")) if b.find(\"-\") != -1 else b.find(\" \") \n aInt = float(a[:break1])\n bInt = float(b[:break2])\n if aInt < bInt:\n return -1\n elif aInt > bInt:\n return 1\n else:\n return 0\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\n return render_template('index.html')\n@app.route('/fatals')\ndef fatals():\n return render_template('fatals.html')\n\n\n@app.route('/searchMARC')\ndef searchM():\n method = request.args.get('method', 'None').strip()\n ident = request.args.get('ident', 'None')\n SITE_ROOT = os.path.realpath(os.path.dirname(__file__))\n json_url = os.path.join(SITE_ROOT, \"static\", \"marc.json\")\n data = json.load(open(json_url))\n if ident in data[method].keys():\n answer = data[method][ident]\n else:\n answer = \"INVALID ID\"\n return jsonify(result = answer)\n\n@app.route('/search')\ndef search():\n keyword = request.args.get('keyword', 'None')\n menu = []\n conn = sqlite3.connect('icd9.db')\n c = conn.cursor()\n c.execute('SELECT * FROM data WHERE '+multipleSearch('descr', keyword.split()))\n data = c.fetchall()\n for row in data:\n menu.append(row[2] + \" (\"+str(row[0])+\")\")\n c.close()\n conn.close()\n return jsonify(result = sorted(menu, key = cmp_to_key(sort)))\n\n@app.route('/level2')\ndef level2():\n SITE_ROOT = os.path.realpath(os.path.dirname(__file__))\n json_url = os.path.join(SITE_ROOT, \"static\", \"data.json\")\n data = json.load(open(json_url))\n menu = []\n nextJSON = data[request.args.get('cat', 'None')][1]\n for row in nextJSON.values():\n menu.append(row[0])\n return jsonify(result = sorted(menu, key = cmp_to_key(sort)))\n\n@app.route('/level3')\ndef level3():\n SITE_ROOT = os.path.realpath(os.path.dirname(__file__))\n json_url = os.path.join(SITE_ROOT, \"static\", \"data.json\")\n data = json.load(open(json_url))\n cat1 = [request.args.get('cat1', 'None')][0]\n cat2 = [request.args.get('cat2', 'None')][0]\n menu = []\n nextJSON = data[cat1][1][cat2][1]\n for row in nextJSON.values():\n menu.append(row)\n return jsonify(result = sorted(menu, key = cmp_to_key(sort)))\n\n@app.route('/icd9', methods = ['GET'])\ndef icd9():\n menu = []\n SITE_ROOT = os.path.realpath(os.path.dirname(__file__))\n json_url = os.path.join(SITE_ROOT, \"static\", \"data.json\")\n data = json.load(open(json_url))\n for row in data.values():\n menu.append(row[0])\n return render_template('icdGen.html', title='Home', menu1 = sorted(menu, key = cmp_to_key(sort)))\n\n\n@app.route('/marc')\ndef marc():\n return render_template('marc.html')","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"624858580","text":"import torch\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport os\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import ImageFolder\nfrom distutils.dir_util import copy_tree, remove_tree\nimport time\nfrom skimage.feature import hog\n\nclass LogoDataset(Dataset):\n \"\"\"Custom Dataset for loading Logo images\"\"\"\n\n def __init__(self, txt_path, img_dir, transform=None):\n\n df = pd.read_csv(txt_path, sep=\",\", index_col=None)\n self.img_dir = img_dir\n self.txt_path = txt_path\n self.img_names = df['Image'].values\n self.y = df['Label'].values\n self.transform = transform\n self.label_to_idx = dict()\n\n def __getitem__(self, index):\n img = Image.open(os.path.join(self.img_dir,\n self.img_names[index]))\n\n if 
self.transform is not None:\n img = self.transform(img)\n\n if self.y[index] not in self.label_to_idx:\n self.label_to_idx[self.y[index]] = len(self.label_to_idx)\n label = self.label_to_idx[self.y[index]]\n return img, label, self.img_names[index]\n\n def __len__(self):\n return self.y.shape[0]\n\n\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\nclass MyImageFolder(ImageFolder):\n\n def __getitem__(self, index):\n # print(super(MyImageFolder, self).__getitem__(index))\n # print(self.imgs[index])\n\n return super(MyImageFolder, self).__getitem__(index) + (self.imgs[index][0],)\n\n\ndef organize_files(data_path, tmp_path, args):\n while (True):\n\n data_directories = list()\n for root, dirs, files in os.walk(data_path):\n for d in dirs:\n data_directories.append(os.path.join(root, d))\n\n if len(data_directories) < args.number_of_labels:\n time.sleep(1)\n continue\n\n tmp_directories = list()\n for root, dirs, files in os.walk(tmp_path):\n for d in dirs:\n tmp_directories.append(os.path.join(root, d))\n\n nmb_of_files = dict()\n for d in data_directories:\n for root, dirs, files in os.walk(d):\n nmb_of_files[d] = len(files)\n\n fail = False\n for val in nmb_of_files.values():\n if val < args.batch_size:\n time.sleep(1)\n fail = True\n\n if fail:\n continue\n\n if not os.path.exists(data_path):\n time.sleep(3)\n continue\n copy_tree(data_path, tmp_path)\n remove_tree(data_path)\n\n nmb_of_files = dict()\n for d in tmp_directories:\n for root, dirs, files in os.walk(d):\n nmb_of_files[d] = len(files)\n\n fail = False\n for val in nmb_of_files.values():\n if val < 8 * args.batch_size:\n fail = True\n\n if fail:\n continue\n\n break\n\n\ndef load_data(data_path, tmp_path, args, shuffle=True):\n if not os.path.exists(tmp_path):\n os.mkdir(tmp_path)\n remove_tree(tmp_path)\n if not os.path.exists(tmp_path):\n os.mkdir(tmp_path)\n\n organize_files(data_path, tmp_path, args)\n\n custom_transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),\n transforms.Resize((args.img_size, args.img_size)),\n HogFeatures(),\n NumpyToTensor(),\n ])\n\n dataset = MyImageFolder(root=tmp_path,\n transform=custom_transform)\n\n print(dataset.class_to_idx)\n\n return DataLoader(dataset=dataset,\n batch_size=args.batch_size,\n shuffle=shuffle,\n num_workers=4)\n\n\nclass HogFeatures(object):\n\n def __init__(self, orient=9, pixels_per_cell=(1, 1), cells_per_block=(2, 2), multichannel=False):\n self.orient = orient\n self.pixels_per_cell = pixels_per_cell\n self.cells_per_block = cells_per_block\n self.multichannel = multichannel\n\n def __call__(self, img):\n return hog(img, orientations=self.orient, pixels_per_cell=self.pixels_per_cell,\n cells_per_block=self.cells_per_block, visualize=False, multichannel=self.multichannel)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass NumpyToTensor(object):\n\n def __call__(self, img):\n return torch.from_numpy(img).float()\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n","sub_path":"project/auto_generate_hog_nn/src/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"469654381","text":"def Q_12(self, X_train_scaled, X_test_scaled, y_train, y_test, learning_rate=0.001, nIteration=7000):\n # Task 12: Given the (X_train, y_train) pairs denoting input matrix and output vector respectively,\n # Fit a linear regression model using the stochastic gradient descent algorithm you 
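# --- Sketch of the HogFeatures transform above applied to a single grayscale
# --- image. The image size and cell/block geometry here are illustrative, not
# --- the pipeline's actual settings.
import numpy as np
from skimage.feature import hog

img = np.random.rand(32, 32)                 # toy grayscale image in [0, 1]
feat = hog(img, orientations=9, pixels_per_cell=(8, 8),
           cells_per_block=(2, 2), visualize=False)
print(feat.shape)                            # flat HOG descriptor vector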
learned in class to obtain\n # the coefficients, beta's, as a numpy array of m+1 values (Please recall class lecture).\n # Please use the learning_rate and nIteration (number of iterations) parameters in your implementation\n # of the gradient descent algorithm.\n # Please measure the cpu_time needed during the training step. cpu_time is not equal to the wall_time. So,\n # use time.perf_counter() for an accurate measurement. Documentation on this function can be found here:\n # https://docs.python.org/3/library/time.html\n # Then using the computed beta values, predict the test samples provided in the \"X_test_scaled\"\n # argument, and let's call your prediction \"y_pred\".\n # Compute Root Mean Squared Error (RMSE) of your prediction.\n # Finally, return the beta vector, y_pred, RMSE, cpu_time as a tuple.\n # PLEASE DO NOT USE ANY LIBRARY FUNCTION THAT DOES THE LINEAR REGRESSION.\n import random\n random.seed(554433)\n beta = []\n y_pred = []\n RMSE = -1\n cpu_time = 0\n\n ## YOUR CODE HERE ###\n\n\n\n return (beta, y_pred, RMSE, cpu_time)","sub_path":"Q_12.py","file_name":"Q_12.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"144635710","text":"import pyqtgraph as pg\nimport numpy as np\nfrom PyQt5 import QtGui\nfrom pylsl import StreamInlet, StreamOutlet, StreamInfo, resolve_byprop\nimport threading\nimport queue\n\n\ndef fft_backend(input_stream, output_stream, window_length=256, pow2=True, window_type=np.hamming):\n \n #################################\n ## Stream Inlet and Outlet Creation\n #################################\n\n #streams = resolve_byprop(\"name\",input_stream.name(),timeout= 10)\n #input_stream = streams[0]\n #print(input_stream.channel_count())\n #print(input_stream)\n #print(input_stream.name())\n inlet = StreamInlet(input_stream, max_chunklen=12, recover=True)\n inlet.open_stream() # Stream is opened implicitely on first call of pull chunk, but opening now for clarity\n\n # Create StreamOutlet to push data to output stream\n outlet = StreamOutlet(output_stream, chunk_size=129)\n ###################################\n ## FFT\n ###################################\n \n buffer = np.empty((0,5))\n window = window_type(window_length)\n g = True\n while(True):\n input_chunk = inlet.pull_chunk() # Pull Chunk\n #print(np.shape(input_chunk))\n\n if input_chunk[0] and np.shape(input_chunk)[1] > 0: # Check for available chunk\n #print(\"output samples\")\n buffer = np.append(buffer, input_chunk[0], axis=0)\n\n if (len(buffer) >= window_length):\n # Take data from buffer\n data = buffer[0:window_length]\n data = np.transpose(data)\n\n # Get frequency labels/bins\n freq_labels = np.fft.rfftfreq(window_length, 1/input_stream.nominal_srate())\n\n # Take FFT of data for each channel\n data_windowed = []\n data_fft = []\n psd = []\n for i in range(0, output_stream.channel_count()):\n # Multiply data by window\n data_windowed.append(data[i] - np.mean(data[i], axis=0))\n data_windowed[i] = data_windowed[i] * window\n\n # Get FFT\n data_fft.append(np.fft.rfft(data_windowed[i], n=window_length, axis=0))\n data_fft[i] = data_fft[i]/window_length\n\n # Convert FFT to PSD\n psd.append(abs(data_fft[i])) # Take absolute value\n # Assume input signal is real-valued and double power to account for negative frequencies \n # DC power (psd[i][0]) only occurs once and does not need to be doubled)\n psd[i][1:] = 2*psd[i][1:]\n\n # Create Output Data Packet in shape 2 x N (Where N is the # of 
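# --- One possible shape of the update loop the Q_12 stub above asks for: plain
# --- stochastic gradient descent on squared error. A sketch, not the graded
# --- solution; handling the intercept via a prepended bias column is an
# --- assumption.
import numpy as np

def sgd_linreg(X, y, learning_rate=0.001, n_iter=7000, seed=554433):
    rng = np.random.default_rng(seed)
    Xb = np.hstack([np.ones((X.shape[0], 1)), np.asarray(X, dtype=float)])
    beta = np.zeros(Xb.shape[1])
    for _ in range(n_iter):
        i = rng.integers(Xb.shape[0])        # pick one random training sample
        err = Xb[i] @ beta - y[i]            # signed residual for that sample
        beta -= learning_rate * err * Xb[i]  # gradient step on squared error
    return beta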
discrete frequencies)\n # The first dimension of output sample contains the data of shape CHANNELS x N\n # The second dimension contains the N labels for the frequencies in Hz \n psd = np.transpose(psd)\n psd = psd.tolist()\n if(g==True):\n #print(psd)\n g=False\n\n #print(np.shape(psd))\n #freq_labels = freq_labels.tolist()\n #output_sample = (psd, freq_labels)\n #print(np.shape(output_sample))\n #print(output_sample)\n\n # Push fft transform for each channel using outlet\n outlet.push_chunk(psd)\n\ndef fft(input_stream, output_stream_name='default', window_length=256, pow2=True, window_type=np.hamming, channels=0):\n\n #################################\n ## Create New Output StreamInfo Objectcd \n #################################\n\n # Set Default Output Stream Name\n if (output_stream_name == 'default'):\n output_stream_name = str(input_stream.name() + '-PSD')\n\n # Get number of channels to transform\n if(channels == 0):\n channels = input_stream.channel_count() # Get number of channels\n\n # Create Output StreamInfo Object\n output_stream = StreamInfo(name=output_stream_name, \n type='PSD', \n channel_count=channels, \n nominal_srate=input_stream.nominal_srate(),\n channel_format='float32',\n source_id=input_stream.source_id())\n\n ####################################\n ## Create Thread to Run fft_backend\n ####################################\n #fft_backend(input_stream, output_stream)\n # Currently if you run function in a diff thread it does not work\n thread = threading.Thread(target=fft_backend, \n kwargs=dict(input_stream=input_stream, \n output_stream=output_stream,\n window_length=window_length, \n pow2=pow2,\n window_type=window_type))\n\n thread.start()\n\n return output_stream, queue\n\ndef plotTimeDomain(stream_info, chunkwidth=0, fs=0, channels=0, timewin=50, tickfactor=5, size=(1500, 800), title=None):\n \"\"\"Plot Real-Time domain in the time domain using a scrolling plot.\n\n Accepts a pylsl StreamInlet Object and plots chunks in real-time as they are recieved\n using a scrolling pyqtgraph plot. Can plot multiple channels.\n\n Args:\n stream_info (pylsl StreamInfo Object): The stream info object for the stream to be plotted\n chunkwidth (int): The number of samples in each chunk when pulling chunks from the stream\n fs (int): The sampling frequency of the device. If zero function will attempt to determine \n sampling frequency automatically\n channels (int): The number of channels in the stream (Eg. Number of EEG Electrodes). If\n zero the function will attempt determine automatically\n timewin (int): The number seconds to show at any given time in the plot. This affects the speed \n with which the plot will scroll accross the screen. Can not be a prime number.\n tickfactor (int): The number of seconds between x-axis labels. Must be a factor of timewin\n size (array): Array of type (width, height) of the figure\n title (string): Title of the plot figure\n \n Returns:\n bool: True if window was closed and no errors were encountered. 
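# --- Sketch of the windowing and one-sided-PSD convention used by fft_backend
# --- above (sampling rate, window length, and the test tone are illustrative).
import numpy as np

fs, N = 256.0, 256
t = np.arange(N) / fs
x = np.sin(2 * np.pi * 10 * t)               # 10 Hz test tone
w = np.hamming(N)
spec = np.abs(np.fft.rfft((x - x.mean()) * w)) / N
spec[1:] *= 2                                # fold negative-frequency power in
freqs = np.fft.rfftfreq(N, 1 / fs)
print(freqs[np.argmax(spec)])                # ~10.0 Hz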
False if an error was encountered within\n the function\n \"\"\"\n #################################\n ## Stream Inlet Creation\n #################################\n #stream = resolve_byprop(\"name\",stream_info.name(),timeout= 10)\n inlet = StreamInlet(stream_info, max_chunklen=chunkwidth, recover=True)\n inlet.open_stream() # Stream is opened implicitely on first call of pull chunk, but opening now for clarity\n\n #################################\n ## Variable Initialization\n #################################\n\n ## Get/Check Default Params\n if(timewin%tickfactor != 0):\n print('''ERROR: The tickfactor should be a factor of of timewin. The default tickfactor\n \\n is 5 seconds. If you changed the default timewin, make sure that 5 is a factor, or \n \\n change the tickfactor so that it is a factor of timewin''')\n return False\n\n if(fs == 0):\n fs = stream_info.nominal_srate() # Get sampling rate\n\n if(channels == 0):\n channels = stream_info.channel_count() # Get number of channels\n\n ## Initialize Constants\n XWIN = timewin*fs # Width of X-Axis in samples\n XTICKS = (int)((timewin + 1)/tickfactor) # Number of labels to have on X-Axis\n #CHUNKPERIOD = chunkwidth*(1/fs) # The length of each chunk in seconds\n\n ##################################\n ## Figure and Plot Set Up\n ##################################\n\n ## Initialize QT\n app = QtGui.QApplication([])\n\n ## Define a top-level widget to hold everything\n fig = QtGui.QWidget()\n fig.resize(size[0], size[1]) # Resize window\n if (title != None): \n fig.setWindowTitle(title) # Set window title\n layout = QtGui.QGridLayout()\n fig.setLayout(layout)\n\n # Set up initial plot conditions\n (x_vec, step) = np.linspace(0,timewin,XWIN+1, retstep=True) # vector used to plot y values\n xlabels = np.zeros(XTICKS).tolist() # Vector to hold labels of ticks on x-axis\n xticks = [ x * tickfactor for x in list(range(0, XTICKS))] # Initialize locations of x-labels\n y_vec = np.zeros((channels,len(x_vec))) # Initialize y_values as zero\n\n # Set Up subplots and lines\n plots = []\n curves = []\n colors = ['c', 'm', 'g', 'r', 'y', 'b'] # Color options for various channels\n for i in range(0, channels):\n # Create axis item and set tick locations and labels\n axis = pg.AxisItem(orientation='bottom')\n axis.setTicks([[(xticks[i],str(xlabels[i])) for i in range(len(xticks))]]) # Initialize all labels as zero\n # Create plot widget and append to list\n plot = pg.PlotWidget(axisItems={'bottom': axis}, labels={'left': 'Volts (mV)'}, title='Channel ' + (str)(i + 1)) # Create Plot Widget\n plot.plotItem.setMouseEnabled(x=False, y=False) # Disable panning for widget\n plot.plotItem.showGrid(x=True) # Enable vertical gridlines\n plots.append(plot)\n # Plot data and save curve. 
Append curve to list\n curve = plot.plot(x_vec, y_vec[i], pen=pg.mkPen(colors[i%len(colors)], width=0.5)) # Set thickness and color of lines\n curves.append(curve)\n # Add plot to main widget\n layout.addWidget(plot, i, 0)\n\n # Display figure as a new window\n fig.show()\n\n ###################################\n # Real-Time Plotting Loop\n ###################################\n\n firstUpdate = True\n while(True):\n chunk = inlet.pull_chunk()\n\n # (something is wierd with dummy chunks, get chunks of diff sizes, data comes in too fast)\n if chunk and np.shape(chunk)[1] > 0: # Check for available chunk \n print(np.shape(chunk))\n chunkdata = np.transpose(chunk[0]) # Get chunk data and transpose to be CHANNELS x CHUNKLENTH\n chunkperiod = len(chunkdata[0])*(1/fs)\n xticks = [x - chunkperiod for x in xticks] # Update location of x-labels\n\n # Update x-axis locations and labels\n if(xticks[0] < 0): # Check if a label has crossed to the negative side of the y-axis\n\n # Delete label on left of x-axis and add a new one on the right side\n xticks.pop(0)\n xticks.append(xticks[-1] + tickfactor)\n\n # Adjust time labels accordingly\n if (firstUpdate == False): # Check to see if it's the first update, if so skip so that time starts at zero\n xlabels.append(xlabels[-1] + tickfactor)\n xlabels.pop(0)\n else:\n firstUpdate = False\n \n # Update plotted data\n for i in range(0,channels):\n y_vec[i] = np.append(y_vec[i], chunkdata[i], axis=0)[len(chunkdata[i]):] # Append chunk to the end of y_data (currently only doing 1 channel)\n curves[i].setData(x_vec, y_vec[i]) # Update data\n\n # Update x-axis labels\n axis = plots[i].getAxis(name='bottom')\n axis.setTicks([[(xticks[i],str(xlabels[i])) for i in range(len(xticks))]])\n \n # Update QT Widget to reflect the changes we made\n pg.QtGui.QApplication.processEvents()\n\n # Check to see if widget if has been closed, if so exit loop\n if not fig.isVisible():\n break\n \n # Close the stream inlet\n inlet.close_stream()\n \n return True\n\ndef plotFreqDomain(stream_info, chunkwidth, channels=0, size=(1500, 1500), title=None):\n \"\"\"Plot Real-Time in the frequency domain using a static x-axis and changing y axis values.\n\n Accepts a pylsl StreamInlet Object and plots chunks in real-time as they are recieved\n using a pyqtgraph plot. Can plot multiple channels.\n\n Args:\n stream_info (pylsl StreamInfo Object): The stream info object for the stream to be plotted\n chunkwidth (int): The number of samples in each chunk when pulling chunks from the stream\n fs (int): The sampling frequency of the device. If zero function will attempt to determine \n sampling frequency automatically\n size (array): Array of type (width, height) of the figure\n title (string): Title of the plot figure\n \n Returns:\n bool: True if window was closed and no errors were encountered. 
False if an error was encountered within\n the function\n \"\"\"\n #################################\n ## Stream Inlet Creation\n #################################\n inlet = StreamInlet(stream_info, max_chunklen=chunkwidth, recover=True)\n inlet.open_stream() # Stream is opened implicitely on first call of pull chunk, but opening now for clarity\n\n #################################\n ## Variable Initialization\n #################################\n\n if(channels == 0):\n channels = stream_info.channel_count() # Get number of channels\n\n ##################################\n ## Figure and Plot Set Up\n ##################################\n\n ## Initialize QT\n app = QtGui.QApplication([])\n\n ## Define a top-level widget to hold everything\n fig = QtGui.QWidget()\n fig.resize(size[0], size[1]) # Resize window\n if (title != None): \n fig.setWindowTitle(title) # Set window title\n layout = QtGui.QGridLayout()\n fig.setLayout(layout)\n\n # Set up initial plot conditions\n (x_vec, step) = np.linspace(0,chunkwidth,chunkwidth, retstep=True) # vector used to plot y values\n y_vec = np.zeros((channels,len(x_vec))) # Initialize y_values as zero\n\n # Set Up subplots and lines\n plots = []\n curves = []\n colors = ['c', 'm', 'g', 'r', 'y', 'b'] # Color options for various channels\n for i in range(0, channels):\n # Create plot widget and append to list\n plot = pg.PlotWidget(labels={'left': 'Power (dB)'}, title='Channel ' + (str)(i + 1)) # Create Plot Widget\n plot.plotItem.setMouseEnabled(x=False, y=False) # Disable panning for widget\n plot.plotItem.showGrid(x=True) # Enable vertical gridlines\n plots.append(plot)\n # Plot data and save curve. Append curve to list\n curve = plot.plot(x_vec, y_vec[i], pen=pg.mkPen(colors[i%len(colors)], width=0.5)) # Set thickness and color of lines\n curves.append(curve)\n # Add plot to main widget\n layout.addWidget(plot, np.floor(i/2), i%2)\n\n # Display figure as a new window\n fig.show()\n\n ###################################\n # Real-Time Plotting Loop\n ###################################\n\n firstUpdate = True\n buffer = []\n while(True):\n chunk = inlet.pull_chunk()\n #print(np.shape(chunk[0]))\n #print(chunk[0][0:129])\n #print(np.shape(chunk[0][0:129]))\n\n if not (np.size(chunk[0]) == 0): # Check for available chunk\n chunkdata = np.transpose(chunk[0]) # Get chunk data and transpose to be CHANNELS x CHUNKLENGTH\n if np.size(buffer) == 0:\n buffer = chunkdata\n else:\n buffer = np.append(buffer, chunkdata, axis=1)\n \n while np.size(buffer,1) > 129:\n data = buffer[:,0:129]\n buffer = buffer[:,129:]\n #if np.size(buffer,1) < 129:\n #data = np.zeros((5,129))\n # Update plotted data\n for i in range(0,channels):\n curves[i].setData(x_vec, data[i]) # Update data\n \n # Update QT Widget to reflect the changes we made\n pg.QtGui.QApplication.processEvents()\n\n # Check to see if widget if has been closed, if so exit loop\n if not fig.isVisible():\n break\n \n # Close the stream inlet\n inlet.close_stream()\n \n return True\n\n","sub_path":"Shawn/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":15725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"349661733","text":"''' Tree structure inspired by : https://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html '''\n\nimport numpy as np\n\nclass Tree:\n \n def __init__(self):\n \n self.LEAF = -1\n self.n_nodes = 1\n \n self.left_child = [self.LEAF]\n self.right_child = [self.LEAF]\n self.depth = [0]\n 
self.parent = [self.LEAF]\n        \n        \n    def split(self, node_id):\n        \n        if node_id >= self.n_nodes:\n            #Error\n            return 0\n        if self.right_child[node_id] != self.LEAF:\n            #Error\n            return 0\n        \n        \n        self.left_child[node_id] = self.n_nodes\n        self.right_child[node_id] = self.n_nodes+1\n        \n        self.left_child += [self.LEAF, self.LEAF]\n        self.right_child += [self.LEAF, self.LEAF]\n        self.depth += [self.depth[node_id] + 1, self.depth[node_id] + 1]\n        self.parent += [node_id, node_id]\n        self.n_nodes += 2\n        \n    def go_left(self, node_id):\n        \n        if node_id >= self.n_nodes:\n            #Error\n            print(\"error\")\n            return 0\n        if self.left_child[node_id] == self.LEAF:\n            #Error\n            print(\"error\")\n            return 0\n        \n        return self.left_child[node_id]\n\n    def go_right(self, node_id):\n        \n        if node_id >= self.n_nodes:\n            #Error\n            print(\"error\")\n            return 0\n        if self.right_child[node_id] == self.LEAF:\n            #Error\n            print(\"error\")\n            return 0\n        \n        return self.right_child[node_id]\n    \n    def path_to_root(self, node_id):\n        \n        if node_id >= self.n_nodes:\n            #Error\n            print(\"error\")\n            return 0\n        \n        path = [node_id]\n        i = node_id\n        \n        while self.parent[i] != self.LEAF:\n            path.append(self.parent[i])\n            i = self.parent[i]\n        \n        #while( i != 0 ):\n            #if i in self.left_child:\n                #j = np.where(np.array(self.left_child) == i)[0][0]\n            #elif i in self.right_child:\n                #j = np.where(np.array(self.right_child) == i)[0][0]\n            \n            #path += [j]\n            #i = j\n        \n        return path\n\n    def leafs(self):\n        \n        leafs = []\n        for i in range(self.n_nodes):\n            if self.right_child[i] == self.LEAF:\n                leafs.append(i)\n        return leafs\n    \n    def max_depth(self):\n        return np.max(self.depth)\n    \n    def is_leaf(self, node_id):\n        return self.left_child[node_id] == self.LEAF\n    \n    \nclass CartDensityTree(Tree):\n    \n    def __init__(self):\n        super().__init__()\n        self.cut_axe = [self.LEAF]\n        self.threshold = [self.LEAF]\n        self.count = [self.LEAF]\n        \n    def split(self, node_id, cut_axe, threshold):\n        super().split(node_id)\n        self.threshold[node_id] = threshold\n        self.cut_axe[node_id] = cut_axe\n        #TODO update count and volume (may need more info)\n        \n        self.threshold += [self.LEAF, self.LEAF]\n        self.cut_axe += [self.LEAF, self.LEAF]\n        \n    def locate(self, x):\n        \n        #TODO check the input dimension\n        i = 0\n        while self.left_child[i] != self.LEAF:\n            if x[self.cut_axe[i]] <= self.threshold[i]:\n                i = self.left_child[i]\n            else:\n                i = self.right_child[i]\n        \n        return i\n    \n    def fit(self, X, max_depth=2, method='dyadic'):\n        n, d = X.shape\n        self.dim = d\n        \n        indices = [np.array(range(n))]\n        self.count[0] = n\n        while( self.max_depth() < max_depth ):\n            \n            for i in self.leafs():\n                \n                if method == 'dyadic':\n                    \n                    ''' axe will be the maximizer of ll among dyadic cuts '''\n                    c = len(indices[i])\n                    axe = 0\n                    threshold = np.mean(self.rectangle(i)[axe, :])\n                    c_left = len(np.where(X[indices[i], axe] <= threshold)[0])\n                    c_right = len(np.where(X[indices[i], axe] > threshold)[0])\n                    best_score_cut = c_left * np.log(c_left) + c_right * np.log(c_right)\n                    \n                    for j in range(1, self.dim):\n                        threshold = np.mean(self.rectangle(i)[j, :])\n                        c_left = len(np.where(X[indices[i], j] <= threshold)[0])\n                        c_right = len(np.where(X[indices[i], j] > threshold)[0])\n                        score_cut = c_left * np.log(c_left) + c_right * np.log(c_right)\n                        if score_cut > best_score_cut:\n                            best_score_cut = score_cut\n                            axe = j\n                \n                \n                threshold = np.mean(self.rectangle(i)[axe, :])\n                \n                \n                self.split(i, axe, threshold)\n                # map the split back to row indices of X (np.where returns positions within indices[i])\n                indices += [indices[i][np.where(X[indices[i], axe] <= threshold)[0]]] \n                indices += [indices[i][np.where(X[indices[i], axe] > threshold)[0]]]\n            \n        self.count = self.n_nodes * [0]\n        for j in 
range(self.n_nodes):\n self.count[j] = len(indices[j])\n \n \n def rectangle(self, i):\n ''' Return limits of rectangle corresponding to node i '''\n \n rectangle = np.zeros(shape=(self.dim, 2))\n rectangle[:, 1] = np.ones(self.dim)\n \n \n path = self.path_to_root(i)[::-1]\n for j in range(len(path)-1):\n\n \n if self.left_child[path[j]] == path[j+1]:\n rectangle[self.cut_axe[path[j]], 1] = self.threshold[path[j]]\n elif self.right_child[path[j]] == path[j+1]:\n rectangle[self.cut_axe[path[j]], 0] = self.threshold[path[j]]\n \n return rectangle\n \n def volume(self, i):\n ''' return the volume corresponding to the node i '''\n r = self.rectangle(i)\n return np.prod(r[:, 1] - r[:, 0])\n \n def log_likelihood(self):\n \n ll = 0\n n = self.count[0]\n for i in self.leafs():\n v = self.volume(i)\n c = self.count[i]\n \n if c != 0:\n \n ll += c * np.log( c / (v * n) ) / n\n \n return(ll)\n \n \n \n\ndef show_hist2d(tree):\n \n\n import matplotlib.pyplot as plt\n from matplotlib import cm as cm\n \n n = tree.count[0]\n \n \n for i in tree.leafs():\n intensity = tree.count[i] / n\n r = tree.rectangle(i)\n plt.plot([r[0,0], r[0,0], r[0,1], r[0,1], r[0, 0]] , [r[1,0], r[1,1], r[1,1], r[1,0], r[1, 0]], c='r')\n \n plt.xlim([-0.1, 1.1])\n plt.ylim([-0.1, 1.1])\n plt.show()\n \n \nif __name__ == '__main__':\n \n t = CartDensityTree()\n X = np.random.rand(5000,2)\n t.fit(X, max_depth=4)\n \n print(t.log_likelihood())\n\n show_hist2d(t)\n\n #print(t.left_child)\n #print(t.right_child)\n #print(t.cut_axe)\n #print(t.threshold)\n #print(t.leafs())\n \n \n \n \n\n","sub_path":"includes/dyadic-histogram.py","file_name":"dyadic-histogram.py","file_ext":"py","file_size_in_byte":7164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"496847548","text":"from rest_framework import serializers\nfrom .models import Provider\n\n\nclass ProviderSerializer(serializers.ModelSerializer):\n eth_address = serializers.\\\n SerializerMethodField('_eth_address')\n\n academy_url = serializers.\\\n SerializerMethodField('_academy_url')\n\n def _eth_address(self, obj):\n if obj.user:\n return obj.user.username\n\n def _academy_url(self, obj):\n if obj.user:\n return obj.user.profile.academy_website\n\n class Meta:\n model = Provider\n fields = (\n 'id',\n 'name',\n 'user',\n 'academy_url',\n 'eth_address',\n 'verified',\n )\n","sub_path":"bdn/provider/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"212743515","text":"import itertools\nfrom datetime import date, timedelta\n\n# if __name__ == \"__main__\":\n# # Define date range - we are moving backwards into time, therefore we begin in 2015\n# start_year = 2015\n# start_month = 9\n# start_day = 15\n# end_year = 2013\n# end_month = 1\n# end_day = 1\n\n# # Define how many days should constitute one block i.e. 
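# --- The average log-likelihood that CartDensityTree.log_likelihood() computes,
# --- written directly for a flat 1-D histogram as a cross-check (bin count and
# --- data are illustrative).
import numpy as np

Xs = np.random.rand(5000)
counts, edges = np.histogram(Xs, bins=16, range=(0.0, 1.0))
widths = np.diff(edges)
n = counts.sum()
nz = counts > 0
ll = np.sum(counts[nz] * np.log(counts[nz] / (widths[nz] * n))) / n
print(ll)  # close to 0 for uniform data, since the true density is 1 on [0, 1]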
the difference between dates in the generated list\n    granularity = 15\n\n    \ndef dates_generator(start_year, start_month, start_day, end_year, end_month, end_day, granularity):\n\n    # Define a generator function to produce a date list\n    def series_of_dates(start, end, delta):\n        current = start\n        while current > end:\n            yield current\n            current -= delta\n\n    # Define a class that we use as a wrapper to make our generator object indexable\n    class Indexable(object):\n        def __init__(self,it):\n            self.it = iter(it)\n        def __iter__(self):\n            for elt in self.it:\n                yield elt\n        def __getitem__(self,index):\n            try:\n                return next(itertools.islice(self.it,index,index+1))\n            except TypeError:\n                return list(itertools.islice(self.it,index.start,index.stop,index.step))\n    \n    start = date(start_year, start_month, start_day)\n    end = date(end_year, end_month, end_day)\n    intervals = timedelta(days=granularity)\n\n    # Create an indexable list of strings from the generated dates\n    generated_dates = series_of_dates(start, end, intervals)\n    indexable_dates = Indexable(generated_dates)\n    dates = [x.strftime(\"%Y-%m-%d\") for x in indexable_dates]\n\n    # # Append the last date to get a complete list (in this case for the static input above)\n    # dates.append(\"2012-12-31\")\n    \n    return dates\n\nif __name__ == \"__main__\":\n\n    # Define date range - we are moving backwards into time, therefore we begin in 2015\n    start_year = 2015\n    start_month = 9\n    start_day = 15\n    end_year = 2013\n    end_month = 1\n    end_day = 1\n\n    # Define how many days should constitute one block i.e. the difference between dates in the generated list\n    granularity = 15\n\n    myDates = dates_generator(start_year, start_month, start_day, end_year, end_month, end_day, granularity)\n","sub_path":"Python/Scrape/Archive/dates_generator_v1.py","file_name":"dates_generator_v1.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
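# --- The generator/Indexable pair in dates_generator_v1.py above can be
# --- collapsed into a plain loop; a sketch (note that itertools.islice consumes
# --- the underlying generator, so the Indexable wrapper is single-pass):
from datetime import date, timedelta

def dates_list(start, end, step_days):
    step = timedelta(days=step_days)
    out, current = [], start
    while current > end:
        out.append(current.strftime("%Y-%m-%d"))
        current -= step
    return out

print(dates_list(date(2015, 9, 15), date(2013, 1, 1), 15)[:3])
# ['2015-09-15', '2015-08-31', '2015-08-16']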
{"seq_id":"147400540","text":"from datetime import datetime\nimport logging\nfrom time import sleep\n\nfrom flask import Flask, request, json, jsonify, Response, render_template, redirect, url_for\nfrom wtforms import Form\n\nfrom forms import NewMemberForm\n\nclass Status:\n    HTTP_201_CREATED = 201\n    HTTP_304_NOT_MODIFIED = 304\n    HTTP_404_NOT_FOUND = 404\n\n\nfrom members import Member\n\napp = Flask(__name__)\n\ndef convert_form_to_member_data(form):\n    # form is a wtform\n    result = {}\n    if form.name.data is not None:\n        result['name'] = form.name.data\n    if form.phone.data is not None:\n        result['phone'] = form.phone.data\n    if form.email.data is not None:\n        result['email'] = form.email.data\n    return result\n\n\n@app.route('/members/create')\ndef create_member():\n    # Must come before members/<member_id> to disambiguate\n    form = NewMemberForm(request.form)\n    return render_template('new-member-form.html', form=form)\n\n\n@app.route('/members/<member_id>', methods=['GET', 'PUT', 'DELETE'])\n@app.route('/members/', methods=['GET', 'POST', 'PUT', 'DELETE'])\ndef members(member_id=None):\n    member_id = int(member_id) if member_id else None\n\n    if request.method == 'GET':\n        if member_id:\n            member = Member.build_key(member_id).get()\n            if not member:\n                return \"Member %s not found\" % member_id, Status.HTTP_404_NOT_FOUND\n            return render_template(\"member-details.html\", member=member)\n        else:\n            members = Member.query().order(-Member.created).fetch()\n            return render_template(\"member-list.html\", members=members, created=request.args.get('created'))\n    \n    elif request.method in ['PUT', 'POST']:\n        if member_id:\n            member = Member.build_key(member_id).get()\n            if member:\n                member.update(**convert_form_to_member_data(NewMemberForm(request.form)))\n                member.put()\n                return redirect(url_for(\"members\"))\n            else:\n                return \"Member %s not found.\" % member_id, Status.HTTP_404_NOT_FOUND\n        else:\n            # Create a new member\n            form = NewMemberForm(request.form)\n            member = Member(**convert_form_to_member_data(form))\n            member.put()\n            sleep(1)  # I'm ashamed of myself\n            return redirect(url_for(\"members\", created=member.member_id))\n\n    elif request.method == 'DELETE':\n        if member_id:\n            member = Member.build_key(member_id).get()\n            if member:\n                member.key.delete()\n                return jsonify(deleted=member.to_dict())\n            else:\n                return \"Member %s not found.\" % member_id, Status.HTTP_404_NOT_FOUND\n        else:\n            if request.args.get('force') == 'true':\n                all_members = Member.query().fetch()\n                [m.key.delete() for m in all_members]\n                return jsonify(deleted=[m.to_dict() for m in all_members])\n            else:\n                return \"Please use 'force=true' to delete all members.\", Status.HTTP_304_NOT_MODIFIED\n\n\n\n@app.route('/')\ndef home():\n    form = NewMemberForm(request.form)\n    return redirect(url_for(\"members\"))\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    return 'Sorry, Nothing at this URL.', 404\n\n\n@app.errorhandler(500)\ndef application_error(e):\n    return 'Sorry, unexpected error: {}'.format(e), 500\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"144759498","text":"# External libraries\nimport tensorflow as tf\nimport requests\n\n# Python standard libraries\nimport os\nimport collections\n\n# Libraries not used for now\nimport numpy as np\nimport string\nimport re\nimport random\n\n##################################################### Load & clean data\n\ndata_file = 'data/shakespeare.txt'\n\nif not os.path.isfile(data_file):\n    print('Data file not found, downloading the dataset')\n    shakespeare_url = 'http://www.gutenberg.org/cache/epub/100/pg100.txt'\n    response = requests.get(shakespeare_url)\n    shakespeare_file = response.content\n    # Decode binary into string\n    s_text = shakespeare_file.decode('utf-8')\n    # Drop first few descriptive paragraphs.\n    s_text = s_text[7675:]\n    # Remove newlines\n    s_text = s_text.replace('\\r\\n', '')\n    s_text = s_text.replace('\\n', '')\n    # Write to file\n    with open(data_file, 'w') as out_conn:\n        out_conn.write(s_text)\nelse:\n    with open(data_file, 'r') as file_conn:\n        s_text = file_conn.read().replace('\\n', '')\n\nprint('Sample data:\\n'+ s_text[:200])\n\npunctuation = string.punctuation\npunctuation = ''.join([x for x in punctuation if x not in ['-', \"'\"]])\ns_text = re.sub(r'[{}]'.format(punctuation), ' ', s_text)\ns_text = re.sub('\\s+', ' ', s_text).strip().lower()\n\n\n############################################################### Build Vocabulary\n\ndef build_vocab(text, min_word_freq):\n\tword_counts = collections.Counter(text.split(' '))\n\tprint ('word counts: ', len(word_counts), 'text len: ', len(text.split(' ')))\n\t# limit word counts to those more frequent than cutoff\n\tword_counts = {key: val for key, val in word_counts.items() if val > min_word_freq}\n\t# Create vocab --> index mapping\n\twords = word_counts.keys()\n\tvocab_to_ix_dict = {key: (ix + 1) for ix, key in enumerate(words)}\n\t# Add unknown key --> 0 index\n\tvocab_to_ix_dict['unknown'] = 0\n\t# Create index --> vocab mapping\n\tix_to_vocab_dict = {val: key for key, val in 
vocab_to_ix_dict.items()}\n\treturn (ix_to_vocab_dict, vocab_to_ix_dict)\n\n# Build Shakespeare vocabulary\nmin_word_freq = 5 # Trim the less frequent words off\nix2vocab, vocab2ix = build_vocab(s_text, min_word_freq)\nvocab_size = len(ix2vocab) + 1\nprint('Vocabulary Length = {}'.format(vocab_size))\n# Sanity Check\nassert (len(ix2vocab) == len(vocab2ix))\n\n\n##################################################### Convert text to word Vectors\n\ns_text_words = s_text.split(' ')\ns_text_ix = []\nfor ix, x in enumerate(s_text_words):\n try:\n s_text_ix.append(vocab2ix[x])\n except:\n s_text_ix.append(0)\ns_text_ix = np.array(s_text_ix)\n\n\n\n##################################################### LSTM RNN Model\n\n\nepochs = 10 # Number of epochs to cycle through data\nbatch_size = 32 # Train on this many examples at once\nlearning_rate = 0.001 # Learning rate\ntraining_seq_len = 11 # how long of a word group to consider\nrnn_size = 1024 # RNN Model size, has to equal embedding size\nembedding_size = rnn_size\neval_every = 50 # How often to evaluate the test sentences\nprime_texts = ['thou art more', 'to be or not to', 'wherefore art thou']\n\n\nlstm_cell = tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_size)\ninitial_state = lstm_cell.zero_state(batch_size, tf.float32)\n\nx_data = tf.placeholder(tf.int32, [batch_size, training_seq_len])\ny_output = tf.placeholder(tf.int32, [batch_size, training_seq_len])\n","sub_path":"code/teclado predictivo/tecladoPredictivo.py","file_name":"tecladoPredictivo.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"624490494","text":"from room import Room\nfrom player import Player\nfrom item import Item\n\n# Declare all the rooms\nfrom src.item import Item\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"North of you, the cave mount beckons\"),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
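# --- Sketch of the frequency-cutoff vocabulary scheme used by build_vocab
# --- above, on a toy string (the cutoff value here is illustrative).
import collections

toy = "to be or not to be"
counts = collections.Counter(toy.split(' '))
kept = [w for w, c in counts.items() if c > 1]          # words above the cutoff
vocab_to_ix = {w: i + 1 for i, w in enumerate(kept)}    # index 0 stays reserved
vocab_to_ix['unknown'] = 0
print(vocab_to_ix)                                      # {'to': 1, 'be': 2, 'unknown': 0}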
The only exit is to the south.\"\"\"),\n}\n\n# Link rooms together\n\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n#\n# Main\n#\n\n# create a dictionary of items and append the key to some rooms\n\nitem = {\n    'outside': [Item(\"bones\", \"the bones of those who have walked this path be with you\"),\n                Item(\"stones\", \"cast your stones wisely, it might save you\")],\n    'foyer': [Item(\"torch\", \"may the light guide your direction\"),\n              Item(\"mask\", \"protection for the dusty path ahead\")],\n    'overlook': [Item(\"rope\", \"climb down this slippy slope\"),\n                 Item(\"axe\", \"chop down a tree to build your bridge\")],\n    'treasure': [Item(\"tears\", \"free your heart of the pain of your loss\"),\n                 Item(\"sandals\", \"the journey out of here might be a long one\")],\n}\n\nroom['outside'].items = item['outside']\nroom['foyer'].items = item['foyer']\nroom['overlook'].items = item['overlook']\nroom['treasure'].items = item['treasure']\n\n# when user wants to see their inventory, write if statement to check if it is empty or not\n\n# Make a new player object that is currently in the 'outside' room.\nplayer = Player('kelechi', room['outside'])\n\n\n# Write a loop that:\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n# code for player movement\n\ndef move_player(move, current_room):\n    attrib = move + '_to'\n    if hasattr(current_room, attrib):\n        return getattr(current_room, attrib)\n\n    print('you cannot go that direction')\n\n    return current_room\n\n\ndone = False\n\nwhile not done:\n    print(player.current_room)\n    direction = input(\"choose a direction:'n', 's', 'e', 'w' 'q' to quit: \").lower().strip().split()\n    if not direction:\n        print('invalid input, please try again')\n\n    elif direction[0] == 'q':\n        print('Later')\n        done = True\n\n    elif direction[0] in ['n', 's', 'e', 'w']:\n        player.current_room = move_player(direction[0], player.current_room)\n\n    elif direction[0] in ('get', 'take') and len(direction) > 1:\n        print(player.current_room.on_take(direction[1]))\n\n    else:\n        print('invalid input, please try again')\n# add item list to the Room class\n# add a loop that will display all the items in each room\n","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"257782798","text":"import random\nimport math\nimport pygame\nimport csv\nimport os\nfrom pygame.locals import *\n\n\nfrom game.tile import *\nfrom game.standard_monster import *\n\n\nclass TiledLevel():\n    def __init__(self, levelTileSize, allTileSprites, allMonsterSprites, monsters, screenData, explosionsSpriteSheet):\n\n        self.initialScreenOffset = [0,0]\n        self.positionOffset = [0,0]\n        self.currentCentrePosition = [100000000,111111111111]\n\n        self.fileName = \"data/level.csv\"\n        self.explosionsSpriteSheet = explosionsSpriteSheet\n        \n        self.screenData = screenData\n\n        self.tile_map = self.loadTileTable(\"images/tiles/tile_map.png\", 64, 64, False)\n\n        self.guardsSpriteMap = self.loadTileTable(\"images/guards.png\", 48, 64, True)\n        self.allMonsterSprites 
= allMonsterSprites\n self.monsters = monsters\n\n self.zeroTileX = 0\n self.zeroTileY = 0\n self.endTileX = 0\n self.endTileY = 0\n\n self.tileGrid = []\n self.tiles = []\n self.collidableTiles = []\n self.walkable_tiles = []\n #self.visibleTiles = []\n #self.visibleCollidableTiles = []\n\n self.aiSpawns = []\n\n self.levelTileSize = levelTileSize\n self.levelPixelSize = [self.levelTileSize[0]*64,self.levelTileSize[1]*64]\n\n for tileX in range(0, self.levelTileSize[0]):\n column = []\n for tileY in range(0, self.levelTileSize[1]):\n column.append(None)\n self.tileGrid.append(column)\n \n self.initialOffset = True\n\n self.allTileData = {}\n tileDataFiles = [file for file in os.listdir(\"data/tiles/\") if os.path.isfile(os.path.join(\"data/tiles/\", file))]\n\n self.defaultTile = None\n for fileName in tileDataFiles:\n newTileData = TileData(os.path.join(\"data/tiles/\", fileName), self.tile_map)\n newTileData.loadTileData()\n self.allTileData[newTileData.tileID] = newTileData\n if self.defaultTile == None:\n self.defaultTile = newTileData\n\n\n def clearLevelToDefaultTile(self):\n for x in range(0, self.levelTileSize[0]):\n for y in range(0,self.levelTileSize[1]):\n xCentre = 32 + (x * 64)\n yCentre = 32 + (y * 64)\n defaultTile = Tile([xCentre, yCentre], 0, self.defaultTile)\n self.tiles.append(defaultTile)\n self.walkable_tiles.append(defaultTile)\n \n def resetGuards(self):\n for spawn in self.aiSpawns:\n newMonster = StandardMonster(spawn.typeID, spawn.world_position, self.guardsSpriteMap, self.allMonsterSprites, self.screenData.playArea, self, self.explosionsSpriteSheet)\n self.monsters.append(newMonster)\n\n def updateOffsetPosition(self, centrePosition, allTileSprites):\n shouldUpdate = False\n self.currentCentrePosition = centrePosition\n xOffset = int(self.currentCentrePosition[0] - self.initialScreenOffset[0])\n yOffset = int(self.currentCentrePosition[1] - self.initialScreenOffset[1])\n\n if xOffset <= 0:\n xOffset = 0\n if xOffset >= int(self.levelPixelSize[0] - self.screenData.playArea[0]):\n xOffset = int(self.levelPixelSize[0] - self.screenData.playArea[0])\n\n if yOffset <= 0:\n yOffset = 0\n if yOffset >= int(self.levelPixelSize[1] - self.screenData.playArea[1]):\n yOffset = int(self.levelPixelSize[1] - self.screenData.playArea[1])\n \n if self.initialOffset or not (xOffset == self.positionOffset[0] and yOffset == self.positionOffset[1]) :\n if self.initialOffset:\n self.initialOffset = False\n self.positionOffset = [xOffset, yOffset]\n\n screenTileWidth = int(self.screenData.playArea[0]/64) + 1\n screenTileHeight = int(self.screenData.playArea[1]/64) + 2\n\n oldZeroTileX = self.zeroTileX\n oldZeroTileY = self.zeroTileY\n\n self.zeroTileX = int(xOffset/64)\n self.zeroTileY = int(yOffset/64)\n\n if self.zeroTileX != oldZeroTileX or self.zeroTileY != oldZeroTileY:\n allTileSprites.empty()\n self.endTileX = self.zeroTileX + screenTileWidth\n self.endTileY = self.zeroTileY + screenTileHeight\n\n if self.endTileX >= len(self.tileGrid):\n self.endTileX = len(self.tileGrid)\n if self.endTileY >= len(self.tileGrid[0]):\n self.endTileY = len(self.tileGrid[0])\n \n for tileX in range(self.zeroTileX, self.endTileX):\n for tileY in range(self.zeroTileY, self.endTileY):\n tile = self.tileGrid[tileX][tileY]\n if tile == None:\n print(\"No tile at grid: \" + str(tileX) + \", \" + str(tileY))\n else:\n tile.updateOffsetPosition(self.positionOffset, self.screenData)\n allTileSprites.add(tile.sprite)\n else:\n for tileX in range(self.zeroTileX, self.endTileX):\n for tileY in 
range(self.zeroTileY, self.endTileY):\n tile = self.tileGrid[tileX][tileY]\n tile.updateOffsetPosition(self.positionOffset, self.screenData)\n\n for spawn in self.aiSpawns:\n spawn.updateOffsetPosition(self.positionOffset)\n\n return shouldUpdate\n\n\n def checkUpdateVisibleTiles(self):\n pass\n \n def findPlayerStart(self):\n playerStart = [0,0]\n shortestDistance = 100000\n worldCentre = [self.levelPixelSize[0]/2, self.levelPixelSize[1]/2]\n startPosition = [worldCentre[0], self.levelPixelSize[1]]#worldCentre\n screenCentre = [self.screenData.playArea[0]/2, self.screenData.playArea[1]/2]\n tileClosestToStartPosition = None\n for tile in self.walkable_tiles:\n xDist = float(startPosition[0]) - float(tile.world_position[0])\n yDist = float(startPosition[1]) - float(tile.world_position[1])\n distance = math.sqrt((xDist * xDist) + (yDist * yDist))\n if distance < shortestDistance:\n shortestDistance = distance\n \n playerStart[0] = tile.world_position[0]\n playerStart[1] = tile.world_position[1]\n tileClosestToStartPosition = tile\n \n self.playerStart = playerStart\n \n self.initialScreenOffset[0] = screenCentre[0]\n self.initialScreenOffset[1] = screenCentre[1]\n \n self.currentCentrePosition = playerStart\n xOffset = int(self.currentCentrePosition[0] - self.initialScreenOffset[0])\n yOffset = int(self.currentCentrePosition[1] - self.initialScreenOffset[1])\n\n if xOffset <= 0:\n xOffset = 0\n if xOffset >= int(self.levelPixelSize[0] - self.screenData.playArea[0]):\n xOffset = int(self.levelPixelSize[0] - self.screenData.playArea[0])\n\n if yOffset <= 0:\n yOffset = 0\n if yOffset >= int(self.levelPixelSize[1] - self.screenData.playArea[1]):\n yOffset = int(self.levelPixelSize[1] - self.screenData.playArea[1])\n \n self.positionOffset = [xOffset, yOffset]\n\n self.initialOffset = True\n #print(\"Offset at start: \" + str(self.positionOffset[0]) + \", \" + str(self.positionOffset[1]))\n return playerStart\n\n\n def loadTileTable(self, filename, width, height, useTransparency):\n image = None\n if useTransparency:\n image = pygame.image.load(filename).convert_alpha()\n else:\n image = pygame.image.load(filename).convert()\n image_width, image_height = image.get_size()\n tile_table = []\n for tile_x in range(0, int(image_width/width)):\n line = []\n tile_table.append(line)\n for tile_y in range(0, int(image_height/height)):\n rect = (tile_x*width, tile_y*height, width, height)\n line.append(image.subsurface(rect))\n return tile_table\n\n\n def getTileDataAtPos(self, clickPos):\n for tile in self.tiles:\n if tile.sprite.rect[0] <= clickPos[0] and tile.sprite.rect[1] <= clickPos[1]:\n if tile.sprite.rect[0] + tile.sprite.rect[2] > clickPos[0] and tile.sprite.rect[1] + tile.sprite.rect[3] > clickPos[1]:\n return [tile.sprite.rect, tile.tileImage, tile.tileID, False, tile]\n return [pygame.Rect(0, 0, 0, 0), None, \"\", False, None]\n\n def setTileAtPos(self, clickPos, newTileImage, tileID, tileAngle):\n tileToSet = None\n for tile in self.tiles:\n if tile.sprite.rect[0] <= clickPos[0] and tile.sprite.rect[1] <= clickPos[1]:\n if tile.sprite.rect[0] + tile.sprite.rect[2] > clickPos[0] and tile.sprite.rect[1] + tile.sprite.rect[3] > clickPos[1]:\n tileToSet = tile\n break\n if tileToSet != None:\n if tileToSet.collidable:\n self.collidableTiles.remove(tileToSet)\n else:\n self.walkable_tiles.remove(tileToSet)\n \n newTile = None\n\n newTile = Tile(tileToSet.world_position, tileAngle, self.allTileData[tileID])\n newTile.position = tileToSet.position\n \n self.tiles.remove(tileToSet) \n 
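# --- The shortest-distance scan in findPlayerStart above can be written as a
# --- one-liner; a sketch (assumes tiles expose world_position like Tile does):
import math

def nearest_tile(tiles, target):
    return min(tiles, key=lambda t: math.hypot(t.world_position[0] - target[0],
                                               t.world_position[1] - target[1]))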
self.tiles.append(newTile)\n\n self.tileGrid[int((newTile.world_position[0]-32)/64)][int((newTile.world_position[1]-32)/64)] = newTile\n\n\n if newTile.collidable:\n self.collidableTiles.append(newTile)\n else:\n self.walkable_tiles.append(newTile)\n \n \n\n self.checkUpdateVisibleTiles()\n\n def saveTiles(self):\n with open(self.fileName, \"w\", newline='') as tileFile:\n writer = csv.writer(tileFile)\n for tile in self.tiles:\n writer.writerow([\"tile\",tile.tileID,str(tile.world_position[0]),str(tile.world_position[1]),str(tile.angle)])\n\n for aiSpawn in self.aiSpawns:\n writer.writerow([\"aiSpawn\",aiSpawn.typeID,str(aiSpawn.world_position[0]),str(aiSpawn.world_position[1])])\n\n def loadTiles(self):\n if os.path.isfile(self.fileName):\n self.tiles[:] = []\n self.collidableTiles[:] = []\n self.walkable_tiles[:] = []\n \n with open(self.fileName, \"r\") as tileFile:\n reader = csv.reader(tileFile)\n for line in reader:\n lineType = line[0]\n \n if lineType == \"tile\":\n tileID = line[1]\n tileXPos = int(line[2])\n tileYPos = int(line[3])\n tileAngle = int(line[4])\n loadedTile = Tile([tileXPos, tileYPos], tileAngle, self.allTileData[tileID]) \n self.tiles.append(loadedTile)\n\n self.tileGrid[int((tileXPos-32)/64)][int((tileYPos-32)/64)] = loadedTile\n\n if loadedTile.collidable:\n self.collidableTiles.append(loadedTile)\n else:\n self.walkable_tiles.append(loadedTile)\n \n elif lineType == \"aiSpawn\":\n typeID = line[1]\n tileXPos = int(line[2])\n tileYPos = int(line[3])\n newAISpawn = AISpawn(self.guardsSpriteMap[0][1], [tileXPos,tileYPos],typeID)\n self.aiSpawns.append(newAISpawn)\n else:\n self.clearLevelToDefaultTile()\n\n def addAISpawnAtPos(self, clickPos, aiSpawn): \n for tile in self.tiles:\n if tile.sprite.rect[0] <= clickPos[0] and tile.sprite.rect[1] <= clickPos[1]:\n if tile.sprite.rect[0] + tile.sprite.rect[2] > clickPos[0] and tile.sprite.rect[1] + tile.sprite.rect[3] > clickPos[1]:\n tileToSet = tile\n alreadyPlaced = False\n for spawn in self.aiSpawns:\n if spawn.world_position[0] == tileToSet.world_position[0] and spawn.world_position[1] == tileToSet.world_position[1]:\n alreadyPlaced = True\n\n if not alreadyPlaced:\n newAISpawn = AISpawn(aiSpawn.tileImage, tileToSet.world_position, aiSpawn.typeID)\n newAISpawn.updateOffsetPosition(self.positionOffset)\n self.aiSpawns.append(newAISpawn)\n\n","sub_path":"game/tiled_level.py","file_name":"tiled_level.py","file_ext":"py","file_size_in_byte":12703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"470915096","text":"import os\nimport pickle\nimport statistics\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import LeaveOneGroupOut\nfrom tabulate import tabulate\n\nfrom package import gpr, io, rf, testhelper as th\n\n# Initialization\ngpr_thresholds_range = np.round(np.arange(0.5, 1.2, 0.1), 1)\nrf_thresholds_range = np.round(np.arange(0.5, 1.2, 0.1), 1)\ninclude_INF = True\n# normalityTests = ['RMSE', 'Normalized-RMSE', 'Log-RMSE', 'Normalized-Log-RMSE']\nnormalityTests = ['MetricOne', 'MetricTwo']\n# bin_sizes = [10, 50, 100, 200, 500]\nbin_sizes = [200]\ncontour_plot_same_scale = True\nmake_counts_plot = True\n\n# Resources\ntrainfile = '../data/Diffusion_Data_allfeatures.csv'\n# trainfile = '../data/temp.csv'\nrfslope = 0.919216\nrfintercept = -0.025370\ngprsavedkernel = io.loadmodelobj('../models/GPR_data_Diffusion_Data_allfeatures_csv_02-24-20_18-32-12') \\\n .getGPRkernel()\n\n# Data\ndata = 
io.importdata(trainfile)\ngroups = data['Material compositions 1'].values\ndata = io.sanitizedata(data)\nX = data.iloc[:, 1:]\nY = data.iloc[:, 0]\ny_std = statistics.stdev(Y.to_numpy(dtype=float))\n\n# For Infinite Cutoffs\nINF = np.inf\nif include_INF:\n    gpr_thresholds_range = np.append(gpr_thresholds_range, INF)\n    rf_thresholds_range = np.append(rf_thresholds_range, INF)\ngpr_thresholds, rf_thresholds = np.meshgrid(gpr_thresholds_range, rf_thresholds_range)\naccumulator = {(r, g, 1): [] for g in gpr_thresholds_range for r in rf_thresholds_range}\naccumulator.update({(r, g, 0): [[], [], []] for g in gpr_thresholds_range for r in rf_thresholds_range})\n\n# Leave out group test for Material compositions 1\nrfk = LeaveOneGroupOut()\nin_domain = []\nout_domain = [[], [], []]\nalreadyDone = []\ncount = 0\n\npath = os.path.abspath('..') + \"/domain results/trial/\"\n\n\ndef checkAlreadyDone(element, alreadylist):\n    for x in alreadylist:\n        if element == x:\n            return True\n    return False\n\n\n# Getting the values computed previously:\nif os.path.isfile('LOG_Pair_values.pkl'):\n    with open('LOG_Pair_values.pkl', 'rb') as f:\n        in_domain, out_domain, accumulator = pickle.load(f)\n\nelse:\n    for train_index, test_index in rfk.split(X, Y, groups):\n        X_train_1, X_test_1 = X.iloc[train_index], X.iloc[test_index]\n        y_train_1, y_test_1 = Y.iloc[train_index], Y.iloc[test_index]\n\n        groups2 = np.delete(groups, test_index)\n        testGroup = np.delete(groups, train_index)\n\n        for train_index_2, test_index_2 in rfk.split(X_train_1, y_train_1, groups2):\n            X_train, X_test = X.iloc[train_index_2], X.iloc[test_index_2]\n            y_train, y_test = Y.iloc[train_index_2], Y.iloc[test_index_2]\n\n            testGroup2 = np.delete(groups2, train_index_2)\n\n            if checkAlreadyDone(testGroup2[0], alreadyDone):\n                continue\n\n            frames = [X_test_1, X_test]\n            twoTest = pd.concat(frames)\n\n            yTest = [y_test_1, y_test]\n            yFrames = pd.concat(yTest)\n\n            testFinal = np.concatenate((testGroup, testGroup2))\n\n            RF = rf.RF()\n            RF.train(X_train, y_train, std=y_std)\n\n            GPR = gpr.GPR()\n            GPR.train(X_train, y_train, userkernel=gprsavedkernel, std=y_std, optimizer_restarts=0)\n            # Here instead of res, sigma try calculating domain prediction for the test data.\n\n            gpr_pred, GPR_errors = GPR.predict(twoTest, True)\n            rf_pred, RF_errors = RF.predict(twoTest, True)\n            RF_errors = rfslope * RF_errors + rfintercept\n\n            # Start measuring on different thresholds\n            for i_rf_thresholds in range(0, len(rf_thresholds_range)):\n                for i_gpr_thresholds in range(0, len(gpr_thresholds_range)):\n                    gpr_thresh = round(gpr_thresholds[i_rf_thresholds, i_gpr_thresholds], 1)\n                    rf_thresh = round(rf_thresholds[i_rf_thresholds, i_gpr_thresholds], 1)\n                    in_domain = accumulator[(rf_thresh, gpr_thresh, 1)]\n                    out_domain = accumulator[(rf_thresh, gpr_thresh, 0)]\n                    predictions = [th.predictdomain(GPR_errors[i], RF_errors[i], gpr_threshold=gpr_thresh, rf_threshold=rf_thresh)\n                                   for i in range(0, len(twoTest))]\n\n                    for i in range(0, len(twoTest)):\n                        residual_by_std = (rf_pred[i] - yFrames.to_numpy(dtype=float)[i]) / y_std\n                        predicted_error = RF_errors[i]\n                        if predictions[i] == 1:  # '==' rather than 'is': identity checks against int literals are unreliable\n                            in_domain.append(residual_by_std / predicted_error if predicted_error else 0)\n                        else:\n                            contribution = th.getcontribution(GPR_errors[i], RF_errors[i], gpr_threshold=gpr_thresh, rf_threshold=rf_thresh)\n                            out_domain[contribution - 1].append(residual_by_std / predicted_error if predicted_error else 0)\n\n            count += 1\n            print(str(count) + \": \" + str(testGroup[0]) + \"+\" + str(testGroup2[0]))\n        alreadyDone.append(testGroup[0])\n    # Can save these variables so no need to run it again\n    with open('LOG_Pair_values.pkl', 'wb') as f:\n        pickle.dump([in_domain, out_domain, accumulator], f)\n\nin_domain_norm_scores = {a: {b_i: [] for b_i in bin_sizes} for a in normalityTests}\nout_domain_norm_scores = {a: {b_i: [] for b_i in bin_sizes} for a in normalityTests}\nin_domain_num_points = []\nout_domain_num_points = []\n\nresults = []\n\nfor i_rf_thresholds in range(0, len(rf_thresholds_range)):\n    for i_gpr_thresholds in range(0, len(gpr_thresholds_range)):\n        gpr_thresh = round(gpr_thresholds[i_rf_thresholds, i_gpr_thresholds], 1)\n        rf_thresh = round(rf_thresholds[i_rf_thresholds, i_gpr_thresholds], 1)\n        cur_result = [rf_thresh, gpr_thresh]\n        in_domain = accumulator[(rf_thresh, gpr_thresh, 1)]\n        out_domain = accumulator[(rf_thresh, gpr_thresh, 0)]\n        num_in_domain = len(in_domain)\n        num_out_domain = len(out_domain[0]) + len(out_domain[1]) + len(out_domain[2])\n        cur_result.append(num_in_domain)\n        cur_result.append(num_out_domain)\n        in_domain_num_points.append(num_in_domain)\n        out_domain_num_points.append(num_out_domain)\n\n        if num_in_domain == 0:  # '==' rather than 'is' for value comparison\n            print('GPR Threshold = {} RF Threshold = {}, No points in-domain'.format(gpr_thresh, rf_thresh))\n        score = th.plotrstatwithgaussian(in_domain, _label=['GPR and RF'],\n                                         _xlabel='RF residual / RF predicted error',\n                                         _ylabel='Normalized Counts',\n                                         _title='LOG Pair: In-domain RF: {} GPR: {}'.format(rf_thresh, gpr_thresh),\n                                         filename=path + \"Plots/\",\n                                         _bincount=bin_sizes, _normalitytest=normalityTests,\n                                         temp_file='In_domain_RF-{}_GPR-{}'.format(rf_thresh, gpr_thresh))\n        for test in normalityTests:\n            for b_i in bin_sizes:\n                in_domain_norm_scores[test][b_i].append(score[test][b_i])\n\n        if num_out_domain == 0:  # '==' rather than 'is' for value comparison\n            print('GPR Threshold = {} RF Threshold = {}, No points out-domain'.format(gpr_thresh, rf_thresh))\n        score = th.plotrstatwithgaussian(out_domain, _label=['GPR', 'RF', 'both'],\n                                         _xlabel='RF residual / RF predicted error',\n                                         _ylabel='Normalized Counts',\n                                         _title='LOG Pair: Out-domain RF: {} GPR: {}'.format(rf_thresh, gpr_thresh),\n                                         filename=path + \"Plots/\",\n                                         _bincount=bin_sizes, _normalitytest=normalityTests,\n                                         # _showhist=False,\n                                         temp_file='Out_domain_RF-{}_GPR-{}'.format(rf_thresh, gpr_thresh))\n        for test in normalityTests:\n            for b_i in bin_sizes:\n                out_domain_norm_scores[test][b_i].append(score[test][b_i])\n\n        for test in normalityTests:\n            for b_i in bin_sizes:\n                cur_result.append(in_domain_norm_scores[test][b_i][-1])\n                cur_result.append(out_domain_norm_scores[test][b_i][-1])\n        results.append(cur_result)\n\n# For infinity threshold\n# Hack to include INF in contour plots\ncf_xticks = gpr_thresholds_range\ncf_yticks = rf_thresholds_range\nif include_INF:\n    replace_INF_val_gpr = gpr_thresholds_range[-2] + (gpr_thresholds_range[1] - gpr_thresholds_range[0])\n    replace_INF_val_rf = rf_thresholds_range[-2] + (rf_thresholds_range[1] - rf_thresholds_range[0])\n    gpr_thresholds_range[-1] = replace_INF_val_gpr\n    rf_thresholds_range[-1] = replace_INF_val_rf\n    gpr_thresholds[gpr_thresholds == INF] = replace_INF_val_gpr\n    rf_thresholds[rf_thresholds == INF] = replace_INF_val_rf\n    cf_xticks = np.append(cf_xticks[:-1], 'INF')\n    cf_yticks = np.append(cf_yticks[:-1], 'INF')\n\nif make_counts_plot:\n    in_domain_num_points = np.array(in_domain_num_points).reshape(\n        (len(rf_thresholds_range), len(gpr_thresholds_range)))\n    out_domain_num_points = np.array(out_domain_num_points).reshape(\n        (len(rf_thresholds_range), len(gpr_thresholds_range)))\n    cs = plt.contourf(gpr_thresholds, rf_thresholds, in_domain_num_points, cmap=plt.cm.jet)\n    plt.colorbar()\n    cs.cmap.set_under('k')\n    plt.title('LOG Pair Diffusion In-Domain Num Points')\n    plt.xlabel('GPR cutoff')\n    plt.ylabel('RF cutoff')\n    plt.xticks(gpr_thresholds_range, cf_xticks)\n    plt.yticks(rf_thresholds_range, cf_yticks)\n    plt.savefig(path + 'LOG Pair Diffusion In-Domain Num Points.png')\n    plt.clf()\n    cs = plt.contourf(gpr_thresholds, rf_thresholds, out_domain_num_points, cmap=plt.cm.jet)\n    cs.cmap.set_under('k')\n    plt.colorbar()\n    plt.title('LOG Pair Diffusion Out-Domain Num Points')\n    plt.xlabel('GPR cutoff')\n    plt.ylabel('RF cutoff')\n    plt.xticks(gpr_thresholds_range, cf_xticks)\n    plt.yticks(rf_thresholds_range, cf_yticks)\n    plt.savefig(path + 'LOG Pair Diffusion Out-Domain Num Points.png')\n    plt.clf()\n\n\nfor test in normalityTests:\n    for b_i in bin_sizes:\n        in_domain_norm_score_cur = np.array(in_domain_norm_scores[test][b_i]).reshape(\n            (len(rf_thresholds_range), len(gpr_thresholds_range)))\n        out_domain_norm_score_cur = np.array(out_domain_norm_scores[test][b_i]).reshape(\n            (len(rf_thresholds_range), len(gpr_thresholds_range)))\n\n        if contour_plot_same_scale:\n            if test == 'MetricOne' or test == 'MetricTwo':  # '==' rather than 'is': string identity is an implementation detail\n                clevels = np.linspace(\n                    max(0, np.min(in_domain_norm_score_cur)),\n                    np.max(in_domain_norm_score_cur),\n                    10)\n            # else:\n            #     in_domain_clevels = np.linspace(\n            #         min(np.min(in_domain_norm_score_cur), np.min(out_domain_norm_score_cur)),\n            #         max(np.max(in_domain_norm_score_cur), np.max(out_domain_norm_score_cur)),\n            #         10)\n            #     if in_domain_clevels[0] == in_domain_clevels[-1]:\n            #         in_domain_clevels = [in_domain_clevels[0], in_domain_clevels[0] + 0.1]\n            #     out_domain_clevels = in_domain_clevels\n\n        # if contour_plot_same_scale:\n        #     clevels = np.linspace(min(np.min(in_domain_norm_score_cur), np.min(out_domain_norm_score_cur)),\n        #                           max(np.max(in_domain_norm_score_cur), np.max(out_domain_norm_score_cur)),\n        #                           10)\n        # else:\n        #     clevels = None\n\n        # Changes\n        # clevels = np.linspace(0, np.max(out_domain_norm_score_cur), 10)\n        #\n        cs = plt.contourf(gpr_thresholds, rf_thresholds, in_domain_norm_score_cur, levels=clevels,\n                          cmap=plt.cm.jet, extend=\"both\")\n        cs.cmap.set_under('k')\n        plt.colorbar()\n        # plt.show()\n\n        # plt.contourf(gpr_thresholds, rf_thresholds, in_domain_norm_score_cur, levels=clevels)\n        # plt.colorbar()\n        # mymax = max([max(r) for r in in_domain_norm_score_cur])\n        # plt.clim(0,mymax)\n        plt.title('LOG Pair Diffusion In-Domain {} {} bins'.format(test, b_i))\n        plt.xlabel('GPR cutoff')\n        plt.ylabel('RF cutoff')\n        plt.xticks(gpr_thresholds_range, cf_xticks)\n        plt.yticks(rf_thresholds_range, cf_yticks)\n        plt.savefig(path + 'LOG Pair Diffusion In-Domain {} {} bins.png'.format(test, b_i))\n        plt.clf()\n\n        clevels = np.linspace(\n            max(0, np.min(out_domain_norm_score_cur)),\n            np.max(out_domain_norm_score_cur),\n            10)\n        # plt.contourf(gpr_thresholds, rf_thresholds, out_domain_norm_score_cur, levels=clevels)\n        # plt.colorbar()\n        # mymax = max([max(r) for r in out_domain_norm_score_cur])\n        # plt.clim(0, mymax)\n        cs = plt.contourf(gpr_thresholds, rf_thresholds, out_domain_norm_score_cur, levels=clevels,\n                          cmap=plt.cm.jet, extend=\"both\")\n        cs.cmap.set_under('k')\n        plt.colorbar()\n        plt.title('LOG Pair Diffusion Out-Domain {} {} bins'.format(test, b_i))\n        plt.xlabel('GPR cutoff')\n        plt.ylabel('RF cutoff')\n        plt.xticks(gpr_thresholds_range, cf_xticks)\n        plt.yticks(rf_thresholds_range, cf_yticks)\n        plt.savefig(path + 'LOG Pair Diffusion Out-Domain {} {} bins.png'.format(test, b_i))\n        plt.clf()\n
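# The "hack to include INF" above swaps np.inf for one extra grid step and relabels that
# tick as 'INF', since matplotlib cannot place an infinite coordinate on an axis. The same
# idea in isolation (a hypothetical standalone sketch, not part of this script):
import numpy as np
import matplotlib.pyplot as plt

ticks = np.array([0.5, 1.0, 1.5, np.inf])
finite = ticks.copy()
finite[-1] = finite[-2] + (finite[1] - finite[0])  # sentinel one grid step past the last finite tick
labels = [str(t) for t in ticks[:-1]] + ['INF']
plt.xticks(finite, labels)                         # the axis now reads 0.5, 1.0, 1.5, INF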
fd = open('LOG_Pair_Normality_RMSE_Diffusion.txt', 'w')\nlog_headers = [\"RF cutoff\",\n               \"GPR cutoff\",\n               \"Points in-domain\",\n               \"Points out-domain\"]\nvalue_format = [\".1f\", \".1f\", \".0f\", \".0f\"]\n\nfor testname in normalityTests:\n    for b_i in bin_sizes:\n        log_headers.append('In-Domain {} test score'.format(testname))\n        log_headers.append('Out-Domain {} test score'.format(testname))\n        value_format.append(\".5f\")\n        value_format.append(\".5f\")\n\nprint(tabulate(results,\n               headers=log_headers,\n               tablefmt=\"github\",\n               floatfmt=value_format), file=fd)\n","sub_path":"LOG Tests - Diffusion Data/normality_avergae_RMSE_fast.py","file_name":"normality_avergae_RMSE_fast.py","file_ext":"py","file_size_in_byte":14765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"491743424","text":"#Practice in numpy(1)\n\nimport json\nimport numpy as np\n# import DataFrame and Series\n# from pandas import DataFrame, Series\n# import pandas as pd; import numpy as np\n\n# get data from an external file\npath = 'F:/codeGit/pydata-book/ch02/usagov_bitly_data2012-03-16-1331923249.txt'\nrecords = [json.loads(line) for line in open(path)]\n\ndata = np.array(records)\n\t# note: np.ndarray(records) fails here -- np.ndarray() reads its argument as a shape,\n\t# and NumPy arrays cannot have more than 32 dimensions; np.array() builds the\n\t# array from the data instead\n\n# create an ndarray\ndata1 = [6, 7.5, 8, 0, 1]\ndata2 = [[6, 7.5, 8, 0, 1], [6, 7.5, 8, 0, 1]]\narr1 = np.array(data1)\narr2 = np.array(data2)\n\narr2.ndim # get dim\narr2.shape # get size\n\t# get the shape info of an ndarray\n\t\narr1.dtype # get the data type \n\n# create some initial ndarrays\narr3 = np.zeros(10)\narr3 = np.empty(10) # don't use this\n\narr3 = np.arange(10) \narr3 = np.ones(10)\narr3 = np.eye(10)\n\n# convert data types\nint_arr1 = arr1.astype(np.int64)\nnumeric_string = np.array(['1.25', '-0.85'], dtype = np.string_)\n\t# the contents of numeric_string are two byte strings\nnumeric_string.astype(float)\n\t# convert the strings to float\nfloat_arr1 = arr1.astype(int_arr1.dtype)\n\n","sub_path":"numpy/numpy_1.py","file_name":"numpy_1.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"179834638","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (division, absolute_import, print_function)\n\nfrom aiohttp import web\nimport psutil\n\n\ndef checkIfProcessRunning(processName):\n    '''\n    Check if there is any running process that contains\n    the given name processName.\n    '''\n    # Iterate over all the running processes\n    for proc in psutil.process_iter():\n        try:\n            # Check if the process name contains the given name string.\n            if processName.lower() in proc.name().lower():\n                return True\n        except (psutil.NoSuchProcess, psutil.AccessDenied,\n                psutil.ZombieProcess):\n            pass\n    return False\n\n\nasync def livenessProbe(request):\n    process = checkIfProcessRunning('Agent.Listener')\n    if not process:\n        raise web.HTTPInternalServerError()\n    return web.Response(text=\"i'm alive!\",\n                        headers={\"Custom-Header\": \"Awesome\"})\n\n\ndef main():\n    app = web.Application()\n    app.add_routes([web.get(\"/healthz\", livenessProbe)])\n\n    # disable SIGTERM handling for disruption-free rolling updates\n    web.run_app(app, handle_signals=False)\n\n\nif __name__ == '__main__':\n    
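# An isolated sketch of the process check that the /healthz handler above relies on
# (illustrative only; the function name process_running and the 'python' argument are hypothetical):
import psutil

def process_running(name):
    # pre-fetch each process name in one pass; '' is substituted when access is denied
    for proc in psutil.process_iter(attrs=['name'], ad_value=''):
        if name.lower() in (proc.info['name'] or '').lower():
            return True
    return False

# e.g. process_running('python') is True while any Python interpreter is alive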
main()\n","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"50218788","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport autoslug.fields\nimport brouwers.kits.models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Brand',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100, verbose_name='brand', db_index=True)),\n ('slug', autoslug.fields.AutoSlugField(verbose_name='slug', unique=True, editable=False)),\n ('logo', models.ImageField(upload_to=b'images/brand_logos/', verbose_name='logo', blank=True)),\n ('is_active', models.BooleanField(default=True, help_text='Whether the brand still exists or not', db_index=True, verbose_name='is active')),\n ],\n options={\n 'ordering': ['name'],\n 'verbose_name': 'brand',\n 'verbose_name_plural': 'brands',\n },\n ),\n migrations.CreateModel(\n name='ModelKit',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255, verbose_name='kit name', db_index=True)),\n ('slug', autoslug.fields.AutoSlugField(verbose_name='slug', unique=True, editable=False)),\n ('kit_number', models.CharField(help_text='Kit number as found on the box.', max_length=50, verbose_name='kit number', db_index=True, blank=True)),\n ('difficulty', models.PositiveSmallIntegerField(default=30, choices=[(10, 'very easy'), (20, 'easy'), (30, 'medium'), (40, 'hard'), (50, 'very hard')], verbose_name='difficulty', validators=[brouwers.kits.models.KitDifficulties.validator])),\n ('box_image', models.ImageField(upload_to=b'kits/box_images/%Y/%m', verbose_name='box image', blank=True)),\n ('submitted_on', models.DateTimeField(auto_now_add=True)),\n ('brand', models.ForeignKey(verbose_name='brand', to='kits.Brand')),\n ('duplicates', models.ManyToManyField(help_text='Kits that are the same but have another producer.', related_name='duplicates_rel_+', verbose_name='duplicates', to='kits.ModelKit', blank=True)),\n ],\n options={\n 'verbose_name': 'model kit',\n 'verbose_name_plural': 'model kits',\n },\n ),\n migrations.CreateModel(\n name='Scale',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('scale', models.PositiveSmallIntegerField(verbose_name='scale', db_index=True)),\n ],\n options={\n 'ordering': ['scale'],\n 'verbose_name': 'scale',\n 'verbose_name_plural': 'scales',\n },\n ),\n migrations.AddField(\n model_name='modelkit',\n name='scale',\n field=models.ForeignKey(verbose_name='scale', to='kits.Scale'),\n ),\n migrations.AddField(\n model_name='modelkit',\n name='submitter',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"src/brouwers/kits/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"540359937","text":"from django.conf.urls import url, include\nfrom rest_framework.routers import DefaultRouter\n\n##########\nfrom apps.shop.views import ShopCategoryViewSet, 
ShopGoodViewSet, ShoppingCartViewSet, ShopCustomerViewSet\n\nrouter = DefaultRouter()\nrouter.register(r'ShopCategory', ShopCategoryViewSet)\nrouter.register(r'ShopGood', ShopGoodViewSet)\nrouter.register(r'ShoppingCart', ShoppingCartViewSet)\nrouter.register(r'ShopCustomer', ShopCustomerViewSet)\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n ]\n","sub_path":"XJSExpress/apps/shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"262132316","text":"# Imports\r\nimport pygame\r\nimport random\r\n\r\n# Initialize game engine\r\npygame.init()\r\n\r\n\r\n# Window\r\nWIDTH = 1077\r\nHEIGHT = 800\r\nSIZE = (WIDTH, HEIGHT)\r\nTITLE = \"Balloon Tower Defense\"\r\nscreen = pygame.display.set_mode(SIZE)\r\npygame.display.set_caption(TITLE)\r\n\r\n\r\n# Timer\r\nclock = pygame.time.Clock()\r\nrefresh_rate = 60\r\n\r\n\r\n# Colors\r\nRED = (255, 0, 0)\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nYELLOW = (255, 255, 0)\r\nGREEN = (139, 69, 19)\r\n\r\n\r\n# Fonts\r\nFONT_SM = pygame.font.Font(None, 24)\r\nFONT_MD = pygame.font.Font(None, 32)\r\nFONT_LG = pygame.font.Font(None, 64)\r\nFONT_XL = pygame.font.Font(\"assets/fonts/Cleveland.otf\", 73)\r\n\r\n\r\n# Images\r\nship_img = pygame.image.load('assets/images/BTD5Towers.png').convert_alpha()\r\nlaser_img = pygame.image.load('assets/images/poop.png').convert_alpha()\r\nenemyP_img = pygame.image.load('assets/images/PinkBalloon.png').convert_alpha()\r\nenemyY_img = pygame.image.load('assets/images/YellowBalloon.png').convert_alpha()\r\nenemyG_img = pygame.image.load('assets/images/GreenBalloon.png').convert_alpha()\r\nenemyB_img = pygame.image.load('assets/images/BlueBalloon.png').convert_alpha()\r\nenemyR_img = pygame.image.load('assets/images/RedBalloon.png').convert_alpha()\r\nbomb_img = pygame.image.load('assets/images/bullet.png').convert_alpha() \r\nbackround_img = pygame.image.load('assets/images/BackroundBTD.jpg').convert_alpha() \r\nboss_img = pygame.image.load('assets/images/Boss.png').convert_alpha() \r\npowerup_img = pygame.image.load('assets/images/chug.png').convert_alpha()\r\npowerupRP_img = pygame.image.load('assets/images/RF.png').convert_alpha()\r\nSuper_img = pygame.image.load('assets/images/Super.png').convert_alpha()\r\n\r\n# Sounds\r\nshoot_sound = pygame.mixer.Sound('assets/sounds/Boomerang.wav')\r\nboomboy = pygame.mixer.Sound('assets/sounds/pop.wav')\r\npygame.mixer.music.load('assets/sounds/fallout.ogg')\r\n\r\n# Stages\r\nSTART = 0\r\nPLAYING = 1\r\nLOSE = 2\r\nWIN = 3\r\n\r\n# Game classes\r\nclass Ship(pygame.sprite.Sprite):\r\n def __init__(self, x, y, image):\r\n super().__init__()\r\n\r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = self.image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n self.max_health = 3\r\n self.health = self.max_health\r\n self.rapidfire_timer = 0\r\n self.hits = 0\r\n self.speed = 10\r\n \r\n def move_left(self):\r\n self.rect.x -= self.speed\r\n \r\n def move_right(self):\r\n self.rect.x += self.speed\r\n\r\n def move_up(self):\r\n self.rect.y -= self.speed\r\n\r\n def move_down(self):\r\n self.rect.y += self.speed\r\n\r\n\r\n def shoot(self):\r\n print(\"pew!\")\r\n shoot_sound.play()\r\n\r\n laser = Laser(laser_img)\r\n laser.rect.centerx = self.rect.centerx\r\n laser.rect.centery = self.rect.top\r\n lasers.add(laser)\r\n \r\n def update(self):\r\n if self.rect.left < 0:\r\n self.rect.left = 0\r\n elif 
self.rect.right > WIDTH:\r\n self.rect.right = WIDTH\r\n\r\n if self.rect.top < 0:\r\n self.rect.top = 0\r\n elif self.rect.bottom > HEIGHT:\r\n self.rect.bottom = HEIGHT\r\n hit_list = pygame.sprite.spritecollide(self, powerups, True, pygame.sprite.collide_mask)\r\n\r\n for hit in hit_list:\r\n hit.apply(self)\r\n\r\n hit_list = pygame.sprite.spritecollide(self, bombs, True,pygame.sprite.collide_mask)\r\n if len(hit_list) > 0:\r\n self.health -= 1\r\n\r\n if self.rapidfire_timer > 0:\r\n self.rapidfire_timer -= 1\r\n \r\n if self.health == 0:\r\n self.kill()\r\n\r\n if self.rapidfire_timer == 0:\r\n self.image = ship.image\r\n\r\n\r\n \r\nclass Laser(pygame.sprite.Sprite):\r\n def __init__(self, image):\r\n super().__init__()\r\n \r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n \r\n self.speed = 10\r\n\r\n def update(self):\r\n self.rect.y -= self.speed\r\n\r\n if self.rect.bottom < 0:\r\n self.kill()\r\n\r\n\r\nclass Boss(pygame.sprite.Sprite):\r\n def __init__(self, x, y, image):\r\n super().__init__()\r\n \r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n self.health = 10\r\n \r\n def drop_bomb(self):\r\n print(\"bomb noise\")\r\n shoot_sound.play()\r\n\r\n bomb = Bomb(bomb_img)\r\n self.mask = pygame.mask.from_surface(self.image)\r\n bomb.rect.centerx = self.rect.centerx\r\n bomb.rect.centery = self.rect.bottom\r\n bombs.add(bomb)\r\n \r\n def update(self):\r\n hit_list = pygame.sprite.spritecollide(self, lasers, True,\r\n pygame.sprite.collide_mask)\r\n\r\n for hit in hit_list:\r\n print(\"boom\")\r\n boomboy.play()\r\n self.health -= 1\r\n if self.health <= 0:\r\n self.kill()\r\n\r\nclass Mob(pygame.sprite.Sprite):\r\n def __init__(self, x, y, image):\r\n super().__init__()\r\n \r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n self.health = 5\r\n\r\n \r\n def drop_bomb(self):\r\n print(\"bomb noise\")\r\n shoot_sound.play()\r\n\r\n bomb = Bomb(bomb_img)\r\n self.mask = pygame.mask.from_surface(self.image)\r\n bomb.rect.centerx = self.rect.centerx\r\n bomb.rect.centery = self.rect.bottom\r\n bombs.add(bomb)\r\n \r\n def update(self):\r\n hit_list = pygame.sprite.spritecollide(self, lasers, True,\r\n pygame.sprite.collide_mask)\r\n\r\n if self.health == 4:\r\n self.image = enemyY_img\r\n \r\n if self.health == 3:\r\n self.image = enemyG_img\r\n\r\n if self.health == 2:\r\n self.image = enemyB_img\r\n\r\n if self.health == 1:\r\n self.image = enemyR_img\r\n\r\n \r\n for hit in hit_list:\r\n print(\"boom\")\r\n boomboy.play()\r\n self.health -= 1\r\n if self.health <= 0:\r\n self.kill()\r\n\r\n if len(hit_list) > 0:\r\n player.score += 100\r\n print(player.score)\r\n\r\n\r\n\r\nclass Bomb(pygame.sprite.Sprite):\r\n def __init__(self, image):\r\n super().__init__()\r\n \r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.speed = 15\r\n\r\n def update(self):\r\n self.rect.y += self.speed\r\n \r\n if self.rect.top > HEIGHT:\r\n self.kill()\r\n\r\nclass ShootPowerUp(pygame.sprite.Sprite):\r\n def __init__(self, x, y, image):\r\n super().__init__()\r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n self.speed = 10\r\n \r\n def apply(self, ship):\r\n 
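# The apply() method below arms a frame-count timer: duration in seconds times the
# refresh rate gives the number of game-loop iterations the power-up stays active,
# counted down once per Ship.update() call. The same pattern in isolation
# (a hypothetical class, not part of this game):
class FrameTimer:
    """Counts game frames instead of wall-clock time."""
    def __init__(self, duration_seconds, refresh_rate=60):
        self.frames_left = int(duration_seconds * refresh_rate)

    def tick(self):
        # call exactly once per game-loop iteration; True while still running
        if self.frames_left > 0:
            self.frames_left -= 1
        return self.frames_left > 0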
ship.rapidfire_timer = 2 * refresh_rate\r\n ship.image = Super_img\r\n\r\n def update(self):\r\n self.rect.y += self.speed\r\n \r\n if self.rect.top > HEIGHT:\r\n self.kill()\r\n\r\nclass HealthPowerUp(pygame.sprite.Sprite):\r\n def __init__(self, x, y, image):\r\n super().__init__()\r\n self.image = image\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect = image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n self.health = 9\r\n \r\n self.speed = 10\r\n \r\n def apply(self, ship):\r\n ship.health = 3\r\n\r\n def update(self):\r\n self.rect.y += self.speed\r\n \r\n if self.rect.top > HEIGHT:\r\n self.kill()\r\n \r\nclass Fleet():\r\n def __init__(self,mobs):\r\n self.mobs = mobs\r\n self.speed = 5\r\n self.moving_right = True\r\n self.drop = 30\r\n self.moving_right = True\r\n self.bomb_rate = 20\r\n self.boss_added = False\r\n \r\n def move(self):\r\n hits_edge = False\r\n\r\n for m in mobs:\r\n if self.moving_right:\r\n m.rect.x += self.speed\r\n\r\n if m.rect.right >= WIDTH:\r\n hits_edge = True\r\n else:\r\n m.rect.x -= self.speed\r\n\r\n if m.rect.left <= 0:\r\n hits_edge = True\r\n \r\n if hits_edge:\r\n self.reverse()\r\n\r\n def reverse(self):\r\n self.moving_right = not self.moving_right\r\n\r\n def move_down(self):\r\n for m in mobs:\r\n m.rect.y += self.drop\r\n \r\n def choose_bomber(self):\r\n rand = random.randrange(self.bomb_rate)\r\n mob_list = mobs.sprites()\r\n\r\n if len(mob_list) > 0 and rand == 0:\r\n bomber = random.choice(mob_list)\r\n bomber.drop_bomb()\r\n\r\n def add_boss(self):\r\n boss = Boss(400, 25, boss_img)\r\n mobs.add(boss)\r\n self.boss_added = True\r\n\r\n\r\n def update(self):\r\n self.move() \r\n self.choose_bomber()\r\n\r\n if len(mobs) == 3 and self.boss_added == False:\r\n self.add_boss()\r\n \r\n# Game helper functions\r\ndef display_statistics():\r\n health_txt = FONT_XL.render(str(ship.health), 1, RED)\r\n screen.blit(health_txt, [80, 20])\r\n\r\n score_txt = FONT_XL.render(str(player.score), 1, YELLOW)\r\n screen.blit(score_txt, [820, 20])\r\n\r\ndef show_title_screen():\r\n title_text = FONT_XL.render(\"Balloon Tower Defense\", 1, WHITE)\r\n screen.blit(title_text, [20, 300])\r\n\r\ndef show_win_screen():\r\n text1 = FONT_XL.render(\"You win\", True, WHITE)\r\n text2 = FONT_XL.render(\"Press 'r' to restart\", True, WHITE)\r\n\r\n screen.blit(text1, [250, 300])\r\n screen.blit(text2, [0, 350])\r\n\r\ndef show_lose_screen():\r\n text1 = FONT_XL.render(\"You lose\", True, WHITE)\r\n text2 = FONT_XL.render(\"Press 'r' to restart\", True, WHITE)\r\n\r\n screen.blit(text1, [250, 300])\r\n screen.blit(text2, [0, 350])\r\n\r\ndef draw_healthbar(player):\r\n height_ratio = 0.05\r\n ratio = player.health / player.max_health\r\n\r\n if ratio > .67:\r\n color = GREEN\r\n elif ratio > .34:\r\n color = YELLOW\r\n else:\r\n color = RED\r\n\r\n bar_length = ratio * (player.rect.width - 10)\r\n height = height_ratio * player.rect.height\r\n\r\n pygame.draw.rect(screen, WHITE, [player.rect.x + 5, player.rect.bottom + 5, player.rect.width - 10, height])\r\n pygame.draw.rect(screen, color, [player.rect.x + 5, player.rect.bottom + 5, bar_length, height])\r\n\r\n\r\ndef setup(): \r\n global stage, done, player, ship, lasers, mobs, fleet, bombs, score, powerups\r\n \r\n ''' Make game objects '''\r\n ship = Ship(364, 680, ship_img)\r\n\r\n ''' Make sprite groups '''\r\n player = pygame.sprite.GroupSingle()\r\n player.add(ship)\r\n\r\n player.score = 0\r\n \r\n lasers = pygame.sprite.Group()\r\n bombs = pygame.sprite.Group()\r\n\r\n mob1 = 
Mob(100, 200, enemyP_img)\r\n mob2 = Mob(200, 200, enemyP_img)\r\n mob3 = Mob(300, 200, enemyP_img)\r\n mob4 = Mob(400, 200, enemyP_img)\r\n mob5 = Mob(500, 200, enemyP_img)\r\n mob6 = Mob(600, 200, enemyP_img)\r\n mob7 = Mob(700, 200, enemyP_img)\r\n mob8 = Mob(800, 200, enemyP_img)\r\n mob9 = Mob(900, 200, enemyP_img)\r\n\r\n mobs = pygame.sprite.Group()\r\n mobs.add(mob1, mob2, mob3, mob4, mob5, mob6, mob7, mob8, mob9)\r\n\r\n powerup1 = HealthPowerUp(200, -4000, powerup_img)\r\n powerup2 = ShootPowerUp(600, -7000, powerupRP_img)\r\n \r\n powerups = pygame.sprite.Group()\r\n powerups.add(powerup1, powerup2)\r\n\r\n fleet = Fleet(mobs)\r\n \r\n ''' set stage '''\r\n stage = START\r\n done = False\r\n \r\ndef check_win():\r\n global stage\r\n if len(mobs) == 0:\r\n stage = WIN\r\n \r\n# Game loop\r\npygame.mixer.music.play(-1)\r\n\r\nsetup()\r\n\r\nwhile not done:\r\n # Input handling (React to key presses, mouse clicks, etc.)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n\r\n done = True\r\n elif event.type == pygame.KEYDOWN:\r\n if stage == START:\r\n if event.key == pygame.K_SPACE:\r\n stage = PLAYING\r\n elif stage == PLAYING:\r\n if event.key == pygame.K_SPACE:\r\n ship.shoot()\r\n if stage == LOSE:\r\n if event.key == pygame.K_r:\r\n setup()\r\n if stage == WIN:\r\n if event.key == pygame.K_r:\r\n setup()\r\n \r\n pressed = pygame.key.get_pressed()\r\n \r\n if stage == PLAYING:\r\n if pressed[pygame.K_LEFT]:\r\n ship.move_left()\r\n elif pressed[pygame.K_RIGHT]:\r\n ship.move_right()\r\n if pressed[pygame.K_UP]:\r\n ship.move_up()\r\n elif pressed[pygame.K_DOWN]:\r\n ship.move_down()\r\n \r\n if ship.rapidfire_timer > 0 and pressed[pygame.K_SPACE]:\r\n ship.shoot()\r\n\r\n \r\n # Game logic (Check for collisions, update points, etc.)\r\n if stage == PLAYING:\r\n player.update()\r\n lasers.update()\r\n bombs.update()\r\n fleet.update()\r\n mobs.update()\r\n check_win()\r\n powerups.update()\r\n \r\n if ship.health == 0:\r\n stage = LOSE\r\n \r\n\r\n # Drawing code (Describe the picture. It isn't actually drawn yet.)\r\n screen.blit(backround_img, [0,0])\r\n lasers.draw(screen)\r\n bombs.draw(screen)\r\n player.draw(screen)\r\n mobs.draw(screen)\r\n display_statistics()\r\n powerups.draw(screen)\r\n draw_healthbar(ship)\r\n \r\n if stage == START:\r\n show_title_screen()\r\n\r\n if stage == LOSE:\r\n show_lose_screen()\r\n\r\n if stage == WIN:\r\n show_win_screen()\r\n\r\n # Update screen (Actually draw the picture in the window.)\r\n pygame.display.flip()\r\n\r\n\r\n # Limit refresh rate of game loop \r\n clock.tick(refresh_rate)\r\n\r\n\r\n# Close window and quit\r\npygame.quit()\r\n","sub_path":"space_war/space_war.py","file_name":"space_war.py","file_ext":"py","file_size_in_byte":14146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"329668123","text":"#!/usr/bin/env python\n\"\"\"\nmoonphase.py - Calculate Lunar Phase\nAuthor: Sean B. Palmer, inamidst.com\nCf. 
http://en.wikipedia.org/wiki/Lunar_phase#Lunar_phase_calculation\n\"\"\"\n\nimport math, decimal, datetime\ndec = decimal.Decimal\n\nconky_emoji_open = \"${font 'Symbola'}\"\nconky_emoji_close = \"${font}\"\n\nMOON_EMOJIS = (\n u\"🌑\", u\"🌒\", u\"🌓\", u\"🌔\", u\"🌕\", u\"🌖\", u\"🌗\", u\"🌘\"\n)\n\ndef position(now=None): \n if now is None: \n now = datetime.datetime.now()\n\n diff = now - datetime.datetime(2001, 1, 1)\n days = dec(diff.days) + (dec(diff.seconds) / dec(86400))\n lunations = dec(\"0.20439731\") + (days * dec(\"0.03386319269\"))\n\n return lunations % dec(1)\n\ndef phase(pos): \n index = (pos * dec(8)) + dec(\"0.5\")\n index = math.floor(index)\n return {\n 0: \"New Moon\", \n 1: \"Waxing Crescent\", \n 2: \"First Quarter\", \n 3: \"Waxing Gibbous\", \n 4: \"Full Moon\", \n 5: \"Waning Gibbous\", \n 6: \"Last Quarter\", \n 7: \"Waning Crescent\"\n }[int(index) & 7]\n\ndef emoji(pos):\n index = (pos * dec(8)) + dec(\"0.5\")\n index = math.floor(index)\n emoji = {\n 0: u\"🌑\",\n 1: u\"🌒\",\n 2: u\"🌓\",\n 3: u\"🌔\",\n 4: u\"🌕\",\n 5: u\"🌖\",\n 6: u\"🌗\",\n 7: u\"🌘\"\n }[int(index) & 7]\n return conky_emoji_open + emoji + conky_emoji_close\n\ndef main(): \n pos = position()\n phasename = phase(pos)\n phaseemoji = emoji(pos)\n\n roundedpos = round(float(pos)*100)\n print(\"%s %s (%s%%)\" % (phaseemoji, phasename, roundedpos))\n\nif __name__==\"__main__\": \n main()\n","sub_path":"scripts/executable_moonphase.py","file_name":"executable_moonphase.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"320810085","text":"import pygame\r\nfrom pygame.locals import*\r\nfrom sys import exit\r\n\r\npygame.init()\r\n\r\npreto = (0, 0, 0)\r\nverde = (0, 255, 0)\r\n\r\nscreen = pygame.display.set_mode((640, 360), 0, 32)\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n exit()\r\n\r\n screen.fill(preto)\r\n\r\n pygame.draw.rect(screen, verde, Rect((300, 120), (140, 70)))\r\n\r\n pygame.display.update()\r\n","sub_path":"rectangles.py","file_name":"rectangles.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"229116757","text":"from django_filters.rest_framework import (\n FilterSet, OrderingFilter, CharFilter, BooleanFilter,\n)\nfrom django.db.models import Q\n\nfrom . 
import models\n\nDEFAULT_NUMERIC_FILTER_OPERATORS = [\n 'exact', 'lte', 'gte', 'lt', 'gt', 'isnull', 'in',\n]\n\nDEFAULT_STRING_FILTER_OPERATORS = [\n 'iexact', 'icontains', 'istartswith', 'iendswith', 'startswith',\n 'endswith', 'contains', 'exact', 'regex', 'iregex', 'isnull', 'in',\n]\n\n\nclass BeerFilterSet(FilterSet):\n\n o = OrderingFilter(\n fields=[\n 'name', 'abv', 'ibu', 'style__name', 'style__category__name',\n 'manufacturer__name',\n ],\n )\n\n search = CharFilter(method='filter_search')\n on_tap = BooleanFilter(method='filter_on_tap')\n\n def filter_search(self, queryset, name, value):\n return self.queryset.filter(\n Q(\n name__icontains=value,\n ) | Q(\n alternate_names__name__icontains=value,\n ) | Q(\n manufacturer__name__icontains=value,\n ) | Q(\n style__name__icontains=value,\n ) | Q(\n style__category__name__icontains=value,\n ) | Q(\n manufacturer__alternate_names__name__icontains=value,\n ),\n ).distinct()\n\n def filter_on_tap(self, queryset, name, value):\n return queryset.filter(taps__isnull=not value)\n\n class Meta:\n fields = {\n 'name': DEFAULT_STRING_FILTER_OPERATORS,\n 'abv': DEFAULT_NUMERIC_FILTER_OPERATORS,\n 'ibu': DEFAULT_NUMERIC_FILTER_OPERATORS,\n 'manufacturer__name': DEFAULT_NUMERIC_FILTER_OPERATORS,\n 'taps__venue__name': DEFAULT_STRING_FILTER_OPERATORS,\n 'style__name': DEFAULT_STRING_FILTER_OPERATORS,\n 'style__category__name': DEFAULT_STRING_FILTER_OPERATORS,\n 'search': ['exact'],\n 'on_tap': ['exact'],\n }\n model = models.Beer\n","sub_path":"beers/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"621373184","text":"#!/usr/local/bin/python3\nimport nbformat\nimport rich\nimport sys\n\n\ndef main():\n errors = False\n for filename in sys.argv[1:]:\n doc = nbformat.read(filename, nbformat.current_nbformat)\n header_cells = [\n x\n for x in doc.cells\n if x[\"cell_type\"] == \"markdown\" and x[\"source\"].startswith(\"#\")\n ]\n bad_cells = [x for x in header_cells if x[\"source\"].startswith(\"# \")]\n toplevel = [x for x in header_cells if x[\"source\"].startswith(\"## \")]\n if bad_cells:\n for cell in bad_cells:\n source = cell[\"source\"]\n rich.print(\n f\"[red]{filename}[/red] - H1 header -\",\n source[: source.index(\"\\n\")],\n )\n errors = True\n if len(toplevel) == 0:\n rich.print(f\"[red]{filename}[/red] - No H2\")\n errors = True\n elif len(toplevel) > 1:\n rich.print(f\"[red]{filename}[/red] - {len(toplevel)} H2s\")\n errors = True\n\n sys.exit(int(errors))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/check_headers.py","file_name":"check_headers.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"217695889","text":"\"\"\"tornado IOLoop API with zmq compatibility\n\nIf you have tornado ≥ 3.0, this is a subclass of tornado's IOLoop,\notherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado.\n\nThe minimal shipped version of tornado's IOLoop does not include\nsupport for concurrent futures - this will only be available if you\nhave tornado ≥ 3.0.\n\"\"\"\n\n# Copyright (C) PyZMQ Developers\n# Distributed under the terms of the Modified BSD License.\n\n\nimport time\nimport warnings\nfrom typing import Tuple\n\nfrom zmq import ETERM, POLLERR, POLLIN, POLLOUT, Poller, ZMQError\n\ntornado_version: Tuple = ()\ntry:\n import tornado\n\n tornado_version = 
tornado.version_info\nexcept (ImportError, AttributeError):\n pass\n\nfrom .minitornado.ioloop import PeriodicCallback, PollIOLoop\nfrom .minitornado.log import gen_log\n\n\nclass DelayedCallback(PeriodicCallback):\n \"\"\"Schedules the given callback to be called once.\n\n The callback is called once, after callback_time milliseconds.\n\n `start` must be called after the DelayedCallback is created.\n\n The timeout is calculated from when `start` is called.\n \"\"\"\n\n def __init__(self, callback, callback_time, io_loop=None):\n # PeriodicCallback require callback_time to be positive\n warnings.warn(\n \"\"\"DelayedCallback is deprecated.\n Use loop.add_timeout instead.\"\"\",\n DeprecationWarning,\n )\n callback_time = max(callback_time, 1e-3)\n super().__init__(callback, callback_time, io_loop)\n\n def start(self):\n \"\"\"Starts the timer.\"\"\"\n self._running = True\n self._firstrun = True\n self._next_timeout = time.time() + self.callback_time / 1000.0\n self.io_loop.add_timeout(self._next_timeout, self._run)\n\n def _run(self):\n if not self._running:\n return\n self._running = False\n try:\n self.callback()\n except Exception:\n gen_log.error(\"Error in delayed callback\", exc_info=True)\n\n\nclass ZMQPoller:\n \"\"\"A poller that can be used in the tornado IOLoop.\n\n This simply wraps a regular zmq.Poller, scaling the timeout\n by 1000, so that it is in seconds rather than milliseconds.\n \"\"\"\n\n def __init__(self):\n self._poller = Poller()\n\n @staticmethod\n def _map_events(events):\n \"\"\"translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR\"\"\"\n z_events = 0\n if events & IOLoop.READ:\n z_events |= POLLIN\n if events & IOLoop.WRITE:\n z_events |= POLLOUT\n if events & IOLoop.ERROR:\n z_events |= POLLERR\n return z_events\n\n @staticmethod\n def _remap_events(z_events):\n \"\"\"translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR\"\"\"\n events = 0\n if z_events & POLLIN:\n events |= IOLoop.READ\n if z_events & POLLOUT:\n events |= IOLoop.WRITE\n if z_events & POLLERR:\n events |= IOLoop.ERROR\n return events\n\n def register(self, fd, events):\n return self._poller.register(fd, self._map_events(events))\n\n def modify(self, fd, events):\n return self._poller.modify(fd, self._map_events(events))\n\n def unregister(self, fd):\n return self._poller.unregister(fd)\n\n def poll(self, timeout):\n \"\"\"poll in seconds rather than milliseconds.\n\n Event masks will be IOLoop.READ/WRITE/ERROR\n \"\"\"\n z_events = self._poller.poll(1000 * timeout)\n return [(fd, self._remap_events(evt)) for (fd, evt) in z_events]\n\n def close(self):\n pass\n\n\nclass ZMQIOLoop(PollIOLoop):\n \"\"\"ZMQ subclass of tornado's IOLoop\n\n Minor modifications, so that .current/.instance return self\n \"\"\"\n\n _zmq_impl = ZMQPoller\n\n def initialize(self, impl=None, **kwargs):\n impl = self._zmq_impl() if impl is None else impl\n super().initialize(impl=impl, **kwargs)\n\n @classmethod\n def instance(cls, *args, **kwargs):\n \"\"\"Returns a global `IOLoop` instance.\n\n Most applications have a single, global `IOLoop` running on the\n main thread. Use this method to get this instance from\n another thread. 
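For example (an illustrative usage sketch, assuming pyzmq's public
        ``zmq.eventloop.ioloop`` import path)::

            from zmq.eventloop.ioloop import IOLoop

            loop = IOLoop.instance()   # the same object from every thread
            loop.add_callback(lambda: print("runs on the IOLoop's thread"))

        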
To get the current thread's `IOLoop`, use `current()`.\n \"\"\"\n # install ZMQIOLoop as the active IOLoop implementation\n # when using tornado 3\n if tornado_version >= (3,):\n PollIOLoop.configure(cls)\n loop = PollIOLoop.instance(*args, **kwargs)\n if not isinstance(loop, cls):\n warnings.warn(\n f\"IOLoop.current expected instance of {cls!r}, got {loop!r}\",\n RuntimeWarning,\n stacklevel=2,\n )\n return loop\n\n @classmethod\n def current(cls, *args, **kwargs):\n \"\"\"Returns the current thread’s IOLoop.\"\"\"\n # install ZMQIOLoop as the active IOLoop implementation\n # when using tornado 3\n if tornado_version >= (3,):\n PollIOLoop.configure(cls)\n loop = PollIOLoop.current(*args, **kwargs)\n if not isinstance(loop, cls):\n warnings.warn(\n f\"IOLoop.current expected instance of {cls!r}, got {loop!r}\",\n RuntimeWarning,\n stacklevel=2,\n )\n return loop\n\n def start(self):\n try:\n super().start()\n except ZMQError as e:\n if e.errno == ETERM:\n # quietly return on ETERM\n pass\n else:\n raise\n\n\n# public API name\nIOLoop = ZMQIOLoop\n\n\ndef install():\n \"\"\"set the tornado IOLoop instance with the pyzmq IOLoop.\n\n After calling this function, tornado's IOLoop.instance() and pyzmq's\n IOLoop.instance() will return the same object.\n\n An assertion error will be raised if tornado's IOLoop has been initialized\n prior to calling this function.\n \"\"\"\n from tornado import ioloop\n\n # check if tornado's IOLoop is already initialized to something other\n # than the pyzmq IOLoop instance:\n assert (\n not ioloop.IOLoop.initialized()\n ) or ioloop.IOLoop.instance() is IOLoop.instance(), (\n \"tornado IOLoop already initialized\"\n )\n\n if tornado_version >= (3,):\n # tornado 3 has an official API for registering new defaults, yay!\n ioloop.IOLoop.configure(ZMQIOLoop)\n else:\n # we have to set the global instance explicitly\n ioloop.IOLoop._instance = IOLoop.instance()\n","sub_path":"contrib/python/pyzmq/py3/zmq/eventloop/_deprecated.py","file_name":"_deprecated.py","file_ext":"py","file_size_in_byte":6444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"29974968","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, render_template\nimport os\nimport sys\n\nPACKAGE_PARENT = '..'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\nprint (SCRIPT_DIR)\napp = Flask(__name__)\n\nchatbot = ChatBot(\n \"Terminal\",\n storage_adapter=\"chatterbot.storage.SQLStorageAdapter\",\n logic_adapters=[\n # \"chatterbot.logic.MathematicalEvaluation\",\n # \"chatterbot.logic.TimeLogicAdapter\",\n \"chatterbot.logic.BestMatch\"\n ,\n {\n 'import_path': 'chatterbot.logic.LowConfidenceAdapter',\n 'threshold': 0.35,\n 'default_response': '很抱歉,我没能理解您的意思,请重新表述您的问题,谢谢!.'\n },\n {\n 'import_path': 'chatterbot.logic.SpecificResponseAdapter',\n 'input_text': '百分点',\n 'output_text': 'Ok, here is a link: http://www.baifendian.com/'\n }\n ],\n # input_adapter=\"chatterbot.input.TerminalAdapter\",\n # output_adapter=\"chatterbot.output.TerminalAdapter\",\n # database=\"../database.db\"\n)\nchatbot.set_trainer(ChatterBotCorpusTrainer)\nchatbot.train(\n # \"chatterbot.corpus.chinese\",\n # \"chatterbot.corpus.english\",\n \"../cms.json\"\n)\n\n\n@app.route(\"/\")\ndef home():\n return 
render_template(\"index.html\")\n\n\n@app.route(\"/get/<query>\")  # route parameter restored: the view below expects a 'query' argument\ndef get_raw_response(query):\n    return str(chatbot.get_response(query))\n\n\nif __name__ == \"__main__\":\n    app.run(host='172.18.1.146', port=8000)\n","sub_path":"bot/flask_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"258886048","text":"# Date 18-3-2021\r\n\r\nfile1 = open(\"tutorial 28.1.txt\", \"w\")\r\nfile1.write(\"My name is Aman Rathore\")\r\nfile1.close()\r\n# The syntax above will overwrite the lines in the given file\r\n\r\nprint(10*\"\\n\")\r\n\r\nfile1 = open(\"tutorial 28.2.txt\", \"a\")\r\nfile1.write(\"My name is Aman Rathore\\n\")\r\nfile1.close()\r\n# The syntax above will append the lines to the given file (without losing the previous data)\r\n\r\nprint(10*\"\\n\")\r\n\r\nfile1 = open(\"tutorial 28.3.txt\", \"r+\")\r\nprint(file1.read())\r\na = file1.write(\"\\nMy name is Aman Rathore\")\r\nprint(\"\\n\")\r\nprint(a)\r\n# The line above tells you how many characters were written\r\nfile1.close()\r\n# The syntax above lets you read and write the given file at the same time\r\n\r\n# print(10*\"\\n\")\r\n# print(len(\"My name is Aman Rathore\\n\"))\r\n","sub_path":"tutorial 28.py","file_name":"tutorial 28.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"304063453","text":"# power of two or not\nn=int(input())\nd=0\nfor i in range(0,n):\n    c=2**i\n    if n==c:\n        d=d+1\nif d==1:\n    print(\"yes\")\nelse:\n    print(\"no\")\n","sub_path":"checkispoweroftwo.py","file_name":"checkispoweroftwo.py","file_ext":"py","file_size_in_byte":147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"232466160","text":"from flask import Blueprint, render_template\nfrom app.routes.helpers import (_fetch_recordings, _fetch_bio, _fetch_gigs,\n                                _fetch_upcoming_gigs)\nimport markdown\n\nSOUNDCLOUD_REGEX = r'^https?://[^/]*soundcloud\\\\.com/[^ ]*$'\n\nmain = Blueprint('main', __name__)\n\n\n@main.route('/', methods=['GET'])\ndef index():\n    bio = _fetch_bio()\n    gigs = _fetch_upcoming_gigs()\n    short_bio_html = markdown.markdown(bio.short_bio)\n    return render_template('index.html',\n                           gigs=gigs,\n                           tagline=bio.tagline,\n                           short_bio=short_bio_html)\n\n\n@main.route('/about')\ndef about():\n    bio = _fetch_bio()\n    long_bio_html = markdown.markdown(bio.long_bio)\n    return render_template('about.html',\n                           tagline=bio.tagline,\n                           long_bio=long_bio_html)\n\n\n@main.route('/gigs')\ndef gigs():\n    bio = _fetch_bio()\n    gigs = _fetch_gigs()\n    return render_template('gigs.html',\n                           gigs=gigs,\n                           tagline=bio.tagline)\n\n\n@main.route('/bands')\ndef bands():\n    bio = _fetch_bio()\n    bands_html = markdown.markdown(bio.bands)\n    return render_template('bands.html',\n                           tagline=bio.tagline,\n                           bands=bands_html)\n\n\n@main.route('/music')\ndef music():\n    bio = _fetch_bio()\n    recordings = _fetch_recordings()\n    urls = recordings.items.split(', ')\n    return render_template('music.html',\n                           tagline=bio.tagline,\n                           urls=urls)\n","sub_path":"app/routes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"42682547","text":"\"\"\"\r\nProject background:\r\nThis project was created as part of my continuing education course \"Programmieren mit Python\"\r\n(Programming with Python). My learning goals for this project were to familiarise myself with\r\n    a) object-oriented programming,\r\n    b) building graphical user interfaces in Tkinter,\r\n    c) storing user input in a JSON file,\r\n    d) and preparing and visualising external data.\r\n\r\nAbout the program:\r\nThis program is a simple demonstration of creating and changing the inventory\r\nof a fictional ski rental shop.\r\nUsing carvers as an example, new ski models can be saved and their stock can be changed.\r\nIn addition, avalanche releases in the USA in spring 2020 can be displayed.\r\n\r\n!!! NOTE - WORK IN PROGRESS !!!\r\n    The program is not in a final state. The next planned steps are:\r\n    add validation for the input fields\r\n    check whether a newly created ski already exists\r\n    alternatively store the data in a CSV file\r\n\"\"\"\r\n\r\n# IMPORTS\r\nimport json\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nimport matplotlib.pyplot as plt\r\nimport tkinter.ttk as ttk\r\nimport cartopy.crs as ccrs\r\n\r\n\r\n# CLASSES\r\n\r\n# Parent class \"Ski\"\r\nclass Ski:\r\n    \"\"\"A simple description of a ski. The parent class \"Ski\" contains the child classes:\r\n    \"Carver\", \"Tourenski\", \"Allmountain\", \"Freestyle-Ski\".\"\"\"\r\n\r\n    # Initialise the attributes\r\n    def __init__(self, brand=\"\", purchase_price=float, length=int):\r\n        \"\"\"Initialise the attributes that describe a ski.\"\"\"\r\n        self.brand = brand\r\n        self.purchase_price = purchase_price\r\n        self.length = length\r\n\r\n    # Create a new ski with the attributes of the parent class\r\n    def new_ski(self) -> dict:\r\n        \"\"\"Creates a dictionary. Contents:\r\n        a) \"description\": dictionary containing the attributes of the ski\r\n        b) \"stock\": initial stock level with the int value 0.\r\n        The ski still needs further details, depending on the ski type\"\"\"\r\n        new_ski, description, stock = {}, {}, {}  # create the dictionaries.\r\n        description[\"brand\"] = self.brand  # fill the dictionary \"description\" with the attributes \"brand\",\r\n        # \"purchase_price\" and \"length\".\r\n        description[\"purchase_price\"] = self.purchase_price\r\n        description[\"length\"] = self.length\r\n        new_ski[\"description\"] = description  # attach the dict \"description\" to the dict \"new_ski\".\r\n        new_ski[\"stock\"] = 0  # attach the key-value pair \"stock\" to the dict \"new_ski\".\r\n        return new_ski\r\n\r\n\r\n# Child class \"Carver\"\r\nclass Carver(Ski):\r\n    \"\"\"Represents the aspects of a ski that are specific to carvers\"\"\"\r\n\r\n    # Initialise the attributes\r\n    def __init__(self, brand=\"\", purchase_price=float, length=int, name=\"\", typ=\"\"):\r\n        \"\"\"Initialise the attributes of the parent class\"\"\"\r\n        super().__init__(brand, purchase_price, length)\r\n        self.name_carver = name\r\n        self.typ = typ\r\n\r\n    # Description of the carver\r\n    def describe_carver(self) -> str:\r\n        \"\"\"Describes the properties of the carver\"\"\"\r\n        description = f\"Model: {self.name_carver}\\n\" \\\r\n                      f\"Klasse: Carver\\n\" \\\r\n                      f\"Marke: {self.brand}\\n\" \\\r\n                      f\"Carver-Typ: {self.typ}\\n\" \\\r\n                      f\"Länge: {self.length} cm.\"\r\n        return description\r\n\r\n    # Create a new carver\r\n    def new_carver(self) -> dict:\r\n        \"\"\"Extends the dict from new_ski(), which contains the attributes of the parent class.\r\n        Contents:\r\n        a) additional key-value pair: \"model\" with the model name\r\n        b) the carver type is inserted into description.\"\"\"\r\n        new_carver = self.new_ski()  # get the attributes of a ski (parent class) as a dict.\r\n        new_carver[\"model\"] = self.name_carver  # attach the key-value pair \"model\" to the dict \"new_carver\".\r\n        for key, value in new_carver.items():  # attach the key-value pair \"typ\" to the dict \"description\".\r\n            if key == \"description\":\r\n                value[\"typ\"] = self.typ\r\n        return new_carver\r\n\r\n    # Insert the new carver into the JSON file\r\n    def new_carver_json(self):\r\n        \"\"\"Takes the dict from new_carver() and appends it under the key \"Carver\" in the JSON file \"ski.json\".\"\"\"\r\n        filename_carver = \"ski.json\"\r\n        with open(filename_carver, \"r\") as fc:  # open and load the JSON file.\r\n            contents = json.load(fc)\r\n        new_contents = {}  # new dict for overwriting the JSON file.\r\n        for key, value in contents.items():  # insert the dict from new_carver() into the JSON file under the key\r\n            # \"Carver\"\r\n            if key == \"Carver\":\r\n                value.append(self.new_carver())\r\n            new_contents[key] = value  # store the values in the new dict.\r\n        with open(filename_carver, \"w\") as fc:  # overwrite the JSON file with the new values.\r\n            json.dump(new_contents, fc)\r\n        print(f'Es wurde folgender Carver der JSON-Datei \"ski.json\" hinzugefügt:\\n'\r\n              f'{self.describe_carver()}')  # info line stating which carver was added.\r\n\r\n\r\n# Child class \"Tourenski\" (touring ski)\r\nclass TouringSki(Ski):\r\n    \"\"\"Represents the aspects of a ski that are specific to touring skis\"\"\"\r\n\r\n    # Initialise the attributes\r\n    def __init__(self, brand=\"\", purchase_price=float, length=int, name=\"\", typ=\"\"):\r\n        \"\"\"Initialise the attributes of the parent class\"\"\"\r\n        super().__init__(brand, purchase_price, length)\r\n        self.name_touring_ski = name\r\n        self.typ = typ\r\n\r\n    # Description of the touring ski\r\n    def describe_touren_ski(self) -> str:\r\n        \"\"\"Describes the properties of the touring ski\"\"\"\r\n        description = f\"Model: {self.name_touring_ski}, Klasse: Tourenski, Marke: {self.brand}, \" \\\r\n                      f\"Tourenski-Typ: {self.typ}, Länge: {self.length} cm.\"\r\n        return description\r\n\r\n    # Create a new touring ski\r\n    def new_touren_ski(self) -> dict:\r\n        \"\"\"Extends the dict from new_ski(), which contains the attributes of the parent class.\r\n        Contents:\r\n        a) additional key-value pair: \"model\" with the model name\r\n        b) the touring-ski type is inserted into description.\"\"\"\r\n        new_touring_ski = self.new_ski()  # get the attributes of a ski (parent class) as a dict.\r\n        new_touring_ski[\"model\"] = self.name_touring_ski  # attach the key-value pair \"model\" to the dict \"new_touring_ski\".\r\n        for key, value in new_touring_ski.items():  # attach the key-value pair \"typ\" to the dict \"description\".\r\n            if key == \"description\":\r\n                value[\"typ\"] = self.typ\r\n        return new_touring_ski\r\n\r\n    # Insert the new touring ski into the JSON file\r\n    def new_touring_ski_json(self):\r\n        \"\"\"Takes the dict from new_touren_ski() and appends it under the key \"Tourenski\" in the JSON file \"ski.json\".\"\"\"\r\n        filename_carver = \"ski.json\"\r\n        with open(filename_carver, \"r\") as fc:  # open and load the JSON file.\r\n            contents = json.load(fc)\r\n        new_contents = {}  # new dict for overwriting the JSON file.\r\n        for key, value in contents.items():  # insert the dict from new_touren_ski() into the JSON file under the\r\n            # key \"Tourenski\".\r\n            if key == \"Tourenski\":\r\n                value.append(self.new_touren_ski())\r\n            new_contents[key] = value  # store the values in the new dict.\r\n        with open(filename_carver, \"w\") as fc:  # overwrite the JSON file with the new values.\r\n            json.dump(new_contents, fc)\r\n        print(f'Es wurde folgender Tourenski der JSON-Datei \"ski.json\" hinzugefügt:\\n'\r\n              f'{self.describe_touren_ski()}')  # info line stating which touring ski was added.\r\n\r\n
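# A minimal sketch of the load-append-dump pattern that new_carver_json() and
# new_touring_ski_json() above both implement; only the top-level key differs.
# The helper name append_ski_to_json is hypothetical and assumes the same
# {"Carver": [...], "Tourenski": [...], ...} layout of "ski.json":
import json

def append_ski_to_json(ski_dict, ski_type, filename="ski.json"):
    """Append ski_dict to the list stored under ski_type in filename."""
    with open(filename, "r") as f:
        contents = json.load(f)  # read the whole inventory
    contents.setdefault(ski_type, []).append(ski_dict)  # modify it in memory
    with open(filename, "w") as f:
        json.dump(contents, f)  # write everything back in one go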
print(f'Es wurde folgender Tourenski der JSON-Datei \"ski.json\" hinzugefügt:\\n'\r\n f'{self.describe_touren_ski()}') # Info-Zeile, welcher Tourenski hinzugefügt wurde.\r\n\r\n\r\n# DATEIVERWALTUNG\r\n\r\n# Anlegen von dict mit Ski-Typen\r\ndef dict_ski() -> dict:\r\n \"\"\"Erstellt ein Dictionairy mit unterschiedlichen Typen von Skiern\"\"\"\r\n ski = {\"Carver\": [], \"Tourenski\": [], \"Allmountain\": [], \"Freestyle-Ski\": []}\r\n return ski\r\n\r\n\r\n# Erstellen json-Datei mit Ski-Typen\r\ndef new_ski_json():\r\n \"\"\"Erstellt neue json-Datei \"ski.json bzw. leert vorhanden Datei.\"\"\"\r\n filename_json = \"ski.json\"\r\n with open(filename_json, \"w\") as f_json: # Erstellen der Datei mit der Liste aus dict_ski().\r\n json.dump(dict_ski(), f_json)\r\n\r\n\r\n# Importiert json-Datei für dict\r\ndef import_json_file() -> dict:\r\n \"\"\"Importiert die Datei \"ski.json\" und übergibt sie als dictionairy\"\"\"\r\n filename = \"ski.json\"\r\n with open(filename, \"r\") as f_json: # Importiert Datei \"ski.json\" und erstellt ein dict aus ihr.\r\n contents = json.load(f_json)\r\n return contents\r\n\r\n\r\n# MATPLOTLIB LAWINENGRAPHIK\r\n\r\n# Erstellen von Listen zu Ortsangaben vON Lawinenabgängen\r\ndef data_us_avalanche_report():\r\n \"\"\"Erstellt zwei Listen (lons, lats), um Ortsangaben zu Lawinenabgängen verwenden zu können.\r\n Lons: Longituden\r\n Lats: Latituden\r\n Basis: json Datei, Quelle: https://openavalancheproject.org/#\r\n Rohdaten: https://raw.githubusercontent.com/scottcha/OpenAvalancheProject/master/Data/USAvalancheRegions.geojson\r\n \"\"\"\r\n filename = \"us_avalanche_report\"\r\n\r\n with open(filename) as f_aval_list: # Öffnen und laden der Rohdaten.\r\n us_avalanche_report = json.load(f_aval_list)\r\n avalanches = us_avalanche_report[\"features\"] # Auswahl des Sch-W-Paars \"features\".\r\n\r\n lons, lats = [], [] # Listen für Graphik\r\n\r\n for avalanche in avalanches: # Liste der Regionen mit erfassten Lawinen öffnen.\r\n for key_1, value_1 in avalanche.items():\r\n if key_1 == \"geometry\":\r\n for key_2, value_2 in value_1.items():\r\n if key_2 == \"coordinates\":\r\n val2 = value_2[0] # Liste mit Ortsangaben je Lawinenregion öffnen.\r\n for i in val2:\r\n if isinstance(i[0], float) and isinstance(i[1], float):\r\n lon = i[0] # Prüfung ob Eintrag eine Float-Variable ist, aufgrund inhomogener Daten.\r\n lat = i[1]\r\n lons.append(lon) # Befüllen der Liste \"lons\".\r\n lats.append(lat) # Befüllen der Liste \"lats\".\r\n\r\n return lons, lats\r\n\r\n\r\n# Erstellen einer matplotlib Graphik von Lawinenabgängen in den USA\r\ndef show_avalanche_map():\r\n \"\"\"Erstellt eine Graphik der Lawinenabgänge in den USA.\r\n Daten erhält Funtkion aus data_US_avalanche_Report()\"\"\"\r\n\r\n lons, lats = data_us_avalanche_report() # Erhalten der Listen mit Longituden und Latituden.\r\n\r\n ax = plt.axes(projection=ccrs.PlateCarree()) # Laden der Landkarte von PlateCarree.\r\n ax.stock_img()\r\n ax.coastlines()\r\n ax.scatter(lons, lats, color=\"deepskyblue\", s=20, alpha=1.0,\r\n label=\"Lawinenabhänge, März 2020\\nhttps://openavalancheproject.org/#\",\r\n edgecolors=\"dodgerblue\")\r\n ax.set_xlim(-170, 10)\r\n ax.set_ylim(-20, 90)\r\n ax.legend()\r\n\r\n mng = plt.get_current_fig_manager()\r\n mng.window.showMaximized()\r\n\r\n plt.show()\r\n\r\n\r\n# GUI\r\n\r\n# Speichert eingegebenen Carver\r\ndef save_new_carver():\r\n \"\"\"Erstellt aus den Eingabe des Labelframes \"Neuer Carver einen neuen\r\n Eintrag in der Datei \"ski.json\"\"\"\r\n\r\n brand = 
omVar_new_carver_brand.get() # Erhalten der Eingabewerte.\r\n price = float(enVar_carv_price.get())\r\n length = int(enVar_carv_length.get())\r\n model = en_new_carver_model.get()\r\n typ = omVar_new_carver_typ.get()\r\n\r\n omVar_new_carver_brand.set(carver_brandlist[8]) # \"Bereinigen\" der Eingabefelder.\r\n enVar_carv_price.set(\"\")\r\n enVar_carv_length.set(\"\")\r\n enVar_carv_model.set(\"\")\r\n omVar_new_carver_typ.set(carver_typ[4])\r\n\r\n new_carver = Carver(brand, price, length, model, typ) # Einspeisen der Eingabewerte in Funktion zum anlegen\r\n # eines neuen Carvers nud speichern in Datei \"ski.json\"\r\n new_carver.new_carver_json()\r\n\r\n messagebox.showinfo(\"Neuer Ski\", f'Es wurde folgender Carver dem Bestand hinzugefügt:\\n\\n'\r\n f'{Carver.describe_carver(new_carver)}')\r\n\r\n\r\n# ändert den Lagerbestand\r\ndef change_amount(typ=\"\".upper(), model=\"\".upper(), length=int, amount=int, add=bool, use=bool):\r\n \"\"\"Erhöht Lagerbestand der Typ-, Model-, Längenkombination um amount\"\"\"\r\n new_contents = {} # Anlegen eines dict zum Überschreiben der alten Werte.\r\n current_length = 0 # Variablen zum Abgleich der Eingabewerte mit Werten aus json Datei.\r\n current_stock = 0\r\n f_change_stock = import_json_file()\r\n filename = \"ski.json\"\r\n for key, value in f_change_stock.items(): # Skityp identifizieren.\r\n if key == typ: # Prüfung Skityp.\r\n for ski in value: # Sepzifisches Model und Länge identifizieren.\r\n for key_ski, value_i in ski.items():\r\n if key_ski == \"description\":\r\n current_length = value_i[\"length\"] # übernimmt abgespeicherten Wert Länge.\r\n if key_ski == \"stock\":\r\n current_stock = value_i # übernahme abgespeicherter Wert.\r\n if key_ski == \"model\" and value_i == model:\r\n if current_length == length: # Art der Mengenänderung bestimmen.\r\n if use:\r\n current_stock = amount\r\n else:\r\n if add:\r\n current_stock += amount\r\n else:\r\n current_stock -= amount\r\n ski[\"stock\"] = current_stock # Neuen Bestandswert übergeben.\r\n new_contents[key] = value # Alten Wert in neues dict eintragen.\r\n messagebox.showinfo(\"Änderung Lagerbestand\", f'Der neue Lagerebestand des Ski \"{model}\"\\n'\r\n f' mit der Länge {current_length} ist {current_stock}.')\r\n else:\r\n new_contents[key] = value # neues dict mit bisherigen, ungeänderten Werten füllen.\r\n with open(filename, \"w\") as fc:\r\n json.dump(new_contents, fc) # neues dict in Datei \"ski.json\" schreiben.\r\n\r\n\r\n# Speichert Veränderung Lagerbestand\r\ndef save_stock_carver():\r\n \"\"\"Liest Werte des Buttons speicherns (b_stock_carver_save) ein und setzt Variablen add und use.\"\"\"\r\n typ_stock = \"Carver\"\r\n model_stock = enVar_carver_model_stock.get() # Erhalten der Eingabewerte.\r\n length_stock = int(enVar_carv_length_stock.get())\r\n amount_stock = int(enVar_carv_amount_stock.get())\r\n option_stock = omVar_stock_carver_option.get()\r\n add = 0\r\n use = 0\r\n if option_stock == stock_carver_option[0]: # Setzt Variablen gemäße Auswahl.\r\n add = 1\r\n if option_stock == stock_carver_option[2]:\r\n use = 1\r\n\r\n change_amount(typ_stock, model_stock, length_stock, amount_stock, add, use) #\r\n\r\n enVar_carver_model_stock.set(\"\") # \"Bereinigen\" der Eingabefelder.\r\n enVar_carv_length_stock.set(\"\")\r\n enVar_carv_amount_stock.set(\"\")\r\n omVar_stock_carver_option.set(stock_carver_option[3])\r\n\r\n\r\n# GUI PROGRAMMABLAUF\r\n\r\nroot = Tk()\r\nroot.geometry = \"900x900\"\r\n\r\n# RAHMEN und ÜBERSCHRIFT\r\nf = Frame(root, width=500, 
height=100)\r\nf.grid()\r\nl1 = Label(f, text=\"Ski Verwaltung\", font=(\"default\", \"20\"), pady=20)\r\nl1.grid(row=0, column=0, columnspan=1)\r\n\r\n# NOTEBOOK ANLEGEN\r\nnb = ttk.Notebook(f, width=324, height=600)\r\ntab_admin_carver = ttk.Frame(nb)\r\ntab_admin_touring = ttk.Frame(nb)\r\ntab_admin_all_mountain = ttk.Frame(nb)\r\ntab_admin_freestyle = ttk.Frame(nb)\r\ntab_admin_misc = ttk.Frame(nb)\r\nnb.add(tab_admin_carver, text=\"Carver\")\r\nnb.add(tab_admin_touring, text=\"Tourenski\")\r\nnb.add(tab_admin_all_mountain, text=\"All-Mountain\")\r\nnb.add(tab_admin_freestyle, text=\"Freestyle\")\r\nnb.add(tab_admin_misc, text=\"Allgemein\")\r\nnb.grid()\r\n\r\n# TAB NEUER CARVER\r\n\r\n# CARVER ANLEGEN\r\n\r\n# Labelframe \"Neuer Carver\"\r\nlf_new_carver = ttk.LabelFrame(tab_admin_carver, text=\"Neuer Carver\")\r\nlf_new_carver.grid(row=0, column=0, columnspan=3, ipadx=30, padx=8, pady=15, sticky=W)\r\nla_new_carver = ttk.Label(lf_new_carver, text=\"Hier können Sie einen neuen Carver anlegen.\")\r\nla_new_carver.grid(row=1, column=0, columnspan=3, pady=12, padx=4)\r\n\r\n# Marke Neuer Carver\r\nomVar_new_carver_brand = StringVar()\r\ncarver_brandlist = [\"Atomic\", \"Fischer\", \"Head\", \"K2\", \"Nordica\", \"Rossignol\", \"Salomon\", \"Voelkl\", \"\"]\r\n# carver_brandlist = sorted(carver_brandlist)\r\n\r\nla_new_carver_brand = ttk.Label(lf_new_carver, text=\"Marke:\")\r\nla_new_carver_brand.grid(row=2, column=0, pady=2, sticky=W, padx=6)\r\nom_new_carver_brand = OptionMenu(lf_new_carver, omVar_new_carver_brand, *carver_brandlist)\r\nom_new_carver_brand.grid(row=2, column=1, pady=1, sticky=W, padx=4)\r\n\r\n# Preis Neuer Carver\r\nenVar_carv_price = StringVar()\r\n\r\nla_new_carver_price = ttk.Label(lf_new_carver, text=\"Preis:\")\r\nla_new_carver_price.grid(row=3, column=0, pady=2, sticky=W, padx=6)\r\nen_new_carver_price = ttk.Entry(lf_new_carver, textvariable=enVar_carv_price)\r\nen_new_carver_price.grid(row=3, column=1, pady=1, sticky=W, padx=4)\r\nla_new_carver_price_desc = ttk.Label(lf_new_carver, text=\"€.CC\")\r\nla_new_carver_price_desc.grid(row=3, column=2, pady=2, sticky=W)\r\n\r\n# Länge Neuer Carver\r\nenVar_carv_length = StringVar()\r\n\r\nla_new_carver_length = ttk.Label(lf_new_carver, text=\"Länge:\")\r\nla_new_carver_length.grid(row=4, column=0, pady=2, sticky=W, padx=6)\r\nen_new_carver_length = ttk.Entry(lf_new_carver, textvariable=enVar_carv_length)\r\nen_new_carver_length.grid(row=4, column=1, pady=1, sticky=W, padx=4)\r\nla_new_carver_length_desc = ttk.Label(lf_new_carver, text=\"cm\")\r\nla_new_carver_length_desc.grid(row=4, column=2, pady=2, sticky=W)\r\n\r\n# Model Neuer Carver\r\nenVar_carv_model = StringVar()\r\n\r\nla_new_carver_model = ttk.Label(lf_new_carver, text=\"Model:\")\r\nla_new_carver_model.grid(row=5, column=0, pady=2, sticky=W, padx=6)\r\nen_new_carver_model = ttk.Entry(lf_new_carver, textvariable=enVar_carv_model)\r\nen_new_carver_model.grid(row=5, column=1, pady=1, sticky=W, padx=4)\r\n\r\n# Typ Neuer Carver\r\nomVar_new_carver_typ = StringVar()\r\ncarver_typ = [\"Allround\", \"Freeride\", \"Slalom\", \"Race\", \"\"]\r\n\r\nla_new_carver_typ = ttk.Label(lf_new_carver, text=\"Typ:\")\r\nla_new_carver_typ.grid(row=6, column=0, pady=2, sticky=W, padx=6)\r\nom_new_carver_typ = OptionMenu(lf_new_carver, omVar_new_carver_typ, *carver_typ)\r\nom_new_carver_typ.grid(row=6, column=1, pady=1, sticky=W, padx=4)\r\n\r\n# speichern neuer Carver\r\nb_new_carver_save = Button(lf_new_carver, text=\"speichern\", 
command=save_new_carver)\r\nb_new_carver_save.grid(row=7, columnspan=3, pady=20, padx=4)\r\n\r\n# Informationsanzeige neuer Carver\r\n\r\n\r\n# BESTAND ÄNDERN\r\n\r\n# Labelframe \"Bestand\"\r\nlf_new_stock = ttk.LabelFrame(tab_admin_carver, text=\"Bestandsverwaltung\")\r\nlf_new_stock.grid(row=1, column=0, columnspan=2, ipadx=40, padx=8, pady=10)\r\nla_new_stock = ttk.Label(lf_new_stock, text=\"Hier können Sie den Bestand ändern.\\nBitte geben Model und die Länge an.\")\r\nla_new_stock.grid(row=0, column=0, columnspan=3, pady=12, padx=4, sticky=W)\r\n\r\n# Model Bestand\r\nenVar_carver_model_stock = StringVar()\r\n\r\nla_stock_carver_model = ttk.Label(lf_new_stock, text=\"Model:\")\r\nla_stock_carver_model.grid(row=1, column=0, pady=1, sticky=W, padx=6)\r\nen_stock_carver_model = ttk.Entry(lf_new_stock, textvariable=enVar_carver_model_stock)\r\nen_stock_carver_model.grid(row=1, column=1, pady=1, sticky=W, padx=4)\r\n\r\n# Länge Bestand\r\nenVar_carv_length_stock = StringVar()\r\n\r\nla_stock_carver_length = ttk.Label(lf_new_stock, text=\"Länge:\")\r\nla_stock_carver_length.grid(row=2, column=0, pady=2, sticky=W, padx=6)\r\nen_stock_carver_length = ttk.Entry(lf_new_stock, textvariable=enVar_carv_length_stock)\r\nen_stock_carver_length.grid(row=2, column=1, pady=1, sticky=W, padx=4)\r\nla_stock_carver_length_desc = ttk.Label(lf_new_stock, text=\"cm\")\r\nla_stock_carver_length_desc.grid(row=2, column=2, pady=2, sticky=W)\r\n\r\n# Menge Bestand\r\nla_change_stock = ttk.Label(lf_new_stock, text=\"Mengenänderung:\\nMenge und Art der Änderung angeben.\")\r\nla_change_stock.grid(row=3, column=0, columnspan=3, pady=8, padx=4, sticky=SW)\r\n\r\nenVar_carv_amount_stock = StringVar()\r\nla_stock_carver_amount = ttk.Label(lf_new_stock, text=\"Menge:\")\r\nla_stock_carver_amount.grid(row=4, column=0, pady=2, sticky=W, padx=6)\r\nen_stock_carver_amount = ttk.Entry(lf_new_stock, textvariable=enVar_carv_amount_stock)\r\nen_stock_carver_amount.grid(row=4, column=1, pady=2, sticky=W, padx=4)\r\n\r\n# ändern Bestand\r\nomVar_stock_carver_option = StringVar()\r\nstock_carver_option = [\"hinzufügen\", \"entnehmen\", \"Menge neu setzen\", \"\"]\r\n\r\nla_stock_carver_option = ttk.Label(lf_new_stock, text=\"Änderung:\")\r\nla_stock_carver_option.grid(row=5, column=0, pady=2, sticky=W, padx=6)\r\nom_stock_carver_change = OptionMenu(lf_new_stock, omVar_stock_carver_option, *stock_carver_option)\r\nom_stock_carver_change.grid(row=5, column=1, pady=1, sticky=W, padx=4)\r\n\r\n# speichern Bestand\r\nb_stock_carver_save = Button(lf_new_stock, text=\"speichern\", command=save_stock_carver)\r\nb_stock_carver_save.grid(row=6, columnspan=3, pady=20, padx=2)\r\n\r\n# TAB ALLGEMEIN\r\n\r\n# Labelframe \"Allgemein\"\r\nlf_new_file = ttk.LabelFrame(tab_admin_misc, text=\"Neue Datei\")\r\nlf_new_file.grid(row=0, column=0, columnspan=3, ipadx=63, padx=8, pady=15)\r\nla_new_file = ttk.Label(lf_new_file, text=\"Hier können Sie eine neue Datei\\n\"\r\n \"zur Skiverwaltung anlegen.\")\r\nla_new_file.grid(row=0, column=0, columnspan=3, pady=15, padx=4)\r\n\r\nb_new_file = Button(lf_new_file, text=\"Datei anlegen\", command=new_ski_json)\r\nb_new_file.grid(row=1, columnspan=3, pady=20, padx=4, sticky=W)\r\n\r\n# Labelframe \"Lawinenreport\"\r\nlf_avalanche_report = ttk.LabelFrame(tab_admin_misc, text=\"Lawinenreport\")\r\nlf_avalanche_report.grid(row=1, column=0, columnspan=3, ipadx=42, padx=8, pady=15)\r\nla_new_file = ttk.Label(lf_avalanche_report, text=\"Report zu Lawinenabgängen in den USA\\nStand: März 
2020\")\r\nla_new_file.grid(row=0, column=0, columnspan=3, pady=12, padx=4)\r\n\r\nb_avalanche_report = Button(lf_avalanche_report, text=\"Report anzeigen\", command=show_avalanche_map)\r\nb_avalanche_report.grid(row=1, columnspan=3, pady=15, padx=4, sticky=W)\r\n\r\nroot.title('Pythonski')\r\nroot.mainloop()\r\n","sub_path":"save_data_from_gui/Skiverleih.py","file_name":"Skiverleih.py","file_ext":"py","file_size_in_byte":22355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"544690168","text":"\"\"\"\nHow do you find duplicate numbers in an array if it contains multiple duplicates?\n\"\"\"\n\ndef find_all_dupes(numbers):\n checked_nums = []\n duplicates = []\n for i in numbers:\n if i not in checked_nums:\n checked_nums.append(i)\n else:\n duplicates.append(i)\n return duplicates\n\nnumbers = [1,2,3,4,5,6,7,5,8,9,7,10]\ndupes = find_all_dupes(numbers)\nprint(str(dupes))","sub_path":"array_questions/find_multiple_duplicates.py","file_name":"find_multiple_duplicates.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"428581776","text":"\"\"\"\nType annotations for ivs service client paginators.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html)\n\nUsage::\n\n ```python\n import boto3\n\n from mypy_boto3_ivs import IVSClient\n from mypy_boto3_ivs.paginator import (\n ListChannelsPaginator,\n ListPlaybackKeyPairsPaginator,\n ListRecordingConfigurationsPaginator,\n ListStreamKeysPaginator,\n ListStreamsPaginator,\n )\n\n client: IVSClient = boto3.client(\"ivs\")\n\n list_channels_paginator: ListChannelsPaginator = client.get_paginator(\"list_channels\")\n list_playback_key_pairs_paginator: ListPlaybackKeyPairsPaginator = client.get_paginator(\"list_playback_key_pairs\")\n list_recording_configurations_paginator: ListRecordingConfigurationsPaginator = client.get_paginator(\"list_recording_configurations\")\n list_stream_keys_paginator: ListStreamKeysPaginator = client.get_paginator(\"list_stream_keys\")\n list_streams_paginator: ListStreamsPaginator = client.get_paginator(\"list_streams\")\n ```\n\"\"\"\nfrom typing import Iterator\n\nfrom botocore.paginate import Paginator as Boto3Paginator\n\nfrom .type_defs import (\n ListChannelsResponseTypeDef,\n ListPlaybackKeyPairsResponseTypeDef,\n ListRecordingConfigurationsResponseTypeDef,\n ListStreamKeysResponseTypeDef,\n ListStreamsResponseTypeDef,\n PaginatorConfigTypeDef,\n StreamFiltersTypeDef,\n)\n\n__all__ = (\n \"ListChannelsPaginator\",\n \"ListPlaybackKeyPairsPaginator\",\n \"ListRecordingConfigurationsPaginator\",\n \"ListStreamKeysPaginator\",\n \"ListStreamsPaginator\",\n)\n\nclass ListChannelsPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListChannels)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#listchannelspaginator)\n \"\"\"\n\n def paginate(\n self,\n *,\n filterByName: str = None,\n filterByRecordingConfigurationArn: str = None,\n PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListChannelsResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListChannels.paginate)\n [Show boto3-stubs 
documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#listchannelspaginator)\n \"\"\"\n\nclass ListPlaybackKeyPairsPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListPlaybackKeyPairs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#listplaybackkeypairspaginator)\n \"\"\"\n\n def paginate(\n self, *, PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListPlaybackKeyPairsResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListPlaybackKeyPairs.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#listplaybackkeypairspaginator)\n \"\"\"\n\nclass ListRecordingConfigurationsPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListRecordingConfigurations)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#listrecordingconfigurationspaginator)\n \"\"\"\n\n def paginate(\n self, *, PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListRecordingConfigurationsResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListRecordingConfigurations.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#listrecordingconfigurationspaginator)\n \"\"\"\n\nclass ListStreamKeysPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListStreamKeys)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#liststreamkeyspaginator)\n \"\"\"\n\n def paginate(\n self, *, channelArn: str, PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListStreamKeysResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListStreamKeys.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#liststreamkeyspaginator)\n \"\"\"\n\nclass ListStreamsPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListStreams)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#liststreamspaginator)\n \"\"\"\n\n def paginate(\n self,\n *,\n filterBy: \"StreamFiltersTypeDef\" = None,\n PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListStreamsResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/ivs.html#IVS.Paginator.ListStreams.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_ivs/paginators.html#liststreamspaginator)\n 
\"\"\"\n","sub_path":"typings/mypy_boto3/ivs/paginator.pyi","file_name":"paginator.pyi","file_ext":"pyi","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"170412548","text":"\"\"\"FTP file server\n\n*** Implementation: day5/ftp ***\n\nFeatures\n\t[1] Split into a server and a client; several clients must be able to work at the same time.\n\t[2] The client can list which files are in the server's file library.\n\t[3] The client can download a file from the library to the local machine.\n\t[4] The client can upload a local file to the library.\n\t[5] Use print on the client side to show command prompts that guide the user\n1. Functional requirements analysis\n2. Technical analysis\n    server / client\n    transport: TCP, because downloads must be reliable and the connection stays open for a long time\n    the client uses multi-process/multi-threaded concurrency so several clients can work on files at once\n    the server holds the data store\n3. Structural design\n    client : issues requests\n\n    server : wrapped in a class\n4. Module breakdown and implementation order\n    network/concurrency module\n    enter the file library   entrance\n    list files               list\n    download a file          get_filename\n    upload a file            put_filename\n    leave the file library   exit\n5. Protocol\n    file listing: only regular files (non-hidden files) are reported\n    client request types: L           list files\n                 G filename  download a file\n                 P filename  upload a file\n                 Q           quit the file library\n\"\"\"\nfrom socket import *\nfrom threading import Thread\nimport os ,time ,sys\n\nHOST = \"0.0.0.0\"\nPORT = 22222\nADDR = (HOST,PORT)\nFTP = \"/home/tarena/下载/wenjianku/\"\n
\ndef main():\n    sockfd = socket()\n    sockfd.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)\n    sockfd.bind(ADDR)\n    sockfd.listen(3)\n\n    print(\"Listen the port %d\" %PORT)\n    while True:\n        try:\n            connfd ,addr = sockfd.accept()\n        except KeyboardInterrupt:\n            sys.exit(\"Exit\")\n        except Exception as e:\n            print(e)\n            continue\n        ftp = FTPMyThead(connfd)  # one daemon thread per client connection\n        ftp.setDaemon(True)\n        ftp.start()\nclass FTPMyThead(Thread):\n    def __init__(self,connfd):\n        self.connfd = connfd\n        super().__init__()\n\n    def run(self):\n        # dispatch requests: \"L\" lists the files, \"G <filename>\" downloads one\n        while True:\n            data = self.connfd.recv(1024).decode()\n            print(data)\n            if data == \"L\":\n                self.do_list()\n            elif data.split(\" \")[0] == \"G\":\n                self.do_get(data)\n
\n    def do_list(self):\n        files = os.listdir(FTP)\n        if not files:\n            self.connfd.send(\"文件库为空\".encode())\n            return\n        else:\n            self.connfd.send(b\"ok\")\n            time.sleep(0.1)\n\n        files1 = \"\"\n        for file in files:\n            if file[0] != \".\" and os.path.isfile(FTP+file):\n                files1 += \"\\n\" + file\n\n        self.connfd.send(files1.encode())\n\n    def do_get(self,data):\n        filename = data.split(\" \")[-1]\n        try:\n            obj = open(FTP+filename,\"rb\")\n        except:\n            self.connfd.send(\"该文件不存在\".encode())\n            return\n        else:\n            self.connfd.send(b\"ok\")\n            time.sleep(0.1)\n        while True:\n            data = obj.read(1024)\n            if not data:\n                time.sleep(0.1)\n                self.connfd.send(b\"**\")  # end-of-file marker for the client\n                obj.close()\n                break\n            self.connfd.send(data)\n\n\n\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n\n\n\n\n\n\n","sub_path":"xiaojian/xiaojian/second_phase/day10/my_ftp_s.py","file_name":"my_ftp_s.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"377950440","text":"import abc\nimport random\n\nfrom itertools import cycle\n\nfrom .constants import ABBREVS\nfrom .constants import COMMANDS\nfrom .gestures import Gestures\n\nclass Player(abc.ABC):\n\n    def __init__(self, name):\n        self.name = name\n\n    @abc.abstractmethod\n    def throw(self):\n        raise NotImplementedError\n\n\nclass AlwaysPlayer(Player):\n    \"Player always throws one type of gesture\"\n\n    def __init__(self, name, gesture):\n        super().__init__(name)\n        self.gesture = gesture\n\n    def throw(self):\n        return self.gesture\n\n\nclass RandomPlayer(Player):\n    \"Player throws random gestures\"\n\n    def throw(self):\n        return random.choice(list(Gestures))\n\n\nclass CyclePlayer(Player):\n    \"Player cycles through every type of gesture\"\n\n    throws = cycle(Gestures)\n\n    def throw(self):\n        return next(self.throws)\n\n\nclass StdinPlayer(Player):\n    \"Player reads throw from stdin\"\n\n    def throw(self):\n        while True:\n            throw = input(f'{self.name} (RPS)> ').lower()\n            if throw not in COMMANDS:\n                print('invalid')\n            elif throw in ABBREVS:\n                throw = ABBREVS[throw]\n                return throw\n            else:\n                return throw\n","sub_path":"rockpaperscissors/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"587503263","text":"from unittest.case import TestCase\n\nfrom SuiSiannAdminApp.models import 句表\n\n\nclass 羅馬字單元試驗(TestCase):\n    def test_正確(self):\n        句 = 句表.objects.create(\n            原始漢字=\"豬\",\n            原始臺羅=\"ti\",\n            漢字=\"豬仔\",\n            臺羅=\"ti-á\"\n        )\n        self.assertEqual(句.羅馬字, \"ti-á\", )\n\n    def test_無對齊(self):\n        句 = 句表.objects.create(\n            原始漢字=\"豬\",\n            原始臺羅=\"ti\",\n            漢字=\"豬仔\",\n            臺羅=\"ti-á\"\n        )\n        self.assertEqual(句.原始羅馬字, \"ti\", )\n","sub_path":"tests/SuiSiannAdminApp/test羅馬字臺羅試驗.py","file_name":"test羅馬字臺羅試驗.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"467720127","text":"# runtime O(n)\n# memory O(n)\n\ndef contiguous_bin(array):\n    d = {}\n    maxx = 0\n    summ = 0\n    d[0] = -1\n    \n    # As explained in class, we maintain a running sum\n    # to which we add one if we encounter 1 and\n    # -1 if we encounter 0. We store the first occurrence\n    # of a particular sum. If in the future we encounter\n    # the same sum, that means that the subarray in between\n    # these indices has an equal number of 1's and 0's\n    for n in range(len(array)):\n        if array[n]:\n            summ += 1\n        elif not array[n]:\n            summ -= 1\n        \n        if summ in d:\n            maxx = max(maxx, n - d[summ])\n        \n        elif summ not in d:\n            d[summ] = n\n        \n    return maxx\n\narray = [0,1,0,1,0,0,1,1]\nprint(contiguous_bin(array))","sub_path":"problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"218260925","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\n@time: 2016/11/23 16:36\n@author: Silence\n'''\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pre_saltandpepper\n'''\n2D images can be filtered with a low-pass or a high-pass filter:\nlow-pass filtering (LPF) helps to remove noise and blurs the image,\nhigh-pass filtering (HPF) helps to find image edges.\n\n'''\n\ndef pre2DFilter():\n    '''\n    Filtering with custom kernels\n    :return: displays the images\n    '''\n    img = cv2.imread(r'69-1.jpg')\n    # kernel1 = np.ones((5,5),np.float32)/25\n    kernel2 = np.array(([0, -1, 0], [-1, 5, -1], [0, -1, 0]), np.float32)\n    kernel3 = np.array(([-1, -1, -1], [-1, 9, -1], [-1, -1, -1]), np.float32)\n    kernel4 = np.array(([1, -2, 1], [-2, 5, -2], [1, -2, 1]), np.float32)\n    # dst = cv2.filter2D(img,-1,kernel1)\n    dst1 = cv2.filter2D(img, -1, kernel2)\n    dst2 = cv2.filter2D(img, -1, kernel3)\n    dst3 = cv2.filter2D(img, -1, kernel4)\n\n    plt.subplot(221), plt.imshow(img), plt.title('Original')\n    plt.xticks([]), plt.yticks([])\n    plt.subplot(222), plt.imshow(dst1), plt.title('kernel1')\n    plt.xticks([]), plt.yticks([])\n    plt.subplot(223), plt.imshow(dst2), plt.title('kernel2')\n    plt.xticks([]), plt.yticks([])\n    plt.subplot(224), plt.imshow(dst3), plt.title('kernel3')\n    plt.xticks([]), plt.yticks([])\n    plt.show()\n    cv2.imwrite('kernel1.jpg', dst1)\n    cv2.imwrite('kernel2.jpg', dst2)\n    cv2.imwrite('kernel3.jpg', dst3)\n
\n\ndef preAveFilter():\n    '''\n    cv2.blur and cv2.boxFilter\n    :return: displays the images\n    '''\n    img = cv2.imread(r'F:\\image\\imageData\\negative\\front\\c1\\gray\\22-1_s_zoom_gray.jpg')\n    blur = cv2.blur(img,(5,5))\n    boxfilter = cv2.boxFilter(img,ddepth=-1,ksize=(3,3),normalize=False)\n\n    plt.subplot(131), plt.imshow(img), plt.title('Original')\n    plt.xticks([]), plt.yticks([])\n    plt.subplot(132), plt.imshow(blur), plt.title('Blur')\n    plt.xticks([]), plt.yticks([])\n    plt.subplot(133), plt.imshow(boxfilter), plt.title('BoxFilter')\n    plt.xticks([]), plt.yticks([])\n    plt.show()\n\ndef preGaussianBlur():\n    '''\n    Gaussian filtering\n    :return: displays the images\n    '''\n    img = cv2.imread(r'F:\\image\\imageData\\negative\\front\\c1\\gray\\22-1_s_zoom_gray.jpg')\n    img = pre_saltandpepper.saltAndPepper(img,0.01)\n    blur = cv2.GaussianBlur(img,(5,5),0)\n    plt.subplot(121), plt.imshow(img), plt.title('Original')\n    plt.xticks([]), plt.yticks([])\n    plt.subplot(122), plt.imshow(blur), plt.title('GaussianBlur')\n    plt.xticks([]), plt.yticks([])\n    plt.show()\n
\ndef preMedianBLur():\n    '''\n    Median filtering\n    :return: displays the images\n    '''\n    img = cv2.imread(r'F:\\image\\imageData\\negative\\front\\c1\\gray\\22-1_s_zoom_gray.jpg')\n    img = pre_saltandpepper.saltAndPepper(img, 0.1)\n    blur = cv2.medianBlur(img,5)\n    plt.subplot(121), plt.imshow(img), plt.title('Original')\n    plt.xticks([]), plt.yticks([])\n    plt.subplot(122), plt.imshow(blur), plt.title('MedianBLur')\n    plt.xticks([]), plt.yticks([])\n    plt.show()\n\ndef preBilateralFilter():\n    '''\n    Bilateral filtering\n    cv2.bilateralFilter(img,d,'p1','p2') takes four parameters:\n    d: the diameter of the neighbourhood\n    p1: standard deviation of the spatial Gaussian\n    p2: standard deviation of the intensity-similarity Gaussian.\n    :return:\n    '''\n    img = cv2.imread(r'F:\\image\\imageData\\negative\\front\\c1\\gray\\22-1_s_zoom_gray.jpg')\n    img = pre_saltandpepper.saltAndPepper(img, 0.01)\n    blur = cv2.bilateralFilter(img,9,75,75)\n    # 9: neighbourhood diameter, 75: spatial Gaussian sigma, 75: intensity-similarity sigma\n    plt.subplot(121), plt.imshow(img), plt.title('Original')\n    plt.xticks([]), plt.yticks([])\n    plt.subplot(122), plt.imshow(blur), plt.title('BilateralFilter')\n    plt.xticks([]), plt.yticks([])\n    plt.show()\n\n\nif __name__ == '__main__':\n    pre2DFilter()\n    # preAveFilter()\n    # preGaussianBlur()\n    # preMedianBLur()\n    # preBilateralFilter()","sub_path":"pre_filter.py","file_name":"pre_filter.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"372843262","text":"def quicksort(arr):\n    if len(arr)>=1:\n        pivot = arr.pop(len(arr)//2)\n    else:\n        return arr\n    items_greater = []\n    items_lesser = []\n    for ele in arr:\n        if ele>=pivot:\n            items_greater.append(ele)\n        else:\n            items_lesser.append(ele)\n\n    return quicksort(items_lesser) + [pivot] + quicksort(items_greater)\n\narr = list(map(int,input().split()))\nprint(quicksort(arr))","sub_path":"Python/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"282238677","text":"#coding=utf-8\r\nimport os\r\nimport time\r\n\r\nk=1\r\nwhile k <2:\r\n    now_time=time.strftime('%H_%M')\r\n\r\n    print (u"开始运行初始化脚本:") \r\n    os.chdir("E:\\workspace\\rongxin")\r\n    os.system('ready1.py') # run the script\r\n    print (u"下载及安装app") \r\n    time.sleep(10)\r\n    print (now_time)\r\n    \r\n    ''' \r\n    #now_time=time.strftime('_%M')\r\n    os.chdir("E:\\workspace\\rongxin")\r\n    os.system('all-test.py') #执行脚本\r\n    \r\n    if now_time == '00_10' or now_time == '02_10' or now_time == '04_10' or now_time == '06_10' :\r\n        print (u"开始运行测试用例脚本:")\r\n        os.chdir("E:\\workspace\\rongxin")\r\n        os.system('all-test.py') #执行脚本\r\n        print (u"运行完成退出")\r\n    else:\r\n        time.sleep(10)\r\n        print (now_time)\r\n    \r\n    if now_time == '01_50' or now_time == '03_50' or now_time == '05_50' or now_time == '07_50':\r\n        print (u"开始运行清理群组数据脚本:")\r\n        os.chdir("E:\\workspace\\rongxin")\r\n        os.system('clear_group.py') #执行脚本\r\n        print (u"运行完成退出") \r\n    else:\r\n        
time.sleep(10)\n print (now_time)\n \n if now_time == '02_00' or now_time == '04_00' or now_time == '06_00' or now_time == '08_00':\n print (u\"开始运行清理讨论组数据脚本:\")\n os.chdir(\"E:\\\\workspace\\\\rongxin\")\n os.system('clear_discussion_group.py') #执行脚本\n print (u\"运行完成退出\") \n else:\n time.sleep(10)\n print (now_time) \n '''","sub_path":"youhui_ios/start_run.py","file_name":"start_run.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"183143408","text":"import pandas as pd\n\nimport pprint\nimport re\nimport json\nimport itertools\n#import networkx as nx\nimport numpy as np\n\nfrom datetime import datetime\n\nfrom .twitter_cascade_reconstruction import full_reconstruction,get_reply_cascade_root_tweet\n\n\ndef load_json(fn):\n\n json_data = []\n \n if type(fn) == str:\n with open(fn,'rb') as f:\n for line in f:\n json_data.append(json.loads(line))\n else:\n for fn0 in fn:\n with open(fn0,'rb') as f:\n for line in f:\n json_data.append(json.loads(line))\n\n return(json_data)\n \ndef convert_timestamps(dataset,timestamp_field = \"nodeTime\"):\n\n \"\"\"\n Converts all timestamps to ISO 8601 formatted strings\n \"\"\"\n \n try:\n dataset[timestamp_field] = pd.to_datetime(dataset[timestamp_field], unit='s')\n except:\n try:\n dataset[timestamp_field] = pd.to_datetime(dataset[timestamp_field], unit='ms')\n except:\n dataset[timestamp_field] = pd.to_datetime(dataset[timestamp_field])\n\n dataset[timestamp_field] = dataset[timestamp_field].dt.strftime('%Y-%m-%dT%H:%M:%SZ')\n \n return(dataset)\n\ndef get_info_id_from_text(text_list = [], keywords = []):\n\n word_list = r\"\\b\" + keywords[0] + r\"\\b\"\n for w in keywords[1:]:\n word_list += \"|\" + r\"\\b\" + w + r\"\\b\"\n\n info_ids = []\n for text in text_list:\n info_ids += re.findall(word_list,text)\n \n return(list(set(info_ids)))\n \ndef get_info_id_from_fields(row, fields=['entities.hashtags.text']):\n\n \"\"\"\n Extract information IDs from specified fields in the JSON\n\n :param row: A DataFrame row containing the JSON fields\n :param fields: A list of field paths from which to extract the info IDs, e.g. entities.hashtags.text, entities.user_mentions.screen_name\n :returns: a list of information IDs that are in the specified fields\n \"\"\"\n \n info_ids = []\n for path in fields:\n path = path.split('.')\n\n val = row.copy()\n\n for i,f in enumerate(path):\n \n if (isinstance(val,pd.Series) or type(val) == dict) and f in val.keys():\n #move down JSON path\n val = val[f]\n\n if type(val) == list:\n #iterate over list\n for v in val:\n if type(v) == dict:\n v = v[path[i+1]]\n info_ids.append(v)\n break\n elif i == len(path) and type(val) == str:\n info_ids.append(val)\n \n return list(set(info_ids))\n\n\ndef extract_telegram_data(fn='telegram_data.json',\n info_id_fields=None,\n keywords = [],\n anonymized=False):\n\n \"\"\"\n Extracts fields from Telegram JSON data\n\n :param fn: A filename or list of filenames which contain the JSON Telegram data\n :param info_id_fields: A list of field paths from which to extract the information IDs. 
If None, don't extract any.\n    """\n    \n    json_data = load_json(fn)\n    data = pd.DataFrame(json_data)\n\n    get_info_ids = False\n    if not info_id_fields is None or len(keywords) > 0:\n        get_info_ids = True\n    \n    if anonymized:\n        name_suffix = "_h"\n        text_suffix = "_m"\n    else:\n        name_suffix = ""\n        text_suffix = ""\n    \n    output_columns = ['nodeID', 'nodeUserID', 'parentID', 'rootID', 'actionType', 'nodeTime',\n                      'platform','communityID']\n    if get_info_ids:\n        output_columns.append('informationIDs')\n\n    \n    print('Extracting fields...')\n\n    if len(keywords) > 0:\n        data.loc[:,'informationIDs'] = data['doc'].apply(lambda x: get_info_id_from_text([x['text' + text_suffix]], keywords))\n    elif not info_id_fields is None:\n        data.loc[:,'informationIDs'] = pd.Series([get_info_id_from_fields(c,info_id_fields) for i,c in data.iterrows()])\n\n    data = data.drop_duplicates('uid' + name_suffix)\n    \n    data.loc[:,'actionType']=['message']*len(data)\n\n    data.loc[:,'nodeTime'] = data['norm'].apply(lambda x: x['timestamp'])\n    \n    data.loc[:,'communityID'] = data['doc'].apply(lambda x: x['peer']['username'] if 'peer' in x.keys() else None)\n\n    data.loc[:,'nodeID'] = data['doc'].apply(lambda x: str(x['to_id']['channel_id']) + '_' + str(x['id']))\n\n    data.loc[:,'nodeUserID'] = data['doc'].apply(lambda x: x['from_id' + name_suffix] if 'from_id' + name_suffix in x.keys() else None)\n    data.loc[data['nodeUserID'].isnull(),'nodeUserID'] = data.loc[data['nodeUserID'].isnull(),'norm'].apply(lambda x: x['author'])\n    \n    data.loc[:,'platform'] = 'telegram'\n    \n    data.loc[:,'parentID'] = data['doc'].apply(lambda x: str(x['fwd_from']['channel_id']) + '_' + str(x['fwd_from']['channel_post']) if 'fwd_from' in x.keys() and not x['fwd_from'] is None and not x['fwd_from']['channel_id'] is None and not x['fwd_from']['channel_post'] is None else None)\n
\n    # replies take precedence as parents, but must not clobber the forward parents of non-replies\n    reply_parent = data['doc'].apply(lambda x: str(x['to_id']['channel_id']) + '_' + str(x['reply_to_msg_id']) if 'reply_to_msg_id' in x.keys() and not x['reply_to_msg_id'] is None else None)\n    data.loc[~reply_parent.isna(),'parentID'] = reply_parent[~reply_parent.isna()]\n\n    data.loc[:,'rootID'] = '?'\n    data.loc[data['parentID'].isna(),'rootID'] = data.loc[data['parentID'].isna(),'nodeID']\n\n    data.loc[data['parentID'].isna(),'parentID'] = data.loc[data['parentID'].isna(),'nodeID']\n\n    data = data[data['parentID'].isin(list(set(data['nodeID'])))]\n\n    data = data[output_columns]\n    \n    data = get_reply_cascade_root_tweet(data)\n    \n    #remove broken portions\n    data = data[data['rootID'].isin(list(set(data['nodeID'])))]\n\n    print('Sorting...')\n    data = data.sort_values('nodeTime').reset_index(drop=True) \n\n    #initialize info ID column with empty lists\n    data['threadInfoIDs'] = [[] for i in range(len(data))]\n    \n    #for some reason having a non-object column in the dataframe messes up the assignment of lists to individual cell values\n    #remove it temporarily and add back later\n    nodeTimes = data['nodeTime']\n    data = data[[c for c in data.columns if c != 'nodeTime']]\n    \n    #get children of node\n    def get_children(nodeID):\n\n        children = data[data['parentID'] == nodeID]['nodeID']\n        children = children[children.values != nodeID]\n        \n        return(children)\n
\n\n    #all replies/fwds of a message mentioning a unit of information are also assigned that unit of information\n    def add_info_to_children(nodeID,list_info=[]):\n\n        infos = list(data[data['nodeID'] == nodeID]['informationIDs'].values[0])\n\n        list_info = list_info.copy()\n\n        children = get_children(nodeID)\n        \n        if len(children) > 0:\n\n            list_info += infos\n            \n            if len(list_info) > 0 and len(children) > 1:\n                data.loc[children.index.values,'threadInfoIDs'] = [list_info for i in range(len(children))]\n            elif len(list_info) > 0 and len(children) == 1:\n                data.at[children.index[0],'threadInfoIDs'] = list_info\n\n        for child in children.values:\n            add_info_to_children(child,list_info)\n\n    if get_info_ids:\n        print('Adding information IDs to children...')\n        #for each thread in data, propagate information IDs to children\n        roots = data['rootID'].unique()\n        for r,root in enumerate(roots):\n            add_info_to_children(root)\n            if r % 50 == 0:\n                print('{}/{}'.format(r,len(roots)))\n\n    data['nodeTime'] = nodeTimes\n\n    if get_info_ids:\n        data['informationIDs'] = data.apply(lambda x: list(set(x['informationIDs'] + x['threadInfoIDs'])),axis=1)\n    \n        data = data[data['informationIDs'].str.len() > 0]\n    \n    print('Expanding events...')\n    #expand lists of info IDs into separate rows (i.e. an individual event is duplicated if it pertains to multiple information IDs)\n    s = data.apply(lambda x: pd.Series(x['informationIDs']), axis=1).stack().reset_index(level=1, drop=True)\n    s.name = 'informationID'\n\n    data = data.drop('informationIDs', axis=1).join(s).reset_index(drop=True)\n\n    data = data.drop('threadInfoIDs',axis=1)\n    data = data.sort_values('nodeTime').reset_index(drop=True)\n    data = convert_timestamps(data)\n    data = data[~data['communityID'].isnull()]\n    \n    print('Done!')\n    return data\n    
\n\ndef extract_reddit_data(fn='reddit_data.json',\n                        info_id_fields=None,\n                        keywords = [],\n                        anonymized=False):\n\n    """\n    Extracts fields from Reddit JSON data\n\n    :param fn: A filename or list of filenames which contain the JSON Reddit data\n    :param info_id_fields: A list of field paths from which to extract the information IDs. If None, don't extract any.\n    """\n\n    json_data = load_json(fn)\n    data = pd.DataFrame(json_data)\n\n    get_info_ids = False\n    if not info_id_fields is None or len(keywords) > 0:\n        get_info_ids = True\n    \n    if anonymized:\n        name_suffix = "_h"\n        text_suffix = "_m"\n    else:\n        name_suffix = ""\n        text_suffix = ""\n    \n    output_columns = ['nodeID', 'nodeUserID', 'parentID', 'rootID', 'actionType',\n                      'nodeTime','platform','communityID']\n    if get_info_ids:\n        output_columns.append('informationIDs')\n    \n    print('Extracting fields...')\n    if len(keywords) > 0:\n        data['text'] = data['body' + text_suffix].replace(np.nan, '', regex=True) + data['selftext' + text_suffix].replace(np.nan, '', regex=True) + data['title' + text_suffix].replace(np.nan, '', regex=True)\n        data.loc[:,'informationIDs'] = data['text'].apply(lambda x: get_info_id_from_text([x], keywords))\n    elif not info_id_fields is None:\n        data.loc[:,'informationIDs'] = pd.Series([get_info_id_from_fields(c,info_id_fields) for i,c in data.iterrows()])\n    data['n_info_ids'] = data['informationIDs'].apply(len)\n    data = data.sort_values("n_info_ids",ascending=False)\n\n    data = data.drop_duplicates('id' + name_suffix)\n    \n    data.rename(columns={'id' + name_suffix:'nodeID','author' + name_suffix:'nodeUserID',\n                         'created_utc':'nodeTime','parent_id' + name_suffix:'parentID','link_id' + name_suffix:'rootID'}, inplace=True)\n    \n    data.loc[:,'actionType']=['comment']*len(data)\n    data.loc[~data['title' + text_suffix].isnull(),'actionType'] = 'post'\n    \n    data.loc[data['actionType'] == "comment",'nodeID']=['t1_' + x for x in data.loc[data['actionType'] == "comment",'nodeID']]\n    data.loc[data['actionType'] == "post",'nodeID']=['t3_' + x for x in data.loc[data['actionType'] == "post",'nodeID']]\n\n    data.loc[data['actionType'] == "post",'rootID'] = data.loc[data['actionType'] == 
\"post\",'nodeID']\n data.loc[data['actionType'] == \"post\",'parentID'] = data.loc[data['actionType'] == \"post\",'nodeID']\n\n data.loc[:,'communityID'] = data['subreddit_id']\n\n data.loc[:,'platform'] = 'reddit'\n \n #remove broken portions\n data = data[data['parentID'].isin(list(set(data['nodeID'])))]\n data = data[data['rootID'].isin(list(set(data['nodeID'])))]\n\n print('Sorting...')\n data = data.sort_values('nodeTime').reset_index(drop=True) \n\n data = data[output_columns]\n \n #initialize info ID column with empty lists\n data['threadInfoIDs'] = [[] for i in range(len(data))]\n \n #for some reason having a non-object column in the dataframe messes up the assignment of lists to individual cell values\n #remove it temporarily and add back later\n nodeTimes = data['nodeTime']\n data = data[[c for c in data.columns if c != 'nodeTime']]\n \n \n #get children of node\n def get_children(nodeID):\n\n children = data[data['parentID'] == nodeID]['nodeID']\n children = children[children.values != nodeID]\n \n return(children)\n\n print(data)\n # all comments on a post/comment mentioning a unit of information are also assigned that unit of information\n def add_info_to_children(nodeID,list_info=[]):\n\n infos = list(data[data['nodeID'] == nodeID]['informationIDs'].values[0])\n\n list_info = list_info.copy()\n\n children = get_children(nodeID)\n \n if len(children) > 0:\n\n list_info += infos\n \n if len(list_info) > 0 and len(children) > 1:\n data.loc[children.index.values,'threadInfoIDs'] = [list_info for i in range(len(children))]\n elif len(list_info) > 0 and len(children) == 1:\n data.at[children.index[0],'threadInfoIDs'] = list_info\n\n for child in children.values:\n add_info_to_children(child,list_info)\n\n if get_info_ids:\n print('Adding information IDs to children...')\n #for each thread in data, propagate infromation IDs to children\n roots = data['rootID'].unique()\n for r,root in enumerate(roots):\n add_info_to_children(root)\n if r % 50 == 0:\n print('{}/{}'.format(r,len(roots)))\n\n \n data['nodeTime'] = nodeTimes\n\n if get_info_ids:\n data['informationIDs'] = data.apply(lambda x: list(set(x['informationIDs'] + x['threadInfoIDs'])),axis=1)\n \n data = data[data['informationIDs'].str.len() > 0]\n \n print('Expanding events...')\n #expand lists of info IDs into seperate rows (i.e. an individual event is duplicated if it pertains to multiple information IDs)\n s = data.apply(lambda x: pd.Series(x['informationIDs']), axis=1).stack().reset_index(level=1, drop=True)\n s.name = 'informationID'\n \n data = data.drop('informationIDs', axis=1).join(s).reset_index(drop=True)\n\n data = data.drop('threadInfoIDs',axis=1)\n data = data.sort_values('nodeTime').reset_index(drop=True)\n data = convert_timestamps(data)\n\n print('Done!')\n return data\n \n\ndef extract_twitter_data(fn='twitter_data.json',\n info_id_fields=None,\n keywords = [],\n anonymized=False):\n\n \"\"\"\n Extracts fields from Twitter JSON data\n\n :param fn: A filename or list of filenames which contain the JSON Twitter data\n :param info_id_fields: A list of field paths from which to extract the information IDs. If None, don't extract any.\n :param keywords:\n :params anonymized: Whether the data is in raw Twitter API format (False) or if it is in the processed and anonymized SocialSim data format (True). 
The anonymized format has several modifications to field names.\n \"\"\"\n \n json_data = load_json(fn)\n data = pd.DataFrame(json_data)\n\n get_info_ids = False\n if not info_id_fields is None or len(keywords) > 0:\n get_info_ids = True\n\n \n if anonymized:\n name_suffix = \"_h\"\n text_suffix = \"_m\"\n else:\n name_suffix = \"\"\n text_suffix = \"\"\n \n data = data.sort_values(\"timestamp_ms\").reset_index(drop=True)\n\n output_columns = ['nodeID', 'nodeUserID', 'parentID', 'rootID', 'actionType', 'nodeTime',\n 'partialParentID','platform']\n if get_info_ids:\n output_columns.append('informationIDs')\n \n print('Extracting fields...')\n tweets = data\n if len(keywords) > 0:\n data.loc[:,'informationIDs'] = data['text' + text_suffix].apply(lambda x: get_info_id_from_text([x], keywords))\n elif not info_id_fields is None:\n tweets.loc[:,'informationIDs'] = pd.Series([get_info_id_from_fields(t,info_id_fields) for i,t in tweets.iterrows()])\n tweets.loc[:,'n_info_ids'] = tweets['informationIDs'].apply(len)\n tweets = tweets.sort_values('n_info_ids',ascending=False).reset_index(drop=True)\n\n tweets = tweets.drop_duplicates('id_str' + name_suffix)\n \n tweets.rename(columns={'id_str' + name_suffix: 'nodeID',\n 'timestamp_ms': 'nodeTime'}, inplace=True)\n\n\n tweets.loc[:,'platform'] = 'twitter'\n tweets.loc[:,'nodeTime'] = pd.to_datetime(tweets['nodeTime'],unit='ms')\n tweets.loc[:,'nodeTime'] = tweets['nodeTime'].apply(lambda x: datetime.strftime(x,'%Y-%m-%dT%H:%M:%SZ'))\n\n tweets.loc[:,'nodeUserID'] = tweets['user'].apply(lambda x: x['id_str' + name_suffix])\n \n tweets.loc[:,'is_reply'] = (tweets['in_reply_to_status_id_str' + name_suffix] != '') & (~tweets['in_reply_to_status_id_str' + name_suffix].isna())\n\n if 'retweeted_status.in_reply_to_status_id_str' + name_suffix not in tweets:\n tweets.loc[:,'retweeted_status.in_reply_to_status_id_str' + name_suffix] = ''\n if 'quoted_status.in_reply_to_status_id_str' + name_suffix not in tweets:\n tweets.loc[:,'quoted_status.in_reply_to_status_id_str' + name_suffix] = ''\n if 'quoted_status.is_quote_status' not in tweets:\n tweets.loc[:,'quoted_status.is_quote_status'] = False\n if 'quoted_status' not in tweets:\n tweets.loc[:,'quoted_status'] = None\n \n #keep track of specific types of reply chains (e.g. 
retweet of reply, retweet of quote of reply) because the parents and roots will be assigned differently\n tweets.loc[:,'is_retweet_of_reply'] = (~tweets['retweeted_status.in_reply_to_status_id_str' + name_suffix].isna()) & (~(tweets['retweeted_status.in_reply_to_status_id_str' + name_suffix] == ''))\n tweets.loc[:,'is_retweet_of_quote'] = (~tweets['retweeted_status'].isna()) & (~tweets['quoted_status'].isna()) & (tweets['quoted_status.in_reply_to_status_id_str' + name_suffix] == '') \n tweets.loc[:,'is_retweet_of_quote_of_reply'] = (~tweets['retweeted_status'].isna()) & (~tweets['quoted_status'].isna()) & (~(tweets['quoted_status.in_reply_to_status_id_str' + name_suffix] == ''))\n tweets.loc[:,'is_retweet'] = (~tweets['retweeted_status'].isna()) & (~tweets['is_retweet_of_reply']) & (~tweets['is_retweet_of_quote']) & (~tweets['is_retweet_of_quote_of_reply'])\n\n \n tweets.loc[:,'is_quote_of_reply'] = (~tweets['quoted_status.in_reply_to_status_id_str' + name_suffix].isna()) & (~(tweets['quoted_status.in_reply_to_status_id_str' + name_suffix] == '')) & (tweets['retweeted_status'].isna())\n tweets.loc[:,'is_quote_of_quote'] = (~tweets['quoted_status.is_quote_status'].isna()) & (tweets['quoted_status.is_quote_status'] == True) & (tweets['retweeted_status'].isna())\n tweets.loc[:,'is_quote'] = (~tweets['quoted_status'].isna()) & (~tweets['is_quote_of_reply']) & (~tweets['is_quote_of_quote']) & (tweets['retweeted_status'].isna()) & (~tweets['is_reply']) \n\n tweets.loc[:,'is_orig'] = (~tweets['is_reply']) & (~tweets['is_retweet']) & (~tweets['is_quote']) & (~tweets['is_quote_of_reply']) & (~tweets['is_quote_of_quote']) & (~tweets['is_retweet_of_reply']) & (~tweets['is_retweet_of_quote_of_reply']) & (~tweets['is_retweet_of_quote'])\n\n \n tweet_types = ['is_reply','is_retweet','is_quote','is_orig','is_retweet_of_reply','is_retweet_of_quote','is_retweet_of_quote_of_reply','is_quote_of_reply','is_quote_of_quote']\n \n to_concat = []\n\n replies = tweets[tweets['is_reply']]\n if len(replies) > 0:\n #for replies we know immediate parent but not root\n replies.loc[:,'actionType'] = 'reply'\n replies.loc[:,'parentID'] = tweets['in_reply_to_status_id_str' + name_suffix]\n replies.loc[:,'rootID'] = '?'\n replies.loc[:,'partialParentID'] = tweets['in_reply_to_status_id_str' + name_suffix]\n\n to_concat.append(replies)\n\n retweets = tweets[ (tweets['is_retweet']) & (~tweets['is_quote']) ]\n if len(retweets) > 0:\n #for retweets we know the root but not the immediate parent\n retweets.loc[:,'actionType'] = 'retweet'\n retweets.loc[:,'rootID'] = retweets['retweeted_status'].apply(lambda x: x['id_str' + name_suffix])\n retweets.loc[:,'parentID'] = '?'\n retweets.loc[:,'partialParentID'] = retweets['retweeted_status'].apply(lambda x: x['id_str' + name_suffix])\n\n to_concat.append(retweets)\n \n retweets_of_replies = tweets[ tweets['is_retweet_of_reply'] ]\n if len(retweets_of_replies) > 0:\n #for retweets of replies the \"root\" is actually the reply not the ultimate root\n #the parent of a retweet of a reply will be the reply or any retweet of the reply\n #the root can be retraced by following parents up the tree\n retweets_of_replies.loc[:,'parentID'] = '?'\n retweets_of_replies.loc[:,'rootID'] = '?'\n retweets_of_replies.loc[:,'partialParentID'] = retweets_of_replies['retweeted_status'].apply(lambda x: x['in_reply_to_status_id_str' + name_suffix])\n retweets_of_replies.loc[:,'actionType'] = 'retweet'\n\n to_concat.append(retweets_of_replies)\n\n retweets_of_quotes = tweets[ tweets['is_retweet_of_quote'] 
]\n if len(retweets_of_quotes) > 0:\n #for retweets of quotes we know the root (from the quoted status) but not the parent\n #the parent will be either the quote or any retweets of it\n retweets_of_quotes.loc[:,'parentID'] = '?'\n retweets_of_quotes.loc[:,'rootID'] = retweets_of_quotes['quoted_status'].apply(lambda x: x['id_str' + name_suffix])\n retweets_of_quotes.loc[:,'partialParentID'] = retweets_of_quotes['retweeted_status'].apply(lambda x: x['id_str' + name_suffix])\n retweets_of_quotes.loc[:,'actionType'] = 'retweet'\n\n to_concat.append(retweets_of_quotes)\n\n retweets_of_quotes_of_replies = tweets[ tweets['is_retweet_of_quote_of_reply'] ]\n if len(retweets_of_quotes_of_replies) > 0:\n #for retweets of quotes of replies we don't know the root or the parent. the quoted status refers back to the reply not the final root\n #the parent will be either the quote or a retweet of the quote\n #we can find the root by tracking parents up the tree\n retweets_of_quotes_of_replies.loc[:,'parentID'] = '?'\n retweets_of_quotes_of_replies.loc[:,'rootID'] = '?'\n retweets_of_quotes_of_replies.loc[:,'partialParentID'] = retweets_of_quotes_of_replies['quoted_status'].apply(lambda x: x['id_str' + name_suffix])\n retweets_of_quotes_of_replies.loc[:,'actionType'] = 'retweet'\n\n to_concat.append(retweets_of_quotes_of_replies)\n \n quotes = tweets[tweets['is_quote']]\n if len(quotes) > 0:\n #for quotes we know the root but not the parent\n quotes.loc[:,'actionType'] = 'quote'\n quotes.loc[:,'rootID'] = quotes['quoted_status'].apply(lambda x: x['id_str' + name_suffix])\n quotes.loc[:,'parentID'] = '?'\n quotes.loc[:,'partialParentID'] = quotes['quoted_status'].apply(lambda x: x['id_str' + name_suffix])\n\n to_concat.append(quotes)\n\n quotes_of_replies = tweets[ tweets['is_quote_of_reply'] ]\n if len(quotes_of_replies) > 0:\n #for quotes of replies we don't know the root or the parent\n #the parent will be the reply or any retweets of the reply\n #the root can be tracked back using the parents in the tree\n quotes_of_replies.loc[:,'parentID'] = '?'\n quotes_of_replies.loc[:,'rootID'] = '?'\n quotes_of_replies.loc[:,'partialParentID'] = quotes_of_replies['quoted_status'].apply(lambda x: x['in_reply_to_status_id_str' + name_suffix])\n quotes_of_replies.loc[:,'actionType'] = 'quote'\n\n to_concat.append(quotes_of_replies)\n\n quotes_of_quotes = tweets[ tweets['is_quote_of_quote'] ]\n if len(quotes_of_quotes) > 0:\n #for quotes of quotes we don't know the parent or the root\n #the parent will be the first quote or any retweets of it\n #the root can be traced back through the parent tree\n quotes_of_quotes.loc[:,'parentID'] = '?'\n quotes_of_quotes.loc[:,'rootID'] = '?'\n quotes_of_quotes.loc[:,'partialParentID'] = quotes_of_quotes['quoted_status'].apply(lambda x: x['quoted_status_id_str'])\n quotes_of_quotes.loc[:,'actionType'] = 'quote'\n\n to_concat.append(quotes_of_quotes)\n\n orig_tweets = tweets[tweets['is_orig']]\n if len(orig_tweets) > 0:\n #for original tweets assign parent and root to be itself\n orig_tweets.loc[:,'actionType'] = 'tweet'\n orig_tweets.loc[:,'parentID'] = orig_tweets['nodeID']\n orig_tweets.loc[:,'rootID'] = orig_tweets['nodeID']\n orig_tweets.loc[:,'partialParentID'] = orig_tweets['nodeID']\n to_concat.append(orig_tweets)\n\n tweets = pd.concat(to_concat,ignore_index=True,sort=False)\n \n tweets = tweets[output_columns]\n\n print('Sorting...')\n tweets = tweets.sort_values(\"nodeTime\").reset_index(drop=True)\n\n print('Reconstructing cascades...')\n tweets = 
full_reconstruction(tweets)\n\n #initialize info ID column with empty lists\n tweets['threadInfoIDs'] = [[] for i in range(len(tweets))]\n \n tweets = tweets.reset_index(drop=True)\n \n #get children of node\n def get_children(nodeID):\n\n children = tweets[tweets['parentID'] == nodeID]['nodeID']\n children = children[children.values != nodeID]\n \n return(children)\n\n\n #all comments on a post/comment mentioning a unit of information are also assigned that unit of information\n def add_info_to_children(nodeID,list_info=[]):\n\n infos = list(tweets[tweets['nodeID'] == nodeID]['informationIDs'].values[0])\n\n list_info = list_info.copy()\n\n children = get_children(nodeID)\n \n if len(children) > 0:\n\n list_info += infos\n \n if len(list_info) > 0 and len(children) > 1:\n #assign parents information ID list to all children\n tweets.loc[children.index.values,'threadInfoIDs'] = [list_info for i in range(len(children))]\n elif len(list_info) > 0 and len(children) == 1:\n #assign parents information ID list to single child\n tweets.at[children.index[0],'threadInfoIDs'] = list_info\n\n for child in children.values:\n #navigate further down the tree\n add_info_to_children(child,list_info)\n\n\n if get_info_ids:\n print('Adding information IDs to children...')\n #for each thread in data, propagate infromation IDs to children\n roots = tweets['rootID'].unique()\n for r,root in enumerate(roots):\n if root in tweets['nodeID'].values:\n add_info_to_children(root)\n if r % 50 == 0:\n print('{}/{}'.format(r,len(roots)))\n\n tweets['informationIDs'] = tweets.apply(lambda x: list(set(x['informationIDs'] + x['threadInfoIDs'])),axis=1)\n tweets = tweets[tweets['informationIDs'].str.len() > 0]\n\n #tweets = tweets.drop(\"threadIn\n \n if get_info_ids:\n\n print('Expanding events...')\n #expand lists of info IDs into seperate rows (i.e. 
an individual event is duplicated if it pertains to multiple information IDs)\n s = tweets.apply(lambda x: pd.Series(x['informationIDs']), axis=1).stack().reset_index(level=1, drop=True)\n s.name = 'informationID'\n tweets = tweets.drop(['informationIDs','partialParentID'], axis=1).join(s).reset_index(drop=True)\n \n tweets = tweets.drop('threadInfoIDs',axis=1)\n tweets = convert_timestamps(tweets)\n\n print('Done!')\n return tweets\n\n\n \ndef extract_github_data(fn='github_data.json',\n info_id_fields=None,\n keywords = [],\n anonymized=False):\n\n json_data = load_json(fn)\n data = pd.DataFrame(json_data)\n\n get_info_ids = False\n if not info_id_fields is None or len(keywords) > 0:\n get_info_ids = True\n \n if anonymized:\n name_suffix = \"_h\"\n text_suffix = \"_m\"\n else:\n name_suffix = \"\"\n text_suffix = \"\"\n\n\n github_text_fields = {\"PushEvent\":[\"commits\",\"message\" + text_suffix],\n \"PullRequestEvent\":[\"pull_request\",\"body\" + text_suffix],\n \"IssuesEvent\":[\"issue\",\"body\" + text_suffix],\n \"CreateEvent\":[\"description\" + text_suffix],\n \"PullRequestReviewCommentEvent\":[\"comment\",\"body\" + text_suffix],\n \"ForkEvent\":[\"forkee\",\"description\" + text_suffix],\n \"IssueCommentEvent\":[\"comment\",\"body\" + text_suffix],\n \"CommitCommentEvent\":[\"comment\",\"body\" + text_suffix]}\n\n \n print('Extracting fields...')\n output_columns = ['nodeID', 'nodeUserID', 'actionType', 'nodeTime', 'platform']\n if get_info_ids:\n output_columns.append('informationIDs')\n\n \n if 'event' in data.columns:\n data.loc[:,'nodeTime'] = data['event'].apply(lambda x: x['created_at'])\n data.loc[:,'actionType'] = data['event'].apply(lambda x: x['type'])\n data.loc[:,'nodeUserID'] = data['event'].apply(lambda x: x['actor']['login' + name_suffix])\n data.loc[:,'nodeID'] = data['event'].apply(lambda x: x['repo']['name' + name_suffix])\n else:\n data.loc[:,'nodeUserID'] = data['actor'].apply(lambda x: x['login' + name_suffix])\n data.loc[:,'nodeID'] = data['repo'].apply(lambda x: x['name' + name_suffix])\n\n data.rename(columns={'created_at': 'nodeTime',\n 'type':'actionType'}, inplace=True)\n \n data.loc[:,'platform'] = 'github'\n\n\n def get_text_field(row):\n\n if row['actionType'] not in github_text_fields.keys():\n return ''\n \n if row['actionType'] == 'PushEvent':\n text = ' '.join(c['message' + text_suffix] for c in row['payload']['commits'])\n else:\n text = row['payload']\n \n for f in github_text_fields[row['actionType']]:\n if f in text:\n text = text[f]\n else:\n text = ''\n \n return text\n\n \n if len(keywords) > 0:\n data.loc[:,'text_field'] = data.apply(get_text_field,axis=1)\n data = data.dropna(subset=['text_field'])\n data.loc[:,'informationIDs'] = data['text_field'].apply(lambda x: get_info_id_from_text([x], keywords))\n data = data.drop('text_field',axis=1)\n elif not info_id_fields == None: \n data.loc[:,'informationIDs'] = pd.Series(data['socialsim_details'].apply(lambda x: list(itertools.chain.from_iterable([get_info_id_from_fields(m,info_id_fields) for m in x]))))\n \n events = data[output_columns]\n \n events = events[events.actionType.isin(['PullRequestEvent','IssuesEvent','CreateEvent','DeleteEvent','WatchEvent','ForkEvent',\n 'PullRequestReviewCommentEvent','CommitCommentEvent','PushEvent','IssueCommentEvent'])]\n\n if get_info_ids:\n print('Expanding events...') \n #expand lists of info IDs into seperate rows (i.e. 
        #expand lists of info IDs into separate rows (i.e. an individual event is duplicated if it pertains to multiple information IDs)\n        s = events.apply(lambda x: pd.Series(x['informationIDs']), axis=1).stack().reset_index(level=1, drop=True)\n        s.name = 'informationID'\n        events = events.drop('informationIDs', axis=1).join(s).reset_index(drop=True)\n        events = events.dropna(subset=['informationID'])\n        \n    events = convert_timestamps(events)\n\n    events = events.drop_duplicates([c for c in events.columns if c != 'urlDomains'])\n    \n    print('Done!')\n    return events\n\n    \ndef main():\n\n    fn = 'twitter_data2.json'\n    #fn = ['reddit_posts_data.json','reddit_comments_data.json']\n    #fn = ['github_repo_data.json','github_events_data.json']\n    \n    #data = extract_reddit_data(fn, anonymized=True, keywords=['issue','recent','client','code','secure','version'])\n    data = extract_twitter_data(fn, anonymized=False, keywords=['venzuela','maduro'])\n    #data = extract_twitter_data(fn, anonymized=False, info_id_fields = [\"entities.hashtags.text\"])\n    #data = extract_reddit_data(fn, anonymized=True, info_id_fields = [\"extension.socialsim_keywords\"])\n\n    print(data)\n    \nif __name__ == \"__main__\":\n    main()\n","sub_path":"socialsim/extract_ground_truth.py","file_name":"extract_ground_truth.py","file_ext":"py","file_size_in_byte":32042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
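A note on the row-expansion idiom used in the record above: the apply(pd.Series)/stack pattern duplicates each event once per element of its list-valued informationIDs column. A minimal, self-contained sketch on toy data (hypothetical frame; DataFrame.explode requires pandas >= 0.25):

import pandas as pd

# Toy stand-in for the events table; 'informationIDs' holds lists.
events = pd.DataFrame({'nodeID': ['n1', 'n2'],
                       'informationIDs': [['kw1', 'kw2'], ['kw3']]})

# The apply/stack idiom from the code above, spelled out:
s = events.apply(lambda x: pd.Series(x['informationIDs']), axis=1) \
          .stack().reset_index(level=1, drop=True)
s.name = 'informationID'
expanded = events.drop('informationIDs', axis=1).join(s).reset_index(drop=True)

# Equivalent, and considerably faster, on modern pandas:
expanded2 = (events.explode('informationIDs')
                   .rename(columns={'informationIDs': 'informationID'})
                   .reset_index(drop=True))

print(expanded.equals(expanded2))  # True on this toy input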
+{"seq_id":"546286229","text":"from subprocess import Popen\nimport argparse\nimport os\nimport subprocess\nimport sys\n\nfrom pathlib import Path\n\nfile_directory = str(Path(__file__).parent)\n\n\nUI_jar_folder_path = '/crisis_map_UI/build/libs/'\nUI_jar_file_name = 'crisis-map-1.0-SNAPSHOT.jar'\njar_file_path = file_directory+UI_jar_folder_path+UI_jar_file_name\n\n\nbackend_py_file_folder_path = '/crisis_map_backend/'\nbackend_py_file_name = 'app.py'\nbackend_flask_app_path = file_directory+backend_py_file_folder_path+backend_py_file_name\n\n\nstanford_folder_path = '/stanford-corenlp-full-2018-10-05/'\nstanford_jar_file_name = 'stanford-corenlp-3.9.2.jar'\nstanford_app_path = file_directory+stanford_folder_path+stanford_jar_file_name\n\ndef check_and_kill_apps(port):\n    cmd = 'lsof -t -i:{0}'.format(port)\n    pid = subprocess.check_output(cmd, shell=True)\n    pid = pid.decode(\"utf-8\").split('\\n')\n    for p in pid:\n        print(p)\n        pid = p\n    isKilled = os.system('kill -9 {0}'.format(pid)) if pid else None\n    if isKilled == 0:\n        print(\"Port {0} is free. Process {1} killed successfully\".format(port, pid))\n    else:\n        print(\"Cannot free port {0}. Failed to kill process {1}, err code:{2}\".format(port, pid, isKilled))\n\ntry:\n    check_and_kill_apps(8080)\n    check_and_kill_apps(5000)\n    check_and_kill_apps(9000)\n\nexcept:\n    pass\n\n\nPopen(['java', '-jar', jar_file_path])\n#Popen(['java', '-classpath', stanford_app_path, 'edu.stanford.nlp.pipeline.StanfordCoreNLPServer', '-port', '9000', '-timeout', '15000']).wait()\nPopen(['python', backend_flask_app_path]).wait()\n\n","sub_path":"run_application.py","file_name":"run_application.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"66057708","text":"#!/usr/bin/env python\n\nimport random\nimport proto.person_pb2 as proto_person\nfrom endpoints import EndpointsAPI\n\ndef getAPI():\n    print(\"\\u001b[36mStarting server API endpoints calls..!\\u001b[39m\")\n    return EndpointsAPI()\n\ndef insert(API):\n    num = str(random.randint(10,100))\n    person = proto_person.Person()\n    person.name = \"Diego \"+num\n    person.email = \"diego\"+num+\"@test.com\"\n    phone = person.phones.add()\n    phone.number = str(random.randint(1000,10000))\n    phone.type = proto_person.Person.PhoneType.MOBILE\n    person = API.insert_person(person)\n    print(\"\\u001b[33m\" + str(person) + \"\\u001b[39m\")\n    return person\n\ndef test():\n    API = getAPI()\n    person = insert(API)\n\n    query = {\"id\": person.id}\n    persons = API.find_persons(query)\n    print(\"\\u001b[33m\" + str(persons) + \"\\u001b[39m\")\n\ndef tryme():\n    ## Create API instance\n    API = getAPI()\n\n\n    #### HELLO ####\n    hello = API.get_hello()\n    print(\"\\u001b[33m\" + hello + \"\\u001b[39m\")\n\n\n    #### INSERT ####\n    insert(API)\n\n    person = proto_person.Person()\n    person.name = \"Manuel\"\n    person.email = \"manu@test.com\"\n    person = API.insert_person(person)\n    print(\"\\u001b[33m\" + str(person) + \"\\u001b[39m\")\n\n    person = proto_person.Person()\n    person.name = \"Fiqus\"\n    person.email = \"info@fiqus.coop\"\n    person = API.insert_person(person)\n    print(\"\\u001b[33m\" + str(person) + \"\\u001b[39m\")\n\n\n    #### LIST ####\n    persons = API.list_persons()\n    print(\"\\u001b[33m\" + str(persons) + \"\\u001b[39m\")\n\n\n    #### FIND ####\n    query = {\"name\": \"Diego%\", \"email\": \"%@test.com\"}\n    persons = API.find_persons(query)\n    print(\"\\u001b[33m\" + str(persons) + \"\\u001b[39m\")\n\n    query = {\"id\": 1}\n    persons = API.find_persons(query)\n    print(\"\\u001b[33m\" + str(persons) + \"\\u001b[39m\")\n\n    query = {\"name\": \"Not Found!\"}\n    persons = API.find_persons(query)\n    print(\"\\u001b[33m\" + str(persons) + \"\\u001b[39m\")\n\nif __name__ == \"__main__\":\n    print(\"\\u001b[32mWelcome to prototest client =]\\u001b[39m\")\n    print(\"\\u001b[33mEnter tryme() to run some examples!\\u001b[39m\")\n    print(\"\\u001b[33mEnter test() for a quick insert/retrieve test!\\u001b[39m\")","sub_path":"protobuf/client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"294659177","text":"import time\nimport win32api\nfrom ctypes import windll\n\nimport win32con\nimport logging\nimport sys\n\nFORMAT = \"%(levelname)s-%(module)s-Line %(lineno)s: %(message)s\"\n#logging.basicConfig(stream='mouse.log', level=logging.DEBUG, format=FORMAT)\n\n#https://msdn.microsoft.com/en-us/library/windows/desktop/ms646260(v=vs.85).aspx\nclass MouseMovement:\n    def click(self, coords, 
button=\"left\",hold=False):\n \"\"\"\n Args:\n coords (touple): coords takes two arguments, either both float\n or int. If float is supplied, it will try to treat them as\n percentages. X, Y\n button (string): either \"left\",\"right\" or \"middle\". Decides what button that\n will be sent to the running program.\n\n Returns:\n bool: True if successful, False otherwise.\n\n Raises:\n SyntaxError: The button param does not contain \"left\",\"right og \"middle\"\n \"\"\"\n\n hwnd = self.win_handler.get_hwnd()\n\n if all(isinstance(elem, float) for elem in coords):\n coords = self.to_pixel(coords)\n\n\n logging.debug(\"Trying to click on:\" + str(coords) + \" with \" + button + \" button\")\n\n x = coords[0]\n y = coords[1]\n\n if \"right\" in button.lower():\n _button_state = win32con.MK_RBUTTON\n _button_down = win32con.WM_RBUTTONDOWN\n _button_up = win32con.WM_RBUTTONUP\n elif \"left\" in button.lower():\n _button_state = win32con.MK_LBUTTON\n _button_down = win32con.WM_LBUTTONDOWN\n _button_up = win32con.WM_LBUTTONUP\n elif \"middle\" in button.lower():\n _button_state = win32con.MK_MBUTTON\n _button_down = win32con.WM_MBUTTONDOWN\n _button_up = win32con.WM_MBUTTONUP\n else:\n raise SyntaxError('\"Button\" needs to contain \"left\", \"right\" or \"middle\"')\n\n l_param = win32api.MAKELONG(x, y)\n\n win32api.SendMessage(hwnd, win32con.WM_MOUSEMOVE,0,l_param)\n\n time.sleep(0.2)\n win32api.SendMessage(hwnd,_button_down, _button_state, l_param)\n time.sleep(0.1)\n\n if not hold: #Do not release the button if hold is true\n win32api.SendMessage(hwnd, _button_up, 0, l_param)\n\n self._last_x = x\n self._last_y = y\n return True\n\n def release_button(self, coords, button = \"left\"):\n\n if \"right\" in button.lower():\n _button_up = win32con.WM_RBUTTONUP\n elif \"left\" in button.lower():\n _button_up = win32con.WM_LBUTTONUP\n elif \"middle\" in button.lower():\n _button_up = win32con.WM_MBUTTONUP\n else:\n raise SyntaxError('\"Button\" needs to contain \"left\", \"right\" or \"middle\"')\n\n if all(isinstance(elem, float) for elem in coords):\n coords = self.to_pixel(coords)\n x = coords[0]\n y = coords[1]\n\n l_param = win32api.MAKELONG(x, y)\n\n\n hwnd = self.win_handler.get_hwnd()\n win32api.SendMessage(hwnd, _button_up, 0, l_param)\n\n\n raise NotImplementedError\n\n def offset_click(self, x, y, button=\"left\"):\n \"\"\"\n Args:\n x (int): The offset in the left/right direction\n y (int): The offset in the up/down direction\n button (string): either \"left\" or \"right\". 
    def offset_click(self, x, y, button=\"left\"):\n        \"\"\"\n        Args:\n            x (int): The offset in the left/right direction\n            y (int): The offset in the up/down direction\n            button (string): either \"left\", \"right\" or \"middle\". Decides which\n                button will be sent to the running program.\n        Returns:\n            bool: True if successful, False otherwise.\n\n        Raises:\n            SyntaxError: The button param does not contain \"left\", \"right\" or \"middle\"\n        \"\"\"\n\n        if all(isinstance(elem, float) for elem in [x, y]):\n            x, y = self.to_pixel([x, y])\n\n        return self.click([self._last_x + x, self._last_y + y], button)\n\n    def move(self,coords, button=None):\n        if all(isinstance(elem, float) for elem in coords):\n            coords = self.to_pixel(coords)\n\n        if button is None:\n            _button_state = 0\n        elif \"right\" in button.lower():\n            _button_state = win32con.MK_RBUTTON\n        elif \"left\" in button.lower():\n            _button_state = win32con.MK_LBUTTON\n        elif \"middle\" in button.lower():\n            _button_state = win32con.MK_MBUTTON\n\n        else:\n            raise SyntaxError('\"Button\" needs to contain \"left\", \"right\" or \"middle\"')\n\n        l_param = win32api.MAKELONG(coords[0], coords[1])\n        win32api.PostMessage(self.win_handler.get_hwnd(), win32con.WM_MOUSEMOVE, _button_state, l_param)\n\n    def hold_and_drag(self,start,end,steps,button=\"left\"):\n        hwnd = self.win_handler.get_hwnd()\n\n        if all(isinstance(elem, float) for elem in start):\n            start = self.to_pixel(start)\n\n        if all(isinstance(elem, float) for elem in end):\n            end = self.to_pixel(end)\n\n        step_x = (float(end[0] - start[0])) / steps\n        step_y = (float(end[1] - start[1])) / steps\n\n        if \"right\" in button.lower():\n            _button_state = win32con.MK_RBUTTON\n            _button_down = win32con.WM_RBUTTONDOWN\n            _button_up = win32con.WM_RBUTTONUP\n        elif \"left\" in button.lower():\n            _button_state = win32con.MK_LBUTTON\n            _button_down = win32con.WM_LBUTTONDOWN\n            _button_up = win32con.WM_LBUTTONUP\n        elif \"middle\" in button.lower():\n            _button_state = win32con.MK_MBUTTON\n            _button_down = win32con.WM_MBUTTONDOWN\n            _button_up = win32con.WM_MBUTTONUP\n        else:\n            raise SyntaxError('\"Button\" needs to contain \"left\", \"right\" or \"middle\"')\n\n        self.move(start)\n        l_param = win32api.MAKELONG(start[0], start[1])\n\n        time.sleep(0.1)\n        win32api.SendMessage(hwnd,_button_down,_button_state,l_param)\n        time.sleep(0.1)\n\n        x, y = start\n        for step in range(0,steps):\n            x += step_x\n            y += step_y\n            self.move((int(x),int(y)), button=button)\n            time.sleep(0.01)\n\n        l_param = win32api.MAKELONG(int(x), int(y))\n        win32api.SendMessage(hwnd,_button_up,0,l_param)\n        self._last_x = x\n        self._last_y = y\n\n    def to_ratio(self, coords):\n        size_vertical, size_horizontal = self.win_handler.get_bbox_size()\n\n        x, y = coords[0] / size_horizontal, coords[1] / size_vertical\n        return float(x),float(y)\n\n    def to_pixel(self, coords, bbox = None):\n        \"\"\"\n        Args:\n            coords (tuple): a pair of floating point numbers between 0.0 and 1.0\n                representing a percentage of the screen in the x/y directions\n            bbox (tuple):\n        Returns:\n            tuple: a pair of integers representing the actual coordinates in\n                the form of pixels\n        \"\"\"\n\n        if bbox is None:\n            bbox = self.win_handler.create_boundingbox()\n            size_vertical,size_horizontal = self.win_handler.get_bbox_size()\n        else:\n            size_vertical = bbox[2] - bbox[0]\n            size_horizontal = bbox[3] - bbox[1]\n\n        #x must scale with the horizontal size and y with the vertical one,\n        #mirroring to_ratio above (the original multiplied them the other way around)\n        x, y = coords[0] * size_horizontal, coords[1] * size_vertical\n\n        logging.debug(\"To Pixel: {} -> {} in the box {}\".format(coords,(x,y),bbox))\n\n        return int(x), int(y)\n\n    def click_centre(self,bbox,button=\"left\"):\n        #completed from an empty stub: click the centre of the given bounding box\n        return self.click(self.to_pixel((0.5, 0.5), bbox), button)\n\n    def __init__(self, window_handler):\n        self._last_x = 0\n        self._last_y = 0\n        self.win_handler = window_handler\n        self._pycwnd = self.win_handler.get_pycwnd()\n        self.window_size = 
self._pycwnd.GetWindowPlacement()[4]\n","sub_path":"pytomatic/actions/MouseMovement.py","file_name":"MouseMovement.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"152351768","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: D:\\dev\\cocos2020\\test\\test_action_non_interval.py\n# Compiled at: 2020-01-10 23:58:31\n# Size of source mod 2**32: 5112 bytes\nfrom __future__ import division, print_function, unicode_literals\nimport sys, os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\ntestinfo = 'f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, f 30 0.033, s, q'\ntags = 'Action'\nimport random, math, cocos\nimport cocos.director as director\nfrom cocos.sprite import Sprite\nimport cocos.actions as ac\nimport pyglet\nfrom pyglet.gl import *\nfastness_green = 60.0\nfastness_bullet = 80.0\n\nclass ProbeQuad(cocos.cocosnode.CocosNode):\n\n def __init__(self, r, color4):\n super(ProbeQuad, self).__init__()\n self.color4 = color4\n self.vertexes = [(r, 0, 0), (0, r, 0), (-r, 0, 0), (0, -r, 0)]\n\n def draw(self):\n glPushMatrix()\n self.transform()\n glBegin(GL_QUADS)\n glColor4ub(*self.color4)\n for v in self.vertexes:\n glVertex3i(*v)\n else:\n glEnd()\n glPopMatrix()\n\n\nclass RandomWalk(ac.Action):\n\n def init(self, fastness):\n self.fastness = fastness\n\n def start(self):\n self.make_new_leg()\n\n def make_new_leg(self):\n self._elapsed = 0.0\n x0, y0 = self.target.position\n width, height = director.get_window_size()\n x1 = random.randint(0, width)\n y1 = random.randint(0, height)\n dx = x1 - x0\n dy = y1 - y0\n norm = math.hypot(dx, dy)\n try:\n self.t_arrival = norm / (1.0 * self.fastness)\n except ZeroDivisionError:\n norm = 1.0\n self.t_arrival = 0.1\n else:\n self.dx = dx / norm\n self.dy = dy / norm\n print('dx, dy:', dx, dy)\n self.x0 = x0\n self.y0 = y0\n\n def step(self, dt):\n self._elapsed += dt\n if self._elapsed > self.t_arrival:\n self.make_new_leg()\n x = self.fastness * self._elapsed * self.dx + self.x0\n y = self.fastness * self._elapsed * self.dy + self.y0\n self.target.position = (\n x, y)\n\n\nclass Chase(ac.Action):\n\n def init(self, fastness):\n self.fastness = fastness\n\n def init2(self, chasee, on_bullet_hit):\n self.chasee = chasee\n self.on_bullet_hit = on_bullet_hit\n\n def step(self, dt):\n if self.chasee is None:\n return\n x0, y0 = self.target.position\n x1, y1 = self.chasee.position\n dx, dy = x1 - x0, y1 - y0\n mod = math.hypot(dx, dy)\n x = self.fastness * dt * (x1 - x0) / mod + x0\n y = self.fastness * dt * (y1 - y0) / mod + y0\n self.target.position = (x, y)\n if math.hypot(x1 - x, y1 - y) < 5:\n self._done = True\n\n def stop(self):\n self.chasee.do(ac.RotateBy(360, 1.0))\n self.on_bullet_hit(self.target)\n\n\nclass TestLayer(cocos.layer.Layer):\n\n def __init__(self):\n super(TestLayer, self).__init__()\n x, y = director.get_window_size()\n self.green_obj = ProbeQuad(50, (0, 255, 0, 255))\n self.add(self.green_obj)\n self.green_obj.do(RandomWalk(fastness_green))\n self.schedule_interval(self.spawn_bullet, 1.0)\n\n def spawn_bullet(self, dt):\n bullet = ProbeQuad(5, (255, 0, 0, 255))\n bullet.position = (0, 0)\n bullet.color = (233, 70, 0)\n chase_worker = bullet.do(Chase(fastness_bullet))\n chase_worker.init2(self.green_obj, self.on_bullet_hit)\n self.add(bullet)\n\n def on_bullet_hit(self, bullet):\n 
self.remove(bullet)\n\n\ndescription = '\\nExample actions with duration not known at it start time ( no IntervalAction,\\nno InstantAction).\\nIt also shows one way of passing non deepcopy-able parameters, like a cocosnode,\\nto an action.\\nIt should be seen:\\n A green quad moving in rectilinear traits.\\n A bunch of red dots spawning from left-bottom corner and moving towards\\n the green quad.\\n Green quad spining when a red dot reach the center of green quad.\\n'\n\ndef main():\n print(description)\n director.init()\n a = cocos.cocosnode.CocosNode()\n\n class A(object):\n\n def __init__(self, x):\n self.x = x\n\n z = A(a)\n import copy\n b = copy.deepcopy(a)\n print('a:', a)\n print('b:', b)\n test_layer = TestLayer()\n main_scene = cocos.scene.Scene(test_layer)\n director.run(main_scene)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/cocos2d-0.6.8.tar/test_action_non_interval.cpython-38.py","file_name":"test_action_non_interval.cpython-38.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"507236444","text":"import json\nimport logging\n\nfrom modsolr import Solr\n\nlogging.basicConfig(level='DEBUG')\nlogger = logging.getLogger(__name__)\n\nsolr = Solr(username='alice', password='2ozs4NXFeWmu',\n dsn='https://solr-staging.bidvestalice.com/',\n collection='staging', auth_type='basic')\n\n# Set file to Open\nfile = 'backup_exec.json'\n\nwith open(file) as f:\n loaded_file = json.load(f)\n # Create list of schemas\n l = [loaded_file[x]['message_schema'] for x in loaded_file.keys()]\n\n# Lookup table to convert BQ fields to SOLR\nlookup = {\"STRING\": \"string\", \"INTEGER\": \"tlong\", \"FLOAT\": \"tfloat\",\n \"TIMESTAMP\": \"tdate\", \"DATE\": \"tdate\", \"TIME\": \"tdate\",\n \"DATETIME\": \"tdate\", \"BOOLEAN\": \"boolean\"}\n\n\ndef create_schema(schema: list) -> None:\n \"\"\"\n Creates a solr schema from a BQ schema\n :param schema:\n :return:\n \"\"\"\n for row in schema:\n # Recursively find all fields\n if row['type'] == 'RECORD':\n create_schema(row['fields'])\n else:\n if 'mode' in row:\n field = {\n 'name': row['name'],\n # [field] = [field] + 's' for repeated fields\n 'type': lookup[row['type']] + 's' \\\n if row['mode'] == 'REPEATED' else lookup[row['type']]\n }\n else:\n field = {\n 'name': row['name'],\n 'type': lookup[row['type']]\n }\n r = solr.schema(add_field=json.dumps(field))\n r.raise_for_status()\n response = r.json()\n # Check that returned fields match\n try:\n if lookup[row['type']] not in response[\"errors\"][0][\"add-field\"][\"type\"]:\n raise ValueError(\"Field Type does not match\")\n except KeyError:\n logger.info(f\"Adding new field to solr collection {row}\")\n logger.debug(r.text)\n\n# Create schema for each item in list l\nfor item in l:\n create_schema(item)\n","sub_path":"tools/solr_schema_manager.py","file_name":"solr_schema_manager.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"204984599","text":"import numpy as np\nimport ROOT\nimport sys\n\n\n## some variablesS\n## NOTE: 30.1234 -> see getSFs_libs.py, getBinLabel function\nptBins = np.array([2., 2.5, 2.75, 3., 3.25, 3.5, 3.75, 4., 4.5, 5., 6., 8., 10., 15., 20., 30.1234])\netaBins = np.array([-2.4, -2.1, -1.6, -1.2, -0.9, -0.3, -0.2, 0.2, 0.3, 0.9, 1.2, 1.6, 2.1, 2.4])\nnVtxBins = 
np.array([0.5,2.5,4.5,6.5,8.5,10.5,12.5,14.5,16.5,18.5,20.5,22.5,24.5,26.5,28.5,30.5,32.5,34.5,36.5,38.5,40.5,42.5,44.5,46.5,48.5,50.5])\nabsetaBins = np.array([0, 0.9, 1.2, 2.1, 2.4])\n\n\n## variables\nvarList = [ #(\"pt\" , ptBins ),\n            #(\"eta\" , etaBins ),\n            #(\"nVtx\" , nVtxBins),\n            (\"pt_abseta\", (ptBins, absetaBins)),\n]\n\n## input files\nfileDA = ROOT.TFile.Open(\"../root_files/data/data_%s.root\" % str(sys.argv[1]))\nfileMC = ROOT.TFile.Open(\"../root_files/mc/mc_%s.root\" % str(sys.argv[1]))\n\n## main directory\nMAINDIR = \"./%s\" % str(sys.argv[1])\n\n## ID type\nID_type = str(sys.argv[1])+\"_muonID\"\n\n## booleans\n ## used to plot pt with a log x-axis\nuseLogXforPt = False\n\ndef printConfig():\n    print ('''\n    ********************************************************************\n    CONFIGURATION of cfg_getSFs.py\n    \n    variable list \\t\\t {VARLIST}\n    data file \\t\\t {DATAFILE}\n    MC file \\t\\t {MCFILE}\n    main directory \\t\\t {MDIR}\n    useLogXforPt \\t\\t {LOGX}\n    ID type \\t\\t {IDTYPE}\n\n    '''.format( VARLIST = list(\"%s\" % varList[i][0] for i in range( len(varList))),\n                DATAFILE = fileDA.GetName() ,\n                MCFILE = fileMC.GetName() ,\n                MDIR = MAINDIR ,\n                LOGX = useLogXforPt ,\n                IDTYPE = ID_type ,\n                )\n    )","sub_path":"Scale_Factors/cfg/cfg_getSFs.py","file_name":"cfg_getSFs.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"338476690","text":"import configparser\nimport base58\nimport os\n\nfrom log_config import main_logger\nfrom util.client_utils import check_response\nfrom util.rpc_utils import parse_json_response\nfrom random import randint\nfrom time import sleep\n\nlogger = main_logger\n\nMAX_TX_PER_BLOCK = 280\nPKH_LENGTH = 36\nCONFIRMATIONS = 1\n\nCOMM_HEAD = \" rpc get http://{}/chains/main/blocks/head\"\nCOMM_COUNTER = \" rpc get http://{}/chains/main/blocks/head/context/contracts/{}/counter\"\nCONTENT = '{\"kind\":\"transaction\",\"source\":\"%SOURCE%\",\"destination\":\"%DESTINATION%\",\"fee\":\"%fee%\",\"counter\":\"%COUNTER%\",\"gas_limit\": \"%gas_limit%\", \"storage_limit\": \"%storage_limit%\",\"amount\":\"%AMOUNT%\"}'\nFORGE_JSON = '{\"branch\": \"%BRANCH%\",\"contents\":[%CONTENT%]}'\nRUNOPS_JSON = '{\"branch\": \"%BRANCH%\",\"contents\":[%CONTENT%], \"signature\":\"edsigtXomBKi5CTRf5cjATJWSyaRvhfYNHqSUGrn4SdbYRcGwQrUGjzEfQDTuqHhuA8b2d8NarZjz8TRf65WkpQmo423BtomS8Q\"}'\nPREAPPLY_JSON = '[{\"protocol\":\"%PROTOCOL%\",\"branch\":\"%BRANCH%\",\"contents\":[%CONTENT%],\"signature\":\"%SIGNATURE%\"}]'\nCOMM_FORGE = \" rpc post http://%NODE%/chains/main/blocks/head/helpers/forge/operations with '%JSON%'\"\nCOMM_RUNOPS = \" rpc post http://%NODE%/chains/main/blocks/head/helpers/scripts/run_operation with '%JSON%'\"\nCOMM_PREAPPLY = \" rpc post http://%NODE%/chains/main/blocks/head/helpers/preapply/operations with '%JSON%'\"\nCOMM_INJECT = \" rpc post http://%NODE%/injection/operation with '\\\"%OPERATION_HASH%\\\"'\"\nCOMM_WAIT = \" wait for %OPERATION% to be included --confirmations {}\".format(CONFIRMATIONS)\n\nFEE_INI = 'fee.ini'\nDUMMY_FEE = 1000\n\n
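# Annotation (not original code): the [KTTX] section read in BatchPayer's
# __init__ below implies a fee.ini of roughly this shape (the values are
# illustrative placeholders, not the project's shipped defaults):
#
#     [KTTX]
#     base = 100
#     gas_limit = 10200
#     storage_limit = 0
#     fee = 1792
#
# __init__ reads base, gas_limit, storage_limit and fee, and additionally
# raises if a legacy "delegator_pays_xfer_fee = false" entry is still present.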
class BatchPayer():\n    def __init__(self, node_url, pymnt_addr, wllt_clnt_mngr, delegator_pays_xfer_fee):\n        super(BatchPayer, self).__init__()\n        self.pymnt_addr = pymnt_addr\n        self.node_url = node_url\n        self.wllt_clnt_mngr = wllt_clnt_mngr\n\n        config = configparser.ConfigParser()\n        if os.path.isfile(FEE_INI):\n            config.read(FEE_INI)\n        else:\n            logger.warning(\"File {} not found. Using default fee values\".format(FEE_INI))\n\n        kttx = config['KTTX']\n        self.base = kttx['base']\n        self.gas_limit = kttx['gas_limit']\n        self.storage_limit = kttx['storage_limit']\n        self.default_fee = kttx['fee']\n\n        # section below is left to make sure no one is using the legacy configuration option\n        self.delegator_pays_xfer_fee = config.getboolean('KTTX', 'delegator_pays_xfer_fee',\n                                                         fallback=True)  # Must use getboolean otherwise parses as string\n\n        if not self.delegator_pays_xfer_fee:\n            raise Exception(\n                \"delegator_pays_xfer_fee is no longer read from fee.ini. It should be set in baking configuration file.\")\n\n        self.delegator_pays_xfer_fee = delegator_pays_xfer_fee\n\n        logger.debug(\"Transfer fee is paid by {}\".format(\"Delegator\" if self.delegator_pays_xfer_fee else \"Delegate\"))\n\n        # if pymnt_addr has a length of 36 and starts with tz or KT then it is a public key hash, else it is an alias\n        if len(self.pymnt_addr) == PKH_LENGTH and (\n                self.pymnt_addr.startswith(\"KT\") or self.pymnt_addr.startswith(\"tz\")):\n            self.source = self.pymnt_addr\n        else:\n            known_contracts = self.wllt_clnt_mngr.get_known_contracts_by_alias()\n            if self.pymnt_addr in known_contracts:\n                self.source = known_contracts[self.pymnt_addr]\n            else:\n                raise Exception(\"pymnt_addr cannot be translated into a PKH or alias: {}\".format(self.pymnt_addr))\n\n        self.manager = self.wllt_clnt_mngr.get_addr_dict_by_pkh(self.source)['manager']\n        self.manager_alias = self.wllt_clnt_mngr.get_addr_dict_by_pkh(self.manager)['alias']\n\n        logger.debug(\"Payment address is {}\".format(self.source))\n        logger.debug(\"Signing address is {}, manager alias is {}\".format(self.manager, self.manager_alias))\n\n        self.comm_head = COMM_HEAD.format(self.node_url)\n        self.comm_counter = COMM_COUNTER.format(self.node_url, self.source)\n        self.comm_runops = COMM_RUNOPS.format().replace(\"%NODE%\", self.node_url)\n        self.comm_forge = COMM_FORGE.format().replace(\"%NODE%\", self.node_url)\n        self.comm_preapply = COMM_PREAPPLY.format().replace(\"%NODE%\", self.node_url)\n        self.comm_inject = COMM_INJECT.format().replace(\"%NODE%\", self.node_url)\n        self.comm_wait = COMM_WAIT.format()\n\n    def pay(self, payment_items_in, verbose=None, dry_run=None):\n\n        # initialize the result list with already paid items\n        payment_logs = [pi for pi in payment_items_in if pi.paid]\n\n        payment_items = [pi for pi in payment_items_in if not pi.paid]\n\n        if payment_logs:\n            for pl in payment_logs:\n                logger.info(\"Reward already paid for cycle %s address %s amount %f tz type %s\",\n                            pl.cycle, pl.address, pl.payment, pl.type)\n\n        # split payments into lists of MAX_TX_PER_BLOCK or less size\n        # [list_of_size_MAX_TX_PER_BLOCK,list_of_size_MAX_TX_PER_BLOCK,list_of_size_MAX_TX_PER_BLOCK,...]\n        payment_items_chunks = [payment_items[i:i + MAX_TX_PER_BLOCK] for i in\n                                range(0, len(payment_items), MAX_TX_PER_BLOCK)]\n\n        op_counter = OpCounter()\n        logger.debug(\"Payment will be done in {} batches\".format(len(payment_items_chunks)))\n\n        for payment_items_chunk in payment_items_chunks:\n            logger.debug(\"Payment of a batch started\")\n            payments_log = \\\n                self.pay_single_batch_wrap(payment_items_chunk, verbose=verbose, dry_run=dry_run, op_counter=op_counter)\n            payment_logs.extend(payments_log)\n\n            logger.debug(\"Payment of a batch is complete\")\n\n        return payment_logs\n\n    def pay_single_batch_wrap(self, payment_items, op_counter, verbose=None, dry_run=None):\n\n        max_try = 3\n        return_code = False\n        operation_hash = \"\"\n\n        # due to unknown reasons, sometimes a batch fails to pre-apply\n        # trying 
after some time should be OK\n for attempt in range(max_try):\n return_code, operation_hash = \\\n self.pay_single_batch(payment_items, op_counter, verbose, dry_run=dry_run)\n\n if dry_run or not return_code:\n op_counter.rollback()\n else:\n op_counter.commit()\n\n # we do not want to preserve counter anymore\n # force re-read of counter at every try\n op_counter.set(None)\n\n # if successful, do not try anymore\n if return_code:\n break\n\n logger.debug(\"Batch payment attempt {} failed\".format(attempt))\n\n # But do not wait after last attempt\n if attempt < max_try - 1:\n self.wait_random()\n\n for payment_item in payment_items:\n payment_item.paid = return_code\n payment_item.hash = operation_hash\n\n return payment_items\n\n def wait_random(self):\n slp_tm = randint(10, 50)\n logger.debug(\"Wait for {} seconds before trying again\".format(slp_tm))\n sleep(slp_tm)\n\n def pay_single_batch(self, payment_records, op_counter, verbose=None, dry_run=None):\n\n if not op_counter.get():\n counter = parse_json_response(self.wllt_clnt_mngr.send_request(self.comm_counter))\n counter = int(counter)\n op_counter.set(counter)\n\n head = parse_json_response(self.wllt_clnt_mngr.send_request(self.comm_head))\n branch = head[\"hash\"]\n protocol = head[\"metadata\"][\"protocol\"]\n\n logger.debug(\"head: branch {} counter {} protocol {}\".format(branch, op_counter.get(), protocol))\n\n content_list = []\n\n for payment_item in payment_records:\n pymnt_amnt = int(payment_item.payment * 1e6) # expects in micro tezos\n\n if self.delegator_pays_xfer_fee:\n pymnt_amnt = max(pymnt_amnt - int(self.default_fee), 0) # ensure not less than 0\n\n if pymnt_amnt < 1e-3: # zero check\n continue\n\n op_counter.inc()\n content = CONTENT.replace(\"%SOURCE%\", self.source).replace(\"%DESTINATION%\", payment_item.address) \\\n .replace(\"%AMOUNT%\", str(pymnt_amnt)).replace(\"%COUNTER%\", str(op_counter.get())) \\\n .replace(\"%fee%\", self.default_fee).replace(\"%gas_limit%\", self.gas_limit).replace(\"%storage_limit%\",\n self.storage_limit)\n content_list.append(content)\n\n logger.info(\"Payment content: {}\".format(content))\n\n contents_string = \",\".join(content_list)\n\n # run the operations\n logger.debug(\"Running {} operations\".format(len(content_list)))\n runops_json = RUNOPS_JSON.replace('%BRANCH%', branch).replace(\"%CONTENT%\", contents_string)\n runops_command_str = self.comm_runops.replace(\"%JSON%\", runops_json)\n if verbose: logger.debug(\"runops_command_str is |{}|\".format(runops_command_str))\n runops_command_response = self.wllt_clnt_mngr.send_request(runops_command_str)\n if not check_response(runops_command_response):\n error_desc = parse_json_response(runops_command_response)\n # for content in runops_command_response[\"contents\"]:\n # op_result = content[\"metadata\"][\"operation_result\"]\n # if op_result[\"status\"] == 'failed':\n # error_desc = op_result[\"errors\"]\n # break\n logger.error(\"Error in run_operation response '{}'\".format(error_desc))\n return False, \"\"\n\n # forge the operations\n logger.debug(\"Forging {} operations\".format(len(content_list)))\n forge_json = FORGE_JSON.replace('%BRANCH%', branch).replace(\"%CONTENT%\", contents_string)\n forge_command_str = self.comm_forge.replace(\"%JSON%\", forge_json)\n if verbose: logger.debug(\"forge_command_str is |{}|\".format(forge_command_str))\n forge_command_response = self.wllt_clnt_mngr.send_request(forge_command_str)\n if not check_response(forge_command_response):\n logger.error(\"Error in forge response 
'{}'\".format(forge_command_response))\n return False, \"\"\n\n # sign the operations\n bytes = parse_json_response(forge_command_response, verbose=verbose)\n signed_bytes = self.wllt_clnt_mngr.sign(bytes, self.manager_alias)\n\n # pre-apply operations\n logger.debug(\"Preapplying the operations\")\n preapply_json = PREAPPLY_JSON.replace('%BRANCH%', branch).replace(\"%CONTENT%\", contents_string).replace(\n \"%PROTOCOL%\", protocol).replace(\"%SIGNATURE%\", signed_bytes)\n preapply_command_str = self.comm_preapply.replace(\"%JSON%\", preapply_json)\n\n if verbose: logger.debug(\"preapply_command_str is |{}|\".format(preapply_command_str))\n preapply_command_response = self.wllt_clnt_mngr.send_request(preapply_command_str)\n if not check_response(preapply_command_response):\n logger.error(\"Error in preapply response '{}'\".format(preapply_command_response))\n return False, \"\"\n\n # not necessary\n # preapplied = parse_response(preapply_command_response)\n\n # if dry_run, skip injection\n if dry_run: return True, \"\"\n\n # inject the operations\n logger.debug(\"Injecting {} operations\".format(len(content_list)))\n decoded = base58.b58decode(signed_bytes).hex()\n\n if signed_bytes.startswith(\"edsig\"): # edsig signature\n decoded_edsig_signature = decoded[10:][:-8] # first 5 bytes edsig, last 4 bytes checksum\n decoded_signature = decoded_edsig_signature\n elif signed_bytes.startswith(\"sig\"): # generic signature\n decoded_sig_signature = decoded[6:][:-8] # first 3 bytes sig, last 4 bytes checksum\n decoded_signature = decoded_sig_signature\n elif signed_bytes.startswith(\"p2sig\"):\n decoded_sig_signature = decoded[8:][:-8] # first 4 bytes sig, last 4 bytes checksum\n decoded_signature = decoded_sig_signature\n else:\n raise Exception(\"Signature '{}' is not in expected format\".format(signed_bytes))\n\n if len(decoded_signature) != 128: # must be 64 bytes\n # raise Exception(\"Signature length must be 128 but it is {}. Signature is '{}'\".format(len(signed_bytes), signed_bytes))\n logger.warn(\n \"Signature length must be 128 but it is {}. Signature is '{}'\".format(len(signed_bytes), signed_bytes))\n # return False, \"\"\n\n signed_operation_bytes = bytes + decoded_signature\n inject_command_str = self.comm_inject.replace(\"%OPERATION_HASH%\", signed_operation_bytes)\n if verbose: logger.debug(\"inject_command_str is |{}|\".format(inject_command_str))\n inject_command_response = self.wllt_clnt_mngr.send_request(inject_command_str)\n if not check_response(inject_command_response):\n logger.error(\"Error in inject response '{}'\".format(inject_command_response))\n return False, \"\"\n\n operation_hash = parse_json_response(inject_command_response)\n logger.debug(\"Operation hash is {}\".format(operation_hash))\n\n # wait for inclusion\n logger.debug(\"Waiting for operation {} to be included. 
Please be patient until the block has {} confirmation(s)\"\n .format(operation_hash, CONFIRMATIONS))\n self.wllt_clnt_mngr.send_request(self.comm_wait.replace(\"%OPERATION%\", operation_hash))\n\n logger.debug(\"Operation {} is included\".format(operation_hash))\n\n return True, operation_hash\n\n\nclass OpCounter:\n def __init__(self) -> None:\n super().__init__()\n self.__counter = None\n self.__counter_backup = None\n\n def inc(self):\n if self.__counter is None:\n raise Exception(\"Counter is not set!!!\")\n\n self.__counter += 1\n\n def get(self):\n return self.__counter\n\n def commit(self):\n self.__counter_backup = self.__counter\n\n def rollback(self):\n self.__counter = self.__counter_backup\n\n def set(self, counter):\n self.__counter = counter\n self.__counter_backup = counter\n","sub_path":"src/pay/batch_payer.py","file_name":"batch_payer.py","file_ext":"py","file_size_in_byte":14269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"356617305","text":"#! /usr/bin/env python3\n# Copyright (C) 2009 Sebastian Garcia, Veronica Valeros\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n#\n# Author:\n# Sebastian Garcia, sebastian.garcia@agents.fel.cvut.cz, sgarcia@exa.unicen.edu.ar, eldraco@gmail.com\n#\n# Changelog\n\n# Description\n# A tool to add labels in netflow files\n#\n# TODO\n# Take the flags into account\n\n\n# standard imports\nimport getopt\nimport sys\nimport re\n\n####################\n# Global Variables\n\ndebug = 0\nvernum = \"0.3\"\nverbose = False\n\n#########\n\n\n# Print version information and exit\ndef version():\n print(\"+----------------------------------------------------------------------+\")\n print(\"| netflowlabeler.py Version \"+ vernum +\" |\")\n print(\"| This program is free software; you can redistribute it and/or modify |\")\n print(\"| it under the terms of the GNU General Public License as published by |\")\n print(\"| the Free Software Foundation; either version 2 of the License, or |\")\n print(\"| (at your option) any later version. |\")\n print(\"| |\")\n print(\"| Author: Garcia Sebastian, eldraco@gmail.com |\")\n print(\"| Author: Veronica Valeros, vero.valeros@gmail.com |\")\n print(\"| UNICEN-ISISTAN, Argentina. CTU, Prague-ATG |\")\n print(\"+----------------------------------------------------------------------+\")\n print()\n\n\n# Print help information and exit:\ndef usage():\n print(\"+----------------------------------------------------------------------+\")\n print(\"| netflowlabeler.py Version \"+ vernum +\" |\")\n print(\"| This program is free software; you can redistribute it and/or modify |\")\n print(\"| it under the terms of the GNU General Public License as published by |\")\n print(\"| the Free Software Foundation; either version 2 of the License, or |\")\n print(\"| (at your option) any later version. 
|\")\n print(\"| |\")\n print(\"| Author: Garcia Sebastian, eldraco@gmail.com |\")\n print(\"| Author: Veronica Valeros, vero.valeros@gmail.com |\")\n print(\"| UNICEN-ISISTAN, Argentina. CTU, Prague-ATG |\")\n print(\"+----------------------------------------------------------------------+\")\n print(\"\\nusage: %s \" % sys.argv[0])\n print(\"options:\")\n print(\" -h, --help Show this help message and exit\")\n print(\" -V, --version Output version information and exit\")\n print(\" -v, --verbose Output more information.\")\n print(\" -D, --debug Debug. In debug mode the statistics run live.\")\n print(\" -f, --file Input netflow file to label.\")\n print(\" -c, --conf Input configuration file to create the labels.\")\n print()\n sys.exit(1)\n\n\n\nclass labeler():\n \"\"\"\n This class handles the adding of new labeling conditions and the return of the lables\n \"\"\"\n\n conditionsGroup = []\n \"\"\"\n conditionsGroup = [ \n {'Background': [ \n [ {'srcIP': 'all'} ] \n ] }, \n {'Normal': [ \n [ {'Proto':'IGMP'} ],\n [ {'Proto':'ARP'} ]\n ] }, \n {'Botnet-CC': [\n [ {'srcIP': '10.0.0.151'} ], \n [ {'dstIP': '10.0.0.151'} ]\n ] }, \n {'Botnet-SPAM': [\n [ {'Proto': 'TCP'}, {'srcPort': '25'} ], \n [ {'Proto': 'TCP'}, {'dstPort': '25'} ]\n ] }, \n {'Botnet-DGA': [ \n [ {'Proto':'UDP'}, {'srcPort':'53'} ] ,\n [ {'Proto':'UDP'}, {'dstPort':'53'} ] \n ] } \n ]\n \"\"\"\n\n def addCondition(self,condition):\n \"\"\"\n Add a condition.\n Input: condition is a string?\n \"\"\"\n try:\n global debug\n global verbose\n\n self.conditionsGroup.append(condition)\n\n if debug:\n print('\\tCondition added: {0}'.format(condition))\n\n except Exception as inst:\n print('Problem in addCondition() in class labeler')\n print(type(inst)) # the exception instance\n print(inst.args) # arguments stored in .args\n print(inst) # __str__ allows args to printed directly\n exit(-1)\n\n\n def getLabel(self,netflowLine):\n \"\"\"\n Get a netflow line and return a label\n Input: netflowLine is a string? or a dictionary?\n \"\"\"\n try:\n global debug\n global verbose\n\n\n #if debug:\n # print 'Netflow line asked: {0}'.format(netflowLine)\n\n # Default to empty label\n labelToReturn= \"\"\n\n # Convert the neflowLine array to a dict...\n netflowDict = {}\n for item in netflowLine:\n name = list(item.keys())[0]\n netflowDict[name] = item[name]\n\n \n # Process all the conditions \n #if debug:\n # print 'Processing the conditions'\n for group in self.conditionsGroup:\n labelToVerify = list(group.keys())[0]\n if debug:\n print('\\tLabel to verify {0}'.format(labelToVerify))\n\n orConditions = group[labelToVerify]\n #if debug:\n #print '\\t\\tOr conditions group : {0}'.format(orConditions)\n\n\n # orConditions is an array. 
Each position of this array should be ORed with the next position\n for andcondition in orConditions:\n # If any of these andConditions groups is true, just return the label, because this for is an 'OR'\n #if debug:\n #print '\\t\\tAnd condition group : {0}'.format(andcondition)\n\n # With this we keep control of how each part of the and is going...\n allTrue = True\n for acond in andcondition:\n #if debug:\n #print '\\t\\t\\tAnd this with : {0}'.format(acond)\n\n condColumn = list(acond.keys())[0]\n condValue = acond[condColumn].upper()\n\n netflowValue = netflowDict[condColumn]\n if debug:\n print('\\t\\tField: {0}, Condition value: {1}, Netflow value: {2}'.format(condColumn, condValue, netflowValue))\n \n if condValue.find('!') != -1:\n # This is negative condition\n temp = condValue.split('!')[1]\n condValue = temp\n if (condValue != netflowValue) or (condValue == 'ALL') :\n allTrue = True\n if debug:\n print('\\t\\t\\tTrue (negative)')\n continue\n else:\n if debug:\n print('\\t\\t\\tFalse (negative)')\n allTrue = False\n break\n elif condValue.find('!') == -1:\n # This is positive condition\n if (condColumn == 'Bytes') or (condColumn == 'Packets'):\n # We should be greater than or equal to these values...\n if (int(condValue) <= int(netflowValue)) or (condValue == 'ALL') :\n allTrue = True\n if debug:\n print('\\t\\t\\tTrue')\n continue\n else:\n if debug:\n print('\\t\\t\\tFalse')\n allTrue = False\n break\n elif (condValue == netflowValue) or (condValue == 'ALL') :\n allTrue = True\n #if debug:\n # print '\\t\\t\\tTrue'\n continue\n else:\n if debug:\n print('\\t\\t\\tFalse')\n allTrue = False\n break\n\n if allTrue:\n labelToReturn = labelToVerify\n if debug:\n print('\\tNew label assigned: {0}'.format(labelToVerify))\n \n if verbose:\n if 'Background' in labelToReturn:\n #if verbose:\n print('\\tFinal label assigned: {0}'.format(labelToReturn))\n else:\n print('\\tFinal label assigned: \\x1b\\x5b1;31;40m{0}\\x1b\\x5b0;0;40m'.format(labelToReturn))\n #if debug:\n # raw_input()\n return labelToReturn\n\n\n\n\n except Exception as inst:\n print('Problem in getLabel() in class labeler')\n print(type(inst)) # the exception instance\n print(inst.args) # arguments stored in .args\n print(inst) # __str__ allows args to printed directly\n exit(-1)\n\n\n\n\ndef output_netflow_line_to_file(outputfile, netflowArray):\n \"\"\"\n Get a netflow dictionary and store it on a new file\n \"\"\"\n try:\n global debug\n global verbose\n #if debug:\n # print 'NetFlowArray: {}'.format(netflowArray)\n \n if list(netflowArray[12].keys())[0] == 'Flows':\n # nfdump\n outputline = str(netflowArray[0]['Date']) + ' ' + str(netflowArray[1]['start']) + '\\t\\t' + str(netflowArray[2]['Duration']) + ' ' + str(netflowArray[3]['Proto']) + '\\t' + str(netflowArray[4]['srcIP']) + ':' + str(netflowArray[5]['srcPort']) + '\\t->' + ' ' + str(netflowArray[6]['dstIP']) + ':' + str(netflowArray[7]['dstPort']) + ' ' + str(netflowArray[8]['Flags']) + ' ' + str(netflowArray[9]['Tos']) + ' ' + str(netflowArray[10]['Packets']) + ' ' + str(netflowArray[11]['Bytes']) + ' ' + str(netflowArray[12]['Flows']) + ' ' + str(netflowArray[13]['Label']) + '\\n'\n else:\n # argus\n outputline = str(netflowArray[0]['Date']) + ' ' + str(netflowArray[1]['start']) + '\\t\\t' + str(netflowArray[2]['Duration']) + ' ' + str(netflowArray[3]['Proto']) + '\\t' + str(netflowArray[4]['srcIP']) + '\\t' + str(netflowArray[5]['srcPort']) + '\\t->' + ' ' + str(netflowArray[6]['dstIP']) + '\\t' + str(netflowArray[7]['dstPort']) + ' ' + 
str(netflowArray[8]['Flags']) + ' ' + str(netflowArray[9]['Tos']) + ' ' + str(netflowArray[10]['Packets']) + ' ' + str(netflowArray[11]['Bytes']) + ' ' + str(netflowArray[12]['Label']) + '\\n'\n outputfile.writelines(outputline)\n\n\n # write the line\n # keep it open!\n\n except Exception as inst:\n print('Problem in output_labeled_netflow_file()')\n print(type(inst)) # the exception instance\n print(inst.args) # arguments stored in .args\n print(inst) # __str__ allows args to printed directly\n exit(-1)\n\n\ndef process_nfdump(f, headers, netflowFile, labelmachine):\n \"\"\"\n Process and label an nfdump file\n \"\"\"\n # Just to monitor how many lines we read\n amountOfLines = 0\n\n # Parse the file into an array of dictionaries. We will use the columns names as dictionary keys\n # Example: [ {'Date': '10/10/2013} , {'SrcIp':'1.1.1.1} , , ]\n netflowArray = []\n columnDict = {}\n\n # Replace the TABs for spaces, if it has them..., and replace the : in the ports to spaces also, and strip the \\n, and the word flow\n temp2 = headers.replace('flow','')\n temp = re.sub( '\\s+', ' ', temp2 ).replace(':',' ').strip()\n columnNames = temp.split(' ')\n\n # Only to separate src ip from dst ip\n addressType = ''\n\n #if debug:\n # print 'Columns names: {0}'.format(columnNames)\n\n for cN in columnNames:\n # Separate between src ip and dst ip\n if 'Src' in cN:\n addressType = 'src'\n elif 'Dst' in cN:\n addressType = 'dst'\n elif 'IP' in cN:\n columnDict[addressType+cN] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n # Separate ports\n elif 'Port' in cN:\n columnDict[addressType+cN] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n elif 'Addr' in cN:\n pass\n elif 'Prot' in cN:\n columnDict['Proto'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n elif 'Durat' in cN:\n columnDict['Duration'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n elif 'Flow' in cN:\n columnDict['Flows'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n else:\n columnDict[cN] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['Label'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n #if debug:\n #print 'netflowArray'\n #print netflowArray\n\n # Create the output file with the header\n outputfile = open(netflowFile+'.labeled','w+')\n \n # Write the column names\n columnnames = \"Date flow start Duration Proto Src IP Addr:Port Dst IP Addr:Port Flags Tos Packets Bytes Flows Label\\n\"\n outputfile.writelines(columnnames)\n\n\n # Read the second line to start processing\n line = f.readline()\n amountOfLines += 1\n while (line):\n if verbose:\n print('Netflow line: {0}'.format(line), end=' ')\n\n # Parse the columns\n # Strip and replace ugly stuff\n temp2 = line.replace('->','')\n temp = re.sub( '\\s+', ' ', temp2 ).strip()\n columnValues = temp.split(' ')\n\n\n #if debug:\n # print columnValues\n\n # Date\n date = columnValues[0]\n # Store the value in the dict\n dict = netflowArray[0]\n columnName = list(dict.keys())[0] \n dict[columnName] = date\n netflowArray[0] = dict\n\n hour = columnValues[1]\n # Store the value in the dict\n dict = netflowArray[1]\n columnName = list(dict.keys())[0] \n dict[columnName] = hour\n netflowArray[1] = dict\n\n duration = columnValues[2]\n # Store the value in the dict\n dict = netflowArray[2]\n columnName = list(dict.keys())[0] \n dict[columnName] = duration\n netflowArray[2] = dict\n\n protocol = columnValues[3].upper()\n # Store the value in the dict\n dict = netflowArray[3]\n columnName = 
list(dict.keys())[0] \n #columnName = 'Proto'\n dict[columnName] = protocol\n netflowArray[3] = dict\n\n \n if 'TCP' in protocol or 'UDP' in protocol or 'RTP' in protocol:\n temp = columnValues[4]\n if len(temp.split(':')) <= 2:\n # It is IPV4\n srcip = temp.split(':')[0]\n # Store the value in the dict\n dict = netflowArray[4]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcip\n netflowArray[4] = dict\n\n srcport = temp.split(':')[1]\n # Store the value in the dict\n dict = netflowArray[5]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcport\n netflowArray[5] = dict\n\n temp2 = columnValues[5]\n dstip = temp2.split(':')[0]\n # Store the value in the dict\n dict = netflowArray[6]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstip\n netflowArray[6] = dict\n\n dstport = temp2.split(':')[1]\n # Store the value in the dict\n dict = netflowArray[7]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstport\n netflowArray[7] = dict\n elif len(temp.split(':')) > 2:\n # We are using ipv6! THIS DEPENDS A LOT ON THE program that created the netflow..\n srcip = temp[0:temp.rfind(':')]\n # Store the value in the dict\n dict = netflowArray[4]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcip\n netflowArray[4] = dict\n\n srcport = temp[temp.rfind(':')+1:]\n # Store the value in the dict\n dict = netflowArray[5]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcport\n netflowArray[5] = dict\n\n temp2 = columnValues[5]\n dstip = temp2[0:temp2.rfind(':')]\n # Store the value in the dict\n dict = netflowArray[6]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstip\n netflowArray[6] = dict\n\n dstport = temp2[temp2.rfind(':')+1:]\n # Store the value in the dict\n dict = netflowArray[7]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstport\n netflowArray[7] = dict\n else:\n print() \n print('Please implement this protocol!')\n print(line)\n sys.exit(-1)\n elif protocol == 'IPNIP' or protocol == 'RSVP' or protocol == 'GRE' or protocol == 'UDT' or protocol == 'ARP' or protocol == 'ICMP' or protocol == 'PIM' or protocol == 'ESP' or protocol == 'UNAS' or protocol == 'IGMP' or 'IPX' in protocol or 'RARP' in protocol or 'LLC' in protocol or 'IPV6' in protocol:\n srcip = temp = columnValues[4]\n # Store the value in the dict\n dict = netflowArray[4]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcip\n netflowArray[4] = dict\n\n srcport = '0'\n # Store the value in the dict\n dict = netflowArray[5]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcport\n netflowArray[5] = dict\n\n dstip = temp = columnValues[5]\n # Store the value in the dict\n dict = netflowArray[6]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstip\n netflowArray[6] = dict\n\n dstport = '0'\n # Store the value in the dict\n dict = netflowArray[7]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstport\n netflowArray[7] = dict\n\n flags = columnValues[6].upper()\n # Store the value in the dict\n dict = netflowArray[8]\n columnName = list(dict.keys())[0] \n dict[columnName] = flags\n netflowArray[8] = dict\n\n tos = columnValues[7]\n # Store the value in the dict\n dict = netflowArray[9]\n columnName = list(dict.keys())[0] \n dict[columnName] = tos\n netflowArray[9] = dict\n\n packets = columnValues[8]\n # Store the value in the dict\n dict = netflowArray[10]\n columnName = list(dict.keys())[0] \n dict[columnName] = packets\n netflowArray[10] = dict\n\n bytes = columnValues[9]\n # Store the value in the dict\n 
dict = netflowArray[11]\n columnName = list(dict.keys())[0] \n dict[columnName] = bytes\n netflowArray[11] = dict\n\n flows = columnValues[10]\n # Store the value in the dict\n dict = netflowArray[12]\n columnName = list(dict.keys())[0] \n dict[columnName] = flows\n netflowArray[12] = dict\n\n # Empty the label in the dict\n dict = netflowArray[13]\n columnName = list(dict.keys())[0] \n dict[columnName] = \"\"\n netflowArray[13] = dict\n\n #if debug:\n # print date,hour,duration,protocol, srcip, srcport, dstip, dstport, flags, tos, packets, bytes, flows\n # print netflowArray\n\n\n # Request a label\n label = labelmachine.getLabel(netflowArray)\n # Store the value in the dict\n dict = netflowArray[13]\n columnName = list(dict.keys())[0] \n dict[columnName] = label\n netflowArray[13] = dict\n\n #if debug:\n #print netflowArray\n\n # Ask to store the netflow\n output_netflow_line_to_file(outputfile, netflowArray)\n\n\n line = f.readline()\n amountOfLines += 1\n\n # Close the outputfile\n outputfile.close()\n\n\ndef process_netflow(netflowFile, labelmachine):\n \"\"\"\n This function takes the netflowFile and parse it. Then it ask for a label and finally it calls a function to store the netflow in a file\n \"\"\"\n try:\n global debug\n global verbose\n if verbose:\n print('Processing the netflow file {0}'.format(netflowFile))\n\n\n # Read the netflow and parse the input\n try:\n f = open(netflowFile,'r')\n except Exception as inst:\n print('Some problem opening the input netflow file. In process_netflow()')\n print(type(inst)) # the exception instance\n print(inst.args) # arguments stored in .args\n print(inst) # __str__ allows args to printed directly\n exit(-1)\n\n \n \n # How to separate files?\n # nfdump header starts with 'Date'\n # binetflow header starts with 'StarTime'\n # Zeek conn.log file header in TAB format starts with '#separator'\n # Zeek conn.log file header in json format starts with '{\"ts\":' but no space\n\n headers = f.readline()\n\n ##################\n # nfdump processing...\n\n # What are we analyzing nfdump files or argus files?\n if 'Date' not in headers and 'StartTime' not in headers:\n print('The file has not headers. Please add them.')\n sys.exit(-1)\n\n if 'Date' in headers:\n amountOfLines = process_nfdump(f, headers, netflowFile, labelmachine)\n\n ##################\n # Argus processing...\n\n elif 'StartTime' in headers:\n # This is argus files...\n amountOfLines = 0\n\n # Parse the file into an array of dictionaries. 
We will use the columns names as dictionary keys\n # Example: [ {'Date': '10/10/2013} , {'SrcIp':'1.1.1.1} , , ]\n netflowArray = []\n columnDict = {}\n\n # Replace the TABs for spaces, if it has them..., and replace the : in the ports to spaces also, and strip the \\n, and the word flow\n temp = re.sub( '\\s+', ' ', headers ).strip()\n columnNames = temp.split(' ')\n\n #if debug:\n # print 'Columns names: {0}'.format(columnNames)\n\n # So far argus does no have a column Date\n columnDict['Date'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['start'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n \n columnDict['Duration'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['Proto'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['srcIP'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['srcPort'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['dstIP'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['dstPort'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['Flags'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['Tos'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['Packets'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['Bytes'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n columnDict['Label'] = \"\"\n netflowArray.append(columnDict)\n columnDict = {}\n\n\n\n\n # Create the output file with the header\n outputfile = open(netflowFile+'.labeled','w+')\n \n # Write the column names\n columnnames = \"Date Time Dur Proto SrcAddr Sport Dir DstAddr Dport State sTos TotPkts TotBytes Label\\n\"\n outputfile.writelines(columnnames)\n\n\n # Read the second line to start processing\n line = f.readline()\n amountOfLines += 1\n while (line):\n if verbose:\n print('Netflow line: {0}'.format(line), end=' ')\n\n # Parse the columns\n # Strip and replace ugly stuff\n temp2 = line.replace('->','')\n temp = re.sub( '\\s+', ' ', temp2 ).strip()\n columnValues = temp.split(' ')\n\n\n #if debug:\n # print columnValues\n\n # Date\n date = columnValues[0]\n # Store the value in the dict\n dict = netflowArray[0]\n columnName = list(dict.keys())[0] \n dict[columnName] = date\n netflowArray[0] = dict\n\n hour = columnValues[1]\n # Store the value in the dict\n dict = netflowArray[1]\n columnName = list(dict.keys())[0] \n dict[columnName] = hour\n netflowArray[1] = dict\n\n duration = columnValues[2]\n # Store the value in the dict\n dict = netflowArray[2]\n columnName = list(dict.keys())[0] \n dict[columnName] = duration\n netflowArray[2] = dict\n\n protocol = columnValues[3].upper()\n # Store the value in the dict\n dict = netflowArray[3]\n columnName = list(dict.keys())[0] \n dict[columnName] = protocol\n netflowArray[3] = dict\n\n srcIP = columnValues[4]\n # Store the value in the dict\n dict = netflowArray[4]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcIP\n netflowArray[4] = dict\n\n if 'ARP' in protocol:\n srcPort = '0' \n # Store the value in the dict\n dict = netflowArray[5]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcPort\n netflowArray[5] = dict\n else:\n srcPort = columnValues[5]\n # Store the value in the dict\n dict = netflowArray[5]\n columnName = list(dict.keys())[0] \n dict[columnName] = srcPort\n netflowArray[5] = dict\n\n\n dstIP = columnValues[6] \n # Store the value in the 
dict\n dict = netflowArray[6]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstIP\n netflowArray[6] = dict\n\n\n if 'ARP' in protocol:\n dstPort = '0' \n # Store the value in the dict\n dict = netflowArray[7]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstPort\n netflowArray[7] = dict\n\n Flags = columnValues[8]\n # Store the value in the dict\n dict = netflowArray[8]\n columnName = list(dict.keys())[0] \n dict[columnName] = Flags\n netflowArray[8] = dict\n\n else:\n dstPort = columnValues[7]\n # Store the value in the dict\n dict = netflowArray[7]\n columnName = list(dict.keys())[0] \n dict[columnName] = dstPort\n netflowArray[7] = dict\n\n Flags = columnValues[8]\n # Store the value in the dict\n dict = netflowArray[8]\n columnName = list(dict.keys())[0] \n dict[columnName] = Flags\n netflowArray[8] = dict\n\n\n\n if 'LLC' in protocol:\n Tos = '0'\n # Store the value in the dict\n dict = netflowArray[9]\n columnName = list(dict.keys())[0] \n dict[columnName] = Tos\n netflowArray[9] = dict\n\n Packets = columnValues[9]\n # Store the value in the dict\n dict = netflowArray[10]\n columnName = list(dict.keys())[0] \n dict[columnName] = Packets\n netflowArray[10] = dict\n\n Bytes = columnValues[10]\n # Store the value in the dict\n dict = netflowArray[11]\n columnName = list(dict.keys())[0] \n dict[columnName] = Bytes\n netflowArray[11] = dict\n\n # Request a label\n label = labelmachine.getLabel(netflowArray)\n # Store the value in the dict\n dict = netflowArray[12]\n columnName = list(dict.keys())[0] \n dict[columnName] = label\n netflowArray[12] = dict\n elif 'ARP' in protocol:\n Tos = '0'\n # Store the value in the dict\n dict = netflowArray[9]\n columnName = list(dict.keys())[0] \n dict[columnName] = Tos\n netflowArray[9] = dict\n\n Packets = columnValues[8]\n # Store the value in the dict\n dict = netflowArray[10]\n columnName = list(dict.keys())[0] \n dict[columnName] = Packets\n netflowArray[10] = dict\n\n Bytes = columnValues[9]\n # Store the value in the dict\n dict = netflowArray[11]\n columnName = list(dict.keys())[0] \n dict[columnName] = Bytes\n netflowArray[11] = dict\n\n # Request a label\n label = labelmachine.getLabel(netflowArray)\n # Store the value in the dict\n dict = netflowArray[12]\n columnName = list(dict.keys())[0] \n dict[columnName] = label\n netflowArray[12] = dict\n else:\n Tos = columnValues[9]\n # Store the value in the dict\n dict = netflowArray[9]\n columnName = list(dict.keys())[0] \n dict[columnName] = Tos\n netflowArray[9] = dict\n\n Packets = columnValues[10]\n # Store the value in the dict\n dict = netflowArray[10]\n columnName = list(dict.keys())[0] \n dict[columnName] = Packets\n netflowArray[10] = dict\n\n Bytes = columnValues[11]\n # Store the value in the dict\n dict = netflowArray[11]\n columnName = list(dict.keys())[0] \n dict[columnName] = Bytes\n netflowArray[11] = dict\n\n # Request a label\n label = labelmachine.getLabel(netflowArray)\n # Store the value in the dict\n dict = netflowArray[12]\n columnName = list(dict.keys())[0] \n dict[columnName] = label\n netflowArray[12] = dict\n\n #if debug:\n # print netflowArray\n\n # Ask to store the netflow\n output_netflow_line_to_file(outputfile, netflowArray)\n\n line = f.readline()\n amountOfLines += 1\n\n # Close the outputfile\n outputfile.close()\n\n # End while\n\n\n print('Amount of lines read: {0}'.format(amountOfLines))\n\n # Ask for a label\n # Call a function to store the new netflow\n \n\n except Exception as inst:\n print('Problem in process_netflow()')\n 
print(type(inst))    # the exception instance\n        print(inst.args)     # arguments stored in .args\n        print(inst)          # __str__ allows args to be printed directly\n        sys.exit(-1)\n\n\ndef loadConditions(configFile, labelmachine):\n    global debug\n    global verbose\n\n    conditionsList = []\n    try:\n        try:\n            if verbose:\n                print('Opening the configuration file \\'{0}\\''.format(configFile))\n            conf = open(configFile)\n        except:\n            print('The file \\'{0}\\' couldn\\'t be opened.'.format(configFile))\n            sys.exit(1)\n\n\n        if debug:\n            print('Loading the conditions from the configuration file ')\n\n        # Read the conf file\n        line = conf.readline()\n        conditions = {}\n\n        while (line):\n            # Ignore comments and skip blank lines, which would otherwise\n            # crash the first-character checks below\n            if line.strip().find('#') == 0 or not line.strip():\n                line = conf.readline()\n                continue\n\n            # Read a label\n            if line.strip()[0] != '-':\n                label = line.split(':')[0]\n                #if debug:\n                #    print 'Label: {}'.format(label)\n                conditions[label]=[]\n\n                # Now read all the conditions for this label\n                line = conf.readline()\n                while (line):\n                    if line.strip() and line.strip()[0] == '-':\n                        # Condition\n                        tempAndConditions = line.strip().split('-')[1]\n                        #if debug:\n                        #    print 'Condition: {}'.format(tempAndConditions)\n                        andConditions = []\n                        for andCond in tempAndConditions.split('&'):\n                            tempdict = {}\n                            tempdict[andCond.strip().split('=')[0]] = andCond.strip().split('=')[1]\n                            andConditions.append(tempdict)\n\n                        conditions[label].append(andConditions)\n\n                        line = conf.readline()\n                    else:\n                        break\n                labelmachine.addCondition(conditions)\n                conditions = {}\n\n    except KeyboardInterrupt:\n        # CTRL-C pretty handling.\n        print(\"Keyboard Interruption!. Exiting.\")\n        sys.exit(1)\n    except Exception as inst:\n        print('Problem in loadConditions() at netflowlabeler.py')\n        print(type(inst))    # the exception instance\n        print(inst.args)     # arguments stored in .args\n        print(inst)          # __str__ allows args to be printed directly\n        return False\n\n\n\n\ndef main():\n    try:\n        global debug\n        global verbose\n\n        netflowFile = \"\"\n        confFile = \"\"\n\n        opts, args = getopt.getopt(sys.argv[1:], \"VvDhf:c:\", [\"help\",\"version\",\"verbose\",\"debug\",\"file=\",\"conf=\"])\n    except getopt.GetoptError:\n        # Bad command line options: show the usage and stop here, so the\n        # loop below never sees an undefined 'opts'\n        usage()\n        sys.exit(1)\n\n    for opt, arg in opts:\n        if opt in (\"-h\", \"--help\"): usage()\n        if opt in (\"-V\", \"--version\"): version();sys.exit(-1)\n        if opt in (\"-v\", \"--verbose\"): verbose = True\n        if opt in (\"-D\", \"--debug\"): debug = 1\n        if opt in (\"-f\", \"--file\"): netflowFile = str(arg)\n        if opt in (\"-c\", \"--conf\"): confFile = str(arg)\n    try:\n        try:\n            if debug:\n                verbose = True\n\n            if netflowFile == \"\" or confFile == \"\":\n                usage()\n                sys.exit(1)\n\n            elif netflowFile != \"\" and confFile != \"\":\n                # Print version information\n                version()\n\n                # Create an instance of the labeler\n                labelmachine = labeler()\n\n                # Load conditions\n                loadConditions(confFile,labelmachine)\n\n                # Direct process of netflow flows\n                process_netflow(netflowFile, labelmachine)\n\n            else:\n                usage()\n                sys.exit(1)\n\n        except Exception as e:\n            print(\"misc. exception (runtime error from user callback?):\", e)\n        except KeyboardInterrupt:\n            sys.exit(1)\n\n\n    except KeyboardInterrupt:\n        # CTRL-C pretty handling.\n        print(\"Keyboard Interruption!. 
Exiting.\")\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"netflowlabeler.py","file_name":"netflowlabeler.py","file_ext":"py","file_size_in_byte":38421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"371633477","text":"##\n# Imports\n##\nimport boto3, json, sys\nfrom botocore.exceptions import ClientError\nfrom common.configuration import Configuration\nfrom common.server_model import ServerModel\nfrom common.logger import Logger\n\n\nclass DynamoDB:\n def __init__(self):\n dynamodb = boto3.resource('dynamodb')\n self.logger = Logger(\"HSDO.dynamodb\")\n self.table = dynamodb.Table(Configuration().get(\"DYNAMODB_TABLE_NAME\"))\n self.backendListSize = Configuration().get(\"HAPROXY_BACKEND_SERVERS_LIST_SIZE\")\n\n def checkTableReady(self):\n try:\n self.table.table_status\n except ClientError:\n self.logger.error(\"Table %s not found.\" % self.table)\n sys.exit(2)\n\n def updateServer(self, server):\n self.table.update_item(\n Key={\n \"BackendServerID\": server.backendServerID,\n },\n UpdateExpression=\"SET IPAddress = :IPAddress, ServerName = :ServerName, ASG = :ASG, Weight = :Weight, BackendServerStatus = :BackendServerStatus, LastWeightUpdate = :LastWeightUpdate\",\n ExpressionAttributeValues={\n ':IPAddress': server.IPAddress,\n ':ASG': server.ASG,\n ':ServerName': server.serverName,\n ':Weight': server.weight,\n ':BackendServerStatus': server.backendServerStatus,\n ':LastWeightUpdate': server.lastWeightUpdate,\n }\n )\n\n def createServer(self, server):\n self.table.put_item(\n Item={\n 'BackendServerID': server.backendServerID,\n 'IPAddress': server.IPAddress,\n 'ASG': server.ASG,\n 'ServerName': server.serverName,\n 'Weight': server.weight,\n 'BackendServerStatus': server.backendServerStatus,\n 'LastWeightUpdate': server.lastWeightUpdate,\n }\n )\n\n def listServers(self):\n servers = []\n\n response = self.table.scan()\n for item in response['Items']:\n servers.append(self.fillServer(item))\n while 'LastEvaluatedKey' in response:\n response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])\n for item in response['Items']:\n servers.append(self.fillServer(item))\n\n return servers\n\n def fillServer(self, item):\n s = ServerModel()\n s.backendServerID = int(item[\"BackendServerID\"])\n # if asg column isn't existing (problem occured when migrating from v2 to v3)\n if \"ASG\" in item:\n s.ASG = item[\"ASG\"]\n s.IPAddress = item[\"IPAddress\"]\n s.serverName = item[\"ServerName\"]\n s.weight = int(item[\"Weight\"])\n s.lastWeightUpdate = item[\"LastWeightUpdate\"]\n s.backendServerStatus = item[\"BackendServerStatus\"]\n return s\n","sub_path":"src/common/dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"334242010","text":"\"\"\"\nTest the Rosetta force field\n\"\"\"\nimport isdhic\nimport numpy as np\n\nfrom scipy import optimize\nfrom isdhic.core import take_time\n\nn_particles = 100\nboxsize = 10\ncoords = np.ascontiguousarray(np.random.rand(3*n_particles) * boxsize)\nuniverse = isdhic.Universe(n_particles)\nforcefield = isdhic.ForcefieldFactory.create_forcefield('rosetta', universe)\n\nprint(forcefield.energy(coords)) \n\nE = forcefield.ctype.update_gradient(coords, universe.forces, forcefield.types, 1)\nprint(E)\n\na = universe.forces.flatten()\n\nmsg = 'eps={0:.0e}, norm={1:.2e}, corr={2:.1f}'\n\nfor eps in np.logspace(-3,-8,6):\n\n b = 
optimize.approx_fprime(coords, forcefield.energy, eps)\n print(msg.format(eps, np.fabs((a-b)/np.fabs(a)).max(), np.corrcoef(a,b)[0,1]*100))\n","sub_path":"tests/test_rosetta.py","file_name":"test_rosetta.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"447573628","text":"from blender.render_utils import OpenGLRenderer\nimport os\nfrom config import cfg\nimport numpy as np\nfrom base_utils import PoseTransformer, read_pose, read_pickle, save_pickle\nimport cv2\nimport random\nfrom tqdm import tqdm\n\ndef fuse(img, mask, background):\n background = cv2.resize(background,(img.shape[1], img.shape[0]))\n silhouette = mask > 0\n background[silhouette] = img[silhouette]\n return background\n \nuse_background = True\nr = OpenGLRenderer()\nbg_imgs_path = os.path.join(cfg.DATA_DIR, 'bg_imgs.npy')\nbg_imgs = np.load(bg_imgs_path)\nfor class_type in tqdm(cfg.linemod_cls_names):\n dir_path = os.path.join(cfg.LINEMOD_ORIG,'{}/data'.format(class_type))\n train_set = np.loadtxt(os.path.join(cfg.LINEMOD, '{}/training_range.txt'.format(class_type)),np.int32)\n output_path = os.path.join(cfg.DATA_DIR, 'renders/{}'.format(class_type))\n trans = PoseTransformer(class_type)\n os.makedirs(output_path, exist_ok=True)\n for idx in tqdm(train_set):\n rot_path = os.path.join(dir_path, 'rot{}.rot'.format(idx))\n tra_path = os.path.join(dir_path, 'tra{}.tra'.format(idx))\n pose = read_pose(rot_path, tra_path)\n pose = trans.orig_pose_to_blender_pose(pose)\n rot, tra = pose[:, :3], pose[:, 3]\n rgb, mask = r.render(class_type, pose, intrinsic_matrix=r.intrinsic_matrix['linemod'], render_type='all')\n rgb = rgb[:,:,[2,1,0]] # opencv use bgr order instead of rgb\n if use_background:\n background = cv2.imread(bg_imgs[random.randint(0, bg_imgs.shape[0]-1)], 1)\n else:\n background = np.zeros_like(rgb)\n rgb = fuse(rgb, mask, background)\n retval = cv2.imwrite(os.path.join(output_path, '{}.jpg'.format(idx)), rgb)\n \n","sub_path":"data_gen.py","file_name":"data_gen.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"250634770","text":"# -*- coding: utf-8 -*-\r\n# -*- built with Python 3.6 -*-\r\n\"\"\"\r\nhar_lstm_predict.py\r\n\r\nLoads in a LSTM saved as a TensorFlow .meta object and then performs a prediction\r\non incoming data. Outputs a single integer for each 128 time step data instance\r\nprovided corresponding to the predicted class. WIll continously query STDIN for \r\na new data matrix file containing a single data series instance. Will print prediction\r\nfor each vector it receives to STDOUT. Data instance should be pickled numpy arrays\r\nof size 128 x 9 (timesteps x input channels). Feed the filename into STDIN.\r\n\r\nOutput class correspondances:\r\n \r\n1 = WALKING\r\n2 = WALKING_UPSTAIRS\r\n3 = WALKING_DOWNSTAIRS\r\n4 = SITTING\r\n5 = STANDING\r\n6 = LAYING\r\n\r\nHOW TO RUN:\r\n \r\n python har_lstm_predict.py {1}\r\n \r\nWHERE:\r\n \r\n {1} = The name of the TensorFlow .meta graph (ex. 
\"saved_model/har_lstm_graph\")\r\n\r\n@author: Brody Kutt (bjk4704@rit.edu), Poppy Immel (pgi8114@rit.edu,), Zach Lauzon (zrl3031@rit.edu)\r\n\"\"\"\r\n\r\nimport tensorflow as tf # built with version 0.12.1\r\nimport numpy as np\r\nimport sys\r\nimport pickle\r\n \r\n\r\nif __name__ == '__main__':\r\n if(len(sys.argv) == 2):\r\n \r\n # Create a clean graph and import the MetaGraphDef nodes\r\n tf.reset_default_graph()\r\n with tf.Session() as sess:\r\n # Import the previously exported meta graph and variables\r\n saver = tf.train.import_meta_graph(sys.argv[1] + '.meta')\r\n saver.restore(sess, sys.argv[1])\r\n all_vars = tf.get_collection('vars')\r\n X = all_vars[0]\r\n Y = all_vars[1]\r\n Pred_Y = all_vars[2]\r\n # Continuously query stdin for new data instances\r\n while(True):\r\n print('Please enter filename...')\r\n file = input()\r\n try:\r\n data = pickle.load(open(file, \"rb\" ))\r\n except FileNotFoundError:\r\n print(\"Wrong file or file path provided.\")\r\n sys.exit(1)\r\n \r\n data = np.reshape(data, (1, 128, 9)) # reformat data into 3D tensor\r\n feed_dict = {X: data} # Create a feed_dict with data\r\n pred = sess.run(Pred_Y, feed_dict=feed_dict) # make prediction\r\n print(np.argmax(pred)+1) # print result to STDOUT (classes are indexed from 1)\r\n else:\r\n print('Wrong number of arguments.')\r\n print(\"\"\"\r\n HOW TO RUN:\r\n \r\n python har_lstm_predict.py {1}\r\n \r\n WHERE:\r\n \r\n {1} = The name of the TensorFlow .meta graph ex. \"saved_model/cnn_graph\"\r\n \"\"\")","sub_path":"server/har_lstm_predict.py","file_name":"har_lstm_predict.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"395456134","text":"#!/usr/bin/env python\r\n\r\n# An easier way to visualize parent/clone relationships in a Redump 1G1R dat\r\n# Used to verify the output of Retool.\r\n\r\n# There isn't much error checking in here, it's rough and only intended to\r\n# work for a limited use case.\r\n\r\nimport html\r\nimport os\r\nimport platform\r\nimport re\r\nimport sys\r\nimport textwrap\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom openpyxl import Workbook\r\nfrom openpyxl.styles import Font, Alignment, PatternFill\r\nfrom openpyxl.utils import cell as Cell\r\n\r\ndef main():\r\n\r\n # Enable VT100 escape sequence for Windows 10 Ver. 1607+\r\n if old_windows() != True and sys.platform.startswith('win'):\r\n enable_vt_mode()\r\n\r\n print(font.bold + '\\nClonerel 0.16' + font.end)\r\n print('-------------')\r\n\r\n if len(sys.argv) == 1:\r\n print(\r\n textwrap.fill(\r\n 'Creates parent/clone visualizations from a Retool-generated 1G1R'\r\n + ' Redump dat.'\r\n )\r\n )\r\n\r\n command = ''\r\n\r\n if 'clonerel.py' in sys.argv[0]:\r\n command = 'python '\r\n\r\n print('\\nUSAGE: ' + font.bold + command + os.path.basename(sys.argv[0]) + font.end + ' ')\r\n sys.exit()\r\n\r\n # Super basic validation\r\n if sys.argv[1].endswith('.dat'):\r\n input_file_name = os.path.abspath(sys.argv[1])\r\n\r\n if not os.path.exists(input_file_name):\r\n print(textwrap.TextWrapper(width=80, subsequent_indent=' ').fill(font.red + '* Input file \"' + font.bold + input_file_name + font.end + font.red + '\" does not exist.' 
+ font.end))\r\n else:\r\n print('* Reading file...')\r\n with open(input_file_name, 'r') as input_file_read:\r\n checkdat = input_file_read.read()\r\n soup = BeautifulSoup(checkdat, \"lxml-xml\")\r\n\r\n print('* Finding parents...')\r\n parent_list = {}\r\n orphan_list = []\r\n\r\n # Restrict scope to just clones\r\n clone_soup = soup.find_all('game', {'cloneof':re.compile('.*')})\r\n\r\n # Generate the parent list from those clones\r\n for item in clone_soup:\r\n parent_title = re.search('cloneof=\".*?\"', str(item)).group()[9:-1]\r\n if parent_title not in parent_list:\r\n parent_list[parent_title] = []\r\n\r\n # Now add the clones to each of those parents\r\n for item in clone_soup:\r\n parent_title = re.search('cloneof=\".*?\"', str(item)).group()[9:-1]\r\n clone_title = re.search('name=\".*?\"', str(item)).group()[6:-1]\r\n\r\n if parent_title in parent_list:\r\n parent_list[parent_title].append(clone_title)\r\n\r\n # Now add titles with no clones\r\n all_soup = soup.find_all('game', {'name':re.compile('.*')})\r\n for item in all_soup:\r\n title = re.search('name=\".*?\"', str(item)).group()[6:-1]\r\n has_clone = bool(re.search('cloneof=\".*?\"', str(item)))\r\n if has_clone == False and title not in parent_list:\r\n orphan_list.append(title)\r\n\r\n parent_list_sorted = {}\r\n\r\n # Do some formatting\r\n sort_list = sorted(parent_list.keys())\r\n\r\n for item in sort_list:\r\n parent_list_sorted[item] = parent_list[item]\r\n\r\n\r\n # Create a parents + orphans list for easier title comparison\r\n # in the Excel file\r\n parent_orphan_list = []\r\n\r\n for item in parent_list_sorted.keys():\r\n parent_orphan_list.append(item)\r\n\r\n for item in orphan_list:\r\n parent_orphan_list.append(item)\r\n\r\n # Create an Excel spreadsheet\r\n print('* Creating Excel file...')\r\n wb = Workbook()\r\n ws = wb.active\r\n\r\n ws.title = 'Parents'\r\n\r\n col = 'A'\r\n row = 2\r\n\r\n # Add the header\r\n ws.merge_cells('A1:B1')\r\n ws['A1'] = 'Parents with clones'\r\n ws['C1'] = 'Parents with orphans'\r\n ws['D1'] = 'Orphans'\r\n ws['A1'].font = Font(bold = True, color = 'ffffffff', size = '12')\r\n ws['A1'].fill = PatternFill(\"solid\", fgColor=\"ff808080\")\r\n ws['A1'].alignment = Alignment(vertical = 'center')\r\n ws['C1'].font = Font(bold = True, color = 'ffffffff', size = '12')\r\n ws['C1'].fill = PatternFill(\"solid\", fgColor=\"ff808080\")\r\n ws['C1'].alignment = Alignment(vertical = 'center')\r\n ws['D1'].font = Font(bold = True, color = 'ffffffff', size = '12')\r\n ws['D1'].fill = PatternFill(\"solid\", fgColor=\"ff808080\")\r\n ws['D1'].alignment = Alignment(vertical = 'center')\r\n\r\n ws.row_dimensions[1].height = 20\r\n ws.freeze_panes = ws['A2']\r\n\r\n # Populate parents that have clones\r\n for item in parent_list_sorted:\r\n ws.merge_cells(col + str(row) + ':' + chr(ord(col) + 1) + str(row))\r\n ws[col + str(row)] = html.unescape(item)\r\n ws[col + str(row)].font = Font(bold = True, size = 12)\r\n\r\n for i, subitem in enumerate(sorted(parent_list_sorted[item])):\r\n row += 1\r\n if i < len(parent_list_sorted[item]) - 1:\r\n ws[chr(ord(col)) + str(row)].alignment = Alignment(horizontal = 'right')\r\n ws[chr(ord(col)) + str(row)] = '├'\r\n else:\r\n ws[chr(ord(col)) + str(row)].alignment = Alignment(horizontal = 'right')\r\n ws[chr(ord(col)) + str(row)] = '└'\r\n\r\n ws[chr(ord(col) + 1) + str(row)] = html.unescape(subitem)\r\n if i == len(parent_list_sorted[item]) - 1:\r\n ws[chr(ord(col)) + str(row + 1)] = ''\r\n row += 2\r\n\r\n # Populate all parents\r\n col = 'C'\r\n 
row = 2\r\n for item in sorted(parent_orphan_list):\r\n ws[col + str(row)] = html.unescape(item)\r\n row += 1\r\n\r\n # Populate orphans\r\n col = 'D'\r\n row = 2\r\n for item in sorted(orphan_list):\r\n ws[col + str(row)] = html.unescape(item)\r\n row += 1\r\n\r\n # Adjust column widths\r\n dims = {}\r\n\r\n for col in ws.rows:\r\n for cell in col:\r\n if cell.value:\r\n dims[cell.column] = max((dims.get(cell.column, 0), len(str(cell.value))))\r\n for col, value in dims.items():\r\n if col != 1:\r\n ws.column_dimensions[Cell.get_column_letter(col)].width = value + 5\r\n\r\n\r\n ws.column_dimensions[Cell.get_column_letter(1)].width = 5\r\n\r\n # Write the file to disk\r\n py_file = sys.argv[0]\r\n path_name = os.path.dirname(py_file)\r\n file_path = os.path.join(path_name, os.path.basename(input_file_name)[:-3] + 'xlsx')\r\n print('* Outputting to \"' + file_path + font.end + '...')\r\n wb.save(file_path)\r\n print('\\nDone!')\r\n\r\n############### Classes and methods ###############\r\n# Console text formatting\r\nclass font:\r\n purple = '\\033[95m'\r\n cyan = '\\033[96m'\r\n darkcyan = '\\033[36m'\r\n blue = '\\033[94m'\r\n green = '\\033[92m'\r\n yellow = '\\033[93m'\r\n white = '\\033[37m'\r\n red = '\\033[91m'\r\n bold = '\\033[1m'\r\n underline = '\\033[4m'\r\n end = '\\033[0m'\r\n blink = '\\033[5m'\r\n\r\ndef enable_vt_mode():\r\n \"\"\" Turns on VT-100 emulation mode for Windows\r\n https://bugs.python.org/issue30075\r\n \"\"\"\r\n\r\n import ctypes\r\n import msvcrt\r\n\r\n from ctypes import wintypes\r\n\r\n kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)\r\n\r\n ERROR_INVALID_PARAMETER = 0x0057\r\n ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004\r\n\r\n def _check_bool(result, func, args):\r\n if not result:\r\n raise ctypes.WinError(ctypes.get_last_error())\r\n return args\r\n\r\n LPDWORD = ctypes.POINTER(wintypes.DWORD)\r\n kernel32.GetConsoleMode.errcheck = _check_bool\r\n kernel32.GetConsoleMode.argtypes = (wintypes.HANDLE, LPDWORD)\r\n kernel32.SetConsoleMode.errcheck = _check_bool\r\n kernel32.SetConsoleMode.argtypes = (wintypes.HANDLE, wintypes.DWORD)\r\n\r\n def set_conout_mode(new_mode, mask=0xffffffff):\r\n # Don't assume StandardOutput is a console.\r\n # Open CONOUT$ instead\r\n fdout = os.open('CONOUT$', os.O_RDWR)\r\n try:\r\n hout = msvcrt.get_osfhandle(fdout)\r\n old_mode = wintypes.DWORD()\r\n kernel32.GetConsoleMode(hout, ctypes.byref(old_mode))\r\n mode = (new_mode & mask) | (old_mode.value & ~mask)\r\n kernel32.SetConsoleMode(hout, mode)\r\n return old_mode.value\r\n finally:\r\n os.close(fdout)\r\n\r\n mode = mask = ENABLE_VIRTUAL_TERMINAL_PROCESSING\r\n\r\n try:\r\n return set_conout_mode(mode, mask)\r\n except WindowsError as e:\r\n if e.winerror == ERROR_INVALID_PARAMETER:\r\n raise NotImplementedError\r\n raise\r\n\r\n\r\ndef old_windows():\r\n if sys.platform.startswith('win'):\r\n if (float(platform.release()) < 10):\r\n return(True)\r\n\r\n return(False)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"clonerel.py","file_name":"clonerel.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"407920980","text":"import re\n'''\nBefore\ndef main():\n fh = open('raven.txt')\n for line in fh:\n if re.search('(Len|Neverm)ore', line):\n print(line, end='')\n\nif __name__ == \"__main__\": main()\n'''\n\n\ndef main():\n # re.compile() More efficient, only need to compile once.\n # re.compile() allow for use other cool methods such as 
re.IGNORECASE\n    fh = open('raven.txt')\n    # Compile the pattern once, before the loop, instead of once per line\n    pattern = re.compile('(Len|Neverm)ore', re.IGNORECASE)\n    for line in fh:\n        # Find the lines where the words Lenore and Nevermore were found\n        if pattern.search(line):\n            # Print the qualifying lines\n            print(line, end='')\n            # Replace Lenore and Nevermore with #### and print the changed lines\n            print(pattern.sub('####', line), end='')\n\n\nif __name__ == \"__main__\": main()","sub_path":"p3_essentials/09 Regexes/regexes_preconpiled.py","file_name":"regexes_preconpiled.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"126848435","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api\nimport odoo.addons.decimal_precision as dp\n\n\nclass AccountInvoice(models.Model):\n    _inherit = 'account.invoice'\n\n    def _total_discount_po(self):\n        for rec in self:\n            discount_amount_po = 0\n            for line in rec.invoice_line_ids:\n                discount_amount_po += line.discount_amount_po\n            rec.discount_amount_po = discount_amount_po\n            rec.avg_discount_po = (discount_amount_po*100)/rec.amount_untaxed if rec.amount_untaxed else 0\n\n\n    discount_amount_po = fields.Float('Total Discount', compute=\"_total_discount_po\", digits=dp.get_precision('Discount'))\n    avg_discount_po = fields.Float('Avg Discount', compute=\"_total_discount_po\", digits=dp.get_precision('Discount'))\n    print_discount_po = fields.Boolean('Print Discount')\n    print_discount_amount_po = fields.Boolean('Print Discount Amount')\n\n    def _prepare_invoice_line_from_po_line(self, line):\n        res = super(AccountInvoice, self)._prepare_invoice_line_from_po_line(line)\n        res.update({'multi_discount_po': line.multi_discount,\n                    'discounted_unit_price_po': line.discounted_unit_price,\n                    'discount_per_unit_po': line.discount_per_unit,\n                    'discount': line.disocunt_po,\n                    'discount_amount_po': line.discount_amount})\n        return res\n\n\nclass AccountInvoiceLine(models.Model):\n    _inherit = 'account.invoice.line'\n\n    def _total_discount_po(self):\n        for rec in self:\n            discount = ((rec.discount*rec.price_unit)/100)\n            rec.discount_per_unit_po = discount\n            rec.discount_amount_po = discount*rec.quantity\n            rec.discounted_unit_price_po = rec.price_unit - discount\n\n    discount_amount_po = fields.Float('Discount Amount', compute=\"_total_discount_po\", digits=dp.get_precision('Discount'))\n    discount_per_unit_po = fields.Float('Discount Per Unit', compute=\"_total_discount_po\", digits=dp.get_precision('Discount'))\n    multi_discount_po = fields.Char('Discounts')\n    discounted_unit_price_po = fields.Float('Discounted Unit Price', compute=\"_total_discount_po\", digits=dp.get_precision('Discount'))\n\n\n    @api.onchange('multi_discount_po')\n    def _onchange_multi_discount(self):\n        def get_discount(percentage, amount):\n            # Apply one percentage step of a chained discount like \"10+5\"\n            new_amount = (percentage * amount)/100\n            return (amount - new_amount)\n        if self.multi_discount_po:\n            amount = 100\n            splited_discounts = self.multi_discount_po.split(\"+\")\n            for discount in splited_discounts:\n                amount = get_discount(float(discount), amount)\n            self.discount = 100 - amount\n        else:\n            self.discount = 0\n\n","sub_path":"beta-dev1/laborindo_modifier_multidiscount_po/models/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"314322625","text":"class Solution(object):\n    def oneEdit(self, s, t): # using an external variable. O(n)\n
        m = len(s)\n        n = len(t)\n        if abs(m-n)>1:\n            return False\n        if m < n:\n            return self.oneEdit(t,s)\n\n        i = 0\n        j = 0\n        edit = 0\n        while(i < m and j < n):\n            if s[i] != t[j]:\n                edit = edit + 1\n                if edit > 1:\n                    return False\n                if m > n:\n                    # extra character in s: skip it and keep comparing\n                    i = i+1\n                    continue\n            i = i+1\n            j = j+1\n\n        return True\n\n    def oneEdit2(self, s, t): # using string concat. O(n)\n        m = len(s)\n        n = len(t)\n        if abs(m-n)>1:\n            return False\n        if m > n:\n            return self.oneEdit2(t,s)\n\n        i = 0\n        j = 0\n        while(i < m and j < n):\n            if m == n and s[i] != t[j]:\n                s = s[:i] + t[j] + s[i+1:] # split and join string as 'ab' + 'b' + 'd'\n                return s == t\n            elif m != n and s[i] != t[j]:\n                s = s[:i] + t[j] + s[i:] # split and join string as 'ab' + 'e' + 'cd'\n                return s == t\n            else:\n                i = i+1\n                j = j+1\n                continue\n        return True\n\ndef main():\n    sol = Solution()\n    s = 'abcd'\n    t = 'abbd'\n    assert sol.oneEdit2(s,t) == True\n    s = 'abdd'\n    t = 'abfg'\n    assert sol.oneEdit2(s,t) == False\n    s = 'abcd'\n    t = 'abecd'\n    assert sol.oneEdit2(s,t) == True\n    s = 'abcd'\n    t = 'abefd'\n    assert sol.oneEdit2(s,t) == False\n    s = 'abcde'\n    t = 'abcd'\n    assert sol.oneEdit2(s,t) == True\n\n\n\nif __name__ == \"__main__\":\n    print ('Calling main')\n    main()","sub_path":"Luxmi/CLEANCODE_HANDBOOK/code/one_edit.py","file_name":"one_edit.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"265699666","text":"import cv2 as cv\n\ncascade_file = cv.CascadeClassifier(\"xml/haarcascade_frontalface_default.xml\")\n\ncap = cv.VideoCapture(0)\n\nwhile cap.isOpened():\n    _, Frame = cap.read()\n\n    gray = cv.cvtColor(Frame,cv.COLOR_BGR2GRAY)\n    faces = cascade_file.detectMultiScale(gray,1.1,4)\n\n    for(x,y,w,h) in faces:\n        cv.rectangle(Frame,(x,y),(x+w,y+h),(255,0,0),3)\n\n    cv.imshow(\"image\",Frame)\n    if cv.waitKey(1) & 0xFF==ord('e'):\n        break\n\ncap.release()\ncv.destroyAllWindows()","sub_path":"Practice_in_steps/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"28888403","text":"# -*- coding: utf-8 -*-\r\n'''Holiday gift service\r\n'''\r\nimport endPoint\r\nimport holiday_pb2\r\n\r\nclass cService(holiday_pb2.terminal2main):\r\n\t\r\n\t@endPoint.result\r\n\tdef rpcHolidayTake(self, ep, who, reqMsg): return rpcHolidayTake(who, reqMsg)\r\n\r\ndef rpcHolidayTake(who, reqMsg):\r\n\t'''Claim the holiday gift\r\n\t'''\r\n\tholidayId = holidayData.getCurrentHoliday()#reqMsg.holidayId\r\n\tif holiday.isTakeGift(who, holidayId): # already claimed\r\n\t\treturn\r\n\tif not holidayId or holidayId != holidayData.getCurrentHoliday(): # not the current holiday, or no holiday is running\r\n\t\tmessage.tips(who,\"The holiday has passed, but there will be a big gift at the next one\")\r\n\t\trpcHolidayUI(who)\r\n\t\treturn\r\n\t\r\n\tlevelLimit = holidayData.getConfig(holidayId, \"领取等级\")  # config key: claim level\r\n\tif who.level < levelLimit:\r\n\t\treturn\r\n\r\n\trewardData = holidayData.getConfig(holidayId, \"奖励\")  # config key: reward\r\n\tif not who.propsCtn.validCapacity(rewardData):\r\n\t\tmessage.tips(who, \"Your bag is full, please clear it before claiming the holiday gift\")\r\n\t\treturn\r\n\r\n\tholiday.markTakeGift(who, holidayId)\r\n\trpcHolidayChange(who, holidayId, \"isTaken\")\r\n\twriteLog(\"holiday/take\", \"%d %d\" % (who.id, holidayId))\r\n\t\r\n\tfor propsNo, amount in rewardData.iteritems():\r\n\t\tlaunch.launchBySpecify(who, propsNo, amount, False, \"节日礼物\")  # source tag: holiday gift\r\n\r\n\ttask.removeTask(who,10001)\r\n\tmessage.tips(who,\"You claimed the holiday prize\")\r\n\r\n#===============================================================================\r\n# Messages from the server to the client\r\n#===============================================================================\r\ndef packetStateMsg(who, holidayId):\r\n
\t'''Holiday state\r\n\t'''\r\n\tisTaken = holiday.isTakeGift(who, holidayId)\r\n\r\n\tmsgObj = holiday_pb2.stateMsg()\r\n\tmsgObj.holidayId = holidayId\r\n\tmsgObj.isTaken = isTaken\r\n\treturn msgObj\r\n\r\ndef packetUIMsg(who):\r\n\t'''Holiday gift UI information\r\n\t'''\r\n\tholidayId = holidayData.getCurrentHoliday()\r\n\tif holidayId:\r\n\t\tstateMsgObj = packetStateMsg(who, holidayId)\r\n\telse:\r\n\t\tstateMsgObj = None\r\n\t\r\n\tmsgObj = holiday_pb2.UIMsg()\r\n\tif stateMsgObj:\r\n\t\tmsgObj.currentHoliday.CopyFrom(stateMsgObj)\r\n\tmsgObj.currentTime = getSecond()\r\n\treturn msgObj\r\n\r\ndef rpcHolidayUI(who):\r\n\t'''Open the holiday gift UI\r\n\t'''\r\n\tmsgObj = packetUIMsg(who)\r\n\twho.endPoint.rpcHolidayUI(msgObj)\r\n\r\ndef rpcHolidayChange(who, holidayId, *attrNameList):\r\n\t'''Push a holiday state change\r\n\t'''\r\n\tmsg = {\"holidayId\": holidayId}\r\n\tfor attrName in attrNameList:\r\n\t\tif attrName == \"isTaken\":\r\n\t\t\tisTaken = holiday.isTakeGift(who, holidayId)\r\n\t\t\tmsg[attrName] = isTaken\r\n\t\t\t\r\n\twho.endPoint.rpcHolidayChange(**msg)\r\n\r\n\r\nfrom common import *\r\nimport holidayData\r\nimport holiday\r\nimport message\r\nimport launch\r\nimport task","sub_path":"logic/holiday/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"395093113","text":"from django import template\nfrom user_profile.models import *\n\nregister = template.Library()\n\n@register.filter(name='profile_slug')\ndef profile_slug(user):\n\tif user.is_authenticated:\n\t\tprofile_slug = UserProfile.objects.get(user=user).slug\n\telse:\n\t\t# Not logged in\n\t\tprofile_slug = None\n\n\treturn profile_slug\n\n\n@register.filter(name='show_verify')\ndef verified_number(user):\n\tval = True\n\tif user.is_authenticated:\n\t\tprofile = UserProfile.objects.get(user=user)\n\t\tverified_number = profile.verified_number\n\t\t# If already verified, return False (hides button)\n\t\tif verified_number or not profile.phonenumber:\n\t\t\tval = False\n\t\t# If not verified yet, return True (shows button)\n\t\telse:\n\t\t\tval = True\n\n\treturn val\n\n\n\n@register.filter(name=\"website\")\ndef get_website_style(obj):\n\tuser_style = UserStyle.objects.all()\n\tif len(user_style)>0:\n\t\treturn user_style[0].website\n\n\treturn None\n\n@register.filter(name=\"admin_page\")\ndef get_admin_page_style(obj):\n\tuser_style = UserStyle.objects.all()\n\tif len(user_style)>0:\n\t\treturn user_style[0].admin_page\n\n\treturn None","sub_path":"apps/user_profile/templatetags/user_profile_template_tags.py","file_name":"user_profile_template_tags.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"635019045","text":"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n      http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport logging as log\n\nimport numpy as np\n\nfrom mo.front.extractor import FrontExtractorOp\n
from mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version, onnx_node_has_attr\nfrom mo.ops.clamp import Clamp, AttributedClamp\n\n\nclass ClipFrontExtractor(FrontExtractorOp):\n    op = 'Clip'\n    enabled = True\n\n    @classmethod\n    def extract(cls, node):\n        if get_onnx_opset_version(node) < 11:\n            attrs = {\n                'min': onnx_attr(node, 'min', 'f', np.finfo(np.float32).min),\n                'max': onnx_attr(node, 'max', 'f', np.finfo(np.float32).max),\n            }\n            AttributedClamp.update_node_stat(node, attrs)\n        else:\n            if onnx_node_has_attr(node, 'min') or onnx_node_has_attr(node, 'max'):\n                log.error(\"ONNX Clip-11 operation '{}' shouldn't have attributes 'min' and 'max'; this may mean that \"\n                          \"this operation was created with an older opset version.\".format(\n                    node.soft_get('name', node.id)), extra={'is_warning': True})\n            Clamp.update_node_stat(node)\n        return cls.enabled\n","sub_path":"model-optimizer/extensions/front/onnx/clip_ext.py","file_name":"clip_ext.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"144134697","text":"bound = 2 ** 51 * 9999\n\ndef f(n):\n    def rev(n):\n        r = 0\n        while n > 0:\n            r = r * 10 + n % 10\n            n //= 10\n        return r\n\n    def Lychrel(n):\n        if n > bound:\n            return True\n        r = rev(n)\n        if r == n:\n            return False\n        return Lychrel(r + n)\n    \n    return Lychrel(n + rev(n))\n\nprint(len(list(filter(f, range(10000)))))\n","sub_path":"page02/55.py","file_name":"55.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"522771040","text":"from termcolor import colored\nfrom datetime import date\n\nano = int(input(\"Enter any year, or 0 for the current year: \"))\n\nif ano == 0:\n    ano = date.today().year\n\nif (ano % 4 == 0) and (ano % 100 != 0) or (ano % 400 == 0):\n    print(f\"The year {ano} is a {colored('LEAP YEAR', 'yellow')}\")\nelse:\n    print(f\"The year {ano} is {colored('NOT', 'red')} a {colored('LEAP YEAR', 'yellow')}\")\n","sub_path":"Curso em Video/ex032.py","file_name":"ex032.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"109591673","text":"import re\nimport socket\nimport urllib2\nfrom BeautifulSoup import BeautifulSoup\n\n# timeout in seconds for each attempt to load a webpage\nsocket.setdefaulttimeout(10)\n\n# number of failures to allow before giving up on a page\nFAILS = 100\n\ndef mysoupopen(url):\n    loaded = False\n    ret = None\n    fails = 0\n    while(not loaded):\n        try:\n            ret = BeautifulSoup(urllib2.urlopen(url))\n            loaded = True\n        except:\n            fails += 1\n            if(fails > FAILS):\n                break\n    return ret\n\ndef cleanhtml(str):\n    str = str.replace(\"&quot;\",\"\\\"\")  # &quot; -> \"\n    str = str.replace(\"&reg;\",\"\")  # registered trademark symbol\n    str = str.replace(\"&amp;\",\"&\")  # &amp; -> &\n    return str\n","sub_path":"battleshiplive.com/kies_b_hw1/my_timeseries_sketch/scraper2/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"185045307","text":"# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def swapPairs(self, head: 'ListNode') -> 'ListNode':\n        dummy = ListNode(0)\n        dummy.next = head\n        last = dummy\n        cur = dummy.next\n        if not cur:\n            return dummy.next\n
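        # Walk the list in pairs: hook 'last' to the second node, point the first node past the pair, reverse the pair, then advance two nodes\n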
_next = cur.next\n if not _next:\n return dummy.next\n while cur:\n if _next:\n last.next = _next\n cur.next = _next.next\n _next.next = cur\n\n last = cur\n cur = last.next\n if cur and cur.next:\n _next = cur.next\n else:\n return dummy.next\n","sub_path":"24/python/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"318149826","text":"import xml.etree.ElementTree as ET\nimport requests\nfrom bs4 import BeautifulSoup\nimport sqlite3\ndata = {}\n\n\n#conn = sqlite3.connect('data.db')\n\ntree = ET.parse('sitemap.xml')\nroot = tree.getroot()\n\nfor child in root:\n url = child[0].text\n url = url.replace(\"https://staging.kfzteile24.de:37443/\",\"https://www.kfzteile24.de/\")\n #print(url)\n result = requests.get(url)\n response = result.content\n soup = BeautifulSoup(response, \"html.parser\")\n try:\n h1 = soup.h1\n print(h1)\n except:\n print('error')\n\n #break\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"493656725","text":"from __future__ import print_function\nimport os, time, sys, math\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\nimport numpy as np\nimport time, datetime\nimport argparse\nimport random\nimport os, sys\nimport subprocess\nimport cv2\nimport matplotlib.pyplot as plt\n\nimport matplotlib\n#matplotlib.use('Agg') # to generate plot without X server\n\nfrom utils import data_tool, model_tool, general_tool\nfrom utils.parser import parse\nfrom builders import model_builder\n\nargs = parse()\n\n\n#######################################################################################\n# prepare model and dataset\n#######################################################################################\n\ndataset_dir = (args.dataset_path if args.dataset_path != None else \"dataset/\" + args.dataset)\nclass_names_list, label_values_list, class_names_str = data_tool.get_label_info(dataset_dir)\nnb_class = len(class_names_list)\ndataset_file_name = data_tool.get_dataset_file_name(dataset_dir=dataset_dir)\n\ninput_size = (\n data_tool.get_minimal_size( dataset_dir=dataset_dir )\n if (not args.crop_height and not args.crop_width)\n else {'height':args.crop_height, 'width':args.crop_width} )\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\n# Compute your softmax cross entropy loss\nnet_input = tf.placeholder( tf.float32,shape=[None, input_size['height'], input_size['width'], 3] )\nnet_output = tf.placeholder( tf.float32,shape=[None, input_size['height'], input_size['width'], nb_class] )\n\n# load the model\nnetwork, init_fn = model_builder.build_model(\n model_name=args.model,\n frontend=args.frontend,\n net_input=net_input,\n num_classes=nb_class,\n image_width=input_size['width'],\n image_height=input_size['height'],\n is_training=True)\n\nloss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=network, labels=net_output) )\n\noptimizer = tf.train.RMSPropOptimizer(learning_rate=args.learning_rate, decay=args.regularization).minimize(loss, var_list=[var for var in tf.trainable_variables()])\n\nsaver = tf.train.Saver(max_to_keep=1000)\nsess.run( tf.global_variables_initializer() )\n\n# If pre-trained ResNet required, load weights (must be done AFTER variables are initialized with 
sess.run(tf.global_variables_initializer())\nif init_fn is not None:\n init_fn(sess)\n\n# Load a previous checkpoint if desired\nmodel_checkpoint_name = \"../output/checkpoints/latest_model_\" + args.model + \"_\" + args.dataset + \".ckpt\"\nif args.continue_training:\n print('Loaded latest model checkpoint')\n saver.restore(sess, model_checkpoint_name)\n\ngeneral_tool.display_info( args, input_size, nb_class )\n\n\n#######################################################################################\n# train the model\n#######################################################################################\n\nprint(\"\\n***** Begin training *****\")\n\navg_loss_per_epoch, avg_scores_per_epoch, avg_iou_per_epoch = [], [], []\n\n# Which validation images do we want\nval_indices = []\nnum_vals = min(args.num_val_images, len(dataset_file_name['validation']['input']))\n\n# Set random seed to make sure models are validated on the same validation images.\n# So you can compare the results of different models more intuitively.\nrandom.seed(16)\nval_indices=random.sample(range(0,len(dataset_file_name['validation']['input'])),num_vals)\n\nfor epoch in range(args.epoch_start_i, args.nb_epoch):\n\n current_losses, cnt = [], 0\n\n id_list = np.random.permutation( len(dataset_file_name['training']['input']) ) # Equivalent to shuffling\n\n nb_iters = (\n int( np.floor( len(id_list) / args.batch_size) )\n if args.redux == 1.0\n else int( np.floor( len(id_list) * args.redux / args.batch_size ) ) )\n\n st = time.time()\n epoch_st=time.time()\n\n for i in range(nb_iters):\n\n input_img_batch, label_img_batch = [], []\n \n for j in range(args.batch_size): # Collect a batch of images\n\n id = id_list[ i*args.batch_size + j ]\n\n input_img = data_tool.load_image( dataset_file_name['training']['input'][id] )\n label_img = data_tool.load_image( dataset_file_name['training']['output'][id] )\n\n with tf.device('/cpu:0'):\n\n #input_img, label_img = data_tool.data_augmentation( args, input_img, label_img )\n input_img, label_img = data_tool.crop_image_and_label( input_img, label_img, input_size )\n\n input_img_batch.append( np.expand_dims( np.float32( input_img ) / 255.0 , axis=0) )\n\n label_img_batch.append( np.expand_dims( np.float32( data_tool.rgb_to_onehot( label_img, label_values_list) ) , axis=0) ) \n\n if args.batch_size == 1:\n input_img_batch = input_img_batch[0]\n label_img_batch = label_img_batch[0]\n else:\n input_img_batch = np.squeeze(np.stack(input_img_batch, axis=1))\n label_img_batch = np.squeeze(np.stack(label_img_batch, axis=1))\n\n _, current = sess.run(\n fetches=[optimizer,loss],\n feed_dict={net_input:input_img_batch,net_output:label_img_batch}\n )\n\n current_losses.append(current)\n cnt = cnt + args.batch_size\n\n if cnt % 20 == 0:\n string_print = \"Epoch = %d Count = %d Current_Loss = %.4f Time = %.2f\"%(epoch,cnt,current,time.time()-st)\n general_tool.LOG(string_print)\n st = time.time()\n\n mean_loss = np.mean(current_losses)\n avg_loss_per_epoch.append(mean_loss)\n\n # Create directories if needed\n if not os.path.isdir(\"%s/%04d\"%(\"checkpoints\",epoch)):\n os.makedirs(\"%s/%04d\"%(\"checkpoints\",epoch))\n\n # Save latest checkpoint to same file name\n print(\"Saving latest checkpoint\")\n saver.save(sess,model_checkpoint_name)\n\n if val_indices != 0 and epoch % args.checkpoint_step == 0:\n print(\"Saving checkpoint for this epoch\")\n saver.save(sess,\"%s/%04d/model.ckpt\"%(\"checkpoints\",epoch))\n\n if epoch % args.validation_step == 0:\n\n print(\"Performing validation\")\n 
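        # Each validation image contributes one row of per-class scores to this epoch's CSV file\n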
target=open(\"%s/%04d/val_scores.csv\"%(\"checkpoints\",epoch),'w')\n target.write(\"val_name, avg_accuracy, precision, recall, f1 score, mean iou, %s\\n\" % (class_names_str))\n\n scores_list = []\n class_scores_list = []\n precision_list = []\n recall_list = []\n f1_list = []\n iou_list = []\n\n for ind in val_indices: # Do the validation on a small set of validation images\n\n input_img = data_tool.load_image( dataset_file_name['validation']['input'][ind] )\n label_img = data_tool.load_image( dataset_file_name['validation']['output'][ind] )\n\n input_img, label_img = data_tool.crop_image_and_label( input_img, label_img, input_size )\n input_img = np.expand_dims( np.float32( input_img ), axis=0) / 255.0\n label_img_code = data_tool.onehot_to_code( data_tool.rgb_to_onehot( label_img, label_values_list) )\n\n output_tensor = sess.run(\n network,\n feed_dict={net_input:input_img})\n\n output_tensor = np.array(output_tensor[0,:,:,:])\n output_img_code = data_tool.onehot_to_code( output_tensor )\n output_img = data_tool.onehot_to_rgb( output_tensor, label_values_list )\n\n accuracy, class_accuracies, prec, rec, f1, iou = model_tool.evaluate_segmentation(pred=output_img_code, label=label_img_code, nb_class=nb_class)\n\n file_name = general_tool.filepath_to_name( dataset_file_name['validation']['input'][ind] )\n target.write(\"%s, %f, %f, %f, %f, %f\"%(file_name, accuracy, prec, rec, f1, iou))\n\n for item in class_accuracies:\n target.write(\", %f\"%(item))\n target.write(\"\\n\")\n\n scores_list.append(accuracy)\n class_scores_list.append(class_accuracies)\n precision_list.append(prec)\n recall_list.append(rec)\n f1_list.append(f1)\n iou_list.append(iou)\n\n file_name = os.path.basename(dataset_file_name['validation']['input'][ind])\n file_name = os.path.splitext(file_name)[0]\n\n cv2.imwrite(\"%s/%04d/%s_pred.png\"%(args.output_path + \"checkpoints\",epoch, file_name),cv2.cvtColor(np.uint8(output_img), cv2.COLOR_RGB2BGR))\n cv2.imwrite(\"%s/%04d/%s_gt.png\"%(args.output_path + \"checkpoints\",epoch, file_name),cv2.cvtColor(np.uint8(label_img), cv2.COLOR_RGB2BGR))\n\n\n target.close()\n\n avg_score = np.mean(scores_list)\n class_avg_scores = np.mean(class_scores_list, axis=0)\n avg_scores_per_epoch.append(avg_score)\n avg_precision = np.mean(precision_list)\n avg_recall = np.mean(recall_list)\n avg_f1 = np.mean(f1_list)\n avg_iou = np.mean(iou_list)\n avg_iou_per_epoch.append(avg_iou)\n\n print(\"\\nAverage validation accuracy for epoch # %04d = %f\"% (epoch, avg_score))\n print(\"Average per class validation accuracies for epoch # %04d:\"% (epoch))\n\n for index, item in enumerate(class_avg_scores):\n print(\"%s = %f\" % (class_names_list[index], item))\n\n print(\"Validation precision = \", avg_precision)\n print(\"Validation recall = \", avg_recall)\n print(\"Validation F1 score = \", avg_f1)\n print(\"Validation IoU score = \", avg_iou)\n\n epoch_time=time.time()-epoch_st\n remain_time=epoch_time*(args.nb_epoch-1-epoch)\n m, s = divmod(remain_time, 60)\n h, m = divmod(m, 60)\n if s!=0:\n train_time=\"Remaining training time = %d hours %d minutes %d seconds\\n\"%(h,m,s)\n else:\n train_time=\"Remaining training time : Training completed.\\n\"\n general_tool.LOG(train_time)\n scores_list = []\n\n fig1, ax1 = plt.subplots(figsize=(11, 8))\n ax1.plot(range(epoch+1), avg_scores_per_epoch)\n ax1.set_title(\"Average validation accuracy vs epochs\")\n ax1.set_xlabel(\"Epoch\")\n ax1.set_ylabel(\"Avg. val. 
accuracy\")\n plt.savefig(args.output_path + 'accuracy_vs_epochs.png')\n\n plt.clf()\n fig2, ax2 = plt.subplots(figsize=(11, 8))\n ax2.plot(range(epoch+1), avg_loss_per_epoch)\n ax2.set_title(\"Average loss vs epochs\")\n ax2.set_xlabel(\"Epoch\")\n ax2.set_ylabel(\"Current loss\")\n plt.savefig(args.output_path + 'loss_vs_epochs.png')\n\n plt.clf()\n fig3, ax3 = plt.subplots(figsize=(11, 8))\n ax3.plot(range(epoch+1), avg_iou_per_epoch)\n ax3.set_title(\"Average IoU vs epochs\")\n ax3.set_xlabel(\"Epoch\")\n ax3.set_ylabel(\"Current IoU\")\n\n plt.savefig(args.output_path + 'iou_vs_epochs.png')","sub_path":"trainer/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"588345413","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# pets.py\n# \n# Copyright 2018 Administrator \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\n\ndef describe_pet(animal_type, pet_name):\n\t\"\"\"显示宠物的信息\"\"\"\n\tprint(\"\\nI have a \" + animal_type + \".\")\n\tprint(\"My \" + animal_type +\"'s name is \" + pet_name.title() + \".\")\n\ndescribe_pet('hamster', 'harry')\ndescribe_pet('dog', 'willie')\n","sub_path":"chapter_8/pets.py","file_name":"pets.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"642973977","text":"class Man :\n def __init__(self, name):\n self.name = name\n\n def solve_task (self):\n return \" {}, I`m not ready yet\".format(self.name)\n\nname_man = Man (input (\" Enter name: \"))\nprint(name_man.solve_task())\n\nfrom time import sleep\nfrom random import randint\nclass Pupil(Man) :\n def solve_task (self) :\n print (\"Wait.......\")\n sleep(randint(3,6))\n return \" {}, I`m not ready yet\".format(self.name)\n\nname_pupil = Pupil (input (\" Enter name: \"))\nprint(name_pupil.solve_task())\n","sub_path":"Practice/o.dagestanski/H_work5/Hw5_task1_and_2.py","file_name":"Hw5_task1_and_2.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"516016580","text":"#REDDIT PROGRAMMING EASY CHALLENGES\n\n#Challenge 1\n'''create a program that will ask the users name, age, and reddit username.\nhave it tell them the information back, in the format: your name is (blank),\nyou are (blank) years old, and your username is (blank) for extra credit,\nhave the program log this information in a file to be accessed later.'''\n\n#grab info\ndef info():\n name = input(\"please enter your name: \")\n age = int(input(\"please enter your age: \"))\n username = input(\"please enter you Reddit user name: \")\n print('Your name is {0} you are {1} years old, and your username is {2}'.format(name,\n age,\n username))\n return 
name, age, username\n\n#Logging the information in a file\ndef write():\n name, age, username = info()\n out = open(\"easy_1.txt\", 'w') #'r' for reading 'w' for writing 'a' for append\n line = \"Name: {0}\\nAge: {1}\\nUsername: {2}\".format(name,age,username) \n out.write(line) \n out.close()\n\nif __name__ == '__main__':\n write()\n\n\n","sub_path":"Easy_Challenges/Easy1to10/easy1.py","file_name":"easy1.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"12803902","text":"\"\"\"ADD MODULE DOCSTRING HERE\"\"\"\n\n# Standard Library\nimport os\nimport logging\n\n# Third-party\nimport yaml\n\n#------------------------------------------------------------------------------#\n\nlogger = logging.getLogger(__name__)\n\n#------------------------------------------------------------------------------#\n\n__all__ = ['AWAREDIR',\n 'AWS10MDIR',\n 'AWS3HRDIR',\n 'AWSDIR',\n 'DATADIR',\n 'LGGEDIR',\n 'MODISDIR',\n 'MODISDIRHDF',\n 'MODISDIRNC',\n 'MODISGEOTIFF',\n 'OUTPUT_IMG',\n 'OUTPUT_NC',\n 'OUTPUT_TXT',\n 'OUTPUTDIR',\n 'PYTHONPATH',\n 'NATEARTH10',\n 'NATEARTH50',\n 'NATEARTH110',\n 'NSIDC_DIR',\n 'NSIDC_ES25R',\n 'NSIDC_ES25R_BIN',\n 'NSIDC_ES25R_NC',\n 'NSIDC_ORIG',\n 'NSIDC_ORIG_BIN',\n 'NSIDC_ORIG_NC',\n 'NSIDC_PS25R',\n 'NSIDC_PS25R_BIN',\n 'NSIDC_PS25R_NC',\n 'TMPDIR',\n 'WKDIR',\n 'check_dirs',\n 'check_nsidc_dirs',\n 'list_dirs']\n\n#------------------------------------------------------------------------------#\n\n# General\nDATADIR = os.environ['DATADIR']\nPYTHONPATH = os.environ['PYTHONPATH']\nWKDIR = os.path.abspath(os.path.join(PYTHONPATH, '../work'))\nTMPDIR = os.path.abspath(os.path.join(PYTHONPATH, '../work/tmp'))\nOUTPUTDIR = os.path.abspath(os.path.join(PYTHONPATH, '../work/output'))\nOUTPUT_IMG = os.path.abspath(os.path.join(PYTHONPATH, '../work/output/img'))\nOUTPUT_NC = os.path.abspath(os.path.join(PYTHONPATH, '../work/output/nc'))\nOUTPUT_TXT = os.path.abspath(os.path.join(PYTHONPATH, '../work/output/txt'))\n\n# AWS data\nAWSDIR = os.path.join(DATADIR, 'amrc/aws')\nAWS10MDIR = os.path.join(DATADIR, 'amrc/aws/10mn/rdr')\nAWS3HRDIR = os.path.join(DATADIR, 'amrc/aws/3h')\n\n# NSIDC data\nNSIDC_DIR = os.path.join(DATADIR, 'ssmi')\nNSIDC_ORIG = os.path.join(DATADIR, 'ssmi/orig')\nNSIDC_ORIG_BIN = os.path.join(DATADIR, 'ssmi/orig/binary')\nNSIDC_ORIG_NC = os.path.join(DATADIR, 'ssmi/orig/netcdf')\nNSIDC_PS25R = os.path.join(DATADIR, 'ssmi/ps25r')\nNSIDC_PS25R_BIN = os.path.join(DATADIR, 'ssmi/ps25r/binary')\nNSIDC_PS25R_NC = os.path.join(DATADIR, 'ssmi/ps25r/netcdf')\nNSIDC_ES25R = os.path.join(DATADIR, 'ssmi/es25r')\nNSIDC_ES25R_BIN = os.path.join(DATADIR, 'ssmi/es25r/binary')\nNSIDC_ES25R_NC = os.path.join(DATADIR, 'ssmi/es25r/netcdf')\n\n# LGGE Melt Data\nLGGEDIR = os.path.join(DATADIR, 'lgge')\n\n# MODIS data\nMODISDIR = os.path.join(DATADIR, 'modis/products')\nMODISDIRHDF = os.path.join(DATADIR, 'modis/products/hdf')\nMODISDIRNC = os.path.join(DATADIR, 'modis/products/netcdf')\nMODISGEOTIFF = os.path.join(DATADIR, 'modis/geotiff')\n\n# AWARE Data\nAWAREDIR = os.path.join(DATADIR, 'aware')\n\n# Natural Earth\nNATEARTH10 = os.path.join(DATADIR, 'naturalearth/10m_physical')\nNATEARTH50 = os.path.join(DATADIR, 'naturalearth/50m_physical')\nNATEARTH110 = os.path.join(DATADIR, 'naturalearth/110m_physical')\n\n#------------------------------------------------------------------------------#\n\ndef list_dirs():\n \"\"\"List all config directories\"\"\"\n\n for var in sorted(globals()):\n 
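        # All configuration paths are exposed as upper-case module-level names\n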
if var.isupper():\n            path = globals()[var]\n            print('{0:15} : {1}'.format(var, path))\n\n#------------------------------------------------------------------------------#\n\ndef check_dirs(create=False):\n    \"\"\"Make sure all directories exist.\"\"\"\n\n    all_dirs_exist = True\n    variables = [var for var in globals() if var.isupper()]\n    for var in sorted(variables):\n        path = globals()[var]\n        if not os.path.isdir(path):\n            all_dirs_exist = False\n            if create:\n                logger.info('Creating %s', path)\n                os.makedirs(path)\n            else:\n                logger.info('%s does not exist', path)\n    if all_dirs_exist:\n        logger.info('All directories already exist. Nothing to do.')\n\n#------------------------------------------------------------------------------#\n\ndef check_nsidc_dirs(create=False):\n    \"\"\"Make sure the full NSIDC directory tree exists.\"\"\"\n\n    fname = 'nsidc_dirs.yml'\n    f = open(fname)\n    tree = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects from the YAML file\n    for root in [NSIDC_ORIG, NSIDC_PS25R, NSIDC_ES25R]:\n        build_tree(tree, root, create)\n\n#------------------------------------------------------------------------------#\n\ndef build_tree(tree, root='', create=False):\n    \"\"\"Build a directory tree.\"\"\"\n\n    for branch in tree:\n        if isinstance(branch, dict):\n            for key, value in branch.items():\n                if key == 'PARAMETERS':\n                    continue\n                path = os.path.join(root, key)\n                build_tree(value, path, create)\n        else:\n            path = os.path.join(root, str(branch))\n            if not os.path.isdir(path):\n                if create:\n                    logger.info('Creating %s', path)\n                    os.makedirs(path)\n                else:\n                    logger.info('%s does not exist', path)\n\n#------------------------------------------------------------------------------#\n","sub_path":"packages/config/directories.py","file_name":"directories.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"37495688","text":"\n# @Title: Next Number (Closed Number LCCI)\n# @Author: qinxinlei\n# @Date: 2020-07-15 17:53:50\n# @Runtime: 64 ms\n# @Memory: 13.3 MB\n\nclass Solution:\n    def findClosedNumbers(self, num: int) -> List[int]:\n        mn, mx = 1, 2147483647\n\n        def findLarge(n):\n            # From the right, find the first 1,\n            # then count the run of 1s (ones) until a 0 or the highest bit is reached,\n            # then flip that 0 to 1,\n            # and fill the bits to its right with 000...111 (ones-1 ones)\n            checkMask = 1\n            bits = 0\n            while checkMask <= n and checkMask & n == 0:\n                checkMask <<= 1\n                bits += 1\n            ones = 0 # build the 000...111 run directly\n            while checkMask <= n and checkMask & n != 0:\n                ones = (ones << 1) + 1\n                checkMask <<= 1\n                bits += 1\n            # the flipped bit already turned one 0 into a 1, so shift ones right once\n            ones >>= 1\n            # turn the 0 into a 1\n            n |= checkMask\n            # clear the bits to the right\n            n = (n >> bits) << bits\n            # fill the right side with ones\n            n |= ones\n            return n if mn <= n <= mx else -1\n\n        def findSmall(n):\n            # From the right, find the first 0, counting the 1s (ones) passed on the way,\n            # then keep going left until a 1 is met again,\n            # then flip that 1 to 0; ones shifts left once as well (hence it starts at 1),\n            # and fill the bits to the right with the ones 1s in the high positions,\n            # i.e. build the 111...000 run directly from ones.\n            # Note: if every bit is 1 there is no answer, return -1\n            checkMask = 1\n            bits = 0\n            ones = 1\n            while checkMask <= n and checkMask & n != 0:\n                checkMask <<= 1\n                bits += 1\n                ones = (ones << 1) + 1\n            if checkMask > n:\n                # all bits are 1\n                return -1\n            while checkMask <= n and checkMask & n == 0:\n                checkMask <<= 1\n                bits += 1\n                ones <<= 1\n            # ones was initialized to 1, so shift it right once\n            ones >>= 1\n            # flip the 1 that has to change to 0\n            n &= ~checkMask\n            # clear the bits to the right\n            n = (n >> bits) << bits\n            # fill the right side with ones\n            n |= ones\n            return n if mn <= n <= mx else -1\n\n        return [findLarge(num), findSmall(num)]\n\n","sub_path":"Problemset/closed-number-lcci/closed-number-lcci.py","file_name":"closed-number-lcci.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"431651915","text":"'''\n Heads and Legs\n “A farm contains chickens and cows. There are x legs and y heads.\n How many chickens and cows are there?”\n'''\n\ndef animals(heads, legs):\n if heads < 0 or legs < 0 or heads > 1000 or legs > 1000:\n return 'No solutions'\n for i in range(heads + 1):\n j = heads - i\n if 2 * i + 4 * j == legs:\n if isinstance(i, int) and isinstance(j, int):\n return (i, j)\n return 'No solutions'\n\n'''\n chickens, cows = 2*heads-legs/2, legs/2-heads\n if chickens < 0 or cows < 0 or not chickens == int(chickens) or not cows == int(cows):\n return \"No solutions\"\n return chickens, cows\n'''\n\nprint(animals(72,200))","sub_path":"python/animals.py","file_name":"animals.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"285362835","text":"from django.shortcuts import render, HttpResponse,HttpResponseRedirect\nfrom django.http import HttpRequest\nfrom django.template.context import RequestContext\nfrom django.template import loader\nfrom .models import Article,Category\nfrom django.db.models.aggregates import Count,Sum,Avg\nfrom django.db.models.functions import ExtractHour,ExtractDay,ExtractMonth,TruncMonth\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n\n# Create your views here.\n\n\ndef getRigthtIndex():\n arts = Article.articles.all()\n # 获取所有的分类信息\n cates = Category.cates.all()\n # 文章按照日期进行分类\n # select={\"create_time\":\"DATE_FORMAT(create_time,'%%e')\"}\n month_count = Article.articles.extra(select={\"create_time\":\"DATE_FORMAT(create_time,'%%Y-%%m')\"})\\\n .values(\"create_time\").annotate(count_num=Count('create_time')).values('create_time','count_num')\n list1 = []\n for map in month_count:\n value = map.values()\n list1.append(list(value))\n data = {'cates': cates, 'month_count': list1}\n return data\n\n\n\n\ndef boot(request):\n index_loader = loader.get_template(\"my/index_left.html\")\n\n arts = Article.articles.all()\n # print(\"#########00000000: \",arts[1].title)\n # 获取所有的分类信息\n cates = Category.cates.all()\n\n # 文章按照日期进行分类\n # select={\"create_time\":\"DATE_FORMAT(create_time,'%%e')\"}\n month_count = Article.articles.extra(select={\"create_time\":\"DATE_FORMAT(create_time,'%%Y-%%m')\"})\\\n .values(\"create_time\").annotate(count_num=Count('create_time')).values('create_time','count_num')\n\n # 使用另一种方法,按照月份进行分组 # .values('create_times') \\ .values('create_times','sum_counts')\n # hours_counts = Article.articles.annotate(create_times=ExtractMonth('create_time')) \\\n # .values('create_times') \\\n # .annotate(sum_counts=Count('id')) \\\n\n # hours_counts = Article.articles.annotate(cids='cid').values('cids') \\\n # .annotate(sum_counts=Count('id')) \\\n # aa = Article.articles.annotate(c=ExtractMonth('create_time')).values('c')\n # print(\"%%%%%%%:: \",hours_counts,aa[0])\n\n print(\"!!!!!!!!!!!!: \",month_count)\n list1 = []\n for map in month_count:\n value = map.values()\n list1.append(list(value))\n\n print(\"list########### \",list1)\n\n a = 100\n b = [1,2,3,4,5]\n c = {'name':'yuange','age':30}\n\n # 分页处理数据\n map = page_list(request,arts)\n\n data = {'a':a,'b':b,'c':c,'arts':arts,'cates': cates,'month_count':list1,\n 'blogs':map.get(\"blogs\"),'page_of_blogs': map.get(\"page_of_blogs\"),'page_range': map.get(\"page_range\")}\n content = index_loader.render(data)\n return HttpResponse(content)\n\ndef more(request):\n index_loader = loader.get_template(\"my/detail.html\")\n get = request.GET\n art = 
get.get(\"art\")\n print(\"art::: \",art)\n title = get.get(\"title\")\n content = get.get(\"content\")\n\n map = getRigthtIndex() # data = {'cates': cates, 'month_count': list1}\n data = {'title':title,'content':content,'art':art,'cates':map.get('cates'),'month_count':map.get('month_count')}\n content = index_loader.render(data) # index_loader.render()\n return HttpResponse(content)\n\n\ndef add_artitcle(request):\n index_loader = loader.get_template(\"my/add_artilce.html\")\n a = 100\n b = [1,2,3,4,5]\n c = {'name':'yuange','age':30}\n data = {'a':a,'b':b,'c':c}\n content = index_loader.render(data)\n return HttpResponse(index_loader.render())\n\n# 插入数据,添加文章信息\ndef insert_artitcle(request):\n index_loader = loader.get_template(\"my/index_left.html\")\n post = request.POST\n title = post.get(\"title\")\n contents = post.get(\"contents\")\n\n art = Article.articles.create(title,title,contents)\n # 保存数据到数据库\n art.save()\n\n a = 100\n b = [1,2,3,4,5]\n c = {'name':'yuange','age':30}\n data = {'a':a,'b':b,'c':c}\n content = index_loader.render(data)\n\n # return HttpResponse(index_loader.render())\n return boot(request)\n\n\ndef get_category(request):\n index_loader = loader.get_template(\"my/cate_left.html\")\n gets = request.GET\n id = gets.get(\"cid\")\n # print(\"cid: \",id)\n arts = Article.articles.filter(cid=id)\n # 获取所有的分类信息\n cates = Category.cates.all()\n\n #按照日期进行分类\n # 文章按照日期进行分类\n # select={\"create_time\":\"DATE_FORMAT(create_time,'%%e')\"}\n month_count = Article.articles.extra(select={\"create_time\": \"DATE_FORMAT(create_time,'%%Y-%%m')\"}) \\\n .values(\"create_time\").annotate(count_num=Count('create_time')).values('create_time', 'count_num')\n\n list1 = []\n for map in month_count:\n value = map.values()\n list1.append(list(value))\n\n # 分页处理数据\n map = page_list(request, arts)\n\n print(\"@@@@@: \",arts.count())\n data = {'arts': arts,'cates': cates,'month_count':list1,'cid': id,\n 'blogs':map.get(\"blogs\"),'page_of_blogs': map.get(\"page_of_blogs\"),'page_range': map.get(\"page_range\")}\n content = index_loader.render(data)\n return HttpResponse(content)\n # return boot(request)\n\n\n\n# 按照时间分类进行查询\ndef query_byDate(request):\n index_loader = loader.get_template(\"my/dateSearch_left.html\")\n gets = request.GET\n dates = gets.get(\"dates\")\n arts = Article.articles.filter(create_time__contains=dates)\n map = getRigthtIndex() # data = {'cates': cates, 'month_count': list1}\n\n # 分页处理数据\n pages = page_list(request, arts)\n\n # 获取所有的分类信息\n print(\"@@@@@: \", arts.count())\n data = {'arts': arts,'cates':map.get('cates'),'month_count':map.get('month_count'),'dates':dates,\n 'blogs':pages.get(\"blogs\"),'page_of_blogs': pages.get(\"page_of_blogs\"),'page_range': pages.get(\"page_range\")}\n content = index_loader.render(data)\n return HttpResponse(content)\n # return boot(request)\n\n\n#按照标题的关键字进行搜索查询\ndef search_byWords(request):\n words = request.GET.get(\"words\")\n if words == '' or len(words) == 0:\n return boot(request)\n else:\n index_loader = loader.get_template(\"my/keywords.html\")\n arts = Article.articles.filter(title__contains=words)\n map = getRigthtIndex() # data = {'cates': cates, 'month_count': list1}\n # 分页处理数据\n pages = page_list(request, arts)\n # 获取所有的分类信息\n print(\"@@@@@: \", arts.count())\n data = {'arts': arts, 'cates': map.get('cates'), 'month_count': map.get('month_count'),'words': words,\n 'blogs':pages.get(\"blogs\"),'page_of_blogs': pages.get(\"page_of_blogs\"),'page_range': pages.get(\"page_range\")}\n content = index_loader.render(data)\n 
return HttpResponse(content)\n\n\n\n# 分页条显示所有的内容\n# 显示博客列表\ndef page_list(request,pages_list_all):\n # pages_list_all = Blog.objects.all()\n #获取所有的文章\n # pages_list_all = Article.articles.all()\n # 每10页进行分页\n # paginator 分页器的实例化\n # paginator = Paginator(pages_list_all, 2)\n paginator = Paginator(pages_list_all, 1,2)\n page_num = request.GET.get('page', 1) # 获取GET请求的内容,默认为1 获取url的页面参数(GET请求)\n page_of_blogs = paginator.get_page(page_num) # 将获取的第几页的文章列表值给赋值,get方法会自动处理异常\n current_page_num = page_of_blogs.number # 获取当前页码\n\n # 页面范围 显示 分页时的 结果, 就是如果有多个分页,不完全显示,就显示4个页面\n # 获取当前页码和 当前页码的前后两页\n page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + \\\n list(range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1))\n\n # 加上省略页码\n if page_range[0] - 1 >= 2:\n page_range.insert(0, '...')\n if paginator.num_pages - page_range[-1] >= 2:\n page_range.append('...')\n\n # 加上首页和尾页\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n\n context = {}\n # 获取当所有的文章列表 ,两行都是\n # context['blogs'] = Blog.objects.all()\n context['blogs'] = page_of_blogs.object_list\n\n # 获取当前页的文章列表 ,两行都是\n context['page_of_blogs'] = page_of_blogs\n\n context['page_range'] = page_range\n\n # 获取文章分类列表\n # context['blog_types'] = BlogType.objects.all()\n return context\n","sub_path":"blob/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"617369544","text":"from __future__ import with_statement\n\nfrom pyramid.view import view_config\nfrom pyramid.httpexceptions import HTTPFound, HTTPForbidden\nfrom sqlalchemy.orm.query import aliased\n\nfrom intranet3.forms.common import DeleteForm\nfrom intranet3.utils.views import BaseView\nfrom intranet3.models import Tracker, TrackerCredentials, Project, DBSession\nfrom intranet3.forms.tracker import (TrackerForm, TRACKER_TYPES, TrackerLoginForm,\n trackers_login_validators)\nfrom intranet3.log import INFO_LOG\n\nLOG = INFO_LOG(__name__)\n\nclass UserCredentialsMixin(object):\n def _get_current_users_credentials(self):\n creds = aliased(\n TrackerCredentials,\n TrackerCredentials.query.filter(TrackerCredentials.user_id==self.request.user.id).subquery()\n )\n query = DBSession.query(\n Tracker,\n creds\n ).outerjoin((creds, Tracker.credentials))\n if self.request.user.client:\n client = self.request.user.client\n query = query.filter(Project.tracker_id==Tracker.id).filter(Project.client_id==client.id)\n\n return [\n {\"tracker\": tracker, \"has_creds\": bool(credentials), \"creds\": credentials}\n for tracker, credentials in query\n ]\n\n def _get_current_users_credentials_for_tracker(self, tracker):\n if self.request.user.client:\n client = self.request.user.client\n query = Tracker.query.filter(Project.tracker_id==tracker.id)\\\n .filter(Project.client_id==client.id)\\\n .filter(Tracker.id==Project.tracker_id)\n result = query.first()\n if not result:\n raise HTTPForbidden\n\n return TrackerCredentials.query.filter(TrackerCredentials.user==self.request.user)\\\n .filter(TrackerCredentials.tracker_id==tracker.id).first()\n\n\n@view_config(route_name='tracker_list', permission='can_see_own_bugs')\nclass List(UserCredentialsMixin, BaseView):\n def get(self):\n trackers = self._get_current_users_credentials()\n return dict(trackers=trackers)\n\n\n@view_config(route_name='tracker_view', permission='can_edit_trackers')\nclass View(BaseView):\n def 
get(self):\n tracker_id = self.request.GET.get('tracker_id')\n tracker = Tracker.query.get(tracker_id)\n return dict(tracker=tracker, TRACKER_TYPES=TRACKER_TYPES)\n\n\n@view_config(route_name='tracker_add', permission='can_edit_trackers')\nclass Add(BaseView):\n def get(self):\n form = TrackerForm()\n return dict(form=form)\n\n def post(self):\n form = TrackerForm(self.request.POST)\n if form.validate():\n tracker = Tracker(\n type=form.type.data,\n name=form.name.data,\n url=form.url.data,\n mailer=form.mailer.data\n )\n DBSession.add(tracker)\n self.flash(self._(u\"New tracker added\"))\n LOG(u\"Tracker added\")\n url = self.request.url_for('/tracker/list')\n return HTTPFound(location=url)\n return dict(form=form)\n\n\ndef _add_tracker_login_validator(tracker_name, form):\n validators = {}\n for tracker_name in (tracker_name, 'all'):\n if tracker_name in trackers_login_validators:\n for validator_name, validator in trackers_login_validators[tracker_name].items():\n if validator_name not in validators:\n validators[validator_name] = []\n\n validators[validator_name].append(validator)\n\n for validator_name, validator in validators.items():\n getattr(form, validator_name).validators = validators[validator_name]\n\n@view_config(route_name='tracker_login', permission='can_see_own_bugs')\nclass Login(UserCredentialsMixin, BaseView):\n def get(self):\n tracker_id = self.request.GET.get('tracker_id')\n tracker = Tracker.query.get(tracker_id)\n credentials = self._get_current_users_credentials_for_tracker(tracker)\n form = TrackerLoginForm(obj=credentials)\n return dict(form=form, tracker=tracker)\n\n def post(self):\n tracker_id = self.request.GET.get('tracker_id')\n tracker = Tracker.query.get(tracker_id)\n credentials = self._get_current_users_credentials_for_tracker(tracker)\n form = TrackerLoginForm(self.request.POST, obj=credentials)\n\n _add_tracker_login_validator(tracker.name, form)\n\n if form.validate():\n if credentials is None:\n credentials = TrackerCredentials(\n user_id=self.request.user.id,\n tracker_id=tracker.id,\n login=form.login.data,\n password=form.password.data,\n )\n DBSession.add(credentials)\n else:\n credentials.login=form.login.data\n credentials.password=form.password.data\n self.flash(self._(u\"Credentials saved\"))\n LOG(u\"Credentials saved\")\n url = self.request.url_for('/tracker/list')\n return HTTPFound(location=url)\n return dict(form=form, tracker=tracker)\n\n@view_config(route_name='tracker_edit', permission='can_edit_trackers')\nclass Edit(BaseView):\n def get(self):\n tracker_id = self.request.GET.get('tracker_id')\n tracker = Tracker.query.get(tracker_id)\n form = TrackerForm(obj=tracker)\n return dict(tracker_id=tracker.id, form=form)\n\n def post(self):\n tracker_id = self.request.GET.get('tracker_id')\n tracker = Tracker.query.get(tracker_id)\n form = TrackerForm(self.request.POST, obj=tracker)\n if form.validate():\n tracker.type = form.type.data\n tracker.name = form.name.data\n tracker.url = form.url.data\n tracker.mailer = form.mailer.data\n self.flash(self._(u\"Tracker saved\"))\n LOG(u\"Tracker saved\")\n url = self.request.url_for('/tracker/list')\n return HTTPFound(location=url)\n return dict(tracker_id=tracker.id, form=form)\n\n\n\n@view_config(route_name='tracker_delete', renderer='intranet3:templates/common/delete.html', permission='can_edit_trackers')\nclass Delete(BaseView):\n\n def dispatch(self):\n tracker_id = self.request.GET.get('tracker_id')\n tracker = Tracker.query.get(tracker_id)\n form = DeleteForm(self.request.POST)\n if 
self.request.method == 'POST' and form.validate():\n tracker.credentials.delete()\n tracker.projects.delete()\n DBSession.delete(tracker)\n back_url = self.request.url_for('/tracker/list')\n return HTTPFound(location=back_url)\n return dict(\n type_name=u'tracker',\n title=u'%s' % tracker.name,\n url=self.request.url_for('/tracker/delete', tracker_id=tracker.id),\n back_url=self.request.url_for('/tracker/list'),\n form=form\n )\n\n\n@view_config(route_name='tracker_delete_login', renderer='intranet3:templates/common/delete.html', permission='can_see_own_bugs')\nclass DeleteLogin(BaseView):\n def dispatch(self):\n tracker_id = self.request.GET.get('tracker_id')\n tracker = Tracker.query.get(tracker_id)\n form = DeleteForm(self.request.POST)\n if self.request.method == 'POST' and form.validate():\n credentials = tracker.credentials.filter(TrackerCredentials.user_id==self.request.user.id).one()\n DBSession.delete(credentials)\n back_url = self.request.url_for('/tracker/list')\n return HTTPFound(location=back_url)\n return dict(\n type_name=u'tracker',\n title=self._(u'Credentials for user ${name} on tracker ${tracker}', name=self.request.user.name, tracker=tracker.name),\n url=self.request.url_for('/tracker/delete_login', tracker_id=tracker.id),\n back_url=self.request.url_for('/tracker/list'),\n form=form\n )\n","sub_path":"src/intranet3/intranet3/views/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":8047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"172520090","text":"from pathlib import Path\nimport ujson as json\nfrom typing import Dict, List\nfrom random import choice\n\nfrom nonebot import MatcherGroup\nfrom nonebot_adapter_gocq.event import GroupIncreaseNoticeEvent, GroupDecreaseNoticeEvent\n\nfrom src.common import Bot, GroupMessageEvent, T_State, Message, logger, CANCEL_EXPRESSION\nfrom src.common.rules import sv_sw, comman_rule\n\n\nplugin_name = '入群退群提醒'\nplugin_usage = ''\n\n\n#———————————————————————————————入群提醒———————————————————————————————\n\nwelcome_name = '入群欢迎'\nwelcome_usage = \"\"\"\n入群仪式,支持图片,可设置多个欢迎语句,随机触发\n[设置入群欢迎] 显示当前群聊欢迎语句,具体设置按照操作提示进行\n\"\"\".strip()\n\n\nwelcome_settings_file = Path(__file__).parent/\"welcome_settings.json\"\n\n\nif not welcome_settings_file.exists():\n with welcome_settings_file.open('w', encoding='utf-8') as j:\n json.dump({}, j, ensure_ascii=False, escape_forward_slashes=False, indent=4)\n\n\nwith welcome_settings_file.open(encoding='utf-8') as j:\n welcome_settings :Dict = json.load(j)\n\"\"\"\nsetting 结构:\n{\n gid(str): {\n approve: [str, ...], (主动加群)\n invite: [str, ...], (被邀请入群)\n locked: bool (是否被锁定,决定群员是否可以修改邀请语句)\n },\n ...\n}\n\"\"\"\n\nDEFAULT_SPEECH = {\n 'approve': [\n '你已经是群主啦,快穿上女装参加登基大典吧!',\n '{name}加入了女装大家庭,大家快快穿上女装夹道欢迎吧!'\n ],\n # 'invite': [\n # '欢迎{name}来到{admin}的女装殿堂,让{admin}亲自为您挑选合适的款式吧!',\n # '{admin}邀请{name}给大家展示女装姐妹丼啦,欸欸~米娜桑不要脱裤子啊(#°Д°)'\n # ],\n 'locked': False\n } # 暂时没发现有条件能上报operator_id,所以invite就先不管了\n\n\ndef save_wl_settings():\n \"\"\"保存群欢迎语句设置\"\"\"\n with welcome_settings_file.open('w', encoding='utf-8') as j:\n json.dump(welcome_settings, j, ensure_ascii=False, escape_forward_slashes=False, indent=4)\n\n\nwelcome = MatcherGroup()\nwelcome_sw = sv_sw(welcome_name, welcome_usage, hierarchy='群助手')\n\nspeech_editor = welcome.on_command('设置入群欢迎', rule=welcome_sw&comman_rule(GroupMessageEvent), priority=2)\n\n\n@speech_editor.handle()\nasync def show_speech(bot: Bot, event: GroupMessageEvent, state: T_State):\n gid = 
str(event.group_id)\n if gid not in welcome_settings:\n wl_setting = DEFAULT_SPEECH\n else:\n wl_setting = welcome_settings[gid]\n ap_speeches = '\\n'.join([f'{i+1}.{speech}' for i, speech in enumerate(wl_setting['approve'])])\n # in_speeches = '\\n'.join([f'{i+1+len(wl_setting[\"approve\"])}.{speech}' for i, speech in enumerate(wl_setting['invite'])])\n status = '已锁定' if wl_setting['locked'] else '未锁定'\n msg = '当前新人入群欢迎语句为:\\n' + ap_speeches + '\\n────────────\\n可修改状态:' + status\n if wl_setting['locked'] and event.sender.role == 'member':\n await speech_editor.finish(Message(msg))\n else:\n # msg += '\\n────────────\\n使用以下命令修改通知语句(不带中括号):\\n[添加] 添加一个迎新语句(使用{name}字段可自动替换新人的昵称,参考默认欢迎语句)\\n[添加邀请入群] 添加一个被邀请入群欢迎语句(除{name}之外可使用{admin}字段��自动替换邀请人的昵称,参考默认邀请语句)\\n[删除+序号] 删除指定的语句\\n[切换锁定] 更改锁定状态,锁定状态下群员不可修改欢迎语句'\n msg += '\\n────────────\\n使用以下命令修改通知语句(不带中括号):\\n[添加] 添加一个迎新语句(使用{name}字段可自动替换新人的昵称,参考默认欢迎语句)\\n[删除+序号] 删除指定的语句\\n[切换锁定] 更改锁定状态,锁定状态下群员不可修改欢迎语句'\n await speech_editor.send(Message(msg))\n\n\n@speech_editor.receive()\nasync def edit_speech(bot: Bot, event: GroupMessageEvent, state: T_State):\n operation = event.raw_message\n gid = str(event.group_id)\n if gid not in welcome_settings:\n welcome_settings[gid] = DEFAULT_SPEECH\n settings = welcome_settings[gid]\n\n if operation.startswith('添加'):\n if len(settings['approve']) >= 5:\n await speech_editor.finish('最大支持存储5条迎新语句,请先删除不需要的语句')\n arg = operation[2:].strip()\n if arg:\n settings['approve'].append(arg)\n save_wl_settings()\n await speech_editor.finish('好的,已经添加一个迎新语句')\n else:\n # state['operation'] = 'add_approve'\n state['operation'] = 'add'\n await speech_editor.pause('请输入要添加的迎新语句(使用{name}字段可自动替换新人的昵称,参考默认欢迎语句),输入[取消]退出当前操作')\n \n # elif operation.startswith('添加邀请入群'):\n # if len(settings['invite']) >= 5:\n # await speech_editor.finish('最大支持存储5条被邀请入群欢迎语句,请先删除不需要的语句')\n # arg = operation[6:].strip()\n # if arg:\n # settings['invite'].append(arg)\n # save_wl_settings()\n # await speech_editor.finish('好的,已经添加一个被邀请入群欢迎语句')\n # else:\n # state['operation'] = 'add_invite'\n # await speech_editor.pause('请输入要添加的被邀请入群欢迎语句,(使用{admin}字段可自动替换邀请人的昵称,参考默认邀请语句),输入[取消]退出当前操作')\n\n elif operation.startswith('删除'):\n arg = operation[2:].strip()\n if arg:\n if arg.isdigit():\n index = int(arg)\n # if index > 0 and index <= len(settings['approve']) + len(settings['invite']):\n if index > 0 and index <= len(settings['approve']):\n # if index <= len(settings['approve']):\n # del settings['approve'][index - 1]\n # else:\n # index -= len(settings['approve'])\n # del settings['invite'][index - 1]\n del settings['approve'][index - 1]\n save_wl_settings()\n await speech_editor.finish(f'已删除序号为{index}的迎新语句')\n else:\n await speech_editor.finish('输入的参数不在列表内,请检查序号')\n else:\n await speech_editor.finish('只支持纯数字参数,请重新开启此对话进行操作')\n else:\n state['operation'] = 'delete'\n await speech_editor.pause('请输入需要删除的语句的序号,输入[取消]退出当前操作')\n\n elif operation.strip() == '切换锁定':\n settings['locked'] = not settings['locked']\n save_wl_settings()\n if settings['locked']:\n await speech_editor.finish('已锁定群欢迎语句')\n else:\n await speech_editor.finish('已解锁群欢迎语句,群员可随意修改语句')\n\n else:\n await speech_editor.finish('已退出编辑欢迎语句操作')\n\n\n@speech_editor.handle()\nasync def wl_secondary_operation(bot: Bot, event: GroupMessageEvent, state: T_State):\n gid = str(event.group_id)\n if event.message.extract_plain_text().strip() in CANCEL_EXPRESSION:\n await speech_editor.finish('已退出编辑操作')\n if gid not in welcome_settings:\n welcome_settings[gid] = DEFAULT_SPEECH\n settings = welcome_settings[gid]\n # if 
state['operation'] == 'add_approve':\n if state['operation'] == 'add':\n settings['approve'].append(event.raw_message)\n msg = '好的,已经添加一个迎新语句'\n # elif state['operation'] == 'add_invite':\n # settings['invite'].append(event.raw_message)\n # msg = '好的,已经添加一个被邀请入群欢迎语句'\n elif state['operation'] == 'delete':\n if event.raw_message.isdigit():\n index = int(event.raw_message)\n # if index > 0 and index <= len(settings['approve']) + len(settings['invite']):\n if index > 0 and index <= len(settings['approve']):\n # if index <= len(settings['approve']):\n # del settings['approve'][index - 1]\n # msg = f'已删除序号为{index}的入群欢迎语句'\n # else:\n # index -= len(settings['approve'])\n # del settings['invite'][index - 1]\n # msg = f'已删除序号为{index}的被邀请群欢迎语句'\n del settings['approve'][index - 1]\n msg = f'已删除序号为{index}的迎新语句'\n else:\n await speech_editor.finish('输入的参数不在列表内,请检查序号')\n else:\n await speech_editor.finish('只支持纯数字参数,请重新开启此对话进行操作')\n else:\n logger.error(f\"Unkown session with operation: {state['operation']}\")\n await speech_editor.finish('未知的对话进度,请联系维护组进行排查')\n save_wl_settings()\n await speech_editor.finish(msg)\n\n\n#———————————————入群触发————————————————\n\n\ndef welcome_rule(bot: Bot, event: GroupIncreaseNoticeEvent, state: T_State):\n \"\"\"排除自己加群的情况,排除加群语句被删除到没有了的情况\"\"\"\n if not isinstance(event, GroupIncreaseNoticeEvent):\n return False\n logger.debug(f'Group {event.group_id} increase Got!!')\n logger.debug(isinstance(event, GroupIncreaseNoticeEvent))\n if event.user_id == event.self_id:\n return False\n logger.debug('非自身加群')\n gid = str(event.group_id)\n if gid in welcome_settings:\n if event.sub_type == 'approve' and len(welcome_settings[gid]['approve']) == 0:\n # or event.sub_type == 'invite' and len(welcome_settings[gid]['invite']) == 0:\n return False\n return True\n\n\nentry_welcome = welcome.on_notice(rule=welcome_sw&welcome_rule)\n\n\n@entry_welcome.handle()\nasync def welcome_newcomers(bot: Bot, event: GroupIncreaseNoticeEvent):\n gid = str(event.group_id)\n if gid not in welcome_settings:\n welcome_settings[gid] = DEFAULT_SPEECH\n settings = welcome_settings[gid]\n userinfo = await bot.get_group_member_info(group_id=event.group_id, user_id=event.user_id)\n name = userinfo['card'] or userinfo['nickname'] or str(event.user_id)\n # admininfo = await bot.get_group_member_info(group_id=event.group_id, user_id=event.operator_id)\n # admin = admininfo['card'] or admininfo['nickname'] or str(event.user_id)\n # msg = Message(choice(settings[event.sub_type]).format(name=name, admin=admin))\n msg = Message(choice(settings['approve']).format(name=name))\n await entry_welcome.finish(msg, at_sender=True)\n\n\n#———————————————————————————————退群提醒———————————————————————————————\n\n\nexitremind_name = '退群提醒'\nexitremind_usage = \"\"\"\n退群提示,支持图片,可设置多个欢迎语句,随机触发\n[设置退群提醒] 显示当前群聊退群提醒语句,具体设置按照操作提示进行\n\"\"\".strip()\n\n\nexitremind_settings_file = Path(__file__).parent/\"exitremind_settings.json\"\n\n\nif not exitremind_settings_file.exists():\n with exitremind_settings_file.open('w', encoding='utf-8') as j:\n json.dump({}, j, ensure_ascii=False, escape_forward_slashes=False, indent=4)\n\n\nwith exitremind_settings_file.open(encoding='utf-8') as j:\n exitremind_settings :Dict = json.load(j)\n\"\"\"\nsetting 结构:\n{\n gid(str): {\n leave: [str, ...], (主动退群)\n kick: [str, ...], (成员被踢)\n locked: bool (是否被锁定,决定群员是否可以修改退群语句)\n },\n ...\n}\n\"\"\"\n\nDEFAULT_REMIND = {\n 'leave': [\n '丑逼狗群主把{name}吓退群啦!'\n ],\n 'kick': [\n '狗比管理{admin}为了掩盖自己援交的黑历史把{name}踢出群啦!'\n ],\n 'locked': False\n }\n\n\ndef 
save_en_settings():\n    \"\"\"Save the group's exit-reminder phrase settings\"\"\"\n    with exitremind_settings_file.open('w', encoding='utf-8') as j:\n        json.dump(exitremind_settings, j, ensure_ascii=False, escape_forward_slashes=False, indent=4)\n\n\nexitremind = MatcherGroup()\nexitremind_sw = sv_sw(exitremind_name, exitremind_usage, hierarchy='群助手')\n\nremind_editor = exitremind.on_command('设置退群提醒', rule=exitremind_sw&comman_rule(GroupMessageEvent), priority=2)\n\n\n@remind_editor.handle()\nasync def show_remind(bot: Bot, event: GroupMessageEvent, state: T_State):\n    gid = str(event.group_id)\n    if gid not in exitremind_settings:\n        en_setting = DEFAULT_REMIND\n    else:\n        en_setting = exitremind_settings[gid]\n    lv_reminds = '\\n'.join([f'{i+1}.{speech}' for i, speech in enumerate(en_setting['leave'])])\n    kk_reminds = '\\n'.join([f'{i+1+len(en_setting[\"leave\"])}.{speech}' for i, speech in enumerate(en_setting['kick'])])\n    status = '已锁定' if en_setting['locked'] else '未锁定'\n    msg = '当前群内主动退群提醒语句为:\\n' + lv_reminds + '\\n────────────\\n被管理踢出群聊提醒语句为:\\n' + kk_reminds + '\\n────────────\\n可修改状态:' + status\n    if en_setting['locked'] and event.sender.role == 'member':\n        await remind_editor.finish(Message(msg))\n    else:\n        msg += '\\n────────────\\n使用以下命令修改通知语句(不带中括号):\\n[添加主动退群] 添加一个主动退群提醒语句(使用{name}字段可自动替换退群者的昵称,参考默认邀请语句)\\n[添加管理踢人] 添加一个管理踢人提醒语句(除{name}之外可使用{admin}字段可自动替换执行的管理的昵称,参考默认踢人提醒语句)\\n[删除+序号] 删除指定的语句\\n[切换锁定] 更改锁定状态,锁定状态下群员不可修改退群提醒语句'\n        await remind_editor.send(Message(msg))\n\n\n@remind_editor.receive()\nasync def edit_remind(bot: Bot, event: GroupMessageEvent, state: T_State):\n    operation = event.raw_message\n    gid = str(event.group_id)\n    if gid not in exitremind_settings:\n        # NOTE: this must be DEFAULT_REMIND ('leave'/'kick' keys); DEFAULT_SPEECH\n        # only has an 'approve' key and would raise KeyError below\n        exitremind_settings[gid] = DEFAULT_REMIND\n    settings = exitremind_settings[gid]\n\n    if operation.startswith('添加主动退群'):\n        if len(settings['leave']) >= 5:\n            await remind_editor.finish('最大支持存储5条主动退群提醒语句,请先删除不需要的语句')\n        arg = operation[6:].strip()\n        if arg:\n            settings['leave'].append(arg)\n            save_en_settings()\n            await remind_editor.finish('好的,已经添加一个主动退群提醒语句')\n        else:\n            state['operation'] = 'add_leave'\n            await remind_editor.pause('请输入要添加的主动退群提醒语句, 输入[取消]退出当前操作')\n    \n    elif operation.startswith('添加管理踢人'):\n        if len(settings['kick']) >= 5:\n            await remind_editor.finish('最大支持存储5条管理踢人提醒语句,请先删除不需要的语句')\n        arg = operation[6:].strip()\n        if arg:\n            settings['kick'].append(arg)\n            save_en_settings()\n            await remind_editor.finish('好的,已经添加一个管理踢人提醒语句')\n        else:\n            state['operation'] = 'add_kick'\n            await remind_editor.pause('请输入要添加的管理踢人提醒语句,(使用{admin}字段可自动替换执行的管理的昵称,参考默认踢人提醒语句),输入[取消]退出当前操作')\n\n    elif operation.startswith('删除'):\n        arg = operation[2:].strip()\n        if arg:\n            if arg.isdigit():\n                index = int(arg)\n                if index > 0 and index <= len(settings['leave']) + len(settings['kick']):\n                    if index <= len(settings['leave']):\n                        del settings['leave'][index - 1]\n                    else:\n                        index -= len(settings['leave'])\n                        del settings['kick'][index - 1]\n                    save_en_settings()\n                    await remind_editor.finish(f'已删除序号为{index}的提醒语句')\n                else:\n                    await remind_editor.finish('输入的参数不在列表内,请检查序号')\n            else:\n                await remind_editor.finish('只支持纯数字参数,请重新开启此对话进行操作')\n        else:\n            state['operation'] = 'delete'\n            await remind_editor.pause('请输入需要删除的语句的序号,输入[取消]退出当前操作')\n\n    elif operation.strip() == '切换锁定':\n        settings['locked'] = not settings['locked']\n        save_en_settings()\n        if settings['locked']:\n            await remind_editor.finish('已锁定退群提醒语句')\n        else:\n            await remind_editor.finish('已解锁退群提醒语句,群员可随意修改语句')\n\n    else:\n        await remind_editor.finish('已退出编辑退群提醒操作')\n\n\n@remind_editor.handle()\nasync def en_secondary_operation(bot: Bot, event: 
GroupMessageEvent, state: T_State):\n logger.debug(f'Handle brach with {state[\"operation\"]}')\n gid = str(event.group_id)\n if gid not in exitremind_settings:\n exitremind_settings[gid] = DEFAULT_REMIND\n settings = exitremind_settings[gid]\n if state['operation'] == 'add_leave':\n settings['leave'].append(event.raw_message)\n msg = '好的,已经添加一个主动退群提醒语句'\n elif state['operation'] == 'add_kick':\n settings['kick'].append(event.raw_message)\n msg = '好的,已经添加一个管理踢人提醒语句'\n elif state['operation'] == 'delete':\n if event.raw_message.isdigit():\n index = int(event.raw_message)\n if index > 0 and index <= len(settings['leave']) + len(settings['kick']):\n if index <= len(settings['leave']):\n del settings['leave'][index - 1]\n msg = f'已删除序号为{index}的退群提醒语句'\n else:\n index -= len(settings['leave'])\n del settings['kick'][index - 1]\n msg = f'已删除序号为{index}的管理踢人提醒语句'\n else:\n await remind_editor.finish('输入的参数不在列表内,请检查序号')\n else:\n await remind_editor.finish('只支持纯数字参数,请重新开启此对话进行操作')\n else:\n logger.error(f\"Unkown session with operation: {state['operation']}\")\n await remind_editor.finish('未知的对话进度,请联系维护组进行排查')\n save_en_settings()\n await remind_editor.finish(msg)\n\n\n#———————————————退群触发————————————————\n\n\ndef exitremind_rule(bot: Bot, event: GroupDecreaseNoticeEvent, state: T_State):\n \"\"\"排除自己被踢出群的情况,排除退群语句被删除到没有了的情况\"\"\"\n if not isinstance(event, GroupDecreaseNoticeEvent):\n return False\n logger.debug(f'Group {event.group_id} decrease Got!!')\n logger.debug(isinstance(event, GroupDecreaseNoticeEvent))\n if event.sub_type == 'kick_me':\n return False\n gid = str(event.group_id)\n if gid in exitremind_settings:\n if event.sub_type == 'leave' and len(exitremind_settings[gid]['leave']) == 0 \\\n or event.sub_type == 'kick' and len(exitremind_settings[gid]['kick']) == 0:\n return False\n return True\n\n\nentry_exitremind = exitremind.on_notice(rule=exitremind_sw&exitremind_rule)\n\n\n@entry_exitremind.handle()\nasync def member_exit_remind(bot: Bot, event: GroupDecreaseNoticeEvent):\n gid = str(event.group_id)\n if gid not in exitremind_settings:\n exitremind_settings[gid] = DEFAULT_REMIND\n settings = exitremind_settings[gid]\n userinfo = await bot.get_stranger_info(user_id=event.user_id)\n name = userinfo['nickname'] or str(event.user_id)\n if event.user_id != event.operator_id:\n admininfo = await bot.get_group_member_info(group_id=event.group_id, user_id=event.operator_id)\n admin = admininfo['card'] or admininfo['nickname'] or str(event.user_id)\n else:\n admin = name\n msg = Message(choice(settings[event.sub_type]).format(name=name, admin=admin))\n await entry_exitremind.finish(msg)","sub_path":"src/plugins/group_aide/entry_exit_notice.py","file_name":"entry_exit_notice.py","file_ext":"py","file_size_in_byte":21163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"174150316","text":"import GrabWeb.spider_me.teacherInfo_zhilifang_detailPage.html_downloader as html_downloader\nimport GrabWeb.spider_me.teacherInfo_zhilifang_detailPage.html_outputer as html_outputer\nimport GrabWeb.spider_me.teacherInfo_zhilifang_detailPage.html_parser as html_parser\nimport urllib.parse as parse\nimport gc\n\n# 本代码抓取的页面是作者查询页面中的一部分信息\n# 如页面:http://buidea.com:9001/writer/writersearch.aspx?invokemethod=search&q=%7B%22search%22%3A%22%E5%91%A8%E5%82%B2%E8%8B%B1%20%E5%8D%8E%E4%B8%9C%E5%B8%88%E8%8C%83%E5%A4%A7%E5%AD%A6%22%2C%22sType%22%3A%22writer%22%7D&\nclass SpiderMain(object):\n\n # 初始化\n def __init__(self):\n self.downloader = 
html_downloader.HtmlDownloader()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n # 抓取\n def craw(self, urls):\n count = 1\n for url in urls:\n # 需要姓名和学校来自数据库,因此添加内容如下:\n urlTmp1 = url.replace(\"http://buidea.com:9001/writer/writersearch.aspx?invokemethod=search&q=%7B%22search%22%3A%22\", \"\")\n urlTmp2 = urlTmp1.replace(\"%22%2C%22sType%22%3A%22writer%22%7D&\", \"\")\n info = parse.unquote(urlTmp2, encoding='utf-8')\n writer = info.split(' ')[0]\n school = info.split(' ')[1].replace('\\n', '')\n try:\n print(\"craw\", count, \": \", url)\n html_cont = self.downloader.download(url)\n # print('test for html_cont')\n\n new_data = self.parser.parse(html_cont, writer, school)\n # print('new_data: ', new_data)\n self.outputer.collect_data(new_data)\n except:\n print(\"craw failed: count = \", count)\n if count % 3000 == 0:\n self.outputer.output_html()\n print(\"前\", count, \"行已输出到output.txt文件\")\n # 将之前已经添加到缓存的内容清空\n self.outputer.reset()\n count = count + 1\n del urlTmp1, urlTmp2, info, writer, school\n gc.collect()\n self.outputer.output_html()\n print('最后一波写入输出文件')\n self.outputer.reset()\n print(count - 1)\n\nif __name__ == \"__main__\":\n #queries = {\"高明, 华东师范大学\", \"金澈清, 华东师范大学\"}\n #queryFile = open(\"xwbaxx_xlbs.txt\", 'r', encoding='utf-8')\n #queryFile = open(\"in.txt\", 'r', encoding='utf-8')\n queryFile = open(\"teacherInfo.txt\", 'r', encoding='utf-8')\n urls = set()\n for query in queryFile:\n # 将输入文件中的每一行分割成导师姓名和学校\n splitRes = query.split('\\t')\n if len(splitRes) != 2:\n print(query, ' 格式不正确')\n else:\n name = query.split('\\t')[0]\n university = query.split('\\t')[1]\n # new_url = \"https://baike.baidu.com/item/\"+parse.quote(name)\n # 根据导师姓名和学校构造需要访问的url\n new_url = \"http://buidea.com:9001/writer/writersearch.aspx?invokemethod=search&q=%7B\"+parse.quote(\"\\\"\")\\\n +\"search\"+parse.quote(\"\\\"\")+\"%3A\"+parse.quote(\"\\\"\")+parse.quote(name)+\"%20\"\\\n +parse.quote(university)+parse.quote(\"\\\"\")+\"%2C\"+parse.quote(\"\\\"\")+\"sType\"\\\n +parse.quote(\"\\\"\")+\"%3A\"+parse.quote(\"\\\"\")+\"writer\"+parse.quote(\"\\\"\")+\"%7D&\"\n urls.add(new_url)\n obj_spider = SpiderMain()\n obj_spider.craw(urls)\n del urls\n gc.collect()\n","sub_path":"SpiderWeb/spider_me/teacherInfo_zhilifang_detailPage/spider_main.py","file_name":"spider_main.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"199520446","text":"from Reporting import load_data, Write, template_new_v3, st_brul3_test\r\nimport time\r\nimport datetime\r\n\r\nif __name__ == \"__main__\":\r\n start = time.time()\r\n ################################################\r\n\r\n file_name = r'C:\\Users\\sunsh\\Documents\\Daily Approval Report\\Data\\master dataset\\master_reporting.csv'\r\n df = load_data(file_name)\r\n print('*************************')\r\n print('Generating Report ...')\r\n b = Write(template_new_v3, df, st_brul3_test)\r\n b.time_frame(start=20181101)\r\n b.write()\r\n\r\n ###############################################\r\n end = time.time()\r\n time_secs = end - start\r\n time_str = str(datetime.timedelta(seconds=time_secs))[:-4]\r\n print(f'Runtime of the program is {time_str}')","sub_path":"Archive/newrp_3.py","file_name":"newrp_3.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"409950379","text":"from django.shortcuts import get_object_or_404\nfrom 
rest_framework import status, permissions\nfrom rest_framework.decorators import permission_classes, api_view, renderer_classes\nfrom rest_framework.response import Response\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom .models import Memo, Clip, Category, LikeMemo\nfrom .serializers import MemoSerializer\nfrom .finder import get_or_create_page, find_memo, get_or_create_category\nfrom .magic import grab_img_from_content\nfrom django.db.utils import IntegrityError\n\n\n# List & Create API view\n@api_view(['GET', 'POST'])\n@permission_classes((permissions.IsAuthenticated,))\ndef list_create(request):\n # Memo list of user\n if request.method == 'GET':\n '''\n Filter memo by category\n If memo is uncategorized, category_id is 0 in request, but map as None because of query\n In short, 1: category_pk, 0: uncategorized, None: all memo\n '''\n if 'category_pk' in request.query_params:\n if request.query_params['category_pk'] is '0':\n category_pk = None\n template_title = 'uncategorized'\n else:\n category_pk = request.query_params['category_pk']\n template_title = Category.objects.get(pk=category_pk).name\n query_set = Memo.objects.filter(user=request.user, category_id=category_pk).order_by('-pk')\n # No category assigned, return all memo\n else:\n query_set = Memo.objects.filter(user=request.user).order_by('-pk')\n template_title = 'All memo'\n\n paginator = LimitOffsetPagination()\n paginated_query_set = paginator.paginate_queryset(query_set, request)\n serializer = MemoSerializer(paginated_query_set, many=True, context={'user': request.user})\n return Response({'memo_list': serializer.data,\n 'prev': paginator.get_previous_link(),\n 'next': paginator.get_next_link(),\n 'template_title': template_title,\n }, template_name='memo_list.html')\n\n # Create Memo\n elif request.method == 'POST':\n serializer = MemoSerializer(data=request.data)\n if serializer.is_valid():\n page = get_or_create_page(request.data['page'])\n category = get_or_create_category(request.data['category_name'], request.user)\n content = grab_img_from_content(request.data['content'])\n serializer.save(user=request.user, page=page, category=category, content=content)\n return Response({'memo': serializer.data}, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n# Retrieve & Update & Destory API view\n@api_view(['GET', 'POST', 'DELETE'])\ndef detail_update_delete(request, pk):\n try:\n memo = Memo.objects.get(pk=pk)\n except Memo.DoesNotExist:\n data = {'msg': 'memo does not exist'}\n return Response(data, status=status.HTTP_404_NOT_FOUND, template_name='error_msg.html')\n '''\n Check object permissions(GET)\n Let A be a set for is_private=True, B be a set for is_owner=True\n We should kick A-B out first. ref) A-B <-> A and ~B\n '''\n if memo.is_private and memo.user != request.user:\n data = {'msg': 'this memo is private OR you are not owner. 
Please log in'}\n        return Response(data, status=status.HTTP_403_FORBIDDEN, template_name='error_msg.html')\n\n    # Retrieve\n    if request.method == 'GET':\n        serializer = MemoSerializer(memo, context={'user': request.user})\n        template_title = 'uncategorized' if memo.category is None else memo.category.name\n        return Response({'memo': serializer.data, 'template_title': template_title}, template_name='memo_detail.html')\n\n    '''\n    Second check object permissions(POST, DELETE)\n    We should kick ~B out, but we already kicked A-B out.\n    So we should kick ~A and ~B out additionally\n    '''\n    if not memo.is_private and memo.user != request.user:\n        data = {'msg': 'you are not an owner'}\n        return Response(data, status=status.HTTP_403_FORBIDDEN, template_name='error_msg.html')\n\n    # Update\n    if request.method == 'POST':\n        serializer = MemoSerializer(memo, data=request.data)\n        if serializer.is_valid():\n            category = get_or_create_category(request.data['category_name'], request.user)\n            serializer.save(user=request.user, category=category)\n            return Response({'memo': serializer.data}, template_name='memo_detail.html')\n        return Response({'memo': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n    # Delete\n    elif request.method == 'DELETE':\n        memo.delete()\n        # No need to redirect for web user ..?\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n# Memo list clipped by user\n@api_view()\n@permission_classes((permissions.IsAuthenticated,))\ndef clip_list(request):\n    clips_by_user = Clip.objects.filter(user=request.user).order_by('-timestamp')\n\n    # for faster response speed\n    if not clips_by_user:\n        return Response({'memo_list': []}, template_name='memo_list.html')\n\n    memo_list = []\n    for clip in clips_by_user:\n        memo_list.append(clip.memo)\n\n    paginator = LimitOffsetPagination()\n    # It is okay that memo_list is not query_set..?\n    paginated_query_set = paginator.paginate_queryset(memo_list, request)\n    serializer = MemoSerializer(paginated_query_set, many=True, context={'user': request.user})\n\n    return Response({'memo_list': serializer.data,\n                     'prev': paginator.get_previous_link(),\n                     'next': paginator.get_next_link(),\n                     'template_title': 'clipped memo'\n                     }, template_name='memo_list.html')\n\n\n# Clip or Unclip a memo\n@api_view(['POST', 'DELETE'])\n@permission_classes((permissions.IsAuthenticated,))\ndef clip_unclip(request, pk):\n    memo = get_object_or_404(Memo, pk=pk)\n\n    # check object permissions\n    if memo.is_private and memo.user != request.user:\n        data = {'msg': 'this memo is private'}\n        return Response(data, status=status.HTTP_403_FORBIDDEN, template_name='error_msg.html')\n\n    # POST request: clip\n    if request.method == 'POST':\n        # a memo must not be clipped twice by the same user\n        if Clip.objects.filter(user=request.user, memo=memo).exists():\n            data = {'msg': 'already clipped'}\n            return Response(data, status=status.HTTP_400_BAD_REQUEST)\n\n        clip = Clip(user=request.user, memo=memo)\n        clip.save()\n\n    # DELETE request: unclip\n    elif request.method == 'DELETE':\n        if Clip.objects.filter(user=request.user, memo=memo).count() == 1:\n            # must return 1 object\n            Clip.objects.get(user=request.user, memo=memo).delete()\n        else:\n            # note: this must be a dict, not a set literal, to serialize cleanly\n            data = {'msg': 'found multiple or no clip'}\n            return Response(data, status=status.HTTP_400_BAD_REQUEST)\n\n    return Response(status=status.HTTP_200_OK)\n\n\n@api_view()\n@permission_classes((permissions.IsAuthenticated,))\ndef memo_square(request):\n    return Response({'msg': 'Coming Soon!'}, template_name='error_msg.html')\n\n\n# Memo list for a URL. 
If memo not exists, return None\n@api_view()\n@permission_classes((permissions.IsAuthenticated,))\n@renderer_classes([JSONRenderer])\ndef find_by_page(request):\n if 'url' in request.query_params:\n page_url = request.query_params['url']\n else:\n data = 'param \"url\" does not exist'\n return Response(data, status=status.HTTP_400_BAD_REQUEST)\n\n query_set = find_memo(page_url, request)\n\n # for faster response speed\n if not query_set:\n return Response({'memo_list': []})\n\n paginator = LimitOffsetPagination()\n paginated_query_set = paginator.paginate_queryset(query_set, request)\n serializer = MemoSerializer(paginated_query_set, many=True, context={'user': request.user})\n return Response({'memo_list': serializer.data,\n 'prev': paginator.get_previous_link(),\n 'next': paginator.get_next_link(),\n 'count': len(query_set)})\n\n\n@api_view(['POST'])\n@permission_classes((permissions.IsAuthenticated,))\ndef lock_unlock(request, pk):\n memo = get_object_or_404(Memo, pk=pk)\n # check object permissions\n if memo.user != request.user:\n data = {'msg': 'you are not owner'}\n return Response(data, status=status.HTTP_403_FORBIDDEN, template_name='error_msg.html')\n\n # toggle boolean value\n memo.is_private = not memo.is_private\n memo.save()\n\n if memo.is_private:\n result = 'private'\n else:\n result = 'public'\n\n return Response(result, status=status.HTTP_200_OK)\n\n\n@api_view(['POST', 'DELETE'])\n@renderer_classes([JSONRenderer])\n@permission_classes((permissions.IsAuthenticated,))\ndef like_dislike(request, pk):\n # When Memo is None, DoesNotExist error cannot be caught in following try statement\n memo = get_object_or_404(Memo, pk=pk)\n\n if request.method == 'POST':\n try:\n LikeMemo.objects.create(user=request.user, memo=memo)\n except IntegrityError:\n return Response('already liked', status=status.HTTP_400_BAD_REQUEST)\n result = 'liked'\n elif request.method == 'DELETE':\n try:\n like = LikeMemo.objects.get(user=request.user, memo=memo)\n except LikeMemo.DoesNotExist:\n return Response('already disliked', status=status.HTTP_400_BAD_REQUEST)\n like.delete()\n result = 'disliked'\n\n return Response(result, status=status.HTTP_200_OK)\n","sub_path":"MemoSquare/views_memo.py","file_name":"views_memo.py","file_ext":"py","file_size_in_byte":9617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"651434001","text":"s = input(\"Enter your string:\")\n\nalphabets = 0\ndigits = 0\nothers = 0\n\nfor char in s:\n if char.isalpha():\n alphabets += 1\n elif char.isdigit():\n digits += 1\n else:\n others += 1\n\n\nprint(f\"Alpahbest: {alphabets}\\ndigits:{digits}\\nothers:{others}\")\n","sub_path":"Power of IT Job/count_alpha.py","file_name":"count_alpha.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"650755239","text":"# -*- encoding: utf-8 -*-\nfrom experimental.tools.constrainttools import *\n\n\ndef test_VariableLengthStreamSolver_01():\n domain = Domain([1, 2, 3, 4], 1)\n boundary_sum = GlobalConstraint(lambda x: sum(x) < 6)\n target_sum = GlobalConstraint(lambda x: sum(x) == 5)\n\n ordered_solver = VariableLengthStreamSolver(domain, [boundary_sum], [target_sum], randomized=False)\n\n ordered_solutions = [x for x in ordered_solver]\n assert ordered_solutions == [\n [1, 1, 1, 1, 1],\n [1, 1, 1, 2],\n [1, 1, 2, 1],\n [1, 1, 3],\n [1, 2, 1, 1],\n [1, 2, 2],\n [1, 3, 1],\n [1, 4],\n [2, 1, 1, 1],\n [2, 1, 2],\n [2, 2, 1],\n [2, 3],\n [3, 1, 
1],\n [3, 2],\n [4, 1],\n ]\n\n random_solver = VariableLengthStreamSolver(domain, [boundary_sum], [target_sum], randomized=True)\n random_solutions = [x for x in random_solver]\n\n assert list(sorted(random_solutions)) == ordered_solutions\n\n more_random_solutions = [x for x in random_solver]\n\n assert list(sorted(random_solutions)) == list(sorted(more_random_solutions))\n assert random_solutions != more_random_solutions\n","sub_path":"abjad/experimental/tools/constrainttools/test/test_VariableLengthStreamSolver.py","file_name":"test_VariableLengthStreamSolver.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"42525037","text":"import requests\r\nimport json\r\n\r\nheader = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}\r\n\r\nname = input('Введите аккаунт (необходимо писать латинскими буквами): ')\r\nrepos = '/repos'\r\n\r\nmain_link='https://api.github.com/users/'+name+repos\r\n\r\nresponse = requests.get(main_link)\r\ndata = json.loads(response.text)\r\n\r\nfor key in data:\r\n print(f'Cписок репазиториев {key[\"name\"],key[\"full_name\"] }')","sub_path":"lesson1.py","file_name":"lesson1.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"238124608","text":"from har_utils import *\nfrom models.cnn_lstm import build_cnn_lstm_model\n\n# fit and evaluate a model\ndef evaluate_model(trainX, trainy, testX, testy):\n\t# define model\n\tverbose, epochs, batch_size = 1, 25, 64\n\tn_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]\n\t# reshape data into time steps of sub-sequences\n\tn_steps, n_length = 4, 32\n\ttrainX = trainX.reshape((trainX.shape[0], n_steps, n_length, n_features))\n\ttestX = testX.reshape((testX.shape[0], n_steps, n_length, n_features))\n\t\n\tmodel = build_cnn_lstm_model(n_length, n_features, n_outputs)\n\n\t# fit network\n\tmodel.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)\n\t# evaluate model\n\t_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)\n\treturn accuracy\n\n\n# run an experiment\ndef run_experiment(repeats=3):\n\t# load data\n\ttrainX, trainy, testX, testy = load_dataset()\n\t# repeat experiment\n\tscores = list()\n\tfor r in range(repeats):\n\t\tscore = evaluate_model(trainX, trainy, testX, testy)\n\t\tscore = score * 100.0\n\t\tprint('>#%d: %.3f' % (r+1, score))\n\t\tscores.append(score)\n\t# summarize results\n\tsummarize_results(scores)\n\n# run the experiment\nrun_experiment()","sub_path":"har/example_cnn_lstm.py","file_name":"example_cnn_lstm.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"561422391","text":"# -*- coding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\nSTOP_RENDERING = runtime.STOP_RENDERING\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1574208770.767524\n_enable_loop = True\n_template_filename = '/Users/dwim/Developer/AlejandroTurboGears/Python/myproject/myproject/templates/weather.mak'\n_template_uri = '/Users/dwim/Developer/AlejandroTurboGears/Python/myproject/myproject/templates/weather.mak'\n_source_encoding = 'utf-8'\nfrom markupsafe import escape_silent as escape\n_exports = 
[]\n\n\ndef _mako_get_namespace(context, name):\n    try:\n        return context.namespaces[(__name__, name)]\n    except KeyError:\n        _mako_generate_namespaces(context)\n        return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n    pass\ndef _mako_inherit(template, context):\n    _mako_generate_namespaces(context)\n    return runtime._inherit_from(context, 'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n    __M_caller = context.caller_stack._push_frame()\n    try:\n        __M_locals = __M_dict_builtin(pageargs=pageargs)\n        pressure = context.get('pressure', UNDEFINED)\n        temperature = context.get('temperature', UNDEFINED)\n        humidity = context.get('humidity', UNDEFINED)\n        __M_writer = context.writer()\n        __M_writer('\\n\\n\\n    \\n        Weather\\n    \\n    \\n\\n        \\n            \\n                State of Mexico\'s Weather\\n                Temperature: ')\n        __M_writer(escape(temperature))\n        __M_writer(' Celcius\\n                Pressure: ')\n        __M_writer(escape(pressure))\n        __M_writer(' Mb\\n                Humidity: ')\n        __M_writer(escape(humidity))\n        __M_writer(' %\\n    \\n')\n        return ''\n    finally:\n        context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"filename\": \"/Users/dwim/Developer/AlejandroTurboGears/Python/myproject/myproject/templates/weather.mak\", \"uri\": \"/Users/dwim/Developer/AlejandroTurboGears/Python/myproject/myproject/templates/weather.mak\", \"source_encoding\": \"utf-8\", \"line_map\": {\"28\": 0, \"36\": 1, \"37\": 11, \"38\": 11, \"39\": 12, \"40\": 12, \"41\": 13, \"42\": 13, \"48\": 42}}\n__M_END_METADATA\n\"\"\"\n","sub_path":"Python/myproject/data/templates/Users/dwim/Developer/AlejandroTurboGears/Python/myproject/myproject/templates/weather.mak.py","file_name":"weather.mak.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"571099377","text":"def mutate_string(string, position, character):\n\n    str1 = \"\"\n    for count in range(len(string)):\n        char = string[count]\n        if count == position:\n            str1 = str1 + character\n        else:\n            str1 = str1 + string[count]\n        # print(string[count])\n\n    return str1\n\n\nif __name__ == '__main__':\n    s = input()\n    i, c = input().split()\n    s_new = mutate_string(s, int(i), c)\n    print(s_new)","sub_path":"HackerRankCodes/StringMutation.py","file_name":"StringMutation.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"96511818","text":"## An implementation of several algorithms for training a Maximum Entropy model,\n## including:\n## Generalized Iterative Scaling (described in Goodman 2002) \n## Sequential Conditional Generalized Iterative Scaling (as above)\n## maximize Log Likelihood using (Nonlinear) Conjugate Gradient method (Goldwater & Johnson 2003)\n\nimport math\nimport warnings\n\n#import scipy.optimize ## for Conjugate Gradient ## no longer needed\nimport cg ## my handmade CG\n\ndef MaximumEntropy(t, method='CG', **d) :\n    ''' Maximum Entropy model\n    method = 'GIS'|'SCGIS'|'CG', 'CG' is default.\n    \n    GIS: Generalized Iterative Scaling,\n        (described in Goodman 2002)\n    SCGIS: Sequential Conditional Generalized Iterative Scaling, \n        (Goodman 2002)\n        //problem _scgis1: may produce mismatching predictions (This is solved. \n        See comments in maxent_scgis.)\n    CG: Nonlinear Conjugate Gradient method,\n        (Goldwater & Johnson 2003)\n        Some critical parameters may need to be adjusted, e.g. 
sigma0 to achieve convergence.'''\n    return {'GIS':maxent_gis,\n            'SCGIS':maxent_scgis,\n            'CG':maxent_cg, ## under construction\n            }.get(method)(t, **d)\n\nclass __ins_object :\n    def __init__(self) :\n        self.cand = None\n        self.freq = None\n    def __str__(self) :\n        return '%s%s'%(self.freq, self.cand)\n    __repr__ = __str__\nclass __cand_object :\n    def __init__(self) :\n        self.vio = None\n        self.freq = None\n    def __str__(self) :\n        return '%s%s'%(self.freq, self.vio)\n    __repr__ = __str__\ndef get_maxent_input(t) :\n    cnt_examples = 0\n    for d in t.datum :\n        for frequency in d.winners.values() :\n            cnt_examples += frequency\n    cons_ind = t.get_constraint_indices()\n    instance = list()\n    for d in t.datum :\n        ins = __ins_object()\n        ins.cand = list()\n        for cand, vio_dict in d.candidates.items() :\n            c = __cand_object()\n            c.vio = tuple(vio_dict.get(i, 0) for i in cons_ind)\n            c.freq = d.winners.get(cand, 0) / cnt_examples\n            ins.cand.append(c)\n        ins.freq = sum(c.freq for c in ins.cand)\n        instance.append(ins)\n    observed = tuple(sum(sum(c.vio[i]*c.freq\n                             for c in ins.cand)\n                         for ins in instance) \n                     for i in range(len(cons_ind)))\n    slowing_factor = max(max(sum(c.vio)\n                             for c in ins.cand)\n                         for ins in instance)\n    slowing_factor_list = tuple(max(max(c.vio[i]\n                                        for c in ins.cand)\n                                    for ins in instance)\n                                for i in range(len(cons_ind)))\n    \n    ans = {'ins':instance, 'obs':observed, 'slo':slowing_factor, \n           'ind':cons_ind, 'slolist':slowing_factor_list}\n    print(ans)\n    return ans\n    \ndef maxent_gis(t, maxiter=10000, needtrim=True, lower_lim=-50, upper_lim=0, \n               callback=None, **unknown_opt) :\n    '''Generalized Iterative Scaling'''\n    _check_unknown_options(unknown_opt)\n    inp = get_maxent_input(t)\n    instance = inp['ins']\n    observed = inp['obs']\n    slowing_factor = inp['slo']\n    cons_ind = inp['ind']\n    all0 = list(0 for _ in cons_ind)\n    cons_n = len(cons_ind)\n    if needtrim :\n        def trim(w) :\n            return max(min(w, upper_lim), lower_lim)\n    else :\n        def trim(w) : return w\n    \n    w = tuple(all0)\n    for _ in range(maxiter) :\n        expected = all0.copy()\n        for ins in instance :\n            sj = tuple(sum(wi*fi for wi, fi in zip(w,c.vio) if fi != 0)\n                       for c in ins.cand)\n            z = sum(math.exp(sjy) for sjy in sj)\n            for y, c in enumerate(ins.cand) :\n                for i, fi in enumerate(c.vio) :\n                    if fi != 0 :\n                        expected[i] += fi * math.exp(sj[y]) / z * ins.freq\n            #expected = tuple(e/z for e in expected)\n        \n        delta = tuple(math.log(oi/ei)/slowing_factor if oi!=0 else lower_lim\n                      for oi,ei in zip(observed, expected)) \n        w = tuple(trim(wi+di) for wi,di in zip(w,delta))\n        if callback : callback(w)\n    return dict(zip(cons_ind, w))\n\ndef maxent_scgis(t, maxiter=10000, needtrim=True, lower_lim=-50, upper_lim=0, \n                 callback=None, **unknown_opt) :\n    '''Sequential Conditional Generalized Iterative Scaling'''\n    _check_unknown_options(unknown_opt)\n    inp = get_maxent_input(t)\n    instance = inp['ins']\n    observed = inp['obs']\n    sf_list = inp['slolist']\n    cons_ind = inp['ind']\n    all0 = list(0 for _ in cons_ind)\n    cons_n = len(cons_ind)\n    if needtrim :\n        def trim(w) :\n            return max(min(w, upper_lim), lower_lim)\n    else :\n        def trim(w) : \n            return w\n    toupper = upper_lim - lower_lim\n    tolower = - toupper\n    \n    w = list(all0)\n    z = list(len(ins.cand) for ins in instance)\n    s = list(list(0 for c in ins.cand)\n             for ins in instance)\n    for _ in range(maxiter) :\n        for i in range(cons_n) :\n            expectedi = sum(ins.freq*sum(c.vio[i]*math.exp(s[j][y])/z[j]\n                                         for y, c in enumerate(ins.cand) if c.vio[i] != 0)\n                            for j, ins in enumerate(instance))\n            if observed[i] != 0 :\n                #print('expectedi:', expectedi)\n                di = 
(math.log(observed[i]/expectedi) / sf_list[i]\n                      if expectedi > 0 else toupper)\n                wi = trim(w[i]+di)\n            else : \n                wi = lower_lim\n            if wi != w[i] :\n                di = wi - w[i]\n                w[i] = wi\n                for j, ins in enumerate(instance) :\n                    for y, c in enumerate(ins.cand) :\n                        if c.vio[i] != 0 :\n                            z[j] -= math.exp(s[j][y])\n                            s[j][y] += di * c.vio[i] \n                            ## the factor $c.vio[i] does not appear in the pseudocode (Figure 2, Goodman 2002)\n                            ## After adding this, problem _scgis1 solved.\n                            z[j] += math.exp(s[j][y])\n            ## Due to the cumulative floating-point error, the value of $z and $s \n            ## may need to be recalculated by definition after every several \n            ## iterations or when the error would cause notable harm to con-\n            ## vergence. The suitable timing is left to be determined.\n        if callback : callback(w)\n    return dict(zip(cons_ind, w))\n\ndef loglikelihood(t) :\n    inp = get_maxent_input(t)\n    instance = inp['ins']\n    def f(w) :\n        ans = 0\n        #w = tuple(wi if coi > 0 else 0 for wi, coi in zip(w, co))\n        for ins in instance :\n            logw = tuple(sum(wi*fi \n                             for wi, fi in zip(w, c.vio) if fi != 0)\n                         for c in ins.cand)\n            #print('logw:', logw)\n            logz = math.log(sum(math.exp(x) for x in logw))\n            #print('logz:', logz)\n            ans += sum(c.freq * logwi \n                       for logwi, c in zip(logw, ins.cand) if c.freq != 0)\n            ans -= ins.freq * logz\n        return ans\n    return f\n    \n\ndef maxent_cg(t, prior=None, trim0=False,\n              epsilon=cg.epsilon, tol=1e-9, maxiter=None, \n              linear=cg.linear_secant, linear_tol=1e-5, linear_maxiter=4, sigma0=0.01, approx_hessian=False, \n              callback=None, **unknown_opt) :\n    _check_unknown_options(unknown_opt)\n#    cnt_examples = 0\n#    for d in t.datum :\n#        for frequency in d.winners.values() :\n#            cnt_examples += frequency\n    inp = get_maxent_input(t)\n    instance = inp['ins']\n    #observed = inp['obs']\n    cons_ind = inp['ind']\n    #all0 = list(0 for _ in cons_ind)\n    #cons_n = len(cons_ind)\n    if prior == None : prior = lambda _ : 0\n    \n    co = list(1 for _ in cons_ind)\n    def f(w) :\n        ans = 0\n        w = tuple(wi if coi > 0 else 0 for wi, coi in zip(w, co))\n        for ins in instance :\n            logw = tuple(sum(wi*fi \n                             for wi, fi in zip(w, c.vio) if fi != 0)\n                         for c in ins.cand)\n            #print('logw:', logw)\n            logz = math.log(sum(math.exp(x) for x in logw))\n            #print('logz:', logz)\n            ans += sum(c.freq * logwi \n                       for logwi, c in zip(logw, ins.cand) if c.freq != 0)\n            ans -= ins.freq * logz\n        return ans\n\n    w0 = tuple(0 for _ in t.get_constraint_indices())\n    \n    fcg = lambda w : - f(w) + prior(w)\n    cg_opt = {'epsilon':epsilon, 'tol':tol, 'maxiter':maxiter, \n              'linear':linear, 'linear_tol':linear_tol, 'linear_maxiter':linear_maxiter, \n              'sigma0':sigma0, 'approx_hessian':approx_hessian, \n              'callback':callback}\n    while True :\n        ans = cg.nonlinear_cg(fcg, w0, **cg_opt)\n        icons, w = max(enumerate(ans), key=lambda x:x[1])\n        if w <= 0 or not trim0 : break\n        co[icons] = 0\n#    ans = scipy.optimize.fmin_cg(f, w0, callback=callback)#, bounds=list((None, 0) for w in w0))\n    print('f(x):', f(ans))\n    \n    return dict(zip(cons_ind, ans))\n    \n## extracted from scipy.optimize\nclass MaxentWarning(UserWarning) :\n    pass\ndef _check_unknown_options(unknown_options):\n    if unknown_options:\n        msg = \", \".join(map(str, unknown_options.keys()))\n        # Stack level 4: this is called from _minimize_*, which is\n        # called from another function in Scipy. 
Level 4 is the first\n # level in user code.\n warnings.warn(\"Unknown solver options: %s\" % msg, MaxentWarning, 4)\n","sub_path":"maxent.py","file_name":"maxent.py","file_ext":"py","file_size_in_byte":9409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"653898190","text":"#!/usr/bin/env python3 \nfrom sys import argv\nfrom render import rendering\nfrom customMathFunc import paddingRightMostBin\nimport numpy as np\n\ndef integrator(ndims, cv, force, half_boxboundary, outputfile):\n\n cv = np.array(cv)\n force = np.array(force)\n\n if ndims == 1: # OK\n\n intg_interval = abs(cv[0] - cv[1])\n accIntg = 0\n FE = np.zeros(force.shape[0]) \n factor = intg_interval * 0.5 \n\n for i in range(len(force) - 1):\n accIntg -= (force[i] + force[i+1]) # freeE = -Sf(x)dx\n FE[i] = accIntg * factor \n\n FE = paddingRightMostBin(FE)\n\n with open(outputfile, \"w\") as fout:\n for i, j in zip(cv, FE):\n fout.write(str(i) + \" \" + str(j) + \"\\n\")\n\n#---------------------------------------------------------------------#\n\n if ndims == 2: # probably OK? \n\n intg_interval = abs(cv[1][0][0] - cv[1][0][1])\n factor = intg_interval * 0.5 \n FE_X = 0\n FE_Y = 0\n acc_at_x = np.zeros((force.shape[1]))\n acc_at_y = np.zeros((force.shape[1], force.shape[2]))\n FE = np.zeros((force.shape[1], force.shape[2]))\n\n for i in range(force.shape[1] - 1):\n FE_X = (force[0][i][0] + force[0][i+1][0])\n acc_at_x[i] -= FE_X \n for j in range(force.shape[2]- 1):\n FE_Y -= (force[1][i][j] + force[1][i][j+1])\n acc_at_y[i][j] = FE_Y\n FE_Y = 0\n\n acc_at_x = np.append(acc_at_x, acc_at_x[-1])\n\n for i in range(force.shape[1]):\n for j in range(force.shape[2]):\n FE[i][j] = (acc_at_x[i] + acc_at_y[i][j]) * factor\n\n FE += 29.5 \n FE = paddingRightMostBin(FE)\n\n with open(outputfile, \"w\") as fout:\n for i in range(force.shape[1]):\n for j in range(force.shape[2]):\n fout.write(str(cv[0][i][j]) + \" \" + str(cv[1][i][j]) + \" \" + str(FE[i][j]) + \"\\n\")\n\n s = rendering(ndims, half_boxboundary, force.shape[1] - 1)\n s.render(FE, name=\"FreeE_2D\")\n \n\n\nif __name__ == \"__main__\":\n\n ndims = int(argv[1])\n\n if ndims == 1:\n cv = []\n force = []\n half_boxboundary = np.pi\n with open(\"force_1D.dat\", \"r\") as fin:\n for line in fin:\n line = line.split() \n cv.append(float(line[0]))\n force.append(float(line[1]))\n\n if ndims == 2:\n dsz = 41 \n cv = np.zeros((ndims, dsz, dsz))\n force = np.zeros((ndims, dsz, dsz))\n i = 0\n j = 0\n half_boxboundary = 2\n with open(\"force_2D.dat\", \"r\") as fin:\n for line in fin:\n line = line.split()\n cv[0][i][j] = float(line[0])\n cv[1][i][j] = float(line[1])\n force[0][i][j] = float(line[2])\n force[1][i][j] = float(line[3])\n j += 1\n if j == dsz:\n i += 1\n j = 0\n\n integrator(ndims, cv, force, half_boxboundary, \"FreeE_\" + str(ndims) + \"D.dat\")\n","sub_path":"FreeEnergySampling/annSampling/new_MD_engine/wo_StopCriteria_init/integrator/integrator.py","file_name":"integrator.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"304761239","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef plt_learning_curve(neural_res, ax, fig_width, fig_height, title, l_ylim, r_ylim, xlim):\n neural_responses = np.load(neural_res)\n R_fea_list, R_null_list = neural_responses['arr_0'], neural_responses['arr_1']\n\n matplotlib.rcParams['figure.figsize'] = (fig_width, fig_height)\n cycles = np.arange(0, 
R_null_list.shape[0], 1)\n\n    color = ['r','b','g','m', '#FF6600', '#00ffff', '#FDEE00', '#D71868', 'y', 'c', 'k']\n    \n    ax1 = ax.twinx()\n    ax.plot(cycles, R_null_list, 'k')\n    count = 0\n    for fea in R_fea_list:\n        ax1.plot(cycles, fea, color[count])\n        count += 1\n\n    ax.set_xlabel('Cycles', fontsize=10)\n    ax.set_ylabel('Rate (Hz)', fontsize=10)\n    ax1.set_ylabel('Spikes', fontsize=10)\n\n    if title:\n        plt.title(title, fontsize=15)\n    if l_ylim:\n        ax.set_ylim((0, l_ylim))\n    if r_ylim:\n        ax1.set_ylim((0, r_ylim))\n    if xlim:\n        plt.xlim((0, xlim))\n\n","sub_path":"training_pool/l_curve.py","file_name":"l_curve.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"159588699","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nmp =r'D:/1/'\r\nsave_path = r'D:/3/'\r\nflist = os.listdir(mp)# first-level directory: dates\r\nsku = []\r\nfor f in flist:\r\n    if \"ed\" in f:# check whether the name contains \"ed\"\r\n        for f2 in os.listdir(os.path.join(mp,f)):# second-level directory: one shooting session\r\n            class_id = open(os.path.join(mp,f,f2, \"class_id.txt\"),\"r+\",encoding =\"utf-8-sig\")\r\n            for line in class_id.readlines():# read the class_id file\r\n                sku.append(line)\r\n            class_id.close()# close the handle opened for this session\r\nsku = set(sku)\r\nprint(sku)\r\nresult = open(os.path.join(mp, \"sku.txt\"),\"w+\",encoding =\"utf-8-sig\")\r\nfor i in sku:\r\n    result.write(i)\r\nresult.close()\r\nprint(\"完成\")\r\n","sub_path":"Split_picture/sku.py","file_name":"sku.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"88576674","text":"# -*- coding: utf-8 -*-\r\n\"\"\"Write a program that computes the value of a+aa+aaa+aaaa with a given digit as the value of a.\r\nSuppose the following input is supplied to the program:\r\n9\r\nThen, the output should be:\r\n11106\r\nCreated on Sun Mar 3 11:20:39 2019\r\n\r\n@author: mahtab faraji\r\nwww.onlinebme.com\r\n\"\"\"\r\n\r\na=input('Enter the value as a: ')\r\n\r\nout=eval(a)+eval(a+a)+eval(a+a+a)+eval(a+a+a+a)\r\nprint(out)\r\n \r\n \r\n","sub_path":"project15.py","file_name":"project15.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"498841695","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport statsmodels.api as sm\nfrom statsmodels.stats.proportion import proportions_ztest\nfrom sklearn import linear_model\nfrom sklearn.preprocessing import scale, robust_scale, minmax_scale\n\n\n# In[3]:\n\n\ndata = pd.read_csv('실습화일/FITNESS.csv', engine='python', encoding='euc-kr')\n\n\n# In[94]:\n\n\nds_fitness = data.copy()\nds_fitness.head()\n\n\n# In[95]:\n\n\nds_fitness.isnull().sum()\n\n\n# In[96]:\n\n\nds_fitness[\"GENDER\"].fillna(\"여성\", inplace = True)\nds_fitness.head()\n\n\n# In[97]:\n\n\nds_fitness.groupby(\"GENDER\")[\"WEIGHT\"].agg(\"mean\")\n\n\n# In[98]:\n\n\nds_fitness.groupby(\"GENDER\")[\"WEIGHT\"].transform(\"mean\").head()\n\n\n# In[99]:\n\n\nfit = ds_fitness\n\n\n# In[100]:\n\n\nfit.groupby(\"GENDER\")[\"WEIGHT\"].transform(\"mean\").head()\n\n\n# In[101]:\n\n\nfit[\"WEIGHT\"] = fit[\"WEIGHT\"].fillna(fit.groupby(\"GENDER\")[\"WEIGHT\"].transform(\"mean\")).round(3)\n\n\n# In[102]:\n\n\nfit.head()\n\n\n# In[103]:\n\n\nfit_char = fit.select_dtypes(include=\"object\")\n\n\n# In[104]:\n\n\nfit_numeric = fit.select_dtypes(exclude=\"object\")\n\n\n# In[105]:\n\n\n#page 252\n# scale: data standardization function\nds_scale = scale(fit_numeric)\n# scale() returns a numpy array, so convert it back to a DataFrame\nds_scale = pd.DataFrame(ds_scale, columns = fit_numeric.columns)\nds_scale.head()\n\n# pandas.DataFrame.describe(): summary statistics\nds_scale_describe = ds_scale.describe()\nds_scale_describe.round(3)\n\n\n# In[106]:\n\n\n#page 253: data scaling with minmax_scale\n\n# minmax_scale(): rescales the data using its min and max values\nds_minmax_scale = minmax_scale(fit_numeric)\nds_minmax_scale = pd.DataFrame(ds_minmax_scale, columns = fit_numeric.columns)\nds_minmax_scale.head()\n\n# summary statistics\nds_minmax_scale_describe = ds_minmax_scale.describe()\nds_minmax_scale_describe.round(3)\n\n\n# In[107]:\n\n\n#page 254\n# robust_scale(): data transformation function\nds_robust_scale = robust_scale(fit_numeric)\nds_robust_scale = pd.DataFrame(ds_robust_scale, columns = fit_numeric.columns)\nds_robust_scale.head()\n\n# summary statistics\nds_robust_scale_describe = ds_robust_scale.describe()\nds_robust_scale_describe.round(3)\n\n\n# In[108]:\n\n\n#page 255\n# compare the Scale, Robust, and MinMax transformations\nds_rstpulse = pd.DataFrame()\nds_rstpulse[\"Raw\"] = ds_fitness[\"RSTPULSE\"]\nds_rstpulse[\"Scale\"] = ds_scale[\"RSTPULSE\"]\nds_rstpulse[\"Robust\"] = ds_robust_scale[\"RSTPULSE\"]\nds_rstpulse[\"MinMax\"] = ds_minmax_scale[\"RSTPULSE\"]\nds_rstpulse.round(3)\n\n\n# In[109]:\n\n\n# boxplot: box-and-whisker plot; figsize: plot size (x, y)\n# outlier detection and handling: spot outliers with the boxplot\nds_fitness.boxplot(figsize = (10,4))\n\n\n# In[110]:\n\n\n# check which RSTPULSE values are 100 or above\nds_fitness[\"RSTPULSE\"]>=100\n\n# keep only the rows with RSTPULSE below 100\nds_fitness = ds_fitness[ds_fitness[\"RSTPULSE\"]<100]\nds_fitness\n\n\n# In[4]:\n\n\ndata\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Downloads/homework/2019-01-28 bigData_Day3.py","file_name":"2019-01-28 bigData_Day3.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"611067925","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 21 15:52:16 2017\n\n@author: Leslie Murphy, Patrick Marquard, Ian Gorman\n\"\"\"\nimport pygame\nfrom vec2d import Vec2d\nimport math\n\nclass Shape:\n    def __init__(self, pos, vel, angle, angvel, color, mass, moment, points):\n        self.pos = Vec2d(pos)\n        self.vel = Vec2d(vel)\n        self.force = Vec2d(0,0)\n        self.color = color\n        self.origpoints = []\n        self.points = []\n        for p in points:\n            self.origpoints.append(Vec2d(p))\n            self.points.append(Vec2d(p))\n        self.create_origaxes()\n        self.angle = angle\n        self.angvel = float(angvel)\n        self.torque = 0.0\n        self.mass = mass\n        self.massinv = 1.0/mass\n        self.moment = moment\n        self.momentinv = 1.0/moment\n        self.visible = True\n        self.update_points()\n        self.update_axes() \n    \n    def create_origaxes(self):\n        self.origaxes = []\n        self.axes = []\n        for i in range(len(self.origpoints)):\n            a = (self.origpoints[i]-self.origpoints[i-1]).perpendicular_normal()\n            self.origaxes.append(a)\n            self.axes.append(a)\n    \n    def update_points(self):\n        for i in range(len(self.origpoints)):\n            newX = self.origpoints[i].x*math.cos(self.angle) + self.origpoints[i].y*-math.sin(self.angle)\n            newY = self.origpoints[i].x*math.sin(self.angle) + self.origpoints[i].y*math.cos(self.angle)\n            newPoint = Vec2d(newX,newY)\n\n            self.points[i] = newPoint + self.pos\n    \n    def update_axes(self):\n        for i in range(len(self.origaxes)):\n            #update axes\n            newX = self.origaxes[i].x*math.cos(self.angle) + self.origaxes[i].y*-math.sin(self.angle)\n            newY = self.origaxes[i].x*math.sin(self.angle) + self.origaxes[i].y*math.cos(self.angle)\n            newAxes = Vec2d(newX,newY)\n            \n            self.axes[i] = newAxes\n    \n    
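## Descriptive note on the update below (a standard 2D rigid-body impulse\n    ## step): the linear velocity changes by imp/mass, and the angular velocity\n    ## by the 2D cross product (pos - self.pos) x imp divided by the moment.\n    def 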
add_impulse(self, imp, pos):\n self.vel += imp/self.mass\n vec = Vec2d(pos - self.pos)\n self.angvel += (vec.cross(imp))/self.moment\n \n def draw(self, screen):\n if self.visible:\n self.update_points()\n n = len(self.points)\n if n > 2:\n pygame.draw.polygon(screen, self.color, self.points, 0) \n else:\n pygame.draw.line(screen, self.color, self.points[0], self.points[-1], 1)","sub_path":"ShapeClass.py","file_name":"ShapeClass.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"274081147","text":"\"\"\"\nRoutes and views for the flask application.\n\"\"\"\n\nfrom datetime import datetime\nfrom flask import Flask, render_template, redirect, url_for, request\nfrom deVOTEd import app\nfrom .RegistrationForm import RegistrationForm\nfrom .User import User\n\n@app.route('/')\n@app.route('/home')\ndef home():\n \"\"\"Renders the home page.\"\"\"\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n )\n\n@app.route('/business')\ndef business():\n \"\"\"Renders the contact page.\"\"\"\n return render_template(\n 'business.html',\n title='Business',\n year=datetime.now().year,\n message='Your business page.'\n )\n\n\n@app.route('/contact')\ndef contact():\n \"\"\"Renders the contact page.\"\"\"\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegistrationForm(request.form)\n if request.method == 'POST' and form.validate():\n user = User(form.username.data, form.email.data,\n form.password.data, form.userStatus.data)\n \n return redirect(url_for('login'))\n return render_template('register.html', form=form)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n if request.form['username'] != 'admin' or request.form['password'] != 'admin':\n error = 'Invalid Credentials. 
Please try again.'\n        else:\n            if request.form['userStatus'] == \"Business\":\n                return redirect(url_for(\"businesses\"))\n            if request.form['userStatus'] == \"Individual\":\n                return redirect(url_for(\"Individual\"))\n    return render_template('login.html', error=error)\n\n@app.route('/about')\ndef about():\n    \"\"\"Renders the about page.\"\"\"\n    return render_template(\n        'about.html',\n        title='About',\n        year=datetime.now().year,\n        message='Your application description page.'\n    )\n\n@app.route('/Individual')\ndef Individual():\n    \"\"\"Renders the individual page.\"\"\"\n    return render_template(\n        'Individual.html',\n        title='Individual',\n        year=datetime.now().year,\n        message='Your application description page.'\n    )\n\n@app.route('/businesses')\ndef businesses():\n    \"\"\"Renders the businesses page.\"\"\"\n    return render_template(\n        'businesses.html',\n        title='Businesses',\n        year=datetime.now().year,\n        message='Your application description page.'\n    )\n\n@app.route('/marketplace')\ndef marketplace():\n    \"\"\"Renders the marketplace page.\"\"\"\n    return render_template(\n        'marketplace.html',\n        title='marketplace',\n        year=datetime.now().year,\n        message='Your application description page.'\n    )\n","sub_path":"deVOTEd/deVOTEd/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"306269619","text":"\"\"\"\nCls and bbox head for resnet-c4 feature map.\n\"\"\"\n\nimport torch.nn as nn\n\n\nclass ClsBBoxHead(nn.Module):\n    \"\"\"Classification and bounding box regression head using fully-connected style.\n    \"\"\"\n\n    def __init__(self, depth, num_classes):\n        super(ClsBBoxHead, self).__init__()\n        self.num_classes = num_classes\n        self.fc_cls = nn.Linear(depth, num_classes)\n        self.fc_bbox = nn.Linear(depth, num_classes * 4)\n        self.log_softmax = nn.LogSoftmax(dim=1)\n\n    def forward(self, x):\n        \"\"\"\n        \n        Args:\n            x: (NxS)xCxHxW, roi fixed dimensional representation after pooling like RoIAlign,\n                HxW: fixed size, like 7x7.\n\n        Returns:\n            cls_prob: (NxS)x num_classes, probability of class.\n            bbox_reg: (NxS)x num_classes x 4(dx, dy, dw, dh), defined in R-CNN paper.\n        \n        Notes: In the above, S is the number of RoIs per image fed to the prediction heads.\n        \n        \"\"\"\n\n        fc_out_cls = self.fc_cls(x)\n        cls_prob = self.log_softmax(fc_out_cls)\n        bbox_reg = self.fc_bbox(x)\n        bbox_reg = bbox_reg.view(bbox_reg.size(0), self.num_classes, 4)\n\n        return cls_prob, bbox_reg\n","sub_path":"heads/cls_bbox.py","file_name":"cls_bbox.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"534059574","text":"#!/usr/bin/env python3\n\nimport csv\nfrom datetime import datetime\nfrom pprint import pprint\n\n# 1. load input.csv into a variable called `polls`\nwith open(\"input.csv\") as f:\n\treader = csv.DictReader(f)\n\trows = list(reader)\n\trows = [dict(row) for row in rows]\n\n# 2. write a new file called output.csv and write a row with two headers: \"date\" and \"approve\"\nwith open(\"output.csv\", \"w\") as f:\n\twriter = csv.writer(f)\n\twriter.writerow([\"date\", \"approve\"])\n\n\t# 3. Loop through each row of `polls` \n\tfor row in rows:\n\t    # 4. and within that loop... convert the format of `enddate` from \"1/22/2017\" to \"22-Jan-17\"\n\t\traw_date = row[\"enddate\"]\n\t\tapprove = row[\"approve\"]\n\n\t\tdate = datetime.strptime(raw_date, \"%m/%d/%Y\")\n\t\tnew_date = datetime.strftime(date, \"%-d-%b-%y\")\n \n\t    # 5. 
write a new row of data with the transformed date and value for \"approve\" \n\t\twriter.writerow([new_date, approve])\n\n# Just done by myself, please see Abdullah's file for the in-class work","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"104168202","text":"import os\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport scipy.signal\nimport scipy.stats\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import LeaveOneGroupOut\nimport itertools\nimport activity_classifier_utils\n\n\n\nfs = 256\ndata = activity_classifier_utils.LoadWristPPGDataset()\nlabels, subjects, features = activity_classifier_utils.GenerateFeatures(data,\n fs,\n window_length_s=10,\n window_shift_s=10)\n\n\nn_estimators_opt = [170, 50, 100, 150, 300]\nmax_tree_depth_opt = range(2, 10)\nlearning_rate_opt = [1, 0.1]\n\n\nclass_names = np.array(['bike', 'run', 'walk'])\nlogo = LeaveOneGroupOut()\naccuracy_table = []\n\n\n\n\n#----------------------------------------NESTED CROSS VALIDATIONS ----------------------------------------------#\nclass_names = ['bike', 'run', 'walk']\n\n# Store the confusion matrix for the outer CV fold.\nnested_cv_cm = np.zeros((3, 3), dtype='int')\n\nnested_cv_cm_gb = np.zeros((3, 3), dtype='int')\n\nnested_cv_cm_ada= np.zeros((3, 3), dtype='int') \n\nsplits = 0\naccuracy_table = []\n\n\n\n#-----------------------------------------------------------------------------------------------------------------#\n\n\n\n\nfor train_val_ind, test_ind in logo.split(features, labels, subjects):\n # Split the dataset into a test set and a training + validation set.\n # Model parameters (the random forest tree nodes) will be trained on the training set.\n # Hyperparameters (how many trees and the max depth) will be trained on the validation set.\n # Generalization error will be computed on the test set.\n X_train_val, y_train_val = features[train_val_ind], labels[train_val_ind]\n subjects_train_val = subjects[train_val_ind]\n X_test, y_test = features[test_ind], labels[test_ind]\n \n # Keep track of the best hyperparameters for this training + validation set.\n best_hyper_params = None\n best_accuracy = 0\n \n for n_estimators, max_tree_depth in itertools.product(n_estimators_opt,\n max_tree_depth_opt):\n # Optimize hyperparameters as above.\n inner_cm = np.zeros((3, 3), dtype='int')\n clf = RandomForestClassifier(n_estimators=n_estimators,\n max_depth=max_tree_depth,\n random_state=42,\n class_weight='balanced')\n \n for train_ind, validation_ind in logo.split(X_train_val, y_train_val,\n subjects_train_val):\n X_train, y_train = X_train_val[train_ind], y_train_val[train_ind]\n X_val, y_val = X_train_val[validation_ind], y_train_val[validation_ind]\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_val)\n c = confusion_matrix(y_val, y_pred, labels=class_names)\n inner_cm += c\n classification_accuracy = np.sum(np.diag(inner_cm)) / np.sum(np.sum((inner_cm)))\n \n # Keep track of the best pair of hyperparameters.\n if classification_accuracy > best_accuracy:\n best_accuracy = classification_accuracy\n best_hyper_params = (n_estimators, max_tree_depth)\n \n # Create a model with 
the best pair of hyperparameters for this training + validation set.\n best_clf = RandomForestClassifier(n_estimators=best_hyper_params[0],\n max_depth=best_hyper_params[1],\n class_weight='balanced')\n \n # Finally, train this model and test it on the test set.\n best_clf.fit(X_train_val, y_train_val)\n y_pred = best_clf.predict(X_test)\n \n # Aggregate confusion matrices for each CV fold.\n c = confusion_matrix(y_test, y_pred, labels=class_names)\n nested_cv_cm += c\n splits += 1\n print('Done split {}'.format(splits))\n\n\n\n\n print('random forest over ')\n\n\n\n best_hyper_params_gb = None\n best_accuracy_gb = 0\n \n for n_estimators, max_tree_depth, lr in itertools.product(n_estimators_opt,\n max_tree_depth_opt, learning_rate_opt):\n # Optimize hyperparameters as above.\n inner_cm = np.zeros((3, 3), dtype='int')\n \n clf = GradientBoostingClassifier(n_estimators=n_estimators,\n max_depth=max_tree_depth,\n learning_rate = lr,\n random_state=42)\n\n\n \n for train_ind, validation_ind in logo.split(X_train_val, y_train_val,\n subjects_train_val):\n X_train, y_train = X_train_val[train_ind], y_train_val[train_ind]\n X_val, y_val = X_train_val[validation_ind], y_train_val[validation_ind]\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_val)\n c = confusion_matrix(y_val, y_pred, labels=class_names)\n inner_cm += c\n classification_accuracy = np.sum(np.diag(inner_cm)) / np.sum(np.sum((inner_cm)))\n \n # Keep track of the best pair of hyperparameters.\n if classification_accuracy > best_accuracy_gb:\n best_accuracy_gb = classification_accuracy\n best_hyper_params_gb = (n_estimators, max_tree_depth, lr)\n \n # Create a model with the best pair of hyperparameters for this training + validation set.\n best_clf = GradientBoostingClassifier(n_estimators=best_hyper_params_gb[0],\n max_depth=best_hyper_params_gb[1],\n learning_rate = best_hyper_params_gb[2]\n )\n \n # Finally, train this model and test it on the test set.\n best_clf.fit(X_train_val, y_train_val)\n y_pred = best_clf.predict(X_test)\n \n # Aggregate confusion matrices for each CV fold.\n c_gb = confusion_matrix(y_test, y_pred, labels=class_names)\n nested_cv_cm_gb += c_gb\n \n print('Done split {}'.format(splits))\n \n\n print('gradient boosting over ')\n\n best_hyper_params_ada = None\n best_accuracy_ada = 0\n \n for n_estimators, max_tree_depth in itertools.product(n_estimators_opt,\n learning_rate_opt):\n # Optimize hyperparameters as above.\n inner_cm = np.zeros((3, 3), dtype='int')\n \n clf = AdaBoostClassifier(RandomForestClassifier(n_estimators=best_hyper_params[0],\n max_depth=best_hyper_params[1]),\n n_estimators=n_estimators,\n learning_rate=max_tree_depth,\n random_state=42)\n\n\n \n for train_ind, validation_ind in logo.split(X_train_val, y_train_val,\n subjects_train_val):\n X_train, y_train = X_train_val[train_ind], y_train_val[train_ind]\n X_val, y_val = X_train_val[validation_ind], y_train_val[validation_ind]\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_val)\n c = confusion_matrix(y_val, y_pred, labels=class_names)\n inner_cm += c\n classification_accuracy = np.sum(np.diag(inner_cm)) / np.sum(np.sum((inner_cm)))\n \n # Keep track of the best pair of hyperparameters.\n if classification_accuracy > best_accuracy_ada:\n best_accuracy_ada = classification_accuracy\n best_hyper_params_ada = (n_estimators, max_tree_depth)\n \n # Create a model with the best pair of hyperparameters for this training + validation set.\n print(best_hyper_params_ada)\n \n\n\n\n best_clf = 
AdaBoostClassifier(RandomForestClassifier(n_estimators=best_hyper_params[0],\n                                                         max_depth=best_hyper_params[1]),\n                                n_estimators= best_hyper_params_ada[0],\n                                learning_rate=best_hyper_params_ada[1])\n    \n    # Finally, train this model and test it on the test set.\n    best_clf.fit(X_train_val, y_train_val)\n    y_pred = best_clf.predict(X_test)\n    \n    # Aggregate confusion matrices for each CV fold.\n    c_ada = confusion_matrix(y_test, y_pred, labels=class_names)\n    nested_cv_cm_ada += c_ada\n    \n    print('Done split {}'.format(splits))\n    print(\"ada-boosting over\")\n\nacc_ada = np.sum(np.diag(nested_cv_cm_ada)) / np.sum(np.sum(nested_cv_cm_ada))\n\nacc_gb = np.sum(np.diag(nested_cv_cm_gb)) / np.sum(np.sum(nested_cv_cm_gb))\n\nacc = np.sum(np.diag(nested_cv_cm)) / np.sum(np.sum(nested_cv_cm))\n\nprint(\"classification accuracy of AdaBoost :\" , acc_ada)\nprint(\"classification accuracy of gradient boosting :\" , acc_gb)\nprint(\"classification accuracy of random forest :\" , acc)\n\n\n\nclf = RandomForestClassifier(n_estimators=100,\n                             max_depth=4,\n                             random_state=42,\n                             class_weight='balanced')\nactivity_classifier_utils.LOSOCVPerformance(features, labels, subjects, clf)\nfeat = clf.feature_importances_\nprint(len(feat))\n\nplt.figure(figsize=(15,8))\nplt.xticks(rotation=45)\nplt.bar(x =activity_classifier_utils.FeatureNames(), height= feat)\n\nplt.savefig(\"feature_importance\")\n\nprint( sorted(list(zip(clf.feature_importances_, \nactivity_classifier_utils.FeatureNames())), reverse=True)[:10]\n)\n\nsorted_features = sorted(zip(clf.feature_importances_, np.arange(len(clf.feature_importances_))), reverse=True)\nbest_feature_indices = list(zip(*sorted_features))[1]\nX = features[:, best_feature_indices[:10]]\n\nprint(\"Shape of the input data :\" , X.shape)\n\ncm = activity_classifier_utils.LOSOCVPerformance(X, labels, subjects, clf)\nplt.close()\nactivity_classifier_utils.PlotConfusionMatrix(cm, class_names, normalize=True)\nplt.savefig(\"confusion_matrix_normalized\")\nprint('Classification accuracy = {:0.2f}'.format(np.sum(np.diag(cm)) / np.sum(np.sum(cm))))\n\n\n","sub_path":"AI in Healthcare/AI_in_Wearable_Devices/Project_activity_classification/train_cv_nested.py","file_name":"train_cv_nested.py","file_ext":"py","file_size_in_byte":10403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"57324498","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 4 14:01:57 2019\r\n\r\n@author: USER\r\n\"\"\"\r\n\r\nimport xgboost as xgb\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import precision_score\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nfrom sklearn import preprocessing\r\nfrom sklearn.decomposition import PCA\r\n\r\ndf = pd.read_excel('return.xlsx',index_col= \"股票代號\") # read the file\r\nindex = df.index\r\ntotal = df.iloc[:,1:]\r\ncol = [total.columns]\r\n\r\n# Z-score standardization\r\n# create a StandardScaler object\r\nzscore = preprocessing.StandardScaler()\r\n# apply the standardization\r\ndata1 = zscore.fit_transform(total)\r\ndata2 = zscore.fit_transform(total)\r\n\r\ndata1 = pd.DataFrame(data=data1,index= index)\r\ndata2 = pd.DataFrame(data=data2,index= index)\r\ndata1 = data1.sort_values(by=[1999], ascending=False)\r\ndata2 = data2.sort_values(by=[1998], ascending=False)\r\n\r\nfor i in range(len(data1)):\r\n    if (0<=i<10):\r\n        data1 = data1.replace( data1.iloc[i,-1] , 0)\r\n    elif (10<=i<15):\r\n        data1 = data1.replace( 
data1.iloc[i,-1] , 1)\r\n elif (15<=i<25):\r\n data1 = data1.replace( data1.iloc[i,-1] , 2)\r\n elif (25<=i<35):\r\n data1 = data1.replace( data1.iloc[i,-1] , 3)\r\n else:\r\n data1 = data1.replace( data1.iloc[i,-1] , 4)\r\n\r\nfor i in range(len(data2)):\r\n if (0<=i<10):\r\n data2 = data2.replace( data2.iloc[i,-2] , 0)\r\n elif (10<=i<15):\r\n data2 = data2.replace( data2.iloc[i,-2] , 1)\r\n elif (15<=i<25):\r\n data2 = data2.replace( data2.iloc[i,-2] , 2)\r\n elif (25<=i<35):\r\n data2 = data2.replace( data2.iloc[i,-2] , 3)\r\n else:\r\n data2 = data2.replace( data2.iloc[i,-2] , 4)\r\n#X= data[:,0:-1]\r\n#y = data[:,-1]\r\n\r\n#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)\r\n \r\nX_train = data2.iloc[:,0:-2]\r\ny_train = data2.iloc[:,-2]\r\nX_test = data1.iloc[:,1:-1]\r\ny_test = data1.iloc[:,-1]\r\nX_train = np.nan_to_num(X_train)\r\nX_test = np.nan_to_num(X_test)\r\npca_model = PCA(n_components=1)\r\npca_model.fit(X_train)\r\npca_model.fit(X_test)\r\nX_train = pca_model.transform(X_train)\r\nX_test = pca_model.transform(X_test)\r\n# use DMatrix for xgboost\r\ndtrain = xgb.DMatrix(X_train, label=y_train)\r\ndtest = xgb.DMatrix(X_test, label=y_test)\r\n\r\n# set xgboost params\r\nparam = {\r\n 'max_depth': 5, # the maximum depth of each tree\r\n 'eta': 0.2, # the training step for each iteration\r\n 'silent': 1, # logging mode - quiet\r\n 'objective': 'multi:softprob', # error evaluation for multiclass training\r\n 'num_class': 5} # the number of classes that exist in this datset\r\nnum_round = 100 # the number of training iterations\r\n\r\n#------------- numpy array ------------------\r\n# training and testing - numpy matrices\r\nbst = xgb.train(param, dtrain, num_round)\r\npreds = bst.predict(dtest)\r\n\r\n# extracting most confident predictions\r\nbest_preds = np.asarray([np.argmax(line) for line in preds])\r\nprint (\"Numpy array precision:\", precision_score(y_test, best_preds, average='micro'))\r\n\r\ndef plot_confusion_matrix(confusion_matrix, classes,\r\n normalize=False,\r\n title=None,\r\n cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if not title:\r\n if normalize:\r\n title = 'Normalized confusion matrix'\r\n else:\r\n title = 'Confusion matrix, without normalization'\r\n\r\n # Compute confusion matrix\r\n cm = confusion_matrix\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n fig, ax = plt.subplots(figsize=(20, 10))\r\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... 
and label them with the respective list entries\r\n           xticklabels=classes, yticklabels=classes,\r\n           title=title,\r\n           ylabel='True label',\r\n           xlabel='Predicted label')\r\n\r\n    # Rotate the tick labels and set their alignment.\r\n    plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n             rotation_mode=\"anchor\")\r\n\r\n    # Loop over data dimensions and create text annotations.\r\n    fmt = '.2f' if normalize else 'd'\r\n    thresh = cm.max() / 2.\r\n    for i in range(cm.shape[0]):\r\n        for j in range(cm.shape[1]):\r\n            ax.text(j, i, format(cm[i, j], fmt),\r\n                    ha=\"center\", va=\"center\",\r\n                    color=\"white\" if cm[i, j] > thresh else \"black\")\r\n    fig.tight_layout()\r\n    plt.show()\r\n\r\n\r\ncmdt = confusion_matrix(y_test, best_preds)\r\nplot_confusion_matrix(cmdt,[0,1])\r\nprint(classification_report(y_test, best_preds))\r\n\r\n#extract\r\ndf2 = pd.read_excel('return.xlsx') # the original file, needed to pull the profit column\r\n\r\nprofit = []\r\npickrow = []\r\n\r\nfor i in range(len(best_preds)):\r\n    if best_preds[i] ==0 : # rows of the test data predicted as class 0\r\n        #print(i)\r\n        print('屬於0的個股:',df2.iloc[i,0])\r\n        #print('機率:',preds[i,0])\r\n        print('漲跌幅:',df2.iloc[i,-1])\r\n        profit.append(df2.iloc[i,-1])\r\n        pickrow.append(i)\r\n\r\nif len(profit)==0:\r\n    print('平均漲跌幅:',0)\r\nelse:\r\n    print('平均漲跌幅:',sum(profit)/len(profit))","sub_path":"Xgboost_50 ver2.0(PCA).py","file_name":"Xgboost_50 ver2.0(PCA).py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"278114702","text":"#!/usr/bin/env python\n#coding:utf-8\n#author Zhang Shijie\n\nimport hashlib\nimport re\nimport json\nimport os\nimport sys,shutil\nimport time,datetime\n\ndef file_exit(): # check whether the file exists\n    filename = \"haproxy.cfg.new\" # name of the new file; work on the copy first, before touching the old file\n    print(os.path.exists(filename)) # show whether the file exists\n    if not os.path.exists(filename):\n        shutil.copy(\"haproxy.cfg\",\"haproxy.cfg.new\") # copy out a new file named haproxy.cfg.new\n        print(\"copy------>\")\n\n    else:\n        print(\"========>\")\n\ndef user_login_func(): # user login / authentication function\n    for i in range(3):\n        login_user = input(\"请输入登陆用户:\")\n        login_password = input(\"请输入用户密码:\")\n        if login_user == \"admin\" and int(login_password) == int(123456):\n            print(\"欢迎登陆haproxy管理系统\")\n\n            break\n        else:\n            print(\"用户或密码错误,请重新输入!\")\n\n\ndef haproxy_add_func():\n    backend_name = '{\"backend\":\"test.oldboy.org\",\"record\":{\"server\":\"100.1.7.999 100.1.7.999\",\"weight\":20,\"maxconn\":30}}' # example of the expected input format\n    user_input_data = input(\"\\n请输入要添加的完整的backend名称,可复制--> %s\\033[31;1m\\n请在此输入要添加的backend的全称:\\033[1m\" % backend_name)\n    json_user_input_data = json.loads(user_input_data) # parse the JSON string into a dict\n    input_backend_name = json_user_input_data[\"backend\"] # extract the backend name the user entered\n\n    # the backend name, e.g. test.oldboy.org\n    print(input_backend_name,\"---------->\")\n    input_haproxy_add_func_record = json_user_input_data[\"record\"] # extract the server record details the user entered\n    add_backend_name = \"backend %s \" % input_backend_name # build a \"backend xxx\" line in the same style as haproxy.cfg; still a dict, needs joining\n    # backend test.oldboy.org -- the backend line to add\n\n    # {\"maxconn\": 30, \"server\": \"100.1.7.999\", \"weight\": 20} -- the dict format before joining\n    add_haproxy_add_func_record = \"server %s maxconn %s weight %s \" % (input_haproxy_add_func_record[\"server\"],input_haproxy_add_func_record[\"maxconn\"],input_haproxy_add_func_record[\"weight\"]) # build a server record in the same style as haproxy.cfg\n    # server 100.1.7.999 maxconn 30 weight 20 -- the server record to add below the backend line\n    print(\"要添加的主记录: %s\" % add_haproxy_add_func_record) # the server line to insert under add_backend_name, joined in haproxy style\n    print(\"要添加的backend名称: %s \" % add_backend_name) # print the backend the user entered\n    
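## How the scan below works (added note): copy haproxy.cfg to haproxy.cfg.new,\n    ## walk it line by line, and once the \"backend <name>\" header matches, collect\n    ## the following non-empty lines (the existing server records) into add_list,\n    ## stopping at the next \"backend\" header; the new record is then inserted\n    ## after line $num if the backend exists, or appended as a new block.\n    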
str = \"backend {0}\" # format string for the backend header line\n    add_list = [] # empty list\n    num = 0 # marker recording the line number of the backend header\n    num1 = 0 # marker used to count line numbers\n    #shutil.copy(\"haproxy.cfg\",\"haproxy.cfg.new\")\n    src = open(\"haproxy.cfg\", \"r+\")\n    des = open(\"haproxy.cfg.new\", \"w+\")\n    des.writelines(src.read())\n    src.close()\n    des.close()\n\n    #time.sleep(1000)\n    with open(\"haproxy.cfg.new\") as f: # read the config file\n        flag = False # marker flag\n        for line in f: # loop over the config file\n            #print(line)\n            if str.format(json_user_input_data[\"backend\"]) == line.strip(): # the entered backend matches the current line of the file\n                flag = True # set the flag to True: the backend line has been matched\n                num = num1 # remember its line number in num\n                continue # skip to the next iteration\n            elif flag and line.startswith(\"backend\"): # the next line starting with \"backend\" means we have left the current backend's server block\n                break # so end the loop\n            elif flag and line != \"\\n\": # append non-empty lines to the list\n                add_list.append(line.strip()) # append to the list\n                #print(add_list)\n                continue # end this iteration and move on to the next\n            num1 += 1 # increment the line counter\n    if flag: # the backend name was found in the file\n        haproxy_add_func_dict = json_user_input_data[\"record\"] # take the server record out of the dict\n        haproxy_add_func_record = \"server %s weight %d maxconn %d\" % (haproxy_add_func_dict[\"server\"],haproxy_add_func_dict[\"weight\"],haproxy_add_func_dict[\"maxconn\"]) # join it into a server record\n        if haproxy_add_func_record in add_list: # the server record is already in the list\n            print(\"该主机的配置文件已经存在,无需添加!\") # report that the server already exists\n        else: # it is not in the list\n            with open(\"haproxy.cfg.new\") as f: # open the config file\n                haproxy_data_add = list(f) # convert it to a list of lines\n                haproxy_add_func_record = \"%s%s\\n\" % (\" \" * 8,haproxy_add_func_record) # join the server configuration line\n                haproxy_data_add.insert(num+1,haproxy_add_func_record) # insert the new server line just below the backend header\n            f = open(\"haproxy.cfg.new\",\"w\") # reopen the config file in write mode and save\n            f.writelines(haproxy_data_add)\n            f.flush()\n            f.close()\n\n    else: # the backend to add is not in the records yet, so append it as a new block\n        haproxy_add_func_dict = json_user_input_data[\"record\"] # format template\n        haproxy_add_func_record = \"\\nbackend %s\\n%sserver %s weight %d maxconn %d\\n\" % (json_user_input_data[\"backend\"],\" \" * 8,haproxy_add_func_dict[\"server\"],haproxy_add_func_dict[\"weight\"],haproxy_add_func_dict[\"maxconn\"])\n        with open(\"haproxy.cfg.new\",\"w\") as new_f: # open the config file and save the newly added record\n            new_f.write(haproxy_add_func_record)\n\n\n    write_time = datetime.datetime.now()\n    os.rename(\"haproxy.cfg\",\"haproxy.cfg.bak.%s\" % (write_time.strftime(\"%Y:%m:%d %H:%M:%S\"))) # back up the old file\n    #shutil.copy(\"haproxy.cfg.new\",\"haproxy.cfg.new.bak\") # swap in the new file\n    os.rename(\"haproxy.cfg.new\",\"haproxy.cfg\") # swap in the new file\n\n\n\ndef del_func(): # function for deleting a record\n    #backend_name = '{\"backend\":\"test.oldboy.org\",\"record\":{\"server\":\"100.1.7.999 100.1.7.999\",\"weight\":20,\"maxconn\":30}}' # the input format for a deletion record\n    #del_record = input(\"请输入您要删除的记录:\\n\\t\\t 记录格式如下:%s \\n\" % (backend_name)) # read the record to delete\n\n    backend_name = '{\"backend\":\"test.oldboy.org\",\"record\":{\"server\":\"100.1.7.999 100.1.7.999\",\"weight\":20,\"maxconn\":30}}' # example of the expected format\n    del_record = input(\"\\n请输入要添加的完整的backend名称,可复制--> %s\\033[31;1m\\n请在此输入要添加的backend的全称:\\033[1m\" % backend_name)\n\n    del_dict = json.loads(del_record) # convert the deletion record string into a dict\n    que_li = get_data(del_dict[\"backend\"]) # call get_data() to fetch the server records under the backend to delete; it returns an empty list when the backend is missing\n    flag = len(que_li) > 0 # the backend is present iff at least one record line was found\n    if flag: # the backend to delete is in the file\n        server_dict = del_dict[\"record\"] # take the server record to delete\n        record_name = \"server %s weight %d maxconn %d\" % (server_dict[\"server\"],server_dict[\"weight\"],server_dict[\"maxconn\"]) # join the server record dict into a string\n        if record_name in que_li: # the server record to delete is in que_li\n            f = open(\"haproxy.cfg\") # open the config file and assign it to f\n            fp = list(f) # convert the file contents to a list of lines\n            record_name = \"%s%s\\n\" % (\" \"*8,record_name) # the server line to delete\n            fp.remove(record_name) # remove the server line from the list\n            f = 
open(\"haproxy.cfg\",\"w\") #以w的方式打开配置文件,会新建配置文件\n f.writelines(fp) #写入配置文件\n f.flush() #刷新到硬盘\n f.close() #关闭文件\n else: #如果要删除的server记录不在文件中\n print(\"backend %s里没有此条记录!\" % del_dict[\"backend\"])\n else: #如果要删除的backend记录不在配置文件中\n print(\"没有此条backend!\")\n\n\ndef get_data(get_args): #获取信息\n #backend_cfg_name = input(\"请输入要查看的后端Server名称:\") #后端backend名称,等于backend + 变量传进来的名称\n backend_new_name = \"backend %s\" % get_args\n list1 = [] #定义一个空的列表,用于保存用户查找到的数据\n #print(backend_new_name)\n with open(\"haproxy.cfg\") as f: #读取配置文件\n flag = False #定义个标记位,用于后续处理\n #print(flag)\n for line in f: #循环文件的每一行,对配置文件逐行做以下处理,一次处理一行\n #print(type(line))\n line = line.strip() #去除每一行的开始和换行符,并赋值给line,line.strip获取到的是一个没有换行符的str\n if backend_new_name == line: #假如循环到的当前行等于要查询的backend 名称,满足之后直接进入下一行\n flag = True #将flag标记位True\n continue #跳出本次循环\n if flag and line.startswith(\"backend\"): #目前flag为True,假如flag为True并且行的开头以backend开头则跳出本次循环,提前退出可以防止遍历下面不符合要求的行,\n flag = False\n break\n if flag and line: #假如flag为True并且line不为空,则将数据写入列表保存,此数据为backend的内容\n list1.append(line) #将筛选出的数据附加到列表\n continue #结束本次并进入下一次循环\n print(\"%s内包含的server为: %s\" % (get_args,list1)) #打印结果\n return list1 #返回给函数\n\nif __name__ == \"__main__\":\n user_login_func()\n get_data_input = input(\"请选择要进行的操作, 1,查看 2,添加 3,退出,4,删除:\")\n if int(get_data_input) == 1: #查询\n backend_cfg_name = input(\"请输入要查看的后端Server名称:\") #后端backend名称,等于backend + 变量传进来的名称\n get_data(backend_cfg_name) #执行获取信息的函数\n\n elif int(get_data_input) == 2: #添加\n haproxy_add_func() #执行添加的函数\n\n elif int(get_data_input) == 4: #添加\n del_func()\n\n\n elif int(get_data_input) == 3:\n print(\"您已经成功退出\")\n sys.exit(0) #退出\n\n\n","sub_path":"day3/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":10474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"273052236","text":"#\n# Copyright 2016-2019 Crown Copyright\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom gafferpy import gaffer as g\nfrom gafferpy import gaffer_connector\n\n\ndef run(host, verbose=False):\n return run_with_connector(create_connector(host, verbose))\n\n\ndef run_with_connector(gc):\n print()\n print('Running Accumulo operations')\n print('--------------------------')\n print()\n\n get_elements_between_sets(gc)\n get_elements_within_set(gc)\n get_elements_in_ranges(gc)\n\n\ndef create_connector(host, verbose=False):\n return gaffer_connector.GafferConnector(host, verbose)\n\n\ndef get_elements_between_sets(gc):\n # Get Elements\n elements = gc.execute_operation(\n g.GetElementsBetweenSets(\n input=[g.EntitySeed('M5')],\n input_b=[g.EntitySeed('M5:10'), g.EntitySeed('M5:11')],\n view=g.View(\n edges=[\n g.ElementDefinition(\n group='RoadUse',\n group_by=[]\n ),\n g.ElementDefinition(\n group='RoadHasJunction',\n group_by=[]\n )\n ]\n )\n )\n )\n print('Elements between sets')\n print(elements)\n print()\n\n\ndef get_elements_within_set(gc):\n # Get Elements within set\n elements = gc.execute_operation(\n g.GetElementsWithinSet(\n input=[\n 
g.EntitySeed('M5'),\n                g.EntitySeed('M5:10'),\n                g.EntitySeed('M5:11')\n            ],\n            view=g.View(\n                edges=[\n                    g.ElementDefinition(\n                        group='RoadUse',\n                        group_by=[]\n                    ),\n                    g.ElementDefinition(\n                        group='RoadHasJunction',\n                        group_by=[]\n                    )\n                ]\n            )\n        )\n    )\n    print('Elements within set')\n    print(elements)\n    print()\n\n\ndef get_elements_in_ranges(gc):\n    # Get Elements in ranges\n    elements = gc.execute_operation(\n        g.GetElementsInRanges(\n            input=[\n                g.SeedPair(g.EntitySeed('M5:10'), g.EntitySeed('M5:12'))\n            ],\n            view=g.View(\n                edges=[\n                    g.ElementDefinition(\n                        group='RoadUse',\n                        group_by=[]\n                    )\n                ]\n            )\n        )\n    )\n    print('Elements in ranges')\n    print(elements)\n    print()\n\n\nif __name__ == \"__main__\":\n    run('http://localhost:8080/rest/latest', False)\n","sub_path":"python-shell/src/example_accumulo.py","file_name":"example_accumulo.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"128561940","text":"import unittest\nfrom typing import List\n\n\nclass Solution:\n    \"\"\"\n    Given a 2-D array shaped like Pascal's triangle, start at the root and move only to the lower-left or lower-right child; find the length of the shortest path from the root to the bottom row.\n    With n levels, besides the O(n^2) DP solution there is a 2^n DFS\n    (equivalent to enumerating every path; there are 2^n paths down to level n).\n    A divide-and-conquer traversal, as on a binary tree, is still O(2^n): with no pruning every decision splits in two, the same as DFS over all paths.\n    That is because plain divide-and-conquer repeats work and memorizes nothing; a global HashMap keyed by the cell coordinate, with the shortest-path value, avoids the recomputation.\n    Memoized search is just one way to implement dynamic programming, so divide-and-conquer can implement DP too.\n    Problems with O(n) time complexity are a poor fit for memoized search: the stack depth is also O(n) and easily overflows.\n    The usual DP implementation is a bottom-up recurrence (filling a table with for loops), where index (i, j) of the 2-D dp array denotes one subproblem.\n    \"\"\"\n\n    # noinspection PyMethodMayBeStatic,PyPep8Naming\n    def minimumTotal(self, triangle: List[List[int]]) -> int:\n        # the bottom row has exactly size elements\n        size = len(triangle)\n        # roll downward like a rolling array: a node's shortest path equals the minimum over the two nodes in the row above that lead into it\n        # e.g. for the 4 in row three, its shortest path is min(shortest path of 3, shortest path of 4 in the row above) + 4 (dynamic programming); update in place, no extra DP array needed\n        # 3 4\n        # 6 4 7\n\n        # 1. initialize the left and right edges of the triangle in the DP array; the table can only be filled correctly once these boundary cases are set\n        for i in range(1, size):\n            # left edge of each row\n            triangle[i][0] += triangle[i - 1][0]\n            # right edge of each row\n            triangle[i][i] += triangle[i - 1][i - 1]\n\n        # 2. fill in the DP table\n        for i in range(2, size):\n            # excluding the left and right edges of each row\n            for j in range(1, i):\n                triangle[i][j] += min(triangle[i - 1][j - 1], triangle[i - 1][j])\n        # print(triangle)\n        return min(triangle[size - 1])\n\n    @staticmethod\n    def try_dp_one_traverse(triangle: List[List[int]]) -> int:\n        size = len(triangle)\n        root_val = triangle[0][0]\n        if size < 2:\n            return root_val\n        triangle[1][0] += root_val\n        triangle[1][1] += root_val\n        # single-pass variant\n        for i in range(2, size):\n            # left edge of each row\n            triangle[i][0] += triangle[i - 1][0]\n            # right edge of each row\n            triangle[i][i] += triangle[i - 1][i - 1]\n            # interior of each row\n            for j in range(1, i):\n                triangle[i][j] += min(triangle[i - 1][j - 1], triangle[i - 1][j])\n        return min(triangle[size - 1])\n\n    @staticmethod\n    def dp_bottom_to_top(triangle: List[List[int]]) -> int:\n        # an elegant alternative: bottom-up DP\n        # in an interview, check whether you are allowed to modify the input array\n        for i in range(len(triangle) - 2, -1, -1):\n            for j in range(i + 1):\n                triangle[i][j] = triangle[i][j] + min(triangle[i + 1][j], triangle[i + 1][j + 1])\n        return triangle[0][0]\n\n\nclass Testing(unittest.TestCase):\n    TEST_CASES = [\n        ([\n             [2],\n             [3, 4],\n             [6, 5, 7],\n             [4, 1, 8, 3]\n         ], 11),\n        (\n            [\n                [2],\n                [3, 2],\n                [6, 5, 7],\n                [4, 4, 8, 1]\n            ], 12\n        )\n    ]\n\n    def test(self):\n        solution = Solution()\n        for triangle, shortest_path in self.TEST_CASES:\n            self.assertEqual(shortest_path, solution.minimumTotal(triangle))\n\n    def test_try_dp_one_traverse(self):\n        for triangle, shortest_path in self.TEST_CASES:\n            self.assertEqual(shortest_path, Solution.try_dp_one_traverse(triangle))\n","sub_path":"dp/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"121337981","text":"# -*- coding: UTF-8 -*-# Chinese encoding declaration\n#coding=utf-8\n\nimport matplotlib.pyplot as plt# import the matplotlib library\nimport numpy as np #\n# linspace: the first argument is the start of the sequence, the second the end, and the third the number of samples (default 50)\nx = np.linspace(0, 3 * np.pi, 100)# x-axis data\ny = np.sin(x)# y-axis: the sine wave\n\nplt.rcParams['font.sans-serif']=['SimHei'] # this line lets the chart display Chinese characters\nplt.rcParams['axes.unicode_minus']=False # needed to render minus signs correctly\nplt.subplot(1,2,1)# first plot in a 1x2 grid\nplt.title(r'$f(x)=sin(x)$') # title of the first plot\nplt.plot(x, y)# draw the waveform\n#plt.show()\n\nx1 = [t*0.375*np.pi for t in x]# x-axis data\ny1 = np.sin(x1)# y-axis: the sine wave\nplt.subplot(1,2,2)# second plot in the 1x2 grid\n# plt.title(u\"测试2\") # note: prefix the string with u\nplt.title(r'$f(x)=sin(\\omega x), \\omega = \\frac{3}{8} \\pi$') # title of the second plot\nplt.plot(x, y1)# draw the second waveform\nplt.show()# show the figure\n","sub_path":"数字信号处理实验/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"581804200","text":"\n#scraper for the 2010 year of http://www.informs-sim.org/\n#need to extract area, session, chair, title, authors, abstract\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport re\n\nsoup = BeautifulSoup(open(\"2010.html\"))\nclean = soup.prettify()\n\nsession = []\nchair = []\ntitle = []\nauthors = []\nabstract = [] \n\n#scrape sessions\nfor link in soup.body.find_all('b'):\n\tif link.attrs == {'style': 'font-size: 1.2em'} and not link.string.startswith('Wednesday') and not link.string.startswith('Tuesday') and not link.string.startswith('Monday') and not link.string.startswith('Sunday'):\n\t\tsession.append(link.string)\n\n#scrape chairs\nfor link in soup.body.find_all('span'):\n\tif link.attrs == {'style': 'font-size: 1.1em'}:\n\t\tchair.append(link.get_text()[7:])\n\n#scrape titles\nfor link in soup.body.find_all('i'):\n\ttitle.append(link.string)\ntitle.pop(0)\n\n#scrape 
authors\n##COULDNT FIGURE THIS OUT!!\n\n#scrape abstracts\nfor link in soup.body.find_all('div'):\n\tif len(link.attrs) == 2:\n\t\tmodified = re.sub('<.*?>', '', str(link))\n\t\tabstract.append(modified.decode('utf-8'))\n\ndf = pd.DataFrame({'Session' : session, 'Chair' :chair})\ndf2 = pd.DataFrame({'Title':title})\ndf3 = pd.DataFrame({'Abstract': abstract})\n\ndf3.to_excel('three.xlsx')\n","sub_path":"Python_Scripts/kevin_2010scraper.py","file_name":"kevin_2010scraper.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"176516087","text":"## Copyright 2018-2020 Intel Corporation\n## SPDX-License-Identifier: Apache-2.0\n\nimport os\nfrom glob import glob\nfrom collections import defaultdict\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom config import *\nfrom util import *\nfrom image import *\nfrom color import *\nimport model\nimport tza\n\n# Returns a dataset directory path\ndef get_data_dir(cfg, name):\n return os.path.join(cfg.data_dir, name)\n\n# Returns the ordered list of channel names for the specified features\ndef get_channels(features):\n if ('hdr' in features) or ('ldr' in features):\n channels = ['r', 'g', 'b']\n if 'alb' in features:\n channels += ['alb.r', 'alb.g', 'alb.b']\n if 'nrm' in features:\n channels += ['nrm.x', 'nrm.y', 'nrm.z']\n return channels\n\n# Returns the number of channels for the specified features\ndef get_num_channels(features):\n return len(get_channels(features))\n\n# Returns the indices of the specified channels in the dataset\ndef get_channel_indices(channels, data_channels):\n return [data_channels.index(ch) for ch in channels]\n\n# Shuffles channels according to the specified order\ndef shuffle_channels(channels, first_channel, order):\n first = channels.index(first_channel)\n new_channels = [channels[first+i] for i in order]\n for i in range(len(new_channels)):\n channels[first+i] = new_channels[i]\n\n# Returns the target features given the input features\ndef get_target_features(features):\n return list(set(features).intersection({'hdr', 'ldr'}))\n\n# Checks whether the image with specified features exists\ndef image_exists(name, features):\n return all([os.path.isfile(name + '.' 
+ f + '.exr') for f in features])\n\n# Returns the feature an image represents given its filename\ndef get_image_feature(filename):\n filename_split = filename.rsplit('.', 2)\n if len(filename_split) < 2:\n return 'srgb' # no extension, assume sRGB\n else:\n ext = filename_split[-1].lower()\n if ext in {'exr', 'pfm', 'hdr'}:\n if len(filename_split) == 3:\n return filename_split[-2]\n else:\n return 'hdr' # assume HDR\n else:\n return 'srgb' # assume sRGB\n\n# Loads target image features in EXR format with given filename prefix\ndef load_target_image(name, features):\n if 'hdr' in features:\n color_filename = name + '.hdr.exr'\n else:\n color_filename = name + '.ldr.exr'\n color = load_image(color_filename, num_channels=3)\n if 'hdr' in features:\n color = np.maximum(color, 0.)\n else:\n color = np.clip(color, 0., 1.)\n return color\n\n# Loads input image features in EXR format with given filename prefix\ndef load_input_image(name, features):\n # Color\n color = load_target_image(name, features)\n inputs = [color]\n\n # Albedo\n if 'alb' in features:\n albedo_filename = name + '.alb.exr'\n albedo = load_image(albedo_filename, num_channels=3)\n albedo = np.clip(albedo, 0., 1.)\n inputs.append(albedo)\n\n # Normal\n if 'nrm' in features:\n normal_filename = name + '.nrm.exr'\n normal = load_image(normal_filename, num_channels=3)\n\n # Normalize\n length_sqr = np.add.reduce(np.square(normal), axis=-1, keepdims=True)\n with np.errstate(divide='ignore'):\n rcp_length = np.reciprocal(np.sqrt(length_sqr))\n rcp_length = np.nan_to_num(rcp_length, nan=0., posinf=0., neginf=0.)\n normal *= rcp_length\n\n # Transform to [0..1] range\n normal = normal * 0.5 + 0.5\n\n inputs.append(normal)\n\n return np.concatenate(inputs, axis=2)\n\n# Tries to load metadata for an image with given filename/prefix, returns None if it fails\ndef load_image_metadata(name):\n dirname, basename = os.path.split(name)\n basename = basename.split('.')[0] # remove all extensions\n while basename:\n metadata_filename = os.path.join(dirname, basename) + '.json'\n if os.path.isfile(metadata_filename):\n return load_json(metadata_filename)\n if '_' in basename:\n basename = basename.rsplit('_', 1)[0]\n else:\n break\n return None\n\n# Saves image metadata to a file with given prefix\ndef save_image_metadata(name, metadata):\n save_json(name + '.json', metadata)\n\n# Returns groups of image samples (input and target images at different SPPs) as a list of (group, list of input names, target name)\ndef get_image_sample_groups(dir, features):\n image_filenames = glob(os.path.join(dir, '**', '*.*.exr'), recursive=True)\n target_features = get_target_features(features)\n\n # Make image groups\n image_groups = defaultdict(set)\n for filename in image_filenames:\n image_name = os.path.relpath(filename, dir) # remove dir path\n image_name, _, _ = image_name.rsplit('.', 2) # remove extensions\n group = image_name\n if '_' in image_name:\n prefix, suffix = image_name.rsplit('_', 1)\n suffix = suffix.lower()\n if (suffix.isdecimal() or\n (suffix.endswith('spp') and suffix[:-3].isdecimal()) or\n suffix == 'ref' or suffix == 'reference' or\n suffix == 'gt' or suffix == 'target'):\n group = prefix\n image_groups[group].add(image_name)\n\n # Make sorted image sample (inputs + target) groups\n image_sample_groups = []\n for group in sorted(image_groups):\n # Get the list of inputs and the target\n image_names = sorted(image_groups[group])\n if len(image_names) > 1:\n input_names, target_name = image_names[:-1], image_names[-1]\n else:\n 
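# a group with a single image has no reference image; use it as the input only\n      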
input_names, target_name = image_names, None\n\n # Check whether all required features exist\n if all([image_exists(os.path.join(dir, name), features) for name in input_names]) and \\\n (not target_name or image_exists(os.path.join(dir, target_name), target_features)):\n # Add sample\n image_sample_groups.append((group, input_names, target_name))\n\n return image_sample_groups\n\n# Transforms a feature image to another feature type\ndef transform_feature(image, input_feature, output_feature, exposure=1.):\n if input_feature == 'hdr' and output_feature in {'ldr', 'srgb'}:\n image = tonemap(image * exposure)\n if output_feature == 'srgb':\n if input_feature in {'hdr', 'ldr', 'alb'}:\n image = srgb_forward(image)\n elif input_feature == 'nrm':\n # Transform [-1, 1] -> [0, 1]\n image = image * 0.5 + 0.5\n return image\n\n## -----------------------------------------------------------------------------\n## Preprocessed dataset\n## -----------------------------------------------------------------------------\n\n# Returns a preprocessed dataset directory path\ndef get_preproc_data_dir(cfg, name):\n data_dir = os.path.join(cfg.preproc_dir, name) + '.'\n if 'hdr' in cfg.features:\n data_dir += 'hdr'\n elif 'ldr' in cfg.features:\n data_dir += 'ldr'\n data_dir += '.' + cfg.transfer\n return data_dir\n\nclass PreprocessedDataset(Dataset):\n def __init__(self, cfg, name):\n super(PreprocessedDataset, self).__init__()\n\n # Check whether the preprocessed images have all required features\n data_dir = get_preproc_data_dir(cfg, name)\n if not os.path.isdir(data_dir):\n self.num_images = 0\n return\n data_cfg = load_config(data_dir)\n if not all(f in data_cfg.features for f in cfg.features):\n error('the preprocessed images have an incompatible set of features')\n if data_cfg.transfer != cfg.transfer:\n error('the preprocessed images have a mismatching transfer function')\n\n self.tile_size = cfg.tile_size\n self.features = cfg.features\n self.data_channels = get_channels(data_cfg.features)\n self.channels = get_channels(cfg.features)\n self.channel_order = get_channel_indices(self.channels, self.data_channels)\n\n # Get the image samples\n samples_filename = os.path.join(data_dir, 'samples.json')\n self.samples = load_json(samples_filename)\n self.num_images = len(self.samples)\n\n if self.num_images == 0:\n return\n\n # Create the memory mapping based image reader\n tza_filename = os.path.join(data_dir, 'images.tza')\n self.images = tza.Reader(tza_filename)\n\n## -----------------------------------------------------------------------------\n## Training dataset\n## -----------------------------------------------------------------------------\n\nclass TrainingDataset(PreprocessedDataset):\n def __init__(self, cfg, name):\n super(TrainingDataset, self).__init__(cfg, name)\n\n def __len__(self):\n return self.num_images\n\n def __getitem__(self, index):\n # Get the input and target images\n input_name, target_name = self.samples[index]\n input_image, _ = self.images[input_name]\n target_image, _ = self.images[target_name]\n\n # Get the size of the image\n height = input_image.shape[0]\n width = input_image.shape[1]\n if height < self.tile_size or width < self.tile_size:\n error('image is smaller than the tile size')\n \n # Generate a random crop\n sy = sx = self.tile_size\n if rand() < 0.1:\n # Randomly zero pad later to avoid artifacts for images that require padding\n sy -= randint(model.ALIGNMENT)\n sx -= randint(model.ALIGNMENT)\n oy = randint(height - sy + 1)\n ox = randint(width - sx + 1)\n\n # 
Randomly permute some channels to improve training quality\n channels = self.channels[:] # copy\n\n # Randomly permute the color channels\n color_order = randperm(3)\n shuffle_channels(channels, 'r', color_order)\n if 'alb' in self.features:\n shuffle_channels(channels, 'alb.r', color_order)\n\n # Randomly permute the normal channels\n if 'nrm' in self.features:\n normal_order = randperm(3)\n shuffle_channels(channels, 'nrm.x', normal_order)\n\n # Compute the indices of the required input channels\n channel_order = get_channel_indices(channels, self.data_channels)\n\n # Crop the input and target images\n input_image = input_image [oy:oy+sy, ox:ox+sx, channel_order]\n target_image = target_image[oy:oy+sy, ox:ox+sx, color_order]\n\n # Randomly transform the tiles to improve training quality\n if rand() < 0.5:\n # Flip vertically\n input_image = np.flip(input_image, 0)\n target_image = np.flip(target_image, 0)\n\n if rand() < 0.5:\n # Flip horizontally\n input_image = np.flip(input_image, 1)\n target_image = np.flip(target_image, 1)\n\n if rand() < 0.5:\n # Transpose\n input_image = np.swapaxes(input_image, 0, 1)\n target_image = np.swapaxes(target_image, 0, 1)\n sy, sx = sx, sy\n\n # Zero pad the tiles (always makes a copy)\n pad_size = ((0, self.tile_size - sy), (0, self.tile_size - sx), (0, 0))\n input_image = np.pad(input_image, pad_size, mode='constant')\n target_image = np.pad(target_image, pad_size, mode='constant')\n\n # Randomly zero the color channels if there are auxiliary features\n # This prevents \"ghosting\" artifacts when the color buffer is entirely black\n if len(self.channels) > 3 and rand() < 0.01:\n input_image[:, :, 0:3] = 0\n target_image[:] = 0\n\n # DEBUG: Save the tile\n #save_image('tile_%d.png' % i, target_image)\n\n # Convert the tiles to tensors\n return image_to_tensor(input_image), image_to_tensor(target_image)\n\n## -----------------------------------------------------------------------------\n## Validation dataset\n## -----------------------------------------------------------------------------\n\nclass ValidationDataset(PreprocessedDataset):\n def __init__(self, cfg, name):\n super(ValidationDataset, self).__init__(cfg, name)\n\n # Split the images into tiles\n self.tiles = []\n\n for sample_index in range(self.num_images):\n # Get the input image\n input_name, _ = self.samples[sample_index]\n input_image, _ = self.images[input_name]\n\n # Get the size of the image\n height = input_image.shape[0]\n width = input_image.shape[1]\n if height < self.tile_size or width < self.tile_size:\n error('image is smaller than the tile size')\n\n # Compute the number of tiles\n num_tiles_y = height // self.tile_size\n num_tiles_x = width // self.tile_size\n\n # Compute the start offset for centering\n start_y = (height % self.tile_size) // 2\n start_x = (width % self.tile_size) // 2\n\n # Add the tiles\n for y in range(num_tiles_y):\n for x in range(num_tiles_x):\n oy = start_y + y * self.tile_size\n ox = start_x + x * self.tile_size\n self.tiles.append((sample_index, oy, ox))\n\n def __len__(self):\n return len(self.tiles)\n\n def __getitem__(self, index):\n # Get the tile\n sample_index, oy, ox = self.tiles[index]\n sy = sx = self.tile_size\n\n # Get the input and target images\n input_name, target_name = self.samples[sample_index]\n input_image, _ = self.images[input_name]\n target_image, _ = self.images[target_name]\n\n # Crop the input and target images\n input_image = input_image [oy:oy+sy, ox:ox+sx, self.channel_order]\n target_image = target_image[oy:oy+sy, 
ox:ox+sx, :]\n\n # Convert the tiles to tensors\n # Copying is required because PyTorch does not support non-writeable tensors\n return image_to_tensor(input_image.copy()), image_to_tensor(target_image.copy())\n","sub_path":"training/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":12957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"62011715","text":"import requests, json, sys\nimport os\n\ndef sendPost(url, headers, data):\n '''Generic wrapper for sending POST requests to NetScaler.'''\n\n global nitro_token\n\n try:\n response = requests.post(url, headers=headers, data=json.dumps(data),verify=False, timeout=10)\n if response.status_code == 200 or response.status_code == 201 or response.status_code == 409 or response.status_code == 404:\n #Setting NITRO Auth Token if it's not set already.\n if nitro_token is None:\n nitro_token = \"NITRO_AUTH_TOKEN=\"+response.cookies.values()[0]\n else:\n print(\"Recieved error status code - \"+str(response.status_code))\n print(\"Error while issueing POST request to: \" + url +\". Exiting.\")\n #Log out only if login passed\n if nitro_token is not None:\n logOut()\n\n sys.exit(1)\n except:\n print(\"Error while connecting to NetScaler\")\n sys.exit(1)\n\n return\n\ndef sendGet(url, headers, data):\n '''Generic wrapper for sending Get requests to NetScaler.'''\n\n global nitro_token\n global get_response_text\n global response_data\n\n\n try:\n response = requests.get(url, headers=headers, data=json.dumps(data),verify=False, timeout=10)\n if response.status_code == 200 or response.status_code == 201:\n #Setting NITRO Auth Token if it's not set already.\n if nitro_token is None:\n nitro_token = \"NITRO_AUTH_TOKEN=\"+response.cookies.values()[0]\n get_response_text=response.text\n else:\n print(\"Recieved error status code - \"+str(response.status_code))\n print(\"Error while issueing Get request to: \" + url +\". Exiting.\")\n #Log out only if login passed\n if nitro_token is not None:\n logOut()\n\n sys.exit(1)\n except:\n print(\"Error while connecting to NetScaler\")\n sys.exit(1)\n\n return\n\ndef sendDelete(url, headers, data):\n '''Generic wrapper for sending Delete requests to NetScaler.'''\n\n global nitro_token\n\n try:\n response = requests.delete(url, headers=headers, data=json.dumps(data),verify=False, timeout=10)\n if response.status_code == 200 or response.status_code == 201 or response.status_code == 404:\n #Setting NITRO Auth Token if it's not set already.\n if nitro_token is None:\n nitro_token = \"NITRO_AUTH_TOKEN=\"+response.cookies.values()[0]\n else:\n print(\"Recieved error status code - \"+str(response.status_code))\n print(\"Error while issueing POST request to: \" + url +\". 
Exiting.\")\n #Log out only if login passed\n if nitro_token is not None:\n logOut()\n\n sys.exit(1)\n except:\n print(\"Error while connecting to NetScaler\")\n sys.exit(1)\n\n return\n\ndef logOut():\n '''Function to log out of the NetScaler.'''\n global nitro_token\n\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/logout\"\n headers = {\"Content-Type\":\"application/vnd.com.citrix.netscaler.logout+json\", \"Cookie\":nitro_token}\n data = {\"logout\":{}}\n sendPost(url, headers, data)\n\n return\n\nPARENT_JOB_NAME = os.getenv('parentJobName')\nSVC_TYPE_PORT = os.getenv('svc_type_port')\nLB_METHOD = os.getenv('lb_method')\n\n##Main begins here##\nif __name__ == \"__main__\":\n #Read command line arguments\n try:\n option = sys.argv[1]\n except:\n print(\"Please pass an option - \\\"start\\\" or \\\"stop\\\"\")\n sys.exit(1)\n\n #Common section for start or stop logic\n #Reading the JSON resources file and loading into Dictionary\n with open(\"netscaler.json\", \"r\") as file:\n resources = json.load(file)\n\n requests.packages.urllib3.disable_warnings()\n\n nitro_token = None\n #Logging into NetScaler and retrieving NITRO Auth Token\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/login\"\n headers = {\"Content-Type\":\"application/vnd.com.citrix.netscaler.login+json\"}\n data = {\"login\":{\"username\":resources['nsuser'], \"password\":resources['nspasswd']}}\n sendPost(url, headers, data)\n print(\"Successfully logged into NetScaler\")\n\n #Flow for setting up the NetScaler\n if option == \"start\":\n print(\"Setting up NetScaler\")\n\n #Enabling NS Features\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/nsfeature?action=enable\"\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = {\"nsfeature\":{\"feature\":[\"LB\"]}}\n sendPost(url, headers, data)\n print(\"Finished enabling features\")\n\n #Adding SNIPs\n for snip in resources['snips']:\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/nsip\"\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = {\"nsip\":{\"ipaddress\":snip, \"netmask\":resources['snipmask']}}\n sendPost(url, headers, data)\n print(\"Finished adding IP address \")\n\n #Adding VIPs\n if SVC_TYPE_PORT == \"80\":\n svctypeval = \"HTTP\"\n portval = \"80\"\n elif SVC_TYPE_PORT == \"443\":\n svctypeval = \"SSL\"\n portval = \"443\"\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/lbvserver\"\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = {\"lbvserver\":{\"name\":PARENT_JOB_NAME, \"servicetype\":svctypeval, \"ippattern\":resources['lbvserver'], \"ipmask\":\"255.255.255.255\", \"lbmethod\":LB_METHOD, \"port\":portval}}\n sendPost(url, headers, data)\n print(\"Finished adding LB Vserver\")\n\n #Adding Services\n for index, service in enumerate(resources['services']):\n #Adding service\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/service\"\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = {\"service\":{\"name\":\"S\"+str(index), \"ip\":service, \"servicetype\":svctypeval, \"port\":portval}}\n sendPost(url, headers, data)\n #Binding service to Vserver\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/lbvserver_service_binding\"\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = {\"lbvserver_service_binding\":{\"name\":PARENT_JOB_NAME, \"servicename\":\"S\"+str(index)}}\n sendPost(url, headers, data)\n\n print(\"Finished adding service 
\"+service)\n\n print(\"Finished setting up the NetScaler\")\n\n #Flow for deleting services, vip, snips\n elif option == \"stop\":\n #Deleting Services\n for index, service in enumerate(resources['services']):\n #Deleting service\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/service/\"+\"S\"+str(index)\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = \"\"\n # data = {\"service\":{\"name\":\"S\"+str(index), \"ip\":service, \"servicetype\":\"HTTP\", \"port\":\"80\"}}\n sendDelete(url, headers, data)\n\n print(\"Finished Deleting service \"+service)\n\n #Deleting VIPs\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/lbvserver/\" + PARENT_JOB_NAME\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = \"\"\n sendDelete(url, headers, data)\n print(\"Finished deleting LB Vserver\")\n\n # Deleting SNIPs\n for snip in resources['snips']:\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/nsip/\"+snip\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = \"\"\n # data = {\"nsip\":{\"ipaddress\":snip, \"netmask\":resources['snipmask']}}\n sendDelete(url, headers, data)\n print(\"Finished Removing IP address(s) \")\n\n print(\"Finished Removing NetScaler Configuration\")\n\n #Flow for updating services when scaling up & down\n elif option == \"update\":\n #Get all services bound to this load balancer\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/lbvserver_binding/\"+PARENT_JOB_NAME\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = \"\"\n sendGet(url, headers, data)\n\n # Take response from loadbalances service binding call. Parse the IP's that are currently configured\n # as part of the service and load them into an array.\n lbarray = []\n t=json.loads(get_response_text)\n for binding in t['lbvserver_binding']:\n for servicebinding in binding['lbvserver_service_binding']:\n lbarray.append(servicebinding.get('ipv46'))\n\n # Add IP's from netscaler.json file to servicesarray\n servicesarray = []\n for index, service in enumerate(resources['services']):\n servicesarray.append(service)\n\n # Compare the loadbalancer Services(netscaler.json) arra to the array(lbarray) and create an array\n # of the IP's which need to be added to the loadbalancer\n addServers = []\n for ipservices in servicesarray:\n if not any(iplb == ipservices for iplb in lbarray):\n addServers.append(ipservices)\n\n # Compare the loadbalancer array(lbarray) to the Services(netscaler.json) array and create an array\n # of the IP's which need to be removed from the loadbalancer\n removeServers = []\n for ipservices in lbarray:\n if not any(iplb == ipservices for iplb in servicesarray):\n removeServers.append(ipservices)\n\n ascount = len(addServers)\n rmcount = len(removeServers)\n lbcount = len(lbarray)\n servicescount = len(servicesarray)\n\n #Add servers to load balancer\n if ascount > rmcount:\n #Add IP services to load balancer and bind\n addsvcnt = lbcount\n for index, service in enumerate(addServers):\n #Adding services\n if SVC_TYPE_PORT == \"80\":\n svctypeval = \"HTTP\"\n portval = \"80\"\n elif SVC_TYPE_PORT == \"443\":\n svctypeval = \"SSL\"\n portval = \"443\"\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/service\"\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = {\"service\":{\"name\":\"S\"+str(addsvcnt), \"ip\":service, \"servicetype\":svctypeval, \"port\":portval}}\n sendPost(url, headers, data)\n 
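# \"S\"+str(addsvcnt) continues the existing service name sequence: addsvcnt starts at lbcount, so newly added services never reuse a name already bound to this vserver.\n            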
#Binding service to Vserver\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/lbvserver_service_binding\"\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = {\"lbvserver_service_binding\":{\"name\":PARENT_JOB_NAME, \"servicename\":\"S\"+str(addsvcnt)}}\n sendPost(url, headers, data)\n addsvcnt += 1\n elif rmcount > ascount:\n for x in range((lbcount - 1), (servicescount -1), -1):\n #Deleting service\n url = \"https://\"+resources['nsip']+\"/nitro/v1/config/service/\"+\"S\"+str(x)\n headers = {\"Content-Type\":\"application/json\", \"Cookie\":nitro_token}\n data = \"\"\n sendDelete(url, headers, data)\n print(\"Finished Deleting service \"+service)\n\n #Logging out of NetScaler\n logOut()\n print(\"Successfully logged out of NetScaler\")\n\n","sub_path":"services/netscalerext/SetupNetScaler.py","file_name":"SetupNetScaler.py","file_ext":"py","file_size_in_byte":11461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"653991570","text":"import numpy as np\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.datasets import boston_housing\nimport matplotlib.pyplot as plt\n\n(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()\nprint(train_data.shape)\nprint(test_data.shape)\nprint(test_targets)\nmean = train_data.mean(axis=0)\ntrain_data -= mean\nstd = train_data.std(axis=0)\ntrain_data /= std\ntest_data -= mean\ntest_data /= std\n\ndef build_model():\n model = Sequential()\n model.add(Dense(64, activation='relu', input_shape=(train_data.shape[1],)))\n model.add(Dense(64, activation='relu'))\n model.add(Dense(1))\n model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n return model\n\nk = 5\nnum_val_samples = len(train_data) // k\nnum_epochs = 37\nall_loss = []\nall_mae = []\nhistories = []\n\nfor i in range(k):\n print('processing fold #', i)\n val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]\n val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]\n partial_train_data = np.concatenate([train_data[:i * num_val_samples], train_data[(i + 1) * num_val_samples:]],\n axis=0)\n partial_train_targets = np.concatenate(\n [train_targets[:i * num_val_samples], train_targets[(i + 1) * num_val_samples:]], axis=0)\n model = build_model()\n H = model.fit(partial_train_data, partial_train_targets, epochs=num_epochs, batch_size=1, verbose=0,\n validation_data=(val_data, val_targets))\n # val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)\n all_loss.append(H.history['val_loss'])\n all_mae.append(H.history['val_mae'])\n histories.append(H)\n\n\nprint(np.mean(all_mae))\n\nfor history in histories:\n history_dict = history.history\n\n mse_values = history_dict['loss']\n val_mse_values = history_dict['val_loss']\n epochs = range(1, len(mse_values) + 1)\n plt.plot(epochs, mse_values, 'red', label='Training loss', linewidth=2.5)\n plt.plot(epochs, val_mse_values, 'green', label='Validation loss', linewidth=2.5)\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()\n\n plt.clf()\n mae_values = history_dict['mae']\n val_mae_values = history_dict['val_mae']\n plt.plot(epochs, mae_values, 'red', label='Training MAE', linewidth=2.5)\n plt.plot(epochs, val_mae_values, 'blue', label='Validation MAE', linewidth=2.5)\n plt.title('Training and validation 
MAE')\n plt.xlabel('Epochs')\n plt.ylabel('MAE')\n plt.legend()\n plt.show()\n\n\navg_mae = np.asarray(all_mae).mean(axis=0)\navg_loss = np.asarray(all_loss).mean(axis=0)\n\nepochs = range(1, len(avg_mae) + 1)\nplt.plot(epochs, avg_loss, 'orange', linewidth=2.5)\nplt.title('Average validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.show()\n\nplt.clf()\nplt.plot(epochs, avg_mae, 'red', linewidth=2.5)\nplt.title('Average validation MAE')\nplt.xlabel('Epochs')\nplt.ylabel('MAE')\nplt.show()","sub_path":"8382/Ivleva/lb/3/lr3.py","file_name":"lr3.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"81058905","text":"import gym\nimport time\nimport unittest\nimport numpy as np\n\nfrom env import MultiGoalReacherEnv, DiscretizeActionEnv\n\nclass TestOrigin(unittest.TestCase):\n def test(self):\n env = gym.make('Reacher-v2')\n env.reset()\n env.render()\n done = False\n while not done:\n _, r, done, _ = env.step(env.action_space.sample())\n env.render()\n\nclass TestEnv(unittest.TestCase):\n def test_random(self):\n goals = [\n (0, 0.15),\n (0, -0.15),\n (0.15, 0),\n (-0.15, 0),\n ]\n env = DiscretizeActionEnv(\n MultiGoalReacherEnv(goals),\n (5, 5),\n )\n while True:\n o = env.reset()\n env.render()\n for _ in range(100):\n a = env.action_space.sample()\n #a = np.array([0.0, 0.5])\n print(len(o))\n o, r, done, info = env.step(a)\n print(info)\n #assert np.all(env.get_body_com('target') == np.array(env.goal + (0.01,)))\n env.render()\n time.sleep(0.1) \n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"deep_rl/reacher/utest.py","file_name":"utest.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"632075987","text":"from bs4 import BeautifulSoup, NavigableString\n\n\n# List of tags that are allowed in segments\nINLINE_TAGS = [\"a\", \"abbr\", \"acronym\", \"b\", \"code\", \"em\", \"i\", \"strong\", \"br\"]\n\n\ndef lstrip_keep(text):\n \"\"\"\n Like lstrip, but also returns the whitespace that was stripped off\n \"\"\"\n text_length = len(text)\n new_text = text.lstrip()\n prefix = text[0 : (text_length - len(new_text))]\n return new_text, prefix\n\n\ndef rstrip_keep(text):\n \"\"\"\n Like rstrip, but also returns the whitespace that was stripped off\n \"\"\"\n text_length = len(text)\n new_text = text.rstrip()\n if text_length != len(new_text):\n suffix = text[-(text_length - len(new_text)) :]\n else:\n suffix = \"\"\n return new_text, suffix\n\n\ndef extract_html_segments(html):\n \"\"\"\n This function extracts translatable segments from an HTML fragment.\n\n Inline elements and visible text are extracted together.\n\n For example:\n\n
<h1>Foo</h1>\n        <p>\n            Bar\n            <ul>\n                <li>Baz</li>\n            </ul>\n        </p>\n\n    Will produce the following two outputs (as a 2-tuple)\n\n        <h1><text position=\"0\"></h1>\n        <p>\n            <text position=\"1\">\n            <ul>\n                <li><text position=\"2\"></li>\n            </ul>\n        </p>
\n\n [\n \"Foo\",\n \"Bar\",\n \"Baz\",\n ]\n \"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n\n def wrap(elements):\n \"\"\"\n Wraps the given elements with a tag\n\n The elements must be contiguous siblings or this might screw up the tree.\n \"\"\"\n elements = list(elements)\n\n # Skip if there are no tags to wrap\n # We can get here after filters below have been applied\n if len(elements) == 0:\n return\n\n # If there is a single element and that is an inline tag, wrap just the contents.\n # We only care about inline tags that wrap only part of a segment\n if (\n len(elements) == 1\n and not isinstance(elements[0], NavigableString)\n and elements[0].name in INLINE_TAGS\n ):\n wrap(elements[0].children)\n return\n\n def ignore_if_at_end(element):\n \"\"\"\n Returns True if the given element should be ignored if it is at one of the ends\n \"\"\"\n if isinstance(element, NavigableString):\n return False\n\n # Ignore if there are no text nodes\n # This will exclude both
<br> tags and empty inline tags\n if not any(\n isinstance(desc, NavigableString) for desc in element.descendants\n ):\n return True\n\n return False\n\n if ignore_if_at_end(elements[0]):\n wrap(elements[1:])\n return\n\n if ignore_if_at_end(elements[-1]):\n wrap(elements[:-1])\n return\n\n value = \"\".join(\n element.output_ready()\n if isinstance(element, NavigableString)\n else str(element)\n for element in elements\n )\n\n if value and not value.isspace():\n # Create <text> tag\n elements[0].insert_before(soup.new_tag(\"text\", value=value))\n\n # Remove elements\n for element in elements:\n element.replaceWith(\"\")\n\n def walk(element):\n \"\"\"\n Walks the tree in depth first search post-order.\n\n When it encounters an element that could be extracted, it wraps it with\n a <text> tag. These are extracted in the next stage (because we want to\n preserve order of occurrence).\n\n For example:\n\n
<p>\n            Foo\n            <ul>\n                <li>Bar</li>\n            </ul>\n            Baz\n        </p>\n\n    Is transformed to:\n\n        <p>\n            <text value=\"Foo\">\n            <ul>\n                <li><text value=\"Bar\"></li>\n            </ul>\n            <text value=\"Baz\">\n        </p>
\n \"\"\"\n if isinstance(element, NavigableString):\n return False, False\n\n has_block = False\n has_wrap = False\n buffer = []\n\n for child in element.children:\n child_has_wrap, is_block = walk(child)\n\n if child_has_wrap:\n has_wrap = True\n\n if is_block:\n has_block = True\n\n if buffer:\n wrap(buffer)\n buffer = []\n has_wrap = True\n\n else:\n if not child_has_wrap:\n buffer.append(child)\n\n if buffer and has_block:\n wrap(buffer)\n buffer = []\n has_wrap = True\n\n if element.name not in INLINE_TAGS:\n if buffer:\n wrap(buffer)\n has_wrap = True\n\n return has_wrap, True\n\n return has_wrap, False\n\n walk(soup)\n\n # Now extract segments from the tags\n segments = []\n for element in soup.descendants:\n if element.name == \"text\":\n text = element.attrs.pop(\"value\")\n\n # Strip leading and trailing whitespace. We keep the values and reinsert them\n # into the template\n # This is probably not necessary, but just to be on the safe side\n text, prefix = lstrip_keep(text)\n text, suffix = rstrip_keep(text)\n\n element.attrs[\"position\"] = len(segments)\n segments.append(text.strip())\n\n if prefix:\n element.insert_before(prefix)\n\n if suffix:\n element.insert_after(suffix)\n\n return str(soup), segments\n\n\ndef restore_html_segments(template, segments):\n soup = BeautifulSoup(template, \"html.parser\")\n\n for text_element in soup.findAll(\"text\"):\n value = segments[int(text_element.get(\"position\"))]\n text_element.replaceWith(BeautifulSoup(value.strip(), \"html.parser\"))\n\n return str(soup)\n\n\ndef extract_html_elements(html):\n \"\"\"\n Extracts HTML elements from a fragment. Returns the plain text representation\n of the HTML document and an array of elements including their span, type and attributes.\n\n For example:\n\n text, elements = extract_html_elements(\"This is a paragraph. This is some bold and now italic text\")\n\n text == \"This is a paragraph. 
This is some bold and now italic text\"\n elements == [(39, 53, 'i', {}), (21, 53, 'b', {})]\n \"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n\n texts = []\n cursor = {\"current\": 0}\n elements = []\n\n def walk(soup):\n for element in soup.children:\n if isinstance(element, NavigableString):\n texts.append(element)\n cursor[\"current\"] += len(element)\n\n else:\n start = cursor[\"current\"]\n walk(element)\n end = cursor[\"current\"]\n\n elements.append((start, end, element.name, element.attrs.copy()))\n\n walk(soup)\n\n return \"\".join(texts), elements\n\n\ndef restore_html_elements(text, elements):\n \"\"\"\n Inserts elements into a plain text string returning a HTML document.\n \"\"\"\n soup = BeautifulSoup(\"\", \"html.parser\")\n stack = []\n cursor = 0\n current_element = soup\n\n # Sort elements by start position\n elements.sort(key=lambda element: element[0])\n\n for i, element in enumerate(elements):\n if cursor < element[0]:\n # Output text and advance cursor\n current_element.append(text[cursor : element[0]])\n cursor = element[0]\n\n stack.append((element[1], current_element))\n new_element = soup.new_tag(element[2], **element[3])\n current_element.append(new_element)\n current_element = new_element\n\n # Close existing elements before going to the next element\n while stack:\n if i < len(elements) - 1:\n if stack[len(stack) - 1][0] > elements[i + 1][0]:\n # New element created before this one closes.\n # Go to next element\n break\n\n element_end, previous_element = stack.pop()\n\n if cursor < element_end:\n # Output text and advance cursor\n current_element.append(text[cursor:element_end])\n cursor = element_end\n\n current_element = previous_element\n\n if cursor < len(text):\n current_element.append(text[cursor:])\n\n return str(soup)\n","sub_path":"wagtail_localize/translation/segments/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":8506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"259419602","text":"#from .regCommands import *\nimport q3.direction as d\n\n\n\ndirection = d\n\n\nprint('Hello3')\n\n\npr('Ciao')\n\nfor i in range(0,100,1):\n print('Dupa')\n\n#ui select module inputs\n#mods().by('name','moduleInputs').view().impl().addIoNode(direction.LEFT) \n\n","sub_path":"q3/q3/bootstrap/beforeShow.py","file_name":"beforeShow.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"85738140","text":"#Capitalize first and last name\ndef capslock(names):\n for i in range(len(names)):\n string = names[i]\n if(names[i].isalpha() == True):\n string = string.title();\n names[i] = string\n final = ' '.join(names)\n return final\nnames = input().split(' ')\ncapital = capslock(names)\nprint(capital)\n\n\n#q w e r t y u i o p a s d f g h j k l z x c v b n m Q W E R T Y U I O P A S D F G H J K L Z X C V B N M\n#1 2 2 3 4 5 6 7 8 9\n","sub_path":"Hackerrank/Strings/capslock.py","file_name":"capslock.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"503456410","text":"import pandas as pd, numpy as np\nimport os, matplotlib.pyplot as plt\nimport pmdarima as pm # pip install pmdarima\nimport pandas as pd\n\n# change working directory to script path\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir('C:/Users/camerum/Desktop/Decision_Support_system/SsdWebApi/Models')\n\n\n\n\ndf = 
pd.read_csv('/FTSE_MIB.csv', names=['FTSE_MIB'], header=0)\nds = df.sales\nmodel = pm.auto_arima(ds.values, start_p=1, start_q=1,\ntest='adf', max_p=3, max_q=3, m=4,\nstart_P=0, seasonal=True,\nd=None, D=1, trace=True,\nerror_action='ignore',\nsuppress_warnings=True,\nstepwise=True) # False full grid\nprint(model.summary())\nmorder = model.order\nmseasorder = model.seasonal_order\nfitted = model.fit(ds)\nyfore = fitted.predict(n_periods=4) # forecast\nypred = fitted.predict_in_sample()\nplt.plot(ds.values)\nplt.plot(ypred)\nplt.plot([None for i in ypred] + [x for x in yfore])\nplt.xlabel('time');plt.ylabel('sales')\n\nplt.show()\n\n\n","sub_path":"Models/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"465287284","text":"# -*- coding: utf-8 -*-\n# -------------------------------------------------------------------------\n# This is a sample controller\n# this file is released under public domain and you can use without limitations\n# -------------------------------------------------------------------------\nfrom pydal.helpers.methods import smart_query\n\n\n# TODO: \n# Wochenplan bearbeiten ermöglichen\n# Rezepte zu Wochenplan hinzufügen\n# Rezepte im Wochenplan anzeigen (Bild + Link zu Rezept)\n# Rezepte: Schritte als einzelne Steps anlegen\n# Styling \n# Haushalte statt einzelnen Nutzern\n# Rezepte aus Wochenplan löschen, wenn Tag vorbei\n# Meine Rezepte schöne Anzeige\n# Profilbereich und meinCookr an eine Stelle\n# Passwort vergessen nach Login weg\n# Login, etc: Sprache Deutsch\n# Schönes/ Sinnvolles Home\n\n\n \n# http://hostname/Cookr/default/index\ndef index():\n return locals()\n\n#http://hostname/Cookr/default/addToWeekplan/\n#add a recipe to your weekplan: choose at which day you want to cook it\n@auth.requires_login()\n@auth.requires_login()\ndef addToWeekplan():\n id = request.args(0, cast=int)\n \n if (request.vars.weekday):\n day = request.vars.weekday\n #db(db.weekplan.household == auth.user.id).update(day = id)\n \n #no weekplan stored yet, insert new weekplan in database\n if (db(db.weekplan.household == auth.user.id).isempty()):\n db.weekplan.insert()\n \n \n #store recipe in selected day\n \n if (day == 'monday'):\n db(db.weekplan.household == auth.user.id).update(monday = id)\n elif (day == 'tuesday'):\n db(db.weekplan.household == auth.user.id).update(tuesday = id)\n elif (day == 'wednesday'):\n db(db.weekplan.household == auth.user.id).update(wednesday = id)\n elif (day == 'thursday'):\n db(db.weekplan.household == auth.user.id).update(thursday = id)\n elif (day == 'friday'):\n db(db.weekplan.household == auth.user.id).update(friday = id)\n elif (day == 'saturday'):\n db(db.weekplan.household == auth.user.id).update(saturday = id)\n elif (day == 'sunday'):\n db(db.weekplan.household == auth.user.id).update(sunday = id)\n\n\n #if recipe was to add: redirect to showWeekplan\n redirect(URL('showWeekplan'))\n\n recipe = db.recipe(id)\n\n usersWeekplan = db(db.weekplan.household == auth.user.id).select()\n\n\n if usersWeekplan:\n monday = usersWeekplan[0].monday\n tuesday = usersWeekplan[0].tuesday\n wednesday = usersWeekplan[0].wednesday\n thursday = usersWeekplan[0].thursday\n friday = usersWeekplan[0].friday\n saturday = usersWeekplan[0].saturday\n sunday = usersWeekplan[0].sunday\n\n form=SQLFORM(db.weekplan).process()\n\n\n return locals()\n\n\n# http://hostname/Cookr/default/showWeekplan\n# shows your weekplan: which recipes 
to cook on what day of the week\n@auth.requires_login()\ndef showWeekplan():\n usersWeekplan = db(db.weekplan.household == auth.user.id).select()\n \n if usersWeekplan:\n monday = usersWeekplan[0].monday\n tuesday = usersWeekplan[0].tuesday\n wednesday = usersWeekplan[0].wednesday\n thursday = usersWeekplan[0].thursday\n friday = usersWeekplan[0].friday\n saturday = usersWeekplan[0].saturday\n sunday = usersWeekplan[0].sunday\n\n return locals()\n\n# http://hostname/Cookr/default/newRecipe\n@auth.requires_login()\ndef newRecipe():\n newRecipeForm = SQLFORM(db.recipe).process()\n\n addIngredientForm = SQLFORM(db.recipeContainsIngredient).process()\n\n currentIngredients = db(db.ingredient).select()\n\n newRecipeForm.element(_type='submit')['_class']='mainButton'\n #redirect to http://hostname/default/showRecipe/\n if newRecipeForm.accepted: redirect(URL('showRecipe', args=(newRecipeForm.vars.id)))\n return locals()\n\n# http://hostname/Cookr/default/recipeList/\ndef recipeList():\n RECIPES_PER_PAGE = 5\n\n page = request.args(0, cast=int)\n\n startValue = page * RECIPES_PER_PAGE\n endValue = page * RECIPES_PER_PAGE + RECIPES_PER_PAGE\n\n recipes = db(db.recipe).select(orderby=db.recipe.title, limitby=(startValue, endValue))\n\n\n #ingredients = db(db.ingredient).select()\n return locals()\n\n\n\n\n\n# http://hostname/Cookr/default/search\ndef search():\n #list_of_searchable_fields = [db.category.name, db.origin.name, db.recipe.title]\n #query = smart_query(list_of_searchable_fields, search_text)\n #rows = db(query).select()\n\n rows = []\n searchKeyword = request.vars[\"search\"]\n if searchKeyword != None and searchKeyword != \"\":\n list_of_searchable_fields = [db.recipe.title, db.category.name]\n query = smart_query(list_of_searchable_fields, searchKeyword).select()\n rows = db(query).select()\n return locals()\n\n# http://hostname/Cookr/default/categoryRecipeList//\ndef categoryRecipeList():\n RECIPES_PER_PAGE = 5\n \n categoryId = request.args(0) \n page = request.args(1, cast = int)\n \n startValue = page * RECIPES_PER_PAGE\n endValue = page * RECIPES_PER_PAGE + RECIPES_PER_PAGE\n \n recipes = db(db.recipe.category == categoryId).select(orderby = db.recipe.title, limitby = (startValue, endValue))\n \n category = db(db.category.id == categoryId).select()\n categoryName = category[0].name\n return locals()\n\n\n# http://hostname/Cookr/default/originRecipeList//\ndef originRecipeList():\n RECIPES_PER_PAGE = 5\n \n originId = request.args(0)\n page = request.args(1, cast = int)\n \n startValue = page * RECIPES_PER_PAGE\n endValue = page * RECIPES_PER_PAGE + RECIPES_PER_PAGE\n \n recipes = db(db.recipe.origin == originId).select(orderby = db.recipe.title, limitby = (startValue, endValue))\n \n origin = db(db.origin.id == originId).select()\n originName = origin[0].name\n return locals()\n\n# http://hostname/Cookr/default/showIngredient/\ndef showIngredient():\n ingredient = db.ingredient(request.args(0, cast=int))\n return locals()\n\n# http://hostname/Cookr/default/showRecipe/\ndef showRecipe():\n recipe = db.recipe(request.args(0, cast=int))\n items = db(db.recipeContainsItem.recipe == recipe.id).select()\n #ingredients = db(db.recipeContainsIngredient.recipe == recipe.id).select()\n #steps = db(db.recipe_step.recipe == recipe.id).select(orderby = db.recipe_step.stepNumber)\n return locals()\n\n\n\ndef randomRecipe():\n return 'Zeige zufälliges Rezept'\n\n# http://hostname/Cookr/default/editRecipe/\ndef editRecipe():\n id = request.args(0, cast=int)\n recipeForm = SQLFORM(db.recipe, 
id).process(next=URL('showRecipe', args=id))\n recipeForm.element(_type='submit')['_class']='mainButton'\n return locals()\n\n\n\n# http://hostname/Cookr/default/manageRecipes\n# shows all own recipes -> edit and delete them easily\n@auth.requires_login()\ndef manageRecipes():\n recipesGrid = SQLFORM.grid(db.recipe.writer == auth.user.id)\n return locals()\n\n\n\n# ---- API (example) -----\n@auth.requires_login()\ndef api_get_user_email():\n if not request.env.request_method == 'GET': raise HTTP(403)\n return response.json({'status':'success', 'email':auth.user.email})\n\n# ---- Smart Grid (example) -----\n@auth.requires_membership('admin') # can only be accessed by members of admin groupd\ndef grid():\n response.view = 'generic.html' # use a generic view\n tablename = request.args(0)\n if not tablename in db.tables: raise HTTP(403)\n grid = SQLFORM.smartgrid(db[tablename], args=[tablename], deletable=False, editable=False)\n return dict(grid=grid)\n\n# ---- Embedded wiki (example) ----\ndef wiki():\n auth.wikimenu() # add the wiki to the menu\n return auth.wiki() \n\n# ---- Action for login/register/etc (required for auth) -----\ndef user():\n \"\"\"\n exposes:\n http://..../[app]/default/user/login\n http://..../[app]/default/user/logout\n http://..../[app]/default/user/register\n http://..../[app]/default/user/profile\n http://..../[app]/default/user/retrieve_password\n http://..../[app]/default/user/change_password\n http://..../[app]/default/user/bulk_register\n use @auth.requires_login()\n @auth.requires_membership('group name')\n @auth.requires_permission('read','table name',record_id)\n to decorate functions that need access control\n also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users\n \"\"\"\n return dict(form=auth())\n\n# ---- action to server uploaded static content (required) ---\n@cache.action()\ndef download():\n \"\"\"\n allows downloading of uploaded files\n http://..../[app]/default/download/[filename]\n \"\"\"\n return response.download(request, db)\n","sub_path":"controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":8768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"144441130","text":"def make_dict(ctx, ks, x_ticks, x_axis):\n dict = {}\n dict[\"header\"] = {}\n dict[\"header\"][\"x_axis\"] = x_axis\n dict[\"header\"][\"x_ticks\"] = []\n dict[\"header\"][\"num_bands\"] = ctx.num_bands()\n dict[\"header\"][\"num_mag_dims\"] = ctx.num_mag_dims()\n\n for e in enumerate(x_ticks):\n j = {}\n j[\"x\"] = e[1][0]\n j[\"label\"] = e[1][1]\n dict[\"header\"][\"x_ticks\"].append(j)\n\n dict[\"bands\"] = []\n\n for ik in range(ks.num_kpoints()):\n bnd_k = {}\n bnd_k[\"kpoint\"] = [0.0, 0.0, 0.0]\n for x in range(3):\n bnd_k[\"kpoint\"][x] = ks(ik).vk()(x)\n bnd_e = []\n\n bnd_e = ks.get_band_energies(ik, 0)\n\n bnd_k[\"values\"] = bnd_e\n dict[\"bands\"].append(bnd_k)\n return dict\n","sub_path":"python_module/sirius/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"326029356","text":"#!/usr/bin/env python\nfrom duckie_utils.configurable import Configurable\nfrom duckie_utils.instantiate_utils import instantiate\nfrom duckie_utils.image import DuckieImageToBGRMat\nfrom duckie_utils.stats import Stats\nfrom duckie_utils.timekeeper import TimeKeeper\nfrom rr_utils import (RRNodeInterface, LaunchRRNode, 
FormatRobdefString)\nfrom line_detector.line_detector_plot import *\nimport cv2\nimport numpy as np\nimport threading\nimport time\nimport yaml\nimport sys, argparse\n\nimport RobotRaconteur as RR\nRRN = RR.RobotRaconteurNode.s\n\nclass LineDetectorNode(Configurable,RRNodeInterface):\n \"\"\"Line Detector Node will return detected lines.\"\"\"\n def __init__(self, configuration):\n self.node_name = \"lineDetector\"\n\n # initialize configurable\n param_names = [\n 'img_size',\n 'top_cutoff',\n 'detector'\n ]\n Configurable.__init__(self,param_names,configuration)\n\n\n self._segments = None\n self._verbose = False\n self._verboseImage = None\n\n self._pub_im = RRN.NewStructure(\"Duckiebot.Image\")\n self._pub_im.height = self.img_size[0] - self.top_cutoff\n self._pub_im.width = self.img_size[1]\n self._pub_im.format = 'bgr'\n\n self.DuckieConsts = RRN.GetConstants(\"Duckiebot\")\n \n # Thread lock\n self.thread_lock = threading.Lock()\n\n self.active = True\n\n self.stats = Stats()\n\n # only print every 10 cycles\n self.intermittent_interval = 100\n self.intermittent_counter = 0\n\n # extract the detector params\n c = self.detector\n assert isinstance(c,list) and len(c) == 2, c\n\n self.log(\"new detector config: %s\"%(str(c)) )\n\n # instantiate it\n # c[0] is the detector type -- e.g. line_detector.LineDetectorHSV\n # c[1] is any inpit args (the configuration dictionary)\n self.detector = instantiate(c[0],c[1])\n \n # Find and connect to the image service\n self.duckie_cam = self.FindAndConnect(\"Duckiebot.Camera.Camera\")\n \n try:\n self.duckie_cam.changeFormat('jpeg')\n except: pass\n\n # connect to the pipe\n self.imstream = self.duckie_cam.ImageStream.Connect(-1) # connect to the pipe\n self.imstream.PacketReceivedEvent+=self._cbImage\n\n try:\n self.duckie_cam.startCapturing()\n except: pass\n\n def toggleVerbose(self):\n self._verbose = (not self._verbose)\n\n @property \n def verboseImage(self):\n return self._verboseImage\n \n @verboseImage.setter\n def verboseImage(self,value):\n self._verboseImage = value\n self._verboseImagestream = RR.PipeBroadcaster(self._verboseImage,1)\n\n \n @property\n def segments(self):\n return self._segments\n\n @segments.setter\n def segments(self,value):\n self._segments = value\n self._segments_wire = RR.WireBroadcaster(self._segments)\n\n def intermittent_log_now(self):\n return self.intermittent_counter % self.intermittent_interval == 1\n\n def intermittent_log(self,s):\n if not self.intermittent_log_now():\n return\n msg = \"%3d:%s\"%(self.intermittent_counter, s)\n self.log(msg)\n\n def _cbImage(self, pipe_ep):\n self.stats.received()\n image=pipe_ep.ReceivePacket()\n\n if not self.active:\n return\n\n #start a daemon thread to process the image\n thread = threading.Thread(target=self._processImage, args=(image,))\n thread.setDaemon(True)\n thread.start()\n # this returns right away...\n\n def _processImage(self,image):\n if not self.thread_lock.acquire(False): # False indicates non-blocking\n self.stats.skipped()\n # return immediately if the thread is locked\n return\n\n try:\n self.__processImage(image)\n finally:\n # release the thread lock\n self.thread_lock.release()\n\n def __processImage(self,image):\n self.stats.processed()\n\n if self.intermittent_log_now():\n self.intermittent_log(self.stats.info())\n self.stats.reset()\n\n tk = TimeKeeper(image.header)\n self.intermittent_counter += 1\n\n # extract the image data\n try:\n image_cv = DuckieImageToBGRMat(image)\n except ValueError as e:\n self.log(\"Could not decode image: 
%s\"%(e))\n return\n\n tk.completed('decode')\n\n # resize and crop image\n h_original, w_original = image_cv.shape[0:2]\n\n if self.img_size[0] != h_original or self.img_size[1] != w_original:\n image_cv = cv2.resize(image_cv, (self.img_size[1], self.img_size[0]),\n interpolation=cv2.INTER_NEAREST)\n image_cv = image_cv[self.top_cutoff:,:,:]\n\n tk.completed('resized')\n # apply color correction ... \n # ADD IN LATER IF NEEDED\n\n # set the image to be detected\n self.detector.setImage(image_cv)\n\n # Detect lines and normals\n white = self.detector.detectLines('white')\n yellow = self.detector.detectLines('yellow')\n red = self.detector.detectLines('red')\n\n tk.completed('detected')\n\n # Reset the segments list\n segmentList = []\n # convert to normalized pixel coordinates, and add segments to segment list\n arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))\n arr_ratio = np.array((1./self.img_size[1], 1./self.img_size[0], 1./self.img_size[1], 1./self.img_size[0] ))\n \n if len(white.lines) > 0:\n lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)\n segmentList.extend(self.toSegment(lines_normalized_white, white.normals, self.DuckieConsts.WHITE))\n \n if len(yellow.lines) > 0:\n lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)\n segmentList.extend(self.toSegment(lines_normalized_yellow, yellow.normals, self.DuckieConsts.YELLOW))\n \n if len(red.lines) > 0:\n lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)\n segmentList.extend(self.toSegment(lines_normalized_red, red.normals, self.DuckieConsts.RED))\n\n self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),\n len(yellow.lines), len(red.lines)))\n\n tk.completed('prepared')\n\n #Publish\n self._segments_wire.OutValue = segmentList\n tk.completed('--pub_lines--')\n\n # VISUALIZATION\n if self._verbose:\n\n # Draw lines and normals\n image_with_lines = np.copy(image_cv)\n drawLines(image_with_lines, white.lines, (0,0,0))\n drawLines(image_with_lines, yellow.lines, (255,0,0))\n drawLines(image_with_lines, red.lines, (0,255,0))\n\n tk.completed('drawn')\n\n # publish the image with lines\n self._pub_im.data = np.reshape(image_with_lines,\n image_with_lines.size)\n self._verboseImagestream.AsyncSendPacket(self._pub_im, lambda:None)\n tk.completed('pub_image')\n\n self.intermittent_log(tk.getall())\n\n def toSegment(self, lines, normals, color):\n segmentList = []\n segment = RRN.NewStructure(\"Duckiebot.Segment\")\n segment.pixels_normalized = [RRN.NewStructure(\"Duckiebot.Vector2D\"),\n RRN.NewStructure(\"Duckiebot.Vector2D\")]\n segment.normal = RRN.NewStructure(\"Duckiebot.Vector2D\")\n for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):\n segment.color = color\n segment.pixels_normalized[0].x = x1\n segment.pixels_normalized[0].y = y1\n segment.pixels_normalized[1].x = x2\n segment.pixels_normalized[1].y = y2\n segment.normal.x = norm_x\n segment.normal.y = norm_y\n\n segmentList.append(segment)\n return segmentList\n\n def onShutdown(self):\n self.log(\"Stopping...\")\n self.imstream.Close()\n self.log(\"Shutdown.\")\n\n\nif __name__ == '__main__':\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Initialize the line detector')\n parser.add_argument('--config', type=open,\n help='A config file for the line detector (Otherwise use Default)')\n parser.add_argument('--port',type=int,default=0,\n help='TCP port to host service on' +\\\n '(will auto-generate if not specified)')\n parser.add_argument('args', 
nargs=argparse.REMAINDER)\n\n args = parser.parse_args(sys.argv[1:])\n\n config_file = args.config\n if config_file is None:\n config_file = '${DEFAULT_LD_PARAMS}'\n\n launch_file = \"\"\"\\\nnode_name: Duckiebot.LineDetector\n\nobjects:\n - name: Duckiebot\n robdef: ${DUCKIEBOT_ROBDEF}\n\n - name: LineDetector\n robdef: ${LINEDETECTOR_ROBDEF}\n class: LineDetectorNode.LineDetectorNode\n configuration: %s \n\ntcp_port: %d \n \"\"\"%(config_file,args.port)\n \n launch_config = yaml.load(launch_file)\n LaunchRRNode(**launch_config)\n","sub_path":"laneInfo/src/LineDetectorNode.in.py","file_name":"LineDetectorNode.in.py","file_ext":"py","file_size_in_byte":9179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"328962814","text":"from owlready2 import *\nimport types\nfrom mister_data.data_models import MrDataThing, MrDataProperty\nfrom pprint import pprint\n\nonto = get_ontology(\"mrdata.owl\")\n\nwith onto:\n class MrDataOwlThing(Thing):\n uri = \"mrdata:thing\"\n\n def __init__(self, name=None, properties=None, ontology=None, **kargs):\n properties = properties or []\n self._properties = []\n for p in properties:\n self.add_property(p)\n Thing.__init__(self, name, ontology, **kargs)\n if name and name != self.__class__.__name__:\n self.uri = \"mrdata_ex:\" + name\n else:\n self.uri = \"mrdata:\" + self.__class__.__name__\n\n def add_property(self, prop):\n assert isinstance(prop, MrDataOwlProperty)\n self._properties.append(prop)\n\n def as_sql(self):\n return self.as_standard_format().as_sql()\n\n def as_owl(self):\n return self\n\n def as_standard_format(self, include_parents=False):\n # create thing object\n thingClass = types.new_class(self.uri.split(\":\")[-1],\n (MrDataThing,))\n if self.uri.split(\":\")[-1] == self.__class__.__name__:\n thingClass.uri = \"mrdata:\" + \\\n self.__class__.__bases__[0].__name__\n else:\n thingClass.uri = \"mrdata:\" + self.__class__.__name__\n thing = thingClass(self.name)\n thing.uri = self.uri\n if hasattr(self, \"has_for_property\"):\n for prop in self.has_for_property():\n prop_class_name = prop.name\n prop_value = prop.has_value.name\n\n if callable(prop.has_value):\n\n propClass = types.new_class(prop_class_name,\n (MrDataProperty,))\n propClass.uri = \"mrdata_prop:\" + prop_class_name\n\n p = propClass()\n po = prop.has_value().as_standard_format().__class__\n po.uri = \"mrdata:\" + prop_value\n p._target = po\n p.uri = prop_class_name + \":\" + prop_value\n thing._properties.append(p)\n else:\n inst = prop.uri.split(\":\")[-1]\n thingClass = prop.is_a[0]().as_standard_format().__class__\n thingClass.uri = \"mrdata:\" + thingClass.__name__\n po = prop.is_a[1]().as_standard_format()\n po.uri = self.name + \":\" + inst\n po.__class__.uri = po.__class__.__name__ + \":\" + \\\n thingClass.__name__\n po._target = thingClass(inst)\n thing._properties.append(po)\n else:\n #pprint(self.__class__().has_for_property)\n if hasattr(self.__class__, \"class_properties\"):\n for prop in self.__class__.class_properties:\n prop_class_name = prop.name\n prop_value = prop.has_value.name\n\n if callable(prop.has_value):\n propClass = types.new_class(prop_class_name,\n (MrDataProperty,))\n propClass.uri = \"mrdata_prop:\" + prop_class_name\n\n p = propClass()\n po = prop.has_value().as_standard_format().__class__\n po.uri = \"mrdata:\" + prop_value\n p._target = po\n p.uri = prop_class_name + \":\" + prop_value\n thing._properties.append(p)\n else:\n thingClass = prop.is_a[0]().as_standard_format().__class__\n 
thingClass.uri = \"mrdata:\" + thingClass.__name__\n po = prop.is_a[1]().as_standard_format()\n po.uri = po.__class__.__name__ + \":\" + \\\n thingClass.__name__\n po.__class__.uri = \"mrdata_prop:\" + \\\n po.__class__.__name__\n po._target = thingClass\n thing._properties.append(po)\n\n return thing\n\n def as_standard_format_expanded(self):\n thing = self.as_standard_format(True)\n for ance in self.__class__.ancestors():\n if ance.__name__ in [\"Thing\", \"MrDataOwlThing\",\n self.__class__.__name__]:\n continue\n inst = ance().as_standard_format().__class__\n propClass = types.new_class(\"is_a\", (MrDataProperty,))\n prop = propClass(inst)\n prop._target = inst\n thing._properties += [prop]\n for ance in self.__class__.descendants():\n if ance.__name__ in [\"Thing\", \"MrDataOwlThing\",\n self.__class__.__name__]:\n continue\n inst = ance().as_standard_format().__class__\n propClass = types.new_class(\"has_for_example\",\n (MrDataProperty,))\n prop = propClass(inst)\n prop._target = inst\n thing._properties += [prop]\n\n return thing\n\n def deduced_triples(self):\n triples = self.as_triples()\n all = self.as_standard_format_expanded().as_triples()\n return [t for t in all if t not in triples]\n\n def as_triples(self):\n std = self.as_standard_format()\n triples = []\n bucket = []\n if isinstance(std, list):\n for a in std:\n bucket += a.as_triples()\n else:\n bucket = std.as_triples()\n for t in bucket:\n if t not in triples:\n triples += [t]\n return triples\n\n def has_for_property(self):\n as_dict = self.__dict__\n if hasattr(self.__class__, \"class_properties\"):\n props = self.__class__.class_properties\n else:\n props = []\n for k in as_dict:\n if k in [\"storid\", \"namespace\"]:\n continue\n if k.startswith(\"_\"):\n continue\n continue # TODO\n propClass = types.new_class(k, (MrDataOwlProperty,))\n\n prop = propClass(self.name + \":\" + k)\n prop.has_value = as_dict[k]\n prop.property_of = self\n props.append(prop)\n return props + self._properties\n\n\n class MrDataOwlProperty(MrDataOwlThing):\n uri = \"mrdata:property\"\n has_value = None\n property_of = None\n\n def __init__(self, target_object=None, source_object=None,\n properties=None, ontology=None, **kargs):\n if target_object:\n p = target_object.__class__.__bases__[-1].__name__\n t = target_object.name\n self._target_dict = target_object.__dict__\n MrDataOwlThing.__init__(self, self.__class__.__name__,\n properties, ontology, **kargs)\n\n if target_object is None:\n self.uri = \"mrdata_prop:\" + self.__class__.__name__\n else:\n assert hasattr(target_object, \"uri\")\n self.uri = p + \":\" + t\n self.has_value = target_object\n\n def as_standard_format(self):\n\n # create prop object\n #print(self.is_a)\n propClass = types.new_class(self.uri.split(\":\")[-1],\n (MrDataProperty,))\n propClass.uri = \"mrdata_prop:\" + self.uri.split(\":\")[0]\n prop = propClass(self.name)\n prop.uri = self.uri\n\n if hasattr(self, \"has_for_property\"):\n for prop in self.has_for_property():\n print(prop.as_triples())\n\n if isinstance(self.has_value, list):\n # TODO\n print(1, self.__class__)\n else:\n if not callable(self.has_value):\n # individual\n thingClass = types.new_class(self.is_a[0].name,\n (MrDataThing,))\n thingClass.uri = \"mrdata:\" + self.is_a[0].name\n thing = thingClass()\n thing.uri = \"mrdata_ex:\" + self.uri.split(\":\")[-1]\n\n prop._target = thing\n else:\n # class\n # TODO\n print(3, self.has_value.__class__)\n\n return prop\n\nif __name__ == \"__main__\":\n from pprint import pprint\n\n 
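# print every raw (subject, predicate, object) triple currently stored for the ontology\n    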
pprint(onto.get_triples(None, None, None))\n","sub_path":"mister_data/data_models/owl.py","file_name":"owl.py","file_ext":"py","file_size_in_byte":9199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"317453619","text":"import unittest\nfrom riSearchBuilder import RISearchBuilder, RITriple\n\n\nclass RISearchBuilderTest( unittest.TestCase ):\n\n def test_using_string(self):\n ri = RISearchBuilder()\n ri.and_clause( \"$item \" % (\"isMemberOf\", \"bdr:2559\") )\n ri.and_clause( \"$item $page\" ).order_by(\"$page\")\n self.assertEqual(\n ri.serialize(joiner=' '),\n 'select $page $item from <#ri> where $item and $item $page order by $page'\n )\n self.assertEqual(\n type( ri.serialize(joiner=' ') ),\n str # not unicode? :)\n )\n\n def test_using_ritriple(self):\n ri2 = RISearchBuilder()\n ri2.and_clause( RITriple( \"$item\", \"\" % \"isAnnotationOf\", \"\" % \"bdr:11111\") )\n ri2.and_clause( \"$item $page\" ).order_by(\"$monkey\")\n self.assertEqual(\n ri2.serialize(joiner=\"\\n\"),\n 'select $page $item\\nfrom <#ri>\\nwhere\\n$item and $item $page'\n )\n\n def test_passing_in_ritriple(self):\n myClause = RITriple(\"$item\", \"\" % \"isMemberOf\", \"\" % \"bdr:2222\")\n ri3 = RISearchBuilder( [myClause] ).order_by(\"$item\")\n self.assertEqual(\n ri3.serialize(\"\\n\"),\n 'select $item\\nfrom <#ri>\\nwhere\\n$item \\norder by $item'\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test_RISearchBuilder.py","file_name":"test_RISearchBuilder.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"335654204","text":"import unittest\nimport numpy as np\nimport re\n\n\ndef input_file():\n # return the input_test file in a text\n file = open('input', 'r')\n lines = [line.rstrip('\\n') for line in file]\n file.close()\n return lines\n\n\ndef output_file():\n # read line of output_1 file\n file = open('output', 'r')\n res = file.read()\n file.close()\n return res\n\n\nclass Position:\n # class that illustrate a point\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def get_position(self):\n # return the current position\n return self.x, self.y\n\n def get_x(self):\n # return the current position\n return self.x\n\n def get_y(self):\n # return the current position\n return self.y\n\n def set_position(self, x, y):\n # set the current position\n self.x = x\n self.y = y\n\n def move(self, vx, vy):\n # move into the next position using\n self.x += vx\n self.y += vy\n\n\nclass Velocity:\n # class that illustrate a velocity\n def __init__(self, vx, vy):\n self.vx = vx\n self.vy = vy\n\n def get_velocity(self):\n # return the velocity\n return self.vx, self.vy\n\n\nclass Cloud_point:\n # class that illustrate a node containing header, child nodes and metadata entries\n def __init__(self, position, velocity):\n self.position = position\n self.velocity = velocity\n\n def step(self):\n # get the current velocity\n vx, vy = self.velocity.get_velocity()\n # move into the next position\n self.position.move(vx, vy)\n\n def back_step(self):\n # get the current velocity\n vx, vy = self.velocity.get_velocity()\n # move into the next position\n self.position.move(-vx, -vy)\n\n def get_position_x(self):\n # return the x position\n return self.position.get_x()\n\n def get_position_y(self):\n # return the y position\n return self.position.get_y()\n\n\nclass Cloud:\n # class that illustrate all the points of the cloud with the respectives 
velocities\n def __init__(self, cloud_points, velocities):\n self.cloud_points = cloud_points\n self.velocities = velocities\n self.max_y, self.max_x = self.get_dimension_max()\n self.cloud = np.full((self.max_y, self.max_x), False)\n self.string_cloud = \"\"\n self.final_step = 0\n\n def visualize(self):\n # return the final step\n return self.final_step\n\n def step_cloud(self):\n # move all cloud by one step\n for i in range(len(self.cloud_points)):\n self.cloud_points[i].step()\n\n def back_step_cloud(self):\n # inverse move all cloud by one step\n for i in range(len(self.cloud_points)):\n self.cloud_points[i].back_step()\n\n def get_dimension_max(self):\n # get the max x and y of the cloud\n max_x, max_y = (0, 0)\n for i in range(len(self.cloud_points)):\n if max_x < self.cloud_points[i].get_position_x():\n max_x = self.cloud_points[i].get_position_x()\n if max_y < self.cloud_points[i].get_position_y():\n max_y = self.cloud_points[i].get_position_y()\n return max_y, max_x\n\n def str_cloud(self, max_x, min_x, max_y, min_y):\n # return the cloud in a string from a little array\n self.cloud = np.full((max_y - min_y, max_x - min_x), False)\n # get number line of cloud\n number_line_cloud = self.cloud.shape[1]\n # get number column of cloud\n number_column_cloud = self.cloud.shape[0]\n # add cloud_points to cloud\n for cloud_point in self.cloud_points:\n self.cloud[cloud_point.get_position_y()-1-min_y][cloud_point.get_position_x()-1-min_x] = True\n # create the cloud\n string_cloud = \"\"\n for i in range(number_column_cloud):\n for j in range(number_line_cloud):\n if self.cloud[i][j]:\n string_cloud += \"#\"\n else:\n string_cloud += \".\"\n string_cloud += \"\\n\"\n return string_cloud\n\n def get_shape_cloud_dimension(self):\n # return the border min max of x y to know where we will end the execution\n max_x, max_y = (0, 0)\n min_x, min_y = (10000000, 10000000)\n # get the border to know how positionning the points\n for cloud_point in self.cloud_points:\n if cloud_point.get_position_x() > max_x:\n max_x = cloud_point.get_position_x()\n if cloud_point.get_position_x() < min_x:\n min_x = cloud_point.get_position_x()\n if cloud_point.get_position_y() > max_y:\n max_y = cloud_point.get_position_y()\n if cloud_point.get_position_y() < min_y:\n min_y = cloud_point.get_position_y()\n return max_x - min_x, max_y - min_y\n\n def get_coordonate_cloud_dimension(self):\n # return the border min max of x y to calculate the result\n max_x, max_y = (0, 0)\n min_x, min_y = (10000000, 10000000)\n # get the border to know how positionning the points\n for cloud_point in self.cloud_points:\n if cloud_point.get_position_x() > max_x:\n max_x = cloud_point.get_position_x()\n if cloud_point.get_position_x() < min_x:\n min_x = cloud_point.get_position_x()\n if cloud_point.get_position_y() > max_y:\n max_y = cloud_point.get_position_y()\n if cloud_point.get_position_y() < min_y:\n min_y = cloud_point.get_position_y()\n return max_x, min_x-1, max_y, min_y-1\n\n def exec(self):\n # execute the print cloud step by step\n i = 0\n while True:\n # get dimension shape\n old_x, old_y = self.get_shape_cloud_dimension()\n # use velocity on cloud\n self.step_cloud()\n # thx nico\n curr_x, curr_y = self.get_shape_cloud_dimension()\n if curr_x < old_x and curr_y < old_y:\n i += 1\n continue\n else:\n self.final_step = i\n # get the cloud shape\n self.back_step_cloud()\n # get the final coordonate\n max_x, min_x, max_y, min_y = self.get_coordonate_cloud_dimension()\n # get the final cloud\n self.string_cloud = 
self.str_cloud(max_x, min_x, max_y, min_y)\n break\n\n\ndef data_retrieve(lines):\n # return the new lines traited\n data = []\n for line in lines:\n data.append([int(d) for d in re.findall(r'-?\\d+', line)])\n return data\n\n\ndef get_border(data):\n # return the border min max of x y\n max_x, max_y = (0, 0)\n min_x, min_y = (10000000, 10000000)\n # get the border to know how positionning the points\n for raw in data:\n if raw[0] > max_x:\n max_x = raw[0]\n if raw[0] < min_x:\n min_x = raw[0]\n if raw[1] > max_y:\n max_y = raw[1]\n if raw[1] < min_y:\n min_y = raw[1]\n return max_x, min_x, max_y, min_y\n\n\ndef data_preparation(data):\n # return the cloud points and velocities associated\n cloud_points = []\n velocities = []\n # transform points to positions\n max_x, min_x, max_y, min_y = get_border(data)\n # using those borders, we get new points\n # we only have to add the opposite min to each x and y value\n # fufill the cloud points and velocities using input_test text\n for raw in data:\n cloud_points.append(Cloud_point(Position(raw[0]-min_x, raw[1]-min_y), Velocity(raw[2], raw[3])))\n return cloud_points, velocities\n\n\ndef day_10_part_2(lines):\n # data retrieve\n data = data_retrieve(lines)\n # data preparation\n cloud_points, velocities = data_preparation(data)\n # data modelisation\n cloud = Cloud(cloud_points, velocities)\n # data analyse\n cloud.exec()\n # data visualize\n nb_step = cloud.visualize()\n return str(nb_step)\n\n\nclass TestDay10part2(unittest.TestCase):\n\n def test_day_10_part_2(self):\n lines = input_file()\n res = output_file()\n pred = day_10_part_2(lines)\n assert(pred == res)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"2018/Day 10/Part two/TestDay10part2.py","file_name":"TestDay10part2.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"240883754","text":"\"\"\"Representing EmotionsList ADT\"\"\"\nfrom nltk.classify import NaiveBayesClassifier\nfrom modules.emotion_list.stopwords import STOPWORDS\nfrom modules.data_structures.arrays import DynamicArray\n\n\nclass EmotionsList:\n \"\"\"Representing EmotionsList ADT that preserves\n emotions and their probabilities\"\"\"\n def __init__(self, database):\n \"\"\"\n (str) ->\n Initialize database and creates two DynamicArray\n objects, and trains the model\n \"\"\"\n self.database = database\n self.tweet = None\n self.emotions = DynamicArray()\n self.probabilities = DynamicArray()\n self._classifier = self._train_model()\n\n def set_tweet(self, tweet):\n \"\"\"\n (str) ->\n Sets tweet which the ADT will analyze\n \"\"\"\n self.tweet = tweet\n\n @staticmethod\n def _remove_stop_words(tweet):\n \"\"\"\n (str) -> str\n Returns tweet with removed stop words\n \"\"\"\n tokens_without_sw = \"\"\n for word in tweet.split():\n if not word.lower() in STOPWORDS:\n tokens_without_sw += word.lower() + \" \"\n return tokens_without_sw\n\n def _tweets_features(self, tweet):\n \"\"\"\n (str) -> dict\n Additional method.\n Creates dictionary with tweet to use it in the\n classifying this tweet\n \"\"\"\n tweet = self._remove_stop_words(tweet)\n return {'tweet': tweet}\n\n def _creating_set(self):\n \"\"\"\n () -> DynamicArray\n Returns an array to train model on it\n \"\"\"\n result_set = DynamicArray()\n file = open(self.database, \"r\", encoding='utf-8')\n file.readline()\n for line in file:\n line = line.split('\\t')\n first_el = self._tweets_features(line[0])\n result_set.append((first_el, line[1]))\n 
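# close the training file now that every line has been consumed\n        file.close()\n        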
return result_set\n\n def _train_model(self):\n \"\"\"\n () -> NaiveBayesClassifier\n Trains model with data\n \"\"\"\n train_set = self._creating_set()\n return NaiveBayesClassifier.train(train_set)\n\n def get_tweet_emotion(self):\n \"\"\"\n () -> str\n Returns a main emotion of the tweet and\n adds to the array\n \"\"\"\n label = self._classifier.classify(self._tweets_features(self.tweet))\n self.emotions.append(label)\n return label\n\n def get_emotions_probability(self):\n \"\"\"\n () -> DynamicArray\n Returns an array with emotions of the tweet\n and their probabilities and adds to the array\n \"\"\"\n emotions = DynamicArray()\n probabilities = self._classifier.prob_classify(self._tweets_features(self.tweet))\n for sample in probabilities.samples():\n emotions.append((sample, probabilities.prob(sample)))\n self.probabilities.append(emotions)\n return emotions\n","sub_path":"modules/emotion_list/emotions_list.py","file_name":"emotions_list.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"457427388","text":"import os\nimport sys\n\nimport numpy as np\nimport warnings\n\n#stderr = sys.stderr\n#sys.stderr = open(os.devnull, 'w')\nfrom keras import backend as K\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.models import Sequential\nfrom keras.preprocessing import image\n#sys.stderr = stderr\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# dimensions of our images\nimg_width, img_height = 50, 50\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\n\nTHRESHOLD = 0.05\n\n\nclass Predict:\n\n\n def __init__(self):\n # build model\n\n model = Sequential()\n model.add(Conv2D(32, (3, 3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n # load model\n model.load_weights(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'model', 'model.h5'))\n self.model = model\n\n def predict_img_class(self, img):\n result = self.model.predict(img)\n if result < THRESHOLD:\n classification = \"ecell\"\n else:\n classification = \"non-ecell\"\n return classification\n\n def predict_img_class_folder(self, path):\n classifications = {}\n\n for img in os.listdir(path):\n imgpath = path + \"/\" + img\n test_image = image.load_img(imgpath, target_size=(img_width, img_height))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis=0)\n result = self.model.predict(test_image)\n if result < THRESHOLD:\n classification = \"ecell\"\n\n else:\n classification = \"non-ecell\"\n\n classifications[img] = classification\n\n return classifications\n\n \n #image_list - the list of 50 by 50 pixel images as nd.arrays()\n # center list - list of tuples of int, the centers (x,y) of each image\n def filter_positive_classifications(self, image_list, center_list):\n ecell_center_list = []\n for i, 
img in enumerate(image_list):\n if img.shape != (50,50,3):\n continue\n test_image = img.astype(np.float32)\n test_image = np.expand_dims(test_image, axis=0)\n result = self.model.predict(test_image)\n if result < THRESHOLD:\n ecell_center_list.append(center_list[i])\n return ecell_center_list\n\n\nif __name__ == '__main__':\n p = Predict()\n classes = p.predict_img_class_folder(\"cells\")\n print(classes)\n","sub_path":"digital-pathology-master_modified/src/SlideAnalysis/src/py/predictor_ori.py","file_name":"predictor_ori.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"451556688","text":"#!/usr/bin/python\n\nimport angr\nimport claripy\n\nif __name__ == '__main__':\n \n p = angr.Project('./little-asm', load_options={\"auto_load_libs\": False})\n init_state = p.factory.entry_state()\n \n # constraints\n for i in range(36):\n k = init_state.posix.files[0].read_from(1)\n init_state.se.add(k != 0)\n init_state.se.add(k != 10)\n\n init_state.posix.files[0].seek(0)\n\n simgr = p.factory.simgr(init_state)\n \n \n simgr.explore(find=lambda s: \"got it\" in s.posix.dumps(1))\n \n \n s = simgr.found[0].state\n\n print(s.posix.dumps(0))\n","sub_path":"bamboofox-ctf-2017/little-asm/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"601432755","text":"from common import run_parser\nfrom notes_common import get_notes\nimport re\n\n\ndef get_rows(game_id, contents):\n notes = get_notes(game_id, contents)\n skill_re = '^Reached skill level\\s+(\\d+)\\s+in(.*)$'\n\n output = []\n for line in notes:\n if re.match(skill_re, line[6]) is not None:\n info = re.search(skill_re, line[6])\n\n output.append(line + [\n int(info.group(1)), # skill_level\n info.group(2).strip(), # skill\n ])\n return output\n\nif __name__ == '__main__':\n run_parser(\n input_path_string='input_path',\n output_path_string='output_path',\n get_rows=get_rows,\n )\n\n","sub_path":"transforms/section-parser/skill_progression.py","file_name":"skill_progression.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"181940030","text":"from setuptools import setup, find_packages\nimport os\n\nversion = \"1.0a3\"\n\nsetup(name=\"mr.freeze\",\n version=version,\n description=\"Trigger a pdb in a running zope via a USR1 signal\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Framework :: Zope2\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords=\"zope debugging pdb\",\n author=\"David Glick\",\n author_email=\"davidglick@onenw.org\",\n url=\"http://en.wikipedia.org/wiki/Mr._Freeze\",\n license=\"BSD\",\n packages=find_packages(exclude=[\"ez_setup\"]),\n namespace_packages=[\"mr\"],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"setuptools\",\n \"z3c.deadlockdebugger\"\n ],\n )\n","sub_path":"pypi_install_script/mr.freeze-1.0a3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"93732468","text":"from sebastian.core import MIDI_PITCH, 
OFFSET_64, DURATION_64\nfrom sebastian.core import Point, OSequence\n\nfrom sebastian.core.notes import modifiers, letter\n\n\ndef add(properties):\n    def _(point):\n        point.update(properties)\n        return point\n    return lambda seq: seq.map_points(_)\n\n\ndef degree_in_key(key):\n    def _(point):\n        degree = point[\"degree\"]\n        pitch = key.degree_to_pitch(degree)\n        point[\"pitch\"] = pitch\n        return point\n    return lambda seq: seq.map_points(_)\n\n\ndef degree_in_key_with_octave(key, base_octave):\n    def _(point):\n        degree = point[\"degree\"]\n        pitch, octave = key.degree_to_pitch_and_octave(degree)\n        point[\"pitch\"] = pitch\n        point[\"octave\"] = octave + base_octave\n        return point\n    return lambda seq: seq.map_points(_)\n\n\ndef transpose(semitones):\n    def _(point):\n        if MIDI_PITCH in point:\n            point[MIDI_PITCH] = point[MIDI_PITCH] + semitones\n        return point\n    return lambda seq: seq.map_points(_)\n\n\ndef stretch(multiplier):\n    def _(point):\n        point[OFFSET_64] = int(point[OFFSET_64] * multiplier)\n        if DURATION_64 in point:\n            point[DURATION_64] = int(point[DURATION_64] * multiplier)\n        return point\n    return lambda seq: seq.map_points(_)\n\n\ndef invert(midi_pitch_pivot):\n    def _(point):\n        if MIDI_PITCH in point:\n            interval = point[MIDI_PITCH] - midi_pitch_pivot\n            point[MIDI_PITCH] = midi_pitch_pivot - interval\n        return point\n    return lambda seq: seq.map_points(_)\n\n\ndef reverse():\n    def _(sequence):\n        new_elements = []\n        last_offset = sequence.next_offset()\n        if sequence and sequence[0][OFFSET_64] != 0:\n            old_sequence = OSequence([Point({OFFSET_64: 0})]) + sequence\n        else:\n            old_sequence = sequence\n        for point in old_sequence:\n            new_point = Point(point)\n            new_point[OFFSET_64] = last_offset - new_point[OFFSET_64] - new_point.get(DURATION_64, 0)\n            if new_point != {OFFSET_64: 0}:\n                new_elements.append(new_point)\n        return OSequence(sorted(new_elements, key=lambda x: x[OFFSET_64]))\n    return _\n\n\ndef midi_pitch():\n    def _(point):\n        octave = point[\"octave\"]\n        pitch = point[\"pitch\"]\n        midi_pitch = [2, 9, 4, 11, 5, 0, 7][pitch % 7]\n        midi_pitch += modifiers(pitch)\n        midi_pitch += 12 * octave\n        point[MIDI_PITCH] = midi_pitch\n        return point\n    return lambda seq: seq.map_points(_)\n\n\ndef lilypond():\n    def _(point):\n        octave = point[\"octave\"]\n        pitch = point[\"pitch\"]\n        duration = point[DURATION_64]\n        if octave > 4:\n            octave_string = \"'\" * (octave - 4)\n        elif octave < 4:\n            octave_string = \",\" * (4 - octave)\n        else:\n            octave_string = \"\"\n        m = modifiers(pitch)\n        if m > 0:\n            modifier_string = \"is\" * m\n        elif m < 0:\n            modifier_string = \"es\" * -m  # negate m so a flat count repeats \"es\" instead of multiplying by a negative and yielding \"\"\n        else:\n            modifier_string = \"\"\n        pitch_string = letter(pitch).lower() + modifier_string\n        duration_string = str(64 // duration)  # integer division keeps \"16\" rather than \"16.0\" under Python 3; @@@ doesn't handle dotted notes yet\n        point[\"lilypond\"] = \"%s%s%s\" % (pitch_string, octave_string, duration_string)\n        return point\n    return lambda seq: seq.map_points(_)\n","sub_path":"sebastian/core/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"49010091","text":"def disjoint1(A,B,C):\n\n    \"\"\"Return True if there is no common element in all three lists\"\"\"\n    for a in A:\n        for b in B:\n            if a == b:\n                for c in C:\n                    if c == a:\n                        return False\n    return True\n\n\nif __name__ == '__main__':\n\n    A = [1,2,3]\n    B = [4,3,6]\n    C = [7,8,3]\n    
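# Hedged extra check (added example): these lists overlap pairwise, but no\n    # single value appears in all three, so disjoint1 should return True.\n    D, E, F = [1, 2], [2, 4], [4, 1]\n    print(disjoint1(D, E, F))\n    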
print(disjoint1(A,B,C))\n\n","sub_path":"DS_And_Algo_in_Python_Book/3.AlgoAnalysis/disjoint2.py","file_name":"disjoint2.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"151709820","text":"#!./env python\n\nimport torch\nimport torch.nn as nn\nfrom ..utils import Logger, AverageMeter, accuracy\nfrom ..utils import ExampleTracker, AvgTracker\nfrom . import attack, scale_step\nfrom .loss import trades_loss\nimport time\n\n__all__ = ['AdTrainer']\n\nclass AdTrainer:\n \"\"\"\n wrapping class for adversary training, including logging\n \"\"\"\n def __init__(self, loaders, net, optimizer, criterion=None, config=None, time_start=None):\n self.loaders = loaders\n self.net = net\n self.optimizer = optimizer # only used for getting current learning rate\n self.criterion = criterion\n self.config = config\n self.device = self.config.device\n self.time_start = time_start\n\n # target\n self.target = None\n if config.target is not None:\n self.target = loaders.class_to_idx[config.target]\n\n # scale epsilon (each channel is different because different range)\n ## save unscaled eps for auto attack\n config.eps_ = config.eps\n config.pgd_alpha_ = config.pgd_alpha\n config.eps = scale_step(config.eps, config.dataset, device=config.device)\n config.pgd_alpha = scale_step(config.pgd_alpha, config.dataset, device=config.device)\n print('scaled eps [train]:', config.eps, config.pgd_alpha)\n\n # sanity check and setup loss function\n self.__ad_setup()\n self.epoch = 0\n\n def _loss(self, inputs, labels, weights, epoch=None):\n # template\n pass\n\n def update(self, epoch, i):\n # make some logs\n\n if self.extra_metrics:\n self.extraLog.step(epoch, i)\n\n if self.config.exTrack:\n self.exLog.step(epoch)\n\n self.epoch = epoch + 1\n\n def reset(self, epoch):\n assert(epoch == self.epoch - 1), 'reset is not called after update!'\n\n ## reset some logger\n if self.extra_metrics:\n self.extraLog.reset()\n\n if self.config.exTrack:\n self.exLog.reset()\n\n def close(self):\n if self.extra_metrics:\n self.extraLog.close()\n\n\n def _clean_loss(self, inputs, labels, weights, epoch=None):\n outputs = self.net(inputs)\n loss = self.criterion(outputs, labels)\n \n # keep for record\n if self.extra_metrics:\n prec1, = accuracy(outputs.data, labels.data)\n self.extraLog.update({'Train-Loss': loss.mean().item(),\n 'Train-Acc': prec1.item()},\n inputs.size(0))\n\n if self.config.exTrack:\n self.exLog.update(outputs, labels, weights['index'].to(self.device), epoch=self.epoch)\n\n return loss.mean()\n\n def _ad_loss(self, inputs, labels, weights, epoch=None):\n\n # -------- clean loss\n loss = 0.\n # if pure ad loss and sample-wise alpha not enabled, don't have to do this part\n if self.config.alpha < 1.0:\n loss = self._clean_loss(inputs, labels, weights)\n\n # ------- ad loss\n self.net.eval()\n ctr = nn.CrossEntropyLoss() # Don't change the criterion in adversary generation part -- maybe change it later\n inputs_ad = attack(self.net, ctr, inputs, labels, weight=None,\n adversary=self.config.adversary,\n eps=self.config.eps,\n pgd_alpha=self.config.pgd_alpha,\n pgd_iter=self.config.pgd_iter,\n randomize=self.config.rand_init,\n target=self.target,\n config=self.config)\n self.net.train()\n outputs_ad = self.net(inputs_ad)\n loss_ad = self.criterion(outputs_ad, labels)\n\n # -------- combine two loss\n loss *= (1 - self.config.alpha)\n loss += self.config.alpha * loss_ad\n loss = loss.mean()\n\n # -------- recording\n if 
self.extra_metrics:\n            prec1_ad, = accuracy(outputs_ad.data, labels.data)\n            self.extraLog.update({'Train-Loss-Ad': loss_ad.mean().item(),\n                                  'Train-Acc-Ad': prec1_ad.item()},\n                                 inputs.size(0))\n\n        if self.config.exTrack:\n            self.exLog.update(outputs_ad, labels, weights['index'].to(self.device), epoch=self.epoch)\n\n        return loss\n\n    def _trades_loss(self, inputs, labels, weights, epoch=None):\n        loss, outputs_ad = trades_loss(self.net, inputs, labels, weights,\n                                       eps=self.config.eps,\n                                       alpha=self.config.pgd_alpha,\n                                       num_iter=self.config.pgd_iter,\n                                       norm='linf',\n                                       rand_init=self.config.rand_init,\n                                       config=self.config)\n\n        # -------- recording\n        if self.extra_metrics:\n            prec1_ad, = accuracy(outputs_ad.data, labels.data)\n            # log the TRADES loss returned above; loss_ad is not defined in this method\n            self.extraLog.update({'Train-Loss-Ad': loss.mean().item(),\n                                  'Train-Acc-Ad': prec1_ad.item()},\n                                 inputs.size(0))\n\n        if self.config.exTrack:\n            # Generate ad examples using PGD, otherwise not fair!\n            outputs_ad = self.__get_pgd_ad(inputs, labels)\n            self.exLog.update(outputs_ad, labels, weights['index'].to(self.device), epoch=self.epoch)\n\n        return loss\n\n    def __ad_setup(self):\n        base_names = ['Epoch', 'Mini-batch', 'lr', 'Time-elapse(Min)']\n        self.logger_e = Logger('log_extra.txt', title='log for deprecated metrics', resume=self.config.resume)\n\n        if not self.config.adversary:\n            self._loss = self._clean_loss\n\n            # log for 'false' training loss and acc, aligned with previous work\n            self.extra_metrics = ['Train-Loss', 'Train-Acc']\n            self.extraLog = AvgTracker('log_extra',\n                                       self.optimizer,\n                                       metrics=self.extra_metrics,\n                                       time_start=self.time_start,\n                                       config=self.config)\n\n            # log for sample robust correctness\n            if self.config.exTrack:\n                self.exLog = ExampleTracker(self.loaders, resume=self.config.resume)\n\n            return\n\n        if self.config.adversary in ['gaussian', 'fgsm', 'pgd', 'aa']:\n            self._loss = self._ad_loss\n\n            # log for 'false' training loss and acc, aligned with previous work\n            self.extra_metrics = ['Train-Loss', 'Train-Acc', 'Train-Loss-Ad', 'Train-Acc-Ad']\n            self.extraLog = AvgTracker('log_extra',\n                                       self.optimizer,\n                                       metrics=self.extra_metrics,\n                                       time_start=self.time_start,\n                                       config=self.config)\n\n            # log for sample robust correctness\n            if self.config.exTrack:\n                self.exLog = ExampleTracker(self.loaders, resume=self.config.resume)\n\n            return\n\n        # other\n        if self.config.target:\n            raise NotImplementedError('Targeted attack not supported! 
TODO..')\n\n if self.config.adversary == 'trades':\n self._loss = self._trades_loss\n\n # log for 'false' training loss and acc, aligned with previous work\n self.extra_metrics = ['Train-Loss-Ad', 'Train-Acc-Ad']\n self.extraLog = AvgTracker('log_extra',\n self.optimizer,\n metrics=self.extra_metrics,\n time_start=self.time_start,\n config=self.config)\n\n if self.config.exTrack:\n self.exLog = ExampleTracker(self.loaders, resume=self.config.resume)\n\n return\n\n raise KeyError('Unexpected adversary %s' % self.config.adversary)\n\n def __get_pgd_ad(self, inputs, labels):\n self.net.eval()\n ctr = nn.CrossEntropyLoss() # Don't change the criterion in adversary generation part -- maybe change it later\n inputs_ad = attack(self.net, ctr, inputs, labels, weight=None,\n adversary='pgd',\n eps=self.config.eps,\n pgd_alpha=self.config.pgd_alpha,\n pgd_iter=self.config.pgd_iter,\n randomize=self.config.rand_init,\n target=self.target,\n config=self.config)\n # don't affect the training stats, do this in eval mode\n outputs_ad = self.net(inputs_ad)\n self.net.train()\n return outputs_ad\n\n def __get_lr(self):\n lrs = [param_group['lr'] for param_group in self.optimizer.param_groups]\n assert(len(lrs) == 1)\n return lrs[0]\n","sub_path":"src/adversary/defender.py","file_name":"defender.py","file_ext":"py","file_size_in_byte":8970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"316412053","text":"import maya.cmds as cmds\n\ndef findDuplicates():\n\n selExtended = cmds.ls(transforms=True)\n # Find all objects that have the same shortname as another\n # We can indentify them because they have | in the name\n duplicates = [f for f in selExtended if '|' in f]\n # Sort them by hierarchy so that we don't rename a parent before a child.\n duplicates.sort(key=lambda obj: obj.count('|'), reverse=True)\n\n # if we have duplicates, rename them\n if duplicates:\n return duplicates\n else:\n return False\n\n\ndup=(findDuplicates())\n\nif dup:\n cmds.select(dup, replace=True)\nelse:\n cmds.confirmDialog(title=\"DON'T PANIC !\",\n message='There is no duplicate inside your scene')\n\n\n","sub_path":"Maya/divers/findDuplicates.py","file_name":"findDuplicates.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"111346086","text":"'''\nThis is the main implementation file for the form widgets. These widgest are designed\nto give clean and automated access to a database. 
The Database class is assumed to\nalready have the database open and ready to use.\n\nThese widgets are intended to be used with the forms module that is included in this\nsource code.\n'''\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport tkinter.font as font\nfrom tkinter.messagebox import *\nfrom system.database import Database\nfrom system.logger import *\n\nclass toolTip:\n '''\n This class binds a tool tip to a widget.\n '''\n\n def __init__(self, widget, text):\n self.widget = widget\n self.text = text\n self.widget.bind(\"\", self.enter)\n self.widget.bind(\"\", self.close)\n\n def enter(self, event=None):\n x = y = 0\n x, y, cx, cy = self.widget.bbox(\"insert\")\n x += self.widget.winfo_rootx() + 25\n y += self.widget.winfo_rooty() + 20\n # creates a toplevel window\n self.tw = tk.Toplevel(self.widget)\n # Leaves only the label and removes the app window\n self.tw.wm_overrideredirect(True)\n self.tw.wm_geometry(\"+%d+%d\" % (x, y))\n label = tk.Label(self.tw, text=self.text, justify='left',\n background='yellow', relief='solid', borderwidth=1,\n font=(\"times\", \"10\", \"normal\"))\n label.pack(ipadx=1)\n\n def close(self, event=None):\n if self.tw:\n self.tw.destroy()\n\n@class_wrapper\nclass _form_widget_base(tk.Frame):\n '''\n This class implements the basic functions for all of the form widgets and\n invokes the database.\n '''\n\n def __init__(self, owner, table, column):\n super().__init__(owner)\n self.logger.set_level(Logger.DEBUG)\n self.data = Database.get_instance()\n self.owner = owner\n self.table = table\n self.column = column\n self.row_id = None\n self.changed = False\n\n @func_wrapper\n def getter(self):\n '''\n Stub function. The getter reads the widget and saves the data into the database as\n a single row/column entry.\n '''\n\n @func_wrapper\n def setter(self):\n '''\n Stub function. The setter reads the database as a single row/column entry and saves\n it into the widget.\n '''\n\n @func_wrapper\n def clear(self):\n '''\n Clears the widget.\n '''\n\n @func_wrapper\n def populate(self):\n '''\n For compound widgets, this can grab a list from the database and place it into the\n widget. For other widgets, it simply does the same as a setter().\n '''\n self.setter()\n\n @func_wrapper\n def get_insert_value(self):\n '''\n This is used when inserting a new row into the database.\n '''\n return self.column, self._read_value()\n\n @func_wrapper\n def _read_value(self):\n '''\n This is a \"regular\" way to obtain the value of a widget. This method must have an\n override. It's not supported for fields that have no getter().\n '''\n raise Exception('The _read_value method not supported for this widget.')\n\n @func_wrapper\n def set_row_id(self, row_id):\n '''\n This must be called before the value of the widget can be read or written.\n '''\n self.row_id = row_id\n\n @func_wrapper\n def is_changed(self, clear_flag=False):\n '''\n Return whether the control has changed or not.\n '''\n val = self.changed\n if clear_flag:\n self.changed = False\n return val\n\n @func_wrapper\n def get_column(self):\n '''\n Return the column.\n '''\n return self.column\n\n @func_wrapper\n def get_table(self):\n '''\n Return the table.\n '''\n return self.table\n\n @func_wrapper\n def check_dupes(self):\n '''\n Check for duplicate entries using the table and column along with the value\n held by the widget. Returns a list of dicts containg the entire rows where\n the data in the coumn might match. 
If there are no matches, then returns\n        an empty list.\n        '''\n        return self.data.check_dups(self.table, self.column, self._read_value())\n\n    @func_wrapper\n    def _bind_key(self, event=None):\n        '''\n        Callback for key binding to detect if widget has changed.\n        '''\n        self.changed = True\n\n@class_wrapper\nclass formEntry(_form_widget_base):\n    '''\n    Wrapper for the tkinter Entry widget.\n    '''\n\n    def __init__(self, owner, table, column, _type, tool_tip=None, **kw):\n        super().__init__(owner, table, column)\n        self.logger.set_level(Logger.DEBUG)\n        self._type = _type\n\n        self.value = tk.StringVar(self)\n        self.widget = tk.Entry(self, textvariable=self.value, **kw)\n        self.widget.grid()\n        self.widget.bind('<Key>', self._bind_key)\n        if not tool_tip is None:\n            self.tool_tip = toolTip(self, tool_tip)\n\n    @func_wrapper\n    def getter(self):\n        val = self._read_value()\n        self.data.write_single_value(self.table, self.column, self.row_id, val)\n\n    @func_wrapper\n    def setter(self):\n        state = self.widget.configure()['state']\n        if state == 'readonly':\n            self.widget.configure(state='normal')\n\n        val = self.data.read_single_value(self.table, self.column, self.row_id)\n        if val is None:\n            self.value.set('')\n        else:\n            self.value.set(str(val))\n\n        if state == 'readonly':\n            self.widget.configure(state='readonly')\n\n    @func_wrapper\n    def clear(self):\n        state = self.widget.configure()['state']\n        if state == 'readonly':\n            self.widget.configure(state='normal')\n\n        self.value.set('')\n\n        if state == 'readonly':\n            self.widget.configure(state='readonly')\n\n    @func_wrapper\n    def _read_value(self):\n        val = self._type(self.value.get())\n        # This enforces the NOT NULL clause in the database structure\n        if val == '':\n            return None\n        else:\n            return val\n\n@class_wrapper\nclass formText(_form_widget_base):\n\n    def __init__(self, owner, table, column, tool_tip=None, **kw):\n        super().__init__(owner, table, column)\n        self.logger.set_level(Logger.DEBUG)\n\n        self.local_frame = tk.Frame(self, bd=1, relief=tk.RIDGE)\n        self.widget = tk.Text(self.local_frame, wrap=tk.NONE, **kw)\n        self.widget.insert(tk.END, '')\n        self.widget.grid(row=0, column=0, sticky='nw')\n\n        self.vsb = tk.Scrollbar(self.local_frame, orient=tk.VERTICAL)\n        self.vsb.config(command=self.widget.yview)\n        self.widget.config(yscrollcommand=self.vsb.set)\n        self.vsb.grid(row=0, column=1, sticky='nse')\n\n        self.hsb = tk.Scrollbar(self.local_frame, orient=tk.HORIZONTAL)\n        self.hsb.config(command=self.widget.xview)\n        self.widget.config(xscrollcommand=self.hsb.set)\n        self.hsb.grid(row=1, column=0, sticky='wes')\n\n        self.local_frame.grid(row=0, column=1, sticky='w')\n        self.widget.bind('<Key>', self._bind_key)\n        if not tool_tip is None:\n            self.tool_tip = toolTip(self, tool_tip)\n\n    @func_wrapper\n    def getter(self):\n        value = self._read_value()\n        self.data.write_single_value(self.table, self.column, self.row_id, value)\n\n    @func_wrapper\n    def setter(self):\n        value = self.data.read_single_value(self.table, self.column, self.row_id)\n        self.widget.delete('1.0', tk.END)\n        if not value is None:\n            self.widget.insert(tk.END, str(value))\n\n    @func_wrapper\n    def clear(self):\n        self.widget.delete('1.0', tk.END)\n\n    @func_wrapper\n    def _read_value(self):\n        return self.widget.get(1.0, tk.END)\n\n@class_wrapper\nclass formCombobox(_form_widget_base):\n\n    def __init__(self, owner, val_tab, val_col, pop_tab, pop_col, tool_tip=None, **kw):\n        super().__init__(owner, val_tab, val_col)\n        self.logger.set_level(Logger.DEBUG)\n\n        self.pop_tab = pop_tab\n        self.pop_col = pop_col\n\n        self.widget = ttk.Combobox(self, 
state='readonly', **kw)\n        self.populate()\n        self.widget.grid()\n        self.widget.bind(\"<<ComboboxSelected>>\", self._bind_key)\n        if not tool_tip is None:\n            self.tool_tip = toolTip(self, tool_tip)\n\n    @func_wrapper\n    def getter(self):\n        value = self._read_value()\n        self.data.write_single_value(self.table, self.column, self.row_id, value)\n\n    @func_wrapper\n    def setter(self):\n        self.populate()\n        value = self.data.read_single_value(self.table, self.column, self.row_id)\n        # Bug Fix.\n        # the value in the database can never be 0 or None. It has to be a row ID\n        # of a table, which starts at 1. So, if we find a None or a 0 here, set it\n        # to a reasonable default.\n        if value is None or value == 0:\n            self.data.write_single_value(self.table, self.column, self.row_id, 1)\n            self.data.commit()\n            self.widget.current(0)\n        else:\n            # add or subtract 1 to convert database value to widget index.\n            self.widget.current(int(value)-1)\n\n    @func_wrapper\n    def clear(self):\n        try:\n            self.widget.current(0)\n        except tk.TclError:\n            pass  # empty content is not an error\n\n    @func_wrapper\n    def populate(self):\n        self.widget['values'] = self.data.get_column_list(self.pop_tab, self.pop_col)\n\n    @func_wrapper\n    def _read_value(self):\n        val = self.widget.current()+1\n        self.logger.debug('combo read value = %d'%(val))\n        return val\n\n@class_wrapper\nclass formDynamicLabel(_form_widget_base):\n\n    def __init__(self, owner, table, column, tool_tip=None, **kw):\n        super().__init__(owner, table, column)\n        self.logger.set_level(Logger.DEBUG)\n\n        self.value = tk.StringVar(self)\n        self.widget = tk.Label(self, textvariable=self.value, **kw)\n        self.widget.grid()\n        if not tool_tip is None:\n            self.tool_tip = toolTip(self, tool_tip)\n\n    @func_wrapper\n    def setter(self):\n        value = self.data.read_single_value(self.table, self.column, self.row_id)\n        if value is None:\n            self.value.set('')\n        else:\n            self.value.set(str(value))\n\n    @func_wrapper\n    def getter(self):\n        pass\n\n    @func_wrapper\n    def _read_value(self):\n        pass\n\n@class_wrapper\nclass formIndirectLabel(_form_widget_base):\n\n    def __init__(self, owner, val_tab, val_col, rem_tab, rem_col, tool_tip=None, **kw):\n        super().__init__(owner, val_tab, val_col)\n        self.logger.set_level(Logger.DEBUG)\n\n        self.rem_tab = rem_tab\n        self.rem_col = rem_col\n\n        self.value = tk.StringVar(self)\n        self.widget = tk.Label(self, textvariable=self.value, **kw)\n        self.widget.grid()\n        if not tool_tip is None:\n            self.tool_tip = toolTip(self, tool_tip)\n\n    # @func_wrapper\n    # def getter(self):\n    #     # This is the name\n    #     value = self.value.get()\n    #     # find the row ID where the name matches in the rem_tab\n    #     self.row_id = self._read_value()\n    #     # set the value with the row_id\n    #     self.data.write_single_value(self.table, self.column, self.row_id, id)\n\n    @func_wrapper\n    def setter(self):\n        # this is the ID\n        self.logger.debug(\"indirect label setter: table = %s, column = %s, row_id = %s\"%(self.table, self.column, self.row_id))\n        id = self.data.read_single_value(self.table, self.column, self.row_id)\n        if id is None:\n            self.value.set('')\n        else:\n            # find the value with the row ID in the table\n            value = self.data.read_single_value(self.rem_tab, self.rem_col, id)\n            # set the widget value\n            self.value.set(str(value))\n\n    @func_wrapper\n    def clear(self):\n        self.value.set('')\n\n    # @func_wrapper\n    # def _read_value(self):\n    #     return self.data.get_row_id(self.rem_tab, self.rem_col, value)\n\n@class_wrapper\nclass formTitle(_form_widget_base):\n\n    def __init__(self, owner, value, **kw):\n        super().__init__(owner, None, None)\n        
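# formTitle is display-only: table and column are None, so the inherited\n        # getter()/setter() stubs are used and no database traffic occurs.\n        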
self.logger.set_level(Logger.DEBUG)\n\n        self.widget = tk.Label(self, text=value, font=(\"Helvetica\", 14), **kw)\n        self.widget.grid()\n\n@class_wrapper\nclass formCheckbox(_form_widget_base):\n\n    def __init__(self, owner, table, column, tool_tip=None, **kw):\n        super().__init__(owner, table, column)\n        self.logger.set_level(Logger.DEBUG)\n\n        self.value = tk.BooleanVar()\n        self.widget = tk.Checkbutton(self, var=self.value, command=self._bind_key, **kw)\n        self.widget.grid()\n        if not tool_tip is None:\n            self.tool_tip = toolTip(self, tool_tip)\n\n    @func_wrapper\n    def getter(self):\n        val = self._read_value()\n        self.data.write_single_value(self.table, self.column, self.row_id, val)\n\n    @func_wrapper\n    def setter(self):\n        val = self.data.read_single_value(self.table, self.column, self.row_id)\n        self.value.set(val)\n\n    @func_wrapper\n    def clear(self):\n        self.value.set(0)\n\n    @func_wrapper\n    def _read_value(self):\n        return int(self.value.get())  # plain int(); self.int does not exist\n\n# Note that radio buttons are not represented in this library. I don't need them for the\n# applications that I am writing because I use a combo box instead. They take up less\n# room on the form.
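\n\n# --- Hedged usage sketch (added example; the 'people' table, 'name' column and\n# surrounding Tk setup are hypothetical) ---\n# root = tk.Tk()\n# field = formEntry(root, 'people', 'name', str, tool_tip='Full name')\n# field.grid()\n# field.set_row_id(1)   # required before getter()/setter()\n# field.setter()        # load row 1's 'name' into the widget\n# field.getter()        # write the widget text back to the database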
Continue.\".format(name=tablename))\n pass\n\n def insertData(self, intable,uuids):\n\n insertSql = \"INSERT INTO {table} VALUES({id}, '{uuid}')\"\n for cid, cuuid in enumerate(uuids):\n try:\n self.cursor.execute(insertSql.format(table=intable, id=cid, uuid=cuuid))\n except IndentationError as ex:\n print(\"ID %d has existed\" %cid)\n raise ex\n\n def generateUuids(self, num):\n return (uuid.uuid4() for i in range(num))\n\n def commit(self):\n\n self.conn.commit()\n self.conn.close()\n\n\n# test\n# myUuid = Uuidhandle('vincent.db')\n# myUuid.createTable('test3')\n# uuids = myUuid.generateUuids(600)\n# myUuid.insertData('test3',uuids)\n# myUuid.commit()\n","sub_path":"0001.py","file_name":"0001.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"248789019","text":"from flask import make_response, jsonify\nfrom tool_example_app.auth.jwt_auth import auth\nfrom .settings import FLASK_SERVER_NAME\n\nSERVER_ERROR_500 = ({\"message\": \"An error occured.\"}, 500)\nNOT_FOUND_404 = ({\"message\": \"Resource could not be found.\"}, 404)\nNO_INPUT_400 = ({\"message\": \"No input data provided.\"}, 400)\nINVALID_INPUT_422 = ({\"status\": 1, \"message\": \"Invalid input.\"}, 422)\n\nPASSWORD_INVALID_421 = ({\"message\": \"Invalid password.\"}, 421)\nALREADY_EXIST = ({\"status\": 1, \"message\": \"Already exists.\"}, 409)\n\nDOES_NOT_EXIST = ({\"message\": \"Does not exists.\"}, 409)\nNOT_ADMIN = ({\"message\": \"Admin permission denied.\"}, 998)\nHEADER_NOT_FOUND = ({\"message\": \"Header does not exists.\"}, 999)\n\n\n@auth.error_handler\ndef unauthorized():\n return make_response(jsonify(\n {\n 'authEndpoint': FLASK_SERVER_NAME + '/api/v1/token/login',\n 'reason': 'No token has been sent.'\n }\n ), 401)\n\n\nclass CustomFlaskErr(Exception):\n status_code = 400\n\n def __init__(self, status_code=None, return_code=None, action_status=None, playbook=None):\n super().__init__(self)\n self.return_code = return_code\n self.status_code = status_code\n self.action_status = action_status\n self.playbook = playbook\n\n def to_dict(self):\n rv = dict()\n if self.playbook is not None:\n rv['data'] = self.playbook\n else:\n print(self.playbook)\n rv['action_status'] = self.action_status\n # rv['message'] = error_list.get(self.return_code)\n rv['return_code'] = self.return_code\n rv['status_code'] = self.status_code\n print(rv)\n return rv\n\n\n","sub_path":"tool_example_app/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"7794468","text":"# 1)What if the given array is already sorted? How would you optimize your algorithm?\n# 用two pointers 比较\n#\n# 2)What if nums1's size is small compared to num2's size? 
\nimport collections\n\n\nclass Solution(object):\n    def intersect(self, nums1, nums2):\n        \"\"\"\n        :type nums1: List[int]\n        :type nums2: List[int]\n        :rtype: List[int]\n        \"\"\"\n        if len(nums1) > len(nums2):\n            return self.intersect(nums2, nums1)\n\n        lookup = collections.Counter(nums1)\n\n        res = []\n        for i in nums2:\n            if lookup[i] > 0:\n                res.append(i)\n                lookup[i] -= 1\n        return res\n\n# If the given array is not sorted, and the memory is limited.\n# Time: O(max(m, n) * log(max(m, n)))\n# Space: O(1)\n# Two pointers solution.\nclass Solution(object):\n    def intersect(self, nums1, nums2):\n        \"\"\"\n        :type nums1: List[int]\n        :type nums2: List[int]\n        :rtype: List[int]\n        \"\"\"\n        if nums1 is None or len(nums1) == 0:\n            return []\n        if nums2 is None or len(nums2) == 0:\n            return []\n        nums1, nums2 = sorted(nums1), sorted(nums2)\n        res = []\n        i = j = 0\n        while i < len(nums1) and j < len(nums2):\n            if nums1[i] < nums2[j]:\n                i += 1\n            elif nums1[i] > nums2[j]:\n                j += 1\n            else:\n                res.append(nums1[i])\n                i, j = i + 1, j + 1\n        return res\n\nclass Solution(object):\n    def intersect(self, nums1, nums2):\n        \"\"\"\n        :type nums1: List[int]\n        :type nums2: List[int]\n        :rtype: List[int]\n        \"\"\"\n        dic = dict()\n        res = []\n        for v in nums1:\n            if v not in dic:\n                # [counter in nums1, counter in nums2]\n                dic[v] = [1,0]\n            else:\n                dic[v][0] += 1\n        for v in nums2:\n            if v in dic:\n                dic[v][1] += 1\n        for k,v in dic.items():\n            # v appears in both lists\n            if v[0]*v[1] > 0:\n                for i in range(min(v[0],v[1])):\n                    res.append(k)\n        return res\n","sub_path":"python/350. Intersection of Two Arrays II.py","file_name":"350. 
Intersection of Two Arrays II.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"49439709","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Database management and initialization\"\"\"\nimport os\n\n\ndef makedirs_touch(path):\n \"\"\"Creates the file and all parent directories in the supplied path\"\"\"\n basedir = os.path.dirname(path)\n if not os.path.exists(basedir):\n os.makedirs(basedir)\n\n with open(path, 'a'):\n os.utime(path, None)\n","sub_path":"pyjournal/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"274452562","text":"import csv, os\nfrom tkinter import filedialog\nfrom tkinter import *\nimport xml.etree.ElementTree as ET\n\n# Author: Alex Vorpahl\n# EXE Compile: \n# pyinstaller.exe --onefile --windowed tibco.py\n\n# All accounts that are members of the following group: \n# $admin (\"Administrators\")\n#\n# All accounts that are assigned to the following role within TIBCO Admin Console\n# Super Users\n#\n# All account assigned the following permissions within the TIBCO Admin console: \n# Administer - provision access to accounts\n# Write - start and stop processes, add/modify/delete processes\n\n\ndef select_directory():\n root = Tk()\n root.withdraw()\n current_path = os.getcwd()\n root.directoryname = filedialog.askdirectory(title = \"Select TIBCO XML directory\", initialdir=current_path)\n filelisting = []\n filelisting_fullpath = []\n filelisting = os.listdir(root.directoryname)\n for files in filelisting:\n if \".xls\" not in files:\n fullpath = os.path.join(root.directoryname, files)\n if os.path.isfile(fullpath):\n filelisting_fullpath.append(os.path.join(root.directoryname, files))\n\n return filelisting_fullpath\n\ndef selecting_files(file_type):\n root = Tk()\n root.withdraw()\n \n root.filename = filedialog.askopenfilename(title = \"Select file: \" + file_type,filetypes = ((\"csv files\",\"*.csv\"),(\"all files\",\"*.*\")))\n return root.filename\n\ndef write_files(headers, filename, output_list, output_filename, write_stage):\n with open(output_filename, write_stage, newline = '') as output:\n writer = csv.writer(output)\n if write_stage == 'w':\n writer.writerow(headers)\n for x in output_list:\n writer.writerow([filename, x])\n\ndef readallthefiles(filename):\n tree = ET.parse(filename)\n root = tree.getroot()\n\n users = []\n roles = []\n roledict = {}\n\n for elem in root:\n # Access control lists where OP = Write or OP = Admin are privileged\n # Results will return either an inscope user or inscope role; add to the respective list\n if elem.tag == 'accessControlList':\n attribute = (elem.attrib)\n for k, v in attribute.items():\n if (k == 'op' and v == 'Write') or (k == 'op' and v == 'Admin'):\n for subelem in elem:\n if subelem.tag == 'user':\n users.append(subelem.text)\n elif subelem.tag == 'role':\n roles.append(subelem.text)\n else:\n print(\"Undefined tag\")\n # Add all roles to a dictionary; Roles are defined as either the or tags\n elif elem.tag == 'role' or elem.tag == 'superRole':\n for k, v in elem.items():\n rolename = (v)\n for subelem in elem:\n if subelem.tag == 'members':\n for members_layer in subelem:\n if not rolename in roledict:\n roledict[rolename] = [members_layer.text]\n else:\n roledict[rolename].append(members_layer.text)\n\n # For the inscope roles, add the users associated to the role to the users list\n # Note: 
Super Users role does not get triggered by the OP codes; this group would always be in scope, so a condition checks for this\n    for role in set(roles):\n        if role[:2] != 'cn':\n            for k, v in roledict.items():\n                if role in k or k == 'Super Users':\n                    for role_users in v:\n                        users.append(role_users)\n\n    users = set(users)\n    return users\n\ndef main():\n    try:\n        # Initialize default value variables\n        filelist = []\n        headers = ['File', 'Users']\n        write_stage = 'w'\n\n        # Have the user select the relevant file paths\n        filelist = select_directory()\n        output_filename = selecting_files(\"Output File\")\n\n        for files in filelist:\n            # Parse XML\n            users_dict = readallthefiles(files)\n            filename = files[files.find('export'):].split('.')[1]\n            # Write files to csv and set the write method to append\n            write_files(headers, filename, users_dict, output_filename, write_stage)\n            write_stage = 'a'\n    except:\n        print(\"There was an error\")\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Tibco.py","file_name":"Tibco.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"528399565","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport roslib\nroslib.load_manifest('robot_guidance')\nimport rospy\nimport cv2\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom std_msgs.msg import Float32, Int8\nimport numpy as np\n\nclass dummy_robot:\n    def __init__(self):\n        rospy.init_node('dummy_robot', anonymous=True)\n        self.bridge = CvBridge()\n        self.image_pub = rospy.Publisher(\"image_raw\", Image, queue_size=1)\n        self.reward_pub = rospy.Publisher(\"reward\", Float32, queue_size=1)\n        self.position_pub = rospy.Publisher(\"position\", Float32, queue_size=1)\n        self.action_sub = rospy.Subscriber(\"action\", Int8, self.callback_action)\n        self.action = 0\n        self.pan = 100\n        self.reward = 0\n        self.cv_image = np.zeros((480,640,3), np.uint8)\n        self.cv_image.fill(255)\n        self.image = self.bridge.cv2_to_imgmsg(self.cv_image, encoding=\"bgr8\")\n        self.arrow_cv_image = np.zeros((100,640,3), np.uint8)\n        self.arrow_cv_image.fill(255)\n        self.image_timer = rospy.Timer(rospy.Duration(0.033), self.callback_image_timer)\n        self.reward_timer = rospy.Timer(rospy.Duration(0.2), self.callback_reward_timer)\n        self.count = 0\n        self.prev_count = -1\n\n    def callback_image_timer(self, data):\n        self.cv_image.fill(255)\n        cv2.circle(self.cv_image, (640 // 2 + self.pan, 480 // 2), 200, (0, 255, 0), -1)  # integer division keeps pixel coords ints under Python 3\n        self.image = self.bridge.cv2_to_imgmsg(self.cv_image, encoding=\"bgr8\")\n        self.image_pub.publish(self.image)\n\n    def callback_action(self, data):\n        action_list = [0, -10, 10]\n        self.action = data.data\n        if (self.action < 0 or self.action >= 3):\n            return\n        self.pan += action_list[self.action]\n        self.count += 1\n        print(\"callback_action: \"+str(self.count))\n        if ((self.count % 30) == 0):\n            self.pan = int(np.random.rand() * 400 - 200)\n            print(\"change pan angle\")\n\n        pt1 = (320, 50)\n        if (self.action == 1):\n            pt2 = (320 - 200, 50)\n        elif (self.action == 2):\n            pt2 = (320 + 200, 50)\n        elif (self.action == 3):\n            pt2 = (320, 50 + 50)\n        else:\n            pt2 = (320, 50)\n        self.arrow_cv_image.fill(255)\n        cv2.line(self.arrow_cv_image, pt1, pt2, (0, 0, 200), 10)\n        cv2.imshow(\"action\", self.arrow_cv_image)\n        cv2.waitKey(1)\n\n    def callback_reward_timer(self, data):\n        if (self.prev_count == self.count):\n            print(\"reward timer is too fast!\")\n        self.prev_count = self.count\n        self.reward = min(1.0 - abs(self.pan) / 100.0, 1.0)\n        
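# Cubing sharpens the reward peak around pan == 0 and keeps the sign, so large\n        # offsets (any |pan| > 100 makes the base negative) are penalized more strongly.\n        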
self.reward = self.reward ** 3\n# print(\"selected_action: \" + str(self.action) + \", reward: \" + str(self.reward))\n self.reward_pub.publish(self.reward)\n self.position_pub.publish(self.pan)\n\nif __name__ == '__main__':\n dr = dummy_robot()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting Down\")\ncv2.destroyAllWindows()\n","sub_path":"ros/robot_guidance/robot_guidance_check/scripts/dummy_robot_dl.py","file_name":"dummy_robot_dl.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"614757788","text":"import numpy as np, math\n\nUAV_THICKNESS = .4\n\nclass Node:\n def __repr__(self):\n return self.pos.__repr__()\n\nclass Node_astar(Node):\n OBSTACLE = 1\n FREE = 0\n\n def __init__(self, pos, value):\n self.value = value\n self.pos = np.array(pos)\n self.parent = None\n self.H = 0\n self.G = 0\n\n\nclass Node_rrt(Node):\n def __init__(self, pos, parent):\n self.pos = pos\n self.parent = parent\n if parent is not None:\n self.cost = parent.cost + dist(parent, pos)\n else:\n self.cost = 0\n\n\ndef rand(a, b=None, integer=False):\n if integer:\n return np.random.randint(a) if b is None else np.random.randint(b - a) + a\n else:\n return np.random.uniform(0, a) if b is None else np.random.uniform(a, b)\n\n\ndef random_position(world_dim):\n xmin, xmax, ymin, ymax, zmin, zmax = world_dim\n return [rand(xmin, xmax), rand(ymin, ymax), rand(zmin, zmax)]\n\n\ndef dist(p1, p2, sqrt=True):\n if p1 is None or p2 is None:\n return math.inf\n\n p1 = p1.pos if isinstance(p1, Node) else p1\n p2 = p2.pos if isinstance(p2, Node) else p2\n\n sqr = (p1[0] - p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2\n return math.sqrt(sqr) if sqrt else sqr\n","sub_path":"planning/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"56562986","text":"a = int(input())\r\nb = int(input())\r\nfor i in range(a, b + 1):\r\n flag = True\r\n for f in range(2, i):\r\n if i % f == 0:\r\n flag = False\r\n break\r\n if flag:\r\n print(i, end=' ')\r\n\r\n\r\n\r\n\r\n","sub_path":"Задача 2 17.py","file_name":"Задача 2 17.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"250373481","text":"import datetime\n\n#parent class\n\nclass VehicleRent:\n def __init__(self,stock):\n self.stock=stock\n self.now=0\n\n def displayStock(self):\n print(\"{} vehicle available to rent\".format(self.stock))\n return self.stock\n\n def rentHourly(self,n): #n araç sayısı\n if n<=0:\n print(\"Number should be positive\")\n return None\n\n elif n>self.stock:\n print(\"Sorry, {} vehicle available to rent\".format(self.stock))\n return None\n\n else:\n self.now=datetime.datetime.now()\n print(\"Rented {} number of vehicle for hourly at {}\".format(n,self.now.hour))\n self.stock -= n\n return self.now\n\n def rentDaily(self,n):\n if n<=0:\n print(\"Number should be positive\")\n return None\n\n elif n>self.stock:\n print(\"Sorry, {} vehicle available to rent\".format(self.stock))\n return None\n\n else:\n self.now=datetime.datetime.now()\n print(\"Rented {} number of vehicle for daily at {}\".format(n,self.now.hour))\n self.stock -= n\n return self.now\n\n def returnVehicle(self,request,brand):\n car_h_price = 10\n car_d_price = car_h_price*8/10*24\n bike_h_price = 5\n bike_d_price = bike_h_price*7/10*24\n\n rentalTime, 
rentalBasis, numberofVehicle = request\n bill = 0\n\n if brand == \"car\":\n if rentalTime and rentalBasis and numberofVehicle:\n self.stock += numberofVehicle\n now=datetime.datetime.now() #bu now farklı, local!\n\n rentalPeriod = now - rentalTime\n\n if rentalBasis == 1:\n bill = rentalPeriod.seconds/3600*car_h_price*numberofVehicle\n\n elif rentalBasis == 2:\n bill = rentalPeriod.seconds/(3600*24)*car_d_price*numberofVehicle\n\n if 2<=numberofVehicle:\n print(\"You have extra 20% discount!\")\n bill = bill * 0.8\n\n print(\"Thank you for returning your car!\")\n print(\"Price: ${}\".format(bill))\n return bill\n\n\n elif brand == \"bike\":\n if rentalTime and rentalBasis and numberofVehicle:\n self.stock += numberofVehicle\n now=datetime.datetime.now() #bu now farklı, local!\n\n rentalPeriod = now - rentalTime\n\n if rentalBasis == 1:\n bill = rentalPeriod.seconds/3600*bike_h_price*numberofVehicle\n\n elif rentalBasis == 2:\n bill = rentalPeriod.seconds/(3600*24)*bike_d_price*numberofVehicle\n\n if 4<=numberofVehicle:\n print(\"You have extra 20% discount!\")\n bill = bill * 0.8\n\n print(\"Thank you for returning your bike!\")\n print(\"Price: ${}\".format(bill))\n return bill\n\n else:\n print(\"You did not rent a vehicle!!!\")\n return None\n\n\nclass CarRent(VehicleRent):\n global discount_rate\n discount_rate = 15\n def __init__(self,stock):\n super().__init__(stock)\n\n def discount(self,b):\n bill=b-(b*discount_rate)/100\n return bill\n\n\nclass BikeRent(VehicleRent):\n def __init__(self,stock):\n super().__init__(stock)\n\n\nclass Customer:\n def __init__(self):\n self.bikes=0\n self.rentalBasis_b = 0\n self.rentalTime_b = 0\n\n self.cars = 0\n self.rentalBasis_c = 0\n self.rentalTime_c = 0\n\n def requestVehicle(self,brand):\n if brand == \"bike\":\n bikes=input(\"How many bikes would you like to rent?\")\n\n try:\n bikes=int(bikes)\n except ValueError:\n print(\"Number should be an integer!\")\n return -1\n\n if bikes<1:\n print(\"Number of bikes should greater than zero!\")\n return -1\n else:\n self.bikes=bikes\n return self.bikes\n\n elif brand == \"car\":\n cars = input(\"How many cars would you like to rent?\")\n\n try:\n cars = int(cars)\n except ValueError:\n print(\"Number should be an integer!\")\n return -1\n\n if cars < 1:\n print(\"Number of cars should greater than zero!\")\n return -1\n else:\n self.cars = cars\n return self.cars\n\n else:\n print(\"Request vehicle error!\")\n\n def returnVehicle(self,brand):\n if brand == \"bike\":\n if self.rentalTime_b and self.rentalBasis_b and self.bikes:\n return self.rentalTime_b, self.rentalBasis_b, self.bikes\n else:\n return 0,0,0\n elif brand == \"car\":\n if self.rentalTime_c and self.rentalBasis_c and self.cars:\n return self.rentalTime_c, self.rentalBasis_c, self.cars\n else:\n return 0,0,0\n else:\n print(\"Return vehicle error!\")\n\n\n\n","sub_path":"Rent a Car/rent.py","file_name":"rent.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"420050037","text":"\nimport unittest\nimport numpy as np\n\nfrom robo.maximizers.direct import Direct\nfrom robo.maximizers.grid_search import GridSearch\nfrom robo.maximizers.random_sampling import RandomSampling\nfrom robo.maximizers.scipy_optimizer import SciPyOptimizer\nfrom robo.acquisition_functions.base_acquisition import BaseAcquisitionFunction\nfrom test.dummy_model import DemoQuadraticModel\n\n\nclass DemoAcquisitionFunction(BaseAcquisitionFunction):\n\n def 
__init__(self):\n model = DemoQuadraticModel()\n X = np.random.rand(10, 1)\n y = X ** 2\n model.train(X, y[:, 0])\n super(DemoAcquisitionFunction, self).__init__(model)\n\n def compute(self, x, **kwargs):\n y = (0.5 - x) ** 2\n return np.array([y])\n\n\nclass TestMaximizers1D(unittest.TestCase):\n\n def setUp(self):\n\n self.lower = np.array([0])\n self.upper = np.array([1])\n self.objective_function = DemoAcquisitionFunction()\n\n def test_direct(self):\n maximizer = Direct(self.objective_function, self.lower, self.upper)\n x = maximizer.maximize()\n\n assert x.shape[0] == 1\n assert len(x.shape) == 1\n assert np.all(x >= self.lower)\n assert np.all(x <= self.upper)\n\n def test_grid_search(self):\n maximizer = GridSearch(self.objective_function, self.lower, self.upper)\n x = maximizer.maximize()\n\n assert x.shape[0] == 1\n assert len(x.shape) == 1\n assert np.all(x >= self.lower)\n assert np.all(x <= self.upper)\n\n def test_random_sampling(self):\n maximizer = RandomSampling(self.objective_function, self.lower, self.upper)\n x = maximizer.maximize()\n\n assert x.shape[0] == 1\n assert len(x.shape) == 1\n assert np.all(x >= self.lower)\n assert np.all(x <= self.upper)\n\n def test_scipy(self):\n maximizer = SciPyOptimizer(self.objective_function, self.lower, self.upper)\n x = maximizer.maximize()\n\n assert x.shape[0] == 1\n assert len(x.shape) == 1\n assert np.all(x >= self.lower)\n assert np.all(x <= self.upper)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_maximizer/test_maximizers_one_dim.py","file_name":"test_maximizers_one_dim.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"78678719","text":"import pandas as pd\nimport numpy as np\ndata = pd.ExcelFile('MOVIE.xlsx').parse()\n\npop_temp = data.iloc[1:, [-5,-3]].values.astype(float)\npop = []\n\nfor i in range(len(pop_temp[:,0])):\n if pop_temp[i,0]<=2015:\n pop.append(pop_temp[i,:])\n\npop_mean = np.mean(np.asarray(pop)[:,1])\nprint(\"Population mean of imdb score is\", pop_mean)\n\nsample = []\nfor i in range(len(pop_temp[:,0])):\n if pop_temp[i,0] == 2016:\n sample.append(pop_temp[i,1])\nsample = np.array(sample)\n","sub_path":"Data Analytics/project 2/Topic 1/topic 1.py","file_name":"topic 1.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"532596058","text":"\nfrom django.contrib import admin\nfrom django.urls import path,include\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('auth/', include('auth.urls')),\n path('app/', include('app.urls')),\n path('accounts/', include('allauth.urls')),\n\n\n]\n","sub_path":"shadiregistrar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"147284834","text":"\n\"\"\"The data layer used during training to train a Fast R-CNN network.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.utils.data as data\nfrom imageio import imread \nimport torch\n\nimport numpy as np\nimport numpy.random as npr\nfrom PIL import Image\nimport random\nimport time\nimport pdb\nfrom pyquaternion import Quaternion\n\nfrom nuscenes import NuScenes\nfrom nuscenes import NuScenesExplorer \nfrom nuscenes.utils.data_classes import LidarPointCloud, Box\nfrom 
nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility\nimport os \n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm \nfrom mpl_toolkits.mplot3d import Axes3D\nimport glob\n\nfrom utils import get_pointcloud \n\nimport cv2 \n\nfrom logger import Logger \n\nclass nuscenes_dataloader(data.Dataset):\n def __init__(self, batch_size, num_classes, training=True, normalize=None):\n self._num_classes = num_classes\n self.training = training\n self.normalize = normalize\n self.batch_size = batch_size\n self.data_path = \"/home/fengjia/data/sets/nuscenes\"\n self.nusc= NuScenes(version='v1.0-trainval', dataroot = self.data_path, verbose= True)\n self.explorer = NuScenesExplorer(self.nusc)\n self.classes = ('__background__', \n 'pedestrian', 'barrier', 'trafficcone', 'bicycle', 'bus', 'car', 'construction', 'motorcycle', 'trailer', 'truck')\n\n # PATH = self.data_path + '/annotations_list.txt'\n PATH = self.data_path + '/car_pedestrian_annotations_list.txt'\n\n with open(PATH) as f:\n self.token = [x.strip() for x in f.readlines()]\n self.token = self.token[:400]\n\n def __getitem__(self, index):\n # gather tokens and samples needed for data extraction\n tokens = self.token[index]\n if len(tokens.split('_')) < 2:\n print(tokens)\n im_token = tokens.split('_')[0]\n annotation_token = tokens.split('_')[1]\n \n sample_data = self.nusc.get('sample_data', im_token)\n image_name = sample_data['filename']\n sample = self.nusc.get('sample', sample_data['sample_token'])\n lidar_token = sample['data']['LIDAR_TOP']\n \n # get the sample_data for the image batch\n #image_path = '/data/sets/nuscenes/' + image_name\n img = imread('/home/fengjia/data/sets/nuscenes/' + image_name)\n im = np.array(img)\n \n # get ground truth boxes \n _, boxes, camera_intrinsic = self.nusc.get_sample_data(im_token, box_vis_level=BoxVisibility.ALL)\n \n for box in boxes:\n corners = view_points(box.corners(), view=camera_intrinsic, normalize=True)\n if box.token == annotation_token:\n # Find the crop area of the box \n width = corners[0].max() - corners[0].min()\n height = corners[1].max() - corners[1].min()\n x_mid = (corners[0].max() + corners[0].min())/2\n y_mid = (corners[1].max() + corners[1].min())/2\n side = max(width, height)*random.uniform(1.0,1.2)\n \n if (x_mid - side/2) < 0:\n side = x_mid*2 \n if (y_mid - side/2) < 0:\n side = y_mid*2\n \n # Crop the image\n bottom_left = [int(x_mid - side/2), int(y_mid - side/2)]\n top_right = [int(x_mid + side/2), int(y_mid + side/2)]\n corners[0]=corners[0] - bottom_left[0]\n corners[1]=corners[1] - bottom_left[1]\n crop_img = im[bottom_left[1]:top_right[1],bottom_left[0]:top_right[0]]\n \n # Scale to same size \n scale = 128/ side\n scaled = cv2.resize(crop_img, (128, 128))\n crop_img = np.transpose(scaled, (2,0,1))\n crop_img = crop_img.astype(np.float32)\n crop_img /= 255\n \n # Get corresponding point cloud for the crop\n pcl, m, offset, camera_intrinsic, box_corners = get_pointcloud(self.nusc, bottom_left, top_right, box, lidar_token, im_token)\n break\n\n pcl = pcl.astype(np.float32)\n box_corners = box_corners.astype(np.float32)\n \n return crop_img, pcl, offset, m, camera_intrinsic, box_corners\n\n def __len__(self):\n return len(self.token)\n\n\nclass local_dataloader(data.Dataset):\n def __init__(self, batch_size, num_classes, training=True, normalize=None):\n self._num_classes = num_classes\n self.training = training\n self.normalize = normalize\n self.batch_size = batch_size\n\n self.classes = ('__background__',\n 'pedestrian', 'barrier', 
'trafficcone', 'bicycle', 'bus', 'car', 'construction', 'motorcycle',\n 'trailer', 'truck')\n self.img_list = []\n self.dep_list = []\n self.originalGT_list = []\n self.shiftedGT_list = []\n self.offSet_list = []\n self.cameraMatrix_list = []\n self.cameraFrameBox_list = []\n\n data_path = r'/home/fengjia/data/sets/nuscenes_temp/vehicle'\n img_list = glob.glob(os.path.join(data_path, 'img_*'))\n img_list.sort(key=lambda s:int(s.split('_')[-1].split('.')[0]))\n img_list = img_list[:5000]\n for img in img_list:\n if (not os.path.isfile(img.replace('img', 'dep')) or (not os.path.isfile(img.replace('img', 'originalGT'))) or (not os.path.isfile(img.replace('img', 'shiftedGT')))):\n print(img)\n continue\n self.img_list.append(img)\n self.dep_list.append(img.replace('img', 'dep'))\n self.originalGT_list.append(img.replace('img', 'originalGT'))\n self.shiftedGT_list.append(img.replace('img', 'shiftedGT'))\n self.offSet_list.append(img.replace('img', 'offSet'))\n self.cameraMatrix_list.append(img.replace('img', 'cameraMatrix'))\n self.cameraFrameBox_list.append(img.replace('img', 'cameraFrameBox'))\n\n\n def __getitem__(self, index):\n img = np.load(self.img_list[index])\n dep = np.load(self.dep_list[index])\n originalGT = np.load(self.originalGT_list[index])\n shiftedGT = np.load(self.shiftedGT_list[index])\n #originalGT = originalGT.transpose((1, 0))\n #shiftedGT = shiftedGT.transpose((1, 0))\n offSet = np.load(self.offSet_list[index])\n cameraMatrix = np.load(self.cameraMatrix_list[index])\n cameraFrameBox = np.load(self.cameraFrameBox_list[index])\n return img, dep, originalGT, shiftedGT, offSet, cameraMatrix, cameraFrameBox, self.img_list[index]\n\n def __len__(self):\n return len(self.img_list)\n","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"286947617","text":"\n# coding: utf-8\n#######################\n####### Task 2.2\n# I used some Gabor filters plus edge detection to brute force my way through a massive random forest.\n# In[1]:\n\n# import libraries needed for this assignment\nimport os\nimport numpy as np\nfrom math import floor\nfrom PIL import Image\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nget_ipython().magic(u'matplotlib inline')\nmatplotlib.rcParams['figure.figsize'] = (14, 8)\nfrom sklearn.neighbors import KNeighborsClassifier\nimport scipy.signal\nfrom challenger import submit_results\n\n\n# In[2]:\n\n# function to get a list of file of a given extension, both the absolute path and the filename\ndef get_file_list(path,ext='',queue=''):\n if ext != '':\n return [os.path.join(path,f) for f in os.listdir(path) if f.endswith(''+queue+'.'+ext+'')], [f for f in os.listdir(path) if f.endswith(''+queue+'.'+ext+'')] \n else:\n return [os.path.join(path,f) for f in os.listdir(path)]\n\n\n# Point to the local copy of training data\n\n# In[3]:\n\ntra_img_dir = './data/DRIVE/training/images'\ntra_msk_dir = './data/DRIVE/training/mask'\ntra_lbl_dir = './data/DRIVE/training/1st_manual'\n\ntra_imgs = sorted(get_file_list(tra_img_dir, 'tif')[0])\ntra_msks = sorted(get_file_list(tra_msk_dir, 'gif')[0])\ntra_lbls = sorted(get_file_list(tra_lbl_dir, 'gif')[0])\n\n# In[6]:\nfrom skimage.filters import gabor_kernel\n\ndef gauss_filter(sigma, x0=0.0, y0=0.0):\n x_vec = np.arange(-sigma*3, sigma*3, 1.0) \n y_vec = np.arange(-sigma*3, sigma*3, 1.0)\n xx,yy = 
np.meshgrid(x_vec,y_vec)\n    \n    g = (1/(2*np.pi*sigma**2)) * np.exp(-(xx**2+yy**2)/(2*sigma**2))\n    \n    gx = np.gradient(g, axis=0)\n    gxx = np.gradient(gx, axis=0)  # second derivative, taken from gx (not a copy of it)\n    gxy = np.gradient(gx, axis=1)\n    \n    gy = np.gradient(g, axis=1)\n    gyy = np.gradient(gy, axis=1)\n    lgx = gxx + gyy  # Laplacian of Gaussian\n    \n    return g, gx, gxx, gxy, gy, gyy, lgx\n\ndef visfeat(filters):\n    plt.figure()\n    i = 1\n    for f in filters:\n        plt.subplot(14,8,i)\n        plt.imshow(f); i += 1\n\ndef get_gabors():\n    kernels = []\n    for sigma in (0.3, 0.5, 1, 1.5, 2, 2.5):\n        if sigma > 0.3:\n            for frequency in (0.05, 0.15):\n                for theta in range(10):\n                    theta = theta / 10. * np.pi\n                    kernel = np.real(gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma*1.5))\n                    kernels.append(kernel)\n        else:\n            theta = 0\n            frequency = 0.15\n            kernel = np.real(gabor_kernel(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma*1.5))\n            kernels.append(kernel)\n    return kernels\n\n\n# In[9]:\nfrom scipy.ndimage.filters import sobel\nfrom skimage import feature\n\ndef extract_features(img, sigmas, n_features):\n    \"\"\"\n    Computes features from a given input image with given sigmas.\n    Output of this function is a 3 dimensional numpy array containing\n    the different computed features for the given input image.\n    \"\"\"\n    dims = img.shape # dimensions of the image\n    \n    features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n    \n    # the first feature we use is the pixel intensity in the green channel itself\n    img_g = img[:,:,1] # I just assume it follows the RGB convention and not BGR...\n    features[:,:,0] = img_g\n    features[:,:,1] = np.sum(img, axis=2)\n    \n    gabors = get_gabors()\n    \n    # >>> YOUR CODE STARTS HERE <<<\n    i = 2\n    for s in sigmas:\n        gauss = gauss_filter(s)\n        for g in gauss:\n            features[:,:,i] = scipy.signal.fftconvolve(img_g, g, mode='same'); i += 1\n    \n    for gabor in gabors:\n        features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same'); i += 1\n    \n    features[:,:,i] = sobel(img_g, axis=0); i += 1\n    features[:,:,i] = sobel(img_g, axis=1); i += 1\n    features[:,:,i] = sobel(img_g, axis=0) + sobel(img_g, axis=1); i += 1\n    features[:,:,i] = feature.canny(img_g, sigma=0.0); i += 1\n    features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50); i += 1\n    features[:,:,i] = feature.canny(img_g, sigma=1)\n    # >>> YOUR CODE ENDS HERE <<<\n    \n    return features\n\n\n# In[10]:\nsmpl = 0\nfor idx in range(len(tra_msks)):\n    smpl += np.sum(np.asarray(Image.open(tra_msks[idx])) == 255)\n    \n# set the parameters for your CAD system here\nn_samples_per_class_per_image = 1000 # how many positive/negative pixels per image in the training set?\nn_classes = 2 # how many classes in this problem?\nsigmas = [1,1.5,2,4,8] # what values of sigma?\nn_features = 35 + 101 + 8 # how many features? (see the note below)\n\n# define training data and labels\nx_train = np.zeros((n_classes * n_samples_per_class_per_image * len(tra_imgs), n_features),dtype=np.float32)\ny_train = np.zeros((n_classes * n_samples_per_class_per_image * len(tra_imgs), 1),dtype=np.int16)\n\n
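# NOTE on n_features above: 35 = 5 sigmas x 7 Gaussian(-derivative) maps from\n# gauss_filter, 101 = the Gabor bank from get_gabors (5*2*10 kernels plus one\n# extra at sigma=0.3), and 8 = green channel, channel sum, 3 Sobel maps and\n# 3 Canny maps appended in extract_features.\n\n# In[11]:\n\ndef get_random_indexes(msk, n_idxs):\n    \"\"\"\n    Returns rows and columns of user-defined positive and negative indexes.\n    The variable msk is a binary map in numpy format.\n    \"\"\"\n    pos_idxs = np.array(np.where(msk > 0))\n    neg_idxs = np.array(np.where(msk == 0))\n    n_pos = pos_idxs.shape[1] # number of positives found in the mask\n    n_neg = neg_idxs.shape[1] 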
# number of negatives found in the mask\n    n_min = min(n_neg, min(n_idxs, n_pos))\n    rnd_idxs_pos = np.arange(n_pos) # np.arange, not range: np.random.shuffle needs a mutable array\n    np.random.shuffle(rnd_idxs_pos)\n    rnd_idxs_neg = np.arange(n_neg)\n    np.random.shuffle(rnd_idxs_neg)\n    return pos_idxs[:, rnd_idxs_pos[:n_min]], neg_idxs[:, rnd_idxs_neg[:n_min]]\n\n# In[27]:\n\ntmp_array = np.empty((0,n_features),dtype=np.float32)\ny = np.empty(0, dtype=np.int16)\nif 'feats_calculated' not in vars():\n    features_tra = np.zeros([len(tra_imgs), 584, 565, n_features], dtype=np.float32)\n    \nfor f in range(len(tra_imgs)):\n    \n    # load training image and annotation\n    img = np.asarray(Image.open(tra_imgs[f]))\n    lbl = np.asarray(Image.open(tra_lbls[f]))\n    \n    # extract features from the given images\n    print('extracting features for image ' + str(f+1))\n    if 'feats_calculated' not in vars(): # only calculate them if we did not already, save computation\n        features_tra[f,:,:,:] = extract_features(img, sigmas, n_features) # implement the extract_features function defined above!\n\n    # extract random position of samples\n    p_idx, n_idx = get_random_indexes(lbl, n_samples_per_class_per_image)\n    \n    pos_feat = features_tra[f,p_idx[0,:],p_idx[1,:],:]\n    neg_feat = features_tra[f,n_idx[0,:],n_idx[1,:],:]\n    pos_lbl = lbl[p_idx[0,:],p_idx[1,:]]\n    neg_lbl = lbl[n_idx[0,:],n_idx[1,:]]\n    \n    tmp_array = np.append(tmp_array, pos_feat, axis=0)\n    tmp_array = np.append(tmp_array, neg_feat, axis=0) # we append the features, positives first\n    y = np.append(y, pos_lbl) # labels must follow the same positive-then-negative order as the features\n    y = np.append(y, neg_lbl)\n\n    \nfeats_calculated = True\n\nx_train = tmp_array[:,:] # take the assembled samples (a view is enough to check the dimensions)\ny_train = y[:]\nranges = np.ptp(x_train,axis=0)\n\n\n# In[17]:\n\ndef normalization(x_train):\n    \"\"\"\n    Normalization of x_train\n    \"\"\"\n    # >>> YOUR CODE STARTS HERE <<<\n    meanV = np.mean(x_train, axis = 0) # vector of mean values\n    stdV = np.std(x_train, axis = 0) # vector of standard deviation values\n    x_train_norm = (x_train - meanV) / stdV\n    # >>> YOUR CODE ENDS HERE <<<\n    \n    return x_train_norm, meanV, stdV\n\nx_train_norm, meanV, stdV = normalization(x_train)\n\n\n# For convenience, you can save your training data, which you can load later and use for testing purposes without the need to rebuild it every time you run an experiment. You may want to define a flag to enable/disable training, or just execute some cells in this notebook instead of executing all cells. You may want to add more variables that you think will be necessary to test new samples.\n\n# In[18]:\n\n# save training data to disk in numpy format\nnp.savez('./data/training_data.npz', x_train=x_train, y_train=y_train, \n         x_train_norm=x_train_norm, meanV=meanV, stdV=stdV)\n\n# In[22]:\nfrom sklearn.ensemble import RandomForestClassifier\n# load training data and train a classifier (the nearest-neighbour variant is kept commented out below)\nK = int(np.sqrt(len(y_train))) // n_classes # define the K parameter\nK = K // 2 * 2 + 1 # make sure an odd number of neighbours is used\n\nprint(\"Using Random Forest\")\n\nnpz = np.load('./data/training_data.npz') \n#neigh = KNeighborsClassifier(n_neighbors=K, n_jobs=-1) # use multithreading \n#neigh.fit(npz['x_train_norm'], npz['y_train'].ravel())\nimport time\nstart = time.time()\n
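# NOTE: a random forest copes with this ~140-dimensional feature space far\n# better than the commented-out KNN, and n_jobs=-1 parallelizes the tree\n# fitting across all CPU cores.\nprint(\"Starting fit. 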
Takes around 25 minutes on my PC.\")\nforest = RandomForestClassifier(n_estimators=500, n_jobs=-1) # 500 might be a bit of an overkill, but with 100+ features, why not?\nforest.fit(npz['x_train_norm'], npz['y_train'].ravel())\nprint(\"Fitting took {} seconds\".format(\"%.f\" % (time.time() - start)))\n\n# In[23]:\n\n# test images\ntes_img_dir = './data/DRIVE/test/images'\ntes_msk_dir = './data/DRIVE/test/mask'\n\ntes_imgs = sorted(get_file_list(tes_img_dir, 'tif')[0])\ntes_msks = sorted(get_file_list(tes_msk_dir, 'gif')[0])\n\nresult_output_folder = './data/DRIVE/test/results'\nif not os.path.exists(result_output_folder):\n    os.makedirs(result_output_folder)\n\n\n# #### Question\n# \n# Do you think that you have to apply some kind of normalization to test data as well? In case you do, what do you think is the best strategy? Will you compute statistics (mean, std) on the test set, or will you use the ones from the training set? Why?\n\n# We need to standardize the test data with the statistics (mean, std) computed on the training set, so that its values follow the same distribution the classifier was trained on. A kNN is sensitive to absolute feature values (it is not scale-invariant), since it computes distances in the raw input space. If the test set contains a few outliers that are absent from the training set, recomputing the statistics on the test set would shift all its values relative to the training set and induce a bias.\n\n# In[24]:\n\n# define vectors of mean value and standard deviation for the test set here\nmean_test = meanV\nstd_test = stdV\n\n\n# In[25]:\n\ndef normalization_test(x_test, meanV, stdV):\n    \"\"\"\n    Normalization of the test data\n    \"\"\"\n    eps = np.finfo(float).eps \n    x_test_post = (x_test - meanV) / (stdV + eps) \n    \n    return x_test_post\n\n\n# In the next cell we loop over all the images in the test set and do the following for every image:\n# \n# * Extract features for every pixel in the image\n# * Apply normalization\n# * Classify every pixel\n# * Save the output to disk\n# \n# In the classification step, the output of the classifier can be:\n# * the predicted label of the test sample classified\n# * a likelihood value (pseudo-probability), based on processing distance measures\n# \n# In order to optimize the performance of our system, we would like to obtain some kind of probability for each pixel, which we can later post-process by applying a threshold, which we will have to optimize. By changing the threshold, the number of pixels classified as vessel and as background will change.\n\n# In[30]:\n\n# To optimize the probability threshold we classify each image of the train set \n# and see at which average threshold we get the highest accuracy.\nimport sklearn\n# classify all images in the train set\nthresholds = np.arange(0, 255) \naccs = np.zeros([255, 20])\n\n
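# NOTE: accs[t, f] holds the F1 score of training image f at integer threshold\n# t; averaging over images and taking the argmax afterwards picks the threshold\n# that works best across the whole training set.\nfor f in range(len(tra_imgs)):\n    \n    # load test image and mask\n    img = np.asarray(Image.open(tra_imgs[f])) # don't actually need to load this. 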
just to keep the code intact.\n    msk = np.asarray(Image.open(tra_msks[f]))\n    lbl = np.asarray(Image.open(tra_lbls[f]))\n    \n    ns = img.shape[0] * img.shape[1] # number of samples is ALL pixels in the image\n    x_test = np.zeros((ns, n_features))\n    \n    # compute features\n    features = features_tra[f,:,:,:] # that's why I save all calculated features from before, no need to recalculate\n    for k in range(features.shape[2]):\n        x_test[:,k] = features[:,:,k].flatten()\n    \n    # normalize\n    x_test_norm = normalization_test(x_test, mean_test, std_test)\n    \n    print('searching for best threshold in train image {}'.format(f))\n    p_test = forest.predict_proba(x_test_norm)\n    thresholds = np.array(np.unique(p_test)*255, dtype='int32')[:-1]\n    for thres in thresholds:\n        p_test_reshaped = p_test[:, 1].reshape(img.shape[0], img.shape[1]) * msk # column 1 = P(vessel): classes_ are sorted, so label 255 comes second\n#        p_test_reshaped = scipy.ndimage.grey_closing(p_test_reshaped, size=(1,3))  \n        final_output = (p_test_reshaped > thres) * 255 # threshold the probability map to obtain the final result  \n        final_output = scipy.ndimage.binary_closing(final_output, iterations=1)  \n        accs[thres, f] = sklearn.metrics.f1_score(lbl.flatten() > 0, final_output.flatten() > 0)\n#        accs[thres, f] = np.mean(final_output==lbl)\noptimal_threshold = np.argmax(np.mean(accs, axis=1)) # threshold with the best mean F1 over all training images\n\n\nprint(\"Optimal threshold found at {}\".format(optimal_threshold))\n\n\n#%%\n####################\n    \n\n# classify all images in the test set\nfor f in range(len(tes_imgs)):\n    \n    # load test image and mask\n    img = np.asarray(Image.open(tes_imgs[f]))\n    msk = np.asarray(Image.open(tes_msks[f]))\n    \n    ns = img.shape[0] * img.shape[1] # number of samples is ALL pixels in the image\n    x_test = np.zeros((ns, n_features))\n    \n    # compute features\n    print('extracting features for image ' + str(f+1))\n    features = extract_features(img, sigmas, n_features)  \n    for k in range(features.shape[2]):\n        x_test[:,k] = features[:,:,k].flatten()\n    \n    # normalize\n    x_test_norm = normalization_test(x_test, mean_test, std_test)\n\n    print('labeling pixels with the random forest...')\n    p_test = forest.predict_proba(x_test_norm)\n    \n    p_test_reshaped = p_test[:, 1].reshape(img.shape[0], img.shape[1]) * msk # column 1 = P(vessel), as above\n#    p_test_reshaped = scipy.ndimage.grey_closing(p_test_reshaped, size=(1,3))  \n    final_output = (p_test_reshaped > optimal_threshold) * 255 # threshold the probability map to obtain the final result\n    final_output = scipy.ndimage.binary_closing(final_output, iterations=1)  \n    plt.subplot(1,3,1)\n    plt.imshow(img)\n    plt.subplot(1,3,2)\n    plt.imshow(p_test_reshaped)\n    plt.subplot(1,3,3)\n    plt.imshow(final_output)\n    plt.show()\n    \n    im = Image.fromarray(final_output.astype('uint8'))\n    im.save(os.path.join(result_output_folder, str(f+1) + \"_mask.png\"))\n    \n\n\n# ### Submit your result\n# \n# After processing all the images in the test set you can upload your result to the challenge website and see how well you performed compared to your colleagues! You can submit as often as you want, only the best result counts.\n# \n# Enter your username and password in the cell below to submit your result. You can add a description for your own reference. This description will also be shown on the website, which you can use as a reference to keep track of the development of your system.\n# \n# You should have received your username and password by email. Otherwise, TAs will give you a temporary spare account.\n\n# In[ ]:\n\n
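# NOTE: hard-coding credentials in a notebook is risky; reading them from\n# environment variables (e.g. via os.environ) is safer if this file is shared.\nuser = {'username': 'S.Kern', 'password' : '5CCN6PW2'} # enter your username and password\ndescription = {'notes' : 'deep CNN 17 layers. 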
lol jk.'}\n\nsubmit_results(user, os.path.abspath(result_output_folder), description)\n\n\n# **Check your result!** http://ismi17.diagnijmegen.nl/\n\n# ## 2. Improve your results! [optional]\n# \n# Try to improve your results and resubmit.\n# \n# A few ideas to improve the system are:\n# \n# * Design a segmentation system based on morphology, without using pixel classification.\n# * Improve the performance of your existing system by:\n#     * Using more features (Local Binary Patterns, Gabor filters, rotated derivative-of-Gaussian filters, etc.). Get creative!\n#     * Postprocessing to improve the results using morphological filtering\n#     * Using more training samples\n#     * etc.\n# \n# \n","sub_path":"assignment_2/assignment2.2.py","file_name":"assignment2.2.py","file_ext":"py","file_size_in_byte":16047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"640587724","text":"\"\"\"\nSimple unit tests to confirm that ModelRegistryClient properly calls the registry Store methods\nand returns values when required.\n\"\"\"\nfrom mock import ANY\nimport pytest\nimport mock\n\nfrom mlflow.entities.model_registry import ModelVersion, ModelVersionDetailed, RegisteredModel, \\\n    RegisteredModelDetailed\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.tracking._model_registry.client import ModelRegistryClient\n\n\n@pytest.fixture\ndef mock_store():\n    with mock.patch(\"mlflow.tracking._model_registry.utils._get_store\") as mock_get_store:\n        yield mock_get_store.return_value\n\n\ndef newModelRegistryClient():\n    return ModelRegistryClient(\"uri:/fake\")\n\n\ndef _model_version_detailed(name, version, stage, source=\"some:/source\", run_id=\"run13579\"):\n    return ModelVersionDetailed(RegisteredModel(name), version, \"2345671890\", \"234567890\",\n                                \"some description\", \"UserID\", stage, source, run_id)\n\n\n# Registered Model API\n\ndef test_create_registered_model(mock_store):\n    mock_store.create_registered_model.return_value = RegisteredModel(\"Model 1\")\n    result = newModelRegistryClient().create_registered_model(\"Model 1\")\n    mock_store.create_registered_model.assert_called_once_with(\"Model 1\")\n    assert result.name == \"Model 1\"\n\n\ndef test_update_registered_model(mock_store):\n    mock_store.update_registered_model.return_value = RegisteredModel(\"New Name\")\n    result = newModelRegistryClient().update_registered_model(\n        name=\"Model 1\",\n        new_name=\"New Name\",\n        description=\"New Description\")\n    mock_store.update_registered_model.assert_called_with(ANY, \"New Name\", \"New Description\")\n    assert result.name == \"New Name\"\n\n    mock_store.update_registered_model.return_value = RegisteredModel(\"New Name 2\")\n    result2 = newModelRegistryClient().update_registered_model(\n        name=\"Model 1\",\n        new_name=\"New Name 2\")\n    mock_store.update_registered_model.assert_called_with(ANY, \"New Name 2\", ANY)\n    assert result2.name == \"New Name 2\"\n\n    result3 = newModelRegistryClient().update_registered_model(\n        name=\"Model 1\",\n        description=\"New Description 2\")\n    mock_store.update_registered_model.assert_called_with(ANY, ANY, \"New Description 2\")\n    assert result3.name == \"New Name 2\"\n\n\n
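# NOTE: the store fixture is a Mock and never raises on its own, so the\n# MlflowException expected by the two tests below must come from the client's\n# own argument validation.\ndef test_update_registered_model_validation_errors(mock_store):\n    with pytest.raises(MlflowException):\n        newModelRegistryClient().update_registered_model(\"Model 1\")\n\n\ndef test_update_registered_model_validation_errors_on_empty_new_name(mock_store):\n    with pytest.raises(MlflowException):\n        newModelRegistryClient().update_registered_model(\"Model 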
1\", new_name=\" \",\n description=\"Blah\")\n\n\ndef test_delete_registered_model(mock_store):\n newModelRegistryClient().delete_registered_model(\"Model 1\")\n mock_store.delete_registered_model.assert_called_once()\n\n\ndef test_list_registered_models(mock_store):\n mock_store.list_registered_models.return_value = [\n RegisteredModel(\"Model 1\"),\n RegisteredModel(\"Model 2\")\n ]\n result = newModelRegistryClient().list_registered_models()\n mock_store.list_registered_models.assert_called_once()\n assert len(result) == 2\n\n\ndef test_get_registered_model_details(mock_store):\n mock_store.get_registered_model_details.return_value = RegisteredModelDetailed(\n \"Model 1\", \"1263283747835\", \"1283168374623874\", \"I am a model\",\n [_model_version_detailed(\"Model 1\", 3, \"None\"),\n _model_version_detailed(\"Model 1\", 2, \"Staging\"),\n _model_version_detailed(\"Model 1\", 1, \"Production\")]\n )\n result = newModelRegistryClient().get_registered_model_details(\"Model 1\")\n mock_store.get_registered_model_details.assert_called_once()\n assert result.name == \"Model 1\"\n assert len(result.latest_versions) == 3\n\n\ndef test_get_latest_versions(mock_store):\n mock_store.get_latest_versions.return_value = [\n _model_version_detailed(\"Model 1\", 3, \"None\"),\n _model_version_detailed(\"Model 1\", 2, \"Staging\"),\n _model_version_detailed(\"Model 1\", 1, \"Production\")\n ]\n result = newModelRegistryClient().get_latest_versions(\"Model 1\", [\"Stage1\", \"Stage2\"])\n mock_store.get_latest_versions.assert_called_once_with(ANY, [\"Stage1\", \"Stage2\"])\n assert len(result) == 3\n\n\n# Model Version API\n\n\ndef test_create_model_version(mock_store):\n mock_store.create_model_version.return_value = ModelVersion(\n RegisteredModel(\"Model 1\"),\n 1\n )\n result = newModelRegistryClient().create_model_version(\"Model 1\", \"uri:/for/source\", \"run123\")\n mock_store.create_model_version.assert_called_once_with(\"Model 1\", \"uri:/for/source\", \"run123\")\n assert result.get_name() == \"Model 1\"\n assert result.version == 1\n\n\ndef test_update_model_version(mock_store):\n newModelRegistryClient().update_model_version(\"Model 1\", 12, \"stageX\", \"new description\")\n mock_store.update_model_version.assert_called_once_with(ANY, \"stageX\", \"new description\")\n\n\ndef test_update_model_version_validation_errors(mock_store):\n with pytest.raises(MlflowException):\n newModelRegistryClient().update_model_version(\"Model 1\", 12)\n\n\ndef test_update_model_version_validation_errors_on_empty_stage(mock_store):\n with pytest.raises(MlflowException):\n newModelRegistryClient().update_model_version(\"Model 1\", 12, stage=\" \")\n\n\ndef test_delete_model_version(mock_store):\n newModelRegistryClient().delete_model_version(\"Model 1\", 12)\n mock_store.delete_model_version.assert_called_once()\n\n\ndef test_get_model_version_details(mock_store):\n mock_store.get_model_version_details.return_value = _model_version_detailed(\"Model 1\", 12,\n \"Production\")\n result = newModelRegistryClient().get_model_version_details(\"Model 1\", 12)\n mock_store.get_model_version_details.assert_called_once()\n assert result.get_name() == \"Model 1\"\n\n\ndef test_get_model_version_download_uri(mock_store):\n mock_store.get_model_version_download_uri.return_value = \"some:/uri/here\"\n result = newModelRegistryClient().get_model_version_download_uri(\"Model 1\", 12)\n mock_store.get_model_version_download_uri.assert_called_once()\n assert result == \"some:/uri/here\"\n\n\ndef 
test_search_model_versions(mock_store):\n mock_store.search_model_versions.return_value = [\n ModelVersion(RegisteredModel(\"Model 1\"), 1),\n ModelVersion(RegisteredModel(\"Model 1\"), 2)\n ]\n result = newModelRegistryClient().search_model_versions(\"name=Model 1\")\n mock_store.search_model_versions.assert_called_once_with(\"name=Model 1\")\n assert len(result) == 2\n\n\ndef test_get_model_version_stages(mock_store):\n mock_store.get_model_version_stages.return_value = [\"Stage A\", \"Stage B\"]\n result = newModelRegistryClient().get_model_version_stages(\"Model 1\", 1)\n mock_store.get_model_version_stages.assert_called_once()\n assert len(result) == 2\n assert \"Stage A\" in result\n assert \"Stage B\" in result\n","sub_path":"tests/tracking/_model_registry/test_model_registry_client.py","file_name":"test_model_registry_client.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"62218459","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 17 13:21:29 2019\r\n\r\n@author: giles\r\n\"\"\"\r\n\r\ndef add(L):\r\n ''' Adds the integer items of a list'''\r\n size = len(L)\r\n total = 0\r\n iterator = 0\r\n# pdb.set_trace()\r\n# print('Reached the while loop')\r\n while iterator < size:\r\n total = total + L[iterator]\r\n iterator +=1\r\n# print(f'Iterator is {iterator} total is {total}')\r\n return total \r\n\r\nmy_list = [1,2,3,4,5,6,7,'eight']\r\nadd(my_list)\r\n","sub_path":"Python 3 Tutorial Resources/1. Spyder python files/9.9.3_debug_spyder.py","file_name":"9.9.3_debug_spyder.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"285917848","text":"#\n# Copyright (C) 2008 Tisel\n#\n# Classes for handling the show list\n\nimport gtk, gobject\nfrom uuid import uuid4\nimport widgets, datastore\n\nclass ShowView(object):\n \"\"\"Sets up the TreeView that contains all shows, and contains callbacks\n for handling actions in it\"\"\"\n def __init__(self, view, store):\n self.show_view = view\n self.show_view.set_reorderable(True)\n\n renderers = []\n for i, id in enumerate(datastore.Columns.get_ids()):\n label = datastore.Columns.get_label(id)\n # If it hasn't got a label, don't try to show it\n if not label:\n continue\n\n renderer = datastore.Columns.get_renderer(id)()\n signal = datastore.Columns.get_signal(id)\n\n column = gtk.TreeViewColumn(label, renderer)\n column.add_attribute(renderer, signal, i)\n \n column.set_clickable(True)\n column.set_resizable(True)\n column.set_reorderable(True)\n column.set_min_width(1)\n if id != 'delete':\n column.set_sort_indicator(True)\n column.set_sort_column_id(i)\n column.set_sort_order(gtk.SORT_ASCENDING)\n\n column.set_visible(datastore.Columns.get_visible(id))\n \n renderers.append(renderer)\n self.show_view.append_column(column)\n\n id_name = datastore.Columns.get_num('show_name')\n id_season = datastore.Columns.get_num('season')\n id_episode = datastore.Columns.get_num('episode')\n id_out_of_eps = datastore.Columns.get_num('out_of_episodes')\n\n self.show_view.set_search_column(id_name)\n self.show_view.set_enable_search(True)\n for i in [id_season, id_episode]:\n renderers[i].connect(\"activated\", self.on_activated, store.get_model(), i)\n renderers[i].set_property(\"mode\", gtk.CELL_RENDERER_MODE_ACTIVATABLE)\n renderers[id_name].set_property(\"editable\", True)\n renderers[id_name].connect(\"edited\", self.on_edited, store.get_model(), 
1)\n renderers[id_season].set_property(\"baselabel\", \"Season\")\n renderers[id_episode].set_property(\"baselabel\", \"Episode\")\n renderers[id_out_of_eps].connect(\"toggled\", self.on_toggled, store.get_model(), 4)\n renderers[id_out_of_eps].set_property(\"activatable\", True)\n \n self.show_view.connect(\"button-press-event\", self.on_button_pressed, store.get_model())\n self.show_view.set_model(store)\n self.show_view.set_rules_hint(True)\n\n def reload_visibility(self):\n \"\"\"Goes through all columns and sets their visibility\"\"\"\n for column in self.show_view.get_columns():\n id = datastore.Columns.get_id(column.get_title())\n column.set_visible(datastore.Columns.get_visible(id))\n\n def focus_new(self):\n \"\"\"Move the focus to the name of the newly added show\"\"\"\n column = self.show_view.get_column(datastore.Columns.get_num(\"show_name\"))\n path = self.show_view.get_model().get_model()[-1].path\n self.show_view.set_cursor_on_cell(path, column, None, True)\n\n def on_button_pressed(self, widget, event, store):\n \"\"\"Because of limitations in GTK's TreeViews, we cannot send all\n signals directly to the renderers. This function works around those\n limitations.\n \n Warning: eats babies!\"\"\"\n if event.window != widget.get_bin_window():\n return False\n result = widget.get_path_at_pos(int(event.x), int(event.y))\n if not result:\n return False\n path, column, x, y = result\n columns = widget.get_columns()\n num = columns.index(column)\n if event.type == gtk.gdk.BUTTON_PRESS and event.button == 3:\n if column.get_title() in (datastore.Columns.get_label(\"episode\"), datastore.Columns.get_label(\"season\")):\n self.on_activated(None, path, store, datastore.Columns.get_num(datastore.Columns.get_id(column.get_title())), countdown=True)\n return True\n if event.type == gtk.gdk.BUTTON_PRESS and event.button == 1:\n if column.get_title() == datastore.Columns.get_label(\"delete\"):\n self.on_delete_clicked(store, store.get_iter(path))\n return True\n return False\n\n def on_activated(self, cell, path, model, num, countdown=False):\n \"\"\"Function that gets called either when the button renderers recieves\n the activated signal (only happens on left click), or when the user\n right clicks on the treeview, and the button-pressed callback determines\n that the click was on one of the button renderers\"\"\"\n if countdown:\n model.set_value(model.get_iter(path), num, model[path][num] - 1)\n else:\n model.set_value(model.get_iter(path), num, model[path][num] + 1)\n if num == datastore.Columns.get_num(\"season\"):\n model.set_value(\n model.get_iter(path), \n datastore.Columns.get_num(\"episode\"),\n 1\n )\n\n def on_toggled(self, cell, path, model, num):\n \"\"\"Callback for when a checkbox recieves the toggled signal\"\"\"\n model.set_value(model.get_iter(path), num, not model[path][num])\n\n def on_edited(self, cell, path, new_text, model, num):\n \"\"\"Callback for when a text entry has been updated\"\"\"\n model.set_value(model.get_iter(path), num, new_text)\n\n def on_delete_clicked(self, model, iter):\n \"\"\"Callback for when a user clicks the Delete icon\"\"\"\n name = model[iter][datastore.Columns.get_num(\"show_name\")]\n dd = widgets.DeleteDialog(name)\n if dd.run() == gtk.RESPONSE_ACCEPT:\n model.remove(iter)\n dd.destroy()\n","sub_path":"src/showlist.py","file_name":"showlist.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"102355529","text":"import os\nimport 
shutil\n\nfrom pyhocon import ConfigFactory\n\n\nclass Option(object):\n    def __init__(self, conf_path):\n        super(Option, self).__init__()\n        self.conf = ConfigFactory.parse_file(conf_path)\n\n        # ------------- general options ----------------------------------------\n        self.save_path = self.conf['save_path']  # log path\n        self.save_path_p = self.conf['save_path_p']\n        self.data_path = self.conf['data_path']  # path for loading data set\n        self.dataset = self.conf['dataset']  # options: imagenet | cifar10\n        self.seed = self.conf['seed']  # manually set RNG seed\n        self.gpu = self.conf['gpu']  # GPU id to use, e.g. \"0,1,2,3\"\n        self.n_gpus = len(self.gpu.split(','))  # number of GPUs to use by default\n\n        # ------------- data options -------------------------------------------\n        self.n_threads = self.conf['n_threads']  # number of threads used for data loading\n\n        # ------------- discrimination-aware options ---------------------------\n        self.n_losses = self.conf['n_losses']  # number of additional losses\n        self.pruning_rate = self.conf['pruning_rate']  # pruning rate\n        self.softmax_weight = self.conf['softmax_weight']  # weight of the softmax loss\n        self.mse_weight = self.conf['mse_weight']  # weight of the mean square loss\n        self.max_samples = self.conf['max_samples']  # maximum sample size used for channel selection, -1 means using the whole data set\n\n        # ------------- common optimization options ----------------------------\n        self.batch_size = self.conf['batch_size']  # mini-batch size\n        self.momentum = self.conf['momentum']  # momentum\n        self.weight_decay = self.conf['weight_decay']  # weight decay\n\n        # ------------- segment-wise optimization options ----------------------\n        self.segment_wise_n_epochs = self.conf['segment_wise_n_epochs']  # number of total epochs to fine tune in Algorithm 1\n        self.segment_wise_lr = self.conf['segment_wise_lr']  # initial learning rate\n        self.segment_wise_step = self.conf['segment_wise_step']  # multi-step for linear learning rate\n\n        # ------------- layer-wise optimization options ------------------------\n        self.layer_wise_lr = self.conf['layer_wise_lr']  # initial learning rate\n\n        # ------------- network-wise optimization options ----------------------\n        self.network_wise_n_epochs = self.conf['network_wise_n_epochs']  # number of total epochs to train\n        self.network_wise_lr = self.conf['network_wise_lr']  # initial learning rate\n        self.network_wise_step = self.conf['network_wise_step']  # multi-step for linear learning rate\n\n        # ------------- model options ------------------------------------------\n        self.net_type = self.conf['net_type']  # options: resnet | preresnet | vgg\n        self.experiment_id = self.conf['experiment_id']  # identifier for experiment\n        self.depth = self.conf['depth']  # resnet depth: (n-2)%6==0\n        self.n_classes = self.conf['n_classes']  # number of classes in the dataset\n\n        # ---------- resume or retrain options ---------------------------------\n        # path to model to retrain with, load model state_dict only\n        self.retrain = None if len(self.conf['retrain']) == 0 else self.conf['retrain']\n        # path to directory containing checkpoint, load state_dicts of model and optimizer, as well as training epoch\n        self.resume = None if len(self.conf['resume']) == 0 else self.conf['resume']\n        self.retrain_p = None if len(self.conf['retrain_p']) == 0 else self.conf['retrain_p']\n\n    def params_check(self):\n        if self.dataset in [\"cifar10\"]:\n            self.n_classes = 10\n        elif self.dataset == \"imagenet\":\n            self.n_classes = 1000\n\n    def set_save_path(self):\n        self.params_check()\n\n
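        # NOTE: the directory name built below encodes the key hyper-parameters\n        # (net type, dataset, batch size, the three learning rates, pruning rate,\n        # sample cap and experiment id), so runs with different settings do not\n        # collide on disk.\n        if 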
self.net_type in [\"preresnet\", \"resnet\"]:\n self.save_path = self.save_path + \\\n \"log_{}{:d}_{}_bs{:d}_sglr{:.3f}_nwlr{:.3f}_lylr{:.3f}_channel_selection_r{}_n{}_{}/\".format(\n self.net_type, self.depth, self.dataset,\n self.batch_size, self.segment_wise_lr, self.network_wise_lr, self.layer_wise_lr,\n self.pruning_rate, self.max_samples, self.experiment_id)\n else:\n self.save_path = self.save_path + \\\n \"log_{}_{}_bs{:d}_sglr{:.3f}_nwlr{:.3f}_lylr{:.3f}_channel_selection_r{}_n{}_{}/\".format(\n self.net_type, self.dataset,\n self.batch_size, self.segment_wise_lr, self.network_wise_lr, self.layer_wise_lr,\n self.pruning_rate, self.max_samples, self.experiment_id)\n\n if os.path.exists(self.save_path):\n print(\"{} file exist!\".format(self.save_path))\n action = input(\"Select Action: d (delete) / q (quit):\").lower().strip()\n act = action\n if act == 'd':\n shutil.rmtree(self.save_path)\n else:\n raise OSError(\"Directory {} exits!\".format(self.save_path))\n\n if not os.path.exists(self.save_path):\n os.makedirs(self.save_path)\n","sub_path":"dcp/option.py","file_name":"option.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"186695310","text":"import os\nimport shutil\nfrom datetime import datetime\n\nimport pandas as pd\n\nfrom prepare_db.parse_csv import SPCParser\n\nCELLS_ML_FLAG = True\n\ncsv_fname = '/data6/phytoplankton-db/csv/hab_micro_raw_2017_2019.csv'\nif CELLS_ML_FLAG:\n csv_fname = csv_fname.replace('.csv', '_cells-mL.csv')\n\n# Load dataset\nNUM_CLASS = 9\ndf = pd.read_csv(csv_fname)\nDATE_COL = 'SampleID'\nTIME_COL = 'Time Collected LOCAL PT (hhmm)'\nCELL_COUNT_LIMIT_COL = 'Cell Count Detection Limit'\nCOUNTED_VOLUME_COL = 'Volume Counted (mL)'\nDATETIME_COL = 'datetime'\nCLASS_COL = 'class'\nCELLS_ML_COL = 'micro cells/mL'\nRAW_COUNT_COL = 'micro raw count'\n\nspc = SPCParser()\n\n# Preprocess classes\ndf = df.drop(['Polykrikos spp.', 'Prorocentrum gracile', 'Prorocentrum micans'], axis=1)\ndf = df.rename({\n 'Akashiwo sanguinea': \"Akashiwo\",\n 'Ceratium falcatiforme & C. fusus': \"Ceratium falcatiforme or fusus\",\n \"Chattonella spp.\": \"Chattonella\",\n \"Cochlodinium spp.\": \"Cochlodinium\",\n \"Gyrodinium spp.\": \"Gyrodinium\",\n 'Pseudo-nitzschia spp.': 'Pseudo-nitzschia chain',\n 'Prorocentrum micans + Prorocentrum spp.': 'Prorocentrum micans',\n 'Total Phytoplankton (Diatoms + DinoS)': 'Total Phytoplankton'}, axis=1)\nclass_col = df.columns[5:-1]\nprint('Class columns extracted: {}'.format(class_col))\n\n# Process dates\ndf[DATETIME_COL] = df[DATE_COL].astype(str) + \" \" + df[TIME_COL].astype(str)\ndf[DATETIME_COL] = pd.to_datetime(df[DATETIME_COL], format='%Y%m%d %H%M').dt.strftime(\n '%Y-%m-%d %H:%M')\n# df[DATE_COL] = pd.to_datetime(df[DATE_COL], format='%Y%m%d').dt.strftime('%Y-%m-%d')\n# df['datetime'] = df['SampleID (YYYYMMDD)'].str.cat(df['Time Collected hhmm (PST)'].astype(str), sep=' ')\ndates = df[DATETIME_COL].to_dict()\nvol_count = pd.Series(df[COUNTED_VOLUME_COL].values, index=df[DATETIME_COL]).to_dict()\ncell_detection_limit = pd.Series(df[CELL_COUNT_LIMIT_COL].values,\n index=df[DATETIME_COL]).to_dict()\nhab_species = list(set(class_col).intersection(set(spc.hab_species[:-1])))\nassert len(hab_species) == NUM_CLASS, f'Number of classes do not match {len(hab_species)} != {NUM_CLASS}'\n\n# Get raw microscopy counts\n# each cell ~ cells/Liter. 
Normalize this by the cell count detection limit to get the\n# raw counts\ntemp = df[hab_species]\n\n\ndef stack_count_data(data, metric_name):\n    data = data.stack().reset_index()\n    data = data.rename({'level_0': DATETIME_COL,\n                        'level_1': CLASS_COL,\n                        0: metric_name}, axis=1)\n    data[DATETIME_COL] = data[DATETIME_COL].map(dates)\n    return data\n\n\n# Get cells/mL (cells/L divided by 1000)\nconverted_volume_rate = 1 / 1000\ntemp_cells_ml = temp.iloc[:, :].multiply(converted_volume_rate, axis=0)\ntemp_cells_ml = stack_count_data(temp_cells_ml, metric_name=CELLS_ML_COL)\n\n# Get raw count\ntemp = temp.iloc[:, :].div(df[CELL_COUNT_LIMIT_COL], axis=0)\ntemp = stack_count_data(temp, metric_name=RAW_COUNT_COL)\n\n# Merge raw counts and cells/mL into one dataframe\ntemp = temp.merge(temp_cells_ml, on=[DATETIME_COL, CLASS_COL])\n\n\n# Get the relative abundance and total abundance\ndef compute_rel_abundance(df, count_col):\n    count = count_col.replace('micro ', '')\n    if count == 'raw count':\n        df[count_col] = df[count_col].round()\n    total_abundances = (df.groupby(DATETIME_COL)[count_col].sum()).to_dict()\n    df[f'micro {count} total abundance'] = df[DATETIME_COL].map(total_abundances)\n    df[f'micro {count} relative abundance'] = df[count_col] / df[\n        f'micro {count} total abundance'] * 100.0\n    return df\n\n\ntemp['label'] = 'gtruth'\ntemp = compute_rel_abundance(temp, RAW_COUNT_COL)\ntemp = compute_rel_abundance(temp, CELLS_ML_COL)\ntemp = temp.sort_values(by=[DATETIME_COL, CLASS_COL])\ntemp['micro ' + CELL_COUNT_LIMIT_COL.lower()] = temp[DATETIME_COL].map(\n    cell_detection_limit)\ntemp['micro ' + COUNTED_VOLUME_COL.lower()] = temp[DATETIME_COL].map(vol_count)\ntemp[[DATETIME_COL, 'sampling time']] = temp[DATETIME_COL].str.split(' ', expand=True)\n\n# Fix datetime\nFIX_DAYLIGHT_SAVINGS_TIME_FLAG = True  # 20200408 HAB 2017_2019 has time error\nif FIX_DAYLIGHT_SAVINGS_TIME_FLAG:\n    print('Fixing daylight savings for valid dates')\n    VALID_DATES = '/data6/phytoplankton-db/valid_collection_dates_master.txt'\n    valid_dates = open(VALID_DATES, 'r').read().splitlines()\n    t = temp[temp['datetime'].isin(valid_dates)].index\n    converted_times = (pd.to_datetime(temp.loc[t, 'sampling time']) + pd.DateOffset(\n        hours=1)).dt.strftime(\"%H:%M\")\n    temp.loc[t, 'sampling time'] = converted_times\n\n
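# NOTE: an existing export is renamed with a YYYYMMDD suffix rather than\n# overwritten, so earlier versions of the CSV stay recoverable.\n# Save and backup dataset\ncsv_fname = '/data6/phytoplankton-db/csv/hab_micro_2017_2019.csv'\nif os.path.exists(csv_fname):\n    backedup_csv_fname = csv_fname + f'.{datetime.now().strftime(\"%Y%m%d\")}'\n    print(f'Micro csv detected. 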
Backing up original csv as {backedup_csv_fname}')\n    shutil.copy(csv_fname, backedup_csv_fname)\ntemp.to_csv(csv_fname, index=False)\nprint(f'CSV saved as {csv_fname}')\n","sub_path":"prepare_db/set_micro_counts.py","file_name":"set_micro_counts.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"167866339","text":"# import moxing as mox\n# mox.file.shift('os', 'mox')\n\nimport os\nimport argparse\nimport time\nimport random\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nos.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'\nimport pandas as pd\n\nimport sys\n# current_path = os.path.dirname('obs://d-cheap-net-shanghai/hanyz/sarNet/main_sar.py')\n# sys.path.append(current_path)\n\nimport models\nfrom utils import *\nfrom optimizer import get_optimizer\nfrom criterion import get_criterion\nfrom scheduler import get_scheduler\nfrom transform import get_transform\nfrom hyperparams import get_hyperparams\nfrom config import Config\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.datasets as datasets\nimport torchvision.models as pytorchmodels\n\nfrom queue_jump import check_gpu_memory\n\nparser = argparse.ArgumentParser(description='PyTorch SARNet')\nparser.add_argument('--config', help='train config file path')\nparser.add_argument('--data_url', type=str, metavar='DIR', default='/home/data/ImageNet/', help='path to dataset')\nparser.add_argument('--train_url', type=str, metavar='PATH', default='/data/hanyz/code/sarNet/log/',\n                    help='path to save result and checkpoint (default: results/savedir)')\nparser.add_argument('--dataset', metavar='DATASET', default='imagenet', choices=['cifar10', 'cifar100', 'imagenet'],\n                    help='dataset')\nparser.add_argument('-j', '--workers', default=64, type=int, metavar='N',\n                    help='number of data loading workers (default: 64)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n                    help='number of total epochs to run')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n                    help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch_size', default=128, type=int,\n                    metavar='N', help='mini-batch size (default: 128)')\nparser.add_argument('--lr', '--learning_rate', default=0.1, type=float,\n                    metavar='LR', help='initial learning rate (default: 0.1)')\nparser.add_argument('--scheduler', default='multistep', type=str, metavar='T',\n                    help='learning rate strategy (default: multistep)',\n                    choices=['cosine', 'multistep', 'linear'])\nparser.add_argument('--warmup_epoch', default=None, type=int, metavar='N',\n                    help='number of epochs to warm up')\nparser.add_argument('--warmup_lr', default=0.1, type=float,\n                    metavar='LR', help='initial warm up learning rate (default: 0.1)')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n                    help='momentum (default: 0.9)')\nparser.add_argument('--weight_decay', '--wd', default=1e-4, type=float,\n                    metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print_freq', '-p', default=200, type=int,\n                    metavar='N', help='print frequency (default: 200)')\n
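# NOTE: --resume restores the full training state (model, optimizer, epoch and\n# logged metrics), whereas --finetune_from below loads model weights only.\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n                    help='path to latest checkpoint (default: 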
none)')\nparser.add_argument('--finetune_from', default=None, type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--evaluate', action='store_true',\n help='evaluate model on validation set (default: false)')\nparser.add_argument('--evaluate_from', default=None, type=str, metavar='PATH',\n help='path to saved checkpoint (default: none)')\n# hyperparameter\nparser.add_argument('--hyperparams_set_index', default=1, type=int,\n help='choose which hyperparameter set to use')\n# huawei cloud\nparser.add_argument('--no_train_on_cloud', dest='train_on_cloud', action='store_false', default=True,\n help='whether to run the code on huawei cloud')\nparser.add_argument('--init_method', type=str, default='',\n help='an argument needed in huawei cloud, but i do not know its usage')\nparser.add_argument('--test_code', default=0, type=int,\n help='whether to test the code')\n\n# multiprocess\nparser.add_argument('--world_size', default=1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=0, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist_url', default='tcp://127.0.0.1:29501', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist_backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--visible_gpus', type=str, default='0',\n help='visible gpus')\nparser.add_argument('--multiprocessing_distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. 
This is the '\n                         'fastest way to use PyTorch for either single node or '\n                         'multi node data parallel training')\n\nparser.add_argument('--t0', default=1.0, type=float, metavar='M', help='initial Gumbel-Softmax temperature')\nparser.add_argument('--t_last', default=0.01, type=float, metavar='M', help='final Gumbel-Softmax temperature')\nparser.add_argument('--target_rate', default=2.5, type=float, metavar='M', help='target activation rate for the mask regularizer')\nparser.add_argument('--lambda_act', default=1.0, type=float, metavar='M', help='weight of the activation-rate loss')\nparser.add_argument('--temp', default=0.1, type=float, metavar='M', help='current temperature (overwritten during training)')\nparser.add_argument('--lrfact', default=1, type=float,\n                    help='learning rate factor')\nparser.add_argument('--dynamic_rate', default=0, type=int)\n\nparser.add_argument('--arch', default='sar_resnet_imgnet4stage_alphaBase', type=str)\nparser.add_argument('--arch_config', default='sar_resnet34_alphaBase_4stage_cifar', type=str)\nparser.add_argument('--patch_groups', default=2, type=int)\nparser.add_argument('--alpha', default=2, type=int)\nparser.add_argument('--beta', default=1, type=int)\nparser.add_argument('--base_scale', default=2, type=int)\nparser.add_argument('--mask_size', default=7, type=int)\nparser.add_argument('--temp_scheduler', default='exp', type=str)\nparser.add_argument('--t_last_epoch', default=160, type=int)\nparser.add_argument('--ta_begin_epoch', default=30, type=int)\nparser.add_argument('--ta_last_epoch', default=60, type=int)\n\nparser.add_argument('--use_amp', type=int, default=0,\n                    help='use apex mixed-precision training (1 to enable)')\n\nargs = parser.parse_args()\nargs.train_on_cloud = False\n# args.dynamic_rate = True if args.dynamic_rate > 0 else False\nif args.use_amp > 0:\n    try:\n        from apex import amp\n        from apex.parallel import DistributedDataParallel as DDP\n        from apex.parallel import convert_syncbn_model\n        has_apex = True\n    except ImportError:\n        os.system('pip --default-timeout=100 install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./apex-master')\n        from apex import amp\n        from apex.parallel import DistributedDataParallel as DDP\n        from apex.parallel import convert_syncbn_model\n        has_apex = True\n        print('successfully installed apex')\n\nbest_acc1 = 0\nbest_acc1_corresponding_acc5 = 0\nval_acc_top1 = []\nval_acc_top5 = []\ntr_acc_top1 = []\ntr_acc_top5 = []\ntrain_loss = []\nvalid_loss = []\nlr_log = []\nepoch_log = []\nval_act_rate = []\nval_FLOPs = []\nargs.temp = args.t0\n\ndef main():\n    # check_gpu_memory()\n    str_t0 = str(args.t0).replace('.', '_')\n    str_lambda = str(args.lambda_act).replace('.', '_')\n    str_ta = str(args.target_rate).replace('.', '_')\n    str_t_last = str(args.t_last).replace('.', '_')\n    save_path = f'{args.train_url}_ImageNet/{args.arch_config}_OptimRate/g{args.patch_groups}_a{args.alpha}b{args.beta}_s{args.base_scale}/t0_{str_t0}_tLast{str_t_last}_tempScheduler_{args.temp_scheduler}_target{str_ta}_optimizeFromEpoch{args.ta_begin_epoch}to{args.ta_last_epoch}_dr{args.dynamic_rate}_lambda_{str_lambda}/'\n    args.train_url = save_path\n    if not args.train_on_cloud:\n        if not os.path.exists(args.train_url):\n            os.makedirs(args.train_url)\n\n    assert args.dataset == 'imagenet'\n    args.num_classes = 1000\n    args.IMAGE_SIZE = 224\n\n    args.multiprocessing_distributed = True\n    args.use_amp = True if args.use_amp == 1 else 0\n\n    if args.seed is not None:\n        random.seed(args.seed)\n        torch.manual_seed(args.seed)\n        cudnn.deterministic = True\n        warnings.warn('You have chosen to seed training. '\n                      'This will turn on the CUDNN deterministic setting, '\n                      'which can slow down your training considerably! 
'\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n global best_acc1_corresponding_acc5\n global val_acc_top1\n global val_acc_top5\n global tr_acc_top1\n global tr_acc_top5\n global train_loss\n global valid_loss\n global lr_log\n global epoch_log\n global val_FLOPs\n global val_act_rate\n args.gpu = gpu\n args.cfg = Config.fromfile(args.config)\n print(args.cfg)\n args.hyperparams_set_index = args.cfg['train_cfg']['hyperparams_set_index']\n args = get_hyperparams(args, test_code=args.test_code)\n print('Hyper-parameters:', str(args))\n\n if args.train_on_cloud:\n with mox.file.File(args.train_url+'train_configs.txt', \"w\") as f:\n f.write(str(args))\n else:\n with open(args.train_url+'train_configs.txt', \"w\") as f:\n f.write(str(args))\n\n # assert(0==1)\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n\n ### Create model\n # model = pytorchmodels.resnet50(pretrained=False)\n model_type = args.arch_config\n model = eval(f'models.{args.arch}.{args.arch_config}')(args)\n\n print('Model Struture:', str(model))\n if args.train_on_cloud:\n with mox.file.File(args.train_url+'model_arch.txt', \"w\") as f:\n f.write(str(model))\n else:\n with open(args.train_url+'model_arch.txt', \"w\") as f:\n f.write(str(model))\n ### Calculate FLOPs & Param\n model.eval()\n rand_inp = torch.rand(1, 3, 224,224)\n _, _, args.full_flops = model.forward_calc_flops(rand_inp, temperature=1e-8)\n args.full_flops /= 1e9\n print(f'FULL FLOPs: {args.full_flops}')\n\n ### Optionally evaluate from a model\n if args.evaluate_from is not None:\n args.evaluate = True\n state_dict = torch.load(args.evaluate_from, map_location='cpu')['state_dict']\n\n # create new OrderedDict that does not contain `module.`\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n # load params\n model.load_state_dict(new_state_dict)\n\n ### Define loss function (criterion) and optimizer\n criterion = get_criterion(args).cuda(args.gpu)\n optimizer = get_optimizer(args, model)\n scheduler = get_scheduler(args)\n \n\n if args.distributed:\n # 
For multiprocessing distributed, DistributedDataParallel constructor\n        # should always set the single device scope, otherwise,\n        # DistributedDataParallel will use all available devices.\n        if args.gpu is not None:\n            torch.cuda.set_device(args.gpu)\n            model.cuda(args.gpu)\n            if args.use_amp:\n                model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n            # When using a single GPU per process and per\n            # DistributedDataParallel, we need to divide the batch size\n            # ourselves based on the total number of GPUs we have\n            args.batch_size = int(args.batch_size / ngpus_per_node)\n            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n        else:\n            model.cuda()\n            if args.use_amp:\n                model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n            # DistributedDataParallel will divide and allocate batch_size to all\n            # available GPUs if device_ids are not set\n            model = torch.nn.parallel.DistributedDataParallel(model)\n    elif args.gpu is not None:\n        torch.cuda.set_device(args.gpu)\n        model = model.cuda(args.gpu)\n        if args.use_amp:\n            model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n    else:\n        # DataParallel will divide and allocate batch_size to all available GPUs\n        model = torch.nn.DataParallel(model).cuda()\n\n    # optionally resume from a checkpoint\n    # args.gpu = None\n    if args.finetune_from is not None:\n        if os.path.isfile(args.finetune_from):\n            print(\"=> loading checkpoint '{}'\".format(args.finetune_from))\n            if args.gpu is None:\n                checkpoint = torch.load(args.finetune_from)\n            else:\n                # Map model to be loaded to specified single gpu.\n                loc = 'cuda:{}'.format(args.gpu)\n                checkpoint = torch.load(args.finetune_from, map_location=loc)\n            model.load_state_dict(checkpoint['state_dict'])\n            print(\"=> loaded checkpoint '{}' (epoch {})\"\n                  .format(args.finetune_from, checkpoint['epoch']))\n        else:\n            print(\"=> no checkpoint found at '{}'\".format(args.finetune_from))\n\n    if args.resume:\n        if os.path.isfile(args.resume):\n            print(\"=> loading checkpoint '{}'\".format(args.resume))\n            if args.gpu is None:\n                checkpoint = torch.load(args.resume)\n            else:\n                # Map model to be loaded to specified single gpu.\n                loc = 'cuda:{}'.format(args.gpu)\n                checkpoint = torch.load(args.resume, map_location=loc)\n            args.start_epoch = checkpoint['epoch']\n            best_acc1 = checkpoint['best_acc1']\n            best_acc1_corresponding_acc5 = checkpoint['best_acc1_corresponding_acc5']\n            # if args.gpu is not None:\n            #     # best_acc1 may be from a checkpoint from a different GPU\n            #     best_acc1 = best_acc1.to(args.gpu)\n            #     best_acc1_corresponding_acc5 = best_acc1_corresponding_acc5.to(args.gpu)\n            #     pass\n\n            model.load_state_dict(checkpoint['state_dict'])\n            if not args.evaluate:\n                optimizer.load_state_dict(checkpoint['optimizer'])\n                val_acc_top1 = checkpoint['val_acc_top1']\n                val_acc_top5 = checkpoint['val_acc_top5']\n                tr_acc_top1 = checkpoint['tr_acc_top1']\n                tr_acc_top5 = checkpoint['tr_acc_top5']\n                train_loss = checkpoint['train_loss']\n                valid_loss = checkpoint['valid_loss']\n                lr_log = checkpoint['lr_log']\n                val_act_rate = checkpoint['val_act_rate']\n                val_FLOPs = checkpoint['val_FLOPs']\n                args.temp = checkpoint['temp']\n                try:\n                    epoch_log = checkpoint['epoch_log']\n                except KeyError:\n                    print('There is no epoch_log in checkpoint!')\n            print(\"=> loaded checkpoint '{}' (epoch {})\"\n                  .format(args.resume, checkpoint['epoch']))\n        else:\n            print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n
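    # NOTE: the commented-out block below would re-initialize conv/BN weights\n    # (Kaiming for convs, constants for BN) with a much smaller normal init for\n    # the gating ('gs') layers; it is kept for reference only.\n    # for k, m in model.named_modules():\n    #     if 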
isinstance(m, nn.Conv2d):\n    #         nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n    #         if 'gs' in str(k):\n    #             m.weight.data.normal_(0, 0.001)\n    #     elif isinstance(m, nn.BatchNorm2d):\n    #         if 'gs' in str(k):\n    #             nn.init.constant_(m.weight, 1)\n    #             nn.init.constant_(m.bias, 0)\n    #             print('init gs ok')\n\n    cudnn.benchmark = True\n\n    ### Data loading\n    print('Train data augmentation:', get_transform(args, is_train_set=True))\n    print('Valid data augmentation:', get_transform(args, is_train_set=False))\n\n    traindir = args.data_url + 'train/'\n    valdir = args.data_url + 'val/'\n\n    train_dataset = datasets.ImageFolder(\n        traindir,\n        get_transform(args, is_train_set=True))\n\n    if args.distributed:\n        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n    else:\n        train_sampler = None\n\n    train_loader = torch.utils.data.DataLoader(\n        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n        num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n    val_loader = torch.utils.data.DataLoader(\n        datasets.ImageFolder(\n            valdir,\n            get_transform(args, is_train_set=False)),\n        batch_size=args.batch_size * torch.cuda.device_count(), shuffle=False,\n        num_workers=args.workers, pin_memory=True)\n\n    if args.evaluate:\n        # target_rate = args.target_rate\n        validate(val_loader, model, criterion, args, target_rate=args.target_rate)\n        return\n\n    epoch_time = AverageMeter('Epoch Time', ':6.3f')\n    start_time = time.time()\n    # args.temp = args.t0\n    for epoch in range(args.start_epoch, args.epochs):\n        if args.distributed:\n            train_sampler.set_epoch(epoch)\n        ### Train for one epoch\n        target_rate = adjust_target_rate(epoch, args)\n        print(f'Epoch {epoch}, Target rate: {target_rate}')\n        print(f'Temperature: {args.temp}')\n        \n        tr_acc1, tr_acc5, tr_loss, lr = \\\n            train(train_loader, model, criterion, optimizer, scheduler, epoch, args, target_rate)\n\n        if epoch % 10 == 0 or epoch >= args.start_eval_epoch:\n            ### Evaluate on validation set\n            val_acc1, val_acc5, val_loss, val_rate, val_flops = validate(val_loader, model, criterion, args, target_rate)\n            # assert(0==1)\n            ### Remember best Acc@1 and save checkpoint\n            is_best = val_acc1 > best_acc1\n            if is_best:\n                best_acc1_corresponding_acc5 = val_acc5\n            best_acc1 = max(val_acc1, best_acc1)\n\n            if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n                                                        and args.rank % ngpus_per_node == 0):\n                val_acc_top1.append(val_acc1)\n                val_acc_top5.append(val_acc5)\n                tr_acc_top1.append(tr_acc1)\n                tr_acc_top5.append(tr_acc5)\n                val_act_rate.append(val_rate)\n                val_FLOPs.append(val_flops)\n                train_loss.append(tr_loss)\n                valid_loss.append(val_loss)\n                lr_log.append(lr)\n                epoch_log.append(epoch)\n                df = pd.DataFrame({'val_acc_top1': val_acc_top1, 
'val_acc_top1': val_acc_top1,\n 'val_acc_top5': val_acc_top5,\n 'val_act_rate': val_act_rate,\n 'val_FLOPs': val_FLOPs,\n 'tr_acc_top1': tr_acc_top1,\n 'tr_acc_top5': tr_acc_top5,\n 'train_loss': train_loss,\n 'valid_loss': valid_loss,\n 'lr_log': lr_log,\n 'epoch_log': epoch_log,\n 'temp': args.temp,\n }, args, is_best, filename=ckpt_name)\n\n epoch_time.update(time.time() - start_time, 1)\n print('Duration: %4f H, Left Time: %4f H' % (\n epoch_time.sum / 3600, epoch_time.avg * (args.epochs - epoch - 1) / 3600))\n start_time = time.time()\n\n print(' * Best Acc@1 {best_acc1:.3f} Acc@5 {best_acc1_corresponding_acc5:.3f}'\n .format(best_acc1=best_acc1, best_acc1_corresponding_acc5=best_acc1_corresponding_acc5))\n return\n\n\ndef train(train_loader, model, criterion, optimizer, scheduler, epoch, args, target_rate):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses_cls = AverageMeter('Loss_cls', ':.4e')\n losses_act = AverageMeter('loss_act', ':.4e')\n losses = AverageMeter('Loss', ':.4e')\n act_rates = AverageMeter('Activation rate', ':.2e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n # FLOPs = AverageMeter('FLOPs', ':.4e')\n train_batches_num = len(train_loader)\n\n train_progress = ProgressMeter(\n train_batches_num,\n [batch_time, data_time,act_rates, losses, losses_cls, losses_act, top1, top5],\n prefix=\"Epoch: [{}/{}]\".format(epoch, args.epochs))\n\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n\n ### Adjust learning rate\n lr = scheduler.step(optimizer, epoch, batch=i, nBatch=len(train_loader))\n\n ### Measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n input = input.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n ### Compute output\n adjust_gs_temperature(epoch, i, train_batches_num, args)\n if args.mixup > 0.0:\n input, target_a, target_b, lam = mixup_data(input, target, args.mixup)\n output, _masks = model(input, temperature=args.temp, inference=False)\n loss_cls = mixup_criterion(criterion, output, target_a, target_b, lam)\n else:\n output, _masks = model(input, temperature=args.temp, inference=False)\n loss_cls = criterion(output, target)\n ### Measure accuracy and record loss\n if args.mixup > 0.0:\n acc1_a, acc5_a = accuracy(output.data, target_a, topk=(1, 5))\n acc1_b, acc5_b = accuracy(output.data, target_b, topk=(1, 5))\n acc1 = lam * acc1_a + (1 - lam) * acc1_b\n acc5 = lam * acc5_a + (1 - lam) * acc5_b\n else:\n acc1, acc5 = accuracy(output.data, target, topk=(1, 5))\n \n\n act_rate = 0.0\n loss_act = 0.0\n for act in _masks:\n act_rate += torch.mean(act)\n loss_act += torch.pow(target_rate-torch.mean(act), 2)\n act_rate = torch.mean(act_rate / len(_masks))\n loss_act = args.lambda_act * torch.mean(loss_act/len(_masks))\n # print(target_rate, act_rate, loss_act, args.lambda_act)\n if args.dynamic_rate > 0:\n loss = loss_cls + loss_act\n else:\n loss = loss_cls + loss_act if epoch >= args.ta_begin_epoch else loss_cls\n\n # FLOPs.update(flops.item(), input.size(0))\n act_rates.update(act_rate.item(), input.size(0))\n losses_act.update(loss_act.item(), input.size(0))\n losses_cls.update(loss_cls.item(), input.size(0))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1.item(), input.size(0))\n top5.update(acc5.item(), input.size(0))\n \n if math.isnan(loss.item()):\n optimizer.zero_grad()\n continue\n elif math.isnan(loss_act.item()):\n 
optimizer.zero_grad()\n            if args.use_amp:\n                with amp.scale_loss(loss_cls, optimizer) as scaled_loss:\n                    scaled_loss.backward()\n            else:\n                loss_cls.backward()\n            continue \n\n        ### Compute gradient and do SGD step\n        optimizer.zero_grad()\n        if args.use_amp:\n            with amp.scale_loss(loss, optimizer) as scaled_loss:\n                scaled_loss.backward()\n        else:\n            loss.backward()\n        optimizer.step()\n\n        ### Measure elapsed time\n        batch_time.update(time.time() - end)\n        end = time.time()\n\n        if i % args.print_freq == 0:\n            train_progress.display(i)\n            print('LR: %6.4f' % (lr))\n            # print('FLOPs: %6.4f' % (flops))\n\n    return top1.avg, top5.avg, losses.avg, lr\n\ndef validate(val_loader, model, criterion, args, target_rate):\n    batch_time = AverageMeter('Time', ':6.3f')\n    losses_cls = AverageMeter('Loss_cls', ':.4e')\n    losses_act = AverageMeter('loss_act', ':.4e')\n    FLOPs = AverageMeter('FLOPs', ':.2e')\n    losses = AverageMeter('Loss', ':.4e')\n    act_rates = AverageMeter('Activation rate', ':.2e')\n    top1 = AverageMeter('Acc@1', ':6.2f')\n    top5 = AverageMeter('Acc@5', ':6.2f')\n    progress = ProgressMeter(\n        len(val_loader),\n        [batch_time,FLOPs, act_rates,losses, losses_cls, losses_act, top1, top5],\n        prefix='Test: ')\n\n    model.eval()\n\n    end = time.time()\n    with torch.no_grad():\n        for i, (input, target) in enumerate(val_loader):\n            if args.gpu is not None:\n                input = input.cuda(args.gpu, non_blocking=True)\n                target = target.cuda(args.gpu, non_blocking=True)\n\n            ### Compute output single crop\n            # output = model(input)\n            output, _masks, flops = model.module.forward_calc_flops(input, temperature=args.t_last, inference=False)\n            flops /= 1e9\n            loss_cls= criterion(output, target)\n            \n            act_rate = 0.0\n            loss_act = 0.0\n            for act in _masks:\n                act_rate += torch.mean(act)\n                loss_act += torch.pow(target_rate-torch.mean(act), 2)\n            act_rate = torch.mean(act_rate/len(_masks))\n            loss_act = torch.mean(loss_act/len(_masks))\n            loss_act = args.lambda_act * loss_act\n            loss = loss_cls + loss_act\n\n            acc1, acc5 = accuracy(output.data, target, topk=(1, 5))\n\n            dist.all_reduce(flops)\n            flops /= args.world_size\n            dist.all_reduce(acc1)\n            acc1 /= args.world_size\n            dist.all_reduce(acc5)\n            acc5 /= args.world_size\n            dist.all_reduce(loss)\n            loss /= args.world_size\n            dist.all_reduce(loss_cls)\n            loss_cls /= args.world_size\n            dist.all_reduce(loss_act)\n            loss_act /= args.world_size\n            dist.all_reduce(act_rate)\n            act_rate /= args.world_size\n            \n            FLOPs.update(flops.item(), input.size(0))\n            act_rates.update(act_rate.item(), input.size(0))\n            losses_act.update(loss_act.item(),input.size(0))\n            losses_cls.update(loss_cls.item(), input.size(0))\n\n            # Compute output ten crop\n            # bs, ncrops, c, h, w = input.size()\n            # output_ncrop = model(input.view(-1, c, h, w))\n            # output = output_ncrop.view(bs, ncrops, -1).mean(1)\n            # loss = criterion(output, target)\n\n            ### Measure accuracy and record loss\n            \n            losses.update(loss.data.item(), input.size(0))\n            top1.update(acc1.item(), input.size(0))\n            top5.update(acc5.item(), input.size(0))\n\n            ### Measure elapsed time\n            batch_time.update(time.time() - end)\n            end = time.time()\n\n            if i % 10 == 0:\n                progress.display(i)\n\n        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n              .format(top1=top1, top5=top5))\n\n    return top1.avg, top5.avg, losses.avg, act_rates.avg, FLOPs.avg\n\ndef adjust_gs_temperature(epoch, step, len_epoch, args):\n    if epoch >= args.t_last_epoch:\n        return args.t_last\n    else:\n        T_total = args.t_last_epoch * len_epoch\n        T_cur = epoch * len_epoch + step\n        if args.temp_scheduler == 'exp':\n            alpha = 
math.pow(args.t_last / args.t0, 1 / T_total)\n            args.temp = math.pow(alpha, T_cur) * args.t0\n        elif args.temp_scheduler == 'linear':\n            args.temp = (args.t0 - args.t_last) * (1 - T_cur / T_total) + args.t_last\n        else:\n            args.temp = 0.5 * (args.t0-args.t_last) * (1 + math.cos(math.pi * T_cur / (T_total))) + args.t_last\n\ndef adjust_target_rate(epoch, args):\n    if args.dynamic_rate == 0:\n        return args.target_rate\n    elif args.dynamic_rate == 1:\n        if epoch < args.ta_last_epoch // 2:\n            target_rate = 1.0\n        else:\n            target_rate = args.target_rate\n    else:\n        if epoch < args.ta_begin_epoch :\n            target_rate = 1.0\n        elif epoch < args.ta_begin_epoch + (args.ta_last_epoch-args.ta_begin_epoch)//2:\n            target_rate = args.target_rate + (1.0 - args.target_rate)/3*2\n        elif epoch < args.ta_last_epoch:\n            target_rate = args.target_rate + (1.0 - args.target_rate)/3\n        else:\n            target_rate = args.target_rate\n    return target_rate\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main_sar_imgnet_finetune_rate_local.py","file_name":"main_sar_imgnet_finetune_rate_local.py","file_ext":"py","file_size_in_byte":31641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"162727076","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 24 02:17:38 2015\n\n@author: Aleksey\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nimport statsmodels.api as sm \n \ndef GetDrawDowns(ts): \n    \"\"\"\n    Returns a time series of drawdowns for a given time series\n    Could be used to plot an underwater curve\n    \"\"\"\n    local_dd = lambda subts: subts[-1]/max(subts)-1\n    cumret = (1+ts).cumprod()\n    T = len(cumret)\n    dds = np.zeros(T)\n    \n    for t in range(1,T):\n        dds[t] = local_dd(cumret[0:t+1])\n    \n    dds[0] = cumret[0]/1 - 1\n    return pd.Series(dds,index=ts.index)\n    \ndef MaxDD(ts):\n    \"\"\" \n    Calculate the maximum drawdown of a time series\n    \"\"\"\n    dds = GetDrawDowns(ts)\n    return min(dds)\n\ndef CalcCumRet(rets,i):\n    c = rets.icol(i)\n    cr = (1+c).cumprod()\n    T = c.count()\n    lr = cr.irow(T-1)\n    return lr \n\n\n\ndef BasicStats(rets):\n    T = rets.count()[0]\n    C = len(rets.columns)\n    mu = np.mean(rets)\n    sd = np.std(rets)\n    annvol = sd * np.sqrt(12)\n    vami= np.array([CalcCumRet(rets,c) for c in range(C)])\n    annret = vami ** (12/T)\n    cumret = vami - 1\n    cv = rets.cov().values\n    variances = cv.diagonal()\n    cv1 = cv[:,0]\n    betas = cv1/variances\n    \n    dds = [MaxDD(rets.icol(i)) for i in range(C)] \n    \n    dict = {'mu':mu.values,\n            'std':sd.values,\n            'cumret':cumret,\n            'annvol':annvol.values,\n            'annret':annret,\n            'vami':vami * 1000,\n            'var':mu - 1.96 * sd,\n            'skew':stats.skew(rets),\n            'kurt':stats.kurtosis(rets),\n            'betas':betas,\n            'maxdd':dds\n            }\n    return pd.DataFrame(dict) \n\ndef RegimeAnalysis(rets):\n    groupByRegime = rets.groupby('Regime')\n    retByRegime = groupByRegime.mean()\n    return retByRegime\n\n\n\ndef convexityAnalysis(tsdata,namex,namey):\n    x = tsdata[namex]\n    y = tsdata[namey]\n    x2 = x**2\n    tmydf = pd.DataFrame(data=[x,x2]).transpose()\n    tmydf.columns = ['x','x2']\n    model = sm.OLS(y,tmydf)\n    modelfit = model.fit()\n    betas = modelfit.params\n    mn = x.min()\n    mx = x.max()\n    xfit = np.linspace(mn,mx,100)\n    yfit = betas[0]*xfit + betas[1]*(xfit**2)\n    res = {'betas':betas,\n           'xhat':xfit,\n           'yhat':yfit,\n           'x':x,\n           'y':y}\n    return res\n    ","sub_path":"hedgefundguy/commonlib/commonlib.py","file_name":"commonlib.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} 
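Editor's note: a minimal usage sketch for the drawdown helpers in commonlib.py above, under these assumptions: commonlib.py is importable from the working directory (the import path below is hypothetical), the returns are a pandas Series with a date index, and an older pandas version is installed that still supports the positional Series[-1] access this 2015-era module relies on.

import numpy as np
import pandas as pd
from commonlib import GetDrawDowns, MaxDD  # hypothetical import path

# twelve months of synthetic returns with a fixed seed
idx = pd.date_range('2015-01-31', periods=12, freq='M')
rets = pd.Series(np.random.default_rng(7).normal(0.01, 0.04, 12), index=idx)

underwater = GetDrawDowns(rets)  # one drawdown value per period (the underwater curve)
print(underwater)
print(MaxDD(rets))               # the most negative drawdown over the sample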
+{"seq_id":"358148289","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## CS1403 — Design and Analysis of Algorithms\n# \n# ### Session — 08\n\n# ### Min Heap implementation\n\n# In[1]:\n\n\nclass minheap(object):\n __slots__ = ['_size','_array']\n\n def __init__(self,firstOb):\n self._size = 0\n self._array = [firstOb]*1005\n \n def __len__(self):\n return self._size\n\n def display(self):\n i = 1\n while i <= self._size:\n print(str(self._array[i]))\n i = i + 1\n\n def insert(self,newOb):\n self._size = self._size + 1\n i = self._size\n \n while self._array[i//2] > newOb:\n self._array[i] = self._array[i//2]\n i = i // 2\n \n self._array[i] = newOb\n\n def delete(self):\n if self._size == 0:\n return None\n \n min = self._array[1]\n last = self._array[self._size]\n self._size = self._size - 1\n\n i = 1\n while i * 2 <= self._size:\n child = i * 2\n \n if child != self._size and self._array[child + 1] < self._array[child]:\n child += 1\n \n if last > self._array[child]:\n self._array[i] = self._array[child]\n else:\n break\n \n i = child\n\n self._array[i] = last\n return min\n\n\n# ### Kruskal’s algorithm\n\n# In[3]:\n\n\nimport numpy as np\n\nclass edge(object):\n __slots__ = ['_edge']\n\n def __init__(self,l=(-1,-1,-1)):\n self._edge = l\n\n def __lt__(self,other):\n return self._edge[2] < other._edge[2]\n\n def __gt__(self,other): \n return self._edge[2] > other._edge[2]\n\n def getEdge(self):\n return self._edge\n\n def __str__(self):\n return str(self._edge)\n\nclass disjoint_set(object):\n __slots__ = ['_arr','_size']\n\n def __init__(self):\n self._arr = np.array([-1 for i in range(200)])\n self._size = 0\n\n def initialise(self,size):\n assert(type(size) == int and size <= 200)\n self._size = size - 1 \n\n def findClass(self,a):\n assert(type(a) == int)\n assert(a-1 <= self._size)\n\n a = a - 1\n while self._arr[a] > -1:\n a=self._arr[a]\n\n return a\n\n def merge(self,a,b):\n assert(type(a) == int and type(b) == int)\n assert(a-1 <= self._size and b-1 <= self._size)\n\n class_a = self.findClass(a)\n class_b = self.findClass(b)\n\n\n #Signs swapped since we are comparing negative numbers\n if self._arr[class_a] > self._arr[class_b]:\n self._arr[class_a] = class_b\n elif self._arr[class_a] < self._arr[class_b]:\n self._arr[class_b] = class_a\n else:\n self._arr[class_a] -= 1\n self._arr[class_b] = class_a\n\n def __str__(self):\n string = ''\n for x in range(self._size+1):\n string += ( str(self._arr[x]) + ' ' )\n return string\n\nn = 100\ne = 1000\n\nwith open('graph_ip.txt','r') as input_file:\n ip = input_file.read().split('\\n')\n\n edges = []\n\n for i in range(e):\n edges.append(edge(tuple(map(int,ip[i].split(' ')))))\n\n\n heap = minheap(edge())\n\n for i in range(e):\n heap.insert(edges[i])\n\n ds = disjoint_set()\n ds.initialise(n)\n\n selected = []\n count = 0\n length = 0\n\n while len(heap):\n edgeTuple = heap.delete().getEdge()\n class_l = ds.findClass(edgeTuple[0])\n class_r = ds.findClass(edgeTuple[1])\n\n if class_l != class_r:\n selected.append(edgeTuple)\n ds.merge(edgeTuple[0],edgeTuple[1])\n count += 1\n length += edgeTuple[2]\n if count == n - 1:\n break\n\n print('The selected edges are: ',selected)\n print('Length of the path is : ',length)\n\n","sub_path":"Assignment-8/mst.py","file_name":"mst.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"268260010","text":"def solution(A, B):\n stack = []\n size = count = 0\n for i in range(len(A)):\n if B[i] == 0:\n 
while size > 0 and A[i] > stack[size - 1]:\n stack.pop()\n size -= 1\n if size == 0:\n count += 1\n elif B[i] == 1:\n stack += [A[i]]\n size += 1\n return count + size\n","sub_path":"Fish.py","file_name":"Fish.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"614648458","text":"'''\nhttp://www.tutorialspoint.com/python/python_networking.htm\n\nmust convert data to binary to send it by using\n\n.encode('utf-8') to convert strings to binary for .send(port#)\n.decode('utf-8') to convert binary back into regular strings for .recv(port#)\n\n'''\nimport time\nimport datetime \nimport socket # Import socket module\n\n\n\ns = socket.socket() # Create a socket object\nhost = socket.gethostname() # Get local machine name\nport = 12345 # Reserve a port for your service.\ns.bind((host, port)) # Bind to the port\n\ns.listen(5) # Now wait for client connection.\n\n\ncurTime=datetime.datetime.now() #get the current time using datetime\nprint('server started at: ',curTime)\n\nwhile True:\n \n curTime=time.ctime(time.time()) #get the current time using time\n c, addr = s.accept() # Establish connection with client.\n print ('Got connection from ',addr,' at ',curTime)\n c.send('Thank you for connecting'.encode('utf-8'))\n c.close() # Close the connection\n \n ","sub_path":"PythonLearn/Draps_tutorial/networking/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"520501838","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nimport random\nfrom applicant.models import Applicant\n\n# Create your views here.\ndef indexPageView(request) :\n return render(request, 'applicant/newindex.html')\n\ndef registerPageView(request) :\n return render(request, 'applicant/register.html')\n\ndef deleteApplicantPageView(request) :\n return render(request, 'applicant/deleteapplicant.html')\n\ndef updateApplicantsPageView(request) :\n return render(request, 'applicant/updateapplicant.html')\n\ndef alterApplicantPageView(request) :\n if request.method == 'POST':\n firstName = request.POST.get('first_name')\n lastName = request.POST.get('last_name')\n username = request.POST.get('username')\n new_firstName = request.POST.get('new_first_name')\n new_lastName = request.POST.get('new_last_name')\n new_username = request.POST.get('new_username')\n\n applicant = Applicant.objects.get(first_name=firstName, last_name=lastName, username=username)\n applicant.first_name = new_firstName\n applicant.last_name = new_lastName\n applicant.username = new_username\n\n applicant.save()\n \n return HttpResponseRedirect(\"/applicant/\")\n\ndef removeApplicantPageView(request) :\n applicant = Applicant.objects.get(first_name=request.POST['first_name'], last_name=request.POST['last_name'], username=request.POST['username'])\n applicant.delete()\n\n return HttpResponseRedirect(\"/applicant/\") \n\ndef displayApplicantPageView(request) :\n return HttpResponse('Display Applicant View')\n\ndef addApplicantPageView(request) :\n if request.method == 'POST' :\n\n new_applicant = Applicant()\n\n new_applicant.first_name = request.POST.get('first_name')\n new_applicant.last_name = request.POST.get('last_name')\n new_applicant.username = request.POST.get('username')\n new_applicant.email = request.POST.get('email')\n new_applicant.applicant_id = 
(random.randint(209,10000)) # had to input an applicant_id FIX ME: AUTOFILL PK\n\n new_applicant.save()\n\n applicant_data = Applicant.objects.all()\n\n context = {\n 'all_applicants' : applicant_data,\n 'new_applicant' : new_applicant\n }\n return render(request, 'applicant/viewapplicant.html', context)\n else :\n return HttpResponse(\"NOT FOUND\")","sub_path":"applicant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"339749015","text":"#!/usr/bin/python3\n\"\"\" This script handles returning JSON format of\n certain variables \"\"\"\nfrom flask import Flask, jsonify\nfrom api.v1.views import app_views\nfrom models import storage\n\n\n@app_views.route('/api/v1/status')\ndef return_status():\n \"\"\" Returns the status code OK in JSON format \"\"\"\n return {\"status\": \"OK\"}\n\n\n@app_views.route('/api/v1/stats', strict_slashes=False)\ndef status():\n classes = {\n 'amenities': storage.count('Amenity'),\n 'cities': storage.count('City'),\n 'places': storage.count('Place'),\n 'reviews': storage.count('Review'),\n 'states': storage.count('State'),\n 'users': storage.count('User')\n }\n return jsonify(classes)\n","sub_path":"api/v1/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"418511046","text":"# Let H(n) be the number of distinct integer sided equiangular\n# convex hexagons with perimeter not exceeding n.\n# Hexagons are distinct if and only if they are not congruent.\n#\n# You are given H(6) = 1, H(12) = 10, H(100) = 31248.\n# Find H(55106).\n\n# THEORY:\n#\n# The sides of an equiangular hexagon can be separated into three\n# parallel pairs: a <= a', b <= b', c <= c', and a <= b <= c.\n# Furthermore, a' - a = b' - b = c' - c,\n# and a, b, and c are not adjacent to each other\n# (or else the sides wouldn't form a closed polygon.)\n# The side order, clockwise, thus goes a -> b' -> c -> a' -> b -> c'\n# or a -> c' -> b -> a' -> c -> b'.\n# These two hexagons are congruent.\n#\n# The problem therefore simplifies to finding all 4-tuples (a, b, c, d)\n# where 1 <= a <= b <= c, 0 <= d, and 2(a + b + c) + 3 * d <= n.\n#\n# Let F(n) be the number of such tuples for which 2(a + b + c) + 3 * d = n.\n# If n <= 5, F(n) = 0.\n#\n# Consider any 4-tuple (a, b, c, d) which is counted in F(n).\n# If a > 1, then a tuple (a-1, b-1, c-1, d) is counted in F(n-6).\n# Similarly, any such tuple counted in F(n-6) corresponds to a tuple in F(n).\n# Therefore F(n) = F(n-6) + F'(n),\n# where F'(n) is the number of tuples with a = 1.\n#\n# If a = 1 and we fix d, then 2 * b + 2 * c = n - 3d - 2.\n# (Note that d must have the same divisibility by 2 as n does.)\n# b can vary between 1 and floor((n - 3d - 2) / 4), so there are\n# floor((n - 3d - 2) / 4) tuples included in F(n) for that value of d.\n# Thus F'(n) = floor((n - 2) / 4) + floor((n - 5) / 4) + floor((n - 8) / 4)...\n#\n# Some mathematical analysis reveals the following formula:\n# F'(n) = (x' * x) - (3 * x * (x + 1) / 2) + (y' * y) - (3 * y * (y + 1) / 2)\n# where x, x', y, and y' are dependent on n modulo 4.\n#\n# n = 4 * w: x = floor((w + 2) / 3), y = floor((w + 1) / 3)\n# n = 4 * w + 1: x = floor((w + 2) / 3), y = floor(w / 3)\n# n = 4 * w + 2: x = floor((w + 3) / 3), y = floor((w + 1) / 3)\n# n = 4 * w + 3: x = floor((w + 2) / 3), y = floor((w + 1) / 3)\n#\n# In all cases, x' and y' are the 
numerators of the fractions in the floor\n# functions that define x and y, respectively (e.g. if n = 4 * w, x' = w + 2).\n#\n# Find H(n) by summing F(i) where i <= n.\n\nfrom time import time\nimport sys\nsys.path.append(\"../Library\")\nfrom peresult import peresult\nfrom math import floor\n\ndef solve(cap = 55106):\n hexagon_counts = [0 for i in range(6)] # Stores most recent 6 F(i) values\n result = 0\n for perimeter in range(6, cap + 1):\n hexagon_counts = hexagon_counts[1:] + [hexagon_counts[0]]\n w = perimeter // 4\n if perimeter % 4 == 0 or perimeter % 4 == 3:\n x = (w + 2) // 3\n xP = w + 2\n y = (w + 1) // 3\n yP = w + 1\n elif perimeter % 4 == 1:\n x = (w + 2) // 3\n xP = w + 2\n y = w // 3\n yP = w\n else:\n x = (w + 3) // 3\n xP = w + 3\n y = (w + 1) // 3\n yP = w + 1\n hexagon_counts[5] += (xP * x) - (3 * x * (x + 1) // 2) \\\n + (yP * y) - (3 * y * (y + 1) // 2)\n result += hexagon_counts[5]\n return result\n\nif __name__ == \"__main__\":\n start = time()\n peresult(600, solve(), time() - start)\n","sub_path":"Problems 601-700/pe600IntegerSidedEquiangularHexagons.py","file_name":"pe600IntegerSidedEquiangularHexagons.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"119150831","text":"import importlib\nimport os\nimport re\nimport shutil\nimport sys\nimport traceback\nfrom types import ModuleType\nfrom typing import Tuple, Optional\nimport requests\n\nimport timeout_decorator\n\nbasedir = os.path.abspath(os.getcwd())\n\nCHECKER_PACKAGES_PATH = '/dev/shm/' + os.urandom(8).hex()\nPACKAGE = os.urandom(8).hex()\nFULL_PACKAGE_PATH = CHECKER_PACKAGES_PATH + '/' + PACKAGE\nsys.path.append(CHECKER_PACKAGES_PATH)\n\nignore_patterns = [\n\tre.compile(r'^__pycache__$'),\n\tre.compile(r'\\.pyc$'),\n\tre.compile(r'^\\.idea$'),\n\tre.compile(r'^\\.git'),\n\tre.compile(r'^\\.mypy_cache$'),\n\tre.compile(r'^gamelib$')\n]\n\n\ndef is_ignored(folder: str) -> bool:\n\treturn any(p.match(folder) for p in ignore_patterns)\n\n\ndef create_package(folder):\n\t# Code is basically a mocked copy of the DB-Filesystem code from the gameserver.\n\tos.makedirs(FULL_PACKAGE_PATH, exist_ok=True)\n\tfor root, subdirs, files in os.walk(folder, followlinks=True):\n\t\t# add directories\n\t\tsubdirs[:] = [dir for dir in subdirs if not is_ignored(dir)]\n\t\tfor dir in subdirs:\n\t\t\tpath = dir if root == folder else root[len(folder) + 1:] + '/' + dir\n\t\t\tos.makedirs(os.path.join(FULL_PACKAGE_PATH, path), exist_ok=True)\n\n\t\t# add files\n\t\tfor file in files:\n\t\t\tif is_ignored(file):\n\t\t\t\tcontinue\n\t\t\tfname = root + '/' + file\n\t\t\tpath = file if root == folder else root[len(folder) + 1:] + '/' + file\n\t\t\tshutil.copy(fname, os.path.join(FULL_PACKAGE_PATH, path))\n\n\t# Find and link gamelib\n\tif os.path.exists(basedir + '/gamelib'):\n\t\tos.symlink(basedir + '/gamelib', CHECKER_PACKAGES_PATH + '/gamelib')\n\telif os.path.exists(basedir + '/checkers/gamelib'):\n\t\tos.symlink(basedir + '/checkers/gamelib', CHECKER_PACKAGES_PATH + '/gamelib')\n\telif os.path.exists(basedir + '/ci/service-scripts/gamelib'):\n\t\tos.symlink(basedir + '/ci/service-scripts/gamelib', CHECKER_PACKAGES_PATH + '/gamelib')\n\telse:\n\t\traise Exception('gamelib not found!')\n\n\tprint(f'[OK] Created package {FULL_PACKAGE_PATH}')\n\n\ndef import_module_from_package(filename: str) -> ModuleType:\n\tmodulename = '{}.{}'.format(PACKAGE, filename.replace('.py', '').replace('/', '.'))\n\tspec = 
importlib.util.spec_from_file_location(modulename, FULL_PACKAGE_PATH + '/' + filename)\n\tmodule = importlib.util.module_from_spec(spec)\n\tif spec.loader is None:\n\t\traise Exception('Loader is not present')\n\ttry:\n\t\tspec.loader.exec_module(module) # type: ignore\n\texcept ImportError:\n\t\tprint('=== IMPORT ERROR ===')\n\t\tprint('Remember: ')\n\t\tprint('1. Only use relative imports (with dot) for your own script files: import .my_other_python_file')\n\t\tprint('2. If you need additional libraries for your script (not in requirements-checker), report them to the orgas.')\n\t\traise\n\tprint('[OK] PackageLoader imported {}'.format(modulename))\n\treturn module\n\n\ndef get_checker_class():\n\t# Find checkerfile\n\twith open(os.path.join(basedir, 'checkers', 'config'), 'r') as f:\n\t\tchecker_script_filename, checker_classname = f.read().strip().split(':')\n\t# Create package\n\tcreate_package(os.path.join(basedir, 'checkers'))\n\t# Import checkerscript\n\tmodule = import_module_from_package(checker_script_filename)\n\treturn getattr(module, checker_classname)\n\n\n@timeout_decorator.timeout(30)\ndef run_checker(func, team, tick) -> Tuple[str, Optional[str]]:\n\timport gamelib\n\timport pwnlib\n\ttry:\n\t\tfunc(team, tick)\n\t\treturn 'SUCCESS', None\n\n\texcept gamelib.FlagMissingException as e:\n\t\ttraceback.print_exc()\n\t\treturn 'FLAGMISSING', e.message\n\texcept gamelib.MumbleException as e:\n\t\ttraceback.print_exc()\n\t\treturn 'MUMBLE', e.message\n\texcept AssertionError as e:\n\t\ttraceback.print_exc()\n\t\tif len(e.args) == 1 and type(e.args[0]) == str:\n\t\t\treturn 'MUMBLE', e.args[0]\n\t\treturn 'MUMBLE', repr(e.args)\n\texcept requests.ConnectionError as e:\n\t\ttraceback.print_exc()\n\t\treturn 'OFFLINE', 'Connection timeout'\n\texcept pwnlib.exception.PwnlibException as e:\n\t\tif 'Could not connect to' in e.args[0]:\n\t\t\treturn 'OFFLINE', str(e.args[0])\n\t\treturn 'CRASHED', None\n\texcept gamelib.OfflineException as e:\n\t\ttraceback.print_exc()\n\t\treturn 'OFFLINE', e.message\n\t# except SoftTimeLimitExceeded:\n\t#\ttraceback.print_exc()\n\t#\treturn 'TIMEOUT', 'Timeout, service too slow'\n\texcept MemoryError:\n\t\ttraceback.print_exc()\n\t\treturn 'CRASHED', None\n\texcept:\n\t\ttraceback.print_exc()\n\t\treturn 'CRASHED', None\n","sub_path":"ci/testscripts/checker_utils.py","file_name":"checker_utils.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"138393562","text":"\"\"\"\nphoto utilities module\n\n\"\"\"\n\nimport os\n\nfrom PIL import Image\n\n\nclass Photo:\n \"\"\" Describes a photo image with parameters extracted from Exif tags \"\"\"\n \n def __init__(self, path):\n self.path = path\n self.filename = os.path.basename(path) \n self.size = os.path.getsize(path)\n # Year and Month attributes set to zero(int) by default\n self.year = None\n self.month = None\n self.day = None\n \n \n def extractTags(self):\n \"\"\" set year and month attributes from exif tags \"\"\"\n exif = getExif(self.path)\n tag = extractExifTag(exif, 0x9003)[0]\n self.year = int(tag[0:4])\n self.month = int(tag[5:7])\n self.day = int(tag[8:10])\n \n\ndef getExif(path):\n \"\"\" Extract Exif tags from image \n \n Returns a dict with TAG_ID and values\n \n \"\"\"\n #\n # Use PIL library to extract exif\n #\n img = Image.open(path)\n exif = img._getexif()\n \n return exif\n\n\ndef extractExifTag(exif, *tagID):\n \"\"\" Extract specific values from Exif dict returned by 
getExif() function\n    \n    Return in a tuple the values associated with the *tagID parameters\n    \n    \"\"\"\n    tagValue = []\n\n    for key in tagID:\n        tagValue.append(exif[key])\n    \n    return tuple(tagValue)\n\n\n\n","sub_path":"philae/phototools.py","file_name":"phototools.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"69505366","text":"from django.shortcuts import render, get_object_or_404\nfrom coursefetcher.models import Course\nimport requests\nimport sys\nimport re\nfrom bs4 import BeautifulSoup\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',\n    'Cookie': '{DTUCoursesPublicLanguage}=da-DK; ASP.NET_SessionId=dpsgztrtgtouu2cgb1ganfx3'\n}\n\ndef index(request):\n    courses = Course.objects.all()\n    context = {\n        'courses' : courses\n    }\n    return render(request,'coursefetcher/index.html',context)\ndef details(request, course_id):\n    course = get_object_or_404(Course,courseNumber=course_id)\n    return render(request, 'coursefetcher/details.html', {'course': course})\n\ndef find_course_dependencies():\n    courses = Course.objects.all()\n    for course in courses:\n        course.courseRequiredFor.clear()\n        # Loop over recommended pre req for each course\n        siblingcourses = []\n        siblingcourses.append(course)\n\n        for recommendedPreReq in course.courseRecommendedPrerequisites.iterator():\n            for recommendedPreReqSiblings in recommendedPreReq.coursePointLock.iterator():\n                add_course_dependency(course,recommendedPreReqSiblings)\n            add_course_dependency(course, recommendedPreReq)\n\n        for mandatoryPreReq in course.courseMandatoryPreequisites.iterator():\n            for mandatoryPreReqSiblings in mandatoryPreReq.coursePointLock.iterator():\n                add_course_dependency(course,mandatoryPreReqSiblings)\n            add_course_dependency(course, mandatoryPreReq)\n\n# Adds a new course dependency\ndef add_course_dependency(course, required_for):\n    if course != required_for:\n        # print(f\"{required_for.courseNumber} required for {course}\")\n        required_for.courseRequiredFor.add(course)\n        required_for.save()\n        # we need to add dependency both ways\n        course.courseRecommendedPrerequisites.add(required_for)\n        course.save()\n\ndef save_courses(courses):\n    for course in courses:\n        # Copy values from one object model to another\n        if Course.objects.filter(courseNumber = course.courseNumber).exists():\n            tempCourse = Course.objects.get(courseNumber = course.courseNumber)\n        else:\n            tempCourse = Course()\n        tempCourse.courseNumber = course.courseNumber\n        tempCourse.courseTitle = course.courseTitle\n        tempCourse.courseLanguage = course.courseLanguage\n        tempCourse.coursePoints = course.coursePoints\n        tempCourse.courseType = course.courseType\n        tempCourse.courseEmpty = course.courseEmpty\n        tempCourse.coursePlacement = course.coursePlacement\n        tempCourse.courseLocation = course.courseLocation\n        tempCourse.courseLength = course.courseLength\n        tempCourse.courseExamLocation = course.courseExamLocation\n        tempCourse.courseExamLength = course.courseExamLength\n        tempCourse.courseExamHelpingAids = course.courseExamHelpingAids\n        tempCourse.courseEvaluationStyle = course.courseEvaluationStyle\n        tempCourse.courseExamGradingStyle = course.courseExamGradingStyle\n        tempCourse.courseResponsible = course.courseResponsible\n        tempCourse.courseInstitute = course.courseInstitute\n        tempCourse.courseHomepage = course.courseHomepage\n        tempCourse.courseSignUp = course.courseSignup\n        tempCourse.courseGreenChallange = course.courseGreenChallenge\n        
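        # NB: the target attribute above is spelled 'courseGreenChallange' while the
        # source attribute is 'courseGreenChallenge'; if the model field actually uses
        # the latter spelling, that line silently sets a non-field attribute that
        # save() never persists.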
tempCourse.courseJoiningInstitute = course.courseJoiningInstitute\n        tempCourse.courseExternalCooperationInstitute = course.courseExternalCooperationInstitute\n        tempCourse.courseAttendantLimit = course.courseAttendantLimit\n        # Should probably check if the course is already present before saving, and then instead merging the changes,\n        # this will make it more future proof\n        tempCourse.save()\n    # Copy references to other courses here, and check if they are present\n    # If they are not present create dummy objects.\n    for course in courses:\n        current_course = Course.objects.get(courseNumber = course.courseNumber)\n        for point_lock_course in course.coursePointLock:\n            if Course.objects.filter(courseNumber=point_lock_course).exists():\n                current_course.coursePointLock.add(point_lock_course)\n            else:\n                create_dummy_course(point_lock_course)\n        for recommended_pre_req_course in course.courseRecommendedPrerequisites:\n            if Course.objects.filter(courseNumber=recommended_pre_req_course).exists():\n                current_course.courseRecommendedPrerequisites.add(recommended_pre_req_course)\n            else:\n                create_dummy_course(recommended_pre_req_course)\n        for mandatory_pre_req_course in course.courseMandatoryPrerequisites:\n            if Course.objects.filter(courseNumber=mandatory_pre_req_course).exists():\n                current_course.courseMandatoryPreequisites.add(mandatory_pre_req_course)\n            else:\n                create_dummy_course(mandatory_pre_req_course)\n        for previous_course_number in course.coursePreviousCourse:\n            if Course.objects.filter(courseNumber=previous_course_number).exists():\n                current_course.coursePreviousCourse.add(previous_course_number)\n            else:\n                create_dummy_course(previous_course_number)\n\n        current_course.save()\n    find_course_dependencies()\n    fixInstituteNames()\n\n\ndef create_dummy_course(courseNumber):\n    # print(f\"Found a non existing course {courseNumber}, creating it now\")\n    newCourse = Course()\n    newCourse.courseNumber = courseNumber\n    newCourse.courseTitle = \"Currently non existing course\"\n    newCourse.save()\n\n\nclass TemporaryCourse:\n    def __init__(self):\n        self.courseNumber = \"\"\n        self.courseTitle = \"\"\n        self.courseLanguage = \"\"\n        self.coursePoints = 0\n        self.courseType = \"\"\n        self.courseEmpty = False\n        self.coursePlacement = \"\"\n        self.courseLocation = \"\"\n        self.courseTeachingStyle = \"\"\n        self.courseLength = \"\"\n        self.courseExamLocation = \"\"\n        self.courseExamLength = \"\"\n        self.courseExamHelpingAids = \"\"\n        self.courseEvaluationStyle = \"\"\n        self.courseExamGradingStyle = \"\"\n        self.coursePointLock = []\n        self.courseResponsible = \"\"\n        self.courseInstitute = \"\"\n        self.courseHomepage = \"\"\n        self.courseSignup = \"\"\n        self.courseGreenChallenge = False\n        self.courseJoiningInstitute = \"\"\n        self.courseExternalCooperationInstitute = \"\"\n        self.courseRecommendedPrerequisites = []\n        self.courseMandatoryPrerequisites = []\n        self.coursePreviousCourse = []\n        self.courseAttendantLimit = \"\"\n\n\ndef fetch_data():\n    def remove_line_break(str):\n        str = str.replace(\"\\r\", \"\")\n        str = str.replace(\"\\n\", \"\")\n        return str\n\n    def label_not_found(label, i):\n        # print(f\"Error, unknown label {label}, table {i}\")\n        sys.exit(1)\n\n    # Should probably save the links somewhere with persistence\n    links = fetchcourselinks()\n    fetchedCourses = []\n    print(\"Starting to process each course\")\n    for j in range(len(links)):\n        tempCourse = TemporaryCourse()\n        response = requests.get(links[j], headers=headers)\n        soup = BeautifulSoup(response.text, 'html.parser')\n        tables = soup.findAll('table')\n        
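        # Assumption the parsing below relies on: every kurser.dtu.dk course page
        # exposes three info tables whose rows are (label, value) cell pairs; any
        # unrecognised label aborts the whole run via label_not_found().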
tempCourse.courseNumber = str(soup.title.getText()).split(\" \")[4]\n        temp = str(remove_line_break(soup.title.getText())).split(\" \")\n        tempTitle = \"\"\n        if len(temp) >5:\n            for i in range(len(temp)-6):\n                if i == 0:\n                    tempTitle += str(temp[i+5])\n                else:\n                    tempTitle += str(\" \"+temp[i+5])\n        for i in range(len(tables)):\n            rows = tables[i].findAll('tr')\n            for j in range(len(rows)):\n                contents = rows[j].findAll('td')\n                label = remove_line_break(str(contents[0].getText()))\n                content = remove_line_break(str(contents[1].getText()))\n                if i == 0:  # Collect info from first box on the left side of page\n                    if label == \"Engelsk titel\":\n                        tempCourse.courseTitle = tempTitle\n                        #tempCourse.courseTitle = content  # We use contents[1] as the first part is the label i.e. Title: Mat1\n                        #if \"Summer\" in tempCourse.courseTitle:  # If it is a summer course it contains a linebreak, we remove them\n                            #tempCourse.courseTitle = content\n                    elif label == \"Undervisningssprog\":\n                        tempCourse.courseLanguage = content\n                    elif label == \"Point( ECTS )\":\n                        content = content.replace(\",\",\".\")\n                        tempCourse.coursePoints = float(content)\n                    elif label == \"Kursustype\":\n                        tempStr = contents[1].findAll(\"div\")\n                        if len(tempStr) > 2:\n                            if str(tempStr[1].getText()) == \"Kurset udbydes under tompladsordningen\":\n                                tempCourse.courseEmpty = True\n                                content = content.replace(\"Kurset udbydes under tompladsordningen\", \"\")\n                        tempCourse.courseType = content\n                    else:\n                        label_not_found(label, i)\n                elif i == 1:\n                    if label == \"Skemaplacering\":\n                        tempCourse.coursePlacement = content\n                    elif label == \"Undervisningens placering\":\n                        tempCourse.courseLocation = content\n                    elif label == \"Undervisningsform\":\n                        tempCourse.courseTeachingStyle = content\n                    elif label == \"Kursets varighed\":\n                        tempCourse.courseLength = content\n                    elif label == \"Eksamensplacering\":\n                        tempCourse.courseExamLocation = content\n                    elif label == \"Evalueringsform\":\n                        tempCourse.courseEvaluationStyle = content\n                    elif label == \"Eksamens varighed\":\n                        tempCourse.courseExamLength = content\n                    elif label == \"Hjælpemidler\":\n                        tempCourse.courseExamHelpingAids = content\n                    elif label == \"Bedømmelsesform\":\n                        tempCourse.courseExamGradingStyle = content\n                    elif label == \"Pointspærring\":\n                        tempCourse.coursePointLock = re.findall(r'[0-9][0-9][0-9ST][0-9][0-9]', content)\n                    elif label == \"Anbefalede forudsætninger\":\n                        tempCourse.courseRecommendedPrerequisites = re.findall(r'[0-9][0-9][0-9ST][0-9][0-9]', content)\n                    elif label == \"Obligatoriske forudsætninger\":\n                        tempCourse.courseMandatoryPrerequisites = re.findall(r'[0-9][0-9][0-9ST][0-9][0-9]', content)\n                    elif label == \"Tidligere kursus\":\n                        tempCourse.coursePreviousCourse = re.findall(r'[0-9][0-9][0-9ST][0-9][0-9]', content)\n                    elif label == \"Deltagerbegrænsning\":\n                        tempCourse.courseAttendantLimit = content\n                    else:\n                        label_not_found(label, i)\n                elif i == 2:\n                    if label == \"Kursusansvarlig\":  # Should also fetch links here\n                        tempCourse.courseResponsible = content\n                    elif label == \"Institut\":\n                        tempCourse.courseInstitute = content\n                    elif label == \"Kursushjemmeside\":\n                        tempCourse.courseHomepage = content\n                    elif label == \"Tilmelding\":\n                        tempCourse.courseSignup = content\n                    elif label == \"Mulighed for GRØN DYST deltagelse\":\n                        if content.startswith(\"Kontakt underviseren\"):  # Should probably not be a boolean\n                            tempCourse.courseGreenChallenge = True\n                        elif content.startswith(\"Dette kursus giver den studerende en mulighed\"):\n                            tempCourse.courseGreenChallenge = True\n                        else:\n                            # print(f\"Error green challenge: 
{content}\")\n sys.exit(1)\n elif label == \"Deltagende institut\":\n tempCourse.courseJoiningInstitute = content\n elif label == \"Ekstern samarbejdsinstitution\":\n tempCourse.courseExternalCooperationInstitute = content\n else:\n label_not_found(label, i)\n else:\n #print(f\"Error, unkown table!\")\n sys.exit(1)\n fetchedCourses.append(tempCourse)\n #print(f\"Processed {len(fetchedCourses)}/{len(links)}\")\n print(\"Finished processing courses\")\n return fetchedCourses\n\n\ndef fetchcourselinks():\n print(\"Started fetching course links\")\n response = requests.get(\n 'http://kurser.dtu.dk/search?CourseCode=&SearchKeyword=&SchedulePlacement=E1%3BE2%3BE3%3BE4%3BE5%3BE1A%3BE2A'\n '%3BE3A%3BE4A%3BE5A%3BE1B%3BE2B%3BE3B%3BE4B%3BE5B%3BE&SchedulePlacement=E1%3BE1A%3BE1B&SchedulePlacement=E1A'\n '&SchedulePlacement=E1B&SchedulePlacement=E2%3BE2A%3BE2B&SchedulePlacement=E2A&SchedulePlacement=E2B'\n '&SchedulePlacement=E3%3BE3A%3BE3B&SchedulePlacement=E3A&SchedulePlacement=E3B&SchedulePlacement=E4%3BE4A'\n '%3BE4B&SchedulePlacement=E4A&SchedulePlacement=E4B&SchedulePlacement=E5%3BE5A%3BE5B&SchedulePlacement=E5A'\n '&SchedulePlacement=E5B&SchedulePlacement=F1%3BF2%3BF3%3BF4%3BF5%3BF1A%3BF2A%3BF3A%3BF4A%3BF5A%3BF1B%3BF2B'\n '%3BF3B%3BF4B%3BF5B%3BF&SchedulePlacement=F1%3BF1A%3BF1B&SchedulePlacement=F1A&SchedulePlacement=F1B'\n '&SchedulePlacement=F2%3BF2A%3BF2B&SchedulePlacement=F2A&SchedulePlacement=F2B&SchedulePlacement=F3%3BF3A'\n '%3BF3B&SchedulePlacement=F3A&SchedulePlacement=F3B&SchedulePlacement=F4%3BF4A%3BF4B&SchedulePlacement=F4A'\n '&SchedulePlacement=F4B&SchedulePlacement=F5%3BF5A%3BF5B&SchedulePlacement=F5A&SchedulePlacement=F5B'\n '&SchedulePlacement=January&SchedulePlacement=August%3BJuly%3BJune&SchedulePlacement=August&SchedulePlacement'\n '=July&SchedulePlacement=June&CourseType=&TeachingLanguage=',\n headers=headers)\n soup = BeautifulSoup(response.text, 'html.parser')\n links = []\n for link in soup.findAll('a'):\n postfix = str(link.get('href'))\n if postfix.startswith('/course/') and not postfix.endswith('gotoStudyplanner'):\n url = 'http://kurser.dtu.dk' + link.get('href')\n links.append(url)\n #print(f\"Finished fetching course links, fetched {len(links)}\")\n return links\n\ndef fixInstituteNames():\n for course in Course.objects.all().iterator():\n if course.courseInstitute.startswith(\"01\"):\n course.courseInstitute = \"01 Institut for Matematik og Computer Science\"\n elif course.courseInstitute.startswith(\"10\"):\n course.courseInstitute = \"10 Institut for Fysik\"\n elif course.courseInstitute.startswith(\"11\"):\n course.courseInstitute = \"11 Institut for Byggeri og Anlæg\"\n elif course.courseInstitute.startswith(\"12\"):\n course.courseInstitute = \"12 Institut for Vand og Miljøteknologi\"\n elif course.courseInstitute.startswith(\"23\"):\n course.courseInstitute = \"23 Fødevare instituttet\"\n elif course.courseInstitute.startswith(\"24\"):\n course.courseInstitute = \"24 Veterinær instituttet\"\n elif course.courseInstitute.startswith(\"25\"):\n course.courseInstitute = \"25 Institut for Akvatiske Ressourcer\"\n elif course.courseInstitute.startswith(\"26\"):\n course.courseInstitute = \"26 Institut for Kemi\"\n elif course.courseInstitute.startswith(\"27\"):\n course.courseInstitute = \"27 DTU Bioengineering\"\n elif course.courseInstitute.startswith(\"28\"):\n course.courseInstitute = \"28 Institut for Kemiteknik\"\n elif course.courseInstitute.startswith(\"29\"):\n course.courseInstitute = \"29 DTU Biosustain\"\n elif 
course.courseInstitute.startswith(\"30\"):\n course.courseInstitute = \"30 Institut for Rumforskning og teknologi\"\n elif course.courseInstitute.startswith(\"31\"):\n course.courseInstitute = \"31 Institut for Elektroteknologi\"\n elif course.courseInstitute.startswith(\"33\"):\n course.courseInstitute = \"33 Institut for Mikro og Nanoteknologi\"\n elif course.courseInstitute.startswith(\"34\"):\n course.courseInstitute = \"34 Institut for Fotonik\"\n elif course.courseInstitute.startswith(\"36\"):\n course.courseInstitute = \"36 DTU Bioinformatik\"\n elif course.courseInstitute.startswith(\"41\"):\n course.courseInstitute = \"41 Institut for Mekanisk Teknologi\"\n elif course.courseInstitute.startswith(\"42\"):\n course.courseInstitute = \"42 DTU Management Engineering\"\n elif course.courseInstitute.startswith(\"46\"):\n course.courseInstitute = \"46 DTU Vindenergi\"\n elif course.courseInstitute.startswith(\"47\"):\n course.courseInstitute = \"47 DTU Energi\"\n elif course.courseInstitute.startswith(\"88\"):\n course.courseInstitute = \"88 Andre kurser\"\n elif course.courseInstitute.startswith(\"62\"):\n course.courseInstitute = \"62 DTU Diplom\"\n course.save()\n","sub_path":"coursefetcher/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"528082641","text":"import scrapy\nimport time\nimport logging\nimport pandas as pd\nimport redis\nfrom selenium import webdriver\n\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom IMDB_SCRAPY.items import MetaCritic_UserReview_Item,MetaCritic_Trailer_Item,MetaCritic_CriticReview_Item\n\nclass MetaCritic_UserReview(scrapy.Spider):\n name = 'METACRITIC_USERREVIEW'\n custom_settings = {\n \"USER_AGENT\": \"Chrome/41.0.2228.0\",\n }\n allowed_domains = [\"metacritic.com\", \"imdb.com\"]\n start_urls = ['https://www.metacritic.com/movie/children-of-the-corn-ii-the-final-sacrifice/user-reviews']\n\n movie_nums = 250\n def __init__(self):\n super(MetaCritic_Trailer, self).__init__()\n self.red = redis.Redis(host='localhost', port=6379, db=1)\n\n def start_requests(self):\n global candidate_movies\n # 从imdb入口转入MetaCritic\n basics_tsv = pd.read_csv('Data/title.basics.tsv', sep='\\t')\n movie_tconsts = basics_tsv['tconst']\n movie_names = basics_tsv['primaryTitle']\n movie_tconsts = list(movie_tconsts)\n movie_names = list(movie_names)\n\n # ⬇ 下面2行代码,从制定列表获取 ⬇ #\n movie_tconsts = candidate_movies\n movie_names = candidate_movies\n\n for idx in range(self.movie_nums):\n movie_tconst = movie_tconsts[idx]\n movie_name = movie_names[idx]\n # ⬇ 检查电影tconst是否已经获取过 ⬇ #\n if self.red.sismember('metacritic_userreview_set', movie_tconst):\n print(idx, movie_tconst, movie_name, \"已经爬去过,跳过\")\n continue\n else:\n print(idx, movie_tconst, movie_name, \"未爬取,进行处理\")\n self.red.sadd('metacritic_set', movie_tconst)\n url = \"https://www.imdb.com/title/{}/criticreviews?ref_=tt_ov_rt\".format(movie_tconst)\n yield scrapy.Request(url, callback=lambda response, tconst=movie_tconst: self.get_metacritic_url(response,tconst),errback=self.err)\n\n def get_metacritic_url(self, response, tconst):\n try:\n nexturl = response.xpath(\"//div[@class='see-more']/a[@class='offsite-link']/@href\").extract()\n nexturl = nexturl[0].split('?')[0] + \"/user-reviews\"\n print(nexturl)\n yield scrapy.Request(nexturl, callback = lambda response, tconst = tconst: self.parse(response, tconst))\n except:\n pass\n #print(\"no\")\n\n def err(self, err):\n 
print(err)\n\n    def parse(self, response, tconst = 1):\n        pagenum = response.xpath(\"//a[@class='page_num']/text() | //span[@class='page_num']/text()\").extract()\n        print(pagenum)\n        try:\n            name = response.url.split(\"/\")[4]\n        except:\n            logging.warning(response.url + \" failed to parse\")\n            return\n        if len(response.xpath(\"//div[@class='review_body']\")) == 0:\n            logging.warning(name + \" movie review page does not exist\")\n        else:\n            if len(pagenum) == 0:\n                pagenum.append(1)\n            for i in range(int(pagenum[-1])):\n                nexturl = \"{}?page={}\".format(response.url, i)\n                yield scrapy.Request(nexturl, callback = lambda response, name = name ,tconst = tconst: self.parse_movie(response, name, tconst))\n\n    def parse_movie(self, response, movie_name, tconst):\n        lines = response.xpath(\"//div[@class='review_body']\")\n        contents = list()\n        for line in lines:\n            content = \"\"\n            if len(line.re(\"blurb blurb_expanded\")) == 0:\n                content = line.xpath(\"span/text()\").extract()[0]\n            else:\n                content = line.xpath(\"span\").re(\"expanded\\\">([^<]*)\")[0]\n            contents.append(content)\n\n        dates = response.xpath(\"//span[@class='date']/text()\").extract()\n        users = response.xpath(\"//span[@class='author']//text()\").extract()\n        scores = response.xpath(\"//div[@class='left fl']/div/text()\").extract()\n\n        for i in range(len(lines)):\n            try:\n                item = MetaCritic_UserReview_Item()\n                item['imdb_tconst'] = tconst\n                item['movie_name'] = movie_name\n                item['content_user'] = users[i]\n                item['content_score'] = scores[i]\n                item['content_date'] = dates[i]\n                item['content_text'] = contents[i]\n                yield item\n            except:\n                pass\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\nclass MetaCritic_Trailer(scrapy.Spider):\n    name = 'METACRITIC_TRAILER'\n    custom_settings = {\n        \"USER_AGENT\": \"Chrome/41.0.2228.0\",\n    }\n    allowed_domains = [\"metacritic.com\", \"imdb.com\"]\n    start_urls = ['https://www.metacritic.com/movie/joker/trailers']\n\n    movie_nums = 250\n    def __init__(self):\n        super(MetaCritic_Trailer, self).__init__()\n        self.red = redis.Redis(host='localhost', port=6379, db=1)\n\n    def start_requests(self):\n        global candidate_movies\n        # Jump from the IMDB entry page to the matching MetaCritic page\n        basics_tsv = pd.read_csv('Data/title.basics.tsv', sep='\\t')\n        movie_tconsts = basics_tsv['tconst']\n        movie_names = basics_tsv['primaryTitle']\n        movie_tconsts = list(movie_tconsts)\n        movie_names = list(movie_names)\n\n        # ⬇ The two lines below read from the predefined list instead ⬇ #\n        movie_tconsts = candidate_movies\n        movie_names = candidate_movies\n\n        for idx in range(self.movie_nums):\n            movie_tconst = movie_tconsts[idx]\n            movie_name = movie_names[idx]\n            # ⬇ Check whether this movie tconst has already been crawled ⬇ #\n            if self.red.sismember('metacritic_trailer_set', movie_tconst):\n                print(idx, movie_tconst, movie_name, \"already crawled, skipping\")\n                continue\n            else:\n                print(idx, movie_tconst, movie_name, \"not crawled yet, processing\")\n                self.red.sadd('metacritic_trailer_set', movie_tconst)\n            url = 
\"https://www.imdb.com/title/{}/criticreviews?ref_=tt_ov_rt\".format(movie_tconst)\n yield scrapy.Request(url, callback=lambda response, tconst=movie_tconst: self.get_metacritic_url(response,tconst),errback=self.err)\n\n def get_metacritic_url(self, response, tconst):\n try:\n nexturl = response.xpath(\"//div[@class='see-more']/a[@class='offsite-link']/@href\").extract()\n nexturl = nexturl[0].split('?')[0] + \"/trailers\"\n print(nexturl)\n yield scrapy.Request(nexturl, callback=lambda response, tconst=tconst: self.parse(response, tconst))\n except:\n pass\n # print(\"no\")\n\n def err(self, err):\n if err.check(HttpError):\n # these exceptions come from HttpError spider middleware\n # you can get the non-200 response\n response = err.value.response\n self.logger.error('HttpError on %s', response.url)\n\n def parse(self, response, tconst):\n try:\n name = response.url.split(\"/\")[4]\n except:\n logging.warning(response.url + \"解析出错\")\n return\n\n item = MetaCritic_Trailer_Item()\n video_url = response.xpath(\"//div[@id='videoContainer_wrapper']/@data-mcvideourl\").extract()\n try :\n video_url = video_url[0]\n item['imdb_tconst'] = tconst\n item['movie_name'] = name\n item['trailer_url'] = video_url\n yield item\n except:\n logging.warning(response.url + \"识别错误\")\n\n\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------\n\nclass MetaCritic_CriticReview(scrapy.Spider):\n name = 'METACRITIC_CRITICREVIEW'\n custom_settings = {\n \"USER_AGENT\": \"Chrome/41.0.2228.0\",\n }\n allowed_domains = [\"metacritic.com\"]\n start_urls = ['https://www.imdb.com/title/tt0111161/criticreviews?ref_=tt_ov_rt']\n movie_nums = 250\n\n def __init__(self):\n super(MetaCritic_Trailer, self).__init__()\n self.red = redis.Redis(host='localhost', port=6379, db=1)\n\n def start_requests(self):\n global candidate_movies\n # 从imdb入口转入MetaCritic\n basics_tsv = pd.read_csv('Data/title.basics.tsv', sep='\\t')\n movie_tconsts = basics_tsv['tconst']\n movie_names = basics_tsv['primaryTitle']\n movie_tconsts = list(movie_tconsts)\n movie_names = list(movie_names)\n\n # ⬇ 下面2行代码,从制定列表获取 ⬇ #\n movie_tconsts = candidate_movies\n movie_names = candidate_movies\n\n for idx in range(self.movie_nums):\n movie_tconst = movie_tconsts[idx]\n movie_name = movie_names[idx]\n # ⬇ 检查电影tconst是否已经获取过 ⬇ #\n if self.red.sismember('metacritic_criticreview_set', movie_tconst):\n print(idx, movie_tconst, movie_name, \"已经爬去过,跳过\")\n continue\n else:\n print(idx, movie_tconst, movie_name, \"未爬取,进行处理\")\n self.red.sadd('metacritic_set', movie_tconst)\n\n url = \"https://www.imdb.com/title/{}/criticreviews?ref_=tt_ov_rt\".format(movie_tconst)\n yield scrapy.Request(url, callback=lambda response, tconst=movie_tconst: self.get_metacritic_url(response,tconst),errback=self.err)\n\n def get_metacritic_url(self, response, tconst):\n try:\n nexturl = response.xpath(\"//div[@class='see-more']/a[@class='offsite-link']/@href\").extract()\n nexturl = 
nexturl[0].split('?')[0] + \"/critic-reviews\"\n            print(nexturl)\n            yield scrapy.Request(nexturl, callback = lambda response, tconst = tconst : self.parse(response, tconst))\n        except:\n            pass\n            # print(\"no\")\n\n\n    def err(self, err):\n        print(err)\n\n    def parse(self, response, tconst = -1):\n        pagenum = response.xpath(\"//a[@class='page_num']/text() | //span[@class='page_num']/text()\").extract()\n        try:\n            name = response.url.split(\"/\")[4]\n        except:\n            logging.warning(response.url + \" failed to parse\")\n            return\n        if len(response.xpath(\"//a[@class='no_hover']/text() | //div[@class='summary']/text()\")) == 0:\n            logging.warning(name + \" movie review page does not exist\")\n        else:\n            if len(pagenum) == 0:\n                pagenum.append(1)\n            for i in range(int(pagenum[-1])):\n                nexturl = \"{}?page={}\".format(response.url, i)\n                yield scrapy.Request(nexturl, callback = lambda response, name = name, tconst = tconst : self.parse_movie(response,name,tconst))\n\n    def parse_movie(self, response, movie_name, tconst):\n        lines = response.xpath(\"//div[@class='summary']\")\n        contents = list()\n        urls = list()\n        for line in lines:\n            if len(line.xpath(\"a\")) != 0:\n                contents.append(line.xpath(\"a/text()\").extract()[0])\n                urls.append(line.xpath(\"a/@href\").extract()[0])\n            else:\n                contents.append(line.xpath(\"text()\").extract()[0])\n                urls.append(\"\")\n        users = response.xpath(\"//span[@class='author']//text()\").extract()\n        scores = response.xpath(\"//div[@class='left fl']/div/text()\").extract()\n        for i in range(len(contents)):\n            try:\n                item = MetaCritic_CriticReview_Item()\n                item['imdb_tconst'] = tconst\n                item['movie_name'] = movie_name\n                item['content_user'] = users[i]\n                item['content_score'] = scores[i]\n                item['content_text'] = contents[i]\n                item['content_url'] = urls[i]\n                yield item\n            except:\n                pass\n\n\ncandidate_movies = ['tt0111161',\n                    'tt0068646',\n                    'tt0071562',\n                    'tt0468569',\n                    'tt0050083',\n                    'tt0108052',\n                    'tt0167260',\n                    'tt0110912',\n                    'tt0060196',\n                    'tt0137523',\n                    'tt0120737',\n                    'tt0109830',\n                    'tt1375666',\n                    'tt7286456',\n                    'tt0080684',\n                    'tt0167261',\n                    'tt0133093',\n                    'tt0073486',\n                    'tt0099685',\n                    'tt0047478',\n                    'tt0114369',\n                    'tt0317248',\n                    'tt0118799',\n                    'tt0102926',\n                    'tt0038650',\n                    'tt0076759',\n                    'tt0120815',\n                    'tt0245429',\n                    'tt0120689',\n                    'tt0110413',\n                    'tt0816692',\n                    'tt0056058',\n                    'tt0114814',\n                    'tt0110357',\n                    'tt0120586',\n                    'tt0088763',\n                    'tt0253474',\n                    'tt0027977',\n                    'tt0103064',\n                    'tt1675434',\n                    'tt0054215',\n                    'tt0172495',\n                    'tt0021749',\n                    'tt0407887',\n                    'tt2582802',\n                    'tt0064116',\n                    'tt0482571',\n                    'tt0034583',\n                    'tt0095327',\n                    'tt4154796',\n                    'tt0047396',\n                    'tt0095765',\n                    'tt0078748',\n                    'tt0082971',\n                    'tt0209144',\n                    'tt0078788',\n                    'tt0032553',\n                    'tt6751668',\n                    'tt0405094',\n                    'tt1853728',\n                    'tt4154756',\n                    'tt4633694',\n                    'tt0081505',\n                    'tt0050825',\n                    'tt0910970',\n                    'tt0119698',\n                    'tt0043014',\n                    'tt0057012',\n                    'tt0364569',\n                    'tt0051201',\n                    'tt1345836',\n                    'tt0087843',\n                    'tt0090605',\n                    'tt0169547',\n                    'tt5311514',\n                    'tt2380307',\n                    'tt0112573',\n                    'tt0082096',\n                    'tt1187043',\n                    'tt0986264',\n                    'tt0086190',\n                    'tt0114709',\n                    'tt0105236',\n                    'tt0086879',\n                    'tt5074352',\n                    'tt0361748',\n                    'tt0119217',\n                    'tt0180093',\n                    'tt0022100',\n                    'tt0062622',\n                    'tt0052357',\n                    'tt0338013',\n                    'tt0033467',\n                    'tt2106476',\n                    'tt0093058',\n                    'tt0053125',\n                    'tt0066921',\n                    'tt0208092',\n                    'tt0012349',\n                    'tt0211915',\n                    'tt0040522',\n                    'tt0086250',\n                    'tt0045152',\n                    'tt0056172',\n                    'tt0075314',\n                    'tt0435761',\n                    'tt0070735',\n                    'tt0017136',\n                    'tt0059578',\n                    'tt0044741',\n                    'tt1832382',\n                    'tt0036775',\n                    'tt0056592',\n                    'tt8267604',\n                    'tt0097576',\n                    'tt0053604',\n                    'tt1049413',\n                    
'tt0119488',\n                    'tt0071853',\n                    'tt1255953',\n                    'tt0113277',\n                    'tt0042876',\n                    'tt0055630',\n                    'tt0372784',\n                    'tt0095016',\n                    'tt0105695',\n                    'tt6966692',\n                    'tt0363163',\n                    'tt0118849',\n                    'tt0053291',\n                    'tt0347149',\n                    'tt0057115',\n                    'tt0096283',\n                    'tt0089881',\n                    'tt0268978',\n                    'tt0042192',\n                    'tt0457430',\n                    'tt0091251',\n                    'tt0112641',\n                    'tt1305806',\n                    'tt3417422',\n                    'tt0081398',\n                    'tt0120735',\n                    'tt0040897',\n                    'tt0476735',\n                    'tt0993846',\n                    'tt0055031',\n                    'tt5027774',\n                    'tt0071315',\n                    'tt0015864',\n                    'tt0046912',\n                    'tt0469494',\n                    'tt2096673',\n                    'tt0434409',\n                    'tt0050976',\n                    'tt1291584',\n                    'tt3170832',\n                    'tt0477348',\n                    'tt0117951',\n                    'tt0080678',\n                    'tt0167404',\n                    'tt1130884',\n                    'tt8108198',\n                    'tt0084787',\n                    'tt0031381',\n                    'tt0050212',\n                    'tt0083658',\n                    'tt0041959',\n                    'tt0047296',\n                    'tt0050986',\n                    'tt0107290',\n                    'tt0266543',\n                    'tt0116231',\n                    'tt1205489',\n                    'tt0116282',\n                    'tt0266697',\n                    'tt0077416',\n                    'tt0079944',\n                    'tt0046438',\n                    'tt0118715',\n                    'tt0120382',\n                    'tt3011894',\n                    'tt0978762',\n                    'tt0107207',\n                    'tt2267998',\n                    'tt2119532',\n                    'tt0015324',\n                    'tt0017925',\n                    'tt0892769',\n                    'tt0031679',\n                    'tt0353969',\n                    'tt2278388',\n                    'tt0060827',\n                    'tt0112471',\n                    'tt0264464',\n                    'tt0758758',\n                    'tt0061512',\n                    'tt0046268',\n                    'tt2024544',\n                    'tt0040725',\n                    'tt1392214',\n                    'tt0074958',\n                    'tt0079470',\n                    'tt0092005',\n                    'tt1392190',\n                    'tt0019254',\n                    'tt0405508',\n                    'tt0091763',\n                    'tt0060107',\n                    'tt1979320',\n                    'tt0405159',\n                    'tt1028532',\n                    'tt3315342',\n                    'tt0072684',\n                    'tt0052618',\n                    'tt0087544',\n                    'tt0395169',\n                    'tt0245712',\n                    'tt1201607',\n                    'tt0053198',\n                    'tt1895587',\n                    'tt0042041',\n                    'tt0097165',\n                    'tt0043338',\n                    'tt0032976',\n                    'tt0075148',\n                    'tt0198781',\n                    'tt0025316',\n                    'tt0113247',\n                    'tt1954470',\n                    'tt4016934',\n                    'tt0169102',\n                    'tt0093779',\n                    'tt0374887',\n                    'tt2338151',\n                    'tt0064115',\n                    'tt0381681',\n                    'tt1454029',\n                    'tt0094625',\n                    'tt0088247',\n                    'tt0118694',\n                    'tt0367110',\n                    'tt0087884',\n                    'tt0103639',\n                    'tt2015381',\n                    'tt0338564',\n                    'tt0058946',\n                    'tt0056732',\n                    'tt0120731',\n                    'tt0092067']","sub_path":"IMDB_SCRAPY/spiders/metacritic.py","file_name":"metacritic.py","file_ext":"py","file_size_in_byte":19553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"461379908","text":"# 1. Create a variable x equal to 2 to the power of 3\n\n\n# 2. Add 3 to x\n\n\n# 3. Generate a list num_list of length x made of random numbers from 1 to x\n\n\n# 4. Print the numbers of num_list in reverse order\n# (from the last element to the first) separated by spaces\n\n\n# 5. Insert the number 11 into the middle of the list.\n\n\n# 6. Write the following lines to the file list_info.txt\n# - 1. the number of elements in num_list\n# - 2. the number of unique elements in num_list\n# - 3. the smallest number in num_list\n# - 4. the sum of the numbers in num_list that are divisible by 3\n\n\n# 7. Sort the list of dictionaries countries_info\n# by the 'population' key in ascending order,\n# and also sort each cities list by string length in descending order\ncountries_info = [\n    {\n        \"country\": \"Ukraine\",\n        \"population\": 42000000,\n        \"cities\": [\"Kiev\", \"Kharkiv\", \"Odesa\", \"Dnipro\"],\n    },\n    {\n        \"country\": \"France\",\n        \"population\": 66999999,\n        \"cities\": [\"Paris\", \"Marseille\", \"Lyon\", \"Toulouse\"],\n    },\n    {\n        \"country\": \"Germany\",\n        \"population\": 83000000,\n        \"cities\": [\"Berlin\", \"Hamburg\", \"Munich\", \"Frankfurt\"],\n    },\n]\n\n\n# 8. Write a function create_country_info that takes the parameters\n# country, population, cities and returns a dictionary\n\n\n# 9. 
Create a dictionary with the create_country_info function\n# and insert it at the beginning of the countries_info list\n\n\n# 10. Create a repository and push this file to it\n\ndef main():\n    x = pow(2, 3)\n    print(x)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"test/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"209086088","text":"import numpy as np\nimport pandas as pd\n\nfrom iops_data_2019 import phase1\n\nimport hotspot\n\ndef test_get_sum_cuboid():\n    sum1 = hotspot.forecast.get_sum_cuboid(\n        phase1.data_frame,\n        phase1.d2l({\n            \"i\": \"i03\",\n        }))\n\n    sum2 = hotspot.forecast.get_sum_cuboid(\n        phase1.data_frame,\n        phase1.d2l({\n            \"i\": \"i03\",\n            \"p\": \"p07\",\n        }))\n\n    sum_total = hotspot.forecast.get_sum_cuboid(\n        phase1.data_frame,\n        phase1.d2l({}))\n\n    assert sum_total >= sum1 and sum1 >= sum2\n\ndef test_forecast_mean():\n    mean1 = hotspot.forecast.forecast_mean(\n        phase1.data_frame_list,\n        0, # index\n        5, # 5 days\n        phase1.d2l({\n            \"i\": \"i03\",\n        }))\n\n    mean2 = hotspot.forecast.forecast_mean(\n        phase1.data_frame_list,\n        0, # index\n        5, # 5 days\n        phase1.d2l({\n            \"i\": \"i03\",\n            \"p\": \"p07\",\n        }))\n\n    assert mean1 >= mean2\n\ndef test_forecast_frame():\n    df = hotspot.forecast.forecast_frame(\n        phase1.data_frame_list[:-1],\n        0, # index\n        5, # 5 days\n        phase1.data_frame_list[-1])\n    print (\"- test_forecast_frame\\n\")\n    print (df)\n","sub_path":"Code/hotspot/tests/test_forecast.py","file_name":"test_forecast.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"264532518","text":"import math\n\nclass AStar:\n\n    def __init__(self, bomb_testing, bomb_radius, monster_radius, highlight, monster_weight):\n        \"\"\" Initializes A* Variables\"\"\"\n        self.bomb_testing = bomb_testing\n        self.bomb_radius = bomb_radius\n        self.monster_radius = monster_radius\n        self.highlight = highlight\n        self.monster_weight = monster_weight\n\n    def heuristic(self, w, x, y, ex, ey, b, monsters):\n        # Euclidean distance\n        distance = pow(pow(x - ex, 2) + pow(y - ey, 2), 0.5)\n\n        if w.wall_at(x, y):\n            distance += 6\n\n        if w.explosion_at(x, y):\n            distance += 6\n\n        if self.monster_radius:\n            for m in monsters:\n                distance_to_monster = pow(pow(x - m.x, 2) + pow(y - m.y, 2), 0.5)\n                if distance_to_monster < self.monster_radius:\n                    distance += (self.monster_radius - distance_to_monster) * self.monster_weight\n\n        if self.bomb_testing and b and (b.x == x or b.y == y):\n            if b.timer < 4:\n                if y == b.y:\n                    distance_to_bomb = x - b.x\n                    if abs(distance_to_bomb) <= self.bomb_radius:\n                        blocked = False\n                        sign = 1\n                        if distance_to_bomb < 0:\n                            sign = -1\n                        for i in range(1, int(abs(distance_to_bomb))):\n                            xx = b.x + i * sign\n                            if w.wall_at(xx, y):\n                                blocked = True\n                        if not blocked:\n                            distance += (4 - b.timer) * 2\n                if x == b.x:\n                    distance_to_bomb = y - b.y\n                    if abs(distance_to_bomb) <= self.bomb_radius:\n                        blocked = False\n                        sign = 1\n                        if distance_to_bomb < 0:\n                            sign = -1\n                        for j in range(1, int(abs(distance_to_bomb))):\n                            yy = b.y + j * sign\n                            if w.wall_at(x, yy):\n                                blocked = True\n                        if not blocked:\n                            distance += (4 - b.timer) * 2\n\n        return distance\n\n    def reconstruct_path(self, came_from, current):\n        path = []\n        while current[0] != -1:\n            path.append(current)\n            current = came_from[current[0]][current[1]]\n        return path\n\n    # https://en.wikipedia.org/wiki/A*_search_algorithm\n    def evaluate(self, w, px, py, ex, ey, b, 
monsters):\n final_path = []\n neighbor_nodes = [[0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1]]\n\n grid_w = w.width()\n grid_h = w.height()\n\n closed_set = []\n\n open_set = [[px, py]]\n\n came_from = [[[-1] for j in range(grid_h)] for i in range(grid_w)]\n\n g_score = [[math.inf for j in range(grid_h)] for i in range(grid_w)]\n\n g_score[px][py] = 0\n\n f_score = [[math.inf for j in range(grid_h)] for i in range(grid_w)]\n\n f_score[px][py] = self.heuristic(w, px, py, ex, ey, b, monsters)\n\n while len(open_set):\n lowest_f_score = math.inf\n current = []\n for node in open_set:\n f = f_score[node[0]][node[1]]\n if f < lowest_f_score:\n lowest_f_score = f\n current = [node[0], node[1]]\n\n # We could stop here but we want to score the entire board\n if current[0] == ex and current[1] == ey and final_path == []:\n final_path = self.reconstruct_path(came_from, current)\n\n open_set.remove(current)\n closed_set.append(current)\n\n for node in neighbor_nodes:\n neighbor = [current[0] + node[0], current[1] + node[1]]\n if grid_w > neighbor[0] >= 0 and grid_h > neighbor[1] >= 0:\n if neighbor in closed_set:\n continue\n\n score = g_score[current[0]][current[1]] + pow(pow(node[0], 2) + pow(node[1], 2), 0.5)\n\n if neighbor not in open_set:\n open_set.append(neighbor)\n elif score >= g_score[neighbor[0]][neighbor[1]]:\n continue\n\n came_from[neighbor[0]][neighbor[1]] = current\n g_score[neighbor[0]][neighbor[1]] = score\n f_score[neighbor[0]][neighbor[1]] = score + self.heuristic(w, neighbor[0], neighbor[1], ex, ey, b, monsters)\n\n # Highlight path\n \"\"\"if self.highlight:\n for i in range(len(final_path)):\n node = final_path[i]\n f_score[node[0]][node[1]] -= 9\n f_score[node[0]][node[1]] += 0.5 * i\n # scale values toward the exit\n # f_score[node[0]][node[1]] += i/2\"\"\"\n\n \"\"\"for i in range(grid_w):\n for j in range(grid_h):\n f_score[i][j] += (pow(pow(i - ex, 2) + pow(j - ey, 2), 0.5) + 2) / 2\"\"\"\n\n # Find greatest value\n max_score = 0\n for i in range(grid_w):\n for j in range(grid_h):\n if f_score[i][j] > max_score:\n max_score = f_score[i][j]\n\n # Normalize values\n for i in range(grid_w):\n for j in range(grid_h):\n f_score[i][j] = 1 - f_score[i][j] / max_score\n\n # Find greatest value\n max_score = 0\n for i in range(grid_w):\n for j in range(grid_h):\n if f_score[i][j] > max_score:\n max_score = f_score[i][j]\n\n # Scale values so the greatest value is 1\n for i in range(grid_w):\n for j in range(grid_h):\n f_score[i][j] *= 1 / max_score\n\n return f_score, final_path","sub_path":"Bomberman/group29/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":5887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"567289606","text":"from django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . 
import apis\n\n\nurlpatterns = format_suffix_patterns([\n    path('products/', apis.ProductList.as_view(), name='product-list'),\n    path('products/<int:pk>', apis.ProductDetail.as_view(), name='product-detail'),\n    path('games/', apis.GameList.as_view()),\n    path('games/<int:pk>/', apis.GameDetail.as_view(), name='game-detail'),\n    path('books/', apis.BookList.as_view()),\n    path('books/<int:pk>/', apis.BookDetail.as_view(), name='book-detail'),\n    path('clothes/', apis.ClothingList.as_view()),\n    path('clothes/<int:pk>/', apis.ClothingDetail.as_view(), name='clothing-detail'),\n    path('shopping-cart/', apis.ShoppingCartList.as_view(), name='shopping-cart'),\n    path('shopping-cart/<int:pk>/', apis.ShoppingCartDetail.as_view(), name='shopping-cart-detail'),\n    path('orders/', apis.OrderList.as_view(), name='order-list'),\n    path('orders/<int:pk>/', apis.OrderDetail.as_view(), name='order-detail'),\n])","sub_path":"mall/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"11025195","text":"from decimal import Decimal, getcontext\n\nfrom vector import Vector\n\ngetcontext().prec = 30\n\n\nclass Plane(object):\n\n    NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'\n\n    def __init__(self, normal_vector=None, constant_term=None):\n        self.dimension = 3\n\n        if not normal_vector:\n            all_zeros = [0]*self.dimension\n            normal_vector = Vector(all_zeros)\n        self.normal_vector = normal_vector\n\n        if not constant_term:\n            constant_term = Decimal(0)\n        self.constant_term = Decimal(constant_term)\n\n        self.set_basepoint()\n\n\n    def set_basepoint(self):\n        try:\n            n = self.normal_vector\n            c = self.constant_term\n            basepoint_coords = [0]*self.dimension\n\n            initial_index = Plane.first_nonzero_index(n.coordinates)\n            initial_coefficient = n.coordinates[initial_index]\n\n            basepoint_coords[initial_index] = float(c)/initial_coefficient\n            self.basepoint = Vector(basepoint_coords)\n\n        except Exception as e:\n            if str(e) == Plane.NO_NONZERO_ELTS_FOUND_MSG:\n                self.basepoint = None\n            else:\n                raise e\n\n\n    def parallel(self, p):\n        return self.normal_vector.parallel(p.normal_vector)\n\n\n    def same_plane(self, p):\n        if not(self.parallel(p)):\n            return False\n        if (self.constant_term==0) and (p.constant_term==0):\n            return True\n        if (self.constant_term==0) ^ (p.constant_term==0):\n            return False\n        n1 = self.normal_vector.times_scalar(1/float(self.constant_term))\n        n2 = p.normal_vector.times_scalar(1 / float(p.constant_term))\n        return (n1 == n2)\n\n\n    def __eq__(self, p):\n        return ((self.constant_term==p.constant_term)and(self.normal_vector==p.normal_vector))\n\n\n    def __str__(self):\n\n        num_decimal_places = 3\n\n        def write_coefficient(coefficient, is_initial_term=False):\n            coefficient = round(coefficient, num_decimal_places)\n            if coefficient % 1 == 0:\n                coefficient = int(coefficient)\n\n            output = ''\n\n            if coefficient < 0:\n                output += '-'\n            if coefficient > 0 and not is_initial_term:\n                output += '+'\n\n            if not is_initial_term:\n                output += ' '\n\n            if abs(coefficient) != 1:\n                output += '{}'.format(abs(coefficient))\n\n            return output\n\n        n = self.normal_vector\n\n        try:\n            initial_index = Plane.first_nonzero_index(n.coordinates)\n            terms = [write_coefficient(n.coordinates[i], is_initial_term=(i==initial_index)) + 'x_{}'.format(i+1)\n                     for i in range(self.dimension) if round(n.coordinates[i], num_decimal_places) != 0]\n            output = ' '.join(terms)\n\n        except Exception as e:\n            if str(e) == self.NO_NONZERO_ELTS_FOUND_MSG:\n                output = '0'\n            else:\n                raise e\n\n        constant = 
round(self.constant_term, num_decimal_places)\n if constant % 1 == 0:\n constant = int(constant)\n output += ' = {}'.format(constant)\n\n return output\n\n\n @staticmethod\n def first_nonzero_index(iterable):\n for k, item in enumerate(iterable):\n if not MyDecimal(item).is_near_zero():\n return k\n raise Exception(Plane.NO_NONZERO_ELTS_FOUND_MSG)\n\n\nclass MyDecimal(Decimal):\n def is_near_zero(self, eps=1e-10):\n return abs(self) < eps\n\n\n'''\np1 = Plane(Vector([-0.412,3.806,0.728]),-3.46)\np2 = Plane(Vector([1.03,-9.515,-1.82]),8.65)\nprint(p1.parallel(p2), p1.same_plane(p2))\n\np3 = Plane(Vector([2.611,5.528,0.283]),4.6)\np4 = Plane(Vector([7.715,8.306,5.342]),3.76)\nprint(p3.parallel(p4), p3.same_plane(p4))\n\np5 = Plane(Vector([-7.926,8.625,-7.212]),-7.952)\np6 = Plane(Vector([-2.642,2.875,-2.404]),-2.443)\nprint(p5.parallel(p6), p5.same_plane(p6))\n'''\n","sub_path":"plane.py","file_name":"plane.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"612311672","text":"#!/usr/bin/env python\n\n'''\nSimple bruteforce script with threading\n\n\nAuthor: Richard Isberg (richard.isberg@gmail.com)\n\n\nLicense:\n * ----------------------------------------------------------------------------\n * \"THE BEER-WARE LICENSE\" (Revision 42):\n * wrote this file. As long as you retain this\n * notice you can do whatever you want with this stuff. If we meet some day,\n * and you think this stuff is worth it, you can buy me a beer in return.\n * Richard Isberg\n * ----------------------------------------------------------------------------\n\nToDo: - Solve scheduling problems for the workers, the workers spend a considerable\n amount of time waiting to acquaire a semaphore lock.\n\n - Handle signals better, make sure to nicely kill threads and join them if\n possible.\n\n - Refactor status printing.\n\n\nChanges: 2016-06-16 (Richard Isberg)\n - Added multithreading support\n\n\n'''\n\nimport sys\nimport signal\nimport re\nimport math\nimport string\nimport itertools\nimport threading\nimport time\nimport datetime\nimport argparse\n\nfrom profile_support import profile\n\n\n''' Job object '''\nclass Job:\n\n def __init__(self, id, start, size, base, key_length):\n self.id = id\n self.start = start\n self.size = size\n self.base = base\n self.key_length = key_length\n self.counter = 0\n self.result = False\n self.worker_id = -1\n\n\n''' Worker class '''\nclass Worker_Thread(threading.Thread):\n\n def __init__(self, thread_id, jobs_queue, results_queue):\n threading.Thread.__init__(self)\n self.terminate = False\n self.id = thread_id\n self.type = 'worker'\n self.jobs_queue = jobs_queue\n self.results_queue = results_queue\n\n def stop(self):\n self.terminate = True\n\n def run(self):\n while not self.terminate:\n if len(self.jobs_queue):\n jobs_queue_lock.acquire()\n if len(self.jobs_queue):\n job = self.jobs_queue.pop(0)\n jobs_queue_lock.release()\n\n job.worker_id = self.id\n result = self.process_job(job)\n\n results_queue_lock.acquire()\n self.results_queue.append(result)\n results_queue_lock.release()\n\n else:\n time.sleep(0.01)\n\n ''' Process job function '''\n def process_job(self, job):\n generator = itertools.product(job.base, repeat=job.key_length)\n generator = itertools.islice(generator, job.start, job.start+job.size)\n\n for key in generator:\n if self.terminate:\n break\n\n key = ''.join(key)\n job.counter += 1\n if check_key(key):\n job.result = key\n\n return job\n\n''' Dispatcher 
thread '''\nclass Dispatcher_Thread(threading.Thread):\n\n def __init__(self, thread_id, jobs_queue, job_template):\n threading.Thread.__init__(self)\n self.terminate = False\n self.id = thread_id\n self.type = 'dispatcher'\n self.job_template = job_template\n self.jobs_queue = jobs_queue\n\n def stop(self):\n self.terminate = True\n\n def run(self):\n id_counter = 1\n for length in range(self.job_template['key_min_length'], self.job_template['key_max_length'] + 1 ):\n for chunk in range(int(math.pow(len(self.job_template['base']),length)/self.job_template['size']) + 1):\n if self.terminate:\n break # break nested for loop\n\n while len(self.jobs_queue) >= 200:\n if self.terminate:\n break\n\n time.sleep(0.001)\n\n jobs_queue_lock.acquire()\n self.jobs_queue.append(Job(id=id_counter, start=(chunk*self.job_template['size']), size=self.job_template['size'], base=self.job_template['base'], key_length=length))\n jobs_queue_lock.release()\n id_counter += 1\n\n\n\n''' Timer class '''\nclass Timer:\n\n def __init__(self):\n self.start_time = 0\n self.end_time = 0\n self.timer = 0\n\n def start(self):\n self.reset()\n\n def stop(self):\n self.end_time = time.time()\n\n def reset(self):\n self.start_time = time.time()\n self.end_time = time.time()\n\n def time(self):\n self.end_time = time.time()\n return self.end_time - self.start_time\n\n def set_alarm(self, time):\n self.reset()\n self.timer = time\n\n def trigger_alarm(self):\n self.timer = 0\n\n def alarm(self):\n if self.time() >= self.timer:\n return True\n else:\n return False\n\n\n''' Reprinter class '''\nclass Reprinter:\n def __init__(self):\n self.text = ''\n\n def moveup(self, lines):\n for _ in range(lines):\n sys.stdout.write(\"\\x1b[A\")\n\n def reprint(self, text):\n # Clear previous text by overwritig non-spaces with spaces\n self.moveup(self.text.count(\"\\n\"))\n sys.stdout.write(re.sub(r\"[^\\s]\", \" \", self.text))\n\n # Print new text\n lines = min(self.text.count(\"\\n\"), text.count(\"\\n\"))\n self.moveup(lines)\n sys.stdout.write(text)\n self.text = text\n\n\n\n\n''' Key check function '''\ndef check_key(key):\n\n try:\n from .check import check_key\n return check_key(key)\n except:\n return False\n\n\n\n\n\n''' Global resources '''\njobs_queue_lock = threading.Semaphore(value=1)\nresults_queue_lock = threading.Semaphore(value=1)\n\n\n''' main '''\ndef main():\n ''' Resources '''\n threads = []\n jobs_queue = []\n results_queue = []\n elapsed_time_timer = Timer()\n elapsed_time_timer.start()\n\n progress_print_timer = Timer()\n progress_print_timer.set_alarm(1)\n\n reprinter = Reprinter()\n\n statistics = {\n 'search_scope': 0,\n 'current_key_length': 0,\n 'checks_preformed': 0,\n 'checks_per_second': 0,\n 'completion_precentage': 0,\n 'last_returned_job_id': 0\n }\n\n config = {\n 'verbose': False,\n 'thread_count': 0,\n 'job_size': 1,\n 'key_min_length': 0,\n 'key_max_length': 0,\n 'base': ''\n }\n\n ''' Parse args '''\n parser = argparse.ArgumentParser()\n parser.add_argument('-v','--verbose',\n action='store_true',\n help='enable verbose output')\n\n parser.add_argument('--min-keylength',\n type=int,\n default=1,\n metavar='NUM',\n help='min keylength in scope (default=1)')\n\n parser.add_argument('--max-keylength',\n type=int,\n default=4,\n metavar='NUM',\n help='max keylength in scope (default=4)')\n\n parser.add_argument('--keylength',\n type=int,\n metavar='NUM',\n help='sets max and min keylength to NUM')\n\n parser.add_argument('--base-preset',\n dest='base_preset',\n action='append',\n choices=['digits', 
'asciilowercase', 'asciiuppercase', 'punctuation'],\n                        help='base presets, (punctuation preset depends on your C locale)')\n\n    parser.add_argument('--custom-base',\n                        dest='custom_base',\n                        metavar='STRING',\n                        help='string of characters of which to add to base')\n\n    parser.add_argument('-j', '--num-threads',\n                        dest='num_threads',\n                        type=int,\n                        default=1,\n                        metavar='NUM',\n                        help='number of threads (default=1)')\n\n    parser.add_argument('--job-size',\n                        dest='job_size',\n                        type=int,\n                        default=1,\n                        metavar='NUM',\n                        help='size of each job (default=1)')\n\n    args = parser.parse_args()\n\n    if args.verbose:\n        config['verbose'] = True\n\n    if args.min_keylength:\n        config['key_min_length'] = args.min_keylength\n\n    if args.max_keylength:\n        config['key_max_length'] = args.max_keylength\n\n    if args.keylength:\n        config['key_min_length'] = args.keylength\n        config['key_max_length'] = args.keylength\n\n    if args.base_preset:\n        for preset in args.base_preset:\n            if preset == 'digits':\n                config['base'] += string.digits\n\n            if preset == 'asciilowercase':\n                config['base'] += string.ascii_lowercase\n\n            if preset == 'asciiuppercase':\n                config['base'] += string.ascii_uppercase\n\n            if preset == 'punctuation':\n                config['base'] += string.punctuation\n\n\n    if args.custom_base:\n        config['base'] += ''.join(set(args.custom_base))\n\n    if args.num_threads:\n        config['thread_count'] = args.num_threads\n\n    if args.job_size:\n        config['job_size'] = args.job_size\n\n\n    ''' Signal handler '''\n    def signal_handler(signal, frame):\n        print('Caught signal {}, terminating...'.format(signal))\n        for thread in threads:\n            print('killing {} thread...'.format(thread.type))\n            thread.stop()\n\n        sys.exit(0)\n\n    ''' Register handlers '''\n    signal.signal(signal.SIGINT, signal_handler)\n    signal.signal(signal.SIGTERM, signal_handler)\n\n\n    for length in range(config['key_min_length'], config['key_max_length'] + 1 ):\n        statistics['search_scope'] += int(math.pow(len(config['base']),length))\n\n    job_template = {\n        'size': config['job_size'],\n        'base': config['base'],\n        'key_min_length': config['key_min_length'],\n        'key_max_length': config['key_max_length']\n    }\n\n    ''' Create and start threads '''\n    thread = Dispatcher_Thread(0, jobs_queue, job_template)\n    threads.append(thread)\n\n    for thread_id in range(1, config['thread_count']+1):\n        thread = Worker_Thread(thread_id, jobs_queue, results_queue)\n        threads.append(thread)\n\n\n    for thread in threads:\n        thread.start()\n\n\n\n    ''' Print configuration '''\n    print(\"---- Configuration ----\".ljust(80, '-'))\n    print(\"Base: {}\".format(config['base']))\n    print(\"Min key length: {}\".format(config['key_min_length']))\n    print(\"Max key length: {}\".format(config['key_max_length']))\n    print(\"Search scope size: {:.0f}\".format(statistics['search_scope']))\n    print(\"---- Progress ----\".ljust(80, '-'))\n\n\n    '''\n    Collect results and update statistics while the needle is not found and the number of checks performed does not overshoot the given search scope\n\n    '''\n    needle = False\n    collected_results = 0\n    running = True\n    while running:\n\n        ''' Collect results until queue is empty'''\n        while len(results_queue):\n\n            results_queue_lock.acquire()\n            result = results_queue.pop(0)\n            results_queue_lock.release()\n\n\n            statistics['last_returned_job_id'] = result.id\n            statistics['checks_preformed'] += result.counter\n            statistics['current_key_length'] = result.key_length\n            statistics['completion_precentage'] = float(statistics['checks_preformed']) / float(statistics['search_scope']) * 100\n\n            ''' Check if key is found '''\n            if result.result:\n                needle = 
result.result\n jobs_queue_lock.acquire()\n jobs_queue[:] = []\n jobs_queue_lock.release()\n progress_print_timer.trigger_alarm()\n running = False\n\n\n if statistics['checks_preformed'] >= statistics['search_scope']:\n progress_print_timer.trigger_alarm()\n running = False\n\n ''' Print results '''\n if progress_print_timer.alarm():\n progress_print_timer.set_alarm(0.1)\n printable_stats = 'Checks preformed: {:.0f}/{:.0f}\\n'.format(statistics['checks_preformed'], statistics['search_scope'])\n printable_stats += 'Avg checks per second: {:.2f}\\n'.format(statistics['checks_preformed']/elapsed_time_timer.time())\n printable_stats += 'Current key length: {}\\n'.format(statistics['current_key_length'])\n printable_stats += 'Last returned job id: {}\\n'.format(statistics['last_returned_job_id'])\n printable_stats += 'Jobs in queue: {}\\n'.format(len(jobs_queue))\n printable_stats += 'Time elapsed: {}\\n'.format(datetime.timedelta(seconds=elapsed_time_timer.time()))\n try:\n printable_stats += 'Time left: {}\\n'.format((datetime.timedelta(seconds=( elapsed_time_timer.time() / ( ( float(statistics['checks_preformed']) / float(statistics['search_scope']) ) * 100 ) * 100 ) - elapsed_time_timer.time())))\n\n except ZeroDivisionError:\n printable_stats += 'Time left: 0\\n'\n\n if running:\n marker = '>'\n else:\n marker = '||'\n printable_stats += '[{done}{marker}{undone}] {precentage:.2f}%\\n'.format(\n marker=marker,\n done='=' * int(statistics['completion_precentage'] * 0.77),\n undone=' ' * (77 - int(statistics['completion_precentage'] * 0.77)),\n precentage=statistics['completion_precentage'])\n\n reprinter.reprint(printable_stats)\n\n time.sleep(0.001)\n\n\n print(\"---- Result ----\".ljust(80, '-'))\n\n if needle:\n print(needle)\n else:\n print(\"No match found within given search scope!\")\n\n ''' Wait for threads to terminate '''\n for thread in threads:\n thread.stop()\n thread.join()\n\n\nif __name__ == '__main__':\n main()\n\n# EOF\n","sub_path":"bruteforce.py","file_name":"bruteforce.py","file_ext":"py","file_size_in_byte":13600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"324335852","text":"\"\"\"Unit tests for the print JSON reporting plugin.\"\"\"\nimport json\nimport os\n\nfrom yapsy.PluginManager import PluginManager\n\nimport statick_tool\nfrom statick_tool.issue import Issue\nfrom statick_tool.package import Package\nfrom statick_tool.plugins.reporting.print_json_reporting_plugin import (\n PrintJsonReportingPlugin,\n)\nfrom statick_tool.reporting_plugin import ReportingPlugin\n\n\ndef test_print_json_reporting_plugin_found():\n \"\"\"Test that the plugin manager finds the print json reporting plugin.\"\"\"\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Reporting\": ReportingPlugin,\n }\n )\n manager.collectPlugins()\n assert any(\n plugin_info.plugin_object.get_name() == \"print_json\"\n for plugin_info in manager.getPluginsOfCategory(\"Reporting\")\n )\n assert any(\n plugin_info.name == \"Print JSON Reporting Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Reporting\")\n )\n\n\ndef test_print_json_reporting_plugin_report_cert(capsys):\n \"\"\"Test the output of the reporting plugin.\"\"\"\n pjrp = PrintJsonReportingPlugin()\n package = Package(\n 
\"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n issues = {\n \"tool_a\": [\n Issue(\"test.txt\", 1, \"tool_a\", \"type\", 1, \"This is a test\", \"MEM50-CPP\")\n ]\n }\n\n pjrp.report(package, issues, \"level\")\n captured = capsys.readouterr()\n output = captured.out.splitlines()[0]\n assert (\n output\n == '{\"issues\": [{\"fileName\": \"test.txt\", \"lineNumber\": 1, \"tool\": \"tool_a\", \"type\": \"type\", \"severity\": 1, \"message\": \"This is a test\", \"certReference\": \"MEM50-CPP\"}]}'\n )\n assert json.loads(output)\n\n\ndef test_print_json_reporting_plugin_report_nocert(capsys):\n \"\"\"Test the output of the reporting plugin.\"\"\"\n pjrp = PrintJsonReportingPlugin()\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n issues = {\n \"tool_a\": [\n Issue(\"test.txt\", 1, \"tool_a\", \"type\", 1, \"This is a test\", None),\n Issue(\"test2.txt\", 2, \"tool_b\", \"type\", 2, \"This is a second test\", None),\n ]\n }\n\n pjrp.report(package, issues, \"level\")\n captured = capsys.readouterr()\n output = captured.out.splitlines()[0]\n assert (\n output\n == '{\"issues\": [{\"fileName\": \"test.txt\", \"lineNumber\": 1, \"tool\": \"tool_a\", \"type\": \"type\", \"severity\": 1, \"message\": \"This is a test\", \"certReference\": \"\"}, {\"fileName\": \"test2.txt\", \"lineNumber\": 2, \"tool\": \"tool_b\", \"type\": \"type\", \"severity\": 2, \"message\": \"This is a second test\", \"certReference\": \"\"}]}'\n )\n assert json.loads(output)\n","sub_path":"tests/plugins/reporting/print_json_reporting_plugin/test_print_json_reporting_plugin.py","file_name":"test_print_json_reporting_plugin.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"438540134","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 31 21:49:39 2017\n\n-read_nvt pulled from og script:\n\"Extracts LED data from .NVT video file\n\nadapted from matlab code written by Susan Schwarz\"\n\n@author: Patrick\n\"\"\"\n\nimport numpy as np\nimport csv\n\ndef read_nvt(adv,filename,trial_data):\n #TODO: rewrite this in simpler way\n \"\"\"set stuff up\"\"\"\n \n \n fid = open(filename, 'r')\n header = {}\n header_string = fid.read(1024).splitlines()\n for pair in header_string:\n if str(pair).startswith('-SamplingFrequency '):\n header['fs'] = np.float(pair[19:])\n elif str(pair).startswith('-CheetahRev '):\n header['cheetah_ver'] = str(pair[12:])\n elif str(pair).startswith('-EnableFieldEstimation '):\n header['field_est'] = str(pair[23:])\n elif str(pair).startswith('-DirectionOffset '):\n header['direction_offset'] = str(pair[17:])\n \n if 'fs' in header.keys():\n if 'field_est' in header and header['field_est'] == 'True':\n adv['framerate'] = header['fs'] * 2.\n else:\n adv['framerate'] = header['fs']\n \n if 'direction_offset' in header.keys():\n adv['offset'] = header['direction_offset']\n \n \n #specify the nvt datatypes\n nvt_dtype = np.dtype([ \n ('swstx' , ' best_score:\n best_score = episode_score\n best_path_actions = episode_actions\n\n print(f'\\rEpisode: {i_episode}/{num_episodes}, score: {episode_score}, Average(last 100): {sum(episode_scores[:-100])/len(episode_scores)}, epsilon: {epsilon}', end='')\n\nprint(f'\\nAfter {num_episodes}, average score: {sum(episode_scores)/len(episode_scores)}, Average(last 100): {sum(episode_scores[:-100])/len(episode_scores)}')\nprint(f'Best score: {best_score}, Sequence of actions: 
{[gridworld.num2action[action] for action in best_path_actions]}')\n\nplt.plot(range(len(episode_scores)), episode_scores)\nplt.xlabel('Episodes ->')\nplt.ylabel('Score ->')\nplt.title('Training progress')\nplt.show()\n","sub_path":"Q_learning/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"423337928","text":"import os\r\nimport traceback\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom skimage import io\r\nimport torch\r\nfrom torchvision import transforms\r\nfrom torch.utils.data import Dataset\r\nimport image_pb2\r\nfrom pathlib import Path\r\nimport cv2\r\nfrom MNIST_color import MNISTColor\r\nfrom PIL import Image\r\n\r\nclass TripletFaceDataset(Dataset):\r\n\r\n def __init__(self, root_dir, csv_name, num_triplets, format, dataset_depth, use_torchvision=False, transform = None):\r\n \r\n self.root_dir = root_dir\r\n self.df = pd.read_csv(csv_name, header=0, names=['id', 'name', 'class'], \r\n dtype={'id': str, 'name': str, 'class': str})\r\n self.num_triplets = num_triplets\r\n self.transform = transform\r\n self.training_triplets = self.generate_triplets(self.df, self.num_triplets)\r\n self.dataset_depth = dataset_depth\r\n self.format = format\r\n self.use_torchvision = use_torchvision\r\n self.dataset_name = os.path.basename(os.path.dirname(root_dir)) #fix\r\n self.dataset = None\r\n if use_torchvision:\r\n print(\"dataset_name: \", self.dataset_name)\r\n if self.dataset_name == 'mnist':\r\n train_val = os.path.basename(root_dir)\r\n self.dataset = MNISTColor(os.path.join(root_dir), train='train'==train_val,\r\n transform=transform, target_transform=None,\r\n download=True, dataset_depth=dataset_depth)\r\n \r\n \r\n def get_image(self, person, image):\r\n for person_file in os.listdir(self.root_dir):\r\n #print(\"hit line 25\")\r\n person_path = os.path.join(self.root_dir, person_file)\r\n if os.path.isfile(person_path) and person_file == person+\".pid\":\r\n #print(\"hit line 27\")\r\n f = open(person_path, \"rb\")\r\n p = image_pb2.Person()\r\n p.ParseFromString(f.read())\r\n f.close()\r\n for i in p.images:\r\n #print(\"hit line 33\")\r\n if i.name == image:\r\n #print(i.name)\r\n return i\r\n raise Exception(\"pid file not found. 
Variables: image: \", image, \", person: \", person)\r\n @staticmethod\r\n def generate_triplets(df, num_triplets):\r\n #dictionary to keep track of which images belong to which \"faces\":\r\n def make_dictionary_for_face_class(df):\r\n\r\n '''\r\n - face_classes = {'class0': [class0_id0, ...], 'class1': [class1_id0, ...], ...}\r\n '''\r\n face_classes = dict()\r\n for idx, label in enumerate(df['class']):\r\n if label not in face_classes:\r\n face_classes[label] = []\r\n face_classes[label].append(df.iloc[idx, 0])\r\n return face_classes\r\n \r\n triplets = []\r\n classes = df['class'].unique()\r\n face_classes = make_dictionary_for_face_class(df)\r\n \r\n for _ in range(num_triplets):\r\n\r\n '''\r\n - randomly choose anchor, positive and negative images for triplet loss\r\n - anchor and positive images in pos_class\r\n - negative image in neg_class\r\n - at least, two images needed for anchor and positive images in pos_class\r\n - negative image should have different class as anchor and positive images by definition\r\n '''\r\n \r\n pos_class = np.random.choice(classes)\r\n neg_class = np.random.choice(classes)\r\n while len(face_classes[pos_class]) < 2:\r\n pos_class = np.random.choice(classes)\r\n while pos_class == neg_class:\r\n neg_class = np.random.choice(classes)\r\n \r\n pos_name = df.loc[df['class'] == pos_class, 'name'].values[0]\r\n neg_name = df.loc[df['class'] == neg_class, 'name'].values[0]\r\n\r\n if len(face_classes[pos_class]) == 2:\r\n ianc, ipos = np.random.choice(2, size = 2, replace = False)\r\n else:\r\n ianc = np.random.randint(0, len(face_classes[pos_class]))\r\n ipos = np.random.randint(0, len(face_classes[pos_class]))\r\n while ianc == ipos:\r\n ipos = np.random.randint(0, len(face_classes[pos_class]))\r\n ineg = np.random.randint(0, len(face_classes[neg_class]))\r\n \r\n triplets.append([face_classes[pos_class][ianc], face_classes[pos_class][ipos], face_classes[neg_class][ineg], \r\n pos_class, neg_class, pos_name, neg_name])\r\n \r\n return triplets\r\n \r\n def __getitem__(self, idx):\r\n keep = {}\r\n try:\r\n #print(\"first idx print: \", idx)\r\n anc_id, pos_id, neg_id, pos_class, neg_class, pos_name, neg_name = self.training_triplets[idx]\r\n \r\n #anc_img = os.path.join(self.root_dir, str(pos_name), str(anc_id) + self.format)\r\n #pos_img = os.path.join(self.root_dir, str(pos_name), str(pos_id) + self.format)\r\n #neg_img = os.path.join(self.root_dir, str(neg_name), str(neg_id) + self.format)\r\n \r\n #print(\"line 107 anc_img: \", anc_img)\r\n anc_img, pos_img, neg_img = None, None, None\r\n if not self.use_torchvision:\r\n anc_id = str(anc_id) + self.format\r\n pos_id = str(pos_id) + self.format\r\n neg_id = str(neg_id) + self.format\r\n keep = {'anc_id': anc_id, 'pos_id': pos_id, 'neg_id': neg_id, 'pos_name': pos_name, 'neg_name': neg_name}\r\n \r\n anc_img = self.get_image(pos_name, anc_id).contents\r\n pos_img = self.get_image(pos_name, pos_id).contents\r\n neg_img = self.get_image(neg_name, neg_id).contents \r\n\r\n anc_img = io.imread(anc_img, plugin='imageio')\r\n pos_img = io.imread(pos_img, plugin='imageio')\r\n neg_img = io.imread(neg_img, plugin='imageio')\r\n #print(\"anc_img.shape: \", anc_img.shape)\r\n \r\n if self.dataset_depth==1:\r\n anc_img = cv2.cvtColor(anc_img, cv2.COLOR_GRAY2RGB)\r\n pos_img = cv2.cvtColor(pos_img, cv2.COLOR_GRAY2RGB)\r\n neg_img = cv2.cvtColor(neg_img, cv2.COLOR_GRAY2RGB)\r\n else:\r\n if len(anc_img.shape) < 3:\r\n anc_img = cv2.cvtColor(anc_img, cv2.COLOR_GRAY2RGB)\r\n if len(pos_img.shape) < 3:\r\n pos_img = 
cv2.cvtColor(pos_img, cv2.COLOR_GRAY2RGB)\r\n if len(neg_img.shape) < 3:\r\n neg_img = cv2.cvtColor(neg_img, cv2.COLOR_GRAY2RGB)\r\n #else if dataset is mnist \r\n elif self.dataset_name == 'mnist':\r\n #print(\"idx: \", idx)\r\n #print(\", anc_id: \", anc_id)\r\n #print(\", self.dataset.targets: \", self.dataset.targets)\r\n #print(\", self.dataset.data: \", self.dataset.data.item())\r\n #print(\", self.dataset.targets[anc_id]: \", self.dataset.targets.item()[anc_id])\r\n #print(\"self.dataset[int(anc_id)]: \", self.dataset[int(anc_id)])\r\n #print(\", self.dataset.data[587]: \", self.dataset.data[587])\r\n #print(\", self.dataset.data.shape: \", self.dataset.data.shape)\r\n #print(\", self.dataset.data[anc_id].shape: \", self.dataset.data[anc_id].shape)\r\n #print(\", self.dataset.data[anc_id]: \", self.dataset.data.item()[anc_id])\r\n anc_dict = self.dataset[int(anc_id)]\r\n pos_dict = self.dataset[int(pos_id)]\r\n neg_dict = self.dataset[int(neg_id)]\r\n anc_img = anc_dict[\"image\"]\r\n pos_img = anc_dict[\"image\"]\r\n neg_img = anc_dict[\"image\"]\r\n #print(\"anc_img: \", anc_img)\r\n # anc_img = Image.fromarray(anc_img.numpy(), mode='L')\r\n # pos_img = Image.fromarray(pos_img.numpy(), mode='L')\r\n # neg_img = Image.fromarray(neg_img.numpy(), mode='L')\r\n # if self.dataset_depth == 1:\r\n # anc_img = np.asarray(anc_img)\r\n # pos_img = np.asarray(pos_img)\r\n # neg_img = np.asarray(neg_img) \r\n \r\n #if self.dataset_depth==1:\r\n # anc_img = cv2.cvtColor(anc_img, cv2.COLOR_GRAY2RGB)\r\n # pos_img = cv2.cvtColor(pos_img, cv2.COLOR_GRAY2RGB)\r\n # neg_img = cv2.cvtColor(neg_img, cv2.COLOR_GRAY2RGB)\r\n\r\n pos_class = torch.from_numpy(np.array([pos_class]).astype('long'))\r\n neg_class = torch.from_numpy(np.array([neg_class]).astype('long'))\r\n \r\n #print(\"anc_img: \", anc_img)\r\n \r\n sample = {'anc_id': anc_id, 'pos_id': pos_id, 'neg_id': neg_id, 'anc_img': anc_img, 'pos_img': pos_img, 'neg_img': neg_img, 'pos_class': pos_class, 'neg_class': neg_class}\r\n\r\n if not self.use_torchvision and self.transform:\r\n sample['anc_img'] = self.transform(sample['anc_img'])\r\n sample['pos_img'] = self.transform(sample['pos_img'])\r\n sample['neg_img'] = self.transform(sample['neg_img'])\r\n except:\r\n print(\"traceback: \", traceback.format_exc())\r\n sample = {'exception': True}\r\n print(\"exception occurred: \", keep)\r\n return sample\r\n \r\n \r\n def __len__(self):\r\n \r\n return len(self.training_triplets)\r\n \r\n\r\ndef get_dataloader(train_root_dir, valid_root_dir, \r\n train_csv_name, valid_csv_name, \r\n num_train_triplets, num_valid_triplets, \r\n batch_size, num_workers,\r\n train_format, valid_format,\r\n train_dataset_depth,val_dataset_depth,\r\n train_torchvision, val_torchvision,\r\n train_input_size, val_input_size,\r\n pure_validation, pure_training):\r\n data_transforms = {'train': None, 'valid': None}\r\n if train_torchvision: \r\n data_transforms['train'] = transforms.Compose([\r\n transforms.ToPILImage(),#extra, not recommended to keep here\r\n transforms.RandomResizedCrop(train_input_size),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ])\r\n else:\r\n data_transforms['train'] = transforms.Compose([\r\n transforms.ToPILImage(),\r\n transforms.RandomResizedCrop(train_input_size),#extra, not recommended to keep here\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])\r\n ])\r\n if 
val_torchvision:\r\n data_transforms['valid'] = transforms.Compose([\r\n transforms.ToPILImage(),#extra, not recommended to keep here\r\n transforms.Resize((val_input_size, val_input_size)),\r\n transforms.CenterCrop(val_input_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ])\r\n else:\r\n data_transforms['valid'] = transforms.Compose([\r\n transforms.ToPILImage(),\r\n transforms.Resize((val_input_size, val_input_size)),#extra, not recommended to keep here\r\n transforms.CenterCrop(val_input_size),#extra, not recommended to keep here\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])\r\n ])\r\n #data_transforms = {\r\n # 'train': transforms.Compose([\r\n # transforms.ToPILImage(),\r\n # transforms.RandomHorizontalFlip(),\r\n # transforms.ToTensor(),\r\n # transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])\r\n # ]),\r\n # 'valid': transforms.Compose([\r\n # transforms.ToPILImage(),\r\n # transforms.ToTensor(),\r\n # transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])\r\n # ])}\r\n face_dataset = {\r\n 'train': None, \r\n 'valid': None}\r\n data_size = {\r\n 'train': None,\r\n 'valid': None}\r\n \r\n if not pure_training:\r\n face_dataset['valid'] = TripletFaceDataset(root_dir = valid_root_dir,\r\n csv_name = valid_csv_name,\r\n num_triplets = num_valid_triplets,\r\n format = valid_format,\r\n dataset_depth= val_dataset_depth,\r\n use_torchvision = val_torchvision,\r\n transform = data_transforms['valid'])\r\n data_size['valid'] = len(face_dataset['valid'])\r\n \r\n if not pure_validation:\r\n face_dataset['train'] = TripletFaceDataset(root_dir = train_root_dir,\r\n csv_name = train_csv_name,\r\n num_triplets = num_train_triplets,\r\n format = train_format,\r\n dataset_depth= train_dataset_depth,\r\n use_torchvision = train_torchvision,\r\n transform = data_transforms['train'])\r\n data_size['train'] = len(face_dataset['train'])\r\n #face_dataset = {\r\n # 'train' : TripletFaceDataset(root_dir = train_root_dir,\r\n # csv_name = train_csv_name,\r\n # num_triplets = num_train_triplets,\r\n # format = train_format,\r\n # dataset_depth= train_dataset_depth,\r\n # use_torchvision = train_torchvision,\r\n # transform = data_transforms['train']),\r\n # 'valid' : TripletFaceDataset(root_dir = valid_root_dir,\r\n # csv_name = valid_csv_name,\r\n # num_triplets = num_valid_triplets,\r\n # format = valid_format,\r\n # dataset_depth= val_dataset_depth,\r\n # use_torchvision = val_torchvision,\r\n # transform = data_transforms['valid'])}\r\n #data_size = {x: len(face_dataset[x]) for x in ['train', 'valid']}\r\n dataloaders = {\r\n x: torch.utils.data.DataLoader(face_dataset[x], batch_size = batch_size, shuffle = False, num_workers = num_workers)\r\n for x in ['train', 'valid']}\r\n #dataloaders = {\r\n # x: face_dataset[x] \r\n # for x in ['train', 'valid']}\r\n \r\n\r\n return dataloaders, data_size","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":15240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"380639882","text":"from unityagents import UnityEnvironment\nimport numpy as np\nimport torch\nfrom collections import deque\nimport matplotlib.pyplot as plt\nimport time\nimport ddpg_agent\nimport sys\n\nfrom cProfile import Profile\nfrom pstats import Stats\n\nTARGET_SCORE = 30.0\n\nenv = UnityEnvironment(file_name='./Reacher_Windows_x86_64_20/Reacher.exe')\n\n# 
get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\n# reset the environment\nenv_info = env.reset(train_mode=True)[brain_name]\n\n# number of agents\nnum_agents = len(env_info.agents)\n\n# size of each action\naction_size = brain.vector_action_space_size\n\n# examine the state space \nstates = env_info.vector_observations\nstate_size = states.shape[1]\n\nagent = ddpg_agent.Agent(num_agents=num_agents, state_size=state_size, action_size=action_size, random_seed=31337)\n\ndef tick_simulation(actions):\n env_info = env.step(actions)[brain_name]\n return env_info.vector_observations, env_info.rewards, env_info.local_done\n\ndef plot_scores(scores, mean_scores):\n fig = plt.figure()\n ax = fig.add_subplot(111) \n plt.plot(np.arange(1, len(scores)+1), scores)\n plt.plot(np.arange(1, len(mean_scores)+1), mean_scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.savefig(\"score_plot.png\", dpi=600)\n plt.close(fig)\n\ndef ddpg(n_episodes=500, max_t=10000):\n scores_deque = deque(maxlen=100)\n scores = []\n windowed_mean_scores = []\n start_time = time.time()\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations\n agent.reset()\n score = np.zeros(num_agents)\n\n for t in range(max_t):\n # Request new actions for each agent\n actions = agent.determine_actions(states)\n\n # Step the environment and retrieve the new states, rewards and whether or not the agent is done\n next_states, rewards, dones = tick_simulation(actions)\n\n # Submit the N-step transition to the replay buffer\n agent.store_transitions(states, actions, rewards, next_states, dones)\n\n # Learn from a minibatch of transitions\n if (t % ddpg_agent.LEARN_INTERVAL) == 0:\n for _ in range(ddpg_agent.LEARN_STEPS):\n agent.learn()\n \n states = next_states\n score += rewards\n if np.any(dones):\n break \n\n agent_ave_score = np.mean(score)\n\n scores_deque.append(agent_ave_score)\n scores.append(agent_ave_score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tScore: {:.2f}\\tElapsed time: {:.2f}'.format(i_episode, np.mean(scores_deque), agent_ave_score, time.time() - start_time))\n\n windowed_mean_scores.append(np.mean(scores_deque))\n plot_scores(scores, windowed_mean_scores)\n if (i_episode > 100) and (np.mean(scores_deque) > TARGET_SCORE):\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n return scores\n return scores\n\n# Optional profiling step (just a handful of episodes)\nif len(sys.argv) > 1 and sys.argv[1].lower() == 'profile':\n ddpg(70, 700)\n\n env.close()\n exit()\n\nscores = ddpg()\n\nenv.close()","sub_path":"p2_continuous-control/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"189965843","text":"\nimport sys\nimport os\n\nfrom PyQt5.QtWidgets import QApplication, QWidget, QStackedWidget, QLabel, QShortcut, QGridLayout\nfrom PyQt5.QtGui import QKeySequence\n\nclass Window(QWidget):\n \n def __init__(self):\n \n super().__init__()\n self.initUi()\n \n self.show()\n \n \n def initUi(self):\n\n layout = QGridLayout()\n\n stacked_widget = QStackedWidget()\n stacked_widget.addWidget(QLabel('Linux'))\n stacked_widget.addWidget(QLabel('Windows'))\n stacked_widget.addWidget(QLabel('Mac'))\n 
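# each addWidget call appends one more page; the stacked widget shows only the page at currentIndex\n        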
stacked_widget.addWidget(QLabel('Android'))\n\n        layout.addWidget(stacked_widget, 0, 0)\n        \n        self.setLayout(layout)\n\n        left_shortcut = QShortcut(self)\n        left_shortcut.setKey('Ctrl+L')\n        right_shortcut = QShortcut(self)\n        right_shortcut.setKey('Ctrl+R')\n\n        left_shortcut.activated.connect(\n            lambda: self.on_shortcut_activated(\n                left_shortcut, stacked_widget))\n\n        right_shortcut.activated.connect(\n            lambda: self.on_shortcut_activated(\n                right_shortcut, stacked_widget)) \n\n\n    def on_shortcut_activated(self, shortcut, target):\n\n        index = target.currentIndex()\n        \n        if shortcut.key() == 'Ctrl+R':\n            if index < target.count():\n                target.setCurrentIndex(index + 1)\n        elif shortcut.key() == 'Ctrl+L':\n            if index > 0:\n                target.setCurrentIndex(index - 1)\n\n\ndef main(args):\n    \n    app = QApplication(args)\n    window = Window()\n    sys.exit(app.exec_())\n    \n\nif __name__ == '__main__':\n    main(sys.argv)\n","sub_path":"PyQt5-Examples/08_containers/stacked_widget.py","file_name":"stacked_widget.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"286734418","text":"# from sys import exit\nN, M = [int(n) for n in input().split()]\n# N = int(input())\na = [(int(n), 1) for n in input().split()]\nB = [0 for _ in range(M)]\nC = [0 for _ in range(M)]\nfor i in range(M):\n    B[i], C[i] = [int(n) for n in input().split()]\n    a.append((C[i], B[i]))\n\na = sorted(a, key=lambda x: -x[0])\ni = 0\nans = 0\n# print(a)\nwhile(N != 0):\n    num = min(N, a[i][1])\n    ans += num*a[i][0]\n    # print(num*a[i][0])\n    N -= num\n    i += 1\n\nprint(ans)\n","sub_path":"problems/Beginner/D/127.py","file_name":"127.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"119373297","text":"#coding=utf-8\n# ycat\t\t\t2018-08-03\t create \nimport rospy\nimport time\nimport setup\nif __name__ == '__main__':\n\tsetup.setCurPath(__file__)\nimport shell,utility\nimport log\nimport ros2.msg \nimport ros2.rosUtility \nimport traceWriter\n\n# Return the list of currently advertised services \ndef getSrvList():\n\tassert utility.is_ros()\n\tret = shell.run_cmd2(\"rosservice list\")\n\tif not ret[1]:\n\t\traise Exception(\"Failed to run rosservice list\") \n\treturn [ x for x in ret[0].split(\"\\r\\n\") if x ]\n\t\n# Return the service message type \ndef getSrvMsgType(srvName):\n\tassert utility.is_ros()\n\tret = shell.run_cmd2(\"rosservice info \"+srvName)\n\tif not ret[1]:\n\t\traise Exception(\"Failed to run rosservice info\") \n\tline = ret[0].splitlines()\n\tif len(line) < 3:\n\t\traise Exception(\"Failed to run rosservice info (2)\") \n\tline = line[2]\n\treturn line[line.find(\"Type: \")+len(\"Type: \"):]\n\n\n# Given a Request instance, return the corresponding Response type\ndef getResponse(msg):\n\tm = __import__(msg.__module__)\n\tm = getattr(m,\"srv\")\n\tname = type(msg).__name__\n\tindex = -len(\"Request\") \n\tassert name[index:] == \"Request\"\n\treturn getattr(m,name[:index]+\"Response\")()\n\t\ndef getCaller(request):\n\t#{'callerid': '/rosservice_15958_1603682173451', 'md5sum': '04af24139a49a00f4c578ef63795a508', 'service': '/ss/Reloc'}\n\tif \"callerid\" in request._connection_header:\n\t\treturn request._connection_header[\"callerid\"]\n\treturn \"unknown\"\n\n# Decorator that auto-handles responses carrying code and message fields \ndef catch(func):\n\tdef __call(*p):\n\t\tassert len(p) > 0\n\t\tassert len(p) <= 2\n\t\timport log \n\t\ttry:\n\t\t\tr = func(*p) \n\t\t\tif r is None:\n\t\t\t\treturn getResponse(p[-1])\n\t\t\telse:\n\t\t\t\treturn r\n\t\texcept Exception as e:\n\t\t\tlog.exception(\"invoke \"+str(func),e)\n\t\t\tret = 
getResponse(p[-1])\t\t\n\t\t\tassert hasattr(ret,\"code\")\n\t\t\tassert hasattr(ret,\"message\")\n\t\t\tret.code = -1\n\t\t\tret.message = str(e)\n\t\t\treturn ret\n\t__call.__name__ = func.__name__\n\treturn __call\n\t\n\n\t\nclass service:\n\tdef __init__(self,serviceType,serviceName,callback,useRosType=False):\n\t\tassert ros2.rosUtility.isInited()\n\t\tself.type = serviceType\n\t\tself.name = serviceName\n\t\tself.callback = callback\n\t\tif useRosType:\n\t\t\th = self._handleRos\n\t\telse:\n\t\t\th = self._handleDict\n\t\tif isinstance(serviceType,str):\n\t\t\tt = ros2.msg.getSrv(serviceType).rosType\n\t\telse:\n\t\t\tt = serviceType\n\t\t\tassert useRosType\n\t\tlog.info(\"start ros service, name=\"+serviceName + \", type=\"+serviceType) #{'callerid': '/rosservice_15386_1603682094284', 'md5sum': '04af24139a49a00f4c578ef63795a508', 'service': '/ss/Reloc'}\n\t\tself.imp = rospy.Service(serviceName, t, h) \n\t\t\n\t\t\n\tdef _handleDict(self,request):\n\t\tmsg = ros2.msg.getSrv(self.type)\n\t\tdata = msg.request.toDict(request)\n\t\ttraceWriter.addEvent(\"rossrv.%s.%s\"%(self.type,self.name),data)\n\t\ttry:\n\t\t\tresponse = self.callback(data)\n\t\t\treturn msg.response.toRos(response)\n\t\texcept Exception as e:\n\t\t\tlog.exception(\"handle \"+self.type,e)\n\t\treturn None\n\t\t\n\tdef _handleRos(self,request): \n\t\tmsg = ros2.msg.getSrv(self.type)\n\t\tdata = msg.request.toDict(request)\n\t\ttraceWriter.addEvent(\"rossrv.%s.%s\"%(self.type,self.name),data)\n\t\ttry:\n\t\t\treturn self.callback(request)\n\t\texcept Exception as e:\n\t\t\tlog.exception(\"handle \"+self.type,e)\n\t\treturn None\n\t\t\n\n\t\t\n#getSrvMsgType(serviceName) \nclass client:\n\tdef __init__(self,serviceType,serviceName,useRosType=False):\n\t\tself.name = serviceName\n\t\tif serviceType is None:\n\t\t\tself.type = getSrvMsgType(serviceName)\n\t\telse:\n\t\t\tself.type = serviceType\n\t\tself.useRosType = useRosType\n\t\tself.imp = rospy.ServiceProxy(serviceName, ros2.msg.getSrv(self.type).rosType)\n\t\t\n\t\t\n\tdef __call__(self,*param,**params):\n\t\tmsg = ros2.msg.getSrv(self.type)\n\t\ttry:\n\t\t\tif len(param) == 1 and isinstance(param[0],dict) and len(params) == 0:\n\t\t\t\t# Call path used when a single dict argument is passed \n\t\t\t\tresponse = self.imp(msg.request.toRos(param[0])) \n\t\t\telse:\n\t\t\t\tresponse = self.imp(*param,**params) \n\t\t\tif self.useRosType:\n\t\t\t\treturn response\n\t\t\telse:\n\t\t\t\treturn msg.response.toDict(response)\n\t\texcept rospy.ServiceException as e:\n\t\t\tlog.exception(\"Service call failed:\"+self.type,e)\n\t\t\traise\n\t\t\n\tdef wait(self,timeout):\n\t\trospy.wait_for_service(self.name,timeout=timeout)\n\t\n\tdef invoke(self,*param,**params):\n\t\treturn self(*param,**params)\n\t\t\n\t\t\n# The return value is a dict \ndef invoke(serviceType,serviceName,*params):\n\treturn client(serviceType,serviceName)(*params)\n\t\t\n\ng_services = {}\n\ndef startService(serviceType,serviceName,callback):\n\tglobal g_services\n\tif serviceName not in g_services:\n\t\tg_services[serviceName] = service(serviceType,serviceName,callback,utility.is_ros())\n\t\n\n#def stopService():\n#\tglobal g_services\n#\tif serviceName in g_services:\n#\t\tdel g_services[serviceName]\n#\t\n\t\n############ unit test ############ \ndef testgetResponse():\n\timport std_srvs.srv as s\n\ta = s.SetBoolRequest()\n\tb = getResponse(a)\n\tassert isinstance(b,s.SetBoolResponse)\n\t\ndef testclient1():\n\tret = invoke(\"roscpp/SetLoggerLevel\",\"/rosout/set_logger_level\",\"/rosout\",\"ERROR\")\n\tprint(ret)\n\n\tc = 
client(None,\"/rosout/get_loggers\")()\n\tll = c[\"loggers\"]\n\tfind = False\n\tfor r in ll:\n\t\tif r[\"name\"] == \"/rosout\":\n\t\t\tassert r[\"level\"] == \"error\"\n\t\t\tfind = True\n\tassert find \n\t\ndef testserver():\n\tdef handle_reloc(msg):\n\t\tprint(\"caller is\",getCaller(msg))\n\t\t\n\tstartService(\"robot_msg/Reloc\",\"ss/Reloc\",\t\thandle_reloc)\n\tros2.rosUtility.wait()\n\t\nif __name__ == \"__main__\": \n\t#要先执行 rosrun turtlesim turtlesim_node\n\tutility.start()\n\tros2.msg.init()\n\tros2.rosUtility.init(\"ycattest2\")\n\ttestgetResponse()\n\ttestclient1() \n\t\n\ttestserver()\n\t\n\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","sub_path":"hucais/trunk/common/ros2/srv.py","file_name":"srv.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"446779110","text":"import heapq\nimport math\nfrom math import sqrt\nfrom collections import ChainMap, defaultdict\n\n\ndef cube_round(h):\n \"\"\"Round cubic hexagonal coordinates to an integer cubic tile.\"\"\"\n x, y, z = h\n rx = round(x)\n ry = round(y)\n rz = round(z)\n\n x_diff = abs(rx - x)\n y_diff = abs(ry - y)\n z_diff = abs(rz - z)\n\n if x_diff > y_diff and x_diff > z_diff:\n rx = -ry - rz\n elif y_diff > z_diff:\n ry = -rx - rz\n else:\n rz = -rx - ry\n\n return rx, ry, rz\n\n\ndef cube_to_hex(h):\n \"\"\"Convert a 3-tuple of cubic coords to 2 \"even-q vertical\" coords.\"\"\"\n x, _, z = h\n return x, z + (x + (int(x) % 2)) // 2\n\n\ndef hex_to_cube(h):\n \"\"\"Convert a 2-tuple of \"even-q vertical\" coords to cubic coords.\"\"\"\n x, row = h\n\n z = row - ((x + (x & 1)) >> 1)\n return (\n x,\n -x - z,\n z\n )\n\n\ndef hex_round(h):\n return cube_to_hex(cube_round(hex_to_cube(h)))\n\n\nroot3 = 3 ** 0.5\n\n# The calculated dimensions of a tile in pixels.\n#\n# They aren't round numbers due to the way we have to align tile graphics to\n# pixel boundaries. See the implementation of coord_to_screen for the integer\n# version; it is from those numbers that these are derived.\n#\n# These need to be exact or the errors add up!!!\nHEX_WIDTH = 95 * 4 / 3\nHEX_HEIGHT = root3 * 0.25 * 128\n\n\nclass PriorityQueue:\n def __init__(self):\n self.elements = []\n\n def empty(self):\n return len(self.elements) == 0\n\n def put(self, item, priority):\n heapq.heappush(self.elements, (priority, item))\n\n def get(self):\n return heapq.heappop(self.elements)[1]\n\n# TODO: This doesn't calculate a correct distance. 
(0,0) to (1,1) should be adjacent.\n# See HexGrid.distance\n\n\ndef heuristic(a, b):\n \"\"\"Get the distance between points a and b.\"\"\"\n (x1, y1) = a\n (x2, y2) = b\n dx = x1 - x2\n dy = y1 - y2\n return sqrt(dx * dx + dy * dy)\n\n\nclass NoPath(Exception):\n \"\"\"There is no route to the goal.\"\"\"\n\n\n\nclass HexGrid:\n def __init__(self):\n self.cells = {} # hex coord : val ; bit 0 blocks sight, bit 1 blocks movement so 3 == 1 | 2 which blocks both\n self.layers = []\n\n# def __setitem__(self, coords, value):\n# self.cells[coords] = value\n\n def blocks_sight(self, coord):\n return self.cells.get(coord, 1) & 1\n\n def blocks_movement(self, coord):\n \"\"\"Return True if the given coordinates are blocked.\"\"\"\n return any(layer.get(coord) for layer in self.layers) or self.cells.get(coord, 2) & 2\n\n \"\"\" depreciated\n def blocked(self, coords):\n return any(layer.get(coords) for layer in self.layers) or self.cells.get(coords, True)\n \"\"\"\n\n NEIGHBOURS_EVEN = [\n (0, -1),\n (1, 0),\n (1, 1),\n (0, 1),\n (-1, 1),\n (-1, 0),\n ]\n NEIGHBOURS_ODD = [\n (0, -1),\n (1, -1),\n (1, 0),\n (0, 1),\n (-1, 0),\n (-1, -1)\n ]\n\n @staticmethod\n def coord_to_world(coord):\n \"\"\"Convert a map coordinate to a Cartesian world coordinate.\"\"\"\n cx, cy = coord\n wx = 1.5 * cx\n wy = root3 * (cy - 0.5 * (cx & 1))\n return wx, wy\n\n @staticmethod\n def coord_to_screen(coord):\n \"\"\"Convert a map coordinate to screenspace coordinates.\"\"\"\n cx, cy = coord\n return (\n cx * 95,\n (2 * cy - (cx & 1)) * 24\n )\n\n @staticmethod\n def world_to_coord(coord):\n \"\"\"Get the map coordinates for a screenspace pixel (x, y).\"\"\"\n x, y = coord\n q = x / 1.5\n r = -x / 3 + root3 / 3 * y\n return cube_to_hex(cube_round((q, -q - r, r)))\n\n @classmethod\n def neighbours(cls, coords):\n \"\"\"Iterate over the neighbour of the given coords.\n\n Note that we use an \"even-q vertical\" layout in the terminology of\n http://www.redblobgames.com/grids/hexagons/#coordinates\n\n \"\"\"\n x, y = coords\n neighbours = cls.NEIGHBOURS_ODD if x % 2 else cls.NEIGHBOURS_EVEN\n for dx, dy in neighbours:\n yield x + dx, y + dy\n\n def unblocked_neighbours(self, coords):\n \"\"\"Adjacent cells that are not blocked.\"\"\"\n return (c for c in self.neighbours(coords) if not self.blocks_movement(c))\n\n def hex_in_front(self, coords, facing):\n \"\"\"\n Adjacent hex in the facing direction. 
May be off-map.\n\n        :param coords:\n        :param facing:\n        :return: Tuple: empty if the front hex is off-map\n        \"\"\"\n        x, y = coords\n        neighbours = HexGrid.NEIGHBOURS_ODD if x % 2 else HexGrid.NEIGHBOURS_EVEN\n        dx, dy = neighbours[facing]\n        ret = x + dx, y + dy,\n        return ret\n\n    @staticmethod\n    def distance(a, b):\n        \"\"\"Calculate the distance between two pairs of coordinates.\"\"\"\n        ax, ay = HexGrid.coord_to_world(a)\n        bx, by = HexGrid.coord_to_world(b)\n        dx = ax - bx\n        dy = ay - by\n        return math.sqrt(dx * dx + dy * dy)\n\n    def find_path(self, start, goal):\n        \"\"\"Find a path from start to goal using A*.\n\n        This can be quite expensive if goal is unreachable.\n\n        \"\"\"\n        if self.blocks_movement(goal):\n            raise NoPath(start, goal)\n        frontier = PriorityQueue()\n        frontier.put(start, 0)\n        came_from = {}\n        cost_so_far = {}\n        came_from[start] = None\n        cost_so_far[start] = 0\n\n        while not frontier.empty():\n            current = frontier.get()\n\n            if current == goal:\n                break\n\n            for next in self.unblocked_neighbours(current):\n                new_cost = cost_so_far[current] + self.distance(current, next)\n                if (next not in cost_so_far or new_cost < cost_so_far[next]):\n                    cost_so_far[next] = new_cost\n                    priority = new_cost + heuristic(goal, next)\n                    frontier.put(next, priority)\n                    came_from[next] = current\n        else:\n            raise NoPath(start, goal)\n\n        path = [goal]\n        while current != start:\n            current = came_from[current]\n            path.append(current)\n        return path\n\n    @staticmethod\n    def distance(start, target):\n        sx, sy, sz = hex_to_cube(start)\n        tx, ty, tz = hex_to_cube(target)\n        return max(abs(sx-tx), abs(sy-ty), abs(sz-tz))\n\n    def reachable(self, tile, dist=20):\n        \"\"\"Get the set of tiles that are reachable in at most dist steps.\n\n        This is a variation of Dijkstra's Algorithm. Note that this step does\n        not consider object layers, only the base grid.\n\n        \"\"\"\n        seen = {tile,}\n        found = {tile,}\n        costs = {tile: 0}\n        frontier = PriorityQueue()\n        frontier.put(tile, 0)\n\n        x, y = self.coord_to_world(tile)\n        while not frontier.empty():\n            t = frontier.get()\n            cost = costs[t]\n            if cost <= dist:\n                found.add(t)\n            for t in self.neighbours(t):\n                if self.blocks_movement(t):\n                    continue\n                newcost = cost + 1\n                if t in costs:\n                    oldcost = costs[t]\n                    if newcost < oldcost:\n                        costs[t] = newcost\n                else:\n                    costs[t] = newcost\n\n                if t not in seen and newcost <= dist:\n                    seen.add(t)\n                    frontier.put(t, newcost)\n        return found\n\n    def obstacles_in_line_of_sight(self, start, target):\n        \"\"\"\n        Can you see the target from the starting position?\n\n        The first blocking terrain type is considered visible: you can see into\n        the bush, but not through it. This is used for line-of-sight attacks,\n        e.g. 
you can throw a grenade over some things you can't shoot through\n\n :param start: Observer coordinates\n :param target: Target coordinates\n :return: set of obstacle coordinates (empty if full line of sight)\n \"\"\"\n if start == target:\n # Simplest case, avoids division by 0 in the code below\n return set()\n c_start = self.coord_to_world(start)\n c_target = self.coord_to_world(target)\n # Delta\n delta = (c_target[0] - c_start[0], c_target[1] - c_start[1])\n d_len = (delta[0] ** 2 + delta[1] ** 2) ** 0.5\n # Normalize to unit\n d_1 = (delta[0] / d_len, delta[1] / d_len)\n # Look for obstacles\n obstacles = set()\n for i in range(1, int(math.ceil(d_len))):\n checked = (c_start[0] + d_1[0] * i, c_start[1] + d_1[1] * i,)\n for fuzzy_x, fuzzy_y in (-1e-6, -1e-6), (-1e-6, 1e-6), (1e-6, -1e-6), (1e-6, 1e-6):\n checked_coord = self.world_to_coord((checked[0] + fuzzy_x, checked[1] + fuzzy_y))\n if checked_coord != start and self.blocks_sight(checked_coord):\n obstacles.add(checked_coord)\n return obstacles\n\n def visible(self, start, target):\n if target not in self.cells:\n return False # off-map\n\n obstacles = self.obstacles_in_line_of_sight(start, target)\n if obstacles and obstacles != {target}:\n return False\n\n return True\n\ndef test_hex_grid_distance():\n assert HexGrid.distance( (0,0), (1,1) ) == 1\n assert HexGrid.distance( (1,1), (0,0) ) == 1\n assert HexGrid.distance( (0,5), (7,0) ) == 9\n\n","sub_path":"lostcolony/pathfinding.py","file_name":"pathfinding.py","file_ext":"py","file_size_in_byte":9377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"47752742","text":"from __future__ import absolute_import\n\nfrom rest_framework import serializers\n\nfrom sentry.api.fields import ActorField\nfrom sentry.constants import DEFAULT_SORT_OPTION\nfrom sentry.models import Commit, Group, GroupStatus, Release, Repository, Team, User\nfrom sentry.models.group import looks_like_short_id\nfrom sentry.search.utils import InvalidQuery, parse_query\nfrom sentry.utils.cursors import Cursor\n\n\nclass ValidationError(Exception):\n pass\n\n\ndef build_query_params_from_request(request, projects):\n query_kwargs = {\n 'projects': projects,\n 'sort_by': request.GET.get('sort', DEFAULT_SORT_OPTION),\n }\n\n limit = request.GET.get('limit')\n if limit:\n try:\n query_kwargs['limit'] = int(limit)\n except ValueError:\n raise ValidationError('invalid limit')\n\n # TODO: proper pagination support\n cursor = request.GET.get('cursor')\n if cursor:\n query_kwargs['cursor'] = Cursor.from_string(cursor)\n\n query = request.GET.get('query', 'is:unresolved').strip()\n if query:\n try:\n query_kwargs.update(parse_query(projects, query, request.user))\n except InvalidQuery as e:\n raise ValidationError(\n u'Your search query could not be parsed: {}'.format(\n e.message)\n )\n\n return query_kwargs\n\n\ndef get_by_short_id(organization_id, is_short_id_lookup, query):\n if is_short_id_lookup == '1' and \\\n looks_like_short_id(query):\n try:\n return Group.objects.by_qualified_short_id(\n organization_id, query\n )\n except Group.DoesNotExist:\n pass\n\n\nSTATUS_CHOICES = {\n 'resolved': GroupStatus.RESOLVED,\n 'unresolved': GroupStatus.UNRESOLVED,\n 'ignored': GroupStatus.IGNORED,\n 'resolvedInNextRelease': GroupStatus.UNRESOLVED,\n\n # TODO(dcramer): remove in 9.0\n 'muted': GroupStatus.IGNORED,\n}\n\n\nclass InCommitValidator(serializers.Serializer):\n commit = serializers.CharField(required=True)\n repository = serializers.CharField(required=True)\n\n def 
validate_repository(self, attrs, source):\n value = attrs[source]\n project = self.context['project']\n try:\n attrs[source] = Repository.objects.get(\n organization_id=project.organization_id,\n name=value,\n )\n except Repository.DoesNotExist:\n raise serializers.ValidationError(\n 'Unable to find the given repository.'\n )\n return attrs\n\n def validate(self, attrs):\n attrs = super(InCommitValidator, self).validate(attrs)\n repository = attrs.get('repository')\n commit = attrs.get('commit')\n if not repository:\n raise serializers.ValidationError({\n 'repository': ['Unable to find the given repository.'],\n })\n if not commit:\n raise serializers.ValidationError({\n 'commit': ['Unable to find the given commit.'],\n })\n try:\n commit = Commit.objects.get(\n repository_id=repository.id,\n key=commit,\n )\n except Commit.DoesNotExist:\n raise serializers.ValidationError({\n 'commit': ['Unable to find the given commit.'],\n })\n return commit\n\n\nclass StatusDetailsValidator(serializers.Serializer):\n inNextRelease = serializers.BooleanField()\n inRelease = serializers.CharField()\n inCommit = InCommitValidator(required=False)\n ignoreDuration = serializers.IntegerField()\n ignoreCount = serializers.IntegerField()\n # in minutes, max of one week\n ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60)\n ignoreUserCount = serializers.IntegerField()\n # in minutes, max of one week\n ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60)\n\n def validate_inRelease(self, attrs, source):\n value = attrs[source]\n project = self.context['project']\n if value == 'latest':\n try:\n attrs[source] = Release.objects.filter(\n projects=project,\n organization_id=project.organization_id,\n ).extra(select={\n 'sort': 'COALESCE(date_released, date_added)',\n }).order_by('-sort')[0]\n except IndexError:\n raise serializers.ValidationError(\n 'No release data present in the system to form a basis for \\'Next Release\\''\n )\n else:\n try:\n attrs[source] = Release.objects.get(\n projects=project,\n organization_id=project.organization_id,\n version=value,\n )\n except Release.DoesNotExist:\n raise serializers.ValidationError(\n 'Unable to find a release with the given version.'\n )\n return attrs\n\n def validate_inNextRelease(self, attrs, source):\n project = self.context['project']\n try:\n attrs[source] = Release.objects.filter(\n projects=project,\n organization_id=project.organization_id,\n ).extra(select={\n 'sort': 'COALESCE(date_released, date_added)',\n }).order_by('-sort')[0]\n except IndexError:\n raise serializers.ValidationError(\n 'No release data present in the system to form a basis for \\'Next Release\\''\n )\n return attrs\n\n\nclass GroupValidator(serializers.Serializer):\n status = serializers.ChoiceField(choices=zip(\n STATUS_CHOICES.keys(), STATUS_CHOICES.keys()))\n statusDetails = StatusDetailsValidator()\n hasSeen = serializers.BooleanField()\n isBookmarked = serializers.BooleanField()\n isPublic = serializers.BooleanField()\n isSubscribed = serializers.BooleanField()\n merge = serializers.BooleanField()\n discard = serializers.BooleanField()\n ignoreDuration = serializers.IntegerField()\n ignoreCount = serializers.IntegerField()\n # in minutes, max of one week\n ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60)\n ignoreUserCount = serializers.IntegerField()\n # in minutes, max of one week\n ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60)\n assignedTo = ActorField()\n\n # TODO(dcramer): remove in 9.0\n snoozeDuration = 
serializers.IntegerField()\n\n    def validate_assignedTo(self, attrs, source):\n        value = attrs[source]\n        if value and value.type is User and not self.context['project'].member_set.filter(\n                user_id=value.id).exists():\n            raise serializers.ValidationError(\n                'Cannot assign to non-team member')\n\n        if value and value.type is Team and not self.context['project'].teams.filter(\n                id=value.id).exists():\n            raise serializers.ValidationError(\n                'Cannot assign to a team without access to the project')\n\n        return attrs\n\n    def validate(self, attrs):\n        attrs = super(GroupValidator, self).validate(attrs)\n        if len(attrs) > 1 and 'discard' in attrs:\n            raise serializers.ValidationError(\n                'Other attributes cannot be updated when discarding')\n        return attrs\n","sub_path":"src/sentry/api/helpers/group_index.py","file_name":"group_index.py","file_ext":"py","file_size_in_byte":7363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"487105350","text":"\nimport sys\nimport numpy as np\nfrom scipy.integrate import ode\nimport matplotlib.pyplot as plt\nfrom INapIKModel import INapIKModel\nfrom utils import integrateForward\n\ndef main(argv):\n    resultsFilename = 'results/integrationINapIKFig10_1_INapIKHighThreshold.npz'\n    v0 = -60.00\n    t0 = 0.0\n    tf = 25.0\n    dt = 1e-5\n    nTSteps = round((tf-t0)/dt)\n    times = np.empty(nTSteps+1)\n    i0 = 10\n    def i(t):\n        return(i0)\n    iNapIKModel = INapIKModel.getHighThresholdInstance()\n    iNapIKModel.setI(i=i)\n    n0 = iNapIKModel._nInf(v=v0)\n    y0 = np.array([v0, n0])\n    res = integrateForward(deriv=iNapIKModel.deriv, t0=t0, y0=y0, dt=dt, \n                           nTSteps=nTSteps)\n    np.savez(resultsFilename, times=res['times'], ys=res['ys'])\n\n    plt.plot(res['times'], res['ys'][0,:])\n    plt.xlabel(\"Time (sec)\")\n    plt.ylabel(\"Membrane Potential (mV)\")\n    plt.show()\n\nif __name__ == '__main__':\n    main(sys.argv)\n\n","sub_path":"scripts/ch10/doIntegrateINapIKFig10_1_INapIKHighThreshold.py","file_name":"doIntegrateINapIKFig10_1_INapIKHighThreshold.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"632201138","text":"\n#%% \n# Neural network model\nimport pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('Data/cleaned.csv', parse_dates=['FECHA'])\n\n#%%\ndf= df.set_index(['FECHA', 'ESTACION'])\n#for now, work with just one station\n#find the stations with the most readings.\n\nstation_lectures=df.stack().count(level='ESTACION')\nmax_lectures=station_lectures.max()\nstation_lectures[station_lectures==max_lectures] \n#For the 2017-2018 period, the 7 stations with the most observations are\n#AJM, HGM, INN, MER, MPA, TLA, XAL. 
\n\n#%%\n#Temporarily will work with TLA\ndfin=(df.stack()\n .reset_index()[df.stack().reset_index().ESTACION=='TLA']\n .drop('ESTACION', axis=1))\n\ndint=dfin.pivot(index='FECHA',columns='level_2', values=0).reset_index()\n\ndint=dint.set_index('FECHA')\ndint=dint[dint.index.year == 2017]\n\n#In 2017, 13.6% of the data is missing\ndint.unstack()[dint.unstack()==-99].count()/dint.unstack().count()\ncleandf=dint.unstack()[dint.unstack()!=-99]\n#%%\nimport datetime\n\n","sub_path":"CDMX/Code/NNmodel.py","file_name":"NNmodel.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"592352368","text":"from typing import Optional\r\nfrom data import Reader\r\nfrom datetime import datetime \r\n#import tkinter for GUI\r\nimport tkinter as tk\r\nfrom tkinter import ttk, W, LEFT, END\r\n#initializations for video\r\nfrom PIL import Image, ImageTk\r\n'''import cv2 #open source computer vision library\r\ncap = cv2.VideoCapture(0)\r\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 600)'''\r\n#font types\r\nTITLE_FONT = (\"Verdana\", 14, 'bold')\r\nLARGE_FONT = (\"Verdana\", 12)\r\nMEDIUM_FONT = (\"Verdana\", 10)\r\nSMALL_FONT = (\"Verdana\", 8)\r\n#import stuff for graph\r\nimport csv\r\nimport matplotlib\r\nfrom matplotlib import ticker as mticker\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk \r\nmatplotlib.use('TkAgg')\r\nfrom matplotlib import figure\r\nfrom matplotlib import dates as mdates\r\n#import animation to make graph live\r\nimport matplotlib.animation as animation\r\nfrom matplotlib import style\r\nstyle.use(\"seaborn-darkgrid\")\r\n#import vertical scroll bar\r\nfrom vertical_scroll_frame import VerticalScrolledFrame\r\nfrom sendtext import pCheck\r\nfrom sendtext import allOk\r\nfrom main import user_settings\r\nconfig_path, db_path, img_path = user_settings()\r\n\r\n\r\n#initialize entry configs, email_config, num_config, provider_config, and SQLite reader\r\ndb_name = 'sensor_db.db'\r\nreader = Reader(db_path, db_name)\r\n\r\nnum_contacts = 5\r\nwith open(config_path, \"r\") as file:\r\n config_settings = list(csv.reader(file))\r\n if len(config_settings) != 6:\r\n with open(config_path, 'w', newline='') as file:\r\n channel_buttons_config = [-1]*16\r\n num_config = ['Enter Phone Number Here:']*num_contacts\r\n provider_config = ['']*num_contacts\r\n email_config = ['Email']*num_contacts\r\n upper_config = [1000]*11\r\n lower_config = [0]*11\r\n pump_config = [0, 0, None, \"off\"]\r\n writer = csv.writer(file)\r\n writer.writerows([channel_buttons_config,num_config,provider_config,email_config,upper_config, lower_config, pump_config])\r\n config_settings = [channel_buttons_config,num_config,provider_config,email_config, upper_config, lower_config, pump_config]\r\n file.flush()\r\n channel_buttons_config = config_settings[0]\r\n num_config = config_settings[1]\r\n provider_config = config_settings[2]\r\n email_config = config_settings[3]\r\n upper_config = config_settings[4]\r\n lower_config = config_settings[5]\r\n\r\n#create figure for plots and set figure size/layout\r\n#f = figure.Figure(figsize=(8.5,17.5), dpi=100)\r\nf = figure.Figure(figsize=(10.2,10), dpi=100, facecolor='white')\r\n#f.subplots_adjust(top=0.993, bottom=0.015, hspace=0.4)\r\nf.subplots_adjust(top=0.993, bottom=0.015, left=0.04, right = 0.96, hspace=0.65)\r\n\r\nparam_dict = {}\r\nparam_list = ['pH', 'TDS (ppm)', 'Rela. 
Humidity (%)', 'Air Temp (\\N{DEGREE SIGN}C)', 'Water Temp (\\N{DEGREE SIGN}C)', 'Water Level (cm)']\r\nparam_ylim = [(5, 9), (0, 1500), (20, 80), (15, 35), (15, 35), (0, 61)]\r\n#param_list = ['pH', 'Water Temp', 'Air Temp', 'Nitrate', 'TDS', 'DO', 'Ammonia', 'Phosphate', 'Humidity', 'Flow Rate', 'Water Level']\r\nlive_dict = {}\r\n\r\n\r\n########################\r\n#this is for texting\r\n\r\nallIsGood = {}\r\nminuta = {}\r\n#minuta is used to make sure that you're not bombarded with texts\r\nMinute = {}\r\nfor i in param_list:\r\n    allIsGood[i] = True\r\n    minuta[i] = None\r\n    Minute[i] = None\r\n\r\n########################\r\n\r\n\r\nclass Live_Text:\r\n    def __init__(self, label):\r\n        self.label = label\r\n    \r\nclass Sensor_Plot:\r\n    def __init__(self, plot, tList, x_ax, ylim, param, incoming_data, plot_color):\r\n        self.plot = plot\r\n        self.tList = tList\r\n        self.x_ax = x_ax\r\n        self.ylim = ylim\r\n        self.param = param\r\n        self.incoming_data = incoming_data #<- graph is bound to incoming data; the Data Summary table displays the most recent value (the graph keeps up to 20 of them)\r\n        self.plot_color = plot_color #initially 'b' for all\r\n    \r\n    def make_plot(self):\r\n        self.plot.clear()\r\n        self.plot.set_xlabel('Time')\r\n        self.plot.set_ylabel(self.param)\r\n        self.plot.set_ylim(self.ylim)\r\n\r\n        self.x_ax.xaxis_date()\r\n        self.x_ax.xaxis.set_major_formatter(mdates.DateFormatter('%I:%M:%S %p'))\r\n        \r\n        [tk.set_visible(True) for tk in self.x_ax.get_xticklabels()]\r\n        [label.set_rotation(10) for label in self.x_ax.xaxis.get_ticklabels()] #slant the x axis tick labels for extra coolness\r\n\r\n        if len(self.tList) > 4:\r\n            self.x_ax.set_xlim(self.tList[-2], self.tList[0])\r\n            self.x_ax.xaxis.set_major_locator(mticker.MaxNLocator(nbins = 4))\r\n        \r\n        self.plot.fill_between(self.tList, self.incoming_data, #where=(self.incoming_data > [0]*len(self.incoming_data))\r\n                               facecolor=self.plot_color, edgecolor=self.plot_color, alpha=0.5) #blue at initialization\r\n\r\ndef initialize_plots(): #initializes plots...\r\n    global initialize_plots\r\n    try:\r\n        most_recent = reader.get_timeset(table=\"SensorData\", num=30) #initializes plot with up to 30 recent points if possible\r\n        for i, param in enumerate(param_list, 1):\r\n            tList = []\r\n            most_recent_any_size = []\r\n            for j in range(len(most_recent)):\r\n                time_f = datetime.strptime(most_recent[j][0], \"%m/%d/%Y %H:%M:%S\")\r\n                tList.append(time_f)\r\n                most_recent_any_size.append(most_recent[j][i])\r\n\r\n            subplot = f.add_subplot(6, 2, i) # sharex?\r\n            x_ax = f.get_axes()\r\n            \r\n            current_plot = Sensor_Plot(subplot, tList, x_ax[i-1], param_ylim[i-1], param, most_recent_any_size, 'b')\r\n            param_dict[param] = current_plot\r\n            current_plot.make_plot()\r\n            \r\n    except: #if there are no data points available to plot, initialize the subplots\r\n        for i, param in enumerate(param_list, 1):\r\n            subplot = f.add_subplot(6, 2, i)\r\n            x_ax = f.get_axes()\r\n            current_plot = Sensor_Plot(subplot, [], x_ax[i-1], param_ylim[i-1], param, [], 'b')\r\n            param_dict[param] = current_plot\r\n            #current_plot.make_plot() \r\n    reader.commit()\r\n    initialize_plots = _plots_initialized\r\n\r\ndef _plots_initialized(): #ensures plots are only initialized once\r\n    pass\r\ninitialize_plots()\r\n\r\n\r\n\r\n###ANIMATE FUNCTION, REMOVE LAST ITEM FROM MOST_RECENT_ANY LIST AND INSERT FRESHLY CALLED VALUE TO BE FIRST IN LIST\r\ndef animate(ii):\r\n\r\n    while True:\r\n        most_recent_time_graphed = param_dict[param_list[0]] #first, pulls up first plot\r\n        most_recent = reader.get_timeset(table=\"SensorData\", num=1)\r\n        
reader.commit() #if identical, do not animate\r\n #then checks that plot's time list\r\n if (len(most_recent) == 0):\r\n break\r\n \r\n time_reader = datetime.strptime(most_recent[0][0], \"%m/%d/%Y %H:%M:%S\")\r\n if (len(most_recent_time_graphed.tList) != 0) and (time_reader == most_recent_time_graphed.tList[0]):\r\n for i, param in enumerate(param_list, 1):\r\n current_text = live_dict[param]\r\n current_text.label.config(text=most_recent[0][i], fg=\"black\", bg=\"white\")\r\n break #checks if the timestamp is exactly the same as prior, i.e. no new data points have been logged in this frame\r\n #do I have to add an else?\r\n \r\n else:\r\n with open(config_path, \"r\") as file: #ELSE: this is a new data point, so go ahead and plot it\r\n config_settings = list(csv.reader(file))\r\n for i, key in enumerate(param_dict, 1):\r\n current_plot = param_dict[key]\r\n current_param_val = float(most_recent[0][i])\r\n current_text = live_dict[key] #update to live text data summary\r\n if current_param_val > float(config_settings[4][i-1]) or current_param_val < float(config_settings[5][i-1]):\r\n #print('NOT OK')\r\n ###sends text if new problem arises or every 5 minutes\r\n if allIsGood[key] and Minute[key] == None:\r\n print('new problem')\r\n Minute[key] = datetime.now().minute\r\n minuta[key] = Minute[key]\r\n pCheck(float(config_settings[4][i-1]),float(config_settings[5][i-1]),key,current_param_val,config_settings[1],config_settings[2]) #uncomment to test emergency texts\r\n elif allIsGood[key] == False and abs(Minute[key] - datetime.now().minute) % 5 == 0 and not (minuta[key] == datetime.now().minute):\r\n print('same problem')\r\n minuta[key] = datetime.now().minute\r\n pCheck(float(config_settings[4][i-1]),float(config_settings[5][i-1]),key,current_param_val,config_settings[1],config_settings[2]) #uncomment to test emergency texts\r\n #pass\r\n \r\n\r\n\r\n current_text.label.config(text=most_recent[0][i], fg=\"red\", bg=\"white\")\r\n current_plot.plot_color = 'r'\r\n \r\n #setting the parameter to not ok\r\n allIsGood[key] = False\r\n \r\n else:\r\n current_text.label.config(text=most_recent[0][i], fg=\"black\", bg=\"white\")\r\n current_plot.plot_color = 'g'\r\n \r\n ###setting the parameter back to true and sending \"ok\" text \r\n if allIsGood[key] == False:\r\n Minute[key] = None\r\n allOk(key,config_settings[1],config_settings[2])\r\n pass\r\n \r\n allIsGood[key] = True\r\n\r\n data_stream = current_plot.incoming_data\r\n time_stream = current_plot.tList\r\n data_stream.insert(0, most_recent[0][i])\r\n time_f = datetime.strptime(most_recent[0][0], \"%m/%d/%Y %H:%M:%S\")\r\n time_stream.insert(0, time_f)\r\n if len(data_stream) < 20: #graph updates, growing to show 20 points\r\n current_plot.make_plot()\r\n else: #there are 20 points and more available, so animation occurs\r\n data_stream.pop()\r\n time_stream.pop()\r\n current_plot.make_plot()\r\n break\r\n\r\n \r\n#initialization\r\nclass AllWindow(tk.Tk):\r\n def __init__(self, *args, **kwargs):\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n #add title\r\n tk.Tk.wm_title(self, \"AutoAquaponics\")\r\n container = tk.Frame(self)\r\n container.pack(side=\"top\", fill=\"both\", expand=True)\r\n container.grid_rowconfigure(0, weight=1)\r\n container.grid_columnconfigure(0, weight=1)\r\n #create navigation menu\r\n menubar = tk.Menu(container)\r\n navimenu = tk.Menu(menubar, tearoff=0)\r\n #add new items in navimenu to navigate to pages\r\n navimenu.add_command(label=\"Dashboard\", command=lambda: self.show_frame(HomePage))\r\n 
navimenu.add_command(label=\"Control Panel\", command=lambda: self.show_frame(ControlPanel))\r\n navimenu.add_command(label=\"Video Stream\", command=lambda: self.show_frame(VideoStream))\r\n navimenu.add_command(label=\"Settings\", command=lambda: self.show_frame(Settings))\r\n #add separator line\r\n navimenu.add_separator()\r\n #add quit button in menu that triggers a command\r\n navimenu.add_command(label=\"Quit\", command=self.die)\r\n #actually add the bar\r\n menubar.add_cascade(label=\"Menu\", menu=navimenu)\r\n tk.Tk.config(self, menu=menubar)\r\n #show the frames\r\n self.frames = {}\r\n #remember to add page to this list when making new ones\r\n for F in (HomePage, ControlPanel, Settings, VideoStream, Lights, WaterPump,\r\n FishFeeder, SensorArray, Oxygenator, Backwashing):\r\n frame = F(container, self)\r\n #set background color for the pages\r\n frame.config(bg='white')\r\n self.frames[F] = frame\r\n frame.grid(row=0, column=0, sticky=\"nsew\")\r\n self.show_frame(HomePage)\r\n def show_frame(self, cont):\r\n frame = self.frames[cont]\r\n frame.tkraise()\r\n #end program fcn triggered by quit button\r\n def die(self):\r\n exit()\r\n\r\n#add home page\r\nclass HomePage(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self,parent)\r\n #bring up vertical scroll frame and place it\r\n scframe = VerticalScrolledFrame(self)\r\n scframe.place(x=225, y=40)\r\n #bring up canvas with plot in the frame with vertical scroll bar\r\n canvas = FigureCanvasTkAgg(f, scframe.interior)\r\n #background = canvas.copy_from_bbox(f.bbox)\r\n canvas.draw()\r\n #create title label\r\n label = tk.Label(self, text=\"Dashboard\", bg='white', font = TITLE_FONT)\r\n label.place(x=600, y=10)\r\n #embed graph into canvas\r\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand = True)\r\n #add navigation bar\r\n toolbar = NavigationToolbar2Tk(canvas, self)\r\n toolbar.update()\r\n #color variables\r\n #data table labels\r\n table_title = tk.Label(self, text=\"Data Summary\", bg=\"white\", font = LARGE_FONT)\r\n table_title.place(x=28, y=40)\r\n for i, param in enumerate(param_list): #tk.Label self refers to Homepage\r\n param_label = tk.Label(self, text=param, fg=\"black\", bg=\"white\",\r\n font = MEDIUM_FONT, borderwidth = 2, relief = \"ridge\",\r\n width=16, height=1, anchor=W, justify=LEFT)\r\n param_label.place(x=5, y=65+22*i)\r\n\r\n for i, param in enumerate(param_list):\r\n loading_text = tk.Label(self, text=\"Loading\", fg=\"black\", bg=\"white\",\r\n font = MEDIUM_FONT, borderwidth = 2, relief = \"ridge\",\r\n width=7, height=1)\r\n loading_text.place(x=140, y=65+22*i)\r\n current_text = Live_Text(loading_text)\r\n live_dict[param] = current_text\r\n''' \r\nchannel_count = []\r\nbutton_count = []\r\non_times = []\r\noff_times = []\r\non_buttons = []\r\noff_buttons = []\r\n#add control panel page\r\nclass ControlPanel(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n \r\n #title\r\n tk.Label(self, text=\"Control Panel\", bg=\"white\", font=TITLE_FONT).grid(row=0, columnspan=14)\r\n\r\n #navigation button\r\n navibutton1 = ttk.Button(self, text=\"Back to Dashboard\", command=lambda: controller.show_frame(HomePage))\r\n navibutton1.grid(row = 1, columnspan = 14)\r\n\r\n #Save button\r\n self.saveButton= ttk.Button(self, text=\"Save\", command=self.popup)\r\n self.saveButton.grid(row=2, columnspan=14, pady=(0,0))\r\n #Discard button\r\n self.discardButton= ttk.Button(self, text=\"Discard\", command=self.discard)\r\n 
self.discardButton.grid(row=3, columnspan=14, pady=(0,20))\r\n \r\n self.grid_columnconfigure(0, weight=1)\r\n self.grid_columnconfigure(13, weight=1)\r\n \r\n \r\n def preconfig_label(count: str):\r\n return tk.Label(self, text=count, bg='white', font=SMALL_FONT)\r\n for count in range(1, 17):\r\n channel_count.append(preconfig_label(str(count)))\r\n \r\n preconfig_button = tk.Button(self, text=\"Channel OFF\", bg= \"red\", fg= \"white\", width=10, \r\n height=1, command=self.get_channel_state) #command will change state\r\n for count in range(16):\r\n button_count.append(preconfig_button)\r\n \r\n \r\n #Labels, buttons, and entries, oh my!\r\n for i in range(16):\r\n if i > 7 and i < 15:\r\n row = i - 4\r\n channel_count[i].grid(row=row, column=7, padx=(40,0))\r\n button_count[i].grid(row=row, column=8)\r\n tk.Label(self, text=\"Turn on for:\", bg=\"white\").grid(row=row, column=9)\r\n tk.Label(self, text=\"Turn off for:\", bg=\"white\").grid(row=row, column=11)\r\n on_element = (tk.Entry(self, width=10))\r\n on_buttons.append(on_element)\r\n on_element.grid(row=row, column=10) #on entry\r\n off_element = (tk.Entry(self, width=10))\r\n off_buttons.append(off_element)\r\n off_element.grid(row=row, column=12) #off entry\r\n elif i == 7:\r\n row = i + 4\r\n channel_count[i].grid(row=row, column=1, pady=(0,20))\r\n button_count[i].grid(row=row, column=2, pady=(0,20))\r\n tk.Label(self, text=\"Turn on for:\", bg=\"white\").grid(row=row, column=3, pady=(0,10))\r\n tk.Label(self, text=\"Turn off for:\", bg=\"white\").grid(row=row, column=5, pady=(0,10))\r\n on_element = (tk.Entry(self, width=10))\r\n on_buttons.append(on_element)\r\n on_element.grid(row=row, column=4, pady=(0,10)) #on entry\r\n off_element = (tk.Entry(self, width=10))\r\n off_buttons.append(off_element)\r\n off_element.grid(row=row, column=6, pady=(0,10)) #off entry\r\n elif i == 15:\r\n row = i - 4\r\n channel_count[i].grid(row=row, column=7, padx=(40,0), pady=(0,20))\r\n button_count[i].grid(row=row, column=8, pady=(0,20))\r\n tk.Label(self, text=\"Turn on for:\", bg=\"white\").grid(row=row, column=9, pady=(0,10))\r\n tk.Label(self, text=\"Turn off for:\", bg=\"white\").grid(row=row, column=11, pady=(0,10))\r\n on_element = (tk.Entry(self, width=10))\r\n on_buttons.append(on_element)\r\n on_element.grid(row=row, column=10, pady=(0,10)) #on entry\r\n off_element = (tk.Entry(self, width=10))\r\n off_buttons.append(off_element)\r\n off_element.grid(row=row, column=12, pady=(0,10)) #off entry\r\n else:\r\n row = i + 4\r\n channel_count[i].grid(row=row, column=1)\r\n button_count[i].grid(row=row, column=2)\r\n tk.Label(self, text=\"Turn on for:\", bg=\"white\").grid(row=row, column=3)\r\n tk.Label(self, text=\"Turn off for:\", bg=\"white\").grid(row=row, column=5)\r\n on_element = (tk.Entry(self, width=10))\r\n on_buttons.append(on_element)\r\n on_element.grid(row=row, column=4) #on entry\r\n off_element = (tk.Entry(self, width=10))\r\n off_buttons.append(off_element)\r\n off_element.grid(row=row, column=6) #off entry\r\n #Tells user what to input\r\n tk.Label(self, text=\"*Input Time in Hours\", bg=\"white\").grid(row=12, columnspan=14)\r\n \r\n self.discard()\r\n \r\n \r\n\r\n #fcn triggered by save button\r\n def popup(self):\r\n #get the input of all entries as a float value to the hundredth place\r\n self.popup = tk.Tk()\r\n self.popup.wm_title(\"Alert\")\r\n label = ttk.Label(self.popup, text=\"Are you sure you want to save?\", font=MEDIUM_FONT)\r\n label.grid(row=0, columnspan=14, pady=(10,20), padx = (5,5))\r\n \r\n # 
centers the popup window\r\n popup_width = self.popup.winfo_reqwidth()\r\n popup_height = self.popup.winfo_reqheight()\r\n positionRight = int(self.popup.winfo_screenwidth()/2 - popup_width/2 )\r\n positionDown = int(self.popup.winfo_screenheight()/2 - popup_height/2 )\r\n self.popup.geometry(\"+{}+{}\".format(positionRight, positionDown))\r\n \r\n\r\n YesB = ttk.Button(self.popup, text=\"YES\", command = self.save)\r\n YesB.grid(row=1, column=1, padx =(23,10), pady = (0,10))\r\n NoB = ttk.Button(self.popup, text=\"NO\", command = self.popup.destroy)\r\n NoB.grid(row=1, column=2, pady = (0,10))\r\n self.popup.mainloop()\r\n\r\n\r\n #triggered if user press YES in popup window \r\n def save(self):\r\n for i in range(16):\r\n try:\r\n user_ON = int(on_buttons[i].get())\r\n on_times.append(user_ON)\r\n except ValueError:\r\n on_times.append(0) #if left blank, fill w/zeros\r\n except AttributeError:\r\n on_times.append(0) #if you put not Integers, fill w/zeros\r\n for i in range(16):\r\n try:\r\n user_OFF = int(off_buttons[i].get())\r\n off_times.append(user_OFF)\r\n except ValueError:\r\n off_times.append(0) #if left blank, fill w/zeros\r\n except AttributeError:\r\n off_times.append(0) #if you put noy Integers, fill w/zeros\r\n # save channel button settings\r\n with open(config_path, 'r', newline='') as file:\r\n config_settings = list(csv.reader(file))\r\n channel_buttons_config = config_settings[0]\r\n on_config = on_times\r\n off_config = off_times\r\n upper_config = config_settings[3]\r\n lower_config = config_settings[4]\r\n with open(config_path, 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerows([channel_buttons_config, on_config, off_config, upper_config, lower_config])\r\n file.flush()\r\n #destroy popup window after writing file\r\n self.popup.destroy()\r\n \r\n #fcn triggered by discard button\r\n def discard(self):\r\n #Get last saved values\r\n with open(config_path, \"r\") as file:\r\n config_settings = list(csv.reader(file))\r\n for i in range(16):\r\n channel_buttons_config[i] = config_settings[0][i]\r\n on_buttons[i].delete(0, END)\r\n off_buttons[i].delete(0, END)\r\n on_buttons[i].insert(0, config_settings[1][i]) \r\n off_buttons[i].insert(0, config_settings[2][i])\r\n \r\n def get_channel_state(self):\r\n for i in range(16):\r\n if int(channel_buttons_config[i]) == -1: #change channel button color to green when channel is forced on\r\n button_count[i].configure(bg= \"green\")\r\n button_count[i].configure(text = \"Channel ON\")\r\n channel_buttons_config[i] = 1\r\n continue\r\n\r\n elif int(channel_buttons_config[i]) == 1: #change channel button color to purple to run on timer\r\n button_count[i].configure(bg= \"purple\")\r\n button_count[i].configure(text = \"Timer ON\")\r\n channel_buttons_config[i] = 0\r\n continue\r\n \r\n else:\r\n button_count[i].configure(bg= \"red\")\r\n button_count[i].configure(text = \"Channel OFF\")\r\n channel_buttons_config[i] = -1\r\n continue '''\r\n\r\n\r\nclass Settings(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n label = tk.Label(self, text=\"Settings\", bg='white', font = TITLE_FONT)\r\n label.grid(row = 0, columnspan= 14, pady=10)\r\n #navigation button\r\n navibutton1 = ttk.Button(self, text=\"Back to Dashboard\",\r\n command=lambda: controller.show_frame(HomePage))\r\n navibutton1.grid(row = 1, columnspan = 14)\r\n #Save button\r\n self.saveButton= ttk.Button(self, text=\"Save\", command=self.popup)\r\n self.saveButton.grid(row = 2, columnspan = 14)\r\n 
#Discard button\r\n self.discardButton= ttk.Button(self, text=\"Discard\", command=self.discard)\r\n self.discardButton.grid(row = 3, columnspan = 14, pady = (0,20))\r\n self.sendtext_state = tk.IntVar()\r\n self.s = ttk.Style() #make a new style for checkbutton so bg can be white\r\n self.s.configure('New.TCheckbutton', background='white')\r\n self.emergencyButton = ttk.Checkbutton(self, text=\"Enable Emergency Texts\", #state=tk.NORMAL\r\n variable=self.sendtext_state, onvalue = 1, offvalue = 0, style='New.TCheckbutton') #command=self.get_state)\r\n self.emergencyButton.grid(row = 16, columnspan = 14, pady=(10,20))\r\n #Tells user what to input\r\n tk.Label(self, text=\"*Enter Min/Max Values For The Specified Parameters\", bg=\"white\").grid(row=15, columnspan=14, pady=(10,0))\r\n\r\n # ENTRY WIDGETS\r\n self.lower_entries = [0 for i in range(len(param_list))]\r\n self.lower_entries = [tk.DoubleVar() for x in range(len(param_list))]\r\n \r\n self.upper_entries = [0 for i in range(len(param_list))]\r\n self.upper_entries = [tk.DoubleVar() for x in range(len(param_list))]\r\n \r\n # for each widget, create its upper and lower label and entry, store in temp var, then place in entries list\r\n for i in range(len(param_list)):\r\n lower_label = tk.Label(self,bg = 'white', width = 25, anchor = 'e', text=\"Min \" + param_list[i] + \":\")\r\n lower_label.grid(row=i+4, column = 1, padx = (0,10), pady=(0,0))\r\n lower_entry = tk.Entry(self, width = 20, highlightthickness = 0, textvariable = self.lower_entries[i])\r\n lower_entry.grid(row=i+4, column = 2, padx = (0,50), pady=(0,0))\r\n self.lower_entries[i] = lower_entry\r\n upper_label = tk.Label(self,bg = 'white', width = 25, anchor = 'e', text=\"Max \" + param_list[i] + \":\")\r\n upper_label.grid(row=i+4, column = 3, padx = (0,10), pady=(0,0))\r\n upper_entry = tk.Entry(self, width = 20, highlightthickness = 0, textvariable = self.upper_entries[i])\r\n upper_entry.grid(row=i+4, column = 4, padx = (0,50), pady=(0,0))\r\n self.upper_entries[i] = upper_entry\r\n\r\n self.grid_columnconfigure(0, weight=2)\r\n self.grid_columnconfigure(5, weight=2)\r\n \r\n bottomFrame = tk.Frame(master=self, bg='white')\r\n bottomFrame.grid(row=17, columnspan=14, pady=10)\r\n \r\n self.phone_number = [0 for i in range(num_contacts)]\r\n self.phone_number = [tk.StringVar() for x in range(num_contacts)]\r\n self.phone_carrier = [0 for i in range(num_contacts)]\r\n self.phone_carrier = [tk.StringVar() for x in range(num_contacts)]\r\n self.carriers = ['AT&T', 'Sprint', 'T-Mobile', 'Verizon', 'Boost Mobile', 'Cricket',\r\n 'Metro PCS', 'Tracfone', 'U.S. 
Cellular', 'Virgin Mobile']\r\n self.options = []\r\n self.email = [0 for i in range(num_contacts)]\r\n self.email = [tk.StringVar() for x in range(num_contacts)]\r\n \r\n # WIDGETS FOR CONTACTS\r\n for ii in range(num_contacts):\r\n # emergency phone number entry boxes: \r\n self.phone_label = tk.Label(master=bottomFrame, bg = 'white', width = 8, justify = 'right', anchor = 'w', text='Contact ' + str(ii+1) + ':')\r\n self.phone_label.grid(row = ii+20, column = 0, padx = (10,10), pady = (0,0))\r\n phone_entry = tk.Entry(master=bottomFrame, width = 15, textvariable = self.phone_number[ii])\r\n phone_entry.grid(row = ii+20, column = 1, sticky = 'e', padx = (0,40), pady = (0,0))\r\n self.phone_number[ii] = phone_entry\r\n # emergency phone carrier label/optionmenus:\r\n self.carrier_label = tk.Label(master=bottomFrame, bg = 'white', width = 11, anchor = 'w', text='Phone Carrier:')\r\n self.carrier_label.grid(row = ii+20, column = 2, sticky = 'w', padx = (0,10), pady = (0,0))\r\n self.v = self.phone_carrier[ii]\r\n carrier_entry = tk.OptionMenu(bottomFrame, self.v, *self.carriers)\r\n carrier_entry.config(width = 12, highlightthickness = 0)\r\n carrier_entry.grid(row = ii+20, column = 3, sticky = 'w', padx = (0,40), pady = (0,0))\r\n self.options.append(self.v) #keep list of the options\r\n # email address label/entry boxes:\r\n self.email_label = tk.Label(master=bottomFrame, bg = 'white', width = 5, anchor = 'e', text='Email:')\r\n self.email_label.grid(row = ii+20, column = 4, sticky = 'w', padx = (0,10), pady = (0,0))\r\n email_entry = tk.Entry(master=bottomFrame, width = 35, textvariable = self.email[ii])\r\n email_entry.grid(row = ii+20, column = 5, sticky = 'w', padx = (0,0), pady = (0,0))\r\n self.email[ii] = email_entry\r\n\r\n self.discard()\r\n\r\n def popup(self):\r\n #get the input of all entries as a float value to the hundredth place\r\n self.popup = tk.Tk()\r\n self.popup.wm_title(\"Alert\")\r\n label = ttk.Label(self.popup, text=\"Are you sure you want to save?\", font=MEDIUM_FONT)\r\n label.grid(row=0, columnspan=14, pady=(10,20), padx = (5,5))\r\n \r\n # centers the popup window\r\n popup_width = self.popup.winfo_reqwidth()\r\n popup_height = self.popup.winfo_reqheight()\r\n positionRight = int(self.popup.winfo_screenwidth()/2 - popup_width/2 )\r\n positionDown = int(self.popup.winfo_screenheight()/2 - popup_height/2 )\r\n self.popup.geometry(\"+{}+{}\".format(positionRight, positionDown))\r\n \r\n YesB = ttk.Button(self.popup, text=\"YES\", command = self.save)\r\n YesB.grid(row=1, column=1, padx =(23,10), pady = (0,10))\r\n NoB = ttk.Button(self.popup, text=\"NO\", command = self.popup.destroy)\r\n NoB.grid(row=1, column=2, pady = (0,10))\r\n self.popup.mainloop()\r\n #triggered if user press YES in popup window\r\n def save(self):\r\n with open(config_path, 'r', newline='') as file:\r\n config_settings = list(csv.reader(file))\r\n channel_buttons_config = config_settings[0]\r\n num_config = [entry.get() for entry in self.phone_number]\r\n provider_config = [option.get() for option in self.options]\r\n email_config = [entry.get() for entry in self.email]\r\n upper_config = [round(float(entry.get()),2) for entry in self.upper_entries] \r\n lower_config = [round(float(entry.get()),2) for entry in self.lower_entries]\r\n pump_config = config_settings[6]\r\n with open(config_path, 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerows([channel_buttons_config, num_config, provider_config, email_config, upper_config, lower_config, pump_config])\r\n 
file.flush()\r\n #destroy popup window after writing file\r\n self.popup.destroy()\r\n \r\n #fcn triggered by discard button\r\n def discard(self):\r\n #Delete current values\r\n for entry in self.phone_number:\r\n entry.delete(0,END)\r\n for entry in self.email:\r\n entry.delete(0,END)\r\n for entry in self.upper_entries: \r\n entry.delete(0, END)\r\n for entry in self.lower_entries: \r\n entry.delete(0, END)\r\n #Get last saved values\r\n with open(config_path, \"r\") as file:\r\n config_settings = list(csv.reader(file))\r\n for i, entry in enumerate(self.phone_number):\r\n entry.insert(0, config_settings[1][i])\r\n for i, option in enumerate(self.phone_carrier):\r\n option.set(config_settings[2][i])\r\n for i, entry in enumerate(self.email):\r\n entry.insert(0, config_settings[3][i])\r\n for i, entry in enumerate(self.upper_entries):\r\n entry.insert(0, config_settings[4][i])\r\n for i, entry in enumerate(self.lower_entries):\r\n entry.insert(0, config_settings[5][i])\r\n \r\n\r\n def submit(self):\r\n # submit the entered phone number & carrier to the emergency texts list\r\n # need to add senttext.py to GUI before this can function\r\n if len(self.phone_number.get()) != 10:\r\n self.num_popup = tk.Tk()\r\n self.num_popup.wm_title(\"Alert\")\r\n label = ttk.Label(self.num_popup, text=\"Invalid phone number.\", font=MEDIUM_FONT)\r\n label.grid(row=0, columnspan=14, pady=(10,20), padx = (5,5))\r\n okb = ttk.Button(self.num_popup, text=\"OK\", command = self.num_popup.destroy)\r\n okb.grid(row=1, column=1, padx = (20,0), pady = (0,15))\r\n self.num_popup.mainloop()\r\n elif self.phone_carrier.get() == 'Select':\r\n self.car_popup = tk.Tk()\r\n self.car_popup.wm_title(\"Alert\")\r\n label = ttk.Label(self.car_popup, text=\" Choose a carrier. \", font=MEDIUM_FONT)\r\n label.grid(row=0, columnspan=14, pady=(10,20), padx = (5,5))\r\n okb = ttk.Button(self.car_popup, text=\"OK\", command = self.car_popup.destroy)\r\n okb.grid(row=1, column=1, padx = (20,0), pady = (0,15))\r\n self.car_popup.mainloop()\r\n else:\r\n # numbers[self.phone_number.get()] = self.phone_carrier.get() <- once sendtext.py is in GUI\r\n self.phone_entry.delete(0, 'end')\r\n self.phone_carrier.set('Select')\r\n self.ent_popup = tk.Tk()\r\n self.ent_popup.wm_title(\"Alert\")\r\n label = ttk.Label(self.ent_popup, text=\"Phone number entered.\", font=MEDIUM_FONT)\r\n label.grid(row=0, columnspan=14, pady=(10,20), padx = (5,5))\r\n okb = ttk.Button(self.ent_popup, text=\"OK\", command = self.ent_popup.destroy)\r\n okb.grid(row=1, column=1, padx = (20,0), pady = (0,15))\r\n self.ent_popup.mainloop() \r\n \r\n\r\n#add Video Stream page\r\nclass VideoStream(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n label = tk.Label(self, text=\"Video Stream\", bg='white', font = TITLE_FONT)\r\n label.pack(pady=10, padx=10)\r\n #navigation button\r\n navibutton1 = ttk.Button(self, text=\"Back to Dashboard\",\r\n command=lambda: controller.show_frame(HomePage))\r\n navibutton1.pack()\r\n'''\r\n #main label for showing the feed \r\n self.imagel = tk.Label(self)\r\n self.imagel.pack(pady=10, padx=10)\r\n #initialize button with a picture\r\n frame = self.get_frame()\r\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n img = Image.fromarray(cv2image)\r\n imgtk = ImageTk.PhotoImage(image=img)\r\n self.imagel.imgtk = imgtk\r\n self.imagel.configure(image=imgtk)\r\n #button to turn video on and off\r\n self.toggle_button = tk.Button(self, text=\"Video OFF\", bg= \"red\", fg= \"white\", width=10, 
\r\n height=1, command=self.toggle)\r\n self.toggle_button.pack(pady=10)\r\n self.update()\r\n def toggle(self):\r\n if self.toggle_button['bg']=='red':\r\n self.toggle_button.config(bg='green',text='Video ON')\r\n self.update()\r\n elif self.toggle_button['bg']=='green':\r\n self.toggle_button.configure(bg='red',text='Video OFF')\r\n def get_frame(self):\r\n \"\"\"get a frame from the cam and return it.\"\"\"\r\n ret, frame = cap.read()\r\n return frame\r\n def update(self):\r\n \"\"\"update frames.\"\"\"\r\n if self.toggle_button['bg']=='green':\r\n frame = self.get_frame()\r\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n img = Image.fromarray(cv2image)\r\n imgtk = ImageTk.PhotoImage(image=img)\r\n self.imagel.imgtk = imgtk\r\n self.imagel.configure(image=imgtk)\r\n self.imagel.after(15, self.update)'''\r\n \r\nclass ControlPanel(tk.Frame):\r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n #title\r\n tk.Label(self, text=\"Control Panel\", bg=\"white\", font=TITLE_FONT).pack(pady = 10)\r\n\r\n #Setup for lables and button images\r\n self.ctrl_panel_labels = [\"Lights\", \"Water Pump\", \"Fish Feeder\", \"Sensor Array\", \"Oxygenator\", \r\n \"Backwashing\", \"Back\"] \r\n self.icons = [\"light.png\", \"water.png\", \"food.png\", \"sensor.png\", \"oxygen.png\", \r\n \"backwash.png\", \"back.png\"]\r\n self.ctrl_panel_image = []\r\n \r\n for image in self.icons:\r\n self.ctrl_panel_image.append(tk.PhotoImage(file = img_path + image)) #create array of images using image path\r\n \r\n buttonFrame = tk.Frame(master=self, bg='white')\r\n buttonFrame.pack(fill=tk.BOTH, side=tk.TOP, expand=True)\r\n i = 0\r\n j = 0\r\n for counter in range(7):\r\n buttonFrame.columnconfigure(i, weight=1, minsize=300)\r\n buttonFrame.rowconfigure(i, weight=1, minsize=100)\r\n \r\n frame = tk.Frame(master=buttonFrame)\r\n\r\n frame.grid(row=i, column=j, padx=2, pady=2, sticky=\"nsew\")\r\n button = tk.Button(master=frame, text=self.ctrl_panel_labels[counter], image=self.ctrl_panel_image[counter], compound = tk.TOP)\r\n if(counter == 0):\r\n button = tk.Button(master=frame, text=self.ctrl_panel_labels[counter], image=self.ctrl_panel_image[counter], compound = tk.TOP, command=lambda: controller.show_frame(Lights))\r\n if(counter == 1):\r\n button = tk.Button(master=frame, text=self.ctrl_panel_labels[counter], image=self.ctrl_panel_image[counter], compound = tk.TOP, command=lambda: controller.show_frame(WaterPump))\r\n if(counter == 2):\r\n button = tk.Button(master=frame, text=self.ctrl_panel_labels[counter], image=self.ctrl_panel_image[counter], compound = tk.TOP, command=lambda: controller.show_frame(FishFeeder))\r\n if(counter == 3):\r\n button = tk.Button(master=frame, text=self.ctrl_panel_labels[counter], image=self.ctrl_panel_image[counter], compound = tk.TOP, command=lambda: controller.show_frame(SensorArray))\r\n if(counter == 4):\r\n button = tk.Button(master=frame, text=self.ctrl_panel_labels[counter], image=self.ctrl_panel_image[counter], compound = tk.TOP, command=lambda: controller.show_frame(Oxygenator))\r\n if(counter == 5):\r\n button = tk.Button(master=frame, text=self.ctrl_panel_labels[counter], image=self.ctrl_panel_image[counter], compound = tk.TOP, command=lambda: controller.show_frame(Backwashing))\r\n if(counter == 6):\r\n button = tk.Button(master=frame, text=self.ctrl_panel_labels[counter], image=self.ctrl_panel_image[counter], compound = tk.TOP, command=lambda: controller.show_frame(HomePage))\r\n button.pack(side=tk.TOP, fill=tk.BOTH, 
expand=True)\r\n j += 1\r\n if(j == 3):\r\n i += 1\r\n j = 0\r\n if(i == 2):\r\n j = 1\r\n\r\nclass Lights(tk.Frame):\r\n \r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n #title\r\n tk.Label(self, text=\"Lights\", bg=\"white\", font=TITLE_FONT).grid(row=0, column=1, pady=(0,10))\r\n #shelf1\r\n tk.Label(self, text = \"shelf 1\", bg = \"white\", font = MEDIUM_FONT).grid(row=1, column=0, pady=(0,10))\r\n self.toggle1 = tk.Button(self, text=\"Light OFF\", bg= \"red\", width=10, \r\n height=1, command=self.toggle_a)\r\n self.toggle1.grid(row=1, column=1, pady=(0,10))\r\n self.timer1 = tk.Button(self, text=\"timer\", bg= \"white\", width=10, \r\n height=1, command=self.popup)\r\n self.timer1.grid(row=1, column=2, pady=(0,10))\r\n #shelf2\r\n tk.Label(self, text = \"shelf 2\", bg = \"white\", font = MEDIUM_FONT).grid(row=2, column=0, pady=(0,10))\r\n self.toggle2 = tk.Button(self, text=\"Light OFF\", bg= \"red\", width=10, \r\n height=1, command=self.toggle_b)\r\n self.toggle2.grid(row=2, column=1, pady=(0,10))\r\n self.timer2 = tk.Button(self, text=\"timer\", bg= \"white\", width=10, \r\n height=1, command=self.popup)\r\n self.timer2.grid(row=2, column=2, pady=(0,10))\r\n #fish tank\r\n tk.Label(self, text = \"fish tank\", bg = \"white\", font = MEDIUM_FONT).grid(row=3, column=0, pady=(0,10))\r\n self.toggle_tank = tk.Button(self, text=\"Light OFF\", bg= \"red\", width=10, \r\n height=1, command=self.toggle_c)\r\n self.toggle_tank.grid(row=3, column=1, pady=(0,10))\r\n self.timer_tank = tk.Button(self, text=\"timer\", bg= \"white\", width=10, \r\n height=1, command=self.popup)\r\n self.timer_tank.grid(row=3, column=2, pady=(0,10))\r\n #basking\r\n tk.Label(self, text = \"basking\", bg = \"white\", font = MEDIUM_FONT).grid(row=4, column=0, pady=(0,10))\r\n self.toggle_basking = tk.Button(self, text=\"Light OFF\", bg= \"red\", width=10, \r\n height=1, command=self.toggle_d)\r\n self.toggle_basking.grid(row=4, column=1, pady=(0,10))\r\n self.timer_basking = tk.Button(self, text=\"timer\", bg= \"white\", width=10, \r\n height=1, command=self.popup)\r\n self.timer_basking.grid(row=4, column=2, pady=(0,20))\r\n #back button to Alternate Control Panel\r\n self.back = tk.Button(self, text=\"Back\", bg= \"white\", width=10, \r\n height=1, command=lambda: controller.show_frame(ControlPanel))\r\n self.back.grid(row = 5, column = 0)\r\n\r\n # toggle... 
; _ ; technically works but it'd definitely be better if tidied up\r\n    def toggle_a(self):\r\n        if self.toggle1['bg']=='red':\r\n            self.toggle1.config(bg='green',text='Lights ON')\r\n            self.update()\r\n        elif self.toggle1['bg']=='green':\r\n            self.toggle1.configure(bg='red',text='Lights OFF')\r\n            self.update()\r\n    def toggle_b(self):\r\n        if self.toggle2['bg']=='red':\r\n            self.toggle2.config(bg='green',text='Lights ON')\r\n            self.update()\r\n        elif self.toggle2['bg']=='green':\r\n            self.toggle2.configure(bg='red',text='Lights OFF')\r\n            self.update()\r\n    def toggle_c(self):\r\n        if self.toggle_tank['bg']=='red':\r\n            self.toggle_tank.config(bg='green',text='Lights ON')\r\n            self.update()\r\n        elif self.toggle_tank['bg']=='green':\r\n            self.toggle_tank.configure(bg='red',text='Lights OFF')\r\n            self.update()\r\n    def toggle_d(self):\r\n        if self.toggle_basking['bg']=='red':\r\n            self.toggle_basking.config(bg='green',text='Lights ON')\r\n            self.update()\r\n        elif self.toggle_basking['bg']=='green':\r\n            self.toggle_basking.configure(bg='red',text='Lights OFF')\r\n            self.update()\r\n    \r\n    def popup(self):\r\n        #get the input of all entries as a float value to the hundredth place\r\n        self.popup = tk.Tk()\r\n        self.popup.wm_title(\"Timer\")\r\n        start_label= ttk.Label(self.popup, text=\"Start\", font=MEDIUM_FONT)\r\n        start_entry = ttk.Entry(self.popup, width=10)\r\n        duration_label = ttk.Label(self.popup, text=\"Duration\", font=MEDIUM_FONT)\r\n        duration_entry = ttk.Entry(self.popup, width=10)\r\n        start_label.grid(row=0, column=0, pady=(0,10))\r\n        duration_label.grid(row=1, column=0, pady=(0,10))\r\n        start_entry.grid(row=0, column=1, pady=(0,10))\r\n        duration_entry.grid(row=1, column=1, pady=(0,10))\r\n\r\n        save_button = ttk.Button(self.popup, text=\"SAVE\", command = self.save)\r\n        save_button.grid(row=2, column=0, pady = (0,10))\r\n        cancel_button = ttk.Button(self.popup, text=\"CANCEL\", command = self.popup.destroy)\r\n        cancel_button.grid(row=2, column=1, pady = (0,10))\r\n        \r\n        # centers the popup window\r\n        popup_width = self.popup.winfo_reqwidth()\r\n        popup_height = self.popup.winfo_reqheight()\r\n        positionRight = int(self.popup.winfo_screenwidth()/2 - popup_width/2 )\r\n        positionDown = int(self.popup.winfo_screenheight()/2 - popup_height/2 )\r\n        self.popup.geometry(\"+{}+{}\".format(positionRight, positionDown))\r\n        self.popup.geometry('300x200')\r\n        self.popup.mainloop() \r\n    \r\n    #triggered if user presses SAVE in popup window\r\n    def save(self):\r\n        #TODO: save the timer settings here\r\n        #destroy popup window after writing file\r\n        self.popup.destroy()\r\n\r\nclass WaterPump(tk.Frame):\r\n    \r\n    def __init__(self, parent, controller):\r\n        tk.Frame.__init__(self, parent)\r\n        #title\r\n        tk.Label(self, text=\"Water Pump\", bg=\"white\", font=TITLE_FONT).pack(pady = 10)\r\n        #navigation button\r\n        navibutton1 = tk.Button(self, text=\"Back\", command=lambda: controller.show_frame(ControlPanel))\r\n        navibutton1.pack(pady = (0,10))\r\n        \r\n        self.rateA, self.rateB, self.time, self.mode = tk.IntVar(), tk.IntVar(), tk.IntVar(), \"off\"\r\n\r\n        self.buttonFrame = tk.Frame(master=self, bg='white')\r\n        self.buttonFrame.pack()\r\n        tk.Label(master=self.buttonFrame, text=\"\").grid(row=0, column=0)\r\n        tk.Label(master=self.buttonFrame, text=\"Flow Control:\").grid(row=0, column=0)\r\n        tk.Label(master=self.buttonFrame, text=\"Bed A Flow Rate (gal/hr):\").grid(row=1, column=0)\r\n        tk.Label(master=self.buttonFrame, text=\"Bed B Flow Rate (gal/hr):\").grid(row=2, column=0)\r\n\r\n        self.control = tk.Button(master=self.buttonFrame, 
text=\"Off\", fg=\"red\", command=self.switch)\r\n self.control.grid(row=0, column=1, padx=(5,0), pady=8, sticky=\"W\")\r\n tk.Entry(master=self.buttonFrame, width=9, textvariable=self.rateA).grid(row=1, column=1, padx=5, pady=5, columnspan=2)\r\n tk.Entry(master=self.buttonFrame, width=9, textvariable=self.rateB).grid(row=2, column=1, padx=5, pady=5, columnspan=2)\r\n \r\n tk.Button(self, text=\"Save\", command=self.save).pack(pady = (10,0))\r\n\r\n def switch(self):\r\n if self.mode == \"off\":\r\n self.mode = \"on\"\r\n self.control.config(text=\"On\", fg=\"green\")\r\n elif self.mode == \"on\":\r\n self.mode = \"timer\"\r\n self.control.config(text=\"Timer\", fg=\"black\")\r\n self.timer = tk.Entry(master=self.buttonFrame, width=4, textvariable=self.time)\r\n self.timer.grid(row=0, column=2, padx=(0,5), pady=5, columnspan=1)\r\n elif self.mode == \"timer\":\r\n self.mode = \"off\"\r\n self.control.config(text=\"Off\", fg=\"red\")\r\n self.timer.destroy()\r\n \r\n def save(self):\r\n if self.mode == \"timer\":\r\n real_time = self.timer.get()\r\n else:\r\n real_time = None\r\n with open(config_path, 'r', newline='') as file:\r\n config_settings = list(csv.reader(file))\r\n channel_buttons_config = config_settings[0]\r\n num_config = config_settings[1]\r\n provider_config = config_settings[2]\r\n email_config = config_settings[3]\r\n upper_config = config_settings[4]\r\n lower_config = config_settings[5]\r\n pump_config = [self.rateA.get(), self.rateB.get(), real_time, self.mode]\r\n with open(config_path, 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerows([channel_buttons_config, num_config, provider_config, email_config, upper_config, lower_config, pump_config])\r\n file.flush()\r\n \r\n\r\nclass FishFeeder(tk.Frame):\r\n \r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n #title\r\n tk.Label(self, text=\"Fish Feeder\", bg=\"white\", font=TITLE_FONT).pack(pady = 10)\r\n #navigation button\r\n navibutton1 = ttk.Button(self, text=\"Back\",\r\n command=lambda: controller.show_frame(ControlPanel))\r\n navibutton1.pack()\r\n\r\nclass SensorArray(tk.Frame):\r\n \r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n #title\r\n tk.Label(self, text=\"Sensor Array\", bg=\"white\", font=TITLE_FONT).pack(pady = 10)\r\n #navigation button\r\n navibutton1 = ttk.Button(self, text=\"Back\",\r\n command=lambda: controller.show_frame(ControlPanel))\r\n navibutton1.pack()\r\n\r\nclass Oxygenator(tk.Frame):\r\n \r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n #title\r\n tk.Label(self, text=\"Oxygenator\", bg=\"white\", font=TITLE_FONT).pack(pady = 10)\r\n #navigation button\r\n navibutton1 = ttk.Button(self, text=\"Back\",\r\n command=lambda: controller.show_frame(ControlPanel))\r\n navibutton1.pack()\r\n\r\nclass Backwashing(tk.Frame):\r\n \r\n def __init__(self, parent, controller):\r\n tk.Frame.__init__(self, parent)\r\n #title\r\n tk.Label(self, text=\"Backwashing\", bg=\"white\", font=TITLE_FONT).pack(pady = 10)\r\n #navigation button\r\n navibutton1 = ttk.Button(self, text=\"Back\",\r\n command=lambda: controller.show_frame(ControlPanel))\r\n navibutton1.pack()\r\n\r\napp = AllWindow()\r\n#app.geometry('1025x672')\r\napp.geometry('1280x623')\r\n#this makes app full screen, not sure if it's good for us or not\r\n#app.attributes('-fullscreen', True)\r\n#update animation first\r\nani = animation.FuncAnimation(f, animate, 
interval=5000)\r\n#mainloop\r\napp.mainloop()\r\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":49226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"117888489","text":"import struct\n\ndef color(r,g,b):\n return bytes([r, g, b])\n\nBLACK = color(0,0,0)\nWHITE = color(255,255,255)\nRED = color(200, 0, 0)\n\n\ndef char(c):\n return struct.pack('=c', c.encode('ascii'))\n\ndef word(w):\n return struct.pack('=h',w)\n\ndef dword(d):\n return struct.pack('=l',d)\n\nclass Render(object):\n def __init__(self,width,height):\n\n self.width = width\n self.height = height\n self.current_color = WHITE\n self.clear()\n\n def color(self,color):\n r,g,b = color\n return bytes([b,g,r])\n\n def clear(self):\n self.pixels = [\n [WHITE for x in range(self.width)]\n for y in range(self.height)\n ]\n\n def write(self,filename):\n f = open(filename,'bw')\n #HEADER\n f.write(char('B'))\n f.write(char('M'))\n f.write(dword(14+40+self.width * self.height *3))\n f.write(dword(0))\n f.write(dword(14+40))\n\n #IMAGE HEADER (40 BYTES)\n f.write(dword(40))\n f.write(dword(self.width))\n f.write(dword(self.height))\n f.write(word(1))\n f.write(word(24))\n f.write(dword(0))\n f.write(dword(self.width * self.height *3))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n\n for x in range(self.height):\n for y in range(self.width):\n f.write(self.pixels[x][y])\n\n f.close()\n\n def display(self, filename='out.bmp'):\n self.write(filename)\n try:\n from wand.image import Image\n from wand.display import display\n with Image(filename=filename) as image:\n display(image)\n except ImportError:\n pass\n\n def point(self,x,y,color=None):\n self.pixels[y][x] = color or self.current_color\n\n def set_color(self,color):\n self.current_color = color\n\n def line(self, start, end, color):\n x1, y1 = start\n x2, y2 = end\n\n dy = abs(y2 - y1)\n dx = abs(x2 - x1)\n steep = dy > dx\n\n if steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n\n dy = abs(y2 - y1)\n dx = abs(x2 - x1)\n\n offset = 0\n threshold = dx\n\n y = y1\n for x in range(x1, x2 + 1):\n if steep:\n self.point(y, x, self.color(color))\n else:\n self.point(x, y, self.color(color))\n\n offset += dy * 2\n if offset >= threshold:\n y += 1 if y1 < y2 else -1\n threshold += dx * 2\n\nr = Render(800, 600)\n\n\"\"\" Draw house here \"\"\"\n\n# Left wall\nfor i in range(150):\n r.line((200 + i, 300 - i), (200 + i, 200 - i), RED)\n# Left roof\nfor i in range(150):\n r.line((330 + i, 450 - i), (200 + i, 300 - i), RED)\n# Front house triangle\nfor i in range(200):\n r.line((480, 300), (350 + i, 150 + (i // 2)), RED)\n# Front wall 1\nfor i in range(70):\n r.line((350 + i, 150 + (i // 2)), (350 + i, 50 + (i // 2)), RED)\n# Front top mid wall\nfor i in range (40):\n r.line((420 + i, 185 + (i // 2)), (420 + i, 150 + (i // 2)), RED)\n# Front wall 2\nfor i in range(90):\n r.line((460 + i, 205 + (i // 2)), (460 + i, 105 + (i // 2)), RED)\n\n\n\"\"\" End of house \"\"\"\n\nr.write('out.bmp')\n","sub_path":"Python-Renderer/2-Line-drawing/line-draw.py","file_name":"line-draw.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"137421257","text":"\"\"\"The Poincare Polydisk.\"\"\"\n\nimport geomstats.backend as gs\nfrom geomstats.geometry.hyperbolic import Hyperbolic\nfrom geomstats.geometry.hyperbolic import HyperbolicMetric\nfrom 
geomstats.geometry.product_manifold import ProductManifold\nfrom geomstats.geometry.product_riemannian_metric \\\n    import ProductRiemannianMetric  # NOQA\n\n\nclass PoincarePolydisk(ProductManifold):\n    \"\"\"Class for the Poincare polydisk.\n\n    The Poincare polydisk is a direct product of n Poincare disks,\n    i.e. hyperbolic spaces of dimension 2.\n    \"\"\"\n\n    def __init__(self, n_disks, point_type='ball'):\n        self.n_disks = n_disks\n        self.point_type = point_type\n        disk = Hyperbolic(dimension=2, point_type=point_type)\n        list_disks = [disk, ] * n_disks\n        super(PoincarePolydisk, self).__init__(\n            manifolds=list_disks)\n        self.metric = PoincarePolydiskMetric(n_disks=n_disks,\n                                             point_type=point_type)\n\n    def intrinsic_to_extrinsic_coords(self, point_intrinsic):\n        \"\"\"Convert point from intrinsic to extrinsic coordinates.\n\n        Convert the parameterization of a point on the Hyperbolic space\n        from its intrinsic coordinates, to its extrinsic coordinates\n        in Minkowski space.\n\n        Parameters\n        ----------\n        point_intrinsic : array-like, shape=[n_disks, n_samples, dimension]\n\n        Returns\n        -------\n        point_extrinsic : array-like, shape=[n_disks, n_samples, dimension + 1]\n        \"\"\"\n        n_disks = point_intrinsic.shape[0]\n        return gs.array([Hyperbolic._intrinsic_to_extrinsic_coordinates(\n            point_intrinsic[i_disks, ...]) for i_disks in range(n_disks)])\n\n\nclass PoincarePolydiskMetric(ProductRiemannianMetric):\n    \"\"\"Class defining the Poincare polydisk metric.\n\n    The Poincare polydisk metric is a product of n Poincare metrics,\n    each of them being multiplied by a specific constant factor (see\n    [JV2016]_).\n\n    This metric comes from a model used to represent stationary complex\n    signals.\n\n    References\n    ----------\n    .. [JV2016] B. Jeuris and R. Vandebril. The Kähler mean of Block-Toeplitz\n      matrices with Toeplitz structured blocks, 2016.\n      https://epubs.siam.org/doi/pdf/10.1137/15M102112X\n    \"\"\"\n\n    def __init__(self, n_disks, point_type='ball'):\n        self.n_disks = n_disks\n        self.point_type = point_type\n        list_metrics = []\n        for i_disk in range(n_disks):\n            scale_i = (n_disks - i_disk) ** 0.5\n            metric_i = HyperbolicMetric(dimension=2,\n                                        point_type=point_type,\n                                        scale=scale_i)\n            list_metrics.append(metric_i)\n        super(PoincarePolydiskMetric, self).__init__(\n            metrics=list_metrics)\n","sub_path":"geomstats/geometry/poincare_polydisk.py","file_name":"poincare_polydisk.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"522910049","text":"import os\nfrom argparse import ArgumentParser\nimport requests\nimport json\nimport traceback\n\n\ndef getAllIndicatorList():\n    url = \"https://api.worldbank.org/v2/indicators?format=json&page=1\"\n    res = requests.get(url)\n    data = res.json()\n    total = data[0]['total']\n    url2 = \"https://api.worldbank.org/v2/indicators?format=json&page=1&per_page=\" + str(total)\n    res2 = requests.get(url2)\n    data2 = res2.json()\n    return data2[1]\n\n\ndef generate_json_schema(dst_path):\n    unique_urls_str = getAllIndicatorList()\n    try:\n        for commondata in unique_urls_str:\n            urldata = \"https://api.worldbank.org/v2/countries/indicators/\" + commondata['id'] + \"?format=json\"\n            resdata = requests.get(urldata)\n            data_ind = resdata.json()\n            materialiseFormat = 'csv'\n            infoFormat = 'json'\n            print(\"Generating schema for World Bank indicator\", commondata['name'])\n            schema = {}\n            schema[\"title\"] = commondata['name']\n            schema[\"description\"] = commondata['sourceNote']\n            schema[\"url\"] = 
\"https://api.worldbank.org/v2/indicators/\" + commondata['id'] + \"?format=json\"\n            schema[\"keywords\"] = [i for i in commondata['name'].split()]\n            schema[\"date_updated\"] = data_ind[0][\"lastupdated\"] if data_ind else None\n            schema[\"license\"] = None\n            schema[\"provenance\"] = {\"source\": \"http://worldbank.org\"}\n            schema[\"original_identifier\"] = commondata['id']\n\n            schema[\"materialization\"] = {\n                \"python_path\": \"worldbank_materializer\",\n                \"arguments\": {\n                    \"url\": \"https://api.worldbank.org/v2/indicators/\" + commondata['id'] + \"?format=json\"\n                }\n            }\n            schema['variables'] = []\n            first_col = {\n                \"name\": \"indicator_id\",\n\n                \"description\": \"id is identifier of an indicator in worldbank datasets\",\n                \"semantic_type\": [\"https://metadata.datadrivendiscovery.org/types/CategoricalData\"]\n            }\n            second_col = {\n                \"name\": \"indicator_value\",\n\n                \"description\": \"value of an indicator in worldbank datasets\",\n                \"semantic_type\": [\"http://schema.org/Text\"]\n            }\n            third_col = {\n                \"name\": \"unit\",\n\n                \"description\": \"unit of value returned by this indicator for a particular country\",\n                \"semantic_type\": [\"https://metadata.datadrivendiscovery.org/types/CategoricalData\"]\n\n            }\n            fourth_col = {\n                \"name\": \"sourceNote\",\n\n                \"description\": \"Long description of the indicator\",\n                \"semantic_type\": [\"http://schema.org/Text\"]\n            }\n            fifth_col = {\n                \"name\": \"sourceOrganization\",\n                \"description\": \"Source organization from where Worldbank acquired this data\",\n                \"semantic_type\": [\"http://schema.org/Text\"]\n            }\n            sixth_col = {\n                \"name\": \"country_value\",\n\n                \"description\": \"Country for which indicator value is returned\",\n                \"semantic_type\": [\"https://metadata.datadrivendiscovery.org/types/Location\"],\n                \"named_entity\": None\n            }\n            seventh_col = {\n                \"name\": \"countryiso3code\",\n\n                \"description\": \"Country iso code for which indicator value is returned\",\n                \"semantic_type\": [\"https://metadata.datadrivendiscovery.org/types/Location\"]\n            }\n            eighth_col = {\n                \"name\": \"date\",\n\n                \"description\": \"date for which indicator value is returned for a particular country\",\n                \"semantic_type\": [\"https://metadata.datadrivendiscovery.org/types/Time\"],\n                \"temporal_coverage\": {\"start\": None, \"end\": None}\n\n            }\n            schema['variables'].append(first_col)\n            schema['variables'].append(second_col)\n            schema['variables'].append(third_col)\n            schema['variables'].append(fourth_col)\n            schema['variables'].append(fifth_col)\n            schema['variables'].append(sixth_col)\n            schema['variables'].append(seventh_col)\n            schema['variables'].append(eighth_col)\n            if dst_path:\n                os.makedirs(dst_path + '/worldbank_schema', exist_ok=True)\n\n                file = os.path.join(dst_path, 'worldbank_schema',\n                                    \"{}_description.json\".format(commondata['id']))\n            else:\n                os.makedirs('worldbank_schema', exist_ok=True)\n                file = os.path.join('worldbank_schema',\n                                    \"{}_description.json\".format(commondata['id']))\n\n            with open(file, \"w\") as fp:\n                json.dump(schema, fp, indent=2)\n    except:\n        traceback.print_exc()\n        pass\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser()\n    parser.add_argument(\"-o\", \"--dst\", action=\"store\", type=str, dest=\"dst_path\")\n    args, _ = parser.parse_known_args()\n    generate_json_schema(args.dst_path)\n","sub_path":"scripts/generate_schema/worldbank/generate_worldbank_schema.py","file_name":"generate_worldbank_schema.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} 
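# A minimal, self-contained sketch of a page-by-page alternative to the
# two-request pattern in getAllIndicatorList() above (fetch the total, then
# refetch everything in one page). The endpoint URL is taken from the record
# itself; the "pages" field is an assumption about the World Bank API v2
# response envelope (the record reads data[0]['total'] from the same object).
import requests

def iter_worldbank_indicators(per_page=1000):
    """Yield indicator dicts from api.worldbank.org one page at a time."""
    page = 1
    while True:
        res = requests.get(
            "https://api.worldbank.org/v2/indicators",
            params={"format": "json", "page": page, "per_page": per_page},
        )
        meta, rows = res.json()
        for row in rows or []:
            yield row
        if page >= int(meta.get("pages", page)):
            break
        page += 1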
+{"seq_id":"99426178","text":"import tensorflow as tf\nfrom tensorflow.keras import layers\nimport time\nimport os\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nBUFFER_SIZE = 60000\nBATCH_SIZE = 256\n\ndef get_dataset():\n    (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()\n    train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\n    train_images = (train_images - 127.5) / 127.5\n    train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n    return train_dataset\n\ndef make_generator_model():\n    model = tf.keras.Sequential()\n    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))\n    model.add(layers.BatchNormalization())\n    model.add(layers.LeakyReLU())\n\n    model.add(layers.Reshape((7, 7, 256)))\n    assert model.output_shape == (None, 7, 7, 256) # note: None is given for the batch size.\n\n    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))\n    assert model.output_shape == (None, 7, 7, 128)\n    model.add(layers.BatchNormalization())\n    model.add(layers.LeakyReLU())\n\n    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))\n    assert model.output_shape == (None, 14, 14, 64)\n    model.add(layers.BatchNormalization())\n    model.add(layers.LeakyReLU())\n\n    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))\n    assert model.output_shape == (None, 28, 28, 1)\n\n    return model\n\n\nclass make_generator(tf.keras.Model):\n    def __init__(self):\n        super(make_generator, self).__init__()\n        self.Dense_1 = layers.Dense(7*7*256, use_bias=False, input_shape=(100,))\n        self.LRelu = layers.LeakyReLU()\n        self.reshape_1 = layers.Reshape((7,7,256))\n        self.conv_1 = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)\n        self.conv_2 = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)\n        self.conv_3 = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')\n        self.bn1 = layers.BatchNormalization()\n        self.bn2 = layers.BatchNormalization()\n        self.bn3 = layers.BatchNormalization()\n    def call(self, inputs):\n        x = self.Dense_1(inputs)\n        x = self.bn1(x)\n        x = self.LRelu(x)\n        x = self.reshape_1(x)\n        for layer in [[self.conv_1, self.bn2], [self.conv_2, self.bn3]]:\n            x = layer[0](x)\n            x = layer[1](x)\n            x = self.LRelu(x)\n        x = self.conv_3(x)\n        return x\n\n\ndef make_discriminator_model():\n    model = tf.keras.Sequential()\n    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n                            input_shape=[28, 28, 1]))\n    model.add(layers.LeakyReLU())\n    model.add(layers.Dropout(0.3))\n    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n    model.add(layers.LeakyReLU())\n    model.add(layers.Dropout(0.3))\n    model.add(layers.Flatten())\n    model.add(layers.Dense(1))\n\n    return model\n\nclass make_discriminator(tf.keras.Model):\n    def __init__(self):\n        super(make_discriminator, self).__init__()\n        self.LRelu = layers.LeakyReLU()\n        self.conv_1 = layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28,28,1])\n        # second conv block matching make_discriminator_model (plain conv, no tanh)\n        self.conv_2 = layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')\n        self.dr = layers.Dropout(0.3)\n        self.fl = layers.Flatten()\n        self.dn = layers.Dense(1)\n    def call(self, inputs):\n        x = self.conv_1(inputs)\n        x = self.LRelu(x)\n        x = self.dr(x)\n        x = self.conv_2(x)\n        x = self.LRelu(x)\n        x = self.dr(x)\n        x = self.fl(x)\n        x = self.dn(x)\n        return x\n\noption = 
\"class\"\nif option == \"function\":\n    generator = make_generator_model()\n    discriminator = make_discriminator_model()\nelse:\n    generator = make_generator()\n    discriminator = make_discriminator()\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\ndef discriminator_loss(real_output, fake_output):\n    real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n    total_loss = real_loss + fake_loss\n    return total_loss\n\ndef generator_loss(fake_output):\n    return cross_entropy(tf.ones_like(fake_output), fake_output)\n\ngenerator_optimizer = tf.keras.optimizers.Adam(1e-4)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(1e-4)\n\nEPOCHS = 500\nnoise_dim = 100\nnum_examples_to_generate = 16\n\n# We will reuse this seed over time\n# (it makes it easier to visualize progress in the GIF animation).\nseed = tf.random.normal([num_examples_to_generate, noise_dim])\n\n# Note how `tf.function` is used below.\n# This decorator \"compiles\" the function.\n\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n                                 discriminator_optimizer=discriminator_optimizer,\n                                 generator=generator,\n                                 discriminator=discriminator)\n\n@tf.function\ndef train_step(images):\n    noise = tf.random.normal([BATCH_SIZE, noise_dim])\n\n    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n        generated_images = generator(noise, training=True)\n\n        real_output = discriminator(images, training=True)\n        fake_output = discriminator(generated_images, training=True)\n\n        gen_loss = generator_loss(fake_output)\n        disc_loss = discriminator_loss(real_output, fake_output)\n\n    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\n    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n\n    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n\ndef train(dataset, epochs):\n    for epoch in range(epochs):\n        start = time.time()\n\n        for image_batch in dataset:\n            train_step(image_batch)\n\n        # save the model every 50 epochs.\n        if (epoch + 1) % 50 == 0:\n            checkpoint.save(file_prefix = checkpoint_prefix)\n\n        # print('Time taken for epoch {} is {} sec'.format(epoch + 1, time.time()-start))\n        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))\n\n        # generate sample images every 10 epochs.\n        if (epoch + 1) % 10 == 0:\n            generate_and_save_images(generator, epoch, seed)\n\ndef generate_and_save_images(model, epoch, test_input):\n    # Note that `training` is set to False here.\n    # This runs every layer (including batch normalization) in inference mode,\n    # i.e. BatchNorm normalizes with its tracked moving statistics instead of\n    # batch statistics. 
\n    predictions = model(test_input, training=False)\n\n    fig = plt.figure(figsize=(4,4))\n\n    for i in range(predictions.shape[0]):\n        plt.subplot(4, 4, i+1)\n        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n        plt.axis('off')\n\n    plt.savefig('./figures/image_at_epoch_{:04d}.png'.format(epoch))\n\nos.makedirs('./figures', exist_ok=True)  # savefig above needs this directory to exist\ntrain_dataset = get_dataset()\ntrain(train_dataset, EPOCHS)\n\n","sub_path":"tf2_gan.py","file_name":"tf2_gan.py","file_ext":"py","file_size_in_byte":7514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"628767732","text":"try:\n    from django.conf.urls import url\nexcept ImportError:\n    from django.conf.urls.defaults import url\n# from friendship.views import view_friends,test_view_friends, friendship_add_friend,friendship_remove_friend, friendship_accept, \\\n#     friendship_reject, friendship_cancel, friendship_request_list, \\\n#     friendship_request_list_rejected, friendship_requests_detail, followers,\\\n#     following, follower_add, follower_remove, all_users,block_add,block_remove,blockers,blocking\n\nfrom . import views\n\n\n\n\nurlpatterns = [\n    url(r'^users/$',views.all_users,\n        name='friendship_view_users',\n    ),\n    url(\n        r'^friends/(?P<username>[\\\w-]+)/$',\n        views.view_friends,\n        name='friendship_view_friends',\n    ),\n    url(\n        r'^test/$',\n        views.test_view_friends,\n        name='test_friendship_view_friends',\n    ),\n\n    url(\n        r'^friend/add/(?P<to_username>[\\\w-]+)/$',\n        views.friendship_add_friend,\n        name='friendship_add_friend',\n    ),\n    url(\n        r'^friend/remove/(?P<to_username>[\\\w-]+)/$',\n        views.friendship_remove_friend,\n        name='friendship_remove_friend',\n    ),\n    url(\n        r'^friend/accept/(?P<friendship_request_id>\\\d+)/$',\n        views.friendship_accept,\n        name='friendship_accept',\n    ),\n    url(\n        r'^friend/reject/(?P<friendship_request_id>\\\d+)/$',\n        views.friendship_reject,\n        name='friendship_reject',\n    ),\n    url(\n        r'^friend/cancel/(?P<friendship_request_id>\\\d+)/$',\n        views.friendship_cancel,\n        name='friendship_cancel',\n    ),\n    url(\n        r'^friend/requests/$',\n        views.friendship_request_list,\n        name='friendship_request_list',\n    ),\n    url(\n        r'^friend/requests/rejected/$',\n        views.friendship_request_list_rejected,\n        name='friendship_requests_rejected',\n    ),\n    url(\n        r'^friend/request/(?P<friendship_request_id>\\\d+)/$',\n        views.friendship_requests_detail,\n        name='friendship_requests_detail',\n    ),\n    # url(\n    #     r'^followers/(?P<username>[\\\w-]+)/$',\n    #     views.followers,\n    #     name='friendship_followers',\n    # ),\n    # url(\n    #     r'^following/(?P<username>[\\\w-]+)/$',\n    #     views.following,\n    #     name='friendship_following',\n    # ),\n    # url(\n    #     r'^follower/add/(?P<followee_username>[\\\w-]+)/$',\n    #     views.follower_add,\n    #     name='follower_add',\n    # ),\n    # url(\n    #     r'^follower/remove/(?P<followee_username>[\\\w-]+)/$',\n    #     views.follower_remove,\n    #     name='follower_remove',\n    # ),\n    # url(\n    #     r'^blockers/(?P<username>[\\\w-]+)/$',\n    #     views.blockers,\n    #     name='friendship_blockers',\n    # ),\n    # url(\n    #     r'^blocking/(?P<username>[\\\w-]+)/$',\n    #     views.blocking,\n    #     name='friendship_blocking',\n    # ),\n    # url(\n    #     r'^block/add/(?P<blocked_username>[\\\w-]+)/$',\n    #     views.block_add,\n    #     name='block_add',\n    # ),\n    # url(\n    #     r'^block/remove/(?P<blocked_username>[\\\w-]+)/$',\n    #     views.block_remove,\n    #     name='block_remove',\n    # ),\n]\n","sub_path":"friendship/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"409233078","text":"from in_bisect import in_bisect\nfrom get_wordlist import getWordlist\n\n\ndef interlock(l, n):\n    interlockList = []\n    for i in range(n):\n        inter = word[i::n]\n        f = in_bisect(l, inter)\n        if f is None:\n            return False\n        else:\n            interlockList.append(inter)\n    print(word, 
interlockList)\n\n\nif __name__ == \"__main__\":\n words = getWordlist('words.txt')\n for word in words:\n interlock(words, 3)\n","sub_path":"0-ThinkPython/interlock.py","file_name":"interlock.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"16341203","text":"import unittest\nimport connection\n\nclass TestNeuron():\n def __init__(self):\n self.state = 1.0\n self.output = 1.0\n self.error = 1.0\n self.projectedError = 1.0\n self.derivative = 1.0\n self.learningRate = 0.1\n self.momentum = 0.0\n self.decay = 0.0\n self.recurrenceConnection = None\n self.gatedConnections = dict()\n self.gatedNeurons = set()\n\n def addRecurrenceConnection(self, c):\n self.recurrenceConnection = c\n\n def addGatedConnections(self, gatedNeuron, connections):\n self.gatedNeurons.add(gatedNeuron)\n self.gatedConnections[gatedNeuron] = connections\n\n\nclass TestSimpleConnection(unittest.TestCase):\n def setUp(self):\n # Base neurons.\n self.in1 = TestNeuron()\n self.in1.output = 1.0\n self.in2 = TestNeuron()\n self.in2.output = 2.0\n\n self.out1 = TestNeuron()\n self.out1.output = 3.0\n self.out1.projectedError = 2.0\n\n # Momentum and decay neuron.\n self.out2 = TestNeuron()\n self.out2.output = 3.0\n self.out2.projectedError = 2.0\n self.out2.momentum = 0.5\n self.out2.decay = 0.1\n\n # Gate neurons.\n self.g1 = TestNeuron()\n self.g1.output = 4.0\n self.g2 = TestNeuron()\n self.g2.output = 5.0\n\n # Ungated connections.\n self.c1 = connection.Connection(self.in1, self.out1)\n self.c1.weight = 0.5\n self.c1.eligibilityTrace = 10.0\n self.c2 = connection.Connection(self.in2, self.out1)\n self.c2.weight = 2.0\n self.c2.eligibilityTrace = 20.0\n self.c3 = connection.Connection(self.in2, self.out2)\n self.c3.weight = 2.0\n self.c3.eligibilityTrace = 20.0\n\n # Gated connections.\n self.cg1 = connection.Connection(self.in1, self.out1, self.g1)\n self.cg1.weight = 3.0\n self.cg1.eligibilityTrace = 10.0\n\n self.cg2 = connection.Connection(self.in2, self.out1, self.g1)\n self.cg2.weight = 5.0\n self.cg2.eligibilityTrace = 20.0\n\n self.cg3 = connection.Connection(self.in1, self.g1)\n self.cg3.weight = 10.0\n self.cg3.eligibilityTrace = 30.0\n\n self.g1.addGatedConnections(self.out1, [self.cg1, self.cg2])\n self.cg3.extendedEligibilityTraces[self.out1] = 5.0\n\n def test_calculate(self):\n # Ungated connections.\n activ = self.c1.calculate()\n self.assertAlmostEqual(0.5, activ)\n\n activ = self.c2.calculate()\n self.assertAlmostEqual(4.0, activ)\n\n # Gated connections.\n activ = self.cg1.calculate()\n self.assertAlmostEqual(12.0, activ)\n\n activ = self.cg2.calculate()\n self.assertAlmostEqual(40.0, activ)\n\n def test_adjust_weight(self):\n # Simple weight adjustment.\n oldWeight = self.c1.weight\n delta = self.out1.projectedError * self.out1.learningRate * self.c1.eligibilityTrace\n self.c1.adjustWeight()\n self.assertAlmostEqual(oldWeight + delta, self.c1.weight)\n self.c1.weight = oldWeight\n\n oldWeight = self.c2.weight\n delta = self.out1.projectedError * self.out1.learningRate * self.c2.eligibilityTrace\n self.c2.adjustWeight()\n self.assertAlmostEqual(oldWeight + delta, self.c2.weight)\n self.c2.weight = oldWeight\n\n # Gated weight adjustment.\n oldWeight = self.cg1.weight\n delta = (self.out1.projectedError * self.out1.learningRate * self.cg1.eligibilityTrace) \n self.cg1.adjustWeight()\n self.assertAlmostEqual(oldWeight + delta, self.cg1.weight)\n self.cg1.weight = oldWeight\n\n oldWeight = self.cg3.weight\n 
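# For a gated connection, the expected delta combines the gate neuron's\n        # projected error (weighted by the ordinary eligibility trace) with the\n        # gate's plain error weighted by the per-neuron extended trace.\n        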
delta = (self.g1.projectedError * self.g1.learningRate * self.cg3.eligibilityTrace) \\\n + (self.g1.learningRate * self.g1.error *\n self.cg3.extendedEligibilityTraces[self.out1])\n self.cg3.adjustWeight()\n self.assertAlmostEqual(oldWeight + delta, self.cg3.weight)\n self.cg3.weight = oldWeight\n\n def test_momentum_and_decay(self):\n oldWeight = self.c3.weight\n delta = self.out2.projectedError * self.out2.learningRate * self.c3.eligibilityTrace\n delta += self.out2.momentum * self.c3.delta\n delta -= self.out2.decay * self.c3.weight * self.out2.learningRate\n self.c3.adjustWeight()\n self.assertAlmostEqual(oldWeight + delta, self.c3.weight)\n\n oldWeight = self.c3.weight\n delta = self.out2.projectedError * self.out2.learningRate * self.c3.eligibilityTrace\n delta += self.out2.momentum * self.c3.delta\n delta -= self.out2.decay * self.c3.weight * self.out2.learningRate\n\n self.c3.adjustWeight()\n self.assertAlmostEqual(oldWeight + delta, self.c3.weight)\n\n def test_reset(self):\n self.cg3.reset()\n self.assertAlmostEqual(0.0, self.cg3.eligibilityTrace)\n self.assertAlmostEqual(0.0, self.cg3.extendedEligibilityTraces[self.out1])\n\nclass TestPeepholeConnection(unittest.TestCase):\n def setUp(self):\n self.in1 = TestNeuron()\n self.in1.state = 0.5\n self.in1.output = 1.0\n self.in2 = TestNeuron()\n self.in2.state = 1.0\n self.in2.output = 2.0\n\n self.out1 = TestNeuron()\n self.out1.state = 10.0\n self.out1.output = 20.0\n\n self.c1 = connection.Connection(self.in1, self.out1, peephole=True)\n self.c2 = connection.Connection(self.in2, self.out1, peephole=True)\n\n self.r = connection.RecurrentConnection(self.in1, self.out1, peephole=True)\n self.r.weight = 2.0\n self.sr = connection.SelfRecurrentConnection(self.out1)\n\n def test_input_value(self):\n # Basic connection.\n self.assertAlmostEqual(self.c1.getInputValue(), 0.5)\n self.assertAlmostEqual(self.c2.getInputValue(), 1.0)\n\n # Recurrent connection.\n self.assertAlmostEqual(self.r.getInputValue(), 0.0)\n self.r.fire()\n self.assertAlmostEqual(self.r.getInputValue(), 0.5)\n self.assertAlmostEqual(self.r.calculate(), 1.0)\n\n # Self recurrent connection.\n self.assertAlmostEqual(self.sr.getInputValue(), 0.0)\n self.assertAlmostEqual(self.sr.calculate(), 0.0)\n self.sr.fire()\n self.assertAlmostEqual(self.sr.getInputValue(), 10.0)\n self.assertAlmostEqual(self.sr.calculate(), 10.0)\n\nclass TestRecurrentConnection(unittest.TestCase):\n def setUp(self):\n self.in1 = TestNeuron()\n self.in1.state = 0.5\n self.in1.output = 1.0\n self.in2 = TestNeuron()\n self.in2.state = 1.0\n self.in2.output = 2.0\n\n self.g1 = TestNeuron()\n self.g1.state = 5.0\n self.g1.output = 6.0\n\n self.out1 = TestNeuron()\n self.out1.state = 10.0\n self.out1.output = 20.0\n\n self.c1 = connection.RecurrentConnection(self.in1, self.out1)\n self.c1.weight = 100\n self.c2 = connection.RecurrentConnection(self.in2, self.out1, self.g1)\n self.c2.weight = 200\n\n def test_calculate(self):\n self.assertAlmostEqual(self.c1.calculate(), 0)\n self.assertAlmostEqual(self.c1.inputValue, 0)\n self.c1.fire()\n self.assertAlmostEqual(self.c1.inputValue, 1.0)\n self.assertAlmostEqual(self.c1.calculate(), 100)\n self.c1.reset()\n self.assertAlmostEqual(self.c1.calculate(), 0)\n self.assertAlmostEqual(self.c1.inputValue, 0)\n\n self.assertAlmostEqual(self.c2.calculate(), 0)\n self.assertAlmostEqual(self.c2.inputValue, 0)\n self.c2.fire()\n self.assertAlmostEqual(self.c2.inputValue, 2.0)\n self.assertAlmostEqual(self.c2.calculate(), 2400)\n self.c2.reset()\n 
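# after reset() the recurrent connection forgets its latched input,\n        # so the gated activation drops back to zero\n        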
self.assertAlmostEqual(self.c2.calculate(), 0)\n self.assertAlmostEqual(self.c2.inputValue, 0)\n\nclass TestSelfRecurrentConnection(unittest.TestCase):\n def setUp(self):\n self.in1 = TestNeuron()\n self.in1.state = 0.5\n self.in1.output = 1.0\n\n self.in2 = TestNeuron()\n self.in2.state = 2.0\n self.in2.output = 3.0\n\n self.g1 = TestNeuron()\n self.g1.state = 4.0\n self.g1.output = 5.0\n\n self.r1 = connection.SelfRecurrentConnection(self.in1)\n self.r2 = connection.SelfRecurrentConnection(self.in2, self.g1)\n\n self.g1.addGatedConnections(self.in2, [self.r2])\n\n def test_calculate(self):\n self.assertAlmostEqual(self.r1.weight, 1.0)\n activ = self.r1.calculate()\n self.assertAlmostEqual(activ, 0)\n self.r1.fire()\n activ = self.r1.calculate()\n self.assertAlmostEqual(activ, 0.5)\n self.assertAlmostEqual(self.r1.weight, 1.0)\n\n self.assertAlmostEqual(self.r2.weight, 1.0)\n activ = self.r2.calculate()\n self.assertAlmostEqual(activ, 0)\n self.r2.fire()\n activ = self.r2.calculate()\n self.assertAlmostEqual(activ, 10.0)\n self.assertAlmostEqual(self.r2.weight, 1.0)\n\n def test_calculate_eligibility(self):\n self.assertAlmostEqual(self.r1.eligibilityTrace, 0.0)\n self.assertEqual(len(self.r1.extendedEligibilityTraces), 0)\n self.r1.calculateEligibility()\n self.assertAlmostEqual(self.r1.eligibilityTrace, 0.0)\n self.assertEqual(len(self.r1.extendedEligibilityTraces), 0)\n\n self.assertAlmostEqual(self.r2.eligibilityTrace, 0.0)\n self.assertEqual(len(self.r2.extendedEligibilityTraces), 0)\n self.r2.calculateEligibility()\n self.assertAlmostEqual(self.r2.eligibilityTrace, 0.0)\n self.assertEqual(len(self.r2.extendedEligibilityTraces), 0)\n\n def test_adjust_weight(self):\n self.assertAlmostEqual(self.r1.weight, 1.0)\n self.assertAlmostEqual(self.r1.delta, 0.0)\n self.r1.adjustWeight()\n self.assertAlmostEqual(self.r1.weight, 1.0)\n self.assertAlmostEqual(self.r1.delta, 0.0)\n\n self.assertAlmostEqual(self.r2.weight, 1.0)\n self.assertAlmostEqual(self.r2.delta, 0.0)\n self.r2.adjustWeight()\n self.assertAlmostEqual(self.r2.weight, 1.0)\n self.assertAlmostEqual(self.r2.delta, 0.0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"lstm/test_connection.py","file_name":"test_connection.py","file_ext":"py","file_size_in_byte":10141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"550042892","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom sent_messages.models import Message\nfrom sent_messages.serizlizers import MessageSerializer\nimport json\n\n\nclass MessageAPIView(APIView):\n\n def get(self, request, *args, **kwargs):\n\n message = Message.objects.get(*args, **kwargs)\n serializer = MessageSerializer(message)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n\n user_id = request.user.user_id\n request.data['user_id'] = user_id\n serializer = MessageSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass MessageListAPIView(APIView):\n\n def get(self, request, *args, **kwargs):\n\n query_param_examinee_id = request.GET.get('examinee_id')\n query_param_exam_id = request.GET.get('exam_id')\n if query_param_examinee_id:\n messages = Message.objects\\\n .filter(examinee_id=query_param_examinee_id)\\\n .filter(alert=True)\n serializer = MessageSerializer(messages, 
many=True)\n            data = {'alert_data': serializer.data}\n            return Response(json.dumps(data), status=status.HTTP_200_OK)\n\n        if query_param_exam_id:\n            messages = Message.objects \\\n                .filter(exam_id=query_param_exam_id) \\\n                .filter(alert=True)\n            serializer = MessageSerializer(messages, many=True)\n            data = {'alert_data': serializer.data}\n            return Response(json.dumps(data), status=status.HTTP_200_OK)\n\n        return Response(None, status=status.HTTP_400_BAD_REQUEST)\n\n    def post(self, request, *args, **kwargs):\n        serializer = MessageSerializer(data=request.data, many=True)\n        serializer.is_valid(raise_exception=True)\n        serializer.save()\n\n        return Response(None, status=status.HTTP_201_CREATED)\n\n","sub_path":"server/proctor/sent_messages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"262293553","text":"from app import db\nfrom app.utilities.md import create_post_from_md\n\nTYPE_POST = 1\nTYPE_PAGE = 2\n\n\nclass BaseModel(db.Model):\n    __abstract__ = True\n    id = db.Column(db.Integer, primary_key=True)\n    created_at = db.Column(db.DateTime, default=db.func.current_timestamp())\n    modified_at = db.Column(db.DateTime, default=db.func.current_timestamp(),\n                            onupdate=db.func.current_timestamp())\n\n\ntags_posts = db.Table(\n    'tags_posts',\n    db.Column('id', db.Integer, primary_key=True),\n    db.Column('tag_id', db.Integer, db.ForeignKey('tags.id')),\n    db.Column('post_id', db.Integer, db.ForeignKey('posts.id'))\n)\n\ncategories_posts = db.Table(\n    'categories_posts',\n    db.Column('id', db.Integer, primary_key=True),\n    db.Column('category_id', db.Integer, db.ForeignKey('categories.id')),\n    db.Column('post_id', db.Integer, db.ForeignKey('posts.id'))\n)\n\n\nclass Tag(BaseModel):\n    __tablename__ = 'tags'\n    name = db.Column(db.String(100), unique=True, index=True)\n\n    def __repr__(self):\n        return '<Tag %r>' % self.name\n\n\nclass Category(BaseModel):\n    __tablename__ = 'categories'\n    name = db.Column(db.String(100), unique=True, index=True)\n\n    def __repr__(self):\n        return '<Category %r>' % self.name\n\n\nclass Post(BaseModel):\n    __tablename__ = 'posts'\n    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n    title = db.Column(db.String(256))\n    slug = db.Column(db.String(300))\n    description = db.Column(db.Text)\n    body = db.Column(db.Text)\n    status = db.Column(db.Boolean, default=True)\n    body_html = db.Column(db.Text)\n    tags = db.relationship(\n        'Tag', secondary=tags_posts,\n        backref=db.backref('posts', lazy='dynamic')\n    )\n    categories = db.relationship(\n        'Category', secondary=categories_posts,\n        backref=db.backref('posts', lazy='dynamic')\n    )\n\n    @property\n    def content(self):\n        return self.body\n\n    @content.setter\n    def content(self, body):\n        self.body = body\n        self.body_html = create_post_from_md(body)\n\n    @property\n    def html(self):\n        return self.body_html\n\n    @staticmethod\n    def on_change_body(target, value, oldvalue, initiator):\n        target.body_html = create_post_from_md(value)\n\n    @staticmethod\n    def generate_fake(count=100):\n        from random import seed, randint\n        import forgery_py\n        seed()\n        for i in range(count):\n            author_id = 1\n            p = Post(\n                content=forgery_py.lorem_ipsum.paragraphs(),\n                title=forgery_py.lorem_ipsum.sentence(),\n                author_id=author_id\n            )\n            db.session.add(p)\n        db.session.commit()\n\n\ndb.event.listen(Post.body, 'set', 
Post.on_change_body)\n","sub_path":"app/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"407449563","text":"\"\"\"\nThe DQN improvement: Prioritized Experience Replay (based on https://arxiv.org/abs/1511.05952)\n\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\nUsing:\nPyTorch: 0.4.0\ngym: 0.8.0\n\"\"\"\n\n\nimport gym\nfrom dqn_per import DQN_PER\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\nimport time\n\n\n\nenv = gym.make('MountainCar-v0')\nenv = env.unwrapped\nenv.seed(21)\nMEMORY_SIZE = 10000\n\n\ntime_start = time.time()\n\nRL_natural = DQN_PER(\n n_actions=3, n_features=2, memory_size=MEMORY_SIZE,\n e_greedy_increment=0.00005, prioritized=False\n )\n\nRL_natural_2 = DQN_PER(\n n_actions=3, n_features=2, memory_size=MEMORY_SIZE,\n e_greedy_increment=0.00005, prioritized=False, doubled_q= True\n )\n\nRL_prio = DQN_PER(\n n_actions=3, n_features=2, memory_size=MEMORY_SIZE,\n e_greedy_increment=0.00005, prioritized=True\n )\n\n# RL_prio_2 = DQN_PER(\n# n_actions=3, n_features=2, memory_size=MEMORY_SIZE,\n# e_greedy_increment=0.00005, prioritized=True, doubled_q= True\n# )\n#\n# RL_prio_selu = DQN_PER(\n# n_actions=3, n_features=2, memory_size=MEMORY_SIZE,\n# e_greedy_increment=0.00005, prioritized=True, relu_flag= False\n# )\n\n# time_start = time.time()\n\ndef train(RL, otherRL):\n total_steps = 0\n steps = []\n episodes = []\n otherRL_steps = []\n otherRL_episodes = []\n for i_episode in tqdm(range(50)):\n observation = env.reset()\n while True:\n env.render()\n\n action = RL.choose_action(observation)\n\n observation_, reward, done, info = env.step(action)\n\n if done:\n reward = 10\n\n RL.store_transition(observation, action, reward, observation_)\n\n if total_steps <= MEMORY_SIZE:\n otherRL.store_transition(observation, action, reward, observation_)\n\n if total_steps > MEMORY_SIZE:\n RL.learn()\n\n if done:\n print('episode ', i_episode, ' finished')\n steps.append(total_steps)\n episodes.append(i_episode)\n if total_steps <= MEMORY_SIZE:\n otherRL_steps.append(total_steps)\n otherRL_episodes.append(i_episode)\n break\n\n observation = observation_\n total_steps += 1\n\n if total_steps % 1000 == 0:\n print(\"total_steps:%d\" %total_steps)\n\n if total_steps == 30000:\n time_end = time.time()\n print('total_time:', time_end-time_start)\n\n return np.vstack((episodes, steps)), np.vstack((otherRL_steps, otherRL_episodes))\n\ndef train_otherRL(otherRL, otherRL_his):\n total_steps = MEMORY_SIZE + 1\n steps = otherRL_his[0,:].tolist()\n episodes = otherRL_his[1,:].tolist()\n\n for i_episode in tqdm(range(50)):\n observation = env.reset()\n while True:\n env.render()\n\n action = otherRL.choose_action(observation)\n\n observation_, reward, done, info = env.step(action)\n\n if done:\n reward = 10\n\n otherRL.store_transition(observation, action, reward, observation_)\n\n\n if total_steps > MEMORY_SIZE:\n otherRL.learn()\n\n if done:\n print('episode ', i_episode, ' finished')\n steps.append(total_steps)\n episodes.append(i_episode)\n break\n\n observation = observation_\n total_steps += 1\n\n if total_steps % 1000 == 0:\n print(\"total_steps:%d\" %total_steps)\n\n if total_steps == 30000:\n time_end = time.time()\n print('total_time:', time_end-time_start)\n\n return np.vstack((episodes, steps))\n\nhis_natural, his_natural_2 = train(RL_natural, RL_natural_2)\nhis_natural_2 = 
train_otherRL(RL_natural_2, his_natural_2)\n\n# his_prio = train_otherRL(RL_prio, his_prio)\n# his_natural_2 = train(RL_natural_2)\n# his_prio = train(RL_prio)\n# his_prio_2 = train(RL_prio_2)\n# his_prio_selu = train(RL_prio_selu)\n\n# compare based on first success\nplt.plot(his_natural[0, :], his_natural[1, :] - his_natural[1, 0], c='b', label='natural DQN(relu)')\nplt.plot(his_natural_2[0, :], his_natural_2[1, :] - his_natural_2[1, 0], c='y', label='Double DQN(relu)')\n# plt.plot(his_prio[0, :], his_prio[1, :] - his_prio[1, 0], c='r', label='DQN with prioritized replay(relu)') # 每个数据都以第一个数据为基准,看看差值是多少\n# plt.plot(his_prio_2[0, :], his_prio_2[1, :] - his_prio_2[1, 0], c='g', label='Double DQN with prioritized replay(relu)')\n# plt.plot(his_prio_selu[0, :], his_prio_selu[1, :] - his_prio_selu[1, 0], c='purple', label='DQN with prioritized replay(selu)')\nplt.legend(loc='best')\nplt.ylabel('total training time')\nplt.xlabel('episode')\nplt.grid()\nplt.show()\n\n\n","sub_path":"run_dqn_vs.py","file_name":"run_dqn_vs.py","file_ext":"py","file_size_in_byte":4879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"211714360","text":"# Import the converted model's class\nimport sys\nsys.path.append('../src')\nimport argparse\nimport numpy as np\nimport random\nimport tensorflow as tf\nfrom GoogLeNet import GoogLeNet\nfrom PoseCNN import PoseCNN\nimport cv2\n\nbatch_size = 100\nmax_iterations = 5000\n# Set this path to your dataset directory\ndata_idx = '_data_mvs.txt'\n# Set this path to your working space directory\nresult_folder = '../result/'\n\nclass datasource(object):\n def __init__(self, images1, images2, poses, idx, max_size):\n self.images1 = images1\n self.images2 = images2\n self.poses = poses\n self.max_size = max_size\n self.idx = idx\n self.pos = 0\n\ndef preprocess(images):\n images_out = np.zeros((len(images), 3, 224, 224)) #final result\n for i in range(len(images)):\n temp_image = cv2.imread(images[i])\n images_out[i] = np.transpose(temp_image,(2,0,1))\n #compute images mean\n mean = np.mean(images_out, axis=0)\n\n #Subtract mean from all images\n images_out = np.transpose(images_out - mean, (0,2,3,1))\n return images_out\n\ndef get_data(mode = 'train', sub_sample = 'down_sampled', illumination = 'max'):\n poses = []\n images1 = []\n images2 = []\n\n with open('../data/'+ mode + data_idx) as f:\n next(f) # skip the 3 header lines\n next(f)\n next(f)\n for line in f:\n imgFiledId1, imgFiledId2, categoryId, x, y, z, q1, q2, q3, q4 = line.split()\n x = float(x)\n y = float(y)\n z = float(z)\n q1 = float(q1)\n q2 = float(q2)\n q3 = float(q3)\n q4 = float(q4)\n poses.append((x,y,z,q1,q2,q3,q4))\n \n imgFiledId1 = '0'+imgFiledId1 if len(imgFiledId1)==1 else imgFiledId1\n imgFiledId2 = '0'+imgFiledId2 if len(imgFiledId2)==1 else imgFiledId2\n images1.append('D:/Dataset/' + sub_sample + '/scan'+categoryId+'/'\\\n +'clean_0' + imgFiledId1 + '_max.png')\n images2.append('D:/Dataset/' + sub_sample + '/scan'+categoryId+'/'\\\n +'clean_0' + imgFiledId2 + '_max.png')\n max_size = len(poses)\n indices = list(range(max_size))\n random.shuffle(indices)\n return datasource(images1, images2, poses, indices, max_size)\n\ndef gen_data_batch(source):\n image1_batch = []\n image2_batch = []\n pose_x_batch = []\n pose_q_batch = []\n for i in range(batch_size):\n pos = i + source.pos\n pose_x = source.poses[source.idx[pos]][0:3]\n pose_q = source.poses[source.idx[pos]][3:7]\n image1_batch.append(source.images1[source.idx[pos]])\n 
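# both images of the pair use the same shuffled index, so the two views\n        # stay aligned with the relative pose sampled above\n        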
image2_batch.append(source.images2[source.idx[pos]])\n pose_x_batch.append(pose_x)\n pose_q_batch.append(pose_q)\n image1_batch = preprocess(image1_batch)\n image2_batch = preprocess(image2_batch)\n source.pos += i\n if source.pos + i > source.max_size:\n source.pos = 0\n return image1_batch, image2_batch, np.array(pose_x_batch), np.array(pose_q_batch)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('beta', type=int)\n args = parser.parse_args()\n \n beta = beta = args.beta\n settings = 'beta'+str(beta)+'/'\n print('Beta selected as :' + str(beta))\n # Create 2 separate graphs\n graph_GoogLeNet = tf.Graph()\n graph_PoseCNN = tf.Graph()\n #outputFile_GoogLeNet = \"GoogLeNet.ckpt\"\n train_data = get_data(mode = 'train')\n test_data = get_data(mode = 'test')\n #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6833)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n #Build GoogLeNet\n with graph_GoogLeNet.as_default():\n #place holder for input\n images = tf.placeholder(tf.float32, [batch_size, 224, 224, 3], name = 'input')\n \n # define network\n googLeNet = GoogLeNet({'data' : images}, trainable=False)\n main = googLeNet.layers['main_branch']\n aux1 = googLeNet.layers['aux1_branch']\n aux2 = googLeNet.layers['aux2_branch']\n \n # initialization\n initialize1 = tf.global_variables_initializer()\n sess_GoogLeNet = tf.Session(graph=graph_GoogLeNet)\n #sess_GoogLeNet = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n sess_GoogLeNet.run(initialize1)\n # Load the data\n googLeNet.load('../posenet.npy', sess_GoogLeNet)\n \n #saver_GoogLeNet = tf.train.Saver()\n \n #Build poseCNN\n with graph_PoseCNN.as_default():\n #place holder intermadiate output\n main_out = tf.placeholder(tf.float32, [batch_size, 7, 7, 1024], name = 'main')\n aux1_out = tf.placeholder(tf.float32, [batch_size, 4, 4, 128], name = 'aux1')\n aux2_out = tf.placeholder(tf.float32, [batch_size, 4, 4, 128], name = 'aux2')\n # Placeholder for output\n poses_x = tf.placeholder(tf.float32, [batch_size, 3], name = 'posex')\n poses_q = tf.placeholder(tf.float32, [batch_size, 4], name = 'poseq')\n # define network\n poseCNN = PoseCNN({'main_branch' : main_out, 'aux1_branch' : aux1_out, 'aux2_branch' : aux2_out})\n trans = poseCNN.layers['trans_out']\n rot = poseCNN.layers['rot_out']\n \n # Define Loss Function\n loss_x = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(trans, poses_x))))\n loss_q = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(rot, poses_q))))*beta\n loss = loss_x + loss_q\n opt = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999, epsilon=0.00000001, use_locking=False, name='Adam').minimize(loss)\n\n \n # initialization\n initialize2 = tf.global_variables_initializer()\n sess_PoseCNN = tf.Session(graph=graph_PoseCNN)\n #sess_PoseCNN = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n sess_PoseCNN.run(initialize2)\n \n saver = tf.train.Saver()\n \n #array to store intermadiate output\n main1 = np.zeros((batch_size,7,7,1024), dtype=np.float32)\n main2 = np.zeros((batch_size,7,7,1024), dtype=np.float32)\n \n aux1_1 = np.zeros((batch_size,4,4,128), dtype=np.float32)\n aux1_2 = np.zeros((batch_size,4,4,128), dtype=np.float32)\n \n aux2_1 = np.zeros((batch_size,4,4,128), dtype=np.float32)\n aux2_2 = np.zeros((batch_size,4,4,128), dtype=np.float32)\n \n train_loss = np.zeros((1,4))\n test_loss = np.zeros((1,4))\n image1_t, image2_t, np_trans_t, np_rot_t = gen_data_batch(test_data)\n \n for i in range(max_iterations):\n # generating next 
batch\n image1, image2, np_trans, np_rot = gen_data_batch(train_data)\n feed1 = {images: image1}\n feed2 = {images: image2}\n \n aux1_2, aux1_1, main1 = sess_GoogLeNet.run([aux2, aux1, main], feed_dict=feed1)\n aux2_2, aux2_1, main2 = sess_GoogLeNet.run([aux2, aux1, main], feed_dict=feed2)\n \n # Element-wise Product\n aux1_1 = np.multiply(aux1_1, aux2_1)\n aux1_2 = np.multiply(aux1_2, aux2_2)\n main1 = np.multiply(main1, main2)\n # Prepare intermidaiate input for poseCNN\n feed_pose = {main_out : main1, aux1_out : aux1_1, aux2_out : aux1_2, poses_x : np_trans, poses_q : np_rot}\n \n # Back-Prop\n sess_PoseCNN.run(opt, feed_dict=feed_pose)\n if i % 10 == 0:\n # Evaluate\n batch_loss, b_l_x, b_l_q = sess_PoseCNN.run([loss, loss_x, loss_q], feed_dict=feed_pose)\n train_loss = np.append(train_loss, [(i, batch_loss, b_l_x, b_l_q)], axis=0)\n # Validation\n feed1 = {images: image1_t}\n feed2 = {images: image2_t}\n aux1_2, aux1_1, main1 = sess_GoogLeNet.run([aux2, aux1, main], feed_dict=feed1)\n aux2_2, aux2_1, main2 = sess_GoogLeNet.run([aux2, aux1, main], feed_dict=feed2)\n aux1_1 = np.multiply(aux1_1, aux2_1)\n aux1_2 = np.multiply(aux1_2, aux2_2)\n main1 = np.multiply(main1, main2)\n feed_pose = {main_out : main1, aux1_out : aux1_1, aux2_out : aux1_2, poses_x : np_trans, poses_q : np_rot}\n batch_loss_t, b_l_x, b_l_q = sess_PoseCNN.run([loss, loss_x, loss_q], feed_dict=feed_pose)\n test_loss = np.append(test_loss, [(i, batch_loss_t, b_l_x, b_l_q)], axis=0)\n \n print(\"iteration: \" + str(i) + \"\\n\\t\" + \"TrainLoss is: \" + str(batch_loss) + \" TestLoss is:\" + str(batch_loss_t))\n np.savez(result_folder + settings +'loss.npz', train = train_loss, test = test_loss)\n if i % 1000 == 0:\n saver.save(sess_PoseCNN, result_folder + settings + 'PoseCNN.ckpt')\n print(\"Intermediate file saved at: \" + result_folder + settings + 'PoseCNN.ckpt')\n \n saver.save(sess_PoseCNN, result_folder + settings + 'PoseCNN.ckpt')\n print(\"Intermediate file saved at: \" + result_folder + settings + 'PoseCNN.ckpt')\n\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"TrainTest/train_sub_sampled_beta.py","file_name":"train_sub_sampled_beta.py","file_ext":"py","file_size_in_byte":8939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"355698208","text":"#!/usr/bin/env python\n\"\"\"\nCreates lists of molecules in clusters\n\nHazen 09/17\n\"\"\"\nimport argparse\nimport numpy\nimport random\n\nimport storm_analysis.sa_library.i3dtype as i3dtype\nimport storm_analysis.sa_library.writeinsight3 as writeinsight3\n\nparser = argparse.ArgumentParser(description = \"Create emitters in (possibly overlapping) clusters.\")\n\nparser.add_argument('--bin', dest='i3bin', type=str, required=True,\n help = \"The name of Insight3 format file to save the emitter locations, etc.\")\nparser.add_argument('--ncl', dest='ncl', type=int, required=True,\n help = \"The number of clusters.\")\nparser.add_argument('--nlocs', dest='nlocs', type=int, required=True,\n help = \"The number of localizations per cluster.\")\nparser.add_argument('--dev', dest='dev', type=float, required=True,\n help = \"Cluster standard deviation in pixels.\")\nparser.add_argument('--sx', dest='sx', type=int, required=False, default=256,\n help = \"Image x size in pixels, default is 256.\")\nparser.add_argument('--sy', dest='sy', type=int, required=False, default=256,\n help = \"Image y size in pixels, default is 256.\")\nparser.add_argument('--z_start', dest='z_start', type=int, required=False, 
default=-500,\n help = \"Starting value for z position, default is -500nm.\")\nparser.add_argument('--z_stop', dest='z_stop', type=int, required=False, default=500,\n help = \"Stopping value for z position, default is 500nm.\")\n\nargs = parser.parse_args()\n\n# First, create a list of cluster centers.\ncl_centers = []\nwhile (len(cl_centers) < args.ncl):\n cx = random.uniform(0.0, args.sx)\n cy = random.uniform(0.0, args.sy)\n cz = random.uniform(args.z_start, args.z_stop)\n\n # Don't keep the cluster if it is too close to the edge of the image.\n if (cx < 2.0) or (cx > (args.sx - 2.0)):\n continue\n if (cy < 2.0) or (cy > (args.sy - 2.0)):\n continue\n\n cl_centers.append([cx, cy, cz])\n\n# Next, create localizations for each cluster.\nxp = None\nyp = None\nzp = None\nfor clc in cl_centers:\n\n if xp is None:\n xp = numpy.random.normal(scale = args.dev, size = args.nlocs) + clc[0]\n yp = numpy.random.normal(scale = args.dev, size = args.nlocs) + clc[1]\n\n # Z is in nm, we'll assume a 100nm pixel size.\n zp = numpy.random.normal(scale = args.dev * 100.0, size = args.nlocs) + clc[2]\n else:\n xp = numpy.append(xp, numpy.random.normal(scale = args.dev, size = args.nlocs) + clc[0])\n yp = numpy.append(yp, numpy.random.normal(scale = args.dev, size = args.nlocs) + clc[1])\n zp = numpy.append(zp, numpy.random.normal(scale = args.dev * 100.0, size = args.nlocs) + clc[2])\n\n# Create a molecule list structure & save it.\ni3data = i3dtype.createDefaultI3Data(args.ncl * args.nlocs)\ni3dtype.posSet(i3data, \"x\", xp)\ni3dtype.posSet(i3data, \"y\", yp)\ni3dtype.posSet(i3data, \"z\", zp)\n\nwith writeinsight3.I3Writer(args.i3bin) as i3w:\n i3w.addMolecules(i3data)\n","sub_path":"storm_analysis/simulator/emitters_in_clusters.py","file_name":"emitters_in_clusters.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"41527269","text":"\"\"\"\nSeries\n\"\"\"\n__version__ = \"0.0.2\"\n__all__ = ['__version__', 'slices']\ndef slices(digits, num):\n \"\"\"Take a string of `digits` and return all possible consecutive\n number series of length `num` in that string.\n .. versionadded:: 0.0.1\n .. 
versionchanged:: 0.0.2\n :param digits: the string of digits\n :param num: the slice length of the digits\n \"\"\"\n if 0 < num <= len(digits):\n return [[int(d) for d in digits[i:i+num]] for i in xrange(len(digits)-num+1)]\n raise ValueError(\"bad slice length\")\n","sub_path":"assignments/python/series/src/131.py","file_name":"131.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"302801143","text":"from itertools import chain\nimport json\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.db.models.query import QuerySet\nfrom django.db.models.signals import pre_delete, post_save\nfrom django.utils.html import strip_tags\nfrom django.utils.functional import curry\n\nfrom audience.settings import AUDIENCE_FLAGS\nfrom bitfield import BitField\nfrom edumetadata.models import (AlternateType, GeologicTime, Grade,\n HistoricalEra, Subject)\nfrom edumetadata.fields import HistoricalDateField\nfrom concepts.models import delete_listener # , Concept, ConceptItem\nfrom concepts.managers import ConceptManager\n\nfrom curricula.utils import ul_as_list, list_as_ul\nfrom curricula.settings import ASSESSMENT_TYPES, RELATION_MODELS\n\nfrom core_media.models import NGPhoto # NOQA\nfrom credits.models import CreditGroup # NOQA\nfrom taxonomy.managers import TaxonomyTaggableManager\n\n__all__ = ('Lesson', 'LessonActivity',)\n\n\nclass LessonManager(models.Manager):\n def get_published(self):\n qs = self.get_queryset()\n return qs.filter(published=True)\n\n\nclass Lesson(models.Model):\n # Lesson-specific fields\n activities = models.ManyToManyField('curricula.Activity',\n through='curricula.LessonActivity')\n appropriate_for = BitField(\n flags=AUDIENCE_FLAGS,\n help_text='''Select the audience(s) for which this content is\n appropriate. Selecting audiences means that a separate audience view of\n the page will exist for those audiences. For a lesson, the only possible\n choices are Teachers and Informal Educators.\n\n Note that the text you input in this form serves as the default text.\n If you indicate this activity is appropriate for both T/IE audiences,\n you either need to add text variations or the default text must be\n appropriate for for both audiences.''')\n assessment_type = models.CharField(\n max_length=15,\n blank=True, null=True,\n choices=ASSESSMENT_TYPES)\n assessment = models.TextField(\n blank=True,\n null=True,\n help_text=\"\"\"This field is for a new, lesson-level assessment. It is\n not impacted by activity-level assessments.\"\"\")\n background_information = models.TextField(\n blank=True, null=True,\n help_text=\"\"\"Producers can either copy/paste background information\n into this field, or click the \"import text\" link to import background\n information from all activities in this lesson into this field and edit\n them. If you click \"import text from activities\" and revise/override the\n imported text, note that clicking \"import text from activities\" again\n will re-set the text back to the imported version.\"\"\")\n create_date = models.DateTimeField(auto_now_add=True)\n credit = models.ForeignKey(CreditGroup,\n blank=True, null=True,\n help_text=\"\"\"All activity-level credits will dynamically display in\n the lesson credits, broken out by activity number. 
Only use this\n field if you need to add additional, lesson-level credits.\"\"\")\n concepts = ConceptManager()\n description = models.TextField()\n key_image = models.ForeignKey(\n NGPhoto,\n blank=True, null=True,\n on_delete=models.SET_NULL)\n id_number = models.CharField(\n max_length=10,\n help_text=\"\"\"This field is for the internal NG Education ID number.\n This is required for all instructional content.\"\"\")\n instructional_pathways = models.BooleanField(\n default=True,\n verbose_name=\"Display instructional pathways module\")\n is_modular = models.BooleanField(\n default=True,\n help_text=\"\"\"If unchecked, this field indicates that this lesson should\n NOT appear as stand-alone outside of a unit view.\"\"\")\n last_updated_date = models.DateTimeField(auto_now=True)\n materials = models.ManyToManyField('curricula.Material',\n blank=True,\n help_text=\"\"\"This field is for additional, lesson-level materials a\n teacher will need to provide; for example, new materials needed in\n order to conduct the lesson-level assessment. Do not repeat activity-\n specific materials.\"\"\")\n other_notes = models.TextField(\n blank=True, null=True,\n help_text=\"\"\"This field has multiple uses, but one possible use is to\n indicate the larger context into which the lesson fits. Example: This\n is lesson 1 in a series of 10 lessons in a unit on Europe.\"\"\")\n prior_lessons = models.ManyToManyField('self',\n symmetrical=False,\n blank=True, )\n published = models.BooleanField(default=False)\n published_date = models.DateTimeField(\n blank=True, null=True)\n secondary_content_types = models.ManyToManyField(AlternateType,\n blank=True, )\n slug = models.SlugField(\n unique=True,\n help_text=\"\"\"The URL slug is auto-generated, but producers should adjust\n it if: a) punctuation in the title causes display errors; and/or b) the\n title changes after the slug has been generated.\"\"\")\n subtitle_guiding_question = models.TextField(\n verbose_name=\"Subtitle or Guiding Question\")\n title = models.CharField(\n max_length=256,\n help_text=\"\"\"GLOBAL: Use the text variations field to create versions\n for audiences other than the default.\"\"\")\n units = models.ManyToManyField('curricula.Unit',\n through='curricula.UnitLesson')\n\n # Read-only fields aggregated from Activities\n accessibility_notes = models.TextField(\n blank=True, null=True)\n eras = models.ManyToManyField(HistoricalEra,\n blank=True, )\n prior_knowledge = models.TextField(\n blank=True, null=True)\n relevant_start_date = HistoricalDateField(\n blank=True, null=True)\n relevant_end_date = HistoricalDateField(\n blank=True, null=True)\n geologic_time = models.ForeignKey(GeologicTime,\n blank=True, null=True)\n subjects = models.ManyToManyField(Subject,\n blank=True,\n # limit_choices_to={'parent__isnull': False},\n verbose_name=\"Subjects and Disciplines\")\n grades = models.ManyToManyField(Grade,\n blank=True, )\n duration = models.IntegerField(verbose_name=\"Duration Minutes\",\n default=0)\n physical_space_types = models.ManyToManyField('curricula.PhysicalSpaceType',\n blank=True, )\n plugin_types = models.ManyToManyField('curricula.PluginType',\n blank=True, )\n tech_setup_types = models.ManyToManyField('curricula.TechSetupType',\n blank=True, )\n archived = models.BooleanField(default=False)\n\n taxonomy = TaxonomyTaggableManager()\n objects = LessonManager()\n\n class Meta:\n ordering = [\"title\"]\n app_label = 'curricula'\n\n def save(self, *args, **kwargs):\n if self.id is None:\n super(Lesson, self).save(*args, 
**kwargs)\n kwargs['force_update'] = True\n kwargs['force_insert'] = False\n\n agg_activities = curry(self.aggregate_activity_attr, self.activities.all())\n\n # These are normal fields, so we can set them before we save\n agg_pki = agg_activities('prior_knowledge_items')\n self.prior_knowledge = json.dumps(\n list(\n set(\n chain(*agg_pki)\n )\n )\n )\n self.accessibility_notes = list_as_ul(\n list(set(chain(*[ul_as_list(x) for x in agg_activities('accessibility_notes')])))\n )\n self.duration = self._calc_duration(self.activities.all())\n super(Lesson, self).save(*args, **kwargs)\n self._sync_m2m(self.eras, agg_activities('eras'))\n self._sync_m2m(self.subjects, agg_activities('subjects', ignore_own=True))\n self._sync_m2m(self.grades, agg_activities('grades', ignore_own=True))\n self._sync_m2m(self.physical_space_types, agg_activities('physical_space_types', ignore_own=True))\n self._sync_m2m(self.plugin_types, agg_activities('plugin_types', ignore_own=True))\n self._sync_m2m(self.tech_setup_types, agg_activities('tech_setup_types', ignore_own=True))\n\n def _sync_m2m(self, attr, new_set):\n \"\"\"\n Synchronize the objects m2m objects in with the objects in \n \"\"\"\n current = set(attr.all())\n newer = set(new_set)\n to_remove = current - newer\n to_add = newer - current\n if to_add:\n attr.add(*list(to_add))\n if to_remove:\n attr.remove(*list(to_remove))\n\n @models.permalink\n def get_absolute_url(self):\n return ('lesson-detail', (), {'slug': self.slug})\n\n def get_canonical_page(self):\n return reverse('lesson-detail', args=[self.slug])\n\n def __str__(self):\n return strip_tags(self.title)\n\n if RELATION_MODELS:\n def get_related_content_type(self, content_type):\n \"\"\"\n Get all related items of the specified content type\n \"\"\"\n return self.relations.filter(\n content_type__model=content_type)\n\n def get_relation_type(self, relation_type):\n \"\"\"\n Get all relations of the specified relation type\n \"\"\"\n return self.relations.filter(\n relation_type__iexact=relation_type)\n\n # Activity Aggregations\n\n def aggregate_activity_attr(self, activities, attr_name, ignore_own=False):\n \"\"\"\n Generic method to gather up the activities and deduplicate a specific attribute\n\n Can pass a list of IDs or a QuerySet\n \"\"\"\n if not activities:\n return []\n from curricula.models import Activity\n\n # To find out if the attribute is a m2m or a regular field, we test for\n # attribute on the Uninstantiated class. m2m Descriptors will be there,\n # regular fields will not\n if hasattr(Activity, attr_name):\n if hasattr(getattr(Activity, attr_name), 'through'):\n is_m2m = True\n is_property = is_fk = False\n elif hasattr(getattr(Activity, attr_name), 'pk'):\n is_fk = True\n is_property = is_m2m = False\n else:\n is_m2m = is_fk = False\n is_property = True\n\n else:\n is_property = is_m2m = is_fk = False\n\n if isinstance(activities, (list, tuple)):\n if isinstance(activities[0], Activity):\n # We have a bunch of individual Activities. 
Hate to do this,\n # but we need a QuerySet\n activities = [a.pk for a in activities]\n qset = Activity.objects.filter(pk__in=activities)\n elif isinstance(activities, QuerySet):\n qset = activities\n else:\n return []\n if is_m2m:\n qset = qset.prefetch_related(attr_name)\n listoflists = [getattr(x, attr_name).all() for x in qset]\n if hasattr(self, attr_name) and not ignore_own:\n listoflists.append(getattr(self, attr_name).all())\n biglist = chain(*listoflists)\n unique = set(biglist)\n elif is_fk:\n qset = qset.select_related(attr_name)\n unique = set([getattr(x, attr_name) for x in qset])\n elif is_property:\n unique = [getattr(x, attr_name) for x in qset]\n else:\n unique = set(qset.values_list(attr_name, flat=True))\n if hasattr(self, attr_name) and not ignore_own:\n unique.add(getattr(self, attr_name))\n return list(unique)\n\n def is_all_activities(self, activities=None):\n \"\"\"\n Shortcut function to determine if the activities submitted is all\n associated activities by count. Uses the most efficient way to avoid\n database calls\n \"\"\"\n if activities is None:\n return True\n if isinstance(activities, QuerySet):\n count = activities.count()\n elif isinstance(activities, (list, tuple)):\n count = len(activities)\n else:\n return True\n return self.activities.count() == count\n\n @property\n def prior_knowledge_items(self):\n return json.loads(self.prior_knowledge)\n\n def get_accessibility(self, activities=None):\n if self.is_all_activities(activities):\n return self.accessibility_notes\n accessibility_notes = [ul_as_list(activity.accessibility_notes) for activity in activities]\n deduped_notes = set(accessibility_notes)\n return list(deduped_notes)\n\n def get_concepts(self, activities=None):\n activities = activities or self.activities.all()\n return self.aggregate_activity_attr(activities, 'concepts')\n\n def _calc_duration(self, activities=None):\n activities = activities or self.activities.all()\n return sum([activity.duration for activity in activities])\n\n def get_duration(self, activities=None):\n if self.is_all_activities(activities):\n return self.duration\n return self._calc_duration(activities)\n\n def get_background_information(self, activities=None):\n '''Used by the admin to import text'''\n activities = activities or self.activities.all()\n bg_info = [activity.background_information for activity in activities]\n deduped_info = set(bg_info)\n return list(deduped_info)\n\n def get_grades(self):\n return self.grades.all()\n\n def get_grades_and_ages(self):\n grades = self.grades.all()\n\n return (grades.as_grade_range(), grades.as_age_range())\n\n def get_materials(self, activities=None):\n activities = activities or self.activities.all()\n return self.aggregate_activity_attr(activities, 'materials')\n\n def get_other_notes(self, activities=None):\n activities = activities or self.activities.all()\n return self.aggregate_activity_attr(activities, 'other_notes')\n\n def get_physical_space(self, activities=None):\n if self.is_all_activities(activities):\n return self.physical_space_types.all()\n return self.aggregate_activity_attr(activities, 'physical_space_types', ignore_own=True)\n\n def get_required_technology(self, activities=None):\n if self.is_all_activities(activities):\n return list(self.plugin_types.all()) + list(self.tech_setup_types)\n plugin_types = self.aggregate_activity_attr(activities, 'plugin_types', ignore_own=True)\n tech_setup_types = self.aggregate_activity_attr(activities, 'tech_setup_types', ignore_own=True)\n return plugin_types + 
tech_setup_types\n\n    def get_setup(self, activities=None):\n        activities = activities or self.activities.all()\n        setup = [ul_as_list(activity.setup) for activity in activities]\n        deduped_setup = set(chain(*setup))\n        return list(deduped_setup)\n\n    def get_subjects(self, activities=None):\n        if self.is_all_activities(activities):\n            return self.subjects.all()\n        return self.aggregate_activity_attr(activities, 'subjects', ignore_own=True)\n\n    def get_taxonomy(self, activities=None):\n        activities = activities or self.activities.all()\n        return self.aggregate_activity_attr(activities, 'taxonomy')\n\n    def get_key_image(self):\n        ctype = ContentType.objects.get_by_natural_key(app_label='core_media', model='ngphoto')\n        lr = self.get_related_content_type(ctype.name)\n        if len(lr) > 0:\n            return lr[0].content_object\n        else:\n            return None\n\n\nclass LessonActivity(models.Model):\n    lesson = models.ForeignKey('curricula.Lesson')\n    activity = models.ForeignKey('curricula.Activity')\n    transition_text = models.TextField(blank=True, null=True)\n    order = models.IntegerField(blank=True, null=True)\n\n    class Meta:\n        ordering = ('order',)\n        verbose_name_plural = 'LessonActivities'\n        app_label = 'curricula'\n\n\ndef aggregate_signaler(sender, instance, created, raw, using, *args, **kwargs):\n    for unit in instance.units.all():\n        unit.save()\n\npre_delete.connect(delete_listener, sender=Lesson)\npost_save.connect(aggregate_signaler, sender=Lesson)\n","sub_path":"curricula/models/lesson.py","file_name":"lesson.py","file_ext":"py","file_size_in_byte":16275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"126337011","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pygtk,gtk,gtk.glade,xmpp,sys,pango,os,time,pynotify\nfrom connection import *\nimport keys\nimport send\n\nfrom chatwindow import *\nfrom widgets import *\n\nclass okno:\n\tfrom mainactions import *\n\tdef __init__(self):\n\t\tself.gladefile = \"client.glade\"\n\t\tself.wTree = gtk.glade.XML(self.gladefile)\n\t\t# get the main window\n\t\t\n\t\tself.window = self.wTree.get_widget(\"window1\")\n\t\tself.window.show()\n\t\t# show the main window\n\t\tif (self.window):\n\t\t\tself.window.connect('delete-event', self.icohide)\n\t\tmainh=self.window.get_size()[1]\n\t\tself.window.resize(300, mainh)\n\t\tself.window.set_default_size(300, mainh)\n\t\tself.window.move(int(gtk.gdk.screen_width()*0.7),int(gtk.gdk.screen_height()*0.2))\n\t\t# after the window is closed - quit the program\n\t\t\n\t\tself.window.set_title(\"Pybber\")\n\t\t\n\t\t# fetch widgets from glade and assign them to attributes:\n\t\tself.messages={}\n\t\tself.recipent=\"\"\n\t\tassignwidgets(self)\n\t\tcreatestatusicon(self)\n\t\tself.connection=connection(self)\n\t\tself.list.set_reorderable(True)\n\t\tself.statusentry.hide()\n\t\tself.statusbar.hide()\n\t\tself.hidden=False\n\t\tself.posx,self.posy=self.window.get_position()\n\t\tpynotify.init(\"Pybber\")\n\t\tself.archivewindow.hide()\n\t\t\nif __name__ == \"__main__\":\n\tgtk.gdk.threads_init()\n\tklasa=okno()\n\tgtk.main()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"285131237","text":"import pygame\nimport maze\nimport time\nimgSize = 32\nsize = (704, 704)\nwidth, height = size\nrows = height // imgSize\ncolumns = width // imgSize\n\npygame.init()\nscreen = pygame.display.set_mode((width, height))\n\nfont = 
pygame.font.SysFont(\"Calibri.ttf\", 16)\n\ndef getMousePos():\n mx, my = pygame.mouse.get_pos()\n return mx // imgSize, my // imgSize\n\nif __name__ == '__main__':\n m = maze.Maze(rows, columns, screen, font)\n # algorith states\n running = True # state for app window\n # state for algorithm runnin\n space = False\n algoritmNeAplicat = True\n # states for drawing\n drawOn = False\n erase = False\n pickStart = False\n pickEnd = False\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 2:\n m.initializeMatrix()\n algoritmNeAplicat = True\n else:\n drawOn = True\n if event.button == 1:\n erase = False\n if pickStart:\n drawOn = False\n m.setStart(*getMousePos())\n if pickEnd:\n drawOn = False\n m.setEnd(*getMousePos())\n elif event.button == 3:\n erase = True\n elif event.type == pygame.MOUSEBUTTONUP:\n drawOn = False\n if event.type == pygame.KEYDOWN:\n drawOn = False # you can't draw while you pick the starting point and ending point\n pickStart = pickEnd = False\n if event.key == pygame.K_s:\n pickStart = True\n elif event.key == pygame.K_e:\n pickEnd = True\n elif event.key == pygame.K_SPACE:\n space = True # if you press space, the algorithm starts\n if algoritmNeAplicat:\n if (m.canStart()): # canStart() is true if you fixed the starting point and ending point\n m.startSolve()\n while(m.continueSolve()): # can continue is true iff you can apply LEE/BFS algorithm (the queue isn't empty or you didn't reached the ending point)\n pygame.display.update()\n algoritmNeAplicat = False # you can't draw anymore after you ran the algorithm\n else:\n m.minPath() # if the algorithm was applied, now you can draw the minimum path\n pygame.display.update()\n elif event.key == pygame.K_g:\n m.saveMatrix() # the key g saves the drawed map on secondary memory\n elif event.key == pygame.K_f:\n algoritmNeAplicat = True\n m.getMatrixFromFile() # if you press f, the algorithm restart with map from map.txt\n elif event.type == pygame.KEYUP:\n pickStart = False\n pickEnd = False\n if algoritmNeAplicat: # you can t draw while algorithm is working, this is just for lock drawing mode\n if drawOn:\n if erase:\n m.erase(*getMousePos())\n else:\n m.draw(*getMousePos())\n m.showMatrix()\n #screen.blit(font.render(\"69\", True, (255, 255, 0)), (420, 420))\n pygame.display.update()\n #com","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"460523812","text":"import numpy as np\nfrom scipy.io import wavfile\nfrom random import seed\nfrom random import random\n\ndef show_info(aname, a):\n\tprint(\"Array\", aname)\n\tprint(\"shape:\", a.shape)\n\tprint (\"dtype:\", a.dtype)\n\tprint (\"min, max:\", a.min(), a.max())\n\n\n\ndef add_periodic_noise(signal, noise_unit):\n\tnum_repeats_required = signal.shape[0]/len(noise_unit)\n\textra_vals_required = signal.shape[0] - (num_repeats_required*len(noise_unit))\n\n\textra_vals = noise_unit[:extra_vals_required]\t\n\n\tperiodic_noise = []\n\tfor count in range(num_repeats_required):\n\t\tperiodic_noise += noise_unit\n\t\n\tperiodic_noise += extra_vals\n\tperiodic_noise = np.array(periodic_noise)\n\n\tprint(periodic_noise.shape)\n\n\t# Add the noise to the signal\n\tcombined = np.add(signal, periodic_noise)\n\n\treturn combined\n\t\n\n\n\nrate, data = 
rate, data = wavfile.read('data/BLXXXgrd02_original_wav/SRJMZ8RNSX/SRJMZ8RNSX_SA_03.wav')\n\nshow_info(\"data\", data)\n\n\n\n# Create 8-frame noise randomly\nNUM_FRAMES = 8\n# Allow noise values to be up to 1000 in amplitude\nNOISE_AMP = 1000\n\n\nseed(1)\n\nnoise_vals = [0]*NUM_FRAMES\nnoise_vals = [NOISE_AMP*random() - NOISE_AMP/2 for i in noise_vals]\n\n\ncombined = add_periodic_noise(data, noise_vals)\n\nshow_info(\"combined\", combined)\n\n\n\n\n","sub_path":"merger/adversarial/evolutionary/wav_manipulate_play.py","file_name":"wav_manipulate_play.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"307009747","text":"from __future__ import (absolute_import, division, print_function,\n                        unicode_literals)\n\nimport argparse\n\nimport backtrader as bt\nimport backtrader.feeds as btfeeds\n\nimport pandas\n\nfrom data_connectors import StockAlphaVantage\n\nfrom dotenv import load_dotenv\n\nfrom os import environ\n\n\ndef runstrat():\n\n    load_dotenv()\n\n    ALPHAVANTAGE_APIKEY = environ['ALPHAVANTAGE_APIKEY']\n\n    dataframe = StockAlphaVantage.get_daily(\n        'MSFT',\n        'full',\n        ALPHAVANTAGE_APIKEY\n    )\n    \n\n    args = parse_args()\n\n    # Create a cerebro entity\n    cerebro = bt.Cerebro(stdstats=False)\n\n    # Add a strategy\n    cerebro.addstrategy(bt.Strategy)\n\n    # Get a pandas dataframe\n    # datapath = ('../../datas/2006-day-001.txt')\n\n    # Simulate the header row isn't there if noheaders requested\n    skiprows = 1 if args.noheaders else 0\n    header = None if args.noheaders else 0\n\n    # dataframe = pandas.read_csv(datapath,\n    #                             skiprows=skiprows,\n    #                             header=header,\n    #                             parse_dates=True,\n    #                             index_col=0)\n\n    if not args.noprint:\n        print('--------------------------------------------------')\n        print(dataframe)\n        print('--------------------------------------------------')\n\n    # Pass it to the backtrader datafeed and add it to the cerebro\n    data = bt.feeds.PandasData(dataname=dataframe)\n\n    cerebro.adddata(data)\n\n    # Run over everything\n    cerebro.run()\n\n    # Plot the result\n    cerebro.plot(style='bar')\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='Pandas test script')\n\n    parser.add_argument('--noheaders', action='store_true', default=False,\n                        required=False,\n                        help='Do not use header rows')\n\n    parser.add_argument('--noprint', action='store_true', default=False,\n                        help='Do not print the dataframe')\n\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n    runstrat()","sub_path":"test_pandas.py","file_name":"test_pandas.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"367198010","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport compas_rhino\nfrom ._primitiveartist import PrimitiveArtist\n\n\n__all__ = ['FrameArtist']\n\n\nclass FrameArtist(PrimitiveArtist):\n    \"\"\"Artist for drawing frames.\n\n    Parameters\n    ----------\n    frame: :class:`compas.geometry.Frame`\n        A COMPAS frame.\n    scale: float, optional\n        Scale factor that controls the length of the axes.\n\n    Notes\n    -----\n    See :class:`compas_rhino.artists.PrimitiveArtist` for all other parameters.\n\n    Attributes\n    ----------\n    scale : float\n        Scale factor that controls the length of the axes.\n        Default is ``1.0``.\n    color_origin : tuple of 3 int between 0 and 255\n        Default is ``(0, 0, 0)``.\n    color_xaxis : tuple of 3 int between 0 and 255\n        Default is ``(255, 0, 0)``.\n    
color_yaxis : tuple of 3 int between 0 and 255\n        Default is ``(0, 255, 0)``.\n    color_zaxis : tuple of 3 int between 0 and 255\n        Default is ``(0, 0, 255)``.\n\n    Examples\n    --------\n    .. code-block:: python\n\n        from compas.geometry import Pointcloud\n        from compas.geometry import Frame\n\n        import compas_rhino\n        from compas_rhino.artists import FrameArtist\n\n        pcl = Pointcloud.from_bounds(10, 10, 10, 100)\n        tpl = Frame([0, 0, 0], [1, 0, 0], [0, 1, 0])\n\n        compas_rhino.clear_layer(\"Test::FrameArtist\")\n\n        for point in pcl.points:\n            frame = tpl.copy()\n            frame.point = point\n            artist = FrameArtist(frame, layer=\"Test::FrameArtist\")\n            artist.draw()\n\n    \"\"\"\n\n    def __init__(self, frame, layer=None, scale=1.0):\n        super(FrameArtist, self).__init__(frame, layer=layer)\n        self.scale = scale or 1.0\n        self.color_origin = (0, 0, 0)\n        self.color_xaxis = (255, 0, 0)\n        self.color_yaxis = (0, 255, 0)\n        self.color_zaxis = (0, 0, 255)\n\n    def draw(self):\n        \"\"\"Draw the frame.\n\n        Returns\n        -------\n        guids: list\n            The GUIDs of the created Rhino objects.\n        \"\"\"\n        points = []\n        lines = []\n        origin = list(self.primitive.point)\n        X = list(self.primitive.point + self.primitive.xaxis.scaled(self.scale))\n        Y = list(self.primitive.point + self.primitive.yaxis.scaled(self.scale))\n        Z = list(self.primitive.point + self.primitive.zaxis.scaled(self.scale))\n        points = [{'pos': origin, 'color': self.color_origin}]\n        lines = [\n            {'start': origin, 'end': X, 'color': self.color_xaxis, 'arrow': 'end'},\n            {'start': origin, 'end': Y, 'color': self.color_yaxis, 'arrow': 'end'},\n            {'start': origin, 'end': Z, 'color': self.color_zaxis, 'arrow': 'end'}]\n        guids = compas_rhino.draw_points(points, layer=self.layer, clear=False, redraw=False)\n        guids += compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)\n        self._guids = guids\n        return guids\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n\n    pass\n","sub_path":"src/compas_rhino/artists/frameartist.py","file_name":"frameartist.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"596030894","text":"\"\"\"\nLearning PDEs for density field of particles.\n\nFrom \"Particles to PDEs Parsimoniously\" by Arbabi & Kevrekidis 2020\n\nH. 
Arbabi, August 2020, arbabiha@gmail.com.\n\"\"\"\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport time\nimport scipy.io as sio\nimport importlib\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom scipy.integrate import solve_ivp\nfrom sklearn.model_selection import train_test_split\nfrom scipy.ndimage import gaussian_filter\n\nfrom sys import path\npath.append('./thehood/')\n\nimport model_library as ML\nimport CFDroutines as CR\n\n\nplt.rc('text', usetex=True)\n\nfont = {'family' : 'serif',\n 'size' : 8}\n\nmatplotlib.rc('font', **font)\n\ndef prep_data(filename = 'BurgersGT_Z500k_N128_n12.npz',\n ti=[0,10],\n smoothing_sigma=1):\n \"\"\"Load and preprocess microscopic data.\n \n Args:\n filename: name of file with gap_tooth data\n ti: index of trajectories to pick up\n smoothing_sigma: the STD of teh gaussian filter\n \n Returns:\n x: space grid\n t: time grid\n v: density field\n dvdt: density field time derivative\n rho0s: initial conditions (not gap-tooth particle estimates)\n data_tag: string to distinguish\n \"\"\"\n\n Data=np.load(filename)\n x=Data['x'].astype('float32')\n Density=Data['Density'][ti[0]:ti[1]]\n rho0s=Data['rho0s'][ti[0]:ti[1]]\n t = Data['t']\n\n \n smoother= lambda u: gaussian_filter(u,smoothing_sigma,mode='wrap')\n v_temp = np.apply_along_axis(smoother,2,Density)\n\n dvdt,v = [],[]\n dt = t[1]-t[0]\n\n for batch in range(v_temp.shape[0]):\n vt_temp= (v_temp[batch,1:,:]-v_temp[batch,:-1,:])/dt\n dvdt.append(vt_temp)\n v.append(v_temp[batch,:-1,:])\n\n dvdt = np.concatenate(dvdt,axis=0)\n v = np.concatenate(v,axis=0)\n\n data_tag = '_sigma'+str(smoothing_sigma)+'_N'+str(Density.shape[-1])\n\n return x,t,v,dvdt,rho0s.squeeze(),data_tag\n\n\ndef learn_functional_model(x,t,v,dvdt,rho0s,data_tag):\n \"\"\"Learning the functional form of the PDE via neural net.\n \n Args:\n x: space grid\n t: time grid\n v: density field\n dvdt: density field time derivative\n data_tag: string to distinguish\n\n Returns:\n trained keras model mapping v to dvdt\n \"\"\"\n\n print(200*'=')\n print('learning a functional model of the PDE ...')\n model_tag='_arch1o'+data_tag\n\n x_train,x_test,y_train,y_test=train_test_split(v,dvdt,train_size=.85,random_state=42)\n\n n_grid = x.shape[0]\n dx = x[1]-x[0]\n\n nn_model = ML.Functional_PDE_net(n_grid,dx,3,n_conv=2,n_neurons=48)\n\n adam_opt=tf.keras.optimizers.Adam(learning_rate=.001)\n nn_model.compile(optimizer=adam_opt,loss='mse')\n\n PDEfit_history=nn_model.fit(x_train,y_train,\n batch_size=64,epochs=256,\n verbose=0,validation_split=.1)\n\n plt.figure(figsize=[3,2.5])\n plt.plot(PDEfit_history.history['loss']/np.var(y_test),label='training loss')\n plt.plot(PDEfit_history.history['val_loss']/np.var(y_test),label='validation loss')\n plt.yscale('log')\n plt.legend(),plt.tight_layout()\n plt.savefig('fit'+model_tag,dpi=350)\n\n eval_loss=nn_model.evaluate(x=x_test,y=y_test,verbose=0)\n eval_lossp=100*eval_loss/np.var(y_test)\n print('test loss %',eval_lossp )\n\n # nn_model.save('./models/nn'+model_tag)\n return nn_model\n\ndef learn_discretized_model(x,t,v,dvdt,rho0s,data_tag):\n \"\"\"Learning the discretized form of the PDE via neural net.\n \n Args:\n x: space grid\n t: time grid\n v: density field\n dvdt: density field time derivative\n data_tag: string to distinguish\n\n Returns:\n trained keras model mapping v to dvdt\n \"\"\"\n\n print(200*'=')\n print('learning a discretized model of the PDE ...')\n model_tag='_arch2o'+data_tag\n\n 
x_train,x_test,y_train,y_test=train_test_split(v,dvdt,train_size=.85,random_state=42)\n\n n_grid = x.shape[0]\n\n nn_model = ML.Discretized_PDE_net(n_grid,3,n_conv=3,n_neurons=48)\n\n adam_opt=tf.keras.optimizers.Adam(learning_rate=.001)\n nn_model.compile(optimizer=adam_opt,loss='mse')\n\n PDEfit_history=nn_model.fit(x_train,y_train,\n batch_size=64,epochs=256,\n verbose=0,validation_split=.1)\n\n plt.figure(figsize=[3,2.5])\n plt.plot(PDEfit_history.history['loss']/np.var(y_test),label='training loss')\n plt.plot(PDEfit_history.history['val_loss']/np.var(y_test),label='validation loss')\n plt.yscale('log')\n plt.legend(),plt.tight_layout()\n plt.savefig('fit'+model_tag,dpi=350)\n\n eval_loss=nn_model.evaluate(x=x_test,y=y_test,verbose=0)\n eval_lossp=100*eval_loss/np.var(y_test)\n print('test loss %',eval_lossp )\n return nn_model\n\n\ndef test_models(nn1,nn2,x,t,v,dvdt,rho0s,data_tag):\n \"\"\"Testing nn models in estimating dvdt and trajectory predictions.\n \n \n Args:\n models: list of nn models\n x: space grid\n t: time grid\n v: density field\n dvdt: density field time derivative\n data_tag: string to distinguish\n\n Returns:\n saves comparison figures\n \"\"\"\n\n RHS1=lambda t,u: nn1.predict(u[np.newaxis,:]).squeeze()\n RHS2=lambda t,u: nn2.predict(u[np.newaxis,:]).squeeze()\n\n k = 200\n\n # dvdt plots\n plt.figure(figsize=[6.75/2,1.7])\n plt.subplot(1,2,1)\n plt.plot(x,dvdt[k],'k',label='gap tooth')\n plt.plot(x,RHS1(0,v[k]))\n plt.subplot(1,2,2)\n plt.plot(x,dvdt[k],'k',label='gap tooth')\n plt.plot(x,RHS2(0,v[k]))\n\n plt.savefig('ddensity_dt.png',dpi=450)\n\n # trajectory pred\n v_gt = v[::10]\n t_eval = t[:-1:10]\n u0_truth = rho0s\n\n dx = x[1]-x[0]\n\n def Standard_FV(t,y):\n dydt= - CR.WENO_FVM_convection(y,dx) + .05 * CR.diffusion_term(y,dx)\n return dydt\n\n v_truth = solve_ivp(Standard_FV,[0,t_eval[-1]],u0_truth,method='BDF',t_eval=t_eval,max_step=0.01).y.T\n\n\n v_nn1 = solve_ivp(RHS1,[0,t_eval[-1]],u0_truth,method='BDF',t_eval=t_eval,max_step=0.01).y.T\n v_nn2 = solve_ivp(RHS2,[0,t_eval[-1]],u0_truth,method='BDF',t_eval=t_eval,max_step=0.01).y.T\n\n\n plt.figure(figsize=[6.75,2])\n\n plt.subplot(1,4,1)\n plt.contourf(x,t_eval,v_truth,30,cmap='jet')\n plt.colorbar()\n plt.yticks([0,2]),plt.xticks([0,2*np.pi],['0',r'$2\\pi$'])\n plt.title(r'$\\rho(x,t)$'+'\\n truth')\n plt.xlabel(r'$x$'),plt.ylabel(r'$t$')\n\n er0 = v_truth - v_gt\n rmse0 = np.mean(er0**2)/np.var(v_truth)\n\n plt.subplot(1,4,2)\n plt.contourf(x,t_eval,er0,30,cmap='jet')\n plt.colorbar()\n plt.yticks([0,2]),plt.xticks([0,2*np.pi],['0',r'$2\\pi$'])\n plt.title('gap-tooth error \\n rMSE={:.1e}'.format(rmse0))\n plt.xlabel(r'$x$'),plt.ylabel(r'$t$')\n\n er1 = v_truth - v_nn1\n rmse1 = np.mean(er1**2)/np.var(v_truth)\n\n plt.subplot(1,4,3)\n plt.contourf(x,t_eval,er1,30,cmap='jet')\n plt.colorbar()\n plt.yticks([0,2]),plt.xticks([0,2*np.pi],['0',r'$2\\pi$'])\n plt.title('arch. 1 error \\n rMSE={:.1e}'.format(rmse1))\n plt.xlabel(r'$x$'),plt.ylabel(r'$t$')\n\n er2 = v_truth - v_nn2\n rmse2 = np.mean(er2**2)/np.var(v_truth)\n\n plt.subplot(1,4,4)\n plt.contourf(x,t_eval,er2,30,cmap='jet')\n plt.colorbar()\n plt.yticks([0,2]),plt.xticks([0,2*np.pi],['0',r'$2\\pi$'])\n plt.title('arch. 
2 error \\n rMSE={:.1e}'.format(rmse2))\n    plt.xlabel(r'$x$'),plt.ylabel(r'$t$')\n\n    plt.tight_layout()\n    plt.savefig('u_traj.png',dpi=450)\n\n\n\n\n\n\nif __name__=='__main__':\n    ttin=time.time()\n\n    train_data = prep_data(smoothing_sigma=1)\n    nn1=learn_functional_model(*train_data)\n    nn2=learn_discretized_model(*train_data)\n\n    test_data=prep_data(smoothing_sigma=1,ti=[11,12])\n    test_models(nn1,nn2,*test_data)\n\n    print( 'run took {} seconds'.format(time.time()-ttin) )\n\n\n\n","sub_path":"learn_PDE_density.py","file_name":"learn_PDE_density.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"218608467","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'mfslog'\nSITENAME = u'mfslog'\nSITEURL = 'blog.mfslog.com'\n\n\n#content path\nPATH = 'content'\nPAGE_PATHS = ['pages']\nARTICLE_PATHS = ['articles']\nSTATIC_PATHS = ['images', 'files']\n\n\nEXTRA_PATH_METADATA = {\n        'files/robots.txt': {'path': 'robots.txt'},\n        'images/favicon.ico': {'path': 'favicon.ico'},\n        }\nARTICLE_URL = ('articles/{slug}.html')\nARTICLE_SAVE_AS = ('articles/{slug}.html')\nPAGE_LANG_SAVE_AS = False\n\nTIMEZONE = 'Asia/Shanghai'\n\nDEFAULT_LANG = u'zh'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nDELETE_OUTPUT_DIRECTORY = False\n\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n         ('Python.org', 'http://python.org/'),\n         )\n\n# Social widget\nSOCIAL = (('GitHub', 'http://github.com/mfslog'),\n          )\n\n#theme\nDEFAULT_PAGINATION = 10\nTHEME = 'pelican-themes/pelican-blueidea'\n\nMD_EXTENSIONS = (['codehilite(css_class=highlight)', 'extra',\n                  'fenced_code', 'tables', 'sane_lists'])\n\n\n#sitemap\nSITEMAP = {\n        'format': 'xml',\n        'priorities': {\n            'articles': 1,\n            'pages': 0.9,\n            'indexes': 0.8,\n        },\n        'changefreqs': {\n            'indexes': 'daily',\n            'articles': 'daily',\n            'pages': 'weekly'\n        }\n    }\n\n\n# Plugin\nPLUGIN_PATHS = ['pelican-plugins']\nPLUGINS = [ 'sitemap', 'gravatar' ]\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\n\n\nSUMMARY_MAX_LENGTH = 0\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"597662904","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nfrom django.http import JsonResponse\nfrom .models import Comic, Tag, Artist, Language, Character, Parody, Group, Category\n\nfrom .forms import PicLinkForm, MultiLinkForm\nimport re, datetime\nimport json\n\n\ntest_model = Comic.objects.all()\n\ndef get_pic_2(request):\n    if request.method == \"POST\":\n        form = PicLinkForm(request.POST)\n        if form.is_valid():\n            link = form.cleaned_data['pic_link']\n            information_simply_h(link)\n            # link = link+\"1\"\n            # data = load_pic(link)\n            return render(request, 'job/show.html', {})\n    else:\n        form = PicLinkForm()\n\n    return render(request, 'job/import.html', {'form': form})\n\n\ndef information_simply_h(manga_link):\n    hdr = {'User-Agent': 'Mozilla/5.0'}\n    exist_link = len(Comic.objects.filter(link=manga_link))\n\n    if 
exist_link==0:\n req = Request(manga_link, headers=hdr )\n page = urlopen(req)\n soup = BeautifulSoup(page, \"html\")\n\n # get information\n info = soup.find_all(\"div\", \"link-box\")\n link = manga_link\n\n title = soup.find(\"h1\").text\n\n thumbnail = soup.find(\"img\", \"lazyload\")[\"data-src\"]\n\n cover = soup.find(\"img\", \"lazyload\")[\"data-src\"]\n\n parady = [x.text for x in info[0].find_all('a')]\n language = [x.text for x in info[1].find_all('a')]\n character = [x.text for x in info[2].find_all('a')]\n tag = [x.text for x in info[3].find_all('a')]\n\n\n req = Request(manga_link+\"/all-pages\", headers=hdr )\n page = urlopen(req)\n soup = BeautifulSoup(page, \"html\")\n links = soup.find_all(\"a\", \"image-preview\")\n links_groups = []\n\n for link in links:\n req_1 = Request(link['href'], headers=hdr )\n page_1 = urlopen(req_1)\n soup_1 = BeautifulSoup(page_1, \"html\")\n img_link = soup_1.find(\"div\",{\"data-react-class\":True})\n page_dict = json.loads(img_link[\"data-react-props\"])\n img = page_dict['image']['sizes']['full']\n links_groups.append(img)\n\n content = \"\"\n for l in links_groups:\n content+=\"
\".format(l)\n\n\n # 加入新的 tag 進入各個 multifield\n mapping = {\n 'parody': Parody, 'character': Character,\n 'tag': Tag, 'artist': Artist, 'group': Group,\n 'language': Language, 'category': Category,\n }\n\n\n result = {\n 'parody': [parady,],\n 'character': character,\n 'tag': tag,\n 'artist': [],\n 'group': [],\n 'language': [language, ],\n 'category': [],\n }\n\n for key, value in result.items():\n for v in value:\n manager = mapping[key].objects\n create_dict = {key: v}\n if not manager.filter(**create_dict):\n manager.create(**create_dict)\n print(\"******create {}\".format(v))\n else:\n print(\"****** Already exist\")\n print(result)\n exist = Comic.objects.filter(name=title)\n\n\n if not exist:\n object = Comic.objects.create(name=title, cover=cover, link = manga_link, marked=False, date =datetime.date.today(), content=content)\n\n for x in result[\"parody\"]:\n object.parody.add(Parody.objects.get(parody=x))\n for x in result[\"character\"]:\n object.character.add(Character.objects.get(character=x))\n for x in result[\"tag\"]:\n object.tag.add(Tag.objects.get(tag=x))\n for x in result[\"language\"]:\n object.language.add(Language.objects.get(language=x))\n object.save()\n else:\n print(\"{} Repeated\".format(x.title))\n else:\n print(\"repeated link\")\n\ndef load_pic(link):\n hdr = {'User-Agent': 'Mozilla/5.0'}\n req = Request(link, headers=hdr )\n page = urlopen(req)\n soup = BeautifulSoup(page, \"html\")\n # print(soup)\n base_link = soup.find(\"img\", {\"class\", \"fit-horizontal\"})\n\n\n html = \"\"\n for x in range(1, 10):\n img_src = base_link[\"src\"].rsplit(\"/\", 1)[0] + \"/{}.jpg\".format(str(x))\n html += \"
\".format(img_src)\n\n return (html,x, base_link[\"src\"].rsplit(\"/\", 1)[0],)\n\ndef get_post(request, id):\n\n link = Comic.objects.get(id=id).link\n information(link)\n link = link+\"1\"\n data = load_pic(link)\n return render(request, 'job/show.html', {'html': data[0], 'last': data[1]+1, 'link': data[2]})\n\n load_pic(link)\n\n return render(request, 'job/record.html', {'model_object': model_object})\n","sub_path":"views2.py","file_name":"views2.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"255870912","text":"\n##-------------\n#Deep-reinforcement-learning-with-pytorch\n# self.value = nn.MSELoss()\n# self.Q1= nn.MSELoss()\n# self.Q2 = nn.MSELoss()\n\n##-------------\n\nimport os\nimport torch as T\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport numpy as np\nfrom buffer import replaybuffer\nfrom networks2 import ActorNetwork, CriticNetwork, ValueNetwork\nimport config\n\nA_LR = config.A_LR\nC_LR = config.C_LR\ngamma = config.gamma\nMEMORY_CAPACITY = config.MEMORY_CAPACITY\nbatch_size = config.batch_size\nreward_scale = config.reward_scale\nhidden_sizes=config.hidden_dim\n\nclass SAC_agent(object):\n def __init__(self, input_dims, n_actions, max_action):\n self.input_dims, self.n_actions, self.max_action = input_dims, n_actions, max_action\n if(config.color_state):\n self.memory = replaybuffer(MEMORY_CAPACITY, config.store_color_state_dim, n_actions)\n else:\n self.memory = replaybuffer(MEMORY_CAPACITY, config.store_sate_dim, n_actions)\n\n self.actor = ActorNetwork(A_LR, input_dims, n_actions, max_action,hidden_sizes, name='actor')\n self.critic_1 = CriticNetwork(C_LR, input_dims, n_actions,hidden_sizes, name='critic_1')\n self.critic_2 = CriticNetwork(C_LR, input_dims, n_actions,hidden_sizes, name='critic_2')\n self.critic_1_target = CriticNetwork(C_LR, input_dims, n_actions, hidden_sizes, name='critic_1_target')\n self.critic_2_target = CriticNetwork(C_LR, input_dims, n_actions, hidden_sizes, name='critic_2_target')\n self.scale = reward_scale\n self.update_network_parameters(tau=1)\n\n def choose_action(self, observation, reparameterize=False, ON_TRAIN = False):\n state = T.Tensor(observation).to(self.actor.device)\n\n if ON_TRAIN:\n actions, _,_ = self.actor.sample_normal(state, reparameterize=False)\n else:\n _, _, actions = self.actor.sample_normal(state, reparameterize=False)\n\n\n return actions.cpu().detach().numpy()[0]\n #GPU類型轉成CPU,再繼續轉成numpy\n #is a cuda tensor so we have ri send it to the cpu we detach from the graph and turn it to the numpy array and take the zeroth elememt\n\n\n def remember(self,state, action, reward, new_state, done):\n self.memory.store_transition(state, action, reward, new_state, done)\n\n def update_network_parameters(self, tau=None):\n if tau is None:\n tau = config.tau\n critic_1_target_params = self.critic_1_target.named_parameters()\n critic_1_params = self.critic_1.named_parameters() #輸出參數的名稱(字符串)與這個參數(Parameter類)\n\n critic_1_target_state_dict = dict(critic_1_target_params)\n critic_1_dict = dict(critic_1_params)\n\n for name in critic_1_dict:\n critic_1_dict[name] = tau*critic_1_dict[name].clone() + \\\n (1-tau)*critic_1_target_state_dict[name].clone()\n\n self.critic_1_target.load_state_dict(critic_1_dict)\n\n def update_network_parameters2(self, tau=None):\n if tau is None:\n tau = config.tau\n critic_2_target_params = self.critic_2_target.named_parameters()\n critic_2_params = self.critic_2.named_parameters() # 
\n\n        critic_2_target_state_dict = dict(critic_2_target_params)\n        critic_2_dict = dict(critic_2_params)\n\n        for name in critic_2_dict:\n            critic_2_dict[name] = tau * critic_2_dict[name].clone() + \\\n                                  (1 - tau) * critic_2_target_state_dict[name].clone()\n\n        self.critic_2_target.load_state_dict(critic_2_dict)\n\n    def save_models(self, path, i):\n        print('.... saving models ....')\n        os.makedirs('./model/' + path + '/net/' + str(int(i)))\n        checkpoint_path = os.path.join('./model/' + path + '/net/' + str(int(i)))\n        self.actor.save_checkpoint(checkpoint_path)\n        # self.value.save_checkpoint(checkpoint_path)\n        # self.target_value.save_checkpoint(checkpoint_path)\n        self.critic_1_target.save_checkpoint(checkpoint_path)\n        self.critic_2_target.save_checkpoint(checkpoint_path)\n        self.critic_1.save_checkpoint(checkpoint_path)\n        self.critic_2.save_checkpoint(checkpoint_path)\n\n\n    def load_models(self,path):\n        print('.... loading models ....')\n\n        self.actor.load_checkpoint(path)\n        # self.value.load_checkpoint(path)\n        # self.target_value.load_checkpoint(path)\n        self.critic_1_target.load_checkpoint(path)\n        self.critic_2_target.load_checkpoint(path)\n        self.critic_1.load_checkpoint(path)\n        self.critic_2.load_checkpoint(path)\n\n    def learn(self):\n\n        if self.memory.mem_cntr < batch_size:\n            return 0, 0, 0\n\n        state, action, reward, new_state, done = self.memory.sample_buffer(batch_size)\n\n        # create tensors: T.tensor(data, dtype=None, device=None, requires_grad=False)\n        reward = T.tensor(reward, dtype=T.float).to(self.actor.device)\n        done = T.tensor(done).to(self.actor.device)\n        state_ = T.tensor(new_state, dtype=T.float).to(self.actor.device)\n        state = T.tensor(state, dtype=T.float).to(self.actor.device)\n        action = T.tensor(action, dtype=T.float).to(self.actor.device)\n\n        # Bellman backup for Q functions\n        with T.no_grad():\n            # Target actions come from *current* policy\n            action1, log_prob, _ = self.actor.sample_normal(state_, reparameterize=config.reparameterize_critic) # controls whether the sample is reparameterized\n            # Target Q-values\n            q1_pi_targ = self.critic_1_target.forward(state_, action1)\n            q2_pi_targ = self.critic_2_target.forward(state_, action1)\n            q_pi_targ = T.min(q1_pi_targ, q2_pi_targ)\n            backup = reward + gamma * (q_pi_targ - self.scale * log_prob)\n\n        # # Set up function for computing SAC Q-losses\n        q1 = self.critic_1.forward(state, action)\n        q2 = self.critic_2.forward(state, action)\n        loss_q1 = ((q1 - backup) ** 2).mean()\n        loss_q2 = ((q2 - backup) ** 2).mean()\n\n        # loss_q = loss_q1 + loss_q2\n        self.critic_1.zero_grad()\n        loss_q1.backward(retain_graph=True)\n        self.critic_1.optimizer.step()\n\n        self.critic_2.zero_grad()\n        loss_q2.backward(retain_graph=True)\n        self.critic_2.optimizer.step()\n\n        # Set up function for computing SAC pi loss\n        action2, log_prob2, _ = self.actor.sample_normal(state, reparameterize=config.reparameterize_actor)\n        q1_pi = self.critic_1.forward(state, action2)\n        q2_pi = self.critic_2.forward(state, action2)\n        q_pi = T.min(q1_pi, q2_pi)\n        loss_pi = (self.scale * log_prob2 - q_pi).mean()\n        # Entropy-regularized policy loss\n\n        self.actor.optimizer.zero_grad()\n        loss_pi.backward(retain_graph=True)\n        self.actor.optimizer.step()\n\n        self.update_network_parameters()\n        self.update_network_parameters2()\n\n        return loss_q1.item(), loss_q2.item(), loss_pi.item()\n        # call the optimizer's zero_grad method to zero every parameter's gradient buffer\n        # call loss.backward() to start backpropagation\n        # call optimizer.step() to update the weights.
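\n\n# Hedged usage sketch (illustrative only -- the environment, dims and reward below are assumptions, not part of this file):\n# agent = SAC_agent(input_dims=[8], n_actions=2, max_action=1.0)\n# action = agent.choose_action(obs)  # obs is expected batched, e.g. shape (1, 8); [0] strips the batch dim\n# agent.remember(obs0, action, reward, obs1, done)\n# q1_loss, q2_loss, pi_loss = agent.learn()  # returns 0, 0, 0 until the buffer holds batch_size samples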
","sub_path":"vrep/SAC_camera_version2/sac4.py","file_name":"sac4.py","file_ext":"py","file_size_in_byte":7246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"238805569","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom conftest import service_up\nimport pytest\nimport logging\n\nlogging.getLogger(\"cookiecutter\").setLevel(logging.INFO)\n\n\n@service_up\n@pytest.mark.slow\n@pytest.mark.skip(\n    reason=\"Currently identical to test_slurm.py::test_no_timeout; no adjustment done via cluster-config.yaml\"\n)\ndef test_adjust_runtime(smk_runner):\n    smk_runner.exec_run(\n        \"timeout.txt\", options=f\"--cluster-config {smk_runner.cluster_config}\"\n    )\n    m = smk_runner.check_jobstatus(\"(?P<timelimit>\\\\d+)\", \"-o TimeLimitRaw -n\", which=1)\n    assert int(m.group(\"timelimit\")) == 2\n\n\n@service_up\n@pytest.mark.slow\ndef test_adjust_memory(smk_runner):\n    smk_runner.exec_run(\n        \"memory.txt\", options=f\"--cluster-config {smk_runner.cluster_config}\"\n    )\n    m = smk_runner.check_jobstatus(\"(?P<mem>\\\\d+)\", \"-o ReqMem -n\")\n    assert int(m.group(\"mem\")) == 500\n\n\n@service_up\n@pytest.mark.slow\ndef test_memory_with_constraint(smk_runner):\n    smk_runner.exec_run(\n        \"memory_with_constraint.txt\",\n        options=f\"--cluster-config {smk_runner.cluster_config}\",\n    )\n    m = smk_runner.check_jobstatus(\"(?P<mem>\\\\d+)\", \"-o ReqMem -n\")\n    assert int(m.group(\"mem\")) == 800\n\n\n@service_up\n@pytest.mark.slow\ndef test_cluster_short_queue(smk_runner):\n    smk_runner.exec_run(\n        \"short_queue.txt\", options=f\"--cluster-config {smk_runner.cluster_config}\",\n    )\n    assert smk_runner.check_jobstatus(\"debug\", \"-n -o Partition\")\n","sub_path":"tests/test_slurm_advanced.py","file_name":"test_slurm_advanced.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"420988297","text":"import sqlite3\r\n\r\nclass Producto(object):\r\n\r\n    def __init__(self, codigo:str, nombre: str, descripcion: str, estado: str, precio: float,\r\n                 descuento: float):\r\n        self.__codigo = None\r\n        self.__nombre = nombre\r\n        self.__descripcion = descripcion\r\n        self.__precio = precio\r\n        self.__descuento = descuento\r\n        self.__precio_desc = self.precio_venta\r\n        self.__estado = estado\r\n\r\n\r\n    def agregar_producto(self) -> bool:\r\n        estado_op = False\r\n        database = sqlite3.connect(\"data/linioexp.db\")  # OPEN DATABASE CONNECTION\r\n        try:\r\n            cursor = database.cursor()  # GET CURSOR OBJECT\r\n            query = '''\r\n            INSERT INTO productos(codigo, nombre, descripcion, precio_venta, precio_normal,\r\n            estado, descuento)\r\n            VALUES ('{}', '{}', '{}', {}, {},'{}',{})\r\n            '''.format(self.__generar_codigo(), self.__nombre, self.__descripcion,\r\n                       self.precio_venta, self.__precio, self.__estado, self.__descuento)\r\n            cursor.execute(query)\r\n            database.commit()  # COMMIT THE QUERY CHANGES\r\n            estado_op = True\r\n        except Exception as e:\r\n            database.rollback()  # ROLL BACK CHANGES ON ERROR\r\n            print(\"Error: {}\".format(e))\r\n        finally:\r\n            database.close()  # CLOSE DATABASE CONNECTION\r\n\r\n        return estado_op\r\n\r\n    @classmethod\r\n    def obtener_producto_nombre(cls, nombre:str) -> list:\r\n        lista_productos = None\r\n        database = sqlite3.connect(\"data/linioexp.db\")  # OPEN DATABASE CONNECTION\r\n        try:\r\n            cursor = database.cursor()  # GET CURSOR OBJECT\r\n            query = '''\r\n            SELECT codigo, nombre, descripcion, estado, precio_normal, precio_venta, 
descuento\r\n            FROM productos\r\n            WHERE nombre LIKE '%{}%'\r\n            '''.format(nombre)\r\n            cursor.execute(query)\r\n            lista_productos = cursor.fetchall()\r\n        except Exception as e:\r\n            print(\"Error: {}\".format(e))\r\n        finally:\r\n            database.close()  # CLOSE DATABASE CONNECTION\r\n\r\n        lista = []\r\n        if lista_productos is not None:\r\n            for item in lista_productos:\r\n                lista.append(cls(item[0], item[1], item[2], item[3], item[4], item[5]))\r\n        return lista\r\n\r\n    @classmethod\r\n    def obtener_productos(self) -> list:\r\n        list_product = None\r\n        database = sqlite3.connect(\"data/linioexp.db\")  # OPEN DATABASE CONNECTION\r\n        try:\r\n            cursor = database.cursor()  # GET CURSOR OBJECT\r\n            query = '''\r\n            SELECT * FROM productos\r\n            '''\r\n            cursor.execute(query)  # EXECUTE THE OPERATION\r\n            list_product = cursor.fetchall()\r\n        except Exception as e:\r\n            database.rollback()  # ROLL BACK CHANGES ON ERROR\r\n            raise e\r\n        finally:\r\n            database.close()  # CLOSE DATABASE CONNECTION\r\n\r\n        return list_product\r\n\r\n    @classmethod\r\n    def obtener_producto(cls, producto:str) -> list:\r\n        list_product = None\r\n        database = sqlite3.connect(\"data/linioexp.db\")  # OPEN DATABASE CONNECTION\r\n        try:\r\n            cursor = database.cursor()  # GET CURSOR OBJECT\r\n            query = '''\r\n            SELECT * FROM productos\r\n            WHERE codigo = '{}'\r\n            '''.format(producto)\r\n\r\n            cursor.execute(query)  # EXECUTE THE OPERATION\r\n            list_product = cursor.fetchall()\r\n        except Exception as e:\r\n            database.rollback()  # ROLL BACK CHANGES ON ERROR\r\n            raise e\r\n        finally:\r\n            database.close()  # CLOSE DATABASE CONNECTION\r\n\r\n        return list_product\r\n\r\n    def actualizar_datos(self) -> bool:\r\n        estado_ope: bool = False\r\n        database = sqlite3.connect(\"data/linioexp.db\")  # OPEN DATABASE CONNECTION\r\n\r\n        try:\r\n            cursor = database.cursor()  # GET CURSOR OBJECT\r\n            query = '''\r\n            UPDATE productos\r\n            SET descripcion = '{}', precio_normal = {}, estado = '{}', descuento = {}, precio_venta = {}\r\n            WHERE codigo = '{}'\r\n            '''.format(self.__descripcion, self.__precio, self.__estado, self.__descuento,\r\n                       self.precio_venta, self.__codigo)\r\n            cursor.execute(query)  # EXECUTE THE OPERATION\r\n            database.commit()  # COMMIT THE QUERY CHANGES\r\n            estado_ope = True\r\n        except Exception as e:\r\n            database.rollback()  # ROLL BACK CHANGES ON ERROR\r\n            raise e\r\n        finally:\r\n            database.close()  # CLOSE DATABASE CONNECTION\r\n\r\n        return estado_ope\r\n\r\n    def __generar_codigo(self) -> str:\r\n        count = 0\r\n        database = sqlite3.connect(\"data/linioexp.db\")  # OPEN DATABASE CONNECTION\r\n        try:\r\n            cursor = database.cursor()  # GET CURSOR OBJECT\r\n            query = '''\r\n            SELECT COUNT(*) FROM productos'''\r\n            cursor.execute(query)\r\n            count = cursor.fetchone()\r\n        except Exception as e:\r\n            print(\"Error: {}\".format(e))\r\n        finally:\r\n            database.close()  # CLOSE DATABASE CONNECTION\r\n\r\n        return \"PROD\" + \"0\" * (4 - len(str(count[0] + 1))) + str(count[0] + 1)\r\n\r\n    @property\r\n    def precio_venta(self) -> float:\r\n        return self.__precio - (self.__precio * self.__descuento)\r\n\r\n    @property\r\n    def descripcion(self) -> str:\r\n        return self.__descripcion\r\n\r\n    @property\r\n    def precio(self) -> float:\r\n        return self.__precio\r\n\r\n    @property\r\n    def descuento(self) -> float:\r\n        return self.__descuento\r\n\r\n    @property\r\n    def estado(self) -> str:\r\n        return self.__estado\r\n\r\n\r\n    @descripcion.setter\r\n    def descripcion(self, new_value):\r\n        self.__descripcion = 
new_value\r\n\r\n @precio.setter\r\n def precio(self, new_value):\r\n self.__precio = new_value\r\n\r\n @descuento.setter\r\n def descuento(self, new_value):\r\n self.__descuento = new_value\r\n\r\n @estado.setter\r\n def estado(self, new_value):\r\n self.__estado = new_value\r\n\r\n\r\n def __str__(self) -> str:\r\n return \"{}-> nombre= {}, precio= {}, estado= {} \".format(self.__class__.__name__,\r\n self.__nombre, self.__precio,\r\n self.__estado)\r\n","sub_path":"models/producto.py","file_name":"producto.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"492152162","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom db.db import analyze_main, get_best_data, get_else_data\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods = ['GET', 'POST'])\ndef analyze():\n if request.method == 'POST':\n stock1 = str(request.form['stock1'])\n stock2 = str(request.form['stock2'])\n stock3 = str(request.form['stock3'])\n startdata = str(request.form['startdate'])\n names = [stock1, stock2, stock3]\n best = analyze_main(names, startdata)\n\n return redirect(url_for('report'))\n else:\n return render_template('stock.html')\n\n@app.route(\"/report\")\ndef report():\n best = get_best_data()\n nonbest = []\n for i in best:\n q_id = i.query_id\n stocklist = get_else_data(q_id)\n nonbest.append(stocklist)\n return render_template(\"report.html\", best = best, nonbest = nonbest)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"99094376","text":"\"\"\"\n///////////////////////////////////////////////////////////////////////////////////\n// copyright (c) 2012-2016 oscar riveros. all rights reserved. //\n// oscar.riveros@gmail.com //\n// //\n// without any restriction, oscar riveros reserved rights, patents and //\n// commercialization of this knowledge and which derive directly from this work. //\n///////////////////////////////////////////////////////////////////////////////////\n\"\"\"\n\nimport zt.kernel as ker\n\n\ndef ll(uu, ss, n):\n return n + 1\n\n\ndef oo(uu, ss, n):\n global k\n k += 1\n return 1 << (len(ss) - 1)\n\n\nif __name__ == '__main__':\n import random\n\n limit = 100\n uu = random.sample(range(limit), k=limit // 2)\n print(uu)\n\n # P05 (*) Reverse a list.\n\n k, kk = 0, []\n\n ker.cycles = 0\n _ = ker.abs(uu, ll, oo)\n for t in range(k):\n kk += ker.abs(uu[:k - t], ll, lambda uu, ss, n: 1 << (len(uu) - 1))\n\n print(80 * '-')\n print('Size : {}'.format(kk))\n print(80 * '-')\n print('Cycles : {}'.format(ker.cycles))\n print(80 * '-')\n","sub_path":"ABS_By_Example/examples/P03.py","file_name":"P03.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"176073545","text":"from typing import List\n'''\nYou are given an array prices where prices[i] is the price of a given stock on the ith day, and an integer fee representing a transaction fee.\n\nFind the maximum profit you can achieve. 
You may complete as many transactions as you like, but you need to pay the transaction fee for each transaction.\n\nNote: You may not engage in multiple transactions simultaneously (i.e., you must sell the stock before you buy again).\n\n \n\nExample 1:\n\nInput: prices = [1,3,2,8,4,9], fee = 2\nOutput: 8\nExplanation: The maximum profit can be achieved by:\n- Buying at prices[0] = 1\n- Selling at prices[3] = 8\n- Buying at prices[4] = 4\n- Selling at prices[5] = 9\nThe total profit is ((8 - 1) - 2) + ((9 - 4) - 2) = 8.\nExample 2:\n\nInput: prices = [1,3,7,5,10,3], fee = 3\nOutput: 6\n \n\nConstraints:\n\n1 <= prices.length <= 5 * 10^4\n1 <= prices[i] < 5 * 10^4\n0 <= fee < 5 * 10^4\n'''\nclass Solution:\n    # Finite State Machine - 3 states 6 transitions\n    # beforebuy --- (rest) ---> beforebuy\n    # beforebuy --- (buy ) ---> bought\n    # bought    --- (rest) ---> bought\n    # bought    --- (sell) ---> sold\n    # sold      --- (rest) ---> sold\n    # sold      --- (buy)  ---> bought\n    # same FSM as lc122\n    def maxProfit(self, prices: List[int], fee: int) -> int:\n        if len(prices) < 2:\n            return 0\n        buy, sell = -prices[0], 0 # initial state\n        for p in prices[1:]:\n            # states must be updated at the same time due to inter-dependency\n            buy, sell = max(buy, sell-p), max(sell, buy+p-fee)\n        return sell\n\nimport unittest\nfunctions = [Solution().__getattribute__(f) for f in dir(Solution()) if not f.startswith('__')]\nclass Test(unittest.TestCase): \n    def test1(self):\n        for f in functions:\n            self.assertEqual(f([1,3,2,8,4,9],2), 8, f.__name__)\n    def test2(self):\n        for f in functions:\n            self.assertEqual(f([1,3,7,5,10,3],3), 6, f.__name__)\nunittest.main()","sub_path":"leetcode/LC714. Best Time to Buy and Sell Stock with Transaction Fee.py","file_name":"LC714. Best Time to Buy and Sell Stock with Transaction Fee.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"580248436","text":"import requests\nimport xmltodict\nfrom datetime import datetime\nfrom flask import Flask, abort, jsonify, make_response\nfrom gtfslib.model import Stop, StopTime, Trip\n\nfrom Arrival import Arrival\nfrom config import API_KEY, FEED_ID, dao\n\napp = Flask(__name__)\n\n\ndef remove_keys(obj, rubbish):\n    if isinstance(obj, dict):\n        obj = {\n            key: remove_keys(value, rubbish)\n            for key, value in obj.items()\n            if key not in rubbish}\n    elif isinstance(obj, list):\n        obj = [remove_keys(item, rubbish)\n               for item in obj\n               if item not in rubbish]\n    return obj\n\n\ndef jsonify_clean(obj):\n    \"\"\"Wrapper around jsonify that removes instances of '_sa_instance_state' key\n    from response.\"\"\"\n    return jsonify(remove_keys(obj, ['_sa_instance_state']))\n\n\n@app.errorhandler(404)\ndef not_found(error):\n    return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n@app.route('/thebus/api/v1.0/stops', methods=['GET'])\ndef get_stops():\n    return jsonify_clean([vars(stop) for stop in dao.stops(batch_size=900)])\n\n\n@app.route('/thebus/api/v1.0/stops/<stop_id>', methods=['GET'])\ndef get_stop(stop_id):\n    try:\n        stop = dao.stop(stop_id, FEED_ID)\n        return jsonify_clean(vars(stop))\n    except TypeError:\n        abort(404)\n\n\n@app.route('/thebus/api/v1.0/arrivals/realtime/<stop_id>', methods=['GET'])\ndef get_realtime_stop_arrivals(stop_id):\n    url_parameters = {'key': API_KEY, 'stop': stop_id}\n    try:\n        response = requests.get(\n            'http://api.thebus.org/arrivals', params=url_parameters)\n    except ConnectionError:\n        abort(404)\n    # Get xml tree from response and convert it to a dictionary\n    response_dict = 
xmltodict.parse(response.text)\n    if 'arrival' in response_dict['stopTimes']:\n        return jsonify_clean(response_dict['stopTimes']['arrival'])\n    else:\n        return jsonify_clean([])\n\n\n@app.route(\n    '/thebus/api/v1.0/arrivals/scheduled/<stop_id>', methods=['GET'])\ndef get_scheduled_stop_arrivals(stop_id):\n    try:\n        stop = dao.stop(stop_id, FEED_ID)\n        response_list = []\n        for stop_time in stop.stop_times:\n            response_list.append(vars(Arrival.from_stop_time(stop_time)))\n        return jsonify_clean(response_list)\n    except AttributeError:\n        abort(404)\n\n\n@app.route('/thebus/api/v1.0/routes', methods=['GET'])\ndef get_routes():\n    return jsonify_clean([vars(route) for route in dao.routes()])\n\n\n@app.route('/thebus/api/v1.0/routes/<route_id>/shape', methods=['GET'])\ndef get_route_shape(route_id):\n    try:\n        route = dao.route(route_id, FEED_ID)\n        shape_id = route.trips[0].shape_id\n        return get_shape(shape_id)\n    except AttributeError:\n        abort(404)\n\n\n@app.route('/thebus/api/v1.0/routes/<route_id>/stops', methods=['GET'])\ndef get_route_stops(route_id):\n    try:\n        stops = dao.stops(fltr=(StopTime.trip_id == Trip.trip_id)\n                          & (Trip.route_id == route_id)\n                          & (StopTime.stop_id == Stop.stop_id))\n        stop_set = [vars(stop) for stop in stops]\n        return jsonify_clean(stop_set)\n    except AttributeError:\n        abort(404)\n\n\n@app.route('/thebus/api/v1.0/shapes/<shape_id>', methods=['GET'])\ndef get_shape(shape_id):\n    try:\n        points = list(map(lambda x: vars(x), dao.shape(shape_id, FEED_ID).points))\n        return jsonify_clean(points)\n    except AttributeError:\n        abort(404)\n\n\ndef get_trip(trip_id):\n    return remove_keys(vars(dao.trip(trip_id, FEED_ID)), 'stop_times')\n\n\n@app.route('/thebus/api/v1.0/vehicles/realtime/<vehicle_id>', methods=['GET'])\ndef get_realtime_vehicle(vehicle_id):\n    url_parameters = {'key': API_KEY, 'num': vehicle_id}\n    try:\n        response = requests.get(\n            'http://api.thebus.org/vehicle', params=url_parameters)\n    except ConnectionError:\n        abort(404)\n    # Get xml tree from response and convert it to a dictionary\n    response_dict = xmltodict.parse(response.text)\n    if 'vehicle' in response_dict['vehicles']:\n        vehicle_response = response_dict['vehicles']['vehicle']\n        # In some cases the api returns old vehicle data. Always use the new one.\n        if isinstance(vehicle_response, list):\n            vehicle_response.sort(\n                key=lambda x: datetime.strptime(x['last_message'], '%m/%d/%Y %I:%M:%S %p'),\n                reverse=True)\n            vehicle_response[0]['trip'] = get_trip(vehicle_response[0]['trip'])\n            return jsonify_clean(response_dict['vehicles']['vehicle'][0])\n        else:\n            vehicle_response['trip'] = get_trip(vehicle_response['trip'])\n            return jsonify_clean(vehicle_response)\n    else:\n        return jsonify_clean(response_dict)\n\n\nif __name__ == '__main__':\n    app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"260549311","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\n\napp_name = 'usergroups'\n\nurlpatterns = [\n path('',views.UsergroupList.as_view(),name='all'),\n path('new/',views.CreateUsergroup.as_view(),name='new'),\n path('projects/in//',views.UsergroupDetail.as_view(),name='single'),\n path('join//',views.JoinUsergroup.as_view(),name='join'),\n path('leave//',views.LeaveUsergroup.as_view(),name='leave'),\n]\n","sub_path":"usergroups/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"6781098","text":"from pycondor import Job, Dagman\n\n# Define the error, output, log, and submit directories\nerror = 'condor/error'\noutput = 'condor/output'\nlog = 'condor/log'\nsubmit = 'condor/submit'\n\n# Instantiate a Dagman\ndagman = Dagman(name='tutorial_dagman',\n submit=submit)\n\n# Instantiate two Jobs\njob_date = Job(name='date_job',\n executable='/bin/date',\n submit=submit,\n error=error,\n output=output,\n log=log,\n dag=dagman)\n\njob_sleep = Job(name='sleep_job',\n executable='/bin/sleep',\n submit=submit,\n error=error,\n output=output,\n log=log,\n dag=dagman)\njob_sleep.add_arg('1')\njob_sleep.add_arg('2')\njob_sleep.add_arg('3')\n\n# Add inter-job relationships\n# Ensure that job_sleep finishes before job_date starts\njob_sleep.add_child(job_date)\n\n# Write all necessary submit files and submit job to Condor\ndagman.build_submit()\n","sub_path":"examples/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"600673112","text":"#!/usr/bin/env python\n\nimport os\nfrom app import create_app, db\nfrom flask_script import Manager\n\n\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\n\n\n@manager.command\ndef initdb():\n from sqlalchemy.sql import text\n cmd = \"\"\"\\\n CREATE EXTENSION hstore;\n CREATE TABLE urls ( id SERIAL, mapping HSTORE );\n INSERT INTO urls (mapping) VALUES ('');\n \"\"\"\n db.engine.execute(text(cmd))\n\n\n@manager.command\ndef key(key, value=None):\n from sqlalchemy.sql import text\n if value is None:\n q = \"UPDATE urls SET mapping = delete(mapping, :key);\"\n else:\n q = \"UPDATE urls SET mapping = mapping || :key\"\n key = '\"%s\"=>\"%s\"' % (key, value)\n db.engine.execute(text(q), key=key)\n\n\n@manager.command\ndef list():\n from sqlalchemy.sql import text\n q = \"SELECT (EACH(mapping)).* FROM urls;\"\n results = db.engine.execute(text(q), key=key).fetchall()\n print(\"#\" * 80)\n for result in results:\n print(\"http://data.qiime.org/{} => {}\".format(*result))\n\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"625941856","text":"'''\nCreated on 2017. 9. 
1.\n\n@author: jaehyeong\n'''\nfrom sklearn.datasets import make_moons\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nimport mglearn\n\nX, y = make_moons(n_samples=200, noise=0.05, random_state=0)\n\n# KMeans (using 2 clusters)\nkmeans = KMeans(n_clusters=2)\nkmeans.fit(X)\ny_pred = kmeans.predict(X)\n\n# plot the cluster assignments and cluster centers\nplt.scatter(X[:,0], X[:,1], c=y_pred, cmap=mglearn.cm2, s=60,edgecolors='k')\nplt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1],\n            marker='^', c=[mglearn.cm2(0), mglearn.cm2(1)], s=100, linewidth=2,edgecolor='k')\nplt.xlabel('Feature 0'); plt.ylabel('Feature 1')\nplt.show()\n\n'''--------------------------'''\n# KMeans (using 10 clusters)\nkmeans = KMeans(n_clusters=10)\nkmeans.fit(X)\ny_pred = kmeans.predict(X)\n\n# plot the cluster assignments and cluster centers\nplt.scatter(X[:,0], X[:,1], c=y_pred, cmap='Paired', s=60,edgecolors='black')\nplt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1],\n            marker='^', c=range(kmeans.n_clusters), s=60, linewidth=2,edgecolor='black')\nplt.xlabel('Feature 0'); plt.ylabel('Feature 1')\nplt.show()\nprint('cluster label :\\n', y_pred)\n\n# distances to the clusters\ndistance_features = kmeans.transform(X)\nprint('cluster-distance data shape : ', distance_features.shape)\nprint('cluster distances : ', distance_features)\n","sub_path":"chap03_unsupervised/st04_2_KMeans.py","file_name":"st04_2_KMeans.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"394048877","text":"import os\nimport datetime\nfrom configparser import ConfigParser\nconfig = ConfigParser()\nconfig.read(os.environ['CONFIG_PATH'])\nGCLOUD_PROJECT = config['app']['GCLOUD_PROJECT']\nMAILJET_API_KEY = config['app']['MAILJET_API_KEY']\nMAILJET_API_SECRET = config['app']['MAILJET_API_SECRET']\nMAILJET_SENDER = config['app']['MAILJET_SENDER']\n\nVISA_UPDATE_URL = 'https://reentryvisa.inis.gov.ie/website/INISOA/IOA.nsf/(getApps4DT)?openagent&' \\\n                  'dt={}/{}/{}&type=I&num=1'\nGNIB_UPDATE_URL = 'https://burghquayregistrationoffice.inis.gov.ie/Website/AMSREG/AMSRegWeb.nsf/(getApps4DT)' \\\n                  '?openagent&dt={}/{}/{}&cat=Work&sbcat=All&typ=Renewal'\n\nVISA_HOST = 'reentryvisa.inis.gov.ie'\nGNIB_HOST = 'burghquayregistrationoffice.inis.gov.ie'\n\nTODAY_plus_2 = datetime.datetime.today() + datetime.timedelta(days=2)\nDAYS_UPDATE_RANGE = 90\nFETCH_APPOINTMENTS_TIMEOUT = 60\nTYPE_VISA = 'visa'\n\nHEADERS = {\n    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) '\n                  ' Chrome/52.0.2743.116 Safari/537.36',\n    'Upgrade-Insecure-Requests': '1',\n    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n    'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6,pl;q=0.4,de;q=0.2,uk;q=0.2,it;q=0.2',\n}\n","sub_path":"app/async_source/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"563242096","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\n\nfrom datetime import datetime\n\nfrom django.db import models\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib.auth.models import User\n\nFS = FileSystemStorage(location=('%s/media/uploaded/' % (settings.PROJECT_ROOT_PATH)))\n\n# Tech skills class\nclass TechSkill(models.Model):\n    class Meta:\n        verbose_name_plural = u'Tecnologias'\n        verbose_name = u'Tecnologia'\n        ordering = ('-percent','name',)\n\n    def __unicode__(self):\n        return 
self.name\n\n name = models.CharField(max_length=255, verbose_name=u'Nome Tecnologia')\n percent = models.IntegerField(verbose_name=u'Porcentagem de Conhecimento')\n date_created = models.DateTimeField(default=datetime.now, blank=True)\n\n# Employment\nclass Employment(models.Model):\n class Meta:\n verbose_name_plural = u'Empregos'\n verbose_name = u'Emprego'\n ordering = ('-start_date', '-end_date',)\n\n def __unicode__(self):\n return self.name\n\n name = models.CharField(max_length=255, verbose_name=u'Nome Empresa')\n site = models.URLField()\n job_position = models.CharField(max_length=255, verbose_name=u'Cargo')\n start_date = models.DateField(blank=False,null=False, verbose_name=u'Data de Início')\n end_date = models.DateField(blank=True,null=True, verbose_name=u'Data de Desligamento')\n description = models.TextField(blank=True,null=True, verbose_name=u'Descrição')\n\n# Education\nclass Education(models.Model):\n class Meta:\n verbose_name_plural = u'Informações sobre Educação'\n verbose_name = u''\n ordering = ('-start_date', '-end_date',)\n\n def __unicode__(self):\n return self.name\n\n name = models.CharField(max_length=255, verbose_name=u'Nome Escola')\n course = models.CharField(max_length=255, verbose_name=u'Curso')\n activity = models.CharField(max_length=255, verbose_name=u'Tipo')\n start_date = models.DateField(blank=False,null=False, verbose_name=u'Data de Início')\n end_date = models.DateField(blank=True,null=True, verbose_name=u'Termino')\n\n# Certification\nclass Certification(models.Model):\n class Meta:\n verbose_name_plural = u'Certificações'\n verbose_name = u'Certificação'\n ordering = ('-start_date', '-end_date',)\n\n def __unicode__(self):\n return self.name\n\n name = models.CharField(max_length=255, verbose_name=u'Nome Certificação')\n start_date = models.DateField(blank=False,null=False, verbose_name=u'Data de Início')\n end_date = models.DateField(blank=True,null=True, verbose_name=u'Data Validade')\n\n# Training and courses\nclass Training(models.Model):\n class Meta:\n verbose_name_plural = u'Cursos e Treinamentos'\n verbose_name = u'Curso e Treinamento'\n ordering = ('-end_date',)\n\n def __unicode__(self):\n return self.name\n\n name = models.CharField(max_length=255, verbose_name=u'Nome Treinamento')\n school = models.CharField(max_length=255, verbose_name=u'Nome Escola')\n end_date = models.DateField(blank=False,null=False, verbose_name=u'Data Conclusão')\n\n# UserProfile class to store resume\nclass UserProfile(models.Model):\n STATUS = (\n ('1', 'Destaque'),\n ('0', 'Normal'),\n )\n\n user = models.OneToOneField(User)\n photo = models.ImageField(upload_to='profiles/', storage=FS, verbose_name=u'Foto')\n headline = models.CharField(max_length=255, verbose_name=u'Descrição Profissional')\n mobile_phone_1 = models.CharField(max_length=20,blank=True,null=True, verbose_name=u'Telefone Celular 1')\n mobile_phone_2 = models.CharField(max_length=20,blank=True,null=True, verbose_name=u'Telefone Celular 2')\n professional_resume = models.TextField(verbose_name=u'Descrição Profissional')\n personal_resume = models.TextField(verbose_name=u'Descrição Pessoal')\n personal_specialty = models.TextField(verbose_name=u'Especialidade')\n tech = models.ManyToManyField(TechSkill, verbose_name=u'Tecnologia')\n employment = models.ManyToManyField(Employment, verbose_name=u'Empresa')\n education = models.ManyToManyField(Education, verbose_name=u'Educação')\n certification = models.ManyToManyField(Certification, verbose_name=u'Certificação')\n training = 
models.ManyToManyField(Training, verbose_name=u'Treinamento')\n status = models.CharField(max_length=1, choices=STATUS, default=1, db_column='status')\n\n class Meta:\n verbose_name_plural = u'Informações de Registro'\n verbose_name = u''\n\n def __unicode__(self):\n return self.user.username\n\n @classmethod\n def get_active(cls):\n active = cls.objects.filter(status=1)\n if len(active) > 1:\n raise ValueError(u'Existe mais de um perfil ativo')\n if not active:\n raise Exception(u'Não existe um perfil ativo')\n\n return active[0]\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"132945080","text":"import time, requests, json, urllib, os\nfrom flask import Flask\napp = Flask(__name__)\n\ndef get_data():\n data = {\n \"tillfalle\":\"Urval1\",\n \"vy\":\"Antagningspoang\",\n \"antagningsomgang\":\"HT2020\",\n \"larosateId\":\"\",\n \"utbildningstyp\":\"p\",\n \"fritextFilter\":\"\",\n \"urvalsGrupp\":\"\",\n \"firstResult\":0,\n \"maxResults\":25000,\n \"sorteringsKolumn\":1,\n \"sorteringsOrdningDesc\":False,\n \"requestNumber\":1,\n \"paginate\":True\n }\n\n data = urllib.parse.quote(json.dumps(data))\n\n print(data)\n\n encoded = \"https://cors-anywhere.herokuapp.com/statistik.uhr.se/rest/stats/tableData?request=\" + data\n print(encoded)\n\n return requests.get(\n encoded,\n headers={\n \"x-requested-with\": \"Python 3.8\"\n }\n )\n\n\njdata = None\n\ntry:\n data = open(\"cache.json\")\n text = data.read()\n jdata = json.loads(text)\n data.close()\nexcept:\n\n data = get_data()\n\n cache = open(\"cache.json\",\"w+\")\n cache.write(data.text)\n cache.close()\n\n jdata = json.loads(data.text)\n\n\n@app.route('/<university>/<merit>')\ndef search(university, merit):\n #\n # Process data and return an array\n #\n return university + str(merit)\n\nif __name__ == '__main__':\n app.run()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"489725464","text":"import pandas as pd\nimport pytest\nimport os\nfrom optopsy.data import format_option_df\n\n\nCURRENT_FILE = os.path.abspath(os.path.dirname(__file__))\nTEST_FILE_PATH = os.path.join(CURRENT_FILE,\n '../test_data/test_options_data.csv')\n\nTEST_STRUCT = (\n ('underlying_symbol', 0),\n ('underlying_price', 1),\n ('option_symbol', 3),\n ('option_type', 5),\n ('expiration', 6),\n ('quote_date', 7),\n ('strike', 8),\n ('bid', 10),\n ('ask', 11),\n ('delta', 15),\n ('gamma', 16),\n ('theta', 17),\n ('vega', 18)\n)\n\n\n# Data to test results with ----------------------------------------------\n@pytest.fixture(scope=\"module\")\ndef options_data():\n cols = list(zip(*TEST_STRUCT))\n return (\n pd.read_csv(\n TEST_FILE_PATH,\n parse_dates=True,\n names=cols[0],\n usecols=cols[1],\n skiprows=1,\n nrows=None\n ).pipe(format_option_df))\n","sub_path":"tests/support/data_fixtures.py","file_name":"data_fixtures.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"632369547","text":"import hlt\nimport logging\nfrom collections import OrderedDict\ngame = hlt.Game(\"Rampa-V6\")\nlogging.info(\"Starting Rampa Bot\")\nlogger = logging.getLogger(__name__)\n\n#Some globals\n#Max ratio of ships sent to the same planet, avoids all ships going to the same planet\nMAX_RATIO_SHIP_PER_PLANET = 0.5\n#Always 
try to have at least 1 ship attacking (if not alone?)\nMIN_SHIP_ATTACKERS = 1\n#Even if there are still some available planets, send a portion of the ships to attack\nMAX_RATIO_SHIP_ATTACKERS = 0.25\n#NB of docked ships per planet\nMAX_NB_DOCKED_SHIP = 5\n\n#Store all the ships that are dedicated to the attacks between MIN_SHIP_ATTACKERS and MAX_RATIO_SHIP_ATTACKERS\nship_attackers = {}\n#Store all ships that are dedicated to conquest, between MAX_RATIO_SHIP_ATTACKERS and (100% - MIN_SHIP_ATTACKERS)\nship_conquerors = {}\n#Store new ships that have never been seen before (to allocate between attack & conquest)\nnew_ship = []\ntry:\n while True:\n logger.debug(\"In new turn loop\")\n game_map = game.update_map()\n command_queue = []\n\n #Count nb of owned planets:\n all_planets = game_map.all_planets()\n team_planets = []\n for planet in all_planets:\n #TODO check if planet is mine!\n if (planet.is_owned()) and (planet.owner.id == game_map.get_me().id):\n team_planets.append(planet)\n nb_owned_planets = len(team_planets)\n logger.info(\"OWN %s planets\" % nb_owned_planets)\n\n #All the ships available this turn\n team_ships = game_map.get_me().all_ships()\n team_ships_dict = {}\n for ship in team_ships:\n team_ships_dict[ship.id] = ship\n\n nb_attackers_died = 0\n #Check if attackers are still alive\n for ship_id in list(ship_attackers.keys()):\n dead = False\n try:\n ship = team_ships_dict[ship_id]\n if ship.health <= 0 or ship not in team_ships:\n dead = True\n # The attacker is lost\n except KeyError:\n dead = True\n except:\n logger.exception(\"Can't find ship?\")\n if dead:\n logger.debug(\"Attacker died\")\n nb_attackers_died+=1\n del ship_attackers[ship_id]\n\n nb_conquerors_died = 0\n #Check if conquerors are still alive\n for ship_id in list(ship_conquerors.keys()):\n dead = False\n try:\n ship = team_ships_dict[ship_id]\n if ship.health <= 0 or ship not in team_ships:\n dead = True\n # The conqueror is lost\n except KeyError:\n dead = True\n except:\n logger.exception(\"Can't find ship?\")\n if dead:\n logger.debug(\"Conqueror died\")\n nb_conquerors_died+=1\n del ship_conquerors[ship_id]\n\n #Now check for new ships\n for ship in team_ships:\n #Don't use docked ships at all!\n if ship.docking_status != ship.DockingStatus.UNDOCKED:\n # Skip this ship\n continue\n found = False\n try:\n t = ship_attackers[ship.id]\n found = True\n except KeyError:\n pass\n try:\n t = ship_conquerors[ship.id]\n found = True\n except KeyError:\n pass\n if not found:\n logger.info(\"New ship, need to allocate it\")\n new_ship.append(ship)\n\n logger.info(\"Lost %s attackers & %s conquerors\" % (nb_attackers_died, nb_conquerors_died))\n logger.info(\"Found %s new ships\" % len(new_ship))\n #Split in two: Attackers & Conquerors\n\n #If there are not enough attackers, take some ships from the new ship list\n #TODO if there are no empty planets left, send all to attack\n nb_attackers = len(ship_attackers)\n current_ratio = nb_attackers / float(len(team_ships))\n logger.debug(\"nb_attackers: %s, current_ratio: %s\" % (nb_attackers,current_ratio))\n while ((nb_attackers < MIN_SHIP_ATTACKERS) or (current_ratio < MAX_RATIO_SHIP_ATTACKERS)) and (len(new_ship)>0):\n logger.info(\"Need new attackers\")\n #TODO look for the closest ship to an enemy\n #Take the first new ship\n ship = new_ship.pop()\n logger.debug(\"Take ship: %s \" % ship.id)\n logger.debug(\"Removed from new_ship: %s \" % ship.id)\n #Add to attackers\n ship_attackers[ship.id] = 1\n logger.debug(\"Added to attackers: %s \" % ship.id)\n nb_attackers = len(ship_attackers)\n 
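# refresh the ratio so the while-condition re-evaluates against the updated roster\n 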
current_ratio = nb_attackers / float(len(team_ships))\n logger.debug(\"nb_attackers: %s, current_ratio: %s\" % (nb_attackers,current_ratio))\n\n #Add all the other new_ship to conquerors\n for ship in new_ship:\n #Add to conquerors\n ship_conquerors[ship.id] = 1\n\n #Reset new_ship\n new_ship = []\n\n #HANDLE all attackers\n for ship_id in ship_attackers:\n ship = team_ships_dict[ship_id]\n entities_by_distance = game_map.nearby_entities_by_distance(ship)\n entities_by_distance = OrderedDict(sorted(entities_by_distance.items(), key=lambda t: t[0]))\n closest_enemy_ships = []\n #Loop through all entities by distance, check if they are an enemy, stop at the closest\n target_ship = None\n for distance in entities_by_distance:\n entity = entities_by_distance[distance][0]\n if isinstance(entity, hlt.entity.Ship) and entity not in team_ships:\n target_ship = entity\n break\n if target_ship is not None:\n navigate_command = ship.navigate(\n ship.closest_point_to(target_ship),\n game_map,\n speed=int(hlt.constants.MAX_SPEED),\n ignore_ships=False)\n if navigate_command:\n command_queue.append(navigate_command)\n\n\n #HANDLE all conquerors\n nb_ship_per_planet = {}\n\n for ship_id in ship_conquerors:\n ship = team_ships_dict[ship_id]\n\n entities_by_distance = game_map.nearby_entities_by_distance(ship)\n entities_by_distance = OrderedDict(sorted(entities_by_distance.items(), key=lambda t: t[0]))\n\n closest_empty_planets = []\n closest_enemy_ships = []\n closest_planets = []\n #Loop through all entities by distance, separate into 2 lists: empty planets & enemy ships\n for distance in entities_by_distance:\n\n entity = entities_by_distance[distance][0]\n if isinstance(entity, hlt.entity.Planet):\n closest_planets.append(entity)\n if isinstance(entity, hlt.entity.Planet) and not entity.is_owned():\n closest_empty_planets.append(entity)\n if isinstance(entity, hlt.entity.Ship) and entity not in team_ships:\n closest_enemy_ships.append(entity)\n\n #If there are no empty planets: ATTACK\n if len(closest_empty_planets) == 0:\n target_ship = closest_enemy_ships[0]\n navigate_command = ship.navigate(\n ship.closest_point_to(target_ship),\n game_map,\n speed=int(hlt.constants.MAX_SPEED),\n ignore_ships=False)\n if navigate_command:\n command_queue.append(navigate_command)\n continue\n\n #First, make sure the ship can dock\n if ship.can_dock(closest_empty_planets[0]):\n command_queue.append(ship.dock(closest_empty_planets[0]))\n continue\n\n\n #if fewer than MAX_NB_DOCKED_SHIP ships are docked at the closest planet, dock\n if ship.can_dock(closest_planets[0]):\n if len(closest_planets[0].all_docked_ships()) < min(MAX_NB_DOCKED_SHIP,nb_owned_planets):\n command_queue.append(ship.dock(closest_planets[0]))\n continue\n\n #If there is only 1 ship left, no need to coordinate them, go to the closest empty planet\n if len(ship_conquerors) == 1:\n navigate_command = ship.navigate(\n ship.closest_point_to(closest_empty_planets[0]),\n game_map,\n speed=int(hlt.constants.MAX_SPEED),\n ignore_ships=False)\n if navigate_command:\n command_queue.append(navigate_command)\n continue\n\n #Now, look for a suitable planet\n for target_planet in closest_empty_planets:\n try:\n nb_ship_per_planet[target_planet]+=1\n except KeyError:\n nb_ship_per_planet[target_planet] = 1\n\n #Only send a ship if less than half of the current ships are going to this destination\n if nb_ship_per_planet[target_planet] > int(len(team_ships)*MAX_RATIO_SHIP_PER_PLANET):\n logger.debug(\"Reroute the ship to another planet, too many ships already going there\")\n 
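# nb_ship_per_planet is shared across all conquerors this turn, so this cap spreads ships over different targets\n 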
#This ship is not going there anymore, remove from the counter\n nb_ship_per_planet[target_planet] -= 1\n #Skip to next planet in the list\n continue\n else:\n navigate_command = ship.navigate(\n ship.closest_point_to(target_planet),\n game_map,\n speed=int(hlt.constants.MAX_SPEED),\n ignore_ships=False)\n if navigate_command:\n command_queue.append(navigate_command)\n #Exit target planet loop\n break\n\n game.send_command_queue(command_queue)\n # TURN END\n # GAME END\nexcept:\n logger.exception(\"BIG CRASH\")","sub_path":"EricTrainingBot.py","file_name":"EricTrainingBot.py","file_ext":"py","file_size_in_byte":10183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"277951108","text":"import os\r\nimport random\r\nimport subprocess\r\nfrom itertools import product\r\n\r\nfrom PIL import Image\r\n\r\nfrom advanced_tools.algorithm_utils import natural_sorted\r\nfrom advanced_tools.path_utils import get_filepaths, is_file_created_within_given_interval\r\n\r\n\r\ndef prepare_and_sortphotos(\r\n source_dir=r\"./Pictures\",\r\n destination_dir=r\"./Pictures/ArrangedPictures\",\r\n extension_to_fix=\".HEIC\",\r\n rename=True,\r\n):\r\n \"\"\"This is a simple wrapper function for the sortphotos library. Prepares images by\r\n converting .HEIC extensions to jpg if necessary.\r\n\r\n For more detailed options, check the sortphotos library documentation.\r\n\r\n Keyword Arguments:\r\n source_dir {str} -- Directory with photos to arrange (default: {r\"./Pictures\"})\r\n destination_dir {str} -- Directory for arranged photos; created automatically\r\n if it does not exist (default: {r\"./Pictures/ArrangedPictures\"})\r\n extension_to_fix {str} -- Extension to convert to .jpg (default: {\".HEIC\"})\r\n\r\n \"\"\"\r\n\r\n pics = get_filepaths(source_dir)\r\n\r\n for i in pics:\r\n if os.path.splitext(i)[1] == extension_to_fix:\r\n os.rename(i, os.path.join(os.path.splitext(i)[0] + \".jpg\"))\r\n else:\r\n pass\r\n if rename:\r\n subprocess.call(\r\n \"sortphotos {} {} --rename=%Y_%m_%d_%H%M\".format(source_dir, destination_dir)\r\n )\r\n else:\r\n subprocess.call(\"sortphotos {} {}\".format(source_dir, destination_dir))\r\n\r\n\r\ndef find_single_raw_images(path: str, for_hours: int | None = None) -> list:\r\n \"\"\"Find and return RAF files within the given path that have no matching JPG\r\n\n Args:\r\n path (str): directory to search for image files\r\n for_hours (int, optional): only include RAF files created within the\r\n last ``for_hours`` hours. Defaults to None (no time filter).\r\n\n 
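Example (illustrative call):\r\n find_single_raw_images(\"./Pictures\", for_hours=24)\r\n\n 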
Returns:\r\n list: paths of single RAF files (those with no JPG counterpart)\r\n \"\"\"\r\n raw_imgs = [os.path.splitext(i)[0] for i in get_filepaths(path, file_type=\".raf\")]\r\n all_imgs = [\r\n os.path.splitext(i)[0] for i in get_filepaths(path, file_type=[\".raf\", \".jpg\"])\r\n ]\r\n\r\n if for_hours:\r\n raw_imgs = [\r\n i\r\n for i in raw_imgs\r\n if is_file_created_within_given_interval(f\"{i}.RAF\", 3600 * for_hours)\r\n ]\r\n\r\n single_raw_imgs = [f\"{i}.RAF\" for i in raw_imgs if all_imgs.count(i) < 2]\r\n\r\n return sorted(single_raw_imgs)\r\n\r\n\r\ndef tile_image(path_to_image: str, rows: int, columns: int) -> None:\r\n \"\"\"\r\n Split the given image into a grid of tiles with the given numbers of rows and columns\r\n\r\n Args:\r\n path_to_image (str): path to the image file to split\r\n rows (int): number of tile rows\r\n columns (int): number of tile columns\r\n \"\"\"\r\n\r\n folder, file = os.path.split(path_to_image)\r\n name, ext = os.path.splitext(file)\r\n img: Image.Image = Image.open(path_to_image)\r\n width, height = img.size\r\n\r\n tile_height = height / rows\r\n tile_width = width / columns\r\n\r\n output_folder = os.path.join(folder, \"tiles\")\r\n if not os.path.exists(output_folder):\r\n os.makedirs(output_folder)\r\n\r\n grid = list(product(range(0, rows), range(0, columns)))\r\n print(grid)\r\n for row, col in grid:\r\n # Rectangles are represented as 4-tuples, with the upper left corner given first.\r\n # Grid is like row, column [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2) ... ]\r\n box = (\r\n col * tile_width,\r\n row * tile_height,\r\n tile_width * (col + 1),\r\n tile_height * (row + 1),\r\n )\r\n print(box)\r\n out = os.path.join(folder, \"tiles\", f\"{name}_{row}_{col}{ext}\")\r\n img.crop(box).save(out) # type: ignore\r\n\r\n\r\ndef merge_images(\r\n folder_path: str, resolution: tuple, columns: int, shuffle: bool = False\r\n) -> None:\r\n # Get files grouped per directory, walking from the top folder into subfolders\r\n folders_of_files = dict(\r\n [(root, files) for root, folder, files in os.walk(folder_path)]\r\n )\r\n\r\n images = {}\r\n for root, file_list in folders_of_files.items():\r\n images[root] = [\r\n Image.open(os.path.join(root, file)) for file in natural_sorted(file_list)\r\n ]\r\n\r\n for root, image_list in images.items():\r\n if len(image_list) % columns == 0:\r\n rows = int(len(image_list) / columns)\r\n else:\r\n rows = int(len(image_list) / columns) + 1\r\n\r\n if shuffle:\r\n random.shuffle(image_list)\r\n\r\n image_width = int(resolution[0] / columns)\r\n image_height = int(resolution[1] / rows)\r\n image_size = (image_width, image_height)\r\n\r\n images[root] = [img.resize(image_size) for img in image_list]\r\n new_image = Image.new(\"RGB\", (columns * image_width, rows * image_height))\r\n\r\n grid = product(range(0, rows), range(0, columns))\r\n boxes = [\r\n (\r\n image_width * col,\r\n image_height * row,\r\n image_width * (col + 1),\r\n image_height * (row + 1),\r\n )\r\n for row, col in grid\r\n ]\r\n\r\n # images[root] set within forloop so it is now different from image_list\r\n image_box = list(zip(images[root], boxes))\r\n\r\n for image, loc in image_box:\r\n new_image.paste(image, (loc[0], loc[1]))\r\n\r\n new_image.save(os.path.join(root, \"merged.jpeg\"), 
\"JPEG\")\r\n","sub_path":"advanced_tools/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"135397492","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom .serializers import BookSerializer\nfrom books.models import Book\nfrom rest_framework.decorators import api_view,permission_classes\nfrom rest_framework.permissions import IsAuthenticated,BasePermission\n\n\nclass CanView(BasePermission):\n def has_permission(self,request,view):\n return request.user.is_active\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated,CanView])\ndef index(request):\n books = Book.objects.all()\n serializer = BookSerializer(instance=books, many=True)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n@api_view(['POST'])\ndef create(request):\n serializer = BookSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(data={\n \"success\":True,\n \"message\": \"Book Has been created\"\n },status=status.HTTP_201_CREATED)\n return Response(data={\n \"success\":False,\n \"errors\": serializer.errors\n },status=status.HTTP_400_BAD_REQUEST)","sub_path":"books/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"483985369","text":"import numpy as np\nimport re\nimport textwrap\nimport pandas as pd\n\nfrom .connect.connect import Connect\nfrom .regions import get_bounds\n\n\nclass Dataset:\n \"\"\"\n This is the representation of a DataCube Dataset in the DQTools library.\n \"\"\"\n\n def __init__(self, product, subproduct, region=None, tile=None, res=None,\n key_file=None):\n \"\"\"\n Connect to the datacube and extract metadata for this particular\n product/sub-product.\n\n Attributes passed from the caller are recorded in self:\n self.product: name of the product\n self.subproduct: name of sub-product\n self.region [optional]: name of region required\n self.tile [optional]: name of tile required\n\n\n NOTE: If a region/tile is defined, then metadata pertains only to\n that region or tile. If no region/tile is defined then metadata is\n returned for the entire sub-product extent.\n\n Empty attributes created for\n - self.data: The xarray DataSet\n - self.timesteps: The timesteps of data available\n\n :param product: product name (str)\n\n :param subproduct: sub-product name (str)\n\n :param region [optional]: the name of a region for data/metadata,\n as defined in the regions directory\n (NOTE: writing data for regions\n is not possible, unless the bounds\n exactly match a tile... in which case\n just use tile to define our spatial\n extent!)\n\n :param tile [optional]: the tile to extract data/metadata for\n (must match datacube record)\n\n :param res [optional]: the resolution of the output data\n required. This will ultimately enact a\n GDAL Warp inside the datacube to give\n you the required resolution within the\n bounds defined in either tile or region.\n\n :param key_file: Assimila DQ key file required to access the\n HTTP server. 
Allows keyfile to be in a different\n location as used by the QGIS Plugin.\n \"\"\"\n\n # write product & sub-product as attributes\n self.product = product\n self.subproduct = subproduct\n\n # Write region as an attribute\n self.region = region\n\n # Write resolution as an attribute\n self.res = res\n\n # Extract the bounds for this region, if provided\n if self.region:\n bounds = get_bounds(self.region)._asdict()\n else:\n bounds = None\n\n # Create empty attributes for later data\n self.last_timestep = None\n self.first_timestep = None\n self.last_gold = None\n self.fill_value = None\n self.all_subproduct_tiles = None\n self.description = None\n self.frequency = None\n\n # Write tile as an attribute\n self.tile = tile\n\n # Instantiate the datacube connector\n self.conn = Connect(key_file=key_file)\n\n # Download metadata for this product & sub-product & tile\n result = self.conn.get_subproduct_meta(product=self.product,\n subproduct=self.subproduct,\n bounds=bounds,\n tile=tile)\n\n # Extract relevant metadata as attributes.\n self.extract_metadata(result)\n\n # Create empty attributes\n self.data = None\n self.timesteps = None\n\n def __repr__(self):\n \"\"\"\n User-friendly representation of the dataset object\n\n :return:\n \"\"\"\n return f\"\"\"\n================================================================================\nProduct: {self.product}\nSub-product: {self.subproduct}\n================================================================================\n{textwrap.fill(self.description, 79)}\n\nTiles:\n In datacube: {self.all_subproduct_tiles}\n Selected tile: {self.tile}\n\nTimesteps available:\n First: {self.first_timestep}\n Last: {self.last_timestep}\n Frequency: {str(self.frequency)}\n\nLast Gold: {self.last_gold}\n================================================================================\nData:\n{self.data}\n================================================================================\n \"\"\"\n\n def extract_metadata(self, all_meta):\n \"\"\"\n Extract the metadata required to populate the attributes on\n initialisation. This parses the metadata sent from the data cube\n to extract and record key parameters:\n\n Metadata parameters created:\n - self.last_gold: The last 'good' time step of data. 
Defined by\n developer.\n - self.last_timestep: The last time step of data recorded in the\n DataCube\n - self.first_timestep: The first time step of data recorded in\n the DataCube\n - self.fill_value: The fill value for this data\n - self.all_subproduct_tiles: All tiles available for this\n sub-product in the data cube\n - self.tiles: The tile currently selected in this instance\n\n :param all_meta: metadata dump from the data cube 'get meta'\n request\n\n :return:\n \"\"\"\n\n # Silence the pandas SettingWithCopyWarning caused by the subset line below\n pd.options.mode.chained_assignment = None\n\n # Filter by selected tile so that mosaicking does not impact return\n # If >1 tilename specified\n if len(list(set(all_meta.tilename))) > 1:\n all_meta = all_meta.loc[all_meta['tilename'] == self.tile]\n\n # Extract the last timestep\n if 'datetime' in all_meta.columns:\n\n self.first_timestep = min(all_meta['datetime'])\n self.last_timestep = max(all_meta['datetime'])\n\n # Sort this dataframe by datetime\n all_meta.sort_values(by=['datetime'], inplace=True)\n\n # Extract last gold\n if (all_meta['gold'] == False).all():\n\n # Nothing is 'gold' so there is no concept of last gold\n self.last_gold = None\n\n elif (all_meta['gold'] == True).all():\n\n # Last gold is the same as the last timestep\n self.last_gold = self.last_timestep\n\n elif (all_meta['gold'] == False).any():\n\n # Last gold is an update point. So if we have gappy\n # gold data (i.e. a few gold, few not gold, few gold\n # again, all not gold) then the update point is the end\n # of the batch of continuous gold.\n last_gold_idx = np.where(all_meta['gold'] == False)[0][0] - 1\n self.last_gold = list(all_meta['datetime'])[last_gold_idx]\n\n else:\n raise Exception(\"Unable to ascertain last gold\")\n\n else:\n self.last_timestep = None\n self.first_timestep = None\n self.last_gold = None\n\n # Check there is only one fill value:\n if len(all_meta['datafillvalue'].unique()) == 1:\n self.fill_value = all_meta['datafillvalue'].iloc[0]\n\n else:\n raise Exception(\"Multiple fill values for single datacube \"\n \"sub-product. This shouldn't be possible.\")\n\n # Available tiles\n self.all_subproduct_tiles = all_meta['tilename'].unique()\n\n # General Meta\n self.description = all_meta['description'].unique()[0]\n\n # Extract acquisition frequency / time step from database. Store\n # as an np.timedelta64\n frequency_string = all_meta['frequency'].unique()[0]\n fs_split = re.split('(\\D+)', frequency_string)\n self.frequency = np.timedelta64(int(fs_split[0]), fs_split[1])\n\n def get_data(self, start, stop,\n region=None, tile=None, res=None,\n country=None):\n \"\"\"\n Extract data from the datacube to the specification supplied.\n\n :param start: Start datetime for dataset\n\n :param stop: Stop datetime for dataset\n\n :param region: optional - geographic region, do not use tile too\n\n :param tile: optional - specific tile, do not use region too\n Tile or region are only needed here if not already\n given when creating the Dataset object.\n\n :param res: optional - resolution required\n If providing a country and therefore expecting zonal\n averaging, it is recommended to set this value to 0.01\n to super-sample the data and ensure each county has\n at least one pixel.\n\n :param country: optional - if country name is supplied, the returned\n dataset will have been zonally averaged according to\n county definitions within that country. The country\n name is case insensitive but must be one for which\n the system has a shapefile defining its counties.\n\n 
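Example (illustrative; the country name is a placeholder)::\n\n dataset.get_data(start, stop, country='Ireland', res=0.01)\n\n 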
:return: xarray of data\n \"\"\"\n\n # Extract the bounds information\n if region:\n bounds = get_bounds(region)\n elif self.region:\n bounds = get_bounds(self.region)\n else:\n bounds = None\n\n # Extract tile info\n if not tile and self.tile:\n tile = self.tile\n\n # Extract res info\n if not res and self.res:\n res = self.res\n\n # Fetch the data from the datacube\n data = self.conn.get_subproduct_data(product=self.product,\n subproduct=self.subproduct,\n start=start,\n stop=stop,\n bounds=bounds,\n res=res,\n tile=tile,\n country=country)\n\n # TODO Fix DQ to ALWAYS return list of xarrays\n if not country:\n # Datacube returns a list of xarrays. We only have one sub-product\n # by definition\n self.data = data[0]\n else:\n self.data = data\n\n def put(self):\n \"\"\"\n Prepare self.data and metadata and send to the datacube.\n :return:\n \"\"\"\n\n # Add product as an attribute to the data which will be written\n self.data.attrs['product'] = self.product\n\n # Process last gold. This value needs to go into the DataArray\n # attributes. The user could set them here, in the DataSet\n # attributes or in self.data.attrs. Easiest if we just catch and\n # process all possibilities.\n if 'last_gold' not in self.data[self.subproduct].attrs or \\\n self.data[self.subproduct].attrs['last_gold'] is None:\n\n if 'last_gold' not in self.data.attrs:\n self.data[self.subproduct].attrs['last_gold'] = self.last_gold\n else:\n self.data[self.subproduct].attrs['last_gold'] = \\\n self.data.attrs['last_gold']\n\n # Check that this has given us a last gold\n if not self.data[self.subproduct].attrs['last_gold']:\n raise Exception(f\"Last gold not set for {self.subproduct}\")\n\n if self.subproduct not in self.data.data_vars.keys():\n raise NameError(\"data.name must be equal to sub-product for \"\n \"ingesting into the datacube\")\n\n # Assign parent attributes to data variable\n for x in self.data.attrs:\n self.data[self.subproduct].attrs[x] = self.data.attrs[x]\n\n # Instantiate the datacube connector\n conn = Connect()\n\n # Put the data into the datacube\n conn.put_subproduct_data(data=self.data)\n\n def update(self, script, params=None):\n \"\"\"\n Update this dataset using the update method in the script\n supplied. Following the calculation, re-initialise from the\n DataCube to update the metadata.\n\n :param script: The python script for updating this dataset\n :param params: A dictionary of keyword arguments to be passed\n to the update method\n\n :return:\n \"\"\"\n\n # Run the update script with the appropriate parameters\n if params:\n script.update(**params)\n else:\n script.update()\n\n # Re-initialise dataset information after update\n self.__init__(self.product, self.subproduct, region=self.region,\n tile=self.tile)\n\n def calculate_timesteps(self):\n \"\"\"\n Calculate the time steps available, given the frequency of the\n dataset (as recorded in the sub-product table) and the first and\n last time steps.\n\n NOTE: This method calculates ideal timesteps, rather than\n retrieving the actual timesteps of the data. This method cannot\n know about any data gaps.\n\n 
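(For example, a frequency stored as np.timedelta64(1, 'D') prints as\n '1 days', which becomes pd.DateOffset(days=1) below.)\n\n 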
:return:\n \"\"\"\n\n # split the numpy timedelta into its component parts (e.g.\n # ['year', 1])\n bf_fq_vals = self.frequency.__str__().split(' ')\n\n # Create a pandas DateOffset object which represents this\n # frequency\n frequency = pd.DateOffset(**{bf_fq_vals[1]: int(bf_fq_vals[0])})\n\n # Generate an array, using this as the step\n if self.first_timestep:\n timesteps = pd.date_range(self.first_timestep,\n self.last_timestep,\n freq=frequency)\n\n self.timesteps = timesteps.values\n\n else:\n self.timesteps = None\n","sub_path":"DQTools/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":14053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"19542155","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime as dt\nimport tensorflow as tf\n\nfrom envs.random_env import RandomEnv\nfrom envs.normalize_env import NormalizeEnv\nfrom naf2 import NAF2\n\ntf.get_logger().setLevel('ERROR')\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n\nrandom_seed = 123\n\nif __name__ == '__main__':\n n_obs = 5\n n_act = 5\n model_name = f'NAF2_{n_obs}x{n_act}_{dt.strftime(dt.now(), \"%m%d%y_%H%M\")}'\n model_dir = os.path.join('models', model_name)\n log_dir = os.path.join('logs', model_name)\n\n rm = np.load(os.path.join('envs', 'random_env_rms', f'random_env_{n_obs}x{n_act}.npy'))\n\n env = NormalizeEnv(RandomEnv(n_obs, n_act, rm))\n eval_env = NormalizeEnv(RandomEnv(n_obs, n_act, rm))\n\n training_info = dict(polyak=0.999,\n batch_size=100,\n steps_per_batch=10,\n epochs=1,\n learning_rate=1e-3,\n discount=0.9999)\n nafnet_info = dict(hidden_sizes=[100, 100],\n activation=tf.nn.tanh,\n kernel_initializer=tf.random_normal_initializer(0, 0.05, seed=random_seed))\n eval_info = dict(eval_env=eval_env,\n frequency=100,\n nb_episodes=3,\n max_ep_steps=50)\n\n # linearly decaying noise function\n noise_episode_thresh = 40\n noise_fn = lambda act, i: act + np.random.randn(n_act) * max(1 - i/noise_episode_thresh, 0)\n agent = NAF2(env=env,\n buffer_size=int(5e3),\n train_every=1,\n training_info=training_info,\n eval_info=eval_info,\n save_frequency=1000,\n log_frequency=10,\n directory=model_dir,\n tb_log=log_dir,\n # q_smoothing_sigma=0.02,\n q_smoothing_sigma=0.02,\n q_smoothing_clip=0.05,\n nafnet_info=nafnet_info,\n noise_fn=noise_fn)\n\n try:\n agent.training(nb_steps=int(5e3+1), max_ep_steps=50, warm_up_steps=200, initial_episode_length=5)\n except KeyboardInterrupt:\n print('exiting')","sub_path":"NAF2/naf2_limits.py","file_name":"naf2_limits.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"639593961","text":"\n\nclass Solution:\n \n \n #approach 1: backtracking\n #we try every single jump pattern that takes us from the first position to the last. We start from the first position and jump to every index that is reachable. We repeat the process until the last index is reached. When stuck, backtrack.\n 
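#note: plain backtracking like this takes exponential time in the worst case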
\n\n def canJumpFromPosition(self, position, nums):\n if position == len(nums) - 1:\n return True\n \n furthest = min(position + nums[position], len(nums) - 1)\n \n # furthest is itself reachable, so include it in the range\n for nextposition in range(position + 1, furthest + 1):\n if self.canJumpFromPosition(nextposition, nums):\n return True\n return False\n \n def canJump(self, nums):\n return self.canJumpFromPosition(0, nums)\n \n \n \n #approach 2:\n #solve it as a graph problem\n #using dfs\n \n def canJump(self, nums):\n\n visited , stack = set(), [0]\n \n while stack:\n position = stack.pop()\n print(position)\n if position >= len(nums) - 1:\n return True\n \n maxposition = min(position + nums[position], len(nums)-1)\n\n \n for i in range(position+1, maxposition+1):\n if i not in visited:\n visited.add(i)\n stack.append(i)\n \n return False\n\n \n \n \n","sub_path":"55. Jump Game.py","file_name":"55. Jump Game.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"317464344","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait# explicit wait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport json\n\nclass Douyu():\n def __init__(self):\n self.driver = webdriver.PhantomJS(executable_path=r\"E:\\phantomjs\\bin\\phantomjs.exe\")\n self.driver.get(\"https://www.douyu.com/directory/all\")# request the Douyu front page\n time.sleep(5)\n\n def get_content(self):\n \"\"\"\n Fetch live-room info from the Douyu front page\n \"\"\"\n try:\n element = WebDriverWait(self.driver,10).until(\n # keep polling until the target element appears on the page\n EC.presence_of_element_located((By.XPATH,'//section[@class=\"layout-Container\"]'))\n )\n data_list = self.driver.find_elements(By.XPATH,'//section[@class=\"layout-Container\"]//ul[@class=\"layout-Cover-list\"]')# get the list elements containing the data we need\n all_data = []# stores all the extracted records\n for data in data_list:\n all_info = data.find_elements(By.XPATH,'./li')\n for info in all_info:\n item = {}# stores the info for a single live room\n item['img_url'] = info.find_element(By.XPATH,'.//img[contains(@class,\"DyImg-content\")]').get_attribute('src')# live-room cover image URL\n item['category'] = info.find_element(By.XPATH,'.//div[@class=\"DyListCover-info\"][1]/span').text# live-room category\n item['title'] = info.find_element(By.XPATH,'.//div[@class=\"DyListCover-info\"][1]/h3').text# live-room title\n item['hot'] = info.find_element(By.XPATH,'.//div[@class=\"DyListCover-info\"][2]/span').text# live-room popularity\n item['name'] = info.find_element(By.XPATH,'.//div[@class=\"DyListCover-info\"][2]/h2').text# streamer's name\n all_data.append(item)\n # print(all_data)\n except Exception as e:\n print(e)\n return all_data\n \n def save_content(self,content):\n \"\"\"\n Save the data as JSON\n \"\"\"\n with open(\"douyu.json\",\"a\",encoding='utf-8') as f:\n json.dump(content,f,ensure_ascii=False,indent=2)\n \n def run(self):\n \"\"\"\n Run the scraper\n \"\"\"\n content = self.get_content()\n self.save_content(content)\n for i in range(2):\n # check whether there is a next page (loop over at most a few pages)\n if self.driver.find_element(By.CLASS_NAME,'dy-Pagination-item-custom'):\n # a next-page button exists, click through to it\n self.driver.find_element(By.CLASS_NAME,'dy-Pagination-item-custom').click()\n content = self.get_content()# fetch the next page's data\n self.save_content(content)# save the next page's data\n\n def __del__(self):\n self.driver.close()\n\nif __name__ == \"__main__\":\n douyu = Douyu()\n douyu.run()","sub_path":"爬虫相关/斗鱼爬取/douyu.py","file_name":"douyu.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"599076772","text":"class Pure_16_Game:\n\n 
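# A strategy is encoded as a 16-bit integer: entries 0-7 hold the move\n # (1 = cooperate, 0 = defect) for each of the 8 states below, and entries\n # 8-15 hold the game preference per state (reading inferred from\n # mc_estimate, which indexes s[state] and s[state + 8]).\n 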
# Calculating Game Transitions, specific payoffs between strategies.\n\n # 8 states, ranging from 0 to 7, corresponding to 1CC to 2DD.\n # Strategies in [0,1]^16\n\n max_num_strategies = 2**16\n\n states = {\n (1,1,1): 0, # 1CC\n (1,1,0): 1, # 1CD\n (1,0,1): 2, # 1DC\n (1,0,0): 3, # 1DD\n\n (2,1,1): 4, # 2CC\n (2,1,0): 5, # 2CD\n (2,0,1): 6, # 2DC\n (2,0,0): 7, # 2DD\n }\n\n def __init__(self, c, b1, b2):\n self.b1 = b1; # benefit coop in Game 1\n self.b2 = b2; # benefit coop in Game 2\n self.c = c; # cost to coop in both games\n\n #player1, player2 payoffs for outcomes 1CC, ..., 1DD, 2CC, ..., 2DD\n self.p1_payoffs = [b1-c, -c, b1, 0, b2-c, -c, b2, 0];\n self.p2_payoffs = [b1-c, b1, -c, 0, b2-c, b2, -c, 0];\n\n\n @staticmethod\n def to_strategy(num):\n # pure strategies: each entry is an exact 0 or 1\n return [int(x) for x in format(num, '016b')]\n\n @classmethod\n def strat_to_str(cls, num):\n arr = cls.to_strategy(num)\n return str(arr[0:4]) + \",\" + \\\n str(arr[4:8]) + \". Transition: \" + \\\n str(arr[8:12]) + \",\" + \\\n str(arr[12:16])\n\n def mc_estimate(self, s1, s2, n = 20, initial_state = 0):\n '''\n Return avg payoffs when s1 plays s2, plus the CC and game-1 rates\n '''\n s1_total_payoff = 0.0\n s2_total_payoff = 0.0\n\n cc_rate = 0.0\n game_1_rate = 0.0\n\n prev_state = initial_state\n\n s1 = Pure_16_Game.to_strategy(s1)\n s2 = Pure_16_Game.to_strategy(s2)\n\n for i in range(n):\n s1_move, s1_game_pref = s1[prev_state], s1[prev_state + 8]\n s2_move, s2_game_pref = s2[prev_state], s2[prev_state + 8]\n\n curr_game = 1 if s1_game_pref and s2_game_pref else 2\n curr_state = Pure_16_Game.states[(curr_game, s1_move, s2_move)]\n\n s1_total_payoff += self.p1_payoffs[curr_state]\n s2_total_payoff += self.p2_payoffs[curr_state]\n\n # mutual cooperation = 1CC or 2CC\n if curr_state % 4 == 0:\n cc_rate += 1\n\n if curr_game == 1:\n game_1_rate += 1\n\n # advance the chain to the state that was just played\n prev_state = curr_state\n\n # return s1 avg payoff, s2 avg payoff, avg CC rate, game-1 rate\n return (s1_total_payoff/n, s2_total_payoff/n, cc_rate/n, game_1_rate/n)\n\n\n def q_estimate(self, s1, s2, initial_state = 0):\n pass\n","sub_path":"code/old/mc_pure_16_game_class.py","file_name":"mc_pure_16_game_class.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"170759280","text":"from typing import List\n\n\nclass Solution:\n\n def __init__(self):\n self.result = []\n\n def letterCasePermutation(self, S: str) -> List[str]:\n \"\"\"\n https://leetcode.com/problems/letter-case-permutation/\n TimeComplexity: O(2^n)\n SpaceComplexity: O(n) Recursive Stack\n 'n' is the length of the string S.\n \"\"\"\n if not S:\n return self.result\n self._search(list(S), 0)\n return self.result\n\n def _search(self, chs: list, idx: int):\n # base case\n if idx == len(chs):\n self.result.append(''.join(chs))\n return\n\n # logic\n self._search(chs, idx + 1)\n if chs[idx].isalpha():\n old_letter = self._letter_toggle(chs, idx)\n self._search(chs, idx + 1)\n # backtrack\n chs[idx] = old_letter\n\n # toggle chs[i], return the old letter for future undo\n def _letter_toggle(self, chs: list, idx: int):\n res = chs[idx]\n if chs[idx].isupper():\n chs[idx] = chs[idx].lower()\n else:\n chs[idx] = chs[idx].upper()\n return res\n\n\nif __name__ == '__main__':\n h = Solution()\n print(h.letterCasePermutation(\"abcd\"))\n","sub_path":"784_letter_case_permutation.py","file_name":"784_letter_case_permutation.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"364595149","text":"#Linear search through list to find number\n#len(nums) = n\n\ndef do_linear_search(nums, target):\n for index, num in enumerate(nums):\n if num == target:\n print(\"Found your number at index \" + str(index))\n return num\n\n print(\"Sorry, didn't find your number\")\n return -1\n\ntarget = int(input(\"What number are you looking for? \"))\nnums = range(0, 1000, 2)\n\ndo_linear_search(nums, target)\n","sub_path":"Misc_Practice/linear_search.py","file_name":"linear_search.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"68566817","text":"import pymongo\r\n\r\n\r\n#db_url = 'mongodb://localhost:27017'\r\nclient = pymongo.MongoClient('mongodb://localhost:27017/')\r\n\r\n\r\n# this line for creating a new db or using an existing one\r\ndatabase = client[\"PyDB_local\"]\r\n# Important: In MongoDB, a database is not created until it gets content (at least one collection)!\r\n\r\n# get the wanted collection inside this database\r\n# creating a new collection or using an existing one\r\ncollection = database[\"employees\"]\r\n\r\n\r\n# 1\r\n\r\n# document\r\ndoc = {\r\n \"first\": \"Lee\",\r\n \"last\": \"Woo\",\r\n \"dob\": \"19/09/1984\",\r\n \"gender\": \"f\",\r\n \"hair_colour\": \"black\",\r\n \"occupation\": \"designer\",\r\n \"nationality\": \"American\"\r\n}\r\n\r\n# using method insert_one()\r\ncollection.insert_one(doc)\r\n\r\n\r\n# 2\r\n\r\n# my_list = [ element1, element2, element3]\r\n# The following array \"new_docs\" has 4 elements\r\nnew_docs = [\r\n {\r\n \"first\": \"Leen\",\r\n \"last\": \"cho\",\r\n \"dob\": \"19/09/1984\",\r\n \"gender\": \"f\",\r\n \"hair_colour\": \"black\",\r\n \"occupation\": \"designer\",\r\n \"nationality\": \"USA\"\r\n },\r\n {\r\n \"first\": \"Nick\",\r\n \"last\": \"Fog\",\r\n \"dob\": \"19/09/1974\",\r\n \"gender\": \"m\",\r\n \"hair_colour\": \"black\",\r\n \"occupation\": \"programmer\",\r\n \"nationality\": \"USA\"\r\n },\r\n {\r\n \"first\": \"Jen\",\r\n \"last\": \"Tea\",\r\n \"dob\": \"19/09/1977\",\r\n \"gender\": \"f\",\r\n \"hair_colour\": \"brown\",\r\n \"occupation\": \"dba\",\r\n \"nationality\": \"China\"\r\n },\r\n {\r\n \"sky\": \"this\",\r\n \"color\": \"blue\",\r\n \"when\": \"rightNow\",\r\n \"becauseTheNumberIs\": 1010\r\n }\r\n]\r\n\r\n# use the insert_many() method\r\ncollection.insert_many(new_docs)\r\n\r\nemployees = collection.find({})\r\nfor employee in employees:\r\n print(employee)\r\n","sub_path":"py-mongo2-local.py","file_name":"py-mongo2-local.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"144337522","text":"import platform\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\n\n\nos = platform.system()\n\nif os == \"Darwin\":\n plt.rc('font', family=\"AppleGothic\")\n\nelse:\n fe = fm.FontEntry(\n fname=\"pykrx/NanumBarunGothic.ttf\",\n name='NanumBarunGothic'\n )\n fm.fontManager.ttflist.insert(0, fe)\n plt.rc('font', family=fe.name)\n\nplt.rcParams['axes.unicode_minus'] = False\n","sub_path":"pykrx/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"401754434","text":"# coding=utf-8\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n__all__ = ['ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101']\n\n\ndef 
conv_kernel_3(in_channels, out_channels, conv_type=\"2D\", stride=1):\n if conv_type == \"2D\":\n return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)\n elif conv_type == \"3D\":\n return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\ndef conv_kernel_1(in_channels, out_channels, conv_type=\"2D\", stride=1):\n if conv_type == \"2D\":\n return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)\n elif conv_type == \"3D\":\n return nn.Conv3d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n\n def __init__(self, inplanes, planes, conv_type=\"2D\", stride=1):\n super(BasicBlock, self).__init__()\n self.relu = nn.ReLU(inplace=True)\n self.bn0 = nn.BatchNorm2d(inplanes) if conv_type == \"2D\" else nn.BatchNorm3d(inplanes)\n self.conv1 = conv_kernel_3(inplanes, planes, conv_type, stride)\n self.bn1 = nn.BatchNorm2d(planes) if conv_type == \"2D\" else nn.BatchNorm3d(planes)\n self.conv2 = conv_kernel_3(planes, planes, conv_type)\n self.shortcut = conv_kernel_1(inplanes, planes, conv_type, stride)\n\n def forward(self, x):\n out = self.bn0(x)\n out = self.relu(out)\n\n out = self.conv1(out)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n out += self.shortcut(x)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n\n def __init__(self, inplanes, planes, conv_type=\"2D\", stride=1):\n super(Bottleneck, self).__init__()\n self.relu = nn.ReLU(inplace=True)\n self.bn0 = nn.BatchNorm2d(inplanes) if conv_type == \"2D\" else nn.BatchNorm3d(inplanes)\n self.conv1 = conv_kernel_1(inplanes, planes // 4, conv_type)\n self.bn1 = nn.BatchNorm2d(planes // 4) if conv_type == \"2D\" else nn.BatchNorm3d(planes // 4)\n self.conv2 = conv_kernel_3(planes // 4, planes // 4, conv_type, stride)\n self.bn2 = nn.BatchNorm2d(planes // 4) if conv_type == \"2D\" else nn.BatchNorm3d(planes // 4)\n self.conv3 = conv_kernel_1(planes // 4, planes, conv_type)\n self.shortcut = conv_kernel_1(inplanes, planes, conv_type, stride)\n\n def forward(self, x):\n out = self.bn0(x)\n out = self.relu(out)\n\n out = self.conv1(out)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n\n out += self.shortcut(x)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, model_type=\"2D\", image_channel=1, num_classes=2):\n self.in_channel = 64\n super(ResNet, self).__init__()\n\n self.conv1 = nn.Conv2d(image_channel, 64, kernel_size=7, stride=2, padding=3, bias=False) if model_type == \"2D\"\\\n else nn.Conv3d(image_channel, 64, kernel_size=7, stride=2, padding=3, bias=False)\n\n self.layer1 = self._make_layer(block, 64, layers[0], model_type, stride=2)\n self.layer2 = self._make_layer(block, 128, layers[1], model_type, stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], model_type, stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], model_type, stride=2)\n\n self.global_avg_pool = nn.AdaptiveAvgPool2d(1) if model_type == \"2D\" else nn.AdaptiveAvgPool3d(1)\n self.fc = nn.Linear(512, num_classes)\n\n self.initialize_weights()\n\n def initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, nonlinearity='relu')\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d):\n 
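# standard BN init: scale (weight) to 1, shift (bias) to 0\n 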
m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def _make_layer(self, block, channels, num_blocks, model_type, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_channel, channels, model_type, stride))\n self.in_channel = channels\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.global_avg_pool(x)\n\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet10(**kwargs):\n \"\"\"Constructs a ResNet-10 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model\n\n\ndef resnet18(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet101(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\n\nif __name__ == '__main__':\n data = torch.randn([1, 1, 64, 64, 64]).float().cuda()\n model = resnet18(model_type=\"3D\").cuda()\n\n with torch.no_grad():\n out = model(data)\n\n out = F.softmax(out, dim=1)\n print(out)","sub_path":"Models/segmentation_models/unet/backbone.py","file_name":"backbone.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"504011797","text":"# SHEBANG GOES HERE\n\n\"\"\"\nPig Latin brute force decryption\nThis program will brute force the\nEuropean standard Pig Latin encryption\nand return 2 strings of the decrypted text\n\n11/07/2015 at 1:38 am\nAND DATE OF FINISHING HERE\n\nIf you have any suggestions or want to help\ncontact me at\nhttps://www.facebook.com/AiiYourBaseRBel0ngToUs\n\nThis program abides by the rules of presentation for\nPEP-8\nshown here on\nhttps://www.python.org/dev/peps/pep-0008/\n\nThis program also abides by the Unix Philosophy\n\nThe MIT License (MIT)\n\nCopyright (c) 2015 Brandon Skerritt\nGithub: https://github.com/brandonskerritt51\nTwitter: @Ofmiceandhelp\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n# enable DEVMODE to get accurate error reports and other\n# cool nifty things, only use if you are debugging\nDEVMODE = True\n\nimport logging\n# imports the logging module and creates a log file called \"_PigLatin_log.txt\"\nlogging.basicConfig(filename='_PigLatin_log.txt', level=logging.DEBUG,\n format=' %(asctime)s - %(levelname)s- %(message)s')\n\nif DEVMODE == False:\n logging.disable(logging.CRITICAL)\n# if DEVMODE is false, turn off debugging.\n\n\ndef main():\n logging.info(\"Start of Main()\")\n message = str(input(\"Enter message to decrypt here: \"))\n # asks user for string to decrypt\n logging.debug(message)\n # writes user string to the logging module\n decrypted = PigLatinDecrypt(message)\n logging.debug(decrypted)\n print(decrypted)\n close()\n # closes the program\n\ndef PigLatinDecrypt(message):\n logging.info(\"At pig latin decrypt\")\n # If the message is less than or equal to 3 characters, it's impossible to perform\n # a pig latin cipher on it unless the word was one letter long\n if len(message) <= 3:\n logging.error(\"message less than 3 characters\")\n # report that no pig latin decryption exists for this message\n # TODO can i raise an exception / error for this?\n return (\"\\nPig Latin failed. Message less than 3 characters.\")\n\n else:\n messagePIGWAY = message\n # creates second pig latin cipher message to use\n # as if you were to use Message for both\n # it'll mess up\n # TODO unless put into 2 separate functions?\n # TODO that way it can return 2 strings?\n\n messagePIGAY = message[0:len(message) - 2]\n logging.debug(\"this is the start of messagePIGAY\")\n logging.debug(messagePIGAY)\n # strips the trailing \"ay\" (the last 2 letters) off the message\n message2AY = messagePIGAY[-1]\n logging.debug(message2AY)\n # takes last letter of word and puts it into a variable\n messagePIGAY = messagePIGAY[0:len(messagePIGAY) - 1]\n logging.debug(messagePIGAY)\n # removes the last letter of the word\n message3AY = message2AY + messagePIGAY\n logging.debug(message3AY)\n # creates a variable which has the previous last letter as the first and\n # the rest of the word as the rest of it. This is one way to do Pig Latin.\n\n logging.debug(\"Message pig WAY\")\n messagePIGWAY1 = messagePIGWAY[0:len(messagePIGWAY) - 3]\n logging.debug(messagePIGWAY1)\n # strips the trailing \"way\" (the last 3 letters) off the message\n message2WAY = messagePIGWAY1\n # copies the variable\n message2WAY = message2WAY[-1]\n logging.debug(message2WAY)\n # takes last letter of word and puts it into a variable\n messagePIGWAY1 = messagePIGWAY1[0:len(messagePIGWAY1) - 1]\n logging.debug(messagePIGWAY1)\n # removes the last letter of the word\n messagepigWAY = message2WAY + messagePIGWAY1\n logging.debug(messagepigWAY)\n # creates a variable which has the previous last letter as the first and\n # the rest of the word as the rest of it. This is one way to do Pig Latin.\n\n 
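# e.g. PigLatinDecrypt(\"ellohay\") returns (\"hello\", \"oell\")\n 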
# this returns the two decryption candidates as a tuple\n return message3AY, messagepigWAY\n\ndef close():\n logging.debug(\"closing\")\n import sys\n\n sys.exit()\n\n# standard entry-point guard: run main() only when the file is executed directly\n# i do this instead of main() at the bottom so this can\n# still be imported as a module\nif __name__ == '__main__':\n main()\n","sub_path":"source/PigLatin.py","file_name":"PigLatin.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"311173253","text":"# -*- coding: utf-8 -*-\nfrom node.utils import UNSET\nfrom pkg_resources import iter_entry_points\nfrom yafowil.compat import BYTES_TYPE\nfrom yafowil.compat import IS_PY2\nfrom yafowil.compat import LONG_TYPE\nfrom yafowil.compat import STR_TYPE\nfrom yafowil.compat import UNICODE_TYPE\nimport inspect\nimport json\nimport logging\nimport re\nimport unicodedata\nimport uuid\n\n\nclass entry_point(object):\n \"\"\"Decorator for yafowil entry points.\n \"\"\"\n\n def __init__(self, order=0):\n self.order = order\n\n def __call__(self, ob):\n ob.order = self.order\n return ob\n\n\n_yafowil_plugins = None\n\n\ndef get_plugins(ns=None):\n global _yafowil_plugins\n if _yafowil_plugins is None:\n _yafowil_plugins = list()\n for ep in iter_entry_points('yafowil.plugin'):\n cb = ep.load()\n _yafowil_plugins.append((ep, cb))\n _yafowil_plugins = sorted(\n _yafowil_plugins,\n key=lambda x: getattr(x[1], 'order', 0)\n )\n for ep, cb in _yafowil_plugins:\n if ns is not None and ep.name != ns:\n continue\n yield (ep, cb)\n\n\n_plugin_names = dict()\n\n\ndef get_plugin_names(ns=None):\n if ns not in _plugin_names:\n _plugin_names[ns] = list(set(\n [ep.dist.project_name for ep, cb in get_plugins(ns=ns)]\n ))\n return _plugin_names[ns]\n\n\ndef get_example(example_name):\n for ep, cb in get_plugins(ns='example'):\n if ep.dist.project_name != example_name:\n continue\n info = cb()\n return info\n\n\ndef get_example_names():\n result = []\n for ep, cb in get_plugins(ns='example'):\n result.append(ep.dist.project_name)\n return result\n\n\ndef vocabulary(definition):\n \"\"\"Convert different kinds of input into a list of bi-tuples, both strings.\n \"\"\"\n if callable(definition):\n definition = definition()\n if isinstance(definition, STR_TYPE):\n return [(definition, definition), ]\n # dict-like\n if hasattr(definition, '__getitem__') and hasattr(definition, 'keys'):\n return [(_, definition[_]) for _ in definition.keys()]\n # iterable\n if hasattr(definition, '__iter__'):\n new_vocab = []\n for entry in definition:\n if isinstance(entry, STR_TYPE):\n # entry is a string\n new_vocab.append((entry, entry))\n elif hasattr(entry, '__iter__'):\n # entry is a sequence\n parts = [_ for _ in entry]\n if len(parts) > 1:\n # take the first two parts and skip the others\n new_vocab.append(entry[0:2])\n else:\n # rare case, inner has one value only\n new_vocab.append((entry[0], entry[0]))\n return new_vocab\n return definition\n\n\nclass Tag(object):\n\n def __init__(self, translate):\n self.translate = translate\n self.encoding = 'utf-8'\n\n def __call__(self, tag_name, *inners, **attributes):\n \"\"\"Generates some xml/html tag.\n\n ``tag_name``\n name of a valid tag\n\n ``inners``\n inner content of the tag. 
If empty a closed tag is generated\n\n ``attributes``\n attributes of the tag, leading or trailing ``_`` underscores are\n omitted from keywords.\n\n Example::\n\n >>> tag('p', 'Lorem Ipsum.', u'Hello World!',\n ... class_='fancy', id='2f5b8a234ff')\n

Lorem Ipsum. Hello World.

\n\n \"\"\"\n cl = list()\n for key, value in attributes.items():\n if value is None or value is UNSET:\n continue\n value = self.translate(value)\n if not isinstance(value, UNICODE_TYPE):\n # XXX: value = str(value).decode(self.encoding)\n if isinstance(value, bytes):\n value = value.decode(self.encoding)\n else:\n value = str(value)\n cl.append((key.strip('_'), value))\n attributes = u''\n # NOTE: data attributes are enclosed in single quotes, since this makes\n # passing json lists possible. jQuery only recognizes JSON lists in\n # data attributes as such, if they are enclosed in single quotes,\n # because the JSON standard requires string values to be enclosed in\n # double quotes.\n if cl:\n attributes = list()\n for attr in cl:\n if 'data-' in attr[0]:\n attributes.append(u\"{0}='{1}'\".format(*attr))\n else:\n attributes.append(u'{0}=\"{1}\"'.format(*attr))\n attributes = u' {0}'.format(u' '.join(sorted(attributes)))\n cl = list()\n for inner in inners:\n inner = self.translate(inner)\n if not isinstance(inner, UNICODE_TYPE):\n # XXX: inner = str(inner).decode(self.encoding)\n if isinstance(inner, bytes):\n inner = inner.decode(self.encoding)\n else:\n inner = str(inner)\n cl.append(inner)\n if not cl:\n return u'<{name}{attrs} />'.format(**{\n 'name': tag_name,\n 'attrs': attributes,\n })\n return u'<{name}{attrs}>{value}'.format(**{\n 'name': tag_name,\n 'attrs': attributes,\n 'value': u''.join(i for i in cl),\n })\n\n\n# Deprecation message\ndef _deprecated_null_localization(msg):\n logging.warning(\n \"Deprecated usage of 'yafowil.utils.tag', please \"\n \"use the tag factory on RuntimeData instead.\"\n )\n return msg\n\n\ntag = Tag(_deprecated_null_localization)\n\n\nclass managedprops(object):\n\n def __init__(self, *args):\n self.__yafowil_managed_props__ = args\n\n def __call__(self, func):\n func.__yafowil_managed_props__ = self.__yafowil_managed_props__\n return func\n\n\ndef cssid(widget, prefix, postfix=None):\n if widget.attrs.get('structural', False):\n return None\n path = widget.dottedpath.replace(u'.', u'-')\n cssid = u'{0}-{1}'.format(prefix, path)\n if postfix is not None:\n cssid = u'{0}-{1}'.format(cssid, postfix)\n return unicodedata.normalize('NFKD', cssid)\\\n .encode('ASCII', 'ignore')\\\n .replace(b' ', b'_').decode()\n\n\ndef callable_value(value, widget, data):\n \"\"\"Call value if callable with widget and data as arguments and return\n the callables return value. If value not callable, return as is.\n As B/C mode, if callable accepts no arguments, try to call without\n arguments.\n \"\"\"\n if not callable(value):\n return value\n try:\n # assume property factory signature\n # XXX: use keyword arguments?\n # XXX: if callable raises TypeError we get non clear follow up\n # errors.\n return value(widget, data)\n except TypeError:\n try:\n # assume function or class\n spec = inspect.getargspec(value)\n except TypeError:\n spec = None\n if spec is not None:\n # assume B/C property factory signature if argument specs found\n if len(spec.args) <= 1 and not spec.keywords:\n try:\n res = value()\n logging.warning(\n \"Deprecated usage of callback attributes. Please \"\n \"accept 'widget' and 'data' as arguments.\"\n )\n return res\n except TypeError:\n # XXX: raise here?\n return value\n # XXX: raise here?\n return value\n\n\ndef attr_value(key, widget, data, default=None):\n \"\"\"Return widget attribute value by key or default. 
If value is callable,\n its return value is used.\n \"\"\"\n return callable_value(\n widget.attrs.get(key, default),\n widget,\n data\n )\n\n\ndef as_data_attrs(data):\n \"\"\"Convert either dict or list of (key, value) pairs into dict containing\n HTML5 data attributes.\n\n Keys get prefixed with ``data-``, ``CamelCase`` gets converted\n to ``camel-case``.\n\n Values are ignored if ``None`` or ``UNSET``. If value is string, it's\n taken as is, otherwise it's assumed that value is list or dict and gets\n dumped as JSON string.\n\n :param data: Either dict or list of (key, value) pairs.\n :return: Dict containing HTML5 data attributes\n \"\"\"\n data_attrs = {}\n # no data passed, return empty dict\n if not data:\n return data_attrs\n # expect dict if no list\n if not isinstance(data, list):\n data = data.items()\n for key, val in data:\n # check against None and UNSET separately to please coverage tests\n # rnix, 2014-04-30\n if val is None:\n continue\n if val is UNSET:\n continue\n # convert value to JSON dump if no string.\n if not isinstance(val, STR_TYPE):\n # also remove leading and trailing double quotes,\n # they are not needed for data-attributes\n val = json.dumps(val).strip('\"')\n # replace camelCase with camel-case\n key = re.sub(r'([a-z])([A-Z])', r'\g<1>-\g<2>', key).lower()\n data_attrs['data-{0}'.format(key)] = val\n return data_attrs\n\n\n# B/C: deprecate as of yafowil 2.4, remove in yafowil 3.0\ngeneric_html5_attrs = as_data_attrs\n\n\ndef data_attrs_helper(widget, data, attrs):\n \"\"\"Creates a dictionary of JSON encoded data-attributes from a list of\n attribute-keys, ready to inject into a tag-renderer as expanded keyword\n arguments.\n\n :param widget: The yafowil widget.\n :param data: The data object.\n :param attrs: A list of data-attribute keys to be used to generate the\n data attributes dictionary.\n :type attrs: list\n :returns: Dictionary with keys as data-attribute-names, prefixed with\n 'data-' and values from the widget.\n :rtype: dictionary\n\n The items in the list are the keys of the attributes for the target tag.\n Each key is prepended with 'data-'. The values are fetched from properties\n set on the widget. If a value is None, it isn't set. Other values are JSON\n encoded, which includes strings, booleans, lists, dicts.\n\n .. note::\n camelCase attribute names are automatically split on word boundaries\n and made lowercase (e.g. camel-case). Since jQuery 1.6, the keys are\n converted to camelCase again after getting them with .data().\n\n .. note::\n The Tag class encloses data-attribute values in single quotes, since the\n JSON standard requires strings to be enclosed in double-quotes. 
jQuery\n requires this or .data() can't create lists or arrays out of\n data-attribute values.\n \"\"\"\n items = [(key, attr_value(key, widget, data)) for key in attrs]\n return as_data_attrs(items)\n\n\ncss_managed_props = [\n 'class', 'class_add',\n 'error_class', 'error_class_default',\n 'required_class', 'required_class_default',\n]\n\n\ndef cssclasses(widget, data, classattr='class', additional=[]):\n _classes = list()\n attrs = widget.attrs\n if attrs['error_class'] and data.errors:\n if isinstance(attrs['error_class'], STR_TYPE):\n _classes.append(attrs['error_class'])\n else:\n _classes.append(attrs['error_class_default'])\n if attrs['required_class'] and attrs['required']:\n if isinstance(attrs['required_class'], STR_TYPE):\n _classes.append(attrs['required_class'])\n else:\n _classes.append(attrs['required_class_default'])\n if attrs[classattr]:\n _classes += attr_value(classattr, widget, data).split()\n if attrs['class_add']:\n _classes += attr_value('class_add', widget, data).split()\n additional = [add for add in additional if add]\n _classes += additional\n return _classes and ' '.join(sorted(_classes)) or None\n\n\nclass EmptyValue(object):\n \"\"\"Used to identify empty values in conjunction with data type conversion.\n \"\"\"\n\n def __nonzero__(self):\n return False\n\n def __str__(self):\n return ''\n\n def __len__(self):\n return 0\n\n def __repr__(self):\n return ''\n\n\nEMPTY_VALUE = EmptyValue()\n\n\nDATATYPE_PRECONVERTERS = {\n float: lambda x: isinstance(x, STR_TYPE) and x.replace(',', '.') or x\n}\n# B/C\nDATATYPE_PRECONVERTERS['float'] = DATATYPE_PRECONVERTERS[float]\nDATATYPE_CONVERTERS = {\n 'str': BYTES_TYPE,\n 'unicode': UNICODE_TYPE,\n 'int': int,\n 'integer': int,\n 'long': LONG_TYPE,\n 'float': float,\n 'uuid': uuid.UUID\n}\n\n\ndef convert_value_to_datatype(value, datatype, empty_value=EMPTY_VALUE):\n \"\"\"Convert given value to datatype.\n\n Datatype is either a callable or a string out of ``'str'``, ``'unicode'``,\n ``'int'``, ``'integer'``, ``'long'``, ``'float'`` or ``'uuid'``\n\n If value is ``UNSET``, return ``UNSET``, regardless of given datatype.\n\n If value is ``EMPTY_VALUE``, return ``empty_value``, which defaults to\n ``EMPTY_VALUE`` marker.\n\n If value is ``None`` or ``''``, return ``empty_value``, which defaults to\n ``EMPTY_VALUE`` marker. 
Be aware that the empty value marker is returned even\n for the ``str`` datatype, to provide consistent behavior.\n\n Converter callables must raise one of the following exceptions if\n conversion fails:\n\n * ``ValueError``\n * ``UnicodeDecodeError``\n * ``UnicodeEncodeError``\n \"\"\"\n if value is UNSET:\n return UNSET\n if value is EMPTY_VALUE:\n return empty_value\n if value in [None, '']:\n return empty_value\n if isinstance(datatype, STR_TYPE):\n converter = DATATYPE_CONVERTERS[datatype]\n else:\n converter = datatype\n try:\n if isinstance(value, converter):\n return value\n except TypeError:\n # converter is an instance of a class or a function\n pass\n preconverter = DATATYPE_PRECONVERTERS.get(datatype)\n if preconverter:\n value = preconverter(value)\n # special case bytes or str builtin type in python 3\n # uses the ascii codec to emulate the same behavior as converting with python 2\n # this is supposed to change in the future\n if not IS_PY2 and converter in (bytes, str):\n return converter(value, 'ascii')\n return converter(value)\n\n\ndef convert_values_to_datatype(value, datatype, empty_value=EMPTY_VALUE):\n if isinstance(value, list):\n res = list()\n for item in value:\n res.append(convert_value_to_datatype(\n item,\n datatype,\n empty_value=empty_value\n ))\n return res\n return convert_value_to_datatype(value, datatype, empty_value=empty_value)\n","sub_path":"src/yafowil/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"14650099","text":"#!/usr/bin/env python\n#coding:utf-8\n\n\"\"\"\n\nNAME:sorted练习.py\nAuthor:YuTao\nContact:616637861@qq.com\nDate:2018-05-06\nDesc: sort the digits of the user's input, placing zeros last\n\n\"\"\"\ns = []\ndef count(item):\n if item == 0:\n return 1\n else:\n return 0\n\n\ndef main():\n g = input(\"Please enter: \")\n # convert every character of the input to an int before sorting\n for i in g:\n s.append(int(i))\n print(sorted(s, key=count))\n\n\nmain()\n","sub_path":"Python_training/day6/myfile/day_06/sorted练习.py","file_name":"sorted练习.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"405482515","text":"class Callout:\n\n class CSSClasses:\n CALLOUT = \"callout\"\n CALLOUT_WITH_BUTTON = \"callout--with-button\"\n CALLOUT_STATES = {\n \"success\": \"callout--success\",\n \"warn\": \"callout--warn\",\n }\n\n def __init__(self, state=None, heading=None, text=None, button=None):\n \"\"\"\n Creates a Callout object that can be used to create callout blocks in templates.\n\n :param state: STRING - (\"success\", \"warn\") - to apply optional styling\n :param heading: STRING - heading text to display\n :param text: STRING - description text to display\n :param button: CalloutButton - button to display\n \"\"\"\n self.state = state\n self.heading = heading\n self.text = text\n self.button = button\n\n # self.allowed_keys = {\"state\", \"heading\", \"text\", \"button\"}\n # self.__dict__.update((k, v) for k, v in args.items() if k in self.allowed_keys)\n\n @property\n def css_class(self):\n css_class = self.CSSClasses.CALLOUT\n if self.state:\n css_class += f\" {self.CSSClasses.CALLOUT_STATES.get(self.state.lower(), '')}\"\n\n if self.button and self.button.visible:\n css_class += f\" {self.CSSClasses.CALLOUT_WITH_BUTTON}\"\n\n return css_class\n\n\nclass CalloutButton:\n\n class CSSClasses:\n BUTTON = \"callout__button\"\n BUTTON_TYPES = {\n \"start\": \"callout__button--start\",\n }\n\n def __init__(self, form_action=None, 
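# Hedged illustration of the camelCase -> data-attribute key mangling performed
# by as_data_attrs in the yafowil record above (the sample key 'rowCount' is invented):
import re
key = re.sub(r'([a-z])([A-Z])', r'\g<1>-\g<2>', 'rowCount').lower()
print('data-{0}'.format(key))  # data-row-count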
form_method=\"POST\", text=None, href=None, button_type=None):\n \"\"\"\n Helper to add a form or simple button to Callout.\n \"\"\"\n self.form_action = form_action\n self.form_method = form_method\n self.text = text\n self.href = href\n self.button_type = button_type\n\n @property\n def visible(self):\n return self.form_action or self.href\n\n @property\n def button_css_class(self):\n css_class = self.CSSClasses.BUTTON\n if self.button_type:\n css_class += f\" {self.CSSClasses.BUTTON_TYPES.get(self.button_type.lower(), '')}\"\n return css_class\n","sub_path":"partials/callout.py","file_name":"callout.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"78696239","text":"# Copyright lowRISC contributors.\n# Licensed under the Apache License, Version 2.0, see LICENSE for details.\n# SPDX-License-Identifier: Apache-2.0\n\nCONST = struct(\n ROM_EXT = 0x4552544f,\n OWNER = 0x3042544f,\n TRUE = 0x739,\n FALSE = 0x1d4,\n)\n\n_DEFAULT_USAGE = 0xa5a5a5a5\n_SEL_DEVICE_ID = 1\n_SEL_MANUF_STATE_CREATOR = (1 << 8)\n_SEL_MANUF_STATE_OWNER = (1 << 9)\n_SEL_LIFE_CYCLE_STATE = (1 << 10)\n_HEXSTR = \"0123456789abcdef\"\n\ndef _hex(i):\n # First \"cast\" i to a 32-bit unsigned int\n i &= 0xFFFFFFFF\n r = \"0x\"\n r += _HEXSTR[(i >> 28) & 0xF]\n r += _HEXSTR[(i >> 24) & 0xF]\n r += _HEXSTR[(i >> 20) & 0xF]\n r += _HEXSTR[(i >> 16) & 0xF]\n r += _HEXSTR[(i >> 12) & 0xF]\n r += _HEXSTR[(i >> 8) & 0xF]\n r += _HEXSTR[(i >> 4) & 0xF]\n r += _HEXSTR[(i >> 0) & 0xF]\n return r\n\ndef _manifest_impl(ctx):\n mf = {}\n\n # All the easy parameters are simple assignments\n if ctx.attr.signature:\n mf[\"signature\"] = _hex(ctx.attr.signature)\n if ctx.attr.modulus:\n mf[\"modulus\"] = _hex(ctx.attr.modulus)\n if ctx.attr.identifier:\n mf[\"identifier\"] = _hex(ctx.attr.identifier)\n if ctx.attr.length:\n mf[\"length\"] = _hex(ctx.attr.length)\n if ctx.attr.version_major:\n mf[\"version_major\"] = _hex(ctx.attr.version_major)\n if ctx.attr.version_minor:\n mf[\"version_minor\"] = _hex(ctx.attr.version_minor)\n if ctx.attr.security_version:\n mf[\"security_version\"] = _hex(ctx.attr.security_version)\n if ctx.attr.timestamp:\n mf[\"timestamp\"] = _hex(ctx.attr.timestamp)\n if ctx.attr.max_key_version:\n mf[\"max_key_version\"] = _hex(ctx.attr.max_key_version)\n if ctx.attr.code_start:\n mf[\"code_start\"] = _hex(ctx.attr.code_start)\n if ctx.attr.code_end:\n mf[\"code_end\"] = _hex(ctx.attr.code_end)\n if ctx.attr.entry_point:\n mf[\"entry_point\"] = _hex(ctx.attr.entry_point)\n\n # Address Translation is a bool, but encoded as an int so we can have\n # a special value mean \"unset\" and so we can set to non-standard values\n # for testing.\n if ctx.attr.address_translation:\n mf[\"address_translation\"] = _hex(ctx.attr.address_translation)\n\n # The binding_value, if provided, must be exactly 8 words.\n if ctx.attr.binding_value:\n if len(ctx.attr.binding_value) != 8:\n fail(\"The binding_value must be exactly 8 words.\")\n mf[\"binding_value\"] = _hex(ctx.attr.binding_value)\n\n # The selector_bits are set based on the values of the usage_constraints.\n uc = {}\n selector_bits = 0\n device_id = list(ctx.attr.device_id)\n if len(device_id) > 8:\n fail(\"The device_id must be 8 words or fewer.\")\n\n # Extend the device_id to 8 words, then set the selector_bits for each\n # non-default word.\n if len(device_id) < 8:\n device_id.extend([_DEFAULT_USAGE] * (8 - len(device_id)))\n for i, d in enumerate(device_id):\n if d != 
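# Hedged usage sketch for the Callout helpers above (the import path is guessed
# from the record's sub_path; all argument values are invented):
from partials.callout import Callout, CalloutButton
button = CalloutButton(href='/start', text='Start now', button_type='start')
print(Callout(state='success', heading='Done', button=button).css_class)
# expected: 'callout callout--success callout--with-button'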
_DEFAULT_USAGE:\n selector_bits |= _SEL_DEVICE_ID << i\n device_id[i] = _hex(d)\n uc[\"device_id\"] = device_id\n\n # Set the selector bits for the remaining non-default values.\n if ctx.attr.manuf_state_creator:\n uc[\"manuf_state_creator\"] = _hex(ctx.attr.manuf_state_creator)\n selector_bits |= _SEL_MANUF_STATE_CREATOR\n else:\n uc[\"manuf_state_creator\"] = _hex(_DEFAULT_USAGE)\n\n if ctx.attr.manuf_state_owner:\n uc[\"manuf_state_owner\"] = _hex(ctx.attr.manuf_state_owner)\n selector_bits |= _SEL_MANUF_STATE_OWNER\n else:\n uc[\"manuf_state_owner\"] = _hex(_DEFAULT_USAGE)\n\n if ctx.attr.life_cycle_state:\n uc[\"life_cycle_state\"] = _hex(ctx.attr.life_cycle_state)\n selector_bits |= _SEL_LIFE_CYCLE_STATE\n else:\n uc[\"life_cycle_state\"] = _hex(_DEFAULT_USAGE)\n\n # If the user supplied selector_bits, check if they make sense.\n if ctx.attr.selector_bits:\n # If they don't match, fail unless explicitly permitted to set a\n # bad value.\n if ctx.attr.selector_bits != selector_bits and ctx.attr.selector_mismatch_is_failure:\n fail(\"User provided selector_bits don't match computed selector_bits\")\n uc[\"selector_bits\"] = _hex(ctx.attr.selector_bits)\n else:\n uc[\"selector_bits\"] = _hex(selector_bits)\n\n mf[\"usage_constraints\"] = uc\n\n file = ctx.actions.declare_file(\"{}.json\".format(ctx.attr.name))\n ctx.actions.write(file, json.encode_indent(mf))\n return DefaultInfo(\n files = depset([file]),\n data_runfiles = ctx.runfiles(files = [file]),\n )\n\nmanifest = rule(\n implementation = _manifest_impl,\n attrs = {\n \"signature\": attr.string(doc = \"Image signature as a hex-encoded string\"),\n \"modulus\": attr.string(doc = \"Signing key modulus as a hex-encoded string\"),\n \"selector_bits\": attr.int(doc = \"Usage constraint selector bits\"),\n \"selector_mismatch_is_failure\": attr.bool(default = True, doc = \"A mismatch in computed selector bits is a failure\"),\n \"device_id\": attr.int_list(doc = \"Usage constraint device ID\"),\n \"manuf_state_creator\": attr.int(doc = \"Usage constraint for silicon creator manufacturing status\"),\n \"manuf_state_owner\": attr.int(doc = \"Usage constraint for silicon owner manufacturing status\"),\n \"life_cycle_state\": attr.int(doc = \"Usage constraint for life cycle status\"),\n \"address_translation\": attr.int(doc = \"Whether this image uses address translation\"),\n \"identifier\": attr.int(doc = \"Manifest identifier\"),\n \"length\": attr.int(doc = \"Length of this image\"),\n \"version_major\": attr.int(doc = \"Image major version\"),\n \"version_minor\": attr.int(doc = \"Image minor version\"),\n \"security_version\": attr.int(doc = \"Security version for anti-rollback protection\"),\n \"timestamp\": attr.int(doc = \"Unix timestamp of the image\"),\n \"binding_value\": attr.int_list(doc = \"Binding value used by key manager to derive secrets\"),\n \"max_key_version\": attr.int(doc = \"Maximum allowed version for keys generated at the next boot stage\"),\n \"code_start\": attr.int(doc = \"Start offset of the executable region in the image\"),\n \"code_end\": attr.int(doc = \"End offset of the executable region in the image\"),\n \"entry_point\": attr.int(doc = \"Offset of the first instruction in the image\"),\n },\n)\n","sub_path":"rules/manifest.bzl","file_name":"manifest.bzl","file_ext":"bzl","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"462540113","text":"import requests\nimport bs4\nfrom saavn_downloader import *\nimport sys\nimport 
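# The selector-bits computation from the manifest rule above, restated as plain
# Python for clarity (the device_id values are invented; 0xa5a5a5a5 is the default):
_SEL_DEVICE_ID = 1
_DEFAULT_USAGE = 0xa5a5a5a5
selector_bits = 0
for i, d in enumerate([0x11, _DEFAULT_USAGE, 0x33]):
    # each non-default word sets its corresponding selector bit
    if d != _DEFAULT_USAGE:
        selector_bits |= _SEL_DEVICE_ID << i
print(bin(selector_bits))  # 0b101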
urllib.request\nimport string\nimport eyed3\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'\n}\ndef download_album_art(url,filename):\n if not os.path.exists(filename+'.jpg'):\n print('Downloading cover art...')\n urllib.request.urlretrieve(url, filename+'.jpg')\n else:\n print('Cover art already exists')\ndef song_select(song_links):\n # n = len(songs)\n # for i,song in enumerate(songs):\n # print('{}) {} - {}'.format(i, song['title'], song['artists']))\n # sel = input('Enter the number of the song you want to download(0 to '+str(n)+')')\n # return int(sel)\n n = len(song_links) \n for i, link in enumerate(song_links):\n title = '????'\n if link.has_attr('title'):\n title = link.contents[0]\n print('{}) {}'.format(i, title))\n sel = input('Enter the number of the song you want to download(0 to '+str(n)+')')\n return int(sel)\n \ndef download(url, filename, song):\n if not os.path.exists(filename+'.mp3'): \n print('Downloading '+song['title']+'-'+song['artists'])\n urllib.request.urlretrieve(url, filename+'.mp3')\n download_album_art(song['thumbnail'], filename)\n else:\n print('File already exists. Skipping file...')\n return\n audiofile = eyed3.load(filename+'.mp3')\n if(audiofile is None):\n return\n audiofile.tag.artist = song['artists']\n audiofile.tag.album = song['album']\n audiofile.tag.title = song['title']\n audiofile.tag.release_date = song['year']\n thumbnail = open(filename+'.jpg', \"rb\").read()\n audiofile.tag.images.set(3, thumbnail,\"image/jpeg\",u\"\")\n audiofile.tag.save()\n print('Done!')\ndef get_song():\n base_path = './songs'\n base_url = 'https://www.saavn.com/search/'\n query = input('Enter search query:')\n url = base_url + query\n\n #Scraping Saavn for first result\n req = requests.get(url, headers=headers)\n soup = bs4.BeautifulSoup(req.text, \"lxml\")\n links = soup.find_all(\"a\")\n song_link = None\n song_links = []\n for link in links:\n if link.has_attr('href')and ('s/song/' in link['href']):\n song_links.append(link)\n\n #Selecting a song from the results \n song_link = song_links[song_select(song_links)]['href']\n print('Downloading from:'+song_link)\n downloader = SaavnDownloader(song_link)\n songs = downloader.get_songs()\n song = songs[0]\n #Removing '/' from file name and album and downloading the file\n download(song['url'], base_path+'/'+(song['title']).replace('/', '') , song)\ndef get_album():\n base_path = './songs'\n album_link = input('Please enter Album URL-\\n')\n album_name = input('Please enter Album Name\\n(Your album will be stored under ./songs/)\\n')\n base_path = base_path +'/'+album_name\n\n #Making Album directory if not exists\n if not os.path.exists(base_path):\n os.makedirs(base_path)\n downloader = SaavnDownloader(album_link)\n songs = downloader.get_songs()\n for song in songs:\n #Removing '/' from file name and album and downloading the file\n download(song['url'],base_path+'/'+(song['title']).replace('/', '') ,song)\ndef main():\n choice = input('Do you want to download a song - s or an album/playlist - a\\n')\n if choice.upper() == 'A' or choice.upper()=='P':\n #Album\n get_album()\n elif choice.upper() == 'S':\n #Song\n get_song()\n else:\n print('Incorrect choice.Exiting...')\n \nif __name__ == '__main__':\n main()","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"120509870","text":"import numpy as np\n\ndef 
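# Hedged standalone version of the eyed3 tagging performed in the cli.py record
# above (requires the eyed3 package; 'song.mp3' is a placeholder path):
import eyed3
audiofile = eyed3.load('song.mp3')  # returns None if the file cannot be parsed
if audiofile is not None:
    audiofile.tag.artist = 'Some Artist'
    audiofile.tag.title = 'Some Title'
    audiofile.tag.save()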
isMatrixEqual(a, b):\n # compare lengths first so the element-wise comparison cannot fail\n return len(a) == len(b) and bool(np.all(a == b))\n\nif __name__ == '__main__':\n A = np.random.randint(1, 3, 3)\n B = np.random.randint(1, 3, 3)\n print(A)\n print(B)\n\n # numpy function\n if np.allclose(A,B):\n print('True')\n\n # custom function\n if isMatrixEqual(A,B):\n print('True')","sub_path":"1/1-10_matrix_equal.py","file_name":"1-10_matrix_equal.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"21948165","text":"\"\"\"\nThis is a demo task.\n\nWrite a function:\n\ndef solution(A)\n\nthat, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.\n\nFor example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.\n\nGiven A = [1, 2, 3], the function should return 4.\n\nGiven A = [−1, −3], the function should return 1.\n\nWrite an efficient algorithm for the following assumptions:\n\nN is an integer within the range [1..100,000];\neach element of array A is an integer within the range [−1,000,000..1,000,000].\n\"\"\"\n\n\ndef solution(A):\n # put the values in a set for O(1) membership tests, then probe upwards from 1\n present = set(A)\n candidate = 1\n while candidate in present:\n candidate += 1\n return candidate\n\n\na = [1, 3, 6, 4, 1, 2]\nprint(solution(a))\n","sub_path":"exercise/arrays/smallest_positive_number.py","file_name":"smallest_positive_number.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"311518471","text":"import string\nalphabets=string.ascii_lowercase\nvalue=\"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. 
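# Quick checks for the fixed solution() in the snippet above, using the expected
# values stated in the task docstring itself:
assert solution([1, 3, 6, 4, 1, 2]) == 5
assert solution([1, 2, 3]) == 4
assert solution([-1, -3]) == 1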
lmu ynnjw ml rfc spj.\"\nnew_value=\"\"\ny=0;\nfor i in value:\n if i in alphabets:\n print ('hi')\n y=alphabets.index(i)+2\n print (y)\n if (y > 25):\n y=y-26\n i=alphabets[y]\n new_value+=i\nprint (new_value) \n","sub_path":"MoveStringByTwo.py","file_name":"MoveStringByTwo.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"207263645","text":"# April 21 2016\n# modularized the subplot tuning curves function;\n\n\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport time\nimport glob\nimport re\nimport os\nimport datetime\nimport math\ngen_fn_dir = os.path.abspath('..') + '/shared_scripts'\nsys.path.append(gen_fn_dir)\nfrom general_file_fns import load_file\n\n\ndef get_subplot_tc(session_id, tuning_curve_data_path, save_data_path):\n ''' returns a subplots of tuning curves for all the cells of a single session; (a) session_id,\n (b)data path of tuning curves, and (b) directory to save the subplot '''\n tuning_curves = load_file(tuning_curve_data_path + session_id + '_bins_29' + '.p')\n firing_rates = tuning_curves['tuning_curve_data']\n bins = tuning_curves['angle_bins']\n\n # We are just deciding on the number of rows and columns in the grid\n num = int(math.ceil(np.sqrt(len(firing_rates.keys()))))\n\n fig = plt.figure(figsize=(8.0, 5.0))\n #plt.suptitle('Tuning curves from session %s'%session_id, fontsize=20, fontweight='bold')\n for i, cell in enumerate(sorted(firing_rates.keys())):\n ax = fig.add_subplot(num, num, i + 1)\n bins_to_plot = bins[1:] - bins[1] / 2.\n plt.plot(bins_to_plot, firing_rates[cell], marker='o', linestyle='-', color='b')\n plt.xlim([0, 2 * np.pi])\n ax.text(5, np.max(firing_rates[cell]), '%.1f Hz' % np.max(\n firing_rates[cell]), fontsize=12, bbox=dict(facecolor='white', edgecolor='black'))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n plt.title('%s' % str(cell)) # cell is used\n # plt.xlabel('Angle(radians)')\n # plt.ylabel('firing rate')\n file_to_save = save_data_path + session_id + '.png'\n fig.set_size_inches(14, 10)\n plt.tight_layout()\n # fig.savefig(file_to_save, format='png')\n # plt.close(fig)\n\n\nwith open(os.path.expanduser('~') + '/path_to_hd_data.txt', \"r\") as myfile:\n data_path = myfile.readlines()[0] + 'th-1/'\n\nsess_info_data_path = data_path + 'processed/'\ntuning_curve_data_path = data_path + 'analyses/tuning_curves/'\nsession_names = [os.path.splitext(x)[0]\n for x in os.listdir(data_path + 'processed/') if re.match('Mouse', x)]\nsave_data_path = data_path + 'results/05-16-2016/subplots_tun_curves/'\n\n# implementing the function above; returns the subplot for each session\nfor session_id in session_names:\n get_subplot_tc(session_id, tuning_curve_data_path, save_data_path)\n","sub_path":"tuning_curves/subplot_tuning_curves.py","file_name":"subplot_tuning_curves.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"353055422","text":"# Copyright 2017 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
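# The same +2 Caesar shift as the MoveStringByTwo record above, written with the
# str.maketrans approach that the decoded message itself recommends:
import string
shift = str.maketrans(string.ascii_lowercase,
                      string.ascii_lowercase[2:] + string.ascii_lowercase[:2])
print("g fmnc".translate(shift))  # i hope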
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\n\ndef revert_dictionary(dictionary):\n \"\"\"Given a dictionary revert it's mapping\n\n :param dictionary: A dictionary to be reverted\n :returns: A dictionary with the keys and values reverted\n\n \"\"\"\n return {v: k for k, v in dictionary.items()}\n\n\ndef get_members_ids(members):\n \"\"\"Extract and return a tuple of members identities\n\n :param members: A list of members in JSON format\n :returns: A tuple containing the members identities\n\n \"\"\"\n members_list = []\n for member in members:\n identity = member.get('@odata.id')\n if not identity:\n continue\n members_list.append(os.path.basename(identity))\n\n return tuple(members_list)\n","sub_path":"sushy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"38415054","text":"def solution(s):\n # write your code in Python 3.6\n letters = dict()\n for i in s:\n if i not in letters.keys():\n letters[i] = 1\n else:\n letters[i] += 1\n \n \n deleteNum = 0;\n for j in letters.keys():\n if letters[j] %2 != 0:\n deleteNum += 1\n \n return deleteNum;\n \n\ndef solution2(A):\n maxNum = 0\n maxPosNag = 0\n\n for i in A:\n if i >= maxNum:\n print(maxNum)\n maxNum = i\n for j in A:\n if -maxNum == j:\n maxPosNag = maxNum \n return maxPosNag\n\ndef solution3(N):\n listStr = list(str(N))\n isNeg = 0\n if N < 0:\n isNeg = 1\n listStr = listStr[1:]\n\n insertIndex = 0\n #see if the num is negative or not\n if not isNeg:\n for i in range(len(listStr)):\n if int(listStr[i]) > 5:\n insertIndex += 1\n else:\n break\n listStr.insert(insertIndex,\"5\")\n strNum = \"\"\n for i in listStr:\n strNum += i\n num = int(strNum)\n\n else:\n for i in range(len(listStr)):\n if int(listStr[i])<5:\n insertIndex += 1\n else:\n break\n listStr.insert(insertIndex,\"5\")\n strNum = \"\"\n for i in listStr:\n strNum += i\n num = -int(strNum)\n \n return num \n\nif __name__ == \"__main__\":\n print(solution(\"aaxxxa\"))\n print(solution2([3,2,-2,5,-3]))\n print(solution3(-2698))","sub_path":"leetcode/marshallWace.py","file_name":"marshallWace.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"558705275","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import Bool, Float32, Float32MultiArray\n\nimport os\nimport sys, tty, termios\n\nfd = sys.stdin.fileno()\nold_settings = termios.tcgetattr(fd)\ndef getch():\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\nfrom dynamixel_sdk import *\n\n# Control table address\nADDR_PRO_TORQUE_ENABLE = 64 # Control table address is different in Dynamixel model\nADDR_PRO_GOAL_POSITION = 116\nADDR_PRO_PRESENT_POSITION = 132\n\n# Data Byte Length\nLEN_PRO_GOAL_POSITION = 4\nLEN_PRO_PRESENT_POSITION = 4\n\n# Protocol version\nPROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel\n\n# Default setting\n\nRGRIPPER_ID = 21 # Dynamixel#1 ID : 21\nLGRIPPER_ID = 22 
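# Hedged usage sketch for the sushy helpers above (the identity value is invented):
from sushy.utils import revert_dictionary, get_members_ids
print(revert_dictionary({'a': 1, 'b': 2}))  # {1: 'a', 2: 'b'}
# members without an '@odata.id' key are skipped
print(get_members_ids([{'@odata.id': '/redfish/v1/Systems/1'}, {}]))  # ('1',)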
# Dynamixel#2 ID : 22\n\nBAUDRATE = 1000000 # Dynamixel default baudrate : 57600\nDEVICENAME = '/dev/u2d2-2' # Check which port is being used on your controller\n # ex) Windows: \"COM1\" Linux: \"/dev/ttyUSB0\" Mac: \"/dev/tty.usbserial-*\"\n\nTORQUE_ENABLE = 1 # Value for enabling the torque\nTORQUE_DISABLE = 0 # Value for disabling the torque\nDXL_MOVING_STATUS_THRESHOLD = 5 # Dynamixel moving status threshold\n\n# Initialize PortHandler instance\n# Set the port path\n# Get methods and members of PortHandlerLinux or PortHandlerWindows\nportHandler = PortHandler(DEVICENAME)\n\n# Initialize PacketHandler instance\n# Set the protocol version\n# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler\npacketHandler = PacketHandler(PROTOCOL_VERSION)\n\n# Initialize GroupSyncWrite instance\ngroupSyncWrite = GroupSyncWrite(portHandler, packetHandler, ADDR_PRO_GOAL_POSITION, LEN_PRO_GOAL_POSITION)\n\n# Initialize GroupSyncRead instance for Present Position\ngroupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)\n\n# Open port\nif portHandler.openPort():\n print(\"Succeeded to open the port\")\nelse:\n print(\"Failed to open the port\")\n print(\"Press any key to terminate...\")\n getch()\n quit()\n\n\n# Set port baudrate\nif portHandler.setBaudRate(BAUDRATE):\n print(\"Succeeded to change the baudrate\")\nelse:\n print(\"Failed to change the baudrate\")\n print(\"Press any key to terminate...\")\n getch()\n quit()\n\n\ndef torque(enable):\n # Enable Dynamixel#21 Torque\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, RGRIPPER_ID, ADDR_PRO_TORQUE_ENABLE, enable)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n else:\n rospy.loginfo('Torque for Dynamixel #%d: %d' % (RGRIPPER_ID, enable))\n\n # Enable Dynamixel#22 Torque\n dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, LGRIPPER_ID, ADDR_PRO_TORQUE_ENABLE, enable)\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n elif dxl_error != 0:\n print(\"%s\" % packetHandler.getRxPacketError(dxl_error))\n else:\n rospy.loginfo('Torque for Dynamixel #%d: %d' % (LGRIPPER_ID, enable))\n\n\n# Add parameter storage for Dynamixel#1 present position value\ndxl_addparam_result = groupSyncRead.addParam(RGRIPPER_ID)\nif dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncRead addparam failed\" % RGRIPPER_ID)\n quit()\n\n# Add parameter storage for Dynamixel#2 present position value\ndxl_addparam_result = groupSyncRead.addParam(LGRIPPER_ID)\nif dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncRead addparam failed\" % LGRIPPER_ID)\n quit()\n\ndef gripper_torque_callback(msg):\n if msg.data == True:\n torque(1)\n elif msg.data == False:\n torque(0)\n\ndef left_pos_callback(msg):\n val = msg.data\n\n if val > 100.0:\n val = 100.0\n elif val < 0.0:\n val = 0.0\n\n l_goal = int((824-2473)*val/100+2473)\n\n param_goal_position = [DXL_LOBYTE(DXL_LOWORD(l_goal)), DXL_HIBYTE(DXL_LOWORD(l_goal)), DXL_LOBYTE(DXL_HIWORD(l_goal)), DXL_HIBYTE(DXL_HIWORD(l_goal))]\n\n # Add Dynamixel#2 goal position value to the Syncwrite parameter storage\n dxl_addparam_result = groupSyncWrite.addParam(LGRIPPER_ID, param_goal_position)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed\" % LGRIPPER_ID)\n quit()\n\n # Syncwrite goal position\n 
dxl_comm_result = groupSyncWrite.txPacket()\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n\n # Clear syncwrite parameter storage\n groupSyncWrite.clearParam()\n\ndef right_pos_callback(msg):\n val = msg.data\n\n if val > 100.0:\n val = 100.0\n elif val < 0.0:\n val = 0.0\n \n r_goal = int((3240-1620)*val/100+1620)\n\n param_goal_position = [DXL_LOBYTE(DXL_LOWORD(r_goal)), DXL_HIBYTE(DXL_LOWORD(r_goal)), DXL_LOBYTE(DXL_HIWORD(r_goal)), DXL_HIBYTE(DXL_HIWORD(r_goal))]\n\n # Add Dynamixel#1 goal position value to the Syncwrite parameter storage\n dxl_addparam_result = groupSyncWrite.addParam(RGRIPPER_ID, param_goal_position)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed\" % RGRIPPER_ID)\n quit()\n\n # Syncwrite goal position\n dxl_comm_result = groupSyncWrite.txPacket()\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n\n # Clear syncwrite parameter storage\n groupSyncWrite.clearParam()\n\ndef both_pos_callback(msg):\n left_val = msg.data[0]\n right_val = msg.data[1]\n\n r_goal = int((3240-1620)*right_val/100+1620)\n l_goal = int((824-2473)*left_val/100+2473)\n\n param_goal_position = [DXL_LOBYTE(DXL_LOWORD(r_goal)), DXL_HIBYTE(DXL_LOWORD(r_goal)), DXL_LOBYTE(DXL_HIWORD(r_goal)), DXL_HIBYTE(DXL_HIWORD(r_goal))]\n\n # Add Dynamixel#1 goal position value to the Syncwrite parameter storage\n dxl_addparam_result = groupSyncWrite.addParam(RGRIPPER_ID, param_goal_position)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed\" % RGRIPPER_ID)\n quit()\n\n param_goal_position = [DXL_LOBYTE(DXL_LOWORD(l_goal)), DXL_HIBYTE(DXL_LOWORD(l_goal)), DXL_LOBYTE(DXL_HIWORD(l_goal)), DXL_HIBYTE(DXL_HIWORD(l_goal))]\n\n # Add Dynamixel#2 goal position value to the Syncwrite parameter storage\n dxl_addparam_result = groupSyncWrite.addParam(LGRIPPER_ID, param_goal_position)\n if dxl_addparam_result != True:\n print(\"[ID:%03d] groupSyncWrite addparam failed\" % LGRIPPER_ID)\n quit()\n\n # Syncwrite goal position\n dxl_comm_result = groupSyncWrite.txPacket()\n if dxl_comm_result != COMM_SUCCESS:\n print(\"%s\" % packetHandler.getTxRxResult(dxl_comm_result))\n\n # Clear syncwrite parameter storage\n groupSyncWrite.clearParam()\n \n \nrospy.init_node('gripper')\nrospy.loginfo('Gripper node open.')\n\ntorque(1)\nrospy.Subscriber(\"/grippers/torque\", Bool, gripper_torque_callback)\nrospy.Subscriber(\"/grippers/left_pos\", Float32, left_pos_callback)\nrospy.Subscriber(\"/grippers/right_pos\", Float32, right_pos_callback)\nrospy.Subscriber(\"/grippers/both_pos\", Float32MultiArray, both_pos_callback)\n\nrospy.spin()\n","sub_path":"src/op3_gripper/script/op3_gripper_node.py","file_name":"op3_gripper_node.py","file_ext":"py","file_size_in_byte":7528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"288044072","text":"\ndef run(argv):\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--host', default='0.0.0.0', help='the hostname to use to access this server')\n parser.add_argument('--port', type=int, default=5000, help='an integer for the accumulator')\n args = parser.parse_args(argv)\n\n from . 
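# The percent-to-servo-goal mapping used by the gripper callbacks above, pulled
# out for clarity (the 1620..3240 endpoints are copied from right_pos_callback):
def right_goal(val):
    val = min(max(val, 0.0), 100.0)  # clamp to 0..100 as the callback does
    return int((3240 - 1620) * val / 100 + 1620)
print(right_goal(0.0), right_goal(100.0))  # 1620 3240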
import server\n server.app.run(\n host=args.host, port=args.port,\n debug=True, use_evalex=False,\n use_reloader=True,\n )\n","sub_path":"pheweb/serve/serverun.py","file_name":"serverun.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"498812751","text":"# get the unique identifier of the current thread\nfrom threading import get_ident\n# get the unique identifier of the current coroutine (this import overrides the one above)\nfrom greenlet import getcurrent as get_ident\n\n\nclass Local(object):\n\n\tdef __init__(self):\n\t\tobject.__setattr__(self, 'storage', {})\n\n\tdef __setattr__(self, key, value):\n\t\tident = get_ident()\n\t\tif ident not in self.storage:\n\t\t\tself.storage[ident] = {key: value}\n\t\telse:\n\t\t\tself.storage[ident][key] = value\n\n\tdef __getattr__(self, item):\n\t\tident = get_ident()\n\t\tif ident in self.storage:\n\t\t\treturn self.storage[ident].get(item)\n\t\telse:\n\t\t\treturn None","sub_path":"Flask/z_loacl_stack.py","file_name":"z_loacl_stack.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"159004675","text":"#-*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom api.control.user import User\nfrom api.control.comment import Comment\nimport json\n\ndef home(request):\n \"\"\"\n request:\n URL: post/comment/\n POST: username, message, wallpaper\n\n return:\n {\n \"statusOk\", 0 or 1\n \"statusId\", # status code (see README)\n \"statusName\", # name (description) of the status\n }\n\n \"\"\"\n statusName = \"\"\n statusOk = 1\n statusId = 200\n if not request.POST[\"username\"]:\n statusName += \"The username was not provided \"\n statusOk = 0\n statusId = 510\n elif not request.POST[\"message\"]:\n statusName += \"The message was not provided \"\n statusOk = 0\n statusId = 510\n elif not request.POST[\"wallpaper\"]:\n statusName += \"The wallpaper was not provided \"\n statusOk = 0\n statusId = 510\n elif User().userExistByUsername(request.POST[\"username\"]):\n if Comment().addComment(request.POST[\"username\"], request.POST[\"message\"], request.POST['wallpaper']) == 1500:\n statusName += \"The comment could not be saved \"\n statusOk = 0\n statusId = 500\n else:\n statusName = \"\"\n statusOk = 1\n statusId = 200\n else:\n statusName = \"\"\n statusOk = 1\n statusId = 200\n\n\n text = {\n \"statusOk\": statusOk,\n \"statusId\": statusId,\n \"statusName\": statusName,\n }\n\n return HttpResponse(json.dumps(text)) # convert the JSON to a string before returning it\n","sub_path":"api/view/post_comment.py","file_name":"post_comment.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"186025990","text":"from flask import Blueprint\nfrom flask_restful import Resource, reqparse, fields, marshal\n\nfrom .resource import *\n\numum_fields = {\n 'id': fields.Integer,\n 'luas_desa': fields.Float,\n 'total_dusun': fields.Integer,\n 'bw_utara': fields.String,\n 'bw_selatan': fields.String,\n 'bw_timur': fields.String,\n 'bw_barat': fields.String,\n 'jp_kecamatan': fields.Float,\n 'jp_kabupaten': fields.Float,\n 'jp_provinsi': fields.Float,\n 'link_maps': fields.String\n}\n\n\ndef counter():\n try:\n val = models.InfUmum.select().count()\n except models.InfUmum.DoesNotExist:\n abort(404)\n else:\n return val\n\n\ndef get_or_abort(id):\n try:\n query = models.InfUmum.get_by_id(id)\n except 
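# Hedged usage sketch for the Local class above: attributes are namespaced by
# the current thread/greenlet ident (assumes greenlet is installed, as the
# module imports it):
# local = Local()
# local.user = 'alice'
# local.user  # -> 'alice' here; None when read from a different thread/greenlet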
models.InfUmum.DoesNotExist:\n abort(404)\n else:\n return query\n\n\nclass BaseUmum(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n super().__init__()\n\n def reqargs(self):\n self.reqparse.add_argument(\n 'luas_desa', type=float,\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'total_dusun', type=int,\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'bw_utara',\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'bw_selatan',\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'bw_timur',\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'bw_barat',\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'jp_kecamatan', type=float,\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'jp_kabupaten', type=float,\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'jp_provinsi', type=float,\n required=False, location=['form', 'json'])\n self.reqparse.add_argument(\n 'link_maps',\n required=False, location=['form', 'json'])\n\n\nclass GetPost(BaseUmum):\n # index\n # @login_required\n def get(self):\n umum = [marshal(umum, umum_fields)\n for umum in models.InfUmum.select()]\n return {'success': True,\n 'data': umum}\n\n # store\n # @login_required\n def post(self):\n if counter() >= 1:\n abort(400, \"Data can only be created once, please edit as an alternative\")\n\n self.reqargs()\n args = self.reqparse.parse_args()\n\n try:\n umum = models.InfUmum.create(**args)\n return {'success': True,\n 'data': marshal(umum, umum_fields)}\n except models.InfUmum.DoesNotExist:\n return {'success': False,\n 'message': 'Model does not exist'}\n\n\nclass GetPutDel(BaseUmum):\n # show\n # @login_required\n def get(self, id):\n umum = get_or_abort(id)\n return {'success': True,\n 'data': marshal(umum, umum_fields)}\n\n # edit\n # @login_required\n def put(self, id):\n self.reqargs()\n\n get_or_abort(id)\n args = self.reqparse.parse_args()\n\n try:\n models.InfUmum.update(**args).where(models.InfUmum.id == id).execute()\n return {'success': True,\n 'data': marshal(get_or_abort(id), umum_fields)}\n except models.InfUmum.DoesNotExist:\n return {'success': False,\n 'message': 'Model does not exist'}\n\n # delete\n # @login_required\n def delete(self, id):\n get_or_abort(id)\n models.InfUmum.delete().where(models.InfUmum.id == id).execute()\n return {'success': True,\n 'message': \"Info Umum Desa is deleted\"}\n\n\ninf_umum_api = Blueprint('resources.inf_umum', __name__)\napi = Api(inf_umum_api)\napi.add_resource(GetPost, '/inf-umum', endpoint='inf-umum/gp')\napi.add_resource(GetPutDel, '/inf-umum/', endpoint='inf-umum/gpd')\n","sub_path":"resources/inf_umum.py","file_name":"inf_umum.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"561511161","text":"import math\n\n\nclass Base():\n \n def getPairs(self):\n pass\n\n def getCurrencys(self):\n pass \n \n def get_reserve(self, CoinA, CoinB):\n pass\n \n def quote(self, amountA, reserveA, reserveB):\n assert amountA > 0 and reserveA > 0 and reserveB > 0\n amountB = amountA * reserveB // reserveA\n return amountB\n\n def getOutputAmountWithoutFee(self, amountIn, reserveIn, reserveOut):\n assert amountIn > 0 and reserveIn > 0 and reserveOut\n amountOut = amountIn * reserveOut // (reserveIn + amountIn);\n return amountOut\n\n def 
getOutputAmountsWithoutFee(self, amountIn, path):\n assert amountIn > 0 and len(path) >= 2\n amounts = []\n amounts.append(amountIn)\n for i in range(len(path) - 1):\n (reserveIn, reserveOut) = self.get_reserve(path[i], path[i + 1])\n assert reserveIn > 0 and reserveOut > 0\n amountOut = self.getOutputAmountWithoutFee(amounts[i], reserveIn, reserveOut)\n amounts.append(amountOut)\n return amounts\n \n def getOutputAmount(self, amountIn, reserveIn, reserveOut):\n assert amountIn > 0 and reserveIn > 0 and reserveOut\n amountInWithFee = amountIn * 997;\n numerator = amountInWithFee * reserveOut;\n denominator = reserveIn * 1000 + amountInWithFee;\n amountOut = numerator // denominator;\n return amountOut\n\n def getInputAmount(self, amountOut, reserveIn, reserveOut):\n assert amountOut > 0 and reserveIn > 0 and reserveOut\n numerator = reserveIn * amountOut * 1000;\n denominator = (reserveOut - amountOut) * 997;\n amountIn = numerator // denominator + 1;\n return amountIn\n\n def getOutputAmounts(self, amountIn, path):\n assert amountIn > 0 and len(path) >= 2\n amounts = []\n amounts.append(amountIn)\n for i in range(len(path) - 1):\n (reserveIn, reserveOut) = self.get_reserve(path[i], path[i + 1])\n assert reserveIn > 0 and reserveOut > 0\n amountOut = self.getOutputAmount(amounts[i], reserveIn, reserveOut)\n amounts.append(amountOut)\n return amounts\n\n def getInputAmounts(self, amountOut, path):\n assert amountOut > 0 and len(path) >= 2\n amounts = [None] * len(path)\n amounts[len(path) - 1] = amountOut\n for i in range(len(path) - 1, 0, -1):\n (reserveIn, reserveOut) = self.get_reserve(path[i - 1], path[i])\n assert reserveIn > 0 and reserveOut > 0\n amounts[i - 1] = self.getInputAmount(amounts[i], reserveIn, reserveOut)\n return amounts\n\n def bestTradeExactIn(self, pairs, idIn, idOut, amountIn, originalAmountIn, path=[], bestTrades=[]):\n assert len(pairs) > 0\n assert originalAmountIn == amountIn or len(path) > 0\n if len(path) == 0:\n path.append(idIn)\n for i in range(0, len(pairs)):\n pair = pairs[i]\n (reserveIn, reserveOut) = self.get_reserve(pair[0], pair[1])\n if pair[0] != idIn and pair[1] != idIn:\n continue\n if reserveIn == 0 or reserveOut == 0:\n continue\n amountOut = self.getOutputAmount(amountIn, reserveIn, reserveOut)\n newIdIn = pair[1] if idIn == pair[0] else pair[0]\n if idOut == pair[0] or idOut == pair[1]:\n path.append(idOut)\n bestTrades.append((path, amountOut))\n elif len(pairs) > 1:\n pairsExcludingThisPair = pairs[:]\n del (pairsExcludingThisPair[i])\n newPath = path + [newIdIn]\n self.bestTradeExactIn(pairsExcludingThisPair, newIdIn, idOut, amountOut, originalAmountIn, newPath, bestTrades)\n \n return sorted(bestTrades, key=lambda k: k[1], reverse=True)\n \n \n def bestTradeExactOut(self, pairs, idIn, idOut, amountOut, originalAmountOut, path=[], bestTrades=[]):\n assert len(pairs) > 0\n assert originalAmountOut == amountOut or len(path) > 0\n if len(path) == 0:\n path.append(idOut)\n for i in range(0, len(pairs)):\n pair = pairs[i]\n (reserveIn, reserveOut) = self.get_reserve(pair[0], pair[1])\n if pair[0] != idOut and pair[1] != idOut:\n continue\n if reserveIn == 0 or reserveOut == 0:\n continue\n amountIn = self.getInputAmount(amountOut, reserveIn, reserveOut)\n newIdOut = pair[1] if idOut == pair[0] else pair[0]\n if idIn == pair[0] or idIn == pair[1]:\n path.insert(0, idIn)\n bestTrades.append((path, amountIn))\n elif len(pairs) > 1:\n pairsExcludingThisPair = pairs[:]\n del (pairsExcludingThisPair[i])\n newPath = [newIdOut] + path\n 
self.bestTradeExactOut(pairsExcludingThisPair, idIn, newIdOut, amountIn, originalAmountOut, newPath, bestTrades)\n \n return sorted(bestTrades, key=lambda k: k[1], reverse=True)","sub_path":"violas_client/extypes/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"160674255","text":"from __future__ import division\nimport sys\nimport os\nfrom datetime import datetime\nfrom yahoo_auth import get_yahoo_auth\nfrom predict import update_model, predict_today, linear_interpolation\n\nsys.path.append('..')\nfrom utility.database import DatabaseCreate\nfrom utility.Send_Text import send_text\n\ndb = DatabaseCreate(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data'), 'fantasy_football.db')\n\n\ndef career_record(mobile, carrier, manager, opponent, week, year, type):\n \"\"\"Returns career record (including playoffs) of two opponents\"\"\"\n\n query = \"select count(*) from (select * from scoreboard_all where (manager1_name = '{0}' and manager2_name = '{1}')\\\n or (manager2_name = '{0}' and manager1_name = '{1}')) where team1_points>0 and type='{2}' and (week!={3} or\\\n year!= {4});\".format(manager, opponent, type, week, year)\n x = int(db.fetch_one(query))\n\n query = \"select count(*) from (select * from scoreboard_all where ((manager1_name = '{0}' and manager2_name = '{1}' \\\n and team1_points > team2_points) or (manager2_name = '{0}' AND manager1_name = '{1}' and \\\n team2_points > team1_points))) where type = '{2}' and (week != {3} OR year != {4})\".format(\n manager, opponent, type, week, year)\n y = int(db.fetch_one(query))\n\n try:\n career_winning_percentage = '{:.2%}'.format(float(y) / float(x))\n except ZeroDivisionError:\n career_winning_percentage = 0\n wins = str(y)\n losses = str(x - y)\n message = \"{0}, today you play {1} in fantasy. 
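# Worked example of the 0.3%-fee swap formula in getOutputAmount above
# (the reserve and input figures are made up):
amount_in, reserve_in, reserve_out = 1000, 100000, 100000
amount_out = (amount_in * 997 * reserve_out) // (reserve_in * 1000 + amount_in * 997)
print(amount_out)  # 987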
Your career winning % is {2} with a record of {3}-{4}.\".format(\n manager, opponent, career_winning_percentage, wins, losses)\n\n for mob, car in zip(mobile, carrier):\n send_text(mob, message, car)\n\n\ndef current_projections(mobile, carrier, manager, prob):\n rate_win = '{:.2%}'.format(prob)\n message = \"{0}, you are projected a {1} chance of victory\".format(manager, rate_win)\n\n for mob, car in zip(mobile, carrier):\n send_text(mob, message, car)\n\n\nif __name__ == \"__main__\":\n token, y3 = get_yahoo_auth()\n year = datetime.now().year\n update_model(year)\n\n query = \"select max(year), sport_id, league_id, type from leagues\"\n year, sport_id, league_id, type = db.fetch_line(query)\n\n query = \"select * from fantasysports.leagues where league_key='{}'\".format(sport_id + \".l.\" + league_id)\n data_yql = y3.execute(query, token=token).rows\n current_week = int(data_yql[0]['current_week'])\n\n predictions = predict_today(year, current_week)\n query = \"select manager1_name, manager2_name, type from scoreboard_all where year={0} and week={1};\".format(year, current_week)\n results = db.fetch_all(query)\n predictions_interpolated = linear_interpolation(results, predictions)\n\n for manager, type in predictions_interpolated.keys():\n query = \"select number, carrier from mobile where nickname = '{0}' and type = '{1}'\".format(manager, type)\n result = db.fetch_all(query)\n mobile, carrier = zip(*result)\n prob = predictions_interpolated[manager, type]\n\n try:\n query = \"select manager2_name from scoreboard_all where manager1_name='{0}' and year={1} and \\\n week = {2} and type = '{3}';\".format(manager, year, current_week, type)\n opponent = str(db.fetch_one(query))\n\n except TypeError:\n query = \"select manager1_name from scoreboard_all where manager2_name='{0}' and year={1} and \\\n week = {2} and type = '{3}';\".format(manager, year, current_week, type)\n opponent = str(db.fetch_one(query))\n\n career_record(mobile, carrier, manager, opponent, current_week, year, type)\n current_projections(mobile, carrier, manager, prob)\n\n\n","sub_path":"Yahoo_Fantasy/live_updates.py","file_name":"live_updates.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"546437841","text":"# pylint: disable=W1202\nimport threading\nfrom datetime import datetime\nfrom typing import List\n\nfrom oadr2 import logger\nfrom oadr2.schemas import EventSchema\n\nCONTROL_LOOP_INTERVAL = 30 # update control state every X second\n\n\n# Used by poll.OpenADR2 to handle events\nclass EventController(object):\n '''\n EventController tracks active events and fires a callback when event levels have\n changed.\n\n Member Variables:\n --------\n event_handler -- The EventHandler instance\n current_signal_level -- current signal level of a realy/point\n control_loop_interval -- How often to run the control loop\n control_thread -- threading.Thread() object w/ name of 'oadr2.control'\n _control_loop_signal -- threading.Event() object\n _exit -- A threading.Thread() object\n '''\n\n def __init__(\n self,\n event_handler,\n signal_changed_callback=None,\n start_thread=True,\n control_loop_interval=CONTROL_LOOP_INTERVAL\n ):\n '''\n Initialize the Event Controller\n\n event_handler -- An instance of event.EventHandler\n start_thread -- Start the control thread\n control_loop_interval -- How often to run the control loop\n '''\n\n self.event_handler = event_handler\n self.current_signal_level = 0\n\n 
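# The career winning-percentage formatting used in career_record above, standalone
# (the win/loss figures are invented):
wins, losses = 7, 3
print('{:.2%}'.format(wins / (wins + losses)))  # 70.00%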
self.signal_changed_callback = signal_changed_callback \\\n if signal_changed_callback is not None \\\n else self.default_signal_callback\n\n # Add an exit thread for the module\n self._exit = threading.Event()\n self._exit.clear()\n\n self._control_loop_signal = threading.Event()\n self.control_loop_interval = control_loop_interval\n\n # The control thread\n self.control_thread = None\n\n if start_thread:\n self.control_thread = threading.Thread(\n name='oadr2.control',\n target=self._control_event_loop\n )\n self.control_thread.daemon = True\n self.control_thread.start()\n\n def events_updated(self):\n '''\n Call this when some events have updated to cause the control\n loop to refresh\n '''\n self._control_loop_signal.set()\n\n def get_current_signal_level(self):\n '''\n Return the signal level and event ID of the currently active event.\n If no events are active, this will return (0,None)\n '''\n\n signal_level, event_id, expired_events = self._calculate_current_event_status(\n self.event_handler.get_active_events()\n )\n\n return signal_level, event_id\n\n def _control_event_loop(self):\n '''\n This is the threading loop to perform control based on current oadr events\n Note the current implementation simply loops based on CONTROL_LOOP_INTERVAL\n except when an updated event is received by a VTN.\n '''\n while not self._exit.is_set():\n try:\n logger.debug(\"Updating control states...\")\n events = self.event_handler.get_active_events()\n\n new_signal_level = self._update_control(events)\n logger.debug(\"Highest signal level is: %f\", new_signal_level)\n\n changed = self._update_signal_level(new_signal_level)\n if changed:\n logger.debug(\"Updated current signal level!\")\n\n except Exception as ex:\n logger.exception(\"Control loop error: %s\", ex)\n\n self._control_loop_signal.wait(self.control_loop_interval)\n self._control_loop_signal.clear() # in case it was triggered by a poll update\n\n logger.info(\"Control loop exiting.\")\n\n def _update_control(self, events):\n '''\n Called by `control_event_loop()` to determine the current signal level.\n This also deletes any events from the database that have expired.\n\n events -- List of lxml.etree.ElementTree objects (with OpenADR 2.0 tags)\n '''\n signal_level, event_id, remove_events = self._calculate_current_event_status(events)\n\n if remove_events:\n # remove any events that we've detected have ended or been cancelled.\n # TODO callback for expired events??\n logger.debug(\"Removing completed or cancelled events: %s\", remove_events)\n self.event_handler.remove_events(remove_events)\n\n if event_id:\n self.event_handler.update_active_status(event_id)\n\n return signal_level\n\n def _calculate_current_event_status(self, events: List[EventSchema]):\n '''\n returns a 3-tuple of (current_signal_level, current_event_id, remove_events=[])\n '''\n\n highest_signal_val = 0\n current_event = None\n remove_events = [] # to collect expired events\n now = datetime.utcnow()\n\n for evt in events:\n try:\n if evt.status is None:\n logger.debug(f\"Ignoring event {evt.id} - no valid status\")\n continue\n\n if evt.status.lower() == \"cancelled\" and datetime.utcnow() > evt.end:\n logger.debug(f\"Event {evt.id}({evt.mod_number}) has been cancelled\")\n remove_events.append(evt.id)\n continue\n\n if not evt.signals:\n logger.debug(f\"Ignoring event {evt.id} - no valid signals\")\n continue\n\n current_interval = evt.get_current_interval(now=now)\n if current_interval is None:\n if evt.end < now:\n logger.debug(f\"Event {evt.id}({evt.mod_number}) 
has ended\")\n remove_events.append(evt.id)\n continue\n\n elif evt.start > now:\n logger.debug(f\"Event {evt.id}({evt.mod_number}) has not started yet.\")\n continue\n\n else:\n logger.warning(f\"Error getting current interval for event {evt.id}({evt.mod_number}):\"\n f\"Signals: {evt.signals}\")\n continue\n\n if evt.test_event:\n logger.debug(f\"Ignoring event {evt.id} - test event\")\n continue\n\n logger.debug(\n f'Control loop: Evt ID: {evt.id}({evt.mod_number}); '\n f'Interval: {current_interval.index}; Current Signal: {current_interval.level}'\n )\n\n if current_interval.level > highest_signal_val or not current_event:\n if not current_event or evt.priority > current_event.priority:\n highest_signal_val = current_interval.level\n current_event = evt\n\n except Exception as ex:\n logger.exception(f\"Error parsing event: {evt.id}: {ex}\")\n\n return highest_signal_val, current_event.id if current_event else None, remove_events\n\n def _update_signal_level(self, signal_level):\n '''\n Called once each control interval with the 'current' signal level.\n If the signal level has changed from `current_signal_level`, this\n calls `self.signal_changed_callback(current_signal_level, new_signal_level)`\n and then sets `self.current_signal_level = new_signal_level`.\n\n signal_level -- If it is the same as the current signal level, the\n function will exit. Else, it will change the\n signal relay\n\n returns True if the signal level has changed from the `current_signal_level`\n or False if the signal level has not changed.\n '''\n\n # check if the current signal level is different from the new signal level\n if signal_level == self.current_signal_level:\n return False\n\n try:\n self.signal_changed_callback(self.current_signal_level, signal_level)\n\n except Exception as ex:\n logger.exception(\"Error from callback! 
%s\", ex)\n\n self.current_signal_level = signal_level\n return True\n\n def default_signal_callback(self, old_level, new_level):\n '''\n The default callback just logs a message.\n '''\n logger.debug(f\"Signal level changed from {old_level} to {new_level}\")\n\n def exit(self):\n '''\n Shutdown the threads for the module\n '''\n self._exit.set()\n self._control_loop_signal.set() # interrupt sleep\n self.control_thread.join(2)\n","sub_path":"oadr2/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":8571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"512953032","text":"import sys\nimport os\nimport re\nfrom os import path\nimport subprocess\nimport json\nimport requests\nimport bs4\nimport dateparser\nimport tempfile\nfrom datetime import datetime, timedelta\nfrom werkzeug.contrib.cache import FileSystemCache\n\nfrom flask import Flask, request, render_template\n\napp = Flask(__name__)\ncache_dir = tempfile.TemporaryDirectory(prefix=\"ilswlol-\")\ncache = FileSystemCache(cache_dir.name)\n\ndef ist_lukas_schon_wach():\n confidence = 0\n\n # Check using steam profile\n steamprofile = requests.get(\"http://steamcommunity.com/id/Ahti333\")\n soup = bs4.BeautifulSoup(steamprofile.text, \"html.parser\")\n\n online_offline_info = soup.find(class_='responsive_status_info')\n\n # Check whether user is online or offline right now\n if online_offline_info.find_all(text='Currently Online') or \\\n online_offline_info.find_all(text=re.compile('Online using')) or \\\n online_offline_info.find_all(text=re.compile('Currently In-Game')):\n # If he's online in steam now, we're pretty confident\n confidence += 70\n else:\n last_online = online_offline_info.find(class_='profile_in_game_name').string\n last_online_date = last_online.replace('Last Online ', '')\n date = dateparser.parse(last_online_date)\n delta = datetime.utcnow() - date\n\n # Check whether Lukas has been online recently and assign confidence\n if delta < timedelta(hours=1):\n confidence += 40\n elif delta < timedelta(hours=3):\n confidence += 30\n elif delta < timedelta(hours=7):\n confidence += 20\n\n # Check using telegram\n\n # For development purposes, figure out the path ourselves\n tg_path = None\n if os.environ.get('TG_PATH') is None:\n tg_path = path.join(path.dirname(sys.executable), \"..\", \"..\", \"externals\", \"tg\")\n else:\n tg_path = os.environ.get('TG_PATH')\n\n tg_cli_path = path.join(tg_path, \"bin\", \"telegram-cli\")\n tg_pubkey_path = path.join(tg_path, \"tg-server.pub\")\n\n tg_output = None\n # Retry a few times in case the token has expired\n attempt = 3\n success = False\n while attempt > 0 and not success:\n tg_output = subprocess.run([tg_cli_path, \"-k\", tg_pubkey_path,\n \"-e\", \"contact_list\", \"--json\", \"-D\", \"-R\"],\n stdout=subprocess.PIPE)\n if tg_output.returncode != 0:\n attempt -= 1\n else:\n success = True\n\n split_contacts = tg_output.stdout.splitlines()[0].decode(\"utf-8\")\n parsed_contacts = json.loads(split_contacts)\n for contact in parsed_contacts:\n if 'username' in contact and contact['username'] == 'lukasovich':\n date = dateparser.parse(contact['when'])\n delta = datetime.utcnow() - date\n\n # Check whether Lukas has been online recently and assign confidence\n if delta < timedelta(minutes=5):\n confidence += 70\n if delta < timedelta(minutes=45):\n confidence += 50\n elif delta < timedelta(hours=1):\n confidence += 40\n elif delta < timedelta(hours=3):\n confidence += 30\n elif delta < 
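# Hedged sketch of wiring a custom callback into the EventController above
# (the event_handler object is assumed and not constructed here):
# def log_change(old_level, new_level):
#     print('OpenADR signal level: %s -> %s' % (old_level, new_level))
# ctl = EventController(event_handler, signal_changed_callback=log_change,
#                       start_thread=False)
# ctl.get_current_signal_level()  # -> (signal_level, event_id)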
timedelta(hours=7):\n confidence += 20\n\n break\n\n return confidence >= 50\n\n\n@app.route(\"/\")\ndef index():\n schon_wach = cache.get('ist_lukas_schon_wach')\n if schon_wach is None:\n schon_wach = ist_lukas_schon_wach()\n cache.set('ist_lukas_schon_wach', schon_wach, timeout=5 * 60)\n\n if schon_wach:\n if request.args.get('raw'):\n return \"JA\"\n else:\n return render_template('index.html', schon_wach=True)\n else:\n if request.args.get('raw'):\n return \"NEIN\"\n else:\n return render_template('index.html', schon_wach=False)\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"ilswlol/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"53106185","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nSTART_BIT = 83\nSTART_BIT_EXTENSION = 15\nSTROBE_LENGTH = 84\nSTROBE_SPACE = 40\n\n\nclass Tic(object):\n u\"\"\" class constructor \"\"\"\n def __init__(self, number, header, data):\n self.number = number\n self.header = header\n self.data = data\n self.extension = None\n\n def __str__(self):\n u\"\"\" override the string representation of the class \"\"\"\n res = u'Header: ' + self.header + u'\\n'\n for item in self.data.keys():\n res = res + item + u' ' + u' '.join(self.data[item]) + u'\\n'\n return res\n\n def get_strobes(self):\n u\"\"\" build the strobes \"\"\"\n # flatten the main tic into one long string\n longdata = []\n for key in self.data.keys():\n longdata.extend(self.data[key])\n longdata = longdata[START_BIT:]\n\n # flatten the extension tic into one long string\n if self.extension:\n longdataextension = []\n for key in self.extension.keys():\n longdataextension.extend(self.extension[key])\n longdataextension = longdataextension[START_BIT_EXTENSION:]\n longdata.extend(longdataextension)\n res = []\n while len(longdata) >= STROBE_LENGTH:\n res.append(longdata[0:STROBE_LENGTH])\n longdata = longdata[STROBE_LENGTH + STROBE_SPACE:]\n return res\n\n def setextention(self, extend_data):\n u\"\"\" attach the tic extension when the matching condition holds \"\"\"\n bytes_tic = u'0x' + self.data[u'0x0010'][4] + self.data[u'0x0010'][5]\n bytes_extend = u'0x' + extend_data[u'0x0010'][4] + extend_data[u'0x0010'][5]\n if int(bytes_extend, 16) == (int(bytes_tic, 16) + 1):\n self.extension = extend_data\n return True\n return False\n","sub_path":"tic.py","file_name":"tic.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"271890353","text":"\"\"\"\nMachine Learning - Author Attribution.\n\nBy Bartu Sivri.\n\"\"\"\nimport sys\nimport os\nimport glob\nimport re\nimport string\nimport math\nimport matplotlib.pyplot as plt\n\n\nclass Sample:\n \"\"\"Data Holder For Sample Text Files.\"\"\"\n\n features_count = 0\n\n def __init__(self, path, words):\n \"\"\"Store Words and Store Path For Accuracy Check Later.\"\"\"\n self.path = path.split('problems/')[1]\n self.words = set(words)\n\n\n def fill_feature_vector(self, features):\n self.feature_vector = [0] * len(features)\n \n for index in range(0, len(features)):\n if features[index] in self.words:\n self.feature_vector[index] += 1\n\n\n def debug_print(self):\n print('Sample path = {}'.format(self.path))\n print('Total document count is = {}'.format(Training.total_docs))\n print('Label Probability P(c) = {}'.format(self.label_prob))\n print('Number of Features = 
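# --- Editor's sketch (add-on, not part of the original tic.py record above) ---
# Worked example of the strobe windowing in Tic.get_strobes: after dropping the
# START_BIT leading elements, each strobe takes STROBE_LENGTH (84) bits and the
# next window starts STROBE_SPACE (40) bits later, i.e. the stride is 84 + 40 = 124.
START_BIT, STROBE_LENGTH, STROBE_SPACE = 83, 84, 40

bits = ['0'] * 1000           # stand-in for the flattened tic data
bits = bits[START_BIT:]       # 917 elements remain

strobes = []
while len(bits) >= STROBE_LENGTH:
    strobes.append(bits[:STROBE_LENGTH])
    bits = bits[STROBE_LENGTH + STROBE_SPACE:]

# 917 = 7 * 124 + 49, and the 49-bit tail is shorter than one strobe, so:
assert len(strobes) == 7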
{}'.format(Training.features_count))\n print('')\n print('-----Feature Vector-----')\n print(self.feature_vector)\n print('')\n print('-----P(fi | c)-----')\n print(self.fi_probs)\n print('')\n\n\nclass Training:\n \"\"\"Data Holder For Training Text Files.\"\"\"\n \n total_docs = 0\n features_count = 0\n\n def __init__(self, author, words_list, count):\n \"\"\"Store All Words Used by an Author Across All given Training Texts.\"\"\"\n self.author = 'Author' + author\n self.word_list = []\n self.word_sets = []\n\n for words in words_list:\n self.word_list.append(words)\n self.word_sets.append(set(words))\n\n self.num_of_docs = count\n Training.total_docs += self.num_of_docs\n\n\n def calc_label_prob(self):\n self.label_prob = self.num_of_docs / Training.total_docs\n \n\n def debug_print(self):\n print('Author = {} has {} docs'.format(self.author, self.num_of_docs))\n print('Total document count is = {}'.format(Training.total_docs))\n print('Label Probability P(c) = {}'.format(self.label_prob))\n print('Number of Features = {}'.format(Training.features_count))\n print('')\n print('-----Feature Vector-----')\n print(self.feature_vector)\n print('')\n print('-----P(fi | c)-----')\n print(self.fi_probs)\n print('')\n\n\n def fill_feature_vector(self, features):\n self.feature_vector = [0] * Training.features_count\n \n for index in range(0, len(features)):\n for words in self.word_sets:\n if features[index] in words:\n self.feature_vector[index] += 1\n\n\n def count_occurence(self, features):\n self.count_vector = [0] * len(features)\n \n for index in range(0, len(features)):\n for words in self.word_list:\n self.count_vector[index] += words.count(features[index])\n\n\n def freq_calcs(self, sub_features, size):\n self.feature_vector = [0] * size\n \n for index in range(0, size):\n for words in self.word_sets:\n if sub_features[index] in words:\n self.feature_vector[index] += 1\n \n self.fi_probs = [0] * size\n \n for index in range(0, len(self.feature_vector)):\n self.fi_probs[index] = (self.feature_vector[index] + 1) / (self.num_of_docs + 2)\n\n def calc_fi_prob(self):\n self.fi_probs = [0] * Training.features_count\n \n for index in range(0, len(self.feature_vector)):\n self.fi_probs[index] = (self.feature_vector[index] + 1) / (self.num_of_docs + 2)\n\n\ndef fill_features_list(stop_words, features):\n \"\"\"Store Each Given Feature In a List.\"\"\"\n with open(stop_words, errors='ignore') as input_file:\n for line in input_file:\n modified_line = line.replace('\\n', '')\n features.append(modified_line)\n\n n = len(features)\n Training.features_count = n\n Sample.features_count = n\n\n\ndef tokenize(input_files, samples, trainings):\n \"\"\"Tokenize Every File and Parse Accordingly.\"\"\"\n def strip_whitespace(input_string):\n \"\"\"Strip Whitespace from Input String.\"\"\"\n return re.sub(\"\\s+\", \" \", input_string.strip())\n\n training_by_author = {}\n\n for file_path in input_files:\n file_path = file_path.replace('\\\\', '/')\n words = []\n\n with open(file_path, errors='ignore') as input_file:\n for line in input_file:\n whitespace_stripped = strip_whitespace(line)\n punctuation_removed = \"\".join([x for x in whitespace_stripped\n if x not in string.punctuation])\n lowercased = punctuation_removed.lower()\n words.extend(lowercased.split())\n\n file_name = os.path.basename(file_path)\n\n # if its a sample file\n if file_name[1] == 's':\n samples.append(Sample(file_path, words))\n\n # if its a training file\n else:\n author_no = file_name.split('train')[1].split('-')[0]\n\n if 
author_no not in training_by_author:\n training_by_author[author_no] = {\n 'words' : [],\n 'count' : 1\n }\n else:\n training_by_author[author_no]['count'] += 1\n\n training_by_author[author_no]['words'].append(words)\n\n for author in training_by_author:\n trainings.append(Training(\n author,\n training_by_author[author]['words'],\n training_by_author[author]['count']\n ))\n\n\ndef do_training(trainings, features):\n\n for train in trainings:\n train.calc_label_prob()\n\n train.fill_feature_vector(features)\n\n train.calc_fi_prob()\n\n\ndef sum_count_vectors(trainings, features):\n\n for train in trainings:\n train.count_occurence(features)\n\n total_count_vector = [0] * Training.features_count\n\n for train in trainings:\n count_vec = train.count_vector\n\n for i in range(0, len(count_vec)):\n total_count_vector[i] += count_vec[i]\n\n return total_count_vector\n\n\ndef do_testing(sample, trainings, features, answers):\n\n answers[sample.path] = {\n 'max' : float('-inf'),\n 'author' : ''\n }\n sample.fill_feature_vector(features)\n\n for training in trainings:\n result = calculate_class(sample, training)\n\n if result > answers[sample.path]['max']:\n answers[sample.path]['max'] = result\n answers[sample.path]['author'] = training.author\n\n\ndef calculate_class(sample, training):\n\n result = math.log2(training.label_prob)\n\n for i in range(0, len(sample.feature_vector)):\n if sample.feature_vector[i] == 1:\n result += math.log(training.fi_probs[i], 2)\n else:\n result += math.log(1 - training.fi_probs[i], 2)\n\n return result\n\n\ndef build_answer_dict(answer_file, problem_label, truth_dict):\n\n with open(answer_file, errors='ignore') as input_file:\n for line in input_file:\n if len(line) > 1 and line[7] == problem_label:\n mod_line = line[:-1]\n answer_line = mod_line.split(' ')\n truth_dict[answer_line[0]] = answer_line[1]\n\n\ndef test_acc(answers, truth_dict):\n\n x = 0\n n = len(answers)\n\n for sample, val in answers.items():\n if truth_dict[sample] == val['author']:\n x += 1\n\n acc = (x / n) * 100\n print('Accuracy:')\n print('---------')\n print(acc)\n print('')\n\n\ndef computeConfusionMatrix(predicted, groundTruth, nAuthors):\n confusionMatrix = [[0 for i in range(nAuthors+1)] for j in range(nAuthors+1)]\n\n for i in range(len(groundTruth)):\n confusionMatrix[predicted[i]][groundTruth[i]] += 1\n\n return confusionMatrix\n\n\ndef outputConfusionMatrix(confusionMatrix):\n columnWidth = 4\n\n print('Confusion Matrix:')\n print('-----------------')\n \n print(str(' ').center(columnWidth),end=' ')\n for i in range(1,len(confusionMatrix)):\n print(str(i).center(columnWidth),end=' ')\n\n print()\n\n for i in range(1,len(confusionMatrix)):\n print(str(i).center(columnWidth),end=' ')\n for j in range(1,len(confusionMatrix)):\n print(str(confusionMatrix[j][i]).center(columnWidth),end=' ')\n print()\n print('')\n\n\ndef calc_cce(trainings, features):\n\n top = 20\n CCE = [0] * Training.features_count\n\n for i in range(0, len(features)):\n res = 0\n for train in trainings:\n fi_prob = train.fi_probs[i]\n res += train.label_prob * fi_prob * math.log(fi_prob, 2)\n\n CCE[i] = -res\n\n\n print('Top Features:')\n print('-------------')\n for x in range(0, top):\n m = max(CCE)\n max_index = CCE.index(m)\n print('{}: {}'.format(features[max_index], m))\n CCE[max_index] = 0\n print('')\n\n\ndef frequency_training(total_feature_vector, features, trainings, samples, truth_dict, freq_acc):\n\n feature_ranking = total_feature_vector.copy()\n sub_features = []\n current_num = 10\n\n 
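# --- Editor's note (hedged sketch, not part of the original main.py record above) ---
# calculate_class scores a document with a Bernoulli Naive Bayes model:
#     score(c) = log2 P(c) + sum_i [ f_i * log2 p_ic + (1 - f_i) * log2(1 - p_ic) ]
# where f_i is the binary feature value and p_ic = (count_ic + 1) / (n_c + 2) is the
# Laplace-smoothed estimate computed in calc_fi_prob. A standalone version:
import math


def bernoulli_nb_score(feature_vector, label_prob, fi_probs):
    score = math.log2(label_prob)
    for f, p in zip(feature_vector, fi_probs):
        score += math.log2(p) if f == 1 else math.log2(1 - p)
    return score


# e.g. bernoulli_nb_score([1, 0], 0.5, [0.75, 0.25])
#      == log2(0.5) + log2(0.75) + log2(0.75)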
print('Training w/ Frequent Features:')\n print('------------------------------')\n\n while current_num < Training.features_count:\n sub_answers = dict()\n for i in range(0, 10):\n m = max(feature_ranking)\n max_index = feature_ranking.index(m)\n sub_features.append(features[max_index])\n feature_ranking[max_index] = -1\n\n for train in trainings:\n train.freq_calcs(sub_features, current_num)\n\n for sample in samples:\n do_testing(sample, trainings, sub_features, sub_answers)\n\n x = 0\n n = len(sub_answers)\n\n for sample, val in sub_answers.items():\n if truth_dict[sample] == val['author']:\n x += 1\n\n acc = (x / n)\n print('{}: {}'.format(current_num, acc))\n freq_acc.append(acc)\n\n current_num += 10\n\n\ndef create_plot(freq_acc, problem_label):\n\n feature_nums = []\n name = 'graph_' + problem_label\n i = 10\n while i < Training.features_count:\n feature_nums.append(i)\n i += 10\n\n plt.rcParams[\"figure.figsize\"] = [16,9]\n plt.scatter(feature_nums, freq_acc)\n plt.plot(feature_nums, freq_acc)\n plt.savefig(name)\n plt.show()\n\n\ndef main():\n \"\"\"Author Attribution.\n\n Train on Data With Given Features.\n Find Author of Given Sample Files.\n \"\"\"\n n_authors = 13\n samples = []\n trainings = []\n features = []\n answers = dict()\n truth_dict = dict()\n input_folder = sys.argv[1]\n problem_label = input_folder[-2]\n\n stop_words = 'stopwords.txt'\n ground_truth = 'test_ground_truth.txt'\n build_answer_dict(ground_truth, problem_label, truth_dict)\n\n fill_features_list(stop_words, features)\n\n input_files = glob.glob(input_folder + '/*.txt')\n tokenize(input_files, samples, trainings)\n\n do_training(trainings, features)\n total_count_vector = sum_count_vectors(trainings, features)\n\n for sample in samples:\n do_testing(sample, trainings, features, answers)\n\n test_acc(answers, truth_dict)\n\n predicted_authors = []\n truth_authors = []\n freq_acc = []\n\n for key, val in answers.items():\n no = val['author'][-2:]\n if no[0] == '0':\n no = no[-1:]\n predicted_authors.append(int(no))\n\n for key, val in truth_dict.items():\n no = val[-2:]\n if no[0] == '0':\n no = no[-1:]\n truth_authors.append(int(no))\n\n # print(predicted_authors)\n # print(truth_authors)\n outputConfusionMatrix(computeConfusionMatrix(predicted_authors, truth_authors, n_authors))\n\n calc_cce(trainings, features)\n\n frequency_training(total_count_vector, features, trainings, samples, truth_dict, freq_acc)\n\n # create_plot(freq_acc, problem_label)\n\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"246362568","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/12/24 下午3:42\n# @Author : yu_hsuan_chen@trendmicro.com\n# @File : teacher\n# @Version : 3.6\n\nimport json\n\nfrom flasgger import Swagger\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\napp.config['SWAGGER'] = {\n 'title': 'Teachers API Documents',\n \"specs\": [\n {\n \"endpoint\": 'swagger',\n \"route\": '/swagger.json',\n \"rule_filter\": lambda rule: True, # all in\n \"model_filter\": lambda tag: True, # all in\n }\n ],\n}\n\nSwagger(app)\n\n\n# Generate data as database\n@app.route('/api/teachers/init', methods=['POST'])\ndef init():\n data = [\n {\n \"id\": 1,\n \"name\": \"Doris Wilson\",\n \"class\": [\n \"Chinese\",\n \"English\"\n ]\n },\n {\n \"id\": 2,\n \"name\": \"Mrs. A. T. 
Whitecotton\",\n \"class\": [\n \"Physics\",\n \"Chemistry\"\n ]\n }\n ]\n\n with open(\"teacher.json\", \"w\") as json_file:\n json.dump(data, json_file)\n\n return jsonify({\"states\": \"OK\"})\n\n\n@app.route('/api/teachers/', methods=['GET'])\ndef teachers():\n \"\"\"\n Use the API to get all teachers information\n ---\n tags:\n - Get all teachers information\n produces:\n - application/json\n responses:\n 200:\n description: Get all teachers information\n schema:\n type: array\n items:\n $ref: \"#/definitions/teacher_information\"\n \"\"\"\n with open(\"teacher.json\", \"r\") as json_file:\n teachers_data = json.load(json_file)\n\n return jsonify(teachers_data), 200\n\n\n@app.route('/api/teachers/', methods=['GET'])\ndef teacher(teacher_id):\n \"\"\"\n Use the API to get teacher's information by id\n ---\n tags:\n - Get all teachers information\n produces:\n - application/json\n parameters:\n - name: teacher_id\n in: path\n required: true\n type: integer\n description: teacher id\n x-example: 1\n responses:\n 200:\n description: Get teacher's information by id\n schema:\n id: teacher_information\n properties:\n id:\n type: integer\n description: teacher's id\n default: 1\n name:\n type: string\n description: teacher's name\n default: Doris Wilson\n class:\n type: array\n description: teacher's classes\n items:\n type: string\n default: [\"English\", \"PE\", \"Math\"]\n required:\n - id\n - name\n - class\n \"\"\"\n\n with open(\"teacher.json\", \"r\") as json_file:\n teachers_data = json.load(json_file)\n\n info = {}\n for teacher_data in teachers_data:\n if teacher_data[\"id\"] == teacher_id:\n info = teacher_data\n break\n\n if info == {}:\n return jsonify(info), 500\n else:\n return jsonify(info), 200\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=8001)\n","sub_path":"demo_services/teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"521198365","text":"''' CTypes interface to the B-PROST feature set implementation, which was\n introduced in the paper below.\n\n Yitao Liang, Marlos C. Machado, Erik Talvitie, Michael H. Bowling:\n State of the Art Control of Atari Games Using Shallow Reinforcement Learning.\n AAMAS 2016: 485-493\n\n Author: Marlos C. Machado\n'''\n\nimport os\nimport ctypes\n\n# This implementation is a little bit tricky because I wanted to receive a vector,\n# passed by reference from the C++ code. To do so, I had to redefine several vector\n# functions. 
Still, ideally the unique functions that should be used explicitly are:\n# getSizeActionSet(self)\n# getActiveFeatures(self, screen)\n\nclass BPROST(object):\n # loading C++ code using CTypes\n dir_path = os.path.dirname(os.path.realpath(__file__))\n path = f'{dir_path}/BPROSTLibrary.so'\n bprost_features = ctypes.CDLL(path)\n # args and return types for constructor\n bprost_features.new_vector.restype = ctypes.c_void_p\n bprost_features.new_vector.argtypes = []\n # args and return types for destructor\n bprost_features.delete_vector.restype = None\n bprost_features.delete_vector.argtypes = [ctypes.c_void_p]\n # args and return types for clear function\n bprost_features.clear_vector.restype = None\n bprost_features.clear_vector.argtypes = [ctypes.c_void_p]\n # args and return types for size function\n bprost_features.vector_size.restype = ctypes.c_int\n bprost_features.vector_size.argtypes = [ctypes.c_void_p]\n # args and return types for get function ([] operator)\n bprost_features.vector_get.restype = ctypes.c_int\n bprost_features.vector_get.argtypes = [ctypes.c_void_p, ctypes.c_int]\n # args and return types for the functions we really care about\n bprost_features.getBROSTFeatures.restype = None\n bprost_features.getBROSTFeatures.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int), \\\n \tctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]\n bprost_features.getNumberOfFeatures.restype = ctypes.c_int\n bprost_features.getNumberOfFeatures.argtypes = [ctypes.c_void_p]\n\n # these arguments are initialized by the constructor and they are kept fixed\n _screen_height = -1\n _screen_width = -1\n _num_colors = -1\n _num_rows = -1\n _num_cols = -1\n\n # C++ constructor and initialization of Python variables\n def __init__(self, screenHeight, screenWidth, numRows, numCols, numColors):\n self._screen_height = screenHeight\n self._screen_width = screenWidth\n self._num_colors = numColors\n self._num_rows = numRows\n self._num_cols = numCols\n\n self.vector = BPROST.bprost_features.new_vector() # pointer to new vector\n\n # C++ destructor\n def __del__(self): # when reference count hits 0 in Python,\n BPROST.bprost_features.delete_vector(self.vector) # call C++ vector destructor\n\n # C++ size method\n def __len__(self):\n return BPROST.bprost_features.vector_size(self.vector)\n\n # C++ get method, which is the same as the [] operator\n def __getitem__(self, i): # access elements in vector at index\n if 0 <= i < len(self):\n return BPROST.bprost_features.vector_get(self.vector, ctypes.c_int(i))\n raise IndexError('Vector index out of range')\n\n # C++ method that allows us to properly print a vector\n def __repr__(self):\n return '[{}]'.format(', '.join(str(self[i]) for i in range(len(self))))\n\n # C++ clear method\n def _clear(self): # we can clear the vector without deleting it\n BPROST.bprost_features.clear_vector(self.vector) # call C++ vector clear\n\n # C++ implementation for obtaining B-PROS features from a screen\n def getActiveFeatures(self, screen):\n self._clear()\n BPROST.bprost_features.getBROSTFeatures(self.vector, screen,\n \tctypes.c_int(self._screen_height), ctypes.c_int(self._screen_width),\n ctypes.c_int(self._num_rows), ctypes.c_int(self._num_cols),\n ctypes.c_int(self._num_colors))\n\n # C++ implementation that returns the size of the action set\n def getSizeActionSet(self):\n return BPROST.bprost_features.getNumberOfFeatures(self._num_rows, \\\n self._num_cols, 
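# --- Editor's note (hedged sketch, not part of the original record) ---
# The restype/argtypes declarations above follow the standard ctypes recipe for
# wrapping an opaque C++ object: treat the instance as a c_void_p handle and mirror
# each exported C function exactly once. A minimal generic form (the library name and
# symbols below are hypothetical):
import ctypes

lib = ctypes.CDLL("./libexample.so")          # hypothetical shared object
lib.new_vector.restype = ctypes.c_void_p      # constructor returns an opaque handle
lib.new_vector.argtypes = []
lib.vector_size.restype = ctypes.c_int        # size(handle) -> int
lib.vector_size.argtypes = [ctypes.c_void_p]
lib.delete_vector.restype = None              # destructor frees the handle
lib.delete_vector.argtypes = [ctypes.c_void_p]

handle = lib.new_vector()
try:
    print(lib.vector_size(handle))
finally:
    lib.delete_vector(handle)                 # always pair new/delete to avoid leaks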
self._num_colors)\n","sub_path":"sandbox/wrappers/wrapper_shallow_atari/BPROSTFeatures.py","file_name":"BPROSTFeatures.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"17081415","text":"#!/usr/bin/env python\n\"\"\"\nCreate a gaussian data set and its fourier transform\n\"\"\"\nimport os, sys, Numeric, math\n\ndef main():\n\n # Create gaussian data set\n a = 50.0\n x = Numeric.arange(-3,3,0.001,typecode=Numeric.Float)\n# y = Numeric.exp(-a*x**2.0)\n\n # Analytic solution for fourier transform of above gaussian\n N = float(len(x))\n dx = x[1] - x[0]\n T = N * dx\n df = 1.0 / T\n f = Numeric.arange(N,typecode=Numeric.Float)*df\n Y = (math.pi/a)**0.5 * Numeric.exp(-4.0 * f**2.0 / a) \n\n # Write to file\n out = open('ftgaussian.dat','w')\n for i in range(len(x)):\n out.write(str(f[i])+' '+str(Y[i])+'\\n')\n out.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"FT_gaussian.py","file_name":"FT_gaussian.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"574960379","text":"# https://gist.github.com/AFAgarap/4f8a8d8edf352271fa06d85ba0361f26\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('TkAgg',warn=False, force=True)\n\nfrom src.autoencoders.basic_autoencoder import AutoEncoder\nfrom src.dataset_utils.vm_dataset import VisuomotorDataset\n\nMODEL_SAVE = \"/home/anirudh/HBRS/Master-Thesis/NJ-2020-thesis/AutoEncoders/model/\" \\\n \"gpu_ae_prototype.pth\"\nINPUT_SHAPE = (64, 64)\nINPUT_DIM = 64*64\n\nmodel = AutoEncoder(input_shape=INPUT_DIM,output_shape=10)\nmodel.load_state_dict(torch.load(MODEL_SAVE))\nmodel.eval()\n\ntransform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])\n\nDATASET_PATH = \"/home/anirudh/Desktop/main_dataset/door_5/*.png\"\ntest_dataset = VisuomotorDataset(DATASET_PATH, transform, INPUT_SHAPE)\n\ntest_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=10, shuffle=False\n)\n\ntest_examples = None\n\nwith torch.no_grad():\n for batch_features in test_loader:\n batch_features = batch_features[0]\n test_examples = batch_features.view(-1, INPUT_DIM)\n reconstruction = model(test_examples)\n break\n\nwith torch.no_grad():\n number = 5\n plt.figure(figsize=(20, 4))\n for index in range(number):\n # display original\n ax = plt.subplot(2, number, index + 1)\n plt.imshow(test_examples[index].numpy().reshape(INPUT_SHAPE))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, number, index + 1 + number)\n plt.imshow(reconstruction[index].numpy().reshape(INPUT_SHAPE))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n plt.show()","sub_path":"prototypes/inference/inference_ae.py","file_name":"inference_ae.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"605801267","text":"\"\"\"Convert all jupytext files in this repo to ipynb.\"\"\"\nfrom subprocess import run\nfrom pathlib import Path\nfrom glob import glob\n\n\ndef _is_jupytext(fname):\n return Path(fname).read_text(encoding=\"utf-8\").startswith(\"---\")\n\n\ndef create_notebooks(dest_dir=\"notebooks\"):\n dest_dir = 
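# --- Editor's note (hedged math aside, not part of the original FT_gaussian.py record) ---
# For the Gaussian above, the continuous Fourier transform is, under the
# ordinary-frequency convention,
#     F{exp(-a x^2)}(f) = sqrt(pi / a) * exp(-(pi f)^2 / a)
# and, under the angular-frequency convention,
#     sqrt(pi / a) * exp(-omega^2 / (4 a)).
# The record's exponent exp(-4 f^2 / a) matches neither form exactly, so the constant
# there may be worth double-checking against the intended convention.
import math

a, f = 50.0, 1.0
print(math.sqrt(math.pi / a) * math.exp(-(math.pi * f) ** 2 / a))  # ordinary-frequency value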
Path(dest_dir)\n dest_dir.mkdir(exist_ok=True)\n for f in glob(\"**/*.md\", recursive=True):\n if \"_build\" not in f and _is_jupytext(f):\n dest = dest_dir / f\"{Path(f).stem}.ipynb\"\n run([\"jupytext\", \"--to\", \"ipynb\", \"-o\", str(dest), f])\n\n\nif __name__ == \"__main__\":\n create_notebooks()\n","sub_path":"ipynb_convert.py","file_name":"ipynb_convert.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"597239615","text":"def dec2bin(n):\r\n stack = []\r\n while n > 0:\r\n stack.append(str(n % 2))\r\n n = n // 2\r\n stack.reverse()\r\n return stack\r\n\r\nfor i in range(32):\r\n ans = dec2bin(i)\r\n if len(ans) != 5:\r\n ans = list('0' * (5 - len(ans))) + ans\r\n print(''.join(ans))","sub_path":"蓝桥杯/basic_2.py","file_name":"basic_2.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"21735905","text":"import base64\nimport requests\n\n\nclass PBClient(object):\n def __init__(self, token, api_base_url, ssl_verify=True):\n self.token = token\n self.api_base_url = api_base_url\n self.ssl_verify = ssl_verify\n self.auth = base64.encodestring('%s:%s' % (token, '')).replace('\\n', '')\n\n def do_get(self, object_url):\n headers = {'Accept': 'text/plain',\n 'Authorization': 'Basic %s' % self.auth}\n url = '%s/%s' % (self.api_base_url, object_url)\n resp = requests.get(url, headers=headers, verify=self.ssl_verify)\n return resp\n\n def do_patch(self, object_url, payload):\n headers = {'Content-type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/plain',\n 'Authorization': 'Basic %s' % self.auth}\n url = '%s/%s' % (self.api_base_url, object_url)\n resp = requests.patch(url, data=payload, headers=headers, verify=self.ssl_verify)\n return resp\n\n def do_put(self, object_url, payload=None):\n headers = {'Content-type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/plain',\n 'Authorization': 'Basic %s' % self.auth}\n url = '%s/%s' % (self.api_base_url, object_url)\n resp = requests.put(url, data=payload, headers=headers, verify=self.ssl_verify)\n return resp\n\n def do_delete(self, object_url):\n headers = {'Content-type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/plain',\n 'Authorization': 'Basic %s' % self.auth}\n url = '%s/%s' % (self.api_base_url, object_url)\n resp = requests.delete(url, headers=headers, verify=self.ssl_verify)\n return resp\n\n def do_instance_patch(self, instance_id, payload):\n url = 'instances/%s' % instance_id\n resp = self.do_patch(url, payload)\n return resp\n\n def get_instance_description(self, instance_id):\n resp = self.do_get('instances/%s' % instance_id)\n if resp.status_code != 200:\n raise RuntimeError('Cannot fetch data for provisioned blueprints, %s' % resp.reason)\n return resp.json()\n\n def get_blueprint_description(self, blueprint_id):\n resp = self.do_get('blueprints/%s' % blueprint_id)\n if resp.status_code != 200:\n raise RuntimeError('Cannot fetch data for provisioned blueprints, %s' % resp.reason)\n return resp.json()\n\n def get_user_key_data(self, user_id):\n return self.do_get('users/%s/keypairs' % user_id)\n\n def get_instances(self):\n resp = self.do_get('instances')\n if resp.status_code != 200:\n raise RuntimeError('Cannot fetch data for instances, %s' % resp.reason)\n return resp.json()\n\n def get_instance(self, instance_id):\n resp = self.do_get('instances/%s' % instance_id)\n if resp.status_code != 200:\n raise 
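# --- Editor's sketch (add-on, not part of the original basic_2.py record above) ---
# The dec2bin + zero-padding loop reproduces Python's built-in fixed-width binary
# formatting; a quick equivalence check over the same range:
def dec2bin(n):
    stack = []
    while n > 0:
        stack.append(str(n % 2))
        n = n // 2
    stack.reverse()
    return stack


for i in range(32):
    ans = dec2bin(i)
    if len(ans) != 5:
        ans = list('0' * (5 - len(ans))) + ans
    assert ''.join(ans) == format(i, '05b')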
RuntimeError('Cannot fetch data for instances %s, %s' % (instance_id, resp.reason))\n return resp.json()\n\n def get_instance_parent_data(self, instance_id):\n blueprint_id = self.get_instance(instance_id)['blueprint_id']\n\n resp = self.do_get('blueprints/%s' % blueprint_id)\n if resp.status_code != 200:\n raise RuntimeError('Error loading blueprint data: %s, %s' % (blueprint_id, resp.reason))\n\n return resp.json()\n\n def get_plugin_data(self, plugin_id):\n resp = self.do_get('plugins/%s' % plugin_id)\n if resp.status_code != 200:\n raise RuntimeError('Error loading plugin data: %s, %s' % (plugin_id, resp.reason))\n\n return resp.json()\n\n def obtain_lock(self, lock_id):\n resp = self.do_put('locks/%s' % lock_id)\n if resp.status_code == 200:\n return lock_id\n elif resp.status_code == 409:\n return None\n else:\n raise RuntimeError('Error obtaining lock: %s, %s' % (lock_id, resp.reason))\n\n def release_lock(self, lock_id):\n resp = self.do_delete('locks/%s' % lock_id)\n if resp.status_code == 200:\n return lock_id\n else:\n raise RuntimeError('Error deleting lock: %s, %s' % (lock_id, resp.reason))\n","sub_path":"pebbles/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"5545854","text":"import asyncio\nimport base64\nimport contextlib\nimport errno\nimport logging\nfrom datetime import datetime, time\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sysctl\nimport tempfile\nimport uuid\n\nimport bsd\nimport psutil\n\nfrom libzfs import ZFSException\nfrom middlewared.alert.base import AlertCategory, AlertClass, AlertLevel, SimpleOneShotAlertClass\nfrom middlewared.job import JobProgressBuffer\nfrom middlewared.schema import (accepts, Attribute, Bool, Cron, Dict, EnumMixin, Int, List, Patch,\n Str, UnixPerm)\nfrom middlewared.service import (\n ConfigService, filterable, item_method, job, private, CallError, CRUDService, ValidationErrors\n)\nfrom middlewared.service_exception import ValidationError\nfrom middlewared.utils import Popen, filter_list, run, start_daemon_thread\nfrom middlewared.utils.asyncio_ import asyncio_map\nfrom middlewared.utils.shell import join_commandline\nfrom middlewared.validators import Range, Time\n\nlogger = logging.getLogger(__name__)\n\nGELI_KEYPATH = '/data/geli'\nRE_DISKPART = re.compile(r'^([a-z]+\\d+)(p\\d+)?')\nRE_HISTORY_ZPOOL_SCRUB = re.compile(r'^([0-9\\.\\:\\-]{19})\\s+zpool scrub', re.MULTILINE)\nRE_HISTORY_ZPOOL_CREATE = re.compile(r'^([0-9\\.\\:\\-]{19})\\s+zpool create', re.MULTILINE)\nZPOOL_CACHE_FILE = '/data/zfs/zpool.cache'\nZPOOL_KILLCACHE = '/data/zfs/killcache'\n\n\nclass ZfsDeadmanAlertClass(AlertClass, SimpleOneShotAlertClass):\n category = AlertCategory.SYSTEM\n level = AlertLevel.WARNING\n title = \"Device Is Causing Slow I/O on Pool\"\n text = \"Device %(vdev)s is causing slow I/O on pool %(pool)s.\"\n\n deleted_automatically = False\n\n hardware = True\n\n\nclass Inheritable(EnumMixin, Attribute):\n def __init__(self, *args, **kwargs):\n self.value = kwargs.pop('value')\n super(Inheritable, self).__init__(*args, **kwargs)\n\n def clean(self, value):\n if value == 'INHERIT':\n return value\n\n return self.value.clean(value)\n\n def validate(self, value):\n if value == 'INHERIT':\n return\n\n return self.value.validate(value)\n\n def to_json_schema(self, parent=None):\n schema = self.value.to_json_schema(parent)\n type_schema = schema.pop('type')\n schema['nullable'] = 'null' in 
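# --- Editor's sketch (add-on, not part of the original pebbles/client.py record) ---
# Typical PBClient call sequence, assuming the class above plus a reachable Pebbles
# API (the token and URL here are placeholders). Note the class is Python 2 era code:
# base64.encodestring was removed in Python 3.9, where base64.b64encode on bytes
# would be used instead.
client = PBClient(token="my-api-token", api_base_url="https://example.test/api/v1")

instance = client.get_instance("inst-123")        # GET instances/inst-123
client.do_instance_patch("inst-123", {"state": "running"})

lock = client.obtain_lock("provisioning-lock")    # returns None if already held
if lock:
    try:
        pass  # ... critical section ...
    finally:
        client.release_lock(lock)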
type_schema\n if schema['nullable']:\n type_schema.remove('null')\n if len(type_schema) == 1:\n type_schema = type_schema[0]\n schema['anyOf'] = [{'type': type_schema}, {'type': 'string', 'enum': ['INHERIT']}]\n return schema\n\n\ndef _none(x):\n if x in (0, None):\n return 'none'\n return x\n\n\ndef _null(x):\n if x == 'none':\n return None\n return x\n\n\nasync def is_mounted(middleware, path):\n mounted = await middleware.run_in_thread(bsd.getmntinfo)\n return any(fs.dest == path for fs in mounted)\n\n\nasync def mount(device, path, fs_type, fs_options, options):\n options = options or []\n\n if isinstance(device, str):\n device = device.encode(\"utf-8\")\n\n if isinstance(path, str):\n path = path.encode(\"utf-8\")\n\n if fs_type == \"msdosfs\":\n options.append(\"large\")\n\n executable = \"/sbin/mount\"\n arguments = []\n\n if fs_type == \"ntfs\":\n executable = \"/usr/local/bin/ntfs-3g\"\n elif fs_type == \"msdosfs\" and fs_options:\n executable = \"/sbin/mount_msdosfs\"\n if fs_options.get(\"locale\"):\n arguments.extend([\"-L\", fs_options[\"locale\"]])\n arguments.extend(sum([[\"-o\", option] for option in options], []))\n options = []\n else:\n arguments.extend([\"-t\", fs_type])\n\n if options:\n arguments.extend([\"-o\", \",\".join(options)])\n\n proc = await Popen(\n [executable] + arguments + [device, path],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding=\"utf8\",\n )\n output = await proc.communicate()\n\n if proc.returncode != 0:\n logger.debug(\"Mount failed (%s): %s\", proc.returncode, output)\n raise ValueError(\"Mount failed (exit code {0}):\\n{1}{2}\" .format(\n proc.returncode,\n output[0].decode(\"utf-8\"),\n output[1].decode(\"utf-8\"),\n ))\n else:\n return True\n\n\nclass ScrubError(CallError):\n pass\n\n\nclass PoolResilverService(ConfigService):\n\n class Config:\n namespace = 'pool.resilver'\n datastore = 'storage.resilver'\n datastore_extend = 'pool.resilver.resilver_extend'\n\n @private\n async def resilver_extend(self, data):\n data['begin'] = data['begin'].strftime('%H:%M')\n data['end'] = data['end'].strftime('%H:%M')\n data['weekday'] = [int(v) for v in data['weekday'].split(',') if v]\n return data\n\n @private\n async def validate_fields_and_update(self, data, schema):\n verrors = ValidationErrors()\n\n begin = data.get('begin')\n if begin:\n data['begin'] = time(int(begin.split(':')[0]), int(begin.split(':')[1]))\n\n end = data.get('end')\n if end:\n data['end'] = time(int(end.split(':')[0]), int(end.split(':')[1]))\n\n weekdays = data.get('weekday')\n if not weekdays:\n verrors.add(\n f'{schema}.weekday',\n 'At least one weekday should be selected'\n )\n else:\n data['weekday'] = ','.join([str(day) for day in weekdays])\n\n return verrors, data\n\n @accepts(\n Dict(\n 'pool_resilver',\n Str('begin', validators=[Time()]),\n Str('end', validators=[Time()]),\n Bool('enabled'),\n List('weekday', items=[Int('weekday', validators=[Range(min=1, max=7)])])\n )\n )\n async def do_update(self, data):\n \"\"\"\n Configure Pool Resilver Priority.\n\n If `begin` time is greater than `end` time it means it will rollover the day, e.g.\n begin = \"19:00\", end = \"05:00\" will increase pool resilver priority from 19:00 of one day\n until 05:00 of the next day.\n\n `weekday` follows crontab(5) values 0-7 (0 or 7 is Sun).\n\n .. 
examples(websocket)::\n\n Enable pool resilver priority all business days from 7PM to 5AM.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.resilver.update\",\n \"params\": [{\n \"enabled\": true,\n \"begin\": \"19:00\",\n \"end\": \"05:00\",\n \"weekday\": [1, 2, 3, 4, 5]\n }]\n }\n \"\"\"\n config = await self.config()\n original_config = config.copy()\n config.update(data)\n\n verrors, new_config = await self.validate_fields_and_update(config, 'pool_resilver_update')\n if verrors:\n raise verrors\n\n # before checking if any changes have been made, original_config needs to be mapped to new_config\n original_config['weekday'] = ','.join([str(day) for day in original_config['weekday']])\n original_config['begin'] = time(*(int(value) for value in original_config['begin'].split(':')))\n original_config['end'] = time(*(int(value) for value in original_config['end'].split(':')))\n if len(set(original_config.items()) ^ set(new_config.items())) > 0:\n # data has changed\n await self.middleware.call(\n 'datastore.update',\n self._config.datastore,\n new_config['id'],\n new_config\n )\n\n await self.middleware.call('service.restart', 'cron')\n await self.middleware.call('pool.configure_resilver_priority')\n\n return await self.config()\n\n\nclass KernelModuleContextManager:\n def __init__(self, module):\n self.module = module\n\n async def __aenter__(self):\n if self.module is not None:\n if not await self.module_loaded():\n await run('kldload', self.module, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n if not await self.module_loaded():\n raise Exception('Kernel module %r failed to load', self.module)\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n if self.module is not None:\n try:\n await run('kldunload', self.module, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n except Exception:\n pass\n\n async def module_loaded(self):\n return (await run('kldstat', '-n', self.module, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=False)).returncode == 0\n\n\nclass MountFsContextManager:\n def __init__(self, middleware, device, path, *args, **kwargs):\n self.middleware = middleware\n self.device = device\n self.path = path\n self.args = args\n self.kwargs = kwargs\n\n async def __aenter__(self):\n await mount(self.device, self.path, *self.args, **self.kwargs)\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n if await is_mounted(self.middleware, self.path):\n await self.middleware.run_in_thread(bsd.unmount, self.path)\n\n\nclass PoolService(CRUDService):\n\n GELI_KEYPATH = '/data/geli'\n\n class Config:\n datastore = 'storage.volume'\n datastore_extend = 'pool.pool_extend'\n datastore_prefix = 'vol_'\n\n @item_method\n @accepts(\n Int('id', required=True),\n Str('action', enum=['START', 'STOP', 'PAUSE'], required=True)\n )\n @job()\n async def scrub(self, job, oid, action):\n \"\"\"\n Performs a scrub action to pool of `id`.\n\n `action` can be either of \"START\", \"STOP\" or \"PAUSE\".\n\n .. 
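# --- Editor's note (hedged sketch, not part of the original middlewared record) ---
# The begin/end rollover described for pool.resilver.update ("19:00" .. "05:00" spans
# midnight) reduces to this containment test:
from datetime import time


def in_resilver_window(now, begin, end):
    if begin <= end:
        return begin <= now <= end     # same-day window
    return now >= begin or now <= end  # window wraps past midnight


assert in_resilver_window(time(23, 0), time(19, 0), time(5, 0))
assert not in_resilver_window(time(12, 0), time(19, 0), time(5, 0))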
examples(websocket)::\n\n Start scrub on pool of id 1.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.scrub\",\n \"params\": [1, \"START\"]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n return await job.wrap(\n await self.middleware.call('zfs.pool.scrub', pool['name'], action)\n )\n\n @accepts(List('types', items=[Str('type', enum=['FILESYSTEM', 'VOLUME'])], default=['FILESYSTEM', 'VOLUME']))\n async def filesystem_choices(self, types):\n \"\"\"\n Returns all available datasets, except system datasets.\n\n .. examples(websocket)::\n\n Get all datasets.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.filesystem_choices\",\n \"params\": []\n }\n\n Get only filesystems (exclude volumes).\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.filesystem_choices\",\n \"params\": [[\"FILESYSTEM\"]]\n }\n \"\"\"\n vol_names = [vol['name'] for vol in (await self.query())]\n return [\n y['name'] for y in await self.middleware.call(\n 'zfs.dataset.query',\n [\n ('name', 'rnin', '.system'),\n ('pool', 'in', vol_names),\n ('type', 'in', types),\n ],\n {'extra': {'retrieve_properties': False}},\n )\n ]\n\n @accepts(Int('id', required=True))\n @item_method\n async def is_upgraded(self, oid):\n \"\"\"\n Returns whether or not the pool of `id` is on the latest version and with all feature\n flags enabled.\n\n .. examples(websocket)::\n\n Check if pool of id 1 is upgraded.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.is_upgraded\",\n \"params\": [1]\n }\n \"\"\"\n name = (await self._get_instance(oid))['name']\n proc = await Popen(\n f'zpool get -H -o value version {name}',\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8', shell=True\n )\n res, err = await proc.communicate()\n if proc.returncode != 0:\n return True\n res = res.decode('utf8').rstrip('\\n')\n try:\n int(res)\n except ValueError:\n\n if res == '-':\n proc = await Popen(\n f\"zpool get -H -o property,value all {name}\",\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8', shell=True\n )\n data = (await proc.communicate())[0].decode('utf8').strip('\\n')\n for line in [l for l in data.split('\\n') if l.startswith('feature') and '\\t' in l]:\n prop, value = line.split('\\t', 1)\n if value not in ('active', 'enabled'):\n return False\n return True\n else:\n return False\n else:\n return False\n\n @accepts(Int('id'))\n @item_method\n async def upgrade(self, oid):\n \"\"\"\n Upgrade pool of `id` to latest version with all feature flags.\n\n .. 
examples(websocket)::\n\n Upgrade pool of id 1.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.upgrade\",\n \"params\": [1]\n }\n \"\"\"\n # Should we check first if upgrade is required ?\n await self.middleware.call(\n 'zfs.pool.upgrade',\n (await self._get_instance(oid))['name']\n )\n return True\n\n def _topology(self, x, geom_scan=True):\n \"\"\"\n Transform topology output from libzfs to add `device` and make `type` uppercase.\n \"\"\"\n if isinstance(x, dict):\n path = x.get('path')\n if path is not None:\n device = None\n if path.startswith('/dev/'):\n device = self.middleware.call_sync('disk.label_to_dev', path[5:], geom_scan)\n x['device'] = device\n x['disk'] = RE_DISKPART.sub(r'\\1', device) if device else None\n for key in x:\n if key == 'type' and isinstance(x[key], str):\n x[key] = x[key].upper()\n else:\n x[key] = self._topology(x[key], False)\n elif isinstance(x, list):\n for i, entry in enumerate(x):\n x[i] = self._topology(x[i], False)\n return x\n\n @private\n def pool_extend(self, pool):\n\n \"\"\"\n If pool is encrypted we need to check if the pool is imported\n or if all geli providers exist.\n \"\"\"\n pool['path'] = f'/mnt/{pool[\"name\"]}'\n try:\n zpool = self.middleware.call_sync('zfs.pool.query', [('id', '=', pool['name'])])[0]\n except Exception:\n zpool = None\n\n if zpool:\n pool.update({\n 'status': zpool['status'],\n 'scan': zpool['scan'],\n 'topology': self._topology(zpool['groups']),\n 'healthy': zpool['healthy'],\n 'status_detail': zpool['status_detail'],\n })\n else:\n pool.update({\n 'status': 'OFFLINE',\n 'scan': None,\n 'topology': None,\n 'healthy': False,\n 'status_detail': None,\n })\n\n if pool['encrypt'] > 0:\n if zpool:\n pool['is_decrypted'] = True\n else:\n decrypted = True\n for ed in self.middleware.call_sync('datastore.query', 'storage.encrypteddisk', [('encrypted_volume', '=', pool['id'])]):\n if not os.path.exists(f'/dev/{ed[\"encrypted_provider\"]}.eli'):\n decrypted = False\n break\n pool['is_decrypted'] = decrypted\n pool['encryptkey_path'] = os.path.join(GELI_KEYPATH, f'{pool[\"encryptkey\"]}.key')\n else:\n pool['encryptkey_path'] = None\n pool['is_decrypted'] = True\n return pool\n\n @accepts(Dict(\n 'pool_create',\n Str('name', required=True),\n Bool('encryption', default=False),\n Str('deduplication', enum=[None, 'ON', 'VERIFY', 'OFF'], default=None, null=True),\n Dict(\n 'topology',\n List('data', items=[\n Dict(\n 'datavdevs',\n Str('type', enum=['RAIDZ1', 'RAIDZ2', 'RAIDZ3', 'MIRROR', 'STRIPE'], required=True),\n List('disks', items=[Str('disk')], required=True),\n ),\n ], required=True),\n List('cache', items=[\n Dict(\n 'cachevdevs',\n Str('type', enum=['STRIPE'], required=True),\n List('disks', items=[Str('disk')], required=True),\n ),\n ]),\n List('log', items=[\n Dict(\n 'logvdevs',\n Str('type', enum=['STRIPE', 'MIRROR'], required=True),\n List('disks', items=[Str('disk')], required=True),\n ),\n ]),\n List('spares', items=[Str('disk')], default=[]),\n required=True,\n ),\n register=True,\n ))\n @job(lock='pool_createupdate')\n async def do_create(self, job, data):\n \"\"\"\n Create a new ZFS Pool.\n\n `topology` is a object which requires at least one `data` entry.\n All of `data` entries (vdevs) require to be of the same type.\n\n `encryption` when set to true means that the pool is encrypted.\n\n `deduplication` when set to ON or VERIFY makes sure that no block of data is duplicated in the pool. 
When\n VERIFY is specified, if two blocks have similar signatures, byte to byte comparison is performed to ensure that\n the blocks are identical. This should be used in special circumstances as it carries a significant overhead.\n\n Example of `topology`:\n\n {\n \"data\": [\n {\"type\": \"RAIDZ1\", \"disks\": [\"da1\", \"da2\", \"da3\"]}\n ],\n \"cache\": [\n {\"type\": \"STRIPE\", \"disks\": [\"da4\"]}\n ],\n \"log\": [\n {\"type\": \"RAIDZ1\", \"disks\": [\"da5\"]}\n ],\n \"spares\": [\"da6\"]\n }\n\n\n .. examples(websocket)::\n\n Create a pool named \"tank\", raidz1 with 3 disks, 1 cache disk, 1 ZIL/log disk\n and 1 hot spare disk.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.create\",\n \"params\": [{\n \"name\": \"tank\",\n \"topology\": {\n \"data\": [\n {\"type\": \"RAIDZ1\", \"disks\": [\"da1\", \"da2\", \"da3\"]}\n ],\n \"cache\": [\n {\"type\": \"STRIPE\", \"disks\": [\"da4\"]}\n ],\n \"log\": [\n {\"type\": \"RAIDZ1\", \"disks\": [\"da5\"]}\n ],\n \"spares\": [\"da6\"]\n }\n }]\n }\n \"\"\"\n\n verrors = ValidationErrors()\n\n if await self.middleware.call('pool.query', [('name', '=', data['name'])]):\n verrors.add('pool_create.name', 'A pool with this name already exists.', errno.EEXIST)\n\n if not data['topology']['data']:\n verrors.add('pool_create.topology.data', 'At least one data vdev is required')\n\n await self.__common_validation(verrors, data, 'pool_create')\n disks, vdevs = await self.__convert_topology_to_vdevs(data['topology'])\n disks_cache = await self.__check_disks_availability(verrors, disks)\n\n if verrors:\n raise verrors\n\n if data['encryption']:\n enc_key = str(uuid.uuid4())\n enc_keypath = os.path.join(GELI_KEYPATH, f'{enc_key}.key')\n else:\n enc_key = ''\n enc_keypath = None\n\n enc_disks = await self.__format_disks(job, disks, enc_keypath)\n\n options = {\n 'feature@lz4_compress': 'enabled',\n 'altroot': '/mnt',\n 'cachefile': ZPOOL_CACHE_FILE,\n 'failmode': 'continue',\n 'autoexpand': 'on',\n }\n\n fsoptions = {\n 'compression': 'lz4',\n 'aclmode': 'passthrough',\n 'aclinherit': 'passthrough',\n 'mountpoint': f'/{data[\"name\"]}',\n }\n\n dedup = data.get('deduplication')\n if dedup:\n fsoptions['dedup'] = dedup.lower()\n\n cachefile_dir = os.path.dirname(ZPOOL_CACHE_FILE)\n if not os.path.isdir(cachefile_dir):\n os.makedirs(cachefile_dir)\n\n job.set_progress(90, 'Creating ZFS Pool')\n z_pool = await self.middleware.call('zfs.pool.create', {\n 'name': data['name'],\n 'vdevs': vdevs,\n 'options': options,\n 'fsoptions': fsoptions,\n })\n\n job.set_progress(95, 'Setting pool options')\n pool_id = None\n try:\n # Inherit mountpoint after create because we set mountpoint on creation\n # making it a \"local\" source.\n await self.middleware.call('zfs.dataset.update', data['name'], {\n 'properties': {\n 'mountpoint': {'source': 'INHERIT'},\n },\n })\n await self.middleware.call('zfs.dataset.mount', data['name'])\n\n pool = {\n 'name': data['name'],\n 'guid': z_pool['guid'],\n 'encrypt': int(data['encryption']),\n 'encryptkey': enc_key,\n }\n pool_id = await self.middleware.call(\n 'datastore.insert',\n 'storage.volume',\n pool,\n {'prefix': 'vol_'},\n )\n\n await self.__save_encrypteddisks(pool_id, enc_disks, disks_cache)\n\n await self.middleware.call(\n 'datastore.insert',\n 'storage.scrub',\n {'volume': pool_id},\n {'prefix': 'scrub_'},\n )\n except Exception as e:\n # Something wrong happened, we need to rollback and destroy pool.\n try:\n await 
self.middleware.call('zfs.pool.delete', data['name'])\n except Exception:\n self.logger.warn('Failed to delete pool on pool.create rollback', exc_info=True)\n if pool_id:\n await self.middleware.call('datastore.delete', 'storage.volume', pool_id)\n raise e\n\n # There is really no point in waiting all these services to reload so do them\n # in background.\n async def restart_services():\n await self.middleware.call('service.reload', 'disk')\n if (await self.middleware.call('systemdataset.config'))['pool'] == 'freenas-boot':\n await self.middleware.call('service.restart', 'system_datasets')\n # regenerate crontab because of scrub\n await self.middleware.call('service.restart', 'cron')\n\n asyncio.ensure_future(restart_services())\n\n pool = await self._get_instance(pool_id)\n await self.middleware.call_hook('pool.post_create_or_update', pool=pool)\n return pool\n\n @accepts(Int('id'), Patch(\n 'pool_create', 'pool_update',\n ('rm', {'name': 'name'}),\n ('rm', {'name': 'encryption'}),\n ('edit', {'name': 'topology', 'method': lambda x: setattr(x, 'update', True)}),\n ))\n @job(lock='pool_createupdate')\n async def do_update(self, job, id, data):\n \"\"\"\n Update pool of `id`, adding the new topology.\n\n The `type` of `data` must be the same of existing vdevs.\n\n .. examples(websocket)::\n\n Add a new set of raidz1 to pool of id 1.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.update\",\n \"params\": [1, {\n \"topology\": {\n \"data\": [\n {\"type\": \"RAIDZ1\", \"disks\": [\"da7\", \"da8\", \"da9\"]}\n ]\n }\n }]\n }\n \"\"\"\n pool = await self._get_instance(id)\n\n verrors = ValidationErrors()\n\n await self.__common_validation(verrors, data, 'pool_update', old=pool)\n disks, vdevs = await self.__convert_topology_to_vdevs(data['topology'])\n disks_cache = await self.__check_disks_availability(verrors, disks, 'pool_update')\n\n if verrors:\n raise verrors\n\n if pool['encryptkey']:\n enc_keypath = os.path.join(GELI_KEYPATH, f'{pool[\"encryptkey\"]}.key')\n else:\n enc_keypath = None\n\n enc_disks = await self.__format_disks(job, disks, enc_keypath)\n\n job.set_progress(90, 'Extending ZFS Pool')\n\n extend_job = await self.middleware.call('zfs.pool.extend', pool['name'], vdevs)\n await extend_job.wait()\n\n if extend_job.error:\n raise CallError(extend_job.error)\n\n await self.__save_encrypteddisks(id, enc_disks, disks_cache)\n\n if pool['encrypt'] >= 2:\n # FIXME: ask current passphrase and validate\n await self.middleware.call('disk.geli_passphrase', pool, None)\n await self.middleware.call(\n 'datastore.update', 'storage.volume', id, {'encrypt': 1}, {'prefix': 'vol_'},\n )\n\n pool = await self._get_instance(id)\n await self.middleware.call_hook('pool.post_create_or_update', pool=pool)\n return pool\n\n async def __common_validation(self, verrors, data, schema_name, old=None):\n topology_data = list(data['topology'].get('data') or [])\n\n if old:\n def disk_to_stripe():\n \"\"\"\n We need to convert the original topology to use STRIPE\n instead of DISK to match the user input data\n \"\"\"\n rv = []\n spare = None\n for i in old['topology']['data']:\n if i['type'] == 'DISK':\n if spare is None:\n spare = {\n 'type': 'STRIPE',\n 'disks': [i['path']],\n }\n rv.append(spare)\n else:\n spare['disks'].append(i['path'])\n else:\n rv.append({\n 'type': i['type'],\n 'disks': [j['type'] for j in i['children']],\n })\n return rv\n\n topology_data += disk_to_stripe()\n lastdatatype = None\n for i, vdev in 
enumerate(topology_data):\n numdisks = len(vdev['disks'])\n minmap = {\n 'STRIPE': 1,\n 'MIRROR': 2,\n 'RAIDZ1': 3,\n 'RAIDZ2': 4,\n 'RAIDZ3': 5,\n }\n mindisks = minmap[vdev['type']]\n if numdisks < mindisks:\n verrors.add(\n f'{schema_name}.topology.data.{i}.disks',\n f'You need at least {mindisks} disk(s) for this vdev type.',\n )\n\n if lastdatatype and lastdatatype != vdev['type']:\n verrors.add(\n f'{schema_name}.topology.data.{i}.type',\n 'You are not allowed to create a pool with different data vdev types '\n f'({lastdatatype} and {vdev[\"type\"]}).',\n )\n lastdatatype = vdev['type']\n\n for i in ('cache', 'log', 'spare'):\n value = data['topology'].get(i)\n if value and len(value) > 1:\n verrors.add(\n f'{schema_name}.topology.{i}',\n f'Only one row for the virtual device of type {i} is allowed.',\n )\n\n async def __convert_topology_to_vdevs(self, topology):\n # We do two things here:\n # 1. Gather all disks transversing the topology\n # 2. Keep track of the vdev each disk is supposed to be located\n # along with a flag whether we should use swap partition in said vdev\n # This is required so we can format all disks in one pass, allowing it\n # to be performed in parallel if we wish to do so.\n disks = {}\n vdevs = []\n for i in ('data', 'cache', 'log'):\n t_vdevs = topology.get(i)\n if not t_vdevs:\n continue\n for t_vdev in t_vdevs:\n vdev_devs_list = []\n vdev = {\n 'root': i.upper(),\n 'type': t_vdev['type'],\n 'devices': vdev_devs_list,\n }\n vdevs.append(vdev)\n # cache and log devices should not have a swap\n create_swap = True if i == 'data' else False\n for disk in t_vdev['disks']:\n disks[disk] = {'vdev': vdev_devs_list, 'create_swap': create_swap}\n\n if topology.get('spares'):\n vdev_devs_list = []\n vdevs.append({\n 'root': 'SPARE',\n 'type': 'STRIPE',\n 'devices': vdev_devs_list,\n })\n for disk in topology['spares']:\n disks[disk] = {'vdev': vdev_devs_list, 'create_swap': True}\n\n return disks, vdevs\n\n async def __check_disks_availability(self, verrors, disks, schema='pool_create'):\n \"\"\"\n Makes sure the disks are present in the system and not reserved\n by anything else (boot, pool, iscsi, etc).\n\n Returns:\n dict - disk.query for all disks\n \"\"\"\n disks_cache = dict(map(\n lambda x: (x['devname'], x),\n await self.middleware.call(\n 'disk.query', [('devname', 'in', list(disks.keys()))]\n )\n ))\n\n disks_set = set(disks.keys())\n disks_not_in_cache = disks_set - set(disks_cache.keys())\n if disks_not_in_cache:\n verrors.add(\n f'{schema}.topology',\n f'The following disks were not found in system: {\",\" .join(disks_not_in_cache)}.'\n )\n\n disks_reserved = await self.middleware.call('disk.get_reserved')\n disks_reserved = disks_set - (disks_set - set(disks_reserved))\n if disks_reserved:\n verrors.add(\n f'{schema}.topology',\n f'The following disks are already in use: {\",\" .join(disks_reserved)}.'\n )\n return disks_cache\n\n async def __format_disks(self, job, disks, enc_keypath, passphrase=None):\n \"\"\"\n Format all disks, putting all freebsd-zfs partitions created\n into their respectives vdevs.\n \"\"\"\n\n # Make sure all SED disks are unlocked\n await self.middleware.call('disk.sed_unlock_all')\n\n swapgb = (await self.middleware.call('system.advanced.config'))['swapondrive']\n\n enc_disks = []\n formatted = 0\n\n async def format_disk(arg):\n nonlocal enc_disks, formatted\n disk, config = arg\n await self.middleware.call(\n 'disk.format', disk, swapgb if config['create_swap'] else 0, False,\n )\n devname = await 
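# --- Editor's sketch (add-on, not part of the original middlewared record) ---
# The vdev validation above enforces a minimum disk count per layout via minmap;
# the same check in standalone form:
MIN_DISKS = {'STRIPE': 1, 'MIRROR': 2, 'RAIDZ1': 3, 'RAIDZ2': 4, 'RAIDZ3': 5}


def check_vdev(vdev_type, disks):
    mindisks = MIN_DISKS[vdev_type]
    if len(disks) < mindisks:
        raise ValueError(f'You need at least {mindisks} disk(s) for this vdev type.')


check_vdev('RAIDZ1', ['da1', 'da2', 'da3'])   # ok
# check_vdev('RAIDZ2', ['da1'])               # would raise: RAIDZ2 needs 4 disks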
self.middleware.call('disk.gptid_from_part_type', disk, 'freebsd-zfs')\n if enc_keypath:\n enc_disks.append({\n 'disk': disk,\n 'devname': devname,\n })\n devname = await self.middleware.call('disk.encrypt', devname, enc_keypath, passphrase)\n formatted += 1\n job.set_progress(15, f'Formatting disks ({formatted}/{len(disks)})')\n config['vdev'].append(f'/dev/{devname}')\n\n job.set_progress(15, f'Formatting disks (0/{len(disks)})')\n await asyncio_map(format_disk, disks.items(), limit=16)\n\n await self.middleware.call('disk.sync_all')\n\n return enc_disks\n\n async def __save_encrypteddisks(self, pool_id, enc_disks, disks_cache):\n for enc_disk in enc_disks:\n await self.middleware.call(\n 'datastore.insert',\n 'storage.encrypteddisk',\n {\n 'volume': pool_id,\n 'disk': disks_cache[enc_disk['disk']]['identifier'],\n 'provider': enc_disk['devname'],\n },\n {'prefix': 'encrypted_'},\n )\n\n @item_method\n @accepts(Int('id', required=False, default=None, null=True))\n async def get_disks(self, oid):\n \"\"\"\n Get all disks in use by pools.\n If `id` is provided only the disks from the given pool `id` will be returned.\n \"\"\"\n filters = []\n if oid:\n filters.append(('id', '=', oid))\n for pool in await self.query(filters):\n if pool['is_decrypted'] and pool['status'] != 'OFFLINE':\n for i in await self.middleware.call('zfs.pool.get_disks', pool['name']):\n yield i\n else:\n for encrypted_disk in await self.middleware.call(\n 'datastore.query',\n 'storage.encrypteddisk',\n [('encrypted_volume', '=', pool['id'])]\n ):\n # Use provider and not disk because a disk is not a guarantee\n # to point to correct device if its locked and its not in the system\n # (e.g. temporarily). See #50291\n prov = encrypted_disk[\"encrypted_provider\"]\n if not prov:\n continue\n\n disk_name = await self.middleware.call('disk.label_to_disk', prov)\n if not disk_name:\n continue\n\n disk = await self.middleware.call('disk.query', [('name', '=', disk_name)])\n if not disk:\n continue\n disk = disk[0]\n\n if os.path.exists(os.path.join(\"/dev\", disk['devname'])):\n yield disk['devname']\n\n @item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('label', required=True),\n Str('disk', required=True),\n Bool('force', default=False),\n Str('passphrase', private=True),\n ))\n @job(lock='pool_replace')\n async def replace(self, job, oid, options):\n \"\"\"\n Replace a disk on a pool.\n\n `label` is the ZFS guid or a device name\n `disk` is the identifier of a disk\n\n .. 
examples(websocket)::\n\n Replace missing ZFS device with disk {serial}FOO.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.replace\",\n \"params\": [1, {\n \"label\": \"80802394992848654\",\n \"disk\": \"{serial}FOO\"\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n verrors = ValidationErrors()\n\n unused_disks = await self.middleware.call('disk.get_unused')\n disk = list(filter(lambda x: x['identifier'] == options['disk'], unused_disks))\n if not disk:\n verrors.add('options.disk', 'Disk not found.', errno.ENOENT)\n else:\n disk = disk[0]\n\n if not options['force'] and not await self.middleware.call(\n 'disk.check_clean', disk['devname']\n ):\n verrors.add('options.force', 'Disk is not clean, partitions were found.')\n\n if pool['encrypt'] == 2:\n if not options.get('passphrase'):\n verrors.add('options.passphrase', 'Passphrase is required for encrypted pool.')\n elif not await self.middleware.call(\n 'disk.geli_testkey', pool, options['passphrase']\n ):\n verrors.add('options.passphrase', 'Passphrase is not valid.')\n\n found = self.__find_disk_from_topology(options['label'], pool)\n\n if not found:\n verrors.add('options.label', f'Label {options[\"label\"]} not found.', errno.ENOENT)\n\n if verrors:\n raise verrors\n\n if found[0] in ('data', 'spare'):\n create_swap = True\n else:\n create_swap = False\n\n swap_disks = [disk['devname']]\n # If the disk we are replacing is still available, remove it from swap as well\n if found[1] and os.path.exists(found[1]['path']):\n from_disk = await self.middleware.call(\n 'disk.label_to_disk', found[1]['path'].replace('/dev/', '')\n )\n if from_disk:\n swap_disks.append(from_disk)\n\n await self.middleware.call('disk.swaps_remove_disks', swap_disks)\n\n vdev = []\n passphrase_path = None\n if options.get('passphrase'):\n passf = tempfile.NamedTemporaryFile(mode='w+', dir='/tmp/')\n os.chmod(passf.name, 0o600)\n passf.write(options['passphrase'])\n passf.flush()\n passphrase_path = passf.name\n try:\n enc_disks = await self.__format_disks(\n job,\n {disk['devname']: {'vdev': vdev, 'create_swap': create_swap}},\n pool['encryptkey_path'],\n passphrase_path,\n )\n finally:\n if passphrase_path:\n passf.close()\n\n new_devname = vdev[0].replace('/dev/', '')\n\n try:\n await self.middleware.call(\n 'zfs.pool.replace', pool['name'], options['label'], new_devname\n )\n # If we are replacing a faulted disk, kick it right after replace\n # is initiated.\n try:\n vdev = await self.middleware.call(\n 'zfs.pool.get_vdev', pool['name'], options['label'],\n )\n if vdev['status'] not in ('ONLINE', 'DEGRADED'):\n await self.middleware.call('zfs.pool.detach', pool['name'], options['label'])\n except Exception:\n self.logger.warn('Failed to detach device', exc_info=True)\n except Exception as e:\n try:\n # If replace has failed lets detach geli to not keep disk busy\n await self.middleware.call('disk.geli_detach_single', new_devname)\n except Exception:\n self.logger.warn(f'Failed to geli detach {new_devname}', exc_info=True)\n raise e\n finally:\n # Needs to happen even if replace failed to put back disk that had been\n # removed from swap prior to replacement\n await self.middleware.call('disk.swaps_configure')\n\n await self.__save_encrypteddisks(oid, enc_disks, {disk['devname']: disk})\n\n return True\n\n def __find_disk_from_topology(self, label, pool):\n check = []\n found = None\n for root, children in pool['topology'].items():\n check.append((root, children))\n\n while 
check:\n root, children = check.pop()\n for c in children:\n if c['type'] == 'DISK':\n if label in (c['path'].replace('/dev/', ''), c['guid']):\n found = (root, c)\n break\n if c['children']:\n check.append((root, c['children']))\n return found\n\n @item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('label', required=True),\n ))\n async def detach(self, oid, options):\n \"\"\"\n Detach a disk from pool of id `id`.\n\n `label` is the vdev guid or device name.\n\n .. examples(websocket)::\n\n Detach ZFS device.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.detach\",\n \"params\": [1, {\n \"label\": \"80802394992848654\"\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n verrors = ValidationErrors()\n found = self.__find_disk_from_topology(options['label'], pool)\n if not found:\n verrors.add('options.label', f'Label {options[\"label\"]} not found on this pool.')\n if verrors:\n raise verrors\n\n disk = await self.middleware.call(\n 'disk.label_to_disk', found[1]['path'].replace('/dev/', '')\n )\n if disk:\n await self.middleware.call('disk.swaps_remove_disks', [disk])\n\n await self.middleware.call('zfs.pool.detach', pool['name'], found[1]['guid'])\n\n await self.middleware.call('pool.sync_encrypted', oid)\n\n if disk:\n await self.middleware.call('disk.unlabel', disk)\n\n return True\n\n @item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('label', required=True),\n ))\n async def offline(self, oid, options):\n \"\"\"\n Offline a disk from pool of id `id`.\n\n `label` is the vdev guid or device name.\n\n .. examples(websocket)::\n\n Offline ZFS device.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.offline\",\n \"params\": [1, {\n \"label\": \"80802394992848654\"\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n verrors = ValidationErrors()\n found = self.__find_disk_from_topology(options['label'], pool)\n if not found:\n verrors.add('options.label', f'Label {options[\"label\"]} not found on this pool.')\n if verrors:\n raise verrors\n\n disk = await self.middleware.call(\n 'disk.label_to_disk', found[1]['path'].replace('/dev/', '')\n )\n await self.middleware.call('disk.swaps_remove_disks', [disk])\n\n await self.middleware.call('zfs.pool.offline', pool['name'], found[1]['guid'])\n\n if found[1]['path'].endswith('.eli'):\n devname = found[1]['path'].replace('/dev/', '')[:-4]\n await self.middleware.call('disk.geli_detach_single', devname)\n await self.middleware.call(\n 'datastore.delete',\n 'storage.encrypteddisk',\n [('encrypted_volume', '=', oid), ('encrypted_provider', '=', devname)],\n )\n return True\n\n 
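# NB: offlining a GELI-backed member also detaches its .eli provider and\n # deletes the matching storage.encrypteddisk row, so the provider can only\n # come back through the unlock path; presumably this is why `online` below\n # refuses encrypted pools outright.\n\n 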
@item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('label', required=True),\n ))\n async def online(self, oid, options):\n \"\"\"\n Online a disk from pool of id `id`.\n\n `label` is the vdev guid or device name.\n\n .. examples(websocket)::\n\n Online ZFS device.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.online\",\n \"params\": [1, {\n \"label\": \"80802394992848654\"\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n verrors = ValidationErrors()\n\n found = self.__find_disk_from_topology(options['label'], pool)\n if not found:\n verrors.add('options.label', f'Label {options[\"label\"]} not found on this pool.')\n\n if pool['encrypt'] > 0:\n verrors.add('id', 'Disk cannot be set to online in encrypted pool.')\n\n if verrors:\n raise verrors\n\n await self.middleware.call('zfs.pool.online', pool['name'], found[1]['guid'])\n\n disk = await self.middleware.call(\n 'disk.label_to_disk', found[1]['path'].replace('/dev/', '')\n )\n if disk:\n await self.middleware.call('disk.swaps_configure')\n\n return True\n\n @item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('label', required=True),\n ))\n async def remove(self, oid, options):\n \"\"\"\n Remove a disk from pool of id `id`.\n\n `label` is the vdev guid or device name.\n\n Error codes:\n\n EZFS_NOSPC(2032): out of space to remove a device\n EZFS_NODEVICE(2017): no such device in pool\n EZFS_NOREPLICAS(2019): no valid replicas\n\n .. examples(websocket)::\n\n Remove ZFS device.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.remove\",\n \"params\": [1, {\n \"label\": \"80802394992848654\"\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n verrors = ValidationErrors()\n\n found = self.__find_disk_from_topology(options['label'], pool)\n if not found:\n verrors.add('options.label', f'Label {options[\"label\"]} not found on this pool.')\n\n if verrors:\n raise verrors\n\n await self.middleware.call('zfs.pool.remove', pool['name'], found[1]['guid'])\n\n await self.middleware.call('pool.sync_encrypted', oid)\n\n if found[1]['path'].endswith('.eli'):\n devname = found[1]['path'].replace('/dev/', '')[:-4]\n await self.middleware.call('disk.geli_detach_single', devname)\n\n disk = await self.middleware.call(\n 'disk.label_to_disk', found[1]['path'].replace('/dev/', '')\n )\n if disk:\n await self.middleware.call('disk.swaps_remove_disks', [disk])\n await self.middleware.call('disk.unlabel', disk)\n\n @item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('passphrase', private=True, required=True, null=True),\n Str('admin_password', private=True),\n ))\n async def passphrase(self, oid, options):\n \"\"\"\n Create/Change/Remove passphrase for an encrypted pool.\n\n Setting passphrase to null will remove the passphrase.\n `admin_password` is required when changing or removing passphrase.\n\n .. examples(websocket)::\n\n Change passphrase for pool 1.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.passphrase\",\n \"params\": [1, {\n \"passphrase\": \"mysecretpassphrase\",\n \"admin_password\": \"rootpassword\"\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n verrors = await self.__common_encopt_validation(pool, options)\n\n if (\n pool['name'] == (await self.middleware.call('systemdataset.config'))['pool'] and (\n pool['encrypt'] == 1 or (pool['encrypt'] == 2 and options['passphrase'])\n )\n ):\n # Only allow removing the passphrase for pools used by the system dataset service\n verrors.add(\n 'id',\n f'Pool {pool[\"name\"]} contains the system dataset. 
Passphrases are not allowed on the '\n 'system dataset pool.'\n )\n\n # For historical reasons (API v1.0 compatibility) we only require\n # admin_password when changing/removing passphrase\n if pool['encrypt'] == 2 and not options.get('admin_password'):\n verrors.add('options.admin_password', 'This attribute is required.')\n\n verrors.check()\n\n await self.middleware.call('disk.geli_passphrase', pool, options['passphrase'], True)\n\n if pool['encrypt'] == 1 and options['passphrase']:\n await self.middleware.call(\n 'datastore.update', 'storage.volume', oid, {'vol_encrypt': 2}\n )\n elif pool['encrypt'] == 2 and not options['passphrase']:\n await self.middleware.call(\n 'datastore.update', 'storage.volume', oid, {'vol_encrypt': 1}\n )\n return True\n\n async def __common_encopt_validation(self, pool, options):\n verrors = ValidationErrors()\n\n if pool['encrypt'] == 0:\n verrors.add('id', 'Pool is not encrypted.')\n\n # The admin password is optional; it's the client's choice whether\n # to enforce it or not.\n if 'admin_password' in options and not await self.middleware.call(\n 'auth.check_user', 'root', options['admin_password']\n ):\n verrors.add('options.admin_password', 'Invalid admin password.')\n\n if verrors:\n raise verrors\n return verrors\n\n @item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('admin_password', private=True, required=False),\n ))\n async def rekey(self, oid, options):\n \"\"\"\n Rekey encrypted pool `id`.\n\n .. examples(websocket)::\n\n Rekey pool 1.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.rekey\",\n \"params\": [1, {\n \"admin_password\": \"rootpassword\"\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n await self.__common_encopt_validation(pool, options)\n\n await self.middleware.call('disk.geli_rekey', pool)\n\n if pool['encrypt'] == 2:\n await self.middleware.call(\n 'datastore.update', 'storage.volume', oid, {'vol_encrypt': 1}\n )\n\n await self.middleware.call_hook('pool.rekey_done', pool=pool)\n return True\n\n @item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('admin_password', private=True, required=False),\n ))\n @job(lock=lambda x: f'pool_reckey_{x[0]}', pipes=['output'])\n async def recoverykey_add(self, job, oid, options):\n \"\"\"\n Add Recovery key for encrypted pool `id`.\n\n This is to be used with `core.download` which will provide a URL\n to download the recovery key.\n \"\"\"\n pool = await self._get_instance(oid)\n\n await self.__common_encopt_validation(pool, options)\n\n reckey = await self.middleware.call('disk.geli_recoverykey_add', pool)\n\n job.pipes.output.w.write(base64.b64decode(reckey))\n job.pipes.output.w.close()\n\n return True\n\n @item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('admin_password', private=True, required=False),\n ))\n async def recoverykey_rm(self, oid, options):\n \"\"\"\n Remove recovery key for encrypted pool `id`.\n\n .. 
examples(websocket)::\n\n Remove recovery key for pool 1.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.recoverykey_rm\",\n \"params\": [1, {\n \"admin_password\": \"rootpassword\"\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n await self.__common_encopt_validation(pool, options)\n\n await self.middleware.call('disk.geli_recoverykey_rm', pool)\n\n return True\n\n @accepts(Int('id'))\n async def unlock_services_restart_choices(self, oid):\n \"\"\"\n Get a mapping of service identifiers and labels that can be restarted\n on volume unlock.\n \"\"\"\n pool = await self._get_instance(oid)\n\n services = {\n 'afp': 'AFP',\n 'cifs': 'SMB',\n 'ftp': 'FTP',\n 'iscsitarget': 'iSCSI',\n 'nfs': 'NFS',\n 'webdav': 'WebDAV',\n }\n\n result = {}\n\n for k, v in services.items():\n service = await self.middleware.call('service.query', [['service', '=', k]], {'get': True})\n if service['enable'] or service['state'] == 'RUNNING':\n result[k] = v\n\n try:\n activated_pool = await self.middleware.call('jail.get_activated_pool')\n except Exception:\n activated_pool = None\n # If iocage is not activated yet, there is a chance that this pool might have it activated there\n if activated_pool is None:\n result['jails'] = 'Jails/Plugins'\n\n if await self._unlock_restarted_vms(pool['name']):\n result['vms'] = 'Virtual Machines'\n\n return result\n\n async def _unlock_restarted_vms(self, pool_name):\n result = []\n vms = (await self.middleware.call(\n 'vm.query', [('autostart', '=', True)])\n )\n for vm in vms:\n for device in vm['devices']:\n if device['dtype'] not in ('DISK', 'RAW'):\n continue\n\n path = device['attributes'].get('path')\n if not path:\n continue\n\n if path.startswith(f'/dev/zvol/{pool_name}/') or path.startswith(f'/mnt/{pool_name}/'):\n result.append(vm)\n break\n\n return result\n\n 
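# Usage sketch (hypothetical client session; assumes a local middlewared,\n # pool id 1, and that `Client.call(..., job=True)` waits for the job):\n #\n # from middlewared.client import Client\n # with Client() as c:\n # choices = c.call('pool.unlock_services_restart_choices', 1)\n # c.call('pool.unlock', 1, {'passphrase': 'secret',\n # 'services_restart': list(choices)}, job=True)\n\n 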
@item_method\n @accepts(Int('id'), Dict(\n 'options',\n Str('passphrase', private=True, required=False),\n Bool('recoverykey', default=False),\n List('services_restart', default=[]),\n ))\n @job(lock='unlock_pool', pipes=['input'], check_pipes=False)\n async def unlock(self, job, oid, options):\n \"\"\"\n Unlock encrypted pool `id`.\n\n `passphrase` is required if a recovery key is not provided.\n\n If `recoverykey` is true this method expects the recovery key file to be uploaded using\n the /_upload/ endpoint.\n\n `services_restart` is a list of services to be restarted when the pool gets unlocked.\n Said list can be retrieved using `pool.unlock_services_restart_choices`.\n\n .. examples(websocket)::\n\n Unlock pool of id 1, restarting \"cifs\" service.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.unlock\",\n \"params\": [1, {\n \"passphrase\": \"mysecretpassphrase\",\n \"services_restart\": [\"cifs\"]\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n verrors = ValidationErrors()\n\n if pool['encrypt'] == 0:\n verrors.add('id', 'Pool is not encrypted.')\n elif pool['status'] != 'OFFLINE':\n verrors.add('id', 'Pool already unlocked.')\n\n if options.get('passphrase') and options['recoverykey']:\n verrors.add(\n 'options.passphrase', 'Either provide a passphrase or a recovery key, not both.'\n )\n elif not options.get('passphrase') and not options['recoverykey']:\n verrors.add(\n 'options.passphrase', 'Provide a passphrase or a recovery key.'\n )\n\n if verrors:\n raise verrors\n\n if options['recoverykey']:\n job.check_pipe(\"input\")\n with tempfile.NamedTemporaryFile(mode='wb+', dir='/tmp/') as f:\n os.chmod(f.name, 0o600)\n await self.middleware.run_in_thread(shutil.copyfileobj, job.pipes.input.r, f)\n await self.middleware.run_in_thread(f.flush)\n failed = await self.middleware.call('disk.geli_attach', pool, None, f.name)\n else:\n failed = await self.middleware.call('disk.geli_attach', pool, options['passphrase'])\n\n # We need to try to import the pool even if some disks failed to attach\n try:\n await self.middleware.call('zfs.pool.import_pool', pool['guid'], {\n 'altroot': '/mnt',\n 'cachefile': ZPOOL_CACHE_FILE,\n })\n except Exception as e:\n # mounting filesystems may fail if we have readonly datasets as parent\n if not isinstance(e, ZFSException) or e.code.name != 'MOUNTFAILED':\n detach_failed = await self.middleware.call('disk.geli_detach', pool)\n if failed > 0:\n msg = f'Pool could not be imported: {failed} devices failed to decrypt.'\n if detach_failed > 0:\n msg += (\n f' {detach_failed} devices failed to detach and were left decrypted.'\n )\n raise CallError(msg)\n elif detach_failed > 0:\n self.logger.warn('Pool %s failed to import', pool['name'], exc_info=True)\n raise CallError(f'Pool could not be imported ({detach_failed} devices left decrypted): {str(e)}')\n raise e\n\n await self.middleware.call('pool.sync_encrypted', oid)\n\n await self.middleware.call('core.bulk', 'service.restart', [\n [i] for i in (set(options['services_restart']) | {'system_datasets', 'disk'}) - {'jails', 'vms'}\n ])\n if 'jails' in options['services_restart']:\n await self.middleware.call('core.bulk', 'jail.rc_action', [['RESTART']])\n if 'vms' in options['services_restart']:\n for vm in await self._unlock_restarted_vms(pool['name']):\n await self.middleware.call('vm.stop', vm['id'])\n await self.middleware.call('vm.start', vm['id'])\n\n await self.middleware.call_hook('pool.post_unlock', pool=pool)\n\n return True\n\n @item_method\n @accepts(Int('id'), Str('passphrase', private=True))\n @job(lock='lock_pool')\n async def lock(self, job, oid, passphrase):\n \"\"\"\n Lock encrypted pool `id`.\n \"\"\"\n pool = await self._get_instance(oid)\n\n verrors = ValidationErrors()\n\n if pool['encrypt'] == 0:\n verrors.add('id', 'Pool is not encrypted.')\n elif pool['status'] == 'OFFLINE':\n verrors.add('id', 'Pool already locked.')\n\n if not verrors:\n # Make sure that this pool is not being used by system dataset service\n if pool['name'] == (await self.middleware.call('systemdataset.config'))['pool']:\n verrors.add(\n 'id',\n f'Pool {pool[\"name\"]} contains the system dataset. 
The system dataset pool cannot be locked.'\n )\n else:\n if not await self.middleware.call('disk.geli_testkey', pool, passphrase):\n verrors.add(\n 'passphrase',\n 'The entered passphrase was not valid. Please enter the correct passphrase to lock the pool.'\n )\n\n if verrors:\n raise verrors\n\n await self.middleware.call_hook('pool.pre_lock', pool=pool)\n\n sysds = await self.middleware.call('systemdataset.config')\n if sysds['pool'] == pool['name']:\n job = await self.middleware.call('systemdataset.update', {\n 'pool': None, 'pool_exclude': pool['name'],\n })\n await job.wait()\n if job.error:\n raise CallError(job.error)\n\n await self.middleware.call('zfs.pool.export', pool['name'])\n\n for ed in await self.middleware.call(\n 'datastore.query', 'storage.encrypteddisk', [('encrypted_volume', '=', pool['id'])]\n ):\n await self.middleware.call('disk.geli_detach_single', ed['encrypted_provider'])\n\n await self.middleware.call_hook('pool.post_lock', pool=pool)\n await self.middleware.call('service.restart', 'system_datasets')\n\n return True\n\n @item_method\n @accepts(Int('id'), Str('filename', default='geli.key'))\n async def download_encryption_key(self, oid, filename):\n \"\"\"\n Download encryption key for a given pool `id`.\n \"\"\"\n pool = await self.query([('id', '=', oid)], {'get': True})\n if not pool['encryptkey']:\n return None\n\n job_id, url = await self.middleware.call(\n 'core.download',\n 'filesystem.get',\n [os.path.join(self.GELI_KEYPATH, f\"{pool['encryptkey']}.key\")],\n filename,\n )\n return url\n\n @private\n def configure_resilver_priority(self):\n \"\"\"\n Configure resilver priority based on user-selected off-peak hours.\n \"\"\"\n resilver = self.middleware.call_sync('datastore.config', 'storage.resilver')\n\n if not resilver['enabled'] or not resilver['weekday']:\n return\n\n higher_prio = False\n # Use a list, not a bare map() iterator: membership is tested more than once below\n weekdays = [int(x) for x in resilver['weekday'].split(',')]\n now = datetime.now()\n now_t = now.time()\n # end overlaps the day\n if resilver['begin'] > resilver['end']:\n if now.isoweekday() in weekdays and now_t >= resilver['begin']:\n higher_prio = True\n else:\n lastweekday = now.isoweekday() - 1\n if lastweekday == 0:\n lastweekday = 7\n if lastweekday in weekdays and now_t < resilver['end']:\n higher_prio = True\n # end does not overlap the day\n else:\n if now.isoweekday() in weekdays and now_t >= resilver['begin'] and now_t < resilver['end']:\n higher_prio = True\n\n if higher_prio:\n resilver_delay = 0\n resilver_min_time_ms = 9000\n scan_idle = 0\n else:\n resilver_delay = 2\n resilver_min_time_ms = 3000\n scan_idle = 50\n\n sysctl.filter('vfs.zfs.resilver_delay')[0].value = resilver_delay\n sysctl.filter('vfs.zfs.resilver_min_time_ms')[0].value = resilver_min_time_ms\n sysctl.filter('vfs.zfs.scan_idle')[0].value = scan_idle\n\n @accepts()\n @job()\n async def import_find(self, job):\n \"\"\"\n Get a list of pools available for import with the following details:\n name, guid, status, hostname.\n \"\"\"\n\n existing_guids = [i['guid'] for i in await self.middleware.call('pool.query')]\n\n result = []\n for pool in await self.middleware.call('zfs.pool.find_import'):\n if pool['status'] == 'UNAVAIL':\n continue\n # Exclude pools with same guid as existing pools (in database)\n # It could be the pool is in the database but was exported/detached for some reason\n # See #6808\n if pool['guid'] in existing_guids:\n continue\n entry = {}\n for i in ('name', 'guid', 'status', 'hostname'):\n entry[i] = pool[i]\n result.append(entry)\n return result\n\n 
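# Usage sketch (hypothetical): `pool.import_find` feeds `pool.import_pool`.\n # Both are jobs, so a websocket client would typically wait on them:\n #\n # from middlewared.client import Client\n # with Client() as c:\n # found = c.call('pool.import_find', job=True)\n # if found:\n # c.call('pool.import_pool', {'guid': found[0]['guid']}, job=True)\n\n 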
@accepts(Dict(\n 'pool_import',\n Str('guid', required=True),\n Str('name'),\n Str('passphrase', private=True),\n Bool('enable_attachments'),\n ))\n @job(lock='import_pool', pipes=['input'], check_pipes=False)\n async def import_pool(self, job, data):\n \"\"\"\n Import a pool found with `pool.import_find`.\n\n If a `name` is specified, the pool will be imported using that new name.\n\n `passphrase` is required while importing an encrypted pool. In that case this method needs to\n be called using the /_upload/ endpoint with the encryption key.\n\n If `enable_attachments` is set to true, attachments that were disabled during pool export will be\n re-enabled.\n\n Errors:\n ENOENT - Pool not found\n\n .. examples(websocket)::\n\n Import pool of guid 5571830764813710860.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.import_pool\",\n \"params\": [{\n \"guid\": \"5571830764813710860\"\n }]\n }\n \"\"\"\n\n pool = None\n for p in await self.middleware.call('zfs.pool.find_import'):\n if p['guid'] == data['guid']:\n pool = p\n break\n if pool is None:\n raise CallError(f'Pool with guid \"{data[\"guid\"]}\" not found', errno.ENOENT)\n\n try:\n job.check_pipe(\"input\")\n key = job.pipes.input.r\n except ValueError:\n key = None\n\n passfile = None\n if key and data.get('passphrase'):\n encrypt = 2\n passfile = tempfile.mktemp(dir='/tmp/')\n with open(passfile, 'w') as f:\n os.chmod(passfile, 0o600)\n f.write(data['passphrase'])\n elif key:\n encrypt = 1\n else:\n encrypt = 0\n\n try:\n activated_jail_pool = await self.middleware.call('jail.get_activated_pool')\n except Exception:\n activated_jail_pool = None\n\n pool_name = data.get('name') or pool['name']\n scrub_id = pool_id = None\n try:\n pool_id = await self.middleware.call('datastore.insert', 'storage.volume', {\n 'vol_name': pool_name,\n 'vol_encrypt': encrypt,\n 'vol_guid': data['guid'],\n })\n pool = await self.middleware.call('pool.query', [('id', '=', pool_id)], {'get': True})\n if encrypt > 0:\n if not os.path.exists(GELI_KEYPATH):\n os.mkdir(GELI_KEYPATH)\n with open(pool['encryptkey_path'], 'wb') as f:\n f.write(key.read())\n\n scrub_id = (await self.middleware.call('pool.scrub.create', {\n 'pool': pool_id,\n }))['id']\n\n await self.middleware.call('zfs.pool.import_pool', pool['guid'], {\n 'altroot': '/mnt',\n 'cachefile': ZPOOL_CACHE_FILE,\n })\n\n await self.middleware.call('zfs.dataset.update', pool_name, {\n 'properties': {\n 'aclmode': {'value': 'passthrough'},\n 'aclinherit': {'value': 'passthrough'},\n },\n })\n\n # Reset all mountpoints\n await self.middleware.call('zfs.dataset.inherit', pool_name, 'mountpoint', True)\n\n await self.middleware.call('pool.sync_encrypted', pool_id)\n except Exception:\n if scrub_id:\n await self.middleware.call('pool.scrub.delete', scrub_id)\n if pool_id:\n await self.middleware.call('datastore.delete', 'storage.volume', pool_id)\n if passfile:\n os.unlink(passfile)\n raise\n\n 
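# At this point the import itself succeeded; the scrub task, storage.volume\n # row and temporary passphrase file are only rolled back by the except\n # block above on failure. What follows restores optional state: iocage\n # activation and any attachments recorded at export time.\n 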
if activated_jail_pool:\n # It is possible the imported pool had iocage set up. System will give preference to\n # the already configured pool in this case; the user can always change this later\n try:\n await self.middleware.call('jail.activate', activated_jail_pool)\n except CallError as e:\n self.middleware.logger.debug(\n f'Failed to activate {activated_jail_pool} after importing {pool_name} pool: {e}'\n )\n\n key = f'pool:{pool[\"name\"]}:enable_on_import'\n if await self.middleware.call('keyvalue.has_key', key):\n for name, ids in (await self.middleware.call('keyvalue.get', key)).items():\n for delegate in PoolDatasetService.attachment_delegates:\n if delegate.name == name:\n attachments = await delegate.query(pool['path'], False)\n attachments = [attachment for attachment in attachments if attachment['id'] in ids]\n if attachments:\n await delegate.toggle(attachments, True)\n await self.middleware.call('keyvalue.delete', key)\n\n await self.middleware.call('service.reload', 'disk')\n await self.middleware.call_hook('pool.post_import', pool)\n\n return True\n\n @accepts(\n Str('device'),\n Str('fs_type'),\n Dict('fs_options', additional_attrs=True),\n Str('dst_path')\n )\n @job(lock=lambda args: 'volume_import', logs=True)\n async def import_disk(self, job, device, fs_type, fs_options, dst_path):\n \"\"\"\n Import a disk by copying its content to a pool.\n\n .. examples(websocket)::\n\n Import a FAT32 (msdosfs) disk.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.import_disk\",\n \"params\": [\n \"/dev/da0\", \"msdosfs\", {}, \"/mnt/tank/mydisk\"\n ]\n }\n \"\"\"\n job.set_progress(None, description=\"Mounting\")\n\n src = os.path.join('/var/run/importcopy/tmpdir', os.path.relpath(device, '/'))\n\n if os.path.exists(src):\n os.rmdir(src)\n\n try:\n os.makedirs(src)\n\n async with KernelModuleContextManager({\"ext2fs\": \"ext2fs\",\n \"msdosfs\": \"msdosfs_iconv\",\n \"ntfs\": \"fuse\"}.get(fs_type)):\n async with MountFsContextManager(self.middleware, device, src, fs_type, fs_options, [\"ro\"]):\n job.set_progress(None, description=\"Importing\")\n\n line = [\n '/usr/local/bin/rsync',\n '--info=progress2',\n '--modify-window=1',\n '-rltvh',\n '--no-perms',\n src + '/',\n dst_path\n ]\n rsync_proc = await Popen(\n line, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0, preexec_fn=os.setsid,\n )\n try:\n progress_buffer = JobProgressBuffer(job)\n while True:\n line = await rsync_proc.stdout.readline()\n job.logs_fd.write(line)\n if line:\n try:\n line = line.decode(\"utf-8\", \"ignore\").strip()\n bits = re.split(r\"\\s+\", line)\n if len(bits) == 6 and bits[1].endswith(\"%\") and bits[1][:-1].isdigit():\n progress_buffer.set_progress(int(bits[1][:-1]))\n elif not line.endswith('/'):\n if (\n line not in ['sending incremental file list'] and\n 'xfr#' not in line\n ):\n progress_buffer.set_progress(None, extra=line)\n except Exception:\n logger.warning('Parsing error in rsync task', exc_info=True)\n else:\n break\n\n progress_buffer.flush()\n await rsync_proc.wait()\n if rsync_proc.returncode != 0:\n raise Exception(\"rsync failed with exit code %r\" % rsync_proc.returncode)\n except asyncio.CancelledError:\n rsync_proc.kill()\n raise\n\n job.set_progress(100, description=\"Done\", extra=\"\")\n finally:\n os.rmdir(src)\n\n @accepts(Str(\"device\"))\n def import_disk_autodetect_fs_type(self, device):\n \"\"\"\n Autodetect filesystem type for `pool.import_disk`.\n\n .. 
examples(websocket)::\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.import_disk_autodetect_fs_type\",\n \"params\": [\"/dev/da0\"]\n }\n \"\"\"\n proc = subprocess.Popen([\"blkid\", device], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding=\"utf-8\")\n output = proc.communicate()[0].strip()\n\n if proc.returncode == 2:\n proc = subprocess.Popen([\"file\", \"-s\", device], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n encoding=\"utf-8\")\n output = proc.communicate()[0].strip()\n if proc.returncode != 0:\n raise CallError(f\"blkid failed with code 2 and file failed with code {proc.returncode}: {output}\")\n\n if \"Unix Fast File system\" in output:\n return \"ufs\"\n\n raise CallError(f\"blkid failed with code 2 and file produced unexpected output: {output}\")\n\n if proc.returncode != 0:\n raise CallError(f\"blkid failed with code {proc.returncode}: {output}\")\n\n m = re.search(\"TYPE=\\\"(.+?)\\\"\", output)\n if m is None:\n raise CallError(f\"blkid produced unexpected output: {output}\")\n\n fs = {\n \"ext2\": \"ext2fs\",\n \"ext3\": \"ext2fs\",\n \"ntfs\": \"ntfs\",\n \"vfat\": \"msdosfs\",\n }.get(m.group(1))\n if fs is None:\n self.logger.info(\"Unknown FS: %s\", m.group(1))\n return None\n\n return fs\n\n @accepts()\n def import_disk_msdosfs_locales(self):\n \"\"\"\n Get a list of locales for msdosfs type to be used in `pool.import_disk`.\n \"\"\"\n return [\n locale.strip()\n for locale in subprocess.check_output([\"locale\", \"-a\"], encoding=\"utf-8\").split(\"\\n\")\n if locale.strip() and locale.strip() not in [\"C\", \"POSIX\"]\n ]\n\n @item_method\n @accepts(\n Int('id'),\n Dict(\n 'options',\n Bool('cascade', default=False),\n Bool('restart_services', default=False),\n Bool('destroy', default=False),\n ),\n )\n @job(lock='pool_export')\n async def export(self, job, oid, options):\n \"\"\"\n Export pool of `id`.\n\n `cascade` will delete all attachments of the given pool (`pool.attachments`).\n `restart_services` will restart services that have open files on given pool.\n `destroy` will also PERMANENTLY destroy the pool/data.\n\n .. 
examples(websocket)::\n\n Export pool of id 1.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.export\",\n \"params\": [1, {\n \"cascade\": true,\n \"destroy\": false\n }]\n }\n \"\"\"\n pool = await self._get_instance(oid)\n\n pool_count = await self.middleware.call('pool.query', [], {'count': True})\n is_freenas = await self.middleware.call('system.is_freenas')\n if (\n pool_count == 1 and not is_freenas and\n await self.middleware.call('failover.licensed') and\n not (await self.middleware.call('failover.config'))['disabled']\n ):\n raise CallError('Disable failover before exporting the last pool on the system.')\n\n enable_on_import_key = f'pool:{pool[\"name\"]}:enable_on_import'\n enable_on_import = {}\n if not options['cascade']:\n if await self.middleware.call('keyvalue.has_key', enable_on_import_key):\n enable_on_import = await self.middleware.call('keyvalue.get', enable_on_import_key)\n\n for i, delegate in enumerate(PoolDatasetService.attachment_delegates):\n job.set_progress(\n i, f'{\"Deleting\" if options[\"cascade\"] else \"Disabling\"} pool attachments: {delegate.title}')\n\n attachments = await delegate.query(pool['path'], True)\n if attachments:\n if options[\"cascade\"]:\n await delegate.delete(attachments)\n else:\n await delegate.toggle(attachments, False)\n enable_on_import[delegate.name] = list(\n set(enable_on_import.get(delegate.name, [])) |\n {attachment['id'] for attachment in attachments}\n )\n\n if enable_on_import:\n await self.middleware.call('keyvalue.set', enable_on_import_key, enable_on_import)\n else:\n await self.middleware.call('keyvalue.delete', enable_on_import_key)\n\n job.set_progress(20, 'Terminating processes that are using this pool')\n try:\n await self.middleware.call('pool.dataset.kill_processes', pool['name'],\n options.get('restart_services', False))\n except ValidationError as e:\n if e.errno == errno.ENOENT:\n # Dataset might not exist (e.g. 
pool is not decrypted), this is not an error\n pass\n else:\n raise\n await self.middleware.call('iscsi.global.terminate_luns_for_pool', pool['name'])\n\n job.set_progress(30, 'Removing pool disks from swap')\n disks = [i async for i in await self.middleware.call('pool.get_disks')]\n await self.middleware.call('disk.swaps_remove_disks', disks)\n\n sysds = await self.middleware.call('systemdataset.config')\n if sysds['pool'] == pool['name']:\n job.set_progress(40, 'Reconfiguring system dataset')\n sysds_job = await self.middleware.call('systemdataset.update', {\n 'pool': None, 'pool_exclude': pool['name'],\n })\n await sysds_job.wait()\n if sysds_job.error:\n raise CallError(sysds_job.error)\n\n if pool['status'] == 'OFFLINE':\n # Pool exists only in database, it's not imported\n pass\n elif options['destroy']:\n job.set_progress(60, 'Destroying pool')\n await self.middleware.call('zfs.pool.delete', pool['name'])\n\n job.set_progress(80, 'Cleaning disks')\n\n async def unlabel(disk):\n return await self.middleware.call('disk.unlabel', disk, False)\n await asyncio_map(unlabel, disks, limit=16)\n\n await self.middleware.call('disk.sync_all')\n\n await self.middleware.call('disk.geli_detach', pool, True)\n if pool['encrypt'] > 0:\n try:\n os.remove(pool['encryptkey_path'])\n except OSError as e:\n self.logger.warn(\n 'Failed to remove encryption key %s: %s',\n pool['encryptkey_path'],\n e,\n exc_info=True,\n )\n else:\n job.set_progress(80, 'Exporting pool')\n await self.middleware.call('zfs.pool.export', pool['name'])\n await self.middleware.call('disk.geli_detach', pool)\n\n job.set_progress(90, 'Cleaning up')\n if os.path.isdir(pool['path']):\n try:\n # We don't try to remove recursively to avoid removing files that were\n # potentially hidden by the mount\n os.rmdir(pool['path'])\n except OSError as e:\n self.logger.warn('Failed to remove mountpoint %s: %s', pool['path'], e)\n\n await self.middleware.call('datastore.delete', 'storage.volume', oid)\n\n # scrub needs to be regenerated in crontab\n await self.middleware.call('service.restart', 'cron')\n\n await self.middleware.call_hook('pool.post_export', pool=pool['name'], options=options)\n\n @item_method\n @accepts(Int('id'))\n async def attachments(self, oid):\n \"\"\"\n Return a list of services dependent on this pool.\n\n Responsible for telling the user whether there is a related\n share, asking for confirmation.\n \"\"\"\n pool = await self._get_instance(oid)\n return await self.middleware.call('pool.dataset.attachments', pool['name'])\n\n @item_method\n @accepts(Int('id'))\n async def processes(self, oid):\n \"\"\"\n Returns a list of running processes using this pool.\n \"\"\"\n pool = await self._get_instance(oid)\n return await self.middleware.call('pool.dataset.processes', pool['name'])\n\n @staticmethod\n def __get_dev_and_disk(topology):\n rv = []\n for values in topology.values():\n values = values.copy()\n while values:\n value = values.pop()\n if value['type'] == 'DISK':\n rv.append((value['path'].replace('/dev/', ''), value['disk']))\n values += value.get('children') or []\n return rv\n\n @private\n def sync_encrypted(self, pool=None):\n \"\"\"\n This syncs the EncryptedDisk table with the current state\n of a volume\n \"\"\"\n if pool is not None:\n filters = [('id', '=', pool)]\n else:\n filters = []\n\n pools = self.middleware.call_sync('pool.query', filters)\n if not pools:\n return\n\n # Grab all disks at once to avoid querying every iteration\n disks = {i['devname']: i['identifier'] for i in 
self.middleware.call_sync('disk.query')}\n\n for pool in pools:\n if not pool['is_decrypted'] or pool['status'] == 'OFFLINE' or pool['encrypt'] == 0:\n continue\n\n provs = []\n for dev, disk in self.__get_dev_and_disk(pool['topology']):\n if not dev.endswith(\".eli\"):\n continue\n prov = dev[:-4]\n diskid = disks.get(disk)\n ed = self.middleware.call_sync('datastore.query', 'storage.encrypteddisk', [\n ('encrypted_provider', '=', prov)\n ])\n if not ed:\n if not diskid:\n self.logger.warn('Could not find Disk entry for %s', disk)\n self.middleware.call_sync('datastore.insert', 'storage.encrypteddisk', {\n 'encrypted_volume': pool['id'],\n 'encrypted_provider': prov,\n 'encrypted_disk': diskid,\n })\n elif diskid and ed[0]['encrypted_disk'] != diskid:\n self.middleware.call_sync(\n 'datastore.update', 'storage.encrypteddisk', ed[0]['id'],\n {'encrypted_disk': diskid},\n )\n provs.append(prov)\n\n # Delete devices no longer in pool from database\n self.middleware.call_sync('datastore.delete', 'storage.encrypteddisk', [\n ('encrypted_volume', '=', pool['id']), ('encrypted_provider', 'nin', provs)\n ])\n\n def __dtrace_read(self, job, proc):\n while True:\n read = proc.stdout.readline()\n if read == b'':\n break\n read = read.decode(errors='ignore').strip()\n job.set_progress(None, read)\n\n @private\n @job()\n def import_on_boot(self, job):\n cachedir = os.path.dirname(ZPOOL_CACHE_FILE)\n if not os.path.exists(cachedir):\n os.mkdir(cachedir)\n\n if (\n not self.middleware.call_sync('system.is_freenas') and\n self.middleware.call_sync('failover.licensed')\n ):\n return\n\n zpool_cache_saved = f'{ZPOOL_CACHE_FILE}.saved'\n if os.path.exists(ZPOOL_KILLCACHE):\n with contextlib.suppress(Exception):\n os.unlink(ZPOOL_CACHE_FILE)\n with contextlib.suppress(Exception):\n os.unlink(zpool_cache_saved)\n else:\n with open(ZPOOL_KILLCACHE, 'w') as f:\n os.fsync(f)\n\n try:\n stat = os.stat(ZPOOL_CACHE_FILE)\n if stat.st_size > 0:\n copy = False\n if not os.path.exists(zpool_cache_saved):\n copy = True\n else:\n statsaved = os.stat(zpool_cache_saved)\n if stat.st_mtime > statsaved.st_mtime:\n copy = True\n if copy:\n shutil.copy(ZPOOL_CACHE_FILE, zpool_cache_saved)\n except FileNotFoundError:\n pass\n\n job.set_progress(0, 'Beginning pools import')\n\n try:\n proc = subprocess.Popen(\n ['dtrace', '-qn', 'zfs-dbgmsg{printf(\"%s\\\\n\", stringof(arg0))}'],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n\n start_daemon_thread(target=self.__dtrace_read, args=[job, proc])\n\n pools = self.middleware.call_sync('pool.query', [\n ('encrypt', '<', 2),\n ('status', '=', 'OFFLINE')\n ])\n for i, pool in enumerate(pools):\n # Importing pools is currently 80% of the job because we may still need\n # to set ACL mode for Windows\n job.set_progress(int((i + 1) / len(pools) * 80), f'Importing {pool[\"name\"]}')\n imported = False\n if pool['guid']:\n try:\n self.middleware.call_sync('zfs.pool.import_pool', pool['guid'], {\n 'altroot': '/mnt',\n 'cachefile': 'none',\n }, True, zpool_cache_saved if os.path.exists(zpool_cache_saved) else None)\n except Exception:\n # Importing a pool may fail because of an out-of-date guid database entry\n # or because of a bad cachefile. 
Try again using the pool name and without\n # the cachefile\n self.logger.error('Failed to import %s', pool['name'], exc_info=True)\n else:\n imported = True\n if not imported:\n try:\n self.middleware.call_sync('zfs.pool.import_pool', pool['name'], {\n 'altroot': '/mnt',\n 'cachefile': 'none',\n })\n except Exception:\n self.logger.error('Failed to import %s', pool['name'], exc_info=True)\n continue\n\n try:\n self.middleware.call_sync(\n 'zfs.pool.update', pool['name'], {'properties': {\n 'cachefile': {'value': ZPOOL_CACHE_FILE},\n }}\n )\n except Exception:\n self.logger.warn(\n 'Failed to set cache file for %s', pool['name'], exc_info=True,\n )\n\n try:\n if os.path.isdir('/mnt/mnt'):\n # Reset all mountpoints\n self.middleware.call_sync(\n 'zfs.dataset.inherit', pool['name'], 'mountpoint', True\n )\n except Exception:\n self.logger.warn(\n 'Failed to inherit mountpoints for %s', pool['name'], exc_info=True,\n )\n\n finally:\n proc.kill()\n proc.wait()\n\n with contextlib.suppress(OSError):\n os.unlink(ZPOOL_KILLCACHE)\n\n if os.path.exists(ZPOOL_CACHE_FILE):\n shutil.copy(ZPOOL_CACHE_FILE, zpool_cache_saved)\n\n # Now that pools have been imported we are ready to configure system dataset,\n # collectd and syslogd which may depend on them.\n try:\n self.middleware.call_sync('etc.generate', 'system_dataset')\n except Exception:\n self.logger.warn('Failed to set up system dataset', exc_info=True)\n\n try:\n self.middleware.call_sync('etc.generate', 'collectd')\n except Exception:\n self.logger.warn('Failed to configure collectd', exc_info=True)\n\n try:\n self.middleware.call_sync('etc.generate', 'syslogd')\n except Exception:\n self.logger.warn('Failed to configure syslogd', exc_info=True)\n\n self.middleware.call_sync('zettarepl.update_tasks')\n\n # Configure swaps after importing pools. 
devd events are not yet ready at this\n # stage of the boot process.\n self.middleware.run_coroutine(self.middleware.call('disk.swaps_configure'), wait=False)\n\n job.set_progress(100, 'Pools import completed')\n\n \"\"\"\n These methods are hacks for old UI which supports only one volume import at a time\n \"\"\"\n\n dismissed_import_disk_jobs = set()\n\n @private\n async def get_current_import_disk_job(self):\n import_jobs = await self.middleware.call('core.get_jobs', [('method', '=', 'pool.import_disk')])\n not_dismissed_import_jobs = [job for job in import_jobs if job[\"id\"] not in self.dismissed_import_disk_jobs]\n if not_dismissed_import_jobs:\n return not_dismissed_import_jobs[0]\n\n @private\n async def dismiss_current_import_disk_job(self):\n current_import_job = await self.get_current_import_disk_job()\n if current_import_job:\n self.dismissed_import_disk_jobs.add(current_import_job[\"id\"])\n\n\nclass PoolDatasetService(CRUDService):\n\n attachment_delegates = []\n\n class Config:\n namespace = 'pool.dataset'\n\n @filterable\n def query(self, filters=None, options=None):\n \"\"\"\n Query Pool Datasets with `query-filters` and `query-options`.\n \"\"\"\n # Optimization for cases in which they can be filtered at zfs.dataset.query\n zfsfilters = []\n for f in filters or []:\n if len(f) == 3:\n if f[0] in ('id', 'name', 'pool', 'type'):\n zfsfilters.append(f)\n datasets = self.middleware.call_sync(\n 'zfs.dataset.query', zfsfilters, {'extra': (options or {}).get('extra', {})}\n )\n return filter_list(self.__transform(datasets), filters, options)\n\n def __transform(self, datasets):\n \"\"\"\n We need to transform the data zfs gives us to make it consistent/user-friendly,\n making it match whatever pool.dataset.{create,update} uses as input.\n \"\"\"\n def transform(dataset):\n for orig_name, new_name, method in (\n ('org.freenas:description', 'comments', None),\n ('org.freenas:quota_warning', 'quota_warning', None),\n ('org.freenas:quota_critical', 'quota_critical', None),\n ('org.freenas:refquota_warning', 'refquota_warning', None),\n ('org.freenas:refquota_critical', 'refquota_critical', None),\n ('dedup', 'deduplication', str.upper),\n ('aclmode', None, str.upper),\n ('atime', None, str.upper),\n ('casesensitivity', None, str.upper),\n ('exec', None, str.upper),\n ('sync', None, str.upper),\n ('compression', None, str.upper),\n ('compressratio', None, None),\n ('origin', None, None),\n ('quota', None, _null),\n ('refquota', None, _null),\n ('reservation', None, _null),\n ('refreservation', None, _null),\n ('copies', None, None),\n ('snapdir', None, str.upper),\n ('readonly', None, str.upper),\n ('recordsize', None, None),\n ('sparse', None, None),\n ('volsize', None, None),\n ('volblocksize', None, None),\n ):\n if orig_name not in dataset['properties']:\n continue\n i = new_name or orig_name\n dataset[i] = dataset['properties'][orig_name]\n if method:\n dataset[i]['value'] = method(dataset[i]['value'])\n del dataset['properties']\n\n rv = []\n for child in dataset['children']:\n rv.append(transform(child))\n dataset['children'] = rv\n\n return dataset\n\n rv = []\n for dataset in datasets:\n rv.append(transform(dataset))\n return rv\n\n @accepts(Dict(\n 'pool_dataset_create',\n Str('name', required=True),\n Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),\n Int('volsize'), # IN BYTES\n Str('volblocksize', enum=[\n '512', '1K', '2K', '4K', '8K', '16K', '32K', '64K', '128K',\n ]),\n Bool('sparse'),\n Bool('force_size'),\n Str('comments'),\n Str('sync', enum=[\n 
'STANDARD', 'ALWAYS', 'DISABLED',\n ]),\n Str('compression', enum=[\n 'OFF', 'LZ4', 'GZIP', 'GZIP-1', 'GZIP-9', 'ZLE', 'LZJB',\n ]),\n Str('atime', enum=['ON', 'OFF']),\n Str('exec', enum=['ON', 'OFF']),\n Int('quota', null=True),\n Int('quota_warning', validators=[Range(0, 100)]),\n Int('quota_critical', validators=[Range(0, 100)]),\n Int('refquota', null=True),\n Int('refquota_warning', validators=[Range(0, 100)]),\n Int('refquota_critical', validators=[Range(0, 100)]),\n Int('reservation'),\n Int('refreservation'),\n Int('copies'),\n Str('snapdir', enum=['VISIBLE', 'HIDDEN']),\n Str('deduplication', enum=['ON', 'VERIFY', 'OFF']),\n Str('readonly', enum=['ON', 'OFF']),\n Str('recordsize', enum=[\n '512', '1K', '2K', '4K', '8K', '16K', '32K', '64K', '128K', '256K', '512K', '1024K',\n ]),\n Str('casesensitivity', enum=['SENSITIVE', 'INSENSITIVE', 'MIXED']),\n Str('aclmode', enum=['PASSTHROUGH', 'RESTRICTED']),\n Str('share_type', default='GENERIC', enum=['GENERIC', 'SMB']),\n register=True,\n ))\n async def do_create(self, data):\n \"\"\"\n Creates a dataset/zvol.\n\n `volsize` is required for type=VOLUME and is supposed to be a multiple of the block size.\n `sparse` and `volblocksize` are only used for type=VOLUME.\n\n .. examples(websocket)::\n\n Create a dataset within tank pool.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.dataset.create\",\n \"params\": [{\n \"name\": \"tank/myuser\",\n \"comments\": \"Dataset for myuser\"\n }]\n }\n \"\"\"\n\n verrors = ValidationErrors()\n\n if '/' not in data['name']:\n verrors.add('pool_dataset_create.name', 'You need a full name, e.g. pool/newdataset')\n else:\n await self.__common_validation(verrors, 'pool_dataset_create', data, 'CREATE')\n\n mountpoint = os.path.join('/mnt', data['name'])\n if os.path.exists(mountpoint):\n verrors.add('pool_dataset_create.name', f'Path {mountpoint} already exists')\n\n if data['share_type'] == 'SMB':\n data['casesensitivity'] = 'INSENSITIVE'\n data['aclmode'] = 'RESTRICTED'\n\n if verrors:\n raise verrors\n\n props = {}\n for i, real_name, transform in (\n ('aclmode', None, str.lower),\n ('atime', None, str.lower),\n ('casesensitivity', None, str.lower),\n ('comments', 'org.freenas:description', None),\n ('compression', None, str.lower),\n ('copies', None, lambda x: str(x)),\n ('deduplication', 'dedup', str.lower),\n ('exec', None, str.lower),\n ('quota', None, _none),\n ('quota_warning', 'org.freenas:quota_warning', str),\n ('quota_critical', 'org.freenas:quota_critical', str),\n ('readonly', None, str.lower),\n ('recordsize', None, None),\n ('refquota', None, _none),\n ('refquota_warning', 'org.freenas:refquota_warning', str),\n ('refquota_critical', 'org.freenas:refquota_critical', str),\n ('refreservation', None, _none),\n ('reservation', None, _none),\n ('snapdir', None, str.lower),\n ('sparse', None, None),\n ('sync', None, str.lower),\n ('volblocksize', None, None),\n ('volsize', None, lambda x: str(x)),\n ):\n if i not in data:\n continue\n name = real_name or i\n props[name] = data[i] if not transform else transform(data[i])\n\n await self.middleware.call('zfs.dataset.create', {\n 'name': data['name'],\n 'type': data['type'],\n 'properties': props,\n })\n\n data['id'] = data['name']\n\n await self.middleware.call('zfs.dataset.mount', data['name'])\n\n if data['type'] == 'FILESYSTEM' and data['share_type'] == 'SMB':\n await self.middleware.call('pool.dataset.permission', data['id'], {'mode': None})\n\n return await self._get_instance(data['id'])\n\n 
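# Usage sketch (hypothetical): creating a zvol. `volsize` must be a\n # multiple of `volblocksize` (enforced in __common_validation), e.g.\n # 1 GiB = 65536 * 16K:\n #\n # await self.middleware.call('pool.dataset.create', {\n # 'name': 'tank/myzvol',\n # 'type': 'VOLUME',\n # 'volsize': 1073741824,\n # 'volblocksize': '16K',\n # })\n\n 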
def _add_inherit(name):\n def add(attr):\n attr.enum.append('INHERIT')\n return {'name': name, 'method': add}\n\n @accepts(Str('id', required=True), Patch(\n 'pool_dataset_create', 'pool_dataset_update',\n ('rm', {'name': 'name'}),\n ('rm', {'name': 'type'}),\n ('rm', {'name': 'casesensitivity'}), # It's a readonly attribute\n ('rm', {'name': 'share_type'}), # This is something we should only do at create time\n ('rm', {'name': 'sparse'}), # Create time only attribute\n ('rm', {'name': 'volblocksize'}), # Create time only attribute\n ('edit', _add_inherit('atime')),\n ('edit', _add_inherit('exec')),\n ('edit', _add_inherit('sync')),\n ('edit', _add_inherit('compression')),\n ('edit', _add_inherit('deduplication')),\n ('edit', _add_inherit('readonly')),\n ('edit', _add_inherit('recordsize')),\n ('edit', _add_inherit('snapdir')),\n ('add', Inheritable('quota_warning', value=Int('quota_warning', validators=[Range(0, 100)]))),\n ('add', Inheritable('quota_critical', value=Int('quota_critical', validators=[Range(0, 100)]))),\n ('add', Inheritable('refquota_warning', value=Int('refquota_warning', validators=[Range(0, 100)]))),\n ('add', Inheritable('refquota_critical', value=Int('refquota_critical', validators=[Range(0, 100)]))),\n ('attr', {'update': True}),\n ))\n async def do_update(self, id, data):\n \"\"\"\n Updates a dataset/zvol `id`.\n\n .. examples(websocket)::\n\n Update the `comments` for \"tank/myuser\".\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.dataset.update\",\n \"params\": [\"tank/myuser\", {\n \"comments\": \"Dataset for myuser, UPDATE #1\"\n }]\n }\n \"\"\"\n\n verrors = ValidationErrors()\n\n dataset = await self.middleware.call('pool.dataset.query', [('id', '=', id)])\n if not dataset:\n verrors.add('id', f'{id} does not exist', errno.ENOENT)\n else:\n data['type'] = dataset[0]['type']\n data['name'] = dataset[0]['name']\n if data['type'] == 'VOLUME':\n data['volblocksize'] = dataset[0]['volblocksize']['value']\n await self.__common_validation(verrors, 'pool_dataset_update', data, 'UPDATE')\n if 'volsize' in data:\n if data['volsize'] < dataset[0]['volsize']['parsed']:\n verrors.add('pool_dataset_update.volsize',\n 'You cannot shrink a zvol from the GUI; this may lead to data loss.')\n if verrors:\n raise verrors\n\n props = {}\n for i, real_name, transform, inheritable in (\n ('aclmode', None, str.lower, True),\n ('atime', None, str.lower, True),\n ('comments', 'org.freenas:description', None, False),\n ('sync', None, str.lower, True),\n ('compression', None, str.lower, True),\n ('deduplication', 'dedup', str.lower, True),\n ('exec', None, str.lower, True),\n ('quota', None, _none, False),\n ('quota_warning', 'org.freenas:quota_warning', str, True),\n ('quota_critical', 'org.freenas:quota_critical', str, True),\n ('refquota', None, _none, False),\n ('refquota_warning', 'org.freenas:refquota_warning', str, True),\n ('refquota_critical', 'org.freenas:refquota_critical', str, True),\n ('reservation', None, _none, False),\n ('refreservation', None, _none, False),\n ('copies', None, None, False),\n ('snapdir', None, str.lower, True),\n ('readonly', None, str.lower, True),\n ('recordsize', None, None, True),\n ('volsize', None, lambda x: str(x), False),\n ):\n if i not in data:\n continue\n name = real_name or i\n if inheritable and data[i] == 'INHERIT':\n props[name] = {'source': 'INHERIT'}\n else:\n props[name] = {'value': data[i] if not transform else transform(data[i])}\n\n 
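# A property set to 'INHERIT' is sent as {'source': 'INHERIT'} so zfs\n # reverts it to the parent's value; anything else is set explicitly after\n # the optional transform (e.g. str.lower for the enum-style fields).\n 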
rv = await self.middleware.call('zfs.dataset.update', id, {'properties': props})\n\n if data['type'] == 'VOLUME' and 'volsize' in data:\n if await self.middleware.call('iscsi.extent.query', [('path', '=', f'zvol/{id}')]):\n await self._service_change('iscsitarget', 'reload')\n\n return rv\n\n async def __common_validation(self, verrors, schema, data, mode):\n assert mode in ('CREATE', 'UPDATE')\n\n parent = await self.middleware.call(\n 'zfs.dataset.query',\n [('id', '=', data['name'].rsplit('/')[0])]\n )\n\n if not parent:\n verrors.add(\n f'{schema}.name',\n 'Please specify a pool which exists for the dataset/volume to be created'\n )\n else:\n parent = parent[0]\n\n if data['type'] == 'FILESYSTEM':\n for i in ('force_size', 'sparse', 'volsize', 'volblocksize'):\n if i in data:\n verrors.add(f'{schema}.{i}', 'This field is not valid for FILESYSTEM')\n elif data['type'] == 'VOLUME':\n if mode == 'CREATE' and 'volsize' not in data:\n verrors.add(f'{schema}.volsize', 'This field is required for VOLUME')\n\n for i in (\n 'aclmode', 'atime', 'casesensitivity', 'quota', 'refquota', 'recordsize',\n ):\n if i in data:\n verrors.add(f'{schema}.{i}', 'This field is not valid for VOLUME')\n\n if 'volsize' in data and parent:\n\n avail_mem = int(parent['properties']['available']['rawvalue'])\n\n if mode == 'UPDATE':\n avail_mem += int((await self.middleware.call(\n 'zfs.dataset.query',\n [['id', '=', data['name']]]\n ))[0]['properties']['used']['rawvalue'])\n\n if (\n data['volsize'] > (avail_mem * 0.80) and\n not data.get('force_size', False)\n ):\n verrors.add(\n f'{schema}.volsize',\n 'It is not recommended to use more than 80% of your available space for VOLUME'\n )\n\n if 'volblocksize' in data:\n\n if data['volblocksize'].isdigit():\n block_size = int(data['volblocksize'])\n else:\n block_size = int(data['volblocksize'][:-1]) * 1024\n\n if data['volsize'] % block_size:\n verrors.add(\n f'{schema}.volsize',\n 'Volume size should be a multiple of volume block size'\n )\n\n @accepts(Str('id'), Dict(\n 'dataset_delete',\n Bool('recursive', default=False),\n Bool('force', default=False),\n ))\n async def do_delete(self, id, options):\n \"\"\"\n Delete dataset/zvol `id`.\n\n `recursive` will also delete/destroy all children datasets.\n `force` will force delete busy datasets.\n\n .. 
examples(websocket)::\n\n Delete \"tank/myuser\" dataset.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.dataset.delete\",\n \"params\": [\"tank/myuser\"]\n }\n \"\"\"\n\n if not options['recursive'] and await self.middleware.call('zfs.dataset.query', [['id', '^', f'{id}/']]):\n raise CallError(f'Failed to delete dataset: cannot destroy {id!r}: filesystem has children',\n errno.ENOTEMPTY)\n\n dataset = await self._get_instance(id)\n path = self.__attachments_path(dataset)\n if path:\n for delegate in self.attachment_delegates:\n attachments = await delegate.query(path, True)\n if attachments:\n await delegate.delete(attachments)\n\n return await self.middleware.call('zfs.dataset.delete', id, {\n 'force': options['force'],\n 'recursive': options['recursive'],\n })\n\n @item_method\n @accepts(Str('id'))\n async def promote(self, id):\n \"\"\"\n Promote the cloned dataset `id`.\n \"\"\"\n dataset = await self.middleware.call('zfs.dataset.query', [('id', '=', id)])\n if not dataset:\n raise CallError(f'Dataset \"{id}\" does not exist.', errno.ENOENT)\n if not dataset[0]['properties']['origin']['value']:\n raise CallError('Only cloned datasets can be promoted.', errno.EBADMSG)\n return await self.middleware.call('zfs.dataset.promote', id)\n\n @accepts(\n Str('id', required=True),\n Dict(\n 'pool_dataset_permission',\n Str('user'),\n Str('group'),\n UnixPerm('mode', null=True),\n List(\n 'acl',\n items=[\n Dict(\n 'aclentry',\n Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),\n Int('id', null=True),\n Str('type', enum=['ALLOW', 'DENY']),\n Dict(\n 'perms',\n Bool('READ_DATA'),\n Bool('WRITE_DATA'),\n Bool('APPEND_DATA'),\n Bool('READ_NAMED_ATTRS'),\n Bool('WRITE_NAMED_ATTRS'),\n Bool('EXECUTE'),\n Bool('DELETE_CHILD'),\n Bool('READ_ATTRIBUTES'),\n Bool('WRITE_ATTRIBUTES'),\n Bool('DELETE'),\n Bool('READ_ACL'),\n Bool('WRITE_ACL'),\n Bool('WRITE_OWNER'),\n Bool('SYNCHRONIZE'),\n Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'TRAVERSE']),\n ),\n Dict(\n 'flags',\n Bool('FILE_INHERIT'),\n Bool('DIRECTORY_INHERIT'),\n Bool('NO_PROPAGATE_INHERIT'),\n Bool('INHERIT_ONLY'),\n Bool('INHERITED'),\n Str('BASIC', enum=['INHERIT', 'NOINHERIT']),\n ),\n )\n ],\n default=[\n {\n \"tag\": \"owner@\",\n \"id\": None,\n \"type\": \"ALLOW\",\n \"perms\": {\"BASIC\": \"FULL_CONTROL\"},\n \"flags\": {\"BASIC\": \"INHERIT\"}\n },\n {\n \"tag\": \"group@\",\n \"id\": None,\n \"type\": \"ALLOW\",\n \"perms\": {\"BASIC\": \"FULL_CONTROL\"},\n \"flags\": {\"BASIC\": \"INHERIT\"}\n }\n ],\n ),\n Dict(\n 'options',\n Bool('stripacl', default=False),\n Bool('recursive', default=False),\n Bool('traverse', default=False),\n )\n\n ),\n )\n @item_method\n @job(lock=\"dataset_permission_change\")\n async def permission(self, job, id, data):\n \"\"\"\n Set permissions for a dataset `id`. Permissions may be specified as\n either a posix `mode` or an nfsv4 `acl`. Setting mode will fail if the\n dataset has an existing nfsv4 acl. In this case, the option `stripacl`\n must be set to `True`.\n\n .. 
examples(websocket)::\n\n Change permissions of dataset \"tank/myuser\" to myuser:wheel and 755.\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.dataset.permission\",\n \"params\": [\"tank/myuser\", {\n \"user\": \"myuser\",\n \"acl\": [],\n \"group\": \"wheel\",\n \"mode\": \"755\",\n \"options\": {\"recursive\": true, \"stripacl\": true},\n }]\n }\n\n \"\"\"\n path = (await self._get_instance(id))['mountpoint']\n user = data.get('user', None)\n group = data.get('group', None)\n uid = gid = -1\n mode = data.get('mode', None)\n options = data.get('options', {})\n acl = data.get('acl', [])\n pjob = None\n\n verrors = ValidationErrors()\n if user:\n try:\n uid = (await self.middleware.call('dscache.get_uncached_user', user))['pw_uid']\n except Exception as e:\n verrors.add('pool_dataset_permission.user', str(e))\n\n if group:\n try:\n gid = (await self.middleware.call('dscache.get_uncached_group', group))['gr_gid']\n except Exception as e:\n verrors.add('pool_dataset_permission.group', str(e))\n\n if acl and mode:\n verrors.add('pool_dataset_permission.mode',\n 'Setting mode and ACL simultaneously is not permitted.')\n\n if acl and options['stripacl']:\n verrors.add('pool_dataset_permission.acl',\n 'Simultaneously setting and removing ACL is not permitted.')\n\n if mode and not options['stripacl']:\n if not await self.middleware.call('filesystem.acl_is_trivial', path):\n verrors.add('pool_dataset_permission.options',\n f'{path} has an extended ACL. The option \"stripacl\" must be selected.')\n\n if verrors:\n raise verrors\n\n if not acl and mode is None and not options['stripacl']:\n \"\"\"\n Neither an ACL, a mode, nor removal of the existing ACL is\n specified in `data`. Perform a simple chown.\n \"\"\"\n options.pop('stripacl', None)\n pjob = await self.middleware.call('filesystem.chown', {\n 'path': path,\n 'uid': uid,\n 'gid': gid,\n 'options': options\n })\n\n elif acl:\n pjob = await self.middleware.call('filesystem.setacl', {\n 'path': path,\n 'dacl': acl,\n 'uid': uid,\n 'gid': gid,\n 'options': options\n })\n\n elif mode or options['stripacl']:\n \"\"\"\n `setperm` performs one of two possible actions. If\n `mode` is not set, but `stripacl` is specified, then\n the existing ACL on the file is converted in place via\n `acl_strip_np()`. This preserves the existing posix mode\n while removing any extended ACL entries.\n\n If `mode` is set, then the ACL is removed from the file\n and the new `mode` is applied.\n \"\"\"\n pjob = await self.middleware.call('filesystem.setperm', {\n 'path': path,\n 'mode': mode,\n 'uid': uid,\n 'gid': gid,\n 'options': options\n })\n else:\n \"\"\"\n This should never occur, but fail safely to avoid undefined\n or unintended behavior.\n \"\"\"\n raise CallError(f\"Unexpected parameter combination: {data}\",\n errno.EINVAL)\n\n await pjob.wait()\n if pjob.error:\n raise CallError(pjob.error)\n return data\n\n 
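# The helper below picks a zvol block size from the widest data vdev:\n # 4K per data disk, rounded up to a power of two, e.g. numdisks=4 ->\n # 2 ** (4*4 - 1).bit_length() = 16K and numdisks=6 -> 32K.\n\n 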
examples(websocket)::\n\n Get blocksize for pool \"tank\".\n\n :::javascript\n {\n \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n \"msg\": \"method\",\n \"method\": \"pool.dataset.recommended_zvol_blocksize\",\n \"params\": [\"tank\"]\n }\n \"\"\"\n pool = await self.middleware.call('pool.query', [['name', '=', pool]])\n if not pool:\n raise CallError('Pool not found.', errno.ENOENT)\n pool = pool[0]\n numdisks = 4\n for vdev in pool['topology']['data']:\n if vdev['type'] == 'RAIDZ1':\n num = len(vdev['children']) - 1\n elif vdev['type'] == 'RAIDZ2':\n num = len(vdev['children']) - 2\n elif vdev['type'] == 'RAIDZ3':\n num = len(vdev['children']) - 3\n elif vdev['type'] == 'MIRROR':\n num = 1\n else:\n num = len(vdev['children'])\n if num > numdisks:\n numdisks = num\n return '%dK' % 2 ** ((numdisks * 4) - 1).bit_length()\n\n @item_method\n @accepts(Str('id', required=True))\n async def attachments(self, oid):\n \"\"\"\n Return a list of services dependent of this dataset.\n\n Responsible for telling the user whether there is a related\n share, asking for confirmation.\n\n Example return value:\n [\n {\n \"type\": \"NFS Share\",\n \"service\": \"nfs\",\n \"attachments\": [\"/mnt/tank/work\"]\n }\n ]\n \"\"\"\n result = []\n dataset = await self._get_instance(oid)\n path = self.__attachments_path(dataset)\n if path:\n for delegate in self.attachment_delegates:\n attachments = {\"type\": delegate.title, \"service\": delegate.service, \"attachments\": []}\n for attachment in await delegate.query(path, True):\n attachments[\"attachments\"].append(await delegate.get_attachment_name(attachment))\n if attachments[\"attachments\"]:\n result.append(attachments)\n return result\n\n def __attachments_path(self, dataset):\n if dataset['type'] == 'FILESYSTEM':\n return dataset['mountpoint']\n\n if dataset['type'] == 'VOLUME':\n return os.path.join('/mnt', dataset['name'])\n\n @item_method\n @accepts(Str('id', required=True))\n async def processes(self, oid):\n \"\"\"\n Return a list of processes using this dataset.\n\n Example return value:\n\n [\n {\n \"pid\": 2520,\n \"name\": \"smbd\",\n \"service\": \"cifs\"\n },\n {\n \"pid\": 97778,\n \"name\": \"minio\",\n \"cmdline\": \"/usr/local/bin/minio -C /usr/local/etc/minio server --address=0.0.0.0:9000 --quiet /mnt/tank/wk\"\n }\n ]\n \"\"\"\n result = []\n dataset = await self._get_instance(oid)\n path = self.__attachments_path(dataset)\n zvol_path = f\"/dev/zvol/{dataset['name']}\"\n if path:\n lsof = await run('lsof',\n '-F', 'pcn', # Output format parseable by `parse_lsof`\n '-l', '-n', '-P', # Inhibits login name, hostname and port number conversion\n stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=False, encoding='utf8')\n for pid, name in parse_lsof(lsof.stdout, [path, zvol_path]):\n service = await self.middleware.call('service.identify_process', name)\n if service:\n result.append({\n \"pid\": pid,\n \"name\": name,\n \"service\": service,\n })\n else:\n try:\n cmdline = await self.middleware.run_in_thread(\n lambda: psutil.Process(pid).cmdline()\n )\n except psutil.NoSuchProcess:\n pass\n else:\n result.append({\n \"pid\": pid,\n \"name\": name,\n \"cmdline\": join_commandline(cmdline),\n })\n\n return result\n\n @private\n async def kill_processes(self, oid, restart_services, max_tries=5):\n manually_restart_services = []\n for process in await self.middleware.call('pool.dataset.processes', oid):\n if process.get(\"service\") is not None:\n manually_restart_services.append(process[\"service\"])\n if manually_restart_services and 
not restart_services:\n            raise CallError('Some services have open files and need to be restarted', errno.EBUSY, {\n                'code': 'services_restart',\n                'services': manually_restart_services,\n            })\n\n        for i in range(max_tries):\n            processes = await self.middleware.call('pool.dataset.processes', oid)\n            if not processes:\n                return\n\n            for process in processes:\n                if process.get(\"service\") is not None:\n                    self.logger.info('Restarting service %r that holds dataset %r', process['service'], oid)\n                    await self.middleware.call('service.restart', process['service'])\n                else:\n                    self.logger.info('Killing process %r (%r) that holds dataset %r', process['pid'],\n                                     process['cmdline'], oid)\n                    await self.middleware.call('service.terminate_process', process['pid'])\n\n        processes = await self.middleware.call('pool.dataset.processes', oid)\n        if not processes:\n            return\n\n        self.logger.info('The following processes don\\'t want to stop: %r', processes)\n        raise CallError('Unable to stop processes that have open files', errno.EBUSY, {\n            'code': 'unstoppable_processes',\n            'processes': processes,\n        })\n\n    @private\n    def register_attachment_delegate(self, delegate):\n        self.attachment_delegates.append(delegate)\n\n\nclass PoolScrubService(CRUDService):\n\n    class Config:\n        datastore = 'storage.scrub'\n        datastore_extend = 'pool.scrub.pool_scrub_extend'\n        datastore_prefix = 'scrub_'\n        namespace = 'pool.scrub'\n\n    @private\n    async def pool_scrub_extend(self, data):\n        pool = data.pop('volume')\n        data['pool'] = pool['id']\n        data['pool_name'] = pool['vol_name']\n        Cron.convert_db_format_to_schedule(data)\n        return data\n\n    @private\n    async def validate_data(self, data, schema):\n        verrors = ValidationErrors()\n\n        pool_pk = data.get('pool')\n        if pool_pk:\n            pool_obj = await self.middleware.call(\n                'datastore.query',\n                'storage.volume',\n                [('id', '=', pool_pk)]\n            )\n\n            if len(pool_obj) == 0:\n                verrors.add(\n                    f'{schema}.pool',\n                    'The specified volume does not exist'\n                )\n            elif (\n                    'id' not in data.keys() or\n                    (\n                        'id' in data.keys() and\n                        'original_pool_id' in data.keys() and\n                        pool_pk != data['original_pool_id']\n                    )\n            ):\n                scrub_obj = await self.query(filters=[('pool', '=', pool_pk)])\n                if len(scrub_obj) != 0:\n                    verrors.add(\n                        f'{schema}.pool',\n                        'A scrub with this pool already exists'\n                    )\n\n        return verrors, data\n\n    @accepts(\n        Dict(\n            'pool_scrub_create',\n            Int('pool', validators=[Range(min=1)], required=True),\n            Int('threshold', validators=[Range(min=0)]),\n            Str('description'),\n            Cron(\n                'schedule',\n                defaults={\n                    'minute': '00',\n                    'hour': '00',\n                    'dow': '7'\n                }\n            ),\n            Bool('enabled', default=True),\n            register=True\n        )\n    )\n    async def do_create(self, data):\n        \"\"\"\n        Create a scrub task for a pool.\n\n        `threshold` refers to the minimum amount of time in days that has to pass before\n        a scrub can run again.\n\n        .. 
examples(websocket)::\n\n            Create a scrub task for pool of id 1, to run every Sunday but with a threshold of\n            35 days.\n            The check will run at 3AM every Sunday.\n\n            :::javascript\n            {\n                \"id\": \"6841f242-840a-11e6-a437-00e04d680384\",\n                \"msg\": \"method\",\n                \"method\": \"pool.scrub.create\",\n                \"params\": [{\n                    \"pool\": 1,\n                    \"threshold\": 35,\n                    \"description\": \"Monthly scrub for tank\",\n                    \"schedule\": \"0 3 * * 7\",\n                    \"enabled\": true\n                }]\n            }\n        \"\"\"\n        verrors, data = await self.validate_data(data, 'pool_scrub_create')\n\n        if verrors:\n            raise verrors\n\n        data['volume'] = data.pop('pool')\n        Cron.convert_schedule_to_db_format(data)\n\n        data['id'] = await self.middleware.call(\n            'datastore.insert',\n            self._config.datastore,\n            data,\n            {'prefix': self._config.datastore_prefix}\n        )\n\n        await self.middleware.call('service.restart', 'cron')\n\n        return await self.query(filters=[('id', '=', data['id'])], options={'get': True})\n\n    @accepts(\n        Int('id', validators=[Range(min=1)]),\n        Patch('pool_scrub_create', 'pool_scrub_update', ('attr', {'update': True}))\n    )\n    async def do_update(self, id, data):\n        \"\"\"\n        Update scrub task of `id`.\n        \"\"\"\n        task_data = await self._get_instance(id)\n        original_data = task_data.copy()\n        task_data['original_pool_id'] = original_data['pool']\n        task_data.update(data)\n        verrors, task_data = await self.validate_data(task_data, 'pool_scrub_update')\n\n        if verrors:\n            raise verrors\n\n        task_data.pop('original_pool_id')\n        Cron.convert_schedule_to_db_format(task_data)\n        Cron.convert_schedule_to_db_format(original_data)\n\n        if len(set(task_data.items()) ^ set(original_data.items())) > 0:\n\n            task_data['volume'] = task_data.pop('pool')\n            task_data.pop('pool_name', None)\n\n            await self.middleware.call(\n                'datastore.update',\n                self._config.datastore,\n                id,\n                task_data,\n                {'prefix': self._config.datastore_prefix}\n            )\n\n            await self.middleware.call('service.restart', 'cron')\n\n        return await self._get_instance(id)\n\n    @accepts(Int('id'))\n    async def do_delete(self, id):\n        \"\"\"\n        Delete scrub task of `id`.\n        \"\"\"\n        response = await self.middleware.call(\n            'datastore.delete',\n            self._config.datastore,\n            id\n        )\n\n        await self.middleware.call('service.restart', 'cron')\n        return response\n\n    @accepts(Str('name'), Int('threshold', default=35))\n    async def run(self, name, threshold):\n        \"\"\"\n        Initiate a scrub of a pool `name` if last scrub was performed more than `threshold` days before.\n        \"\"\"\n        await self.middleware.call('alert.oneshot_delete', 'ScrubNotStarted', name)\n        await self.middleware.call('alert.oneshot_delete', 'ScrubStarted', name)\n        try:\n            started = await self.__run(name, threshold)\n        except ScrubError as e:\n            await self.middleware.call('alert.oneshot_create', 'ScrubNotStarted', {\n                'pool': name,\n                'text': e.errmsg,\n            })\n        else:\n            if started:\n                await self.middleware.call('alert.oneshot_create', 'ScrubStarted', name)\n\n    async def __run(self, name, threshold):\n        if name == 'freenas-boot':\n            pool = await self.middleware.call('zfs.pool.query', [['name', '=', name]], {'get': True})\n        else:\n            if not await self.middleware.call('system.is_freenas'):\n                if await self.middleware.call('failover.status') == 'BACKUP':\n                    return\n\n            pool = await self.middleware.call('pool.query', [['name', '=', name]], {'get': True})\n            if pool['status'] == 'OFFLINE':\n                if not pool['is_decrypted']:\n                    raise ScrubError(f'Pool {name} is not decrypted, skipping scrub')\n                else:\n                    raise ScrubError(f'Pool {name} is offline, not running scrub')\n\n        if pool['scan']['state'] == 'SCANNING':\n            return 
False\n\n history = (await run('zpool', 'history', name, encoding='utf-8')).stdout\n for match in reversed(list(RE_HISTORY_ZPOOL_SCRUB.finditer(history))):\n last_scrub = datetime.strptime(match.group(1), '%Y-%m-%d.%H:%M:%S')\n break\n else:\n # creation time of the pool if no scrub was done\n for match in RE_HISTORY_ZPOOL_CREATE.finditer(history):\n last_scrub = datetime.strptime(match.group(1), '%Y-%m-%d.%H:%M:%S')\n break\n else:\n logger.warning(\"Could not find last scrub of pool %r\", name)\n last_scrub = datetime.min\n\n if (datetime.now() - last_scrub).total_seconds() < (threshold - 1) * 86400:\n logger.debug(\"Pool %r last scrub %r\", name, last_scrub)\n return False\n\n await self.middleware.call('zfs.pool.scrub', pool['name'])\n return True\n\n\ndef parse_lsof(lsof, dirs):\n pids = {}\n\n pid = None\n command = None\n for line in lsof.split(\"\\n\"):\n if line.startswith(\"p\"):\n pid = None\n command = None\n\n try:\n pid = int(line[1:])\n except ValueError:\n pass\n\n if line.startswith(\"c\"):\n command = line[1:]\n\n if line.startswith(\"f\"):\n pass\n\n if line.startswith(\"n\"):\n path = line[1:]\n if os.path.isabs(path) and any(os.path.commonpath([path, dir]) == dir for dir in dirs):\n if pid is not None and command is not None:\n pids[pid] = command\n\n return list(pids.items())\n\n\nasync def devd_zfs_hook(middleware, data):\n if data.get('subsystem') != 'ZFS':\n return\n\n if data.get('type') in (\n 'ATTACH',\n 'DETACH',\n 'resource.fs.zfs.removed',\n 'misc.fs.zfs.config_sync',\n ):\n asyncio.ensure_future(middleware.call('pool.sync_encrypted'))\n\n if data.get('type') == 'ereport.fs.zfs.deadman':\n asyncio.ensure_future(middleware.call('alert.oneshot_create', 'ZfsDeadman', {\n 'vdev': data.get('vdev_path', ''),\n 'pool': data.get('pool', ''),\n }))\n\n\ndef setup(middleware):\n middleware.register_hook('devd.zfs', devd_zfs_hook)\n asyncio.ensure_future(middleware.call('pool.configure_resilver_priority'))\n","sub_path":"src/middlewared/middlewared/plugins/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":129069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"382789960","text":"from flask import Flask, jsonify, request, render_template\nfrom argparse import ArgumentParser\nfrom blockchain import Blockchain\nimport threading\nimport requests\nimport logging\nimport ast\nimport zmq\nimport time, datetime, json\nfrom messenger import Messenger\nfrom message import Message\n\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger('werkzeug').setLevel(logging.ERROR)\n\n# Instantiate our Node\napp = Flask(__name__)\n\n# Instantiate the Blockchain\nblockchain = Blockchain()\n\n# Messenger init\nmsgr = Messenger()\n\n############################## NAVBAR CONTROLS ##############################\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/transactions')\ndef transactions():\n return render_template('transactions.html', transaction_info='')\n\n@app.route('/mining')\ndef mining():\n return render_template('mining.html', mine_info='')\n\n@app.route('/chain', methods=['GET'])\ndef chain():\n response = {\n 'length': len(blockchain.chain),\n 'chain': blockchain.chain\n\n }\n return render_template('blockchain.html', blockchain=response)\n\n@app.route('/txs', methods=['GET'])\ndef txs():\n response = {\n 'num_txs': len(blockchain.current_transactions),\n 'txs': blockchain.current_transactions\n }\n # print(blockchain.utxo_pool)\n return render_template('txs.html', 
txs=response)\n\n@app.route('/jtxs', methods=['GET'])\ndef full_txs():\n response = {\n 'txs': blockchain.current_transactions\n }\n return jsonify(response), 200\n\n@app.route('/genesis', methods=['GET'])\ndef genesis():\n blockchain.genesis()\n return jsonify({}), 200\n\n@app.route('/keys', methods=['GET'])\ndef keys():\n return render_template('keys.html', key_info='')\n\n@app.route('/utxo')\ndef utxo():\n response = {\n 'num_utxos': len(blockchain.utxo_pool),\n 'utxos': blockchain.utxo_pool\n }\n return render_template('utxo.html', utxo=response)\n\n@app.route('/key')\ndef key():\n response = {\n 'pk': blockchain.key_to_addr(blockchain.vk),\n 'sk': blockchain.key_to_addr(blockchain.sk)\n }\n return jsonify(response), 200\n\n@app.route('/nodes')\ndef nodes():\n peers = blockchain.nodes\n return render_template('nodes.html', peers=peers, key_info=[blockchain.key_to_addr(blockchain.vk), blockchain.key_to_addr(blockchain.sk)])\n\n############################## FUNCTIONALITY ##############################\n\n@app.route('/mine', methods=['GET', 'POST'])\ndef mine():\n if len(blockchain.chain) == 0:\n #haven't synced the genesis block\n logging.info(\"Syncing genesis @ %s\" % blockchain.address)\n blockchain.force_resolve()\n logging.info(\"After Syncing genesis: %s %s\" % (blockchain.address, len(blockchain.chain)))\n block = blockchain.mine()\n response = {\n 'message': \"New Block Forged\",\n 'index': block['index'],\n 'transactions': block['transactions'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash'],\n }\n #return jsonify(response), 200\n return render_template('mining.html', mine_info=response)\n\n@app.route('/transactions/new', methods=['POST'])\ndef new_transaction():\n # Check for required fields.\n values = request.form\n if len(values) == 0:\n values = json.loads(request.data)\n\n fields = [k for k in values]\n required = ['inputs', 'outputs', 'priv_key']\n if not all(k in fields for k in required):\n logging.info(\"xxxxx New TX Failed1 xxxxx\")\n return render_template('transactions.html', transaction_info={'_message':'Incorrect transaction format.'})\n # Check to make sure they put in something.\n if len(values['inputs']) == 0 or len(values['outputs']) == 0 or len(values['priv_key']) == 0:\n logging.info(\"xxxxx New TX Failed2 xxxxx\")\n return render_template('transactions.html', transaction_info={'_message':'Missing values -- please provide transaction inputs, outputs, and signing key'})\n # Create a new transaction\n try:\n inputs = ast.literal_eval(values['inputs'])\n outputs = ast.literal_eval(values['outputs'])\n except Exception as e:\n inputs = values['inputs']\n outputs = values['outputs']\n valid = blockchain.new_transaction(inputs, outputs, values['priv_key'])\n print(valid)\n if len(blockchain.current_transactions) >= blockchain.transactions_per_block:\n blockchain.mine()\n return blockchain.last_block['index']\n if type(valid) == int:\n response = {'_message': f'Transaction will be added to Block {valid}',\n 'ins': values['inputs'],\n 'outs': values['outputs']}\n return render_template(\"transactions.html\", transaction_info=response)\n else:\n return render_template(\"transactions.html\", transaction_info=valid[0])\n\n@app.route('/keys/generate', methods=['POST'])\ndef generate_keypair():\n '''\n In the real world you would do this locally so the node can't see your pk/sk pair, but this isn't the real world, now is it?\n '''\n pk, vk = blockchain.generate_keypair()\n response = {\n 'public_key': blockchain.key_to_addr(vk),\n 'secret_key': 
blockchain.key_to_addr(pk)\n }\n return render_template('keys.html', key_info=response)\n\n@app.route('/jutxo', methods=['GET'])\ndef full_utxo():\n response = {\n 'utxos': blockchain.utxo_pool\n }\n return jsonify(response), 200\n\n@app.route('/jchain', methods=['GET'])\ndef full_chain():\n response = {\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain),\n }\n return jsonify(response), 200\n\n@app.route('/good', methods=['GET'])\ndef is_peer_good():\n response = {\n 'good': blockchain.good\n }\n return jsonify(response), 200\n\n@app.route('/nodes/flip', methods=['GET', 'POST'])\ndef flip_node():\n '''\n This will flip a node from good to bad or vice versa.\n '''\n blockchain.flip_node()\n response = {\n 'message': 'Flipped node'\n }\n return jsonify(response), 200\n\n@app.route('/nodes/register',methods=['POST'])\ndef register_node():\n # NOTE: correct format for this is something like node=127.0.0.1:4000\n values = json.loads(request.data)\n node = values['node'][0]\n msgr.subscribe(node)\n if node is None:\n return \"Error: please supply a valid node argument (i.e. /nodes/register?node=http://127.0.0.1:4000)\", 400\n try:\n blockchain.register_node(node)\n except ValueError as e:\n logging.error(\"Error registering node: %s error: %s\", node, e)\n response = {\n 'message': 'Current blockchain nodes',\n 'total_nodes': list(blockchain.nodes),\n }\n\n # Skip resolving right after registration\n # Somehow calling /nodes/resolve here cause problem, but it works outside\n # We can resolve elsewhere\n # Sync node to all other nodes, do chain resolution.\n # for peer in blockchain.nodes:\n # requests.get(f\"http://{peer}/nodes/resolve\") # chain resolution\n\n return jsonify(response), 201\n\n\n@app.route('/nodes/peers', methods=['GET'])\ndef share_peers():\n response = {\n 'message': 'Current blockchain nodes',\n 'nodes': list(blockchain.nodes),\n }\n return jsonify(response), 200\n\n\n@app.route('/nodes/resolve', methods=['GET'])\ndef consensus():\n '''\n Will perform resolution among chain versions. 
\n '''\n replaced = blockchain.resolve_conflicts()\n\n if replaced:\n response = {\n 'message': 'Our chain was replaced',\n 'new_chain': blockchain.chain\n }\n else:\n response = {\n 'message': 'Our chain is authoritative',\n 'chain': blockchain.chain\n }\n\n return jsonify(response), 200\n\n\ndef register_with_neighbor(neighbor, address):\n # register current node with its neighbors\n payload = {'node': [address]}\n try:\n requests.post(f\"{neighbor}/nodes/register\", json=payload)\n except requests.exceptions.RequestException as e:\n logging.error(\"Error connecting to neighbor %s : %s\", neighbor, e)\n\n\ndef sync_with_peers(seeds, address):\n neighbours = seeds.split(\",\")\n for neighbor in neighbours:\n try:\n blockchain.register_node(neighbor)\n except ValueError as e:\n logging.error(\"Error registering node: %s error: %s\", neighbor, e)\n continue\n register_with_neighbor(neighbor, address)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('-s', '--seeds', type=str, help='Initial neighboring blockchain nodes')\n parser.add_argument('-a', '--address', type=str, default=\"http://127.0.0.1:5000\", help='Local address')\n parser.add_argument('-e', '--ensemble', action='store_true')\n args = parser.parse_args()\n blockchain.address = args.address.split(\"//\")[1]\n blockchain.set_msgr(msgr)\n\n if args.seeds is not None:\n sync_with_peers(args.seeds, args.address)\n thr = threading.Thread(target=blockchain.query_nodes)\n thr.daemon = True\n thr.start()\n\n msgr.start(args.address, blockchain)\n\n if args.seeds is not None:\n for seed in args.seeds.split(','):\n msgr.subscribe(seed)\n\n if not args.ensemble: #for ensemble mode, we wait for instruction\n blockchain.start()\n\n app.run(host='0.0.0.0', port=int(args.address.split(\":\")[2]), debug=False, use_reloader=False)\n","sub_path":"hw3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"426236318","text":"\n\nfrom requests import get\nfrom bs4 import BeautifulSoup\nimport xlsxwriter\nimport pandas as pd\nimport csv\nimport urllib\n\nurl = 'https://www.dst.dk/da/TilSalg/Forskningsservice/Data/Register_Variabeloversigter'\nresponse = get(url)\nsoup = BeautifulSoup(response.content, \"lxml\")\n\narbejde = 1\n\nif arbejde == 1:\n path = \"C:/Users/mikkel-bj/Desktop/datamanager/script/from_dst/\"\nelse:\n path = \"C:\\\\Users\\\\Mikkel\\Desktop\\\\arbejde\\\\Project database\\\\datamanager\\\\script\\\\from_dst\\\\\"\ndef crawler(path):\n\n for a in soup.find_all('a', href=True):\n\n if a['href'].__contains__(\"extranet\"):\n if a['href'].__contains__(\"http://\"):\n continue\n start_index = a['href'].find(\"Variabellister/\")\n length = len(\"Variabellister/\")\n total = start_index + length\n name = a['href']\n name = name[total::]\n name = name.split(' -')[0]\n\n #code to update a specific register:\n #if name != \"AKAS\":\n # print(\"not this\" + name)\n # continue\n\n\n #name = a['href'][total:total+4]\n #name = name.replace(\" \",\"\")\n #name = name.replace(\"-\", \"\")\n #name = name.replace(\"_\", \"\")\n print(name)\n url2 = 'https://www.dst.dk' + a['href']\n url2 = url2.replace(\" \",\"%20\")\n url2 = url2.replace(\"Æ\",\"%C3%86\")\n url2 = url2.replace(\"æ\",\"%C3%A6\")\n url2 = url2.replace(\"ø\",\"%C3%B8\")\n url2 = url2.replace(\"å\",\"%C3%A5\")\n url2 = url2.replace(\"–\",\"%E2%80%93\") #note this charachter is NOT a normal \"-\"!!\n url2 = 
url2.replace(\"Ø\",\"%C3%98\")\n url2 = url2.replace(\"§\", \"%C2%A7\")\n\n print(url2)\n try:\n dfs = pd.read_html(url2)\n dfs = dfs[0]\n dfs.to_excel(path + name + \"2.xlsx\")\n except (urllib.error.HTTPError, IndexError,ValueError):\n print(\"error\")\n\n\ncrawler(path)\n\n\n\n\n\n\n\n","sub_path":"venv/dst_crawler.py","file_name":"dst_crawler.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"227983492","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport heapq, operator, pprint, time, matplotlib, community\nfrom modularity import modularity\nimport numpy as np\nfrom simrank import simrank\nimport collections as cl\n\n# Creates the networks used by all the auxilliary functions, based off a target user\n# and desired level (office/role etc)\ndef makeNetwork(target, level, df):\n\n\n # get correct level of network\n if level == 'Local':\n # find all rows where target is sender or receiver and\n # reduce df to only the those rows\n dfTarget = df.loc[(df['Sender'] == target) | (df['Recipient']==target)]\n\n elif level == 'Global':\n dfTarget = df\n\n\n else:\n # find targets office/dpartment/whatever\n targetLevel = df.loc[(df['Sender'] == target)][level].iloc[0]\n # check if level of target exists (i.e. sender XYZ has a non-blank 'SendersDepartment')\n while True:\n #### be careful with this if statement, didnt get time to test properly (i.e. 'if targetLevel' vs 'if targetLevel None' etc)\n if targetLevel: \n break\n else:\n raise Exception('Target {} is missing attribute {}. try agian'.format(target, level))\n # reduce df to only the targets office/department/whatever\n dfTarget = df.loc[(df[level]==targetLevel)]\n\n\n\n # get edges\n edges = []\n for temp in zip(dfTarget['Sender'], dfTarget['Recipient']):\n edges.append(tuple(temp))\n\n # get edge weights {'node1', 'node2': numberOfEmails} (unordered)\n edgesWeight = cl.Counter(map(frozenset, edges))\n G = nx.Graph()\n nodeWeightList = []\n for edge in edgesWeight:\n temp = list(edge)\n if len(temp) < 2: ## CATCH FOR SENDING EMAIL TO SELF\n continue\n nodeWeightList.append((temp[0], temp[1], edgesWeight[edge]))\n\n # add edges with weight attr\n G.add_weighted_edges_from(nodeWeightList)\n\n # add distance attribute to edges, defined as 1/weight\n distanceDict = {(e1, e2): 1 / weight for e1, e2, weight in G.edges(data='weight')}\n nx.set_edge_attributes(G, distanceDict, 'distance')\n\n\n\n\n # get nodes (think this is actually unnecesary and its smart enough to not add duplicates?)\n dfUnique = dfTarget.drop_duplicates(subset = ['Sender', 'Recipient'])\n IDList = list(dfUnique['Sender'])\n\n # add nodes\n G.add_nodes_from(IDList)\n\n\n\n # grab only target for labelling/colouring\n labels = {}\n for node in G.nodes():\n if node == target:\n labels[node] = 'Target - ' + target\n\n\n # get list of thickness for edges based off weights\n edges = G.edges()\n edgeThicc = [G[u][v]['weight'] for u,v in edges]\n n_nodes = (len(G.nodes()))\n return (G, labels, edgeThicc, n_nodes,nodeWeightList)\n4","sub_path":"makeNetwork.py","file_name":"makeNetwork.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"576838310","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport re\n\n\"\"\"\nSimple and stupid implementation of vhdl formater, no parser based on 
regular expressions\n\"\"\"\n\nindentIncr = [\"^entity\", \"^port\\s*\\(\", \"^port\\s*map\\s*\\(\", \"^generic\\s*map\\s*\\(\", \"^generic\\s*\\(\",\n \"^architecture\", \"^if\", \"^case\", \"^port\\s+map\\s*\\(\", \"^process\", \"^while\", \"^component\",\n \"\\S+\\s*:\\s*process\"]\nindentDecr = [\"^end[^\\w\\d_]\", \"^\\)\"]\nindentPeak = [\"^begin\", \"^elsif\", \"^else\", \"^when\",]\n\nindentIncr = list(map(lambda x: re.compile(x, re.IGNORECASE), indentIncr))\nindentDecr = list(map(lambda x: re.compile(x, re.IGNORECASE), indentDecr))\nindentPeak = list(map(lambda x: re.compile(x, re.IGNORECASE), indentPeak))\n\n\ndef get_indent(i):\n return \"\".join([\" \" for _ in range(i)])\n\n\ndef formatVhdl(vhdlString):\n indent = 0\n lines = []\n\n def getIndent(i):\n return get_indent(i * 4)\n\n for l in vhdlString.split(\"\\n\"):\n l = l.strip()\n if any([x.match(l) for x in indentDecr]):\n indent -= 1\n lines.append(getIndent(indent) + l)\n elif any([x.match(l) for x in indentIncr]):\n lines.append(getIndent(indent) + l)\n indent += 1\n elif any([x.match(l) for x in indentPeak]):\n lines.append(getIndent(indent - 1) + l)\n else:\n lines.append(getIndent(indent) + l)\n return \"\\n\".join(lines)\n","sub_path":"hwt/serializer/vhdl/formater.py","file_name":"formater.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"249536117","text":"from settings import TZ, JiraSettings\nfrom datetime import datetime\nfrom jira.client import JIRA\n\n\n_SEP = '—' * 35 + '\\n\\n'\n\nsession = JIRA(JiraSettings.URL, basic_auth=(JiraSettings.USER, JiraSettings.TOKEN))\n\n\ndef get_tickets(query):\n return session.search_issues(query)\n\n\ndef create_ticket(pfx, sections):\n date_local = datetime.now(TZ).date()\n\n descr = ''.join([_SEP + s.get_section() for s in sections])\n\n issue_fields = {\n 'project': JiraSettings.PROJECT,\n 'summary': f'{pfx} NOC Handover {date_local}',\n 'description': descr,\n 'issuetype': {'name': 'Story'},\n }\n\n ticket = session.create_issue(fields=issue_fields)\n return ticket\n\n\ndef update_ticket(ticket, sections):\n descr = ''.join([_SEP + s.get_section() for s in sections])\n ticket.update(description=descr)\n","sub_path":"jira_interface.py","file_name":"jira_interface.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"565848937","text":"# Licensed Materials - Property of IBM\n# Copyright IBM Corp. 
2016\n\n# Import the SPL decorators\nfrom streamsx.spl import spl\n\n#------------------------------------------------------------------\n# Test passing in SPL types functions\n#------------------------------------------------------------------\n\n# Defines the SPL namespace for any functions in this module\n# Multiple modules can map to the same namespace\ndef spl_namespace():\n return \"com.ibm.streamsx.topology.pytest.pytypes\"\n\n@spl.map()\ndef ToBlob(*s):\n return (s[0].encode('utf-8'),)\n\n@spl.map()\ndef ToListBlob(*s):\n return ([s[0].encode('utf-8')],)\n\n@spl.map()\ndef ToMapBlob(*s):\n return ({\"BLOB\": s[0].encode('utf-8')},)\n\ndef validate_mv_blob(v):\n if not isinstance(v, memoryview):\n return (\"Expected memory view is\" + str(type(v)),)\n bs = v.tobytes()\n\n if not v.readonly:\n return \"Expected readonly memory view\",\n\n if v.itemsize != 1:\n return \"Expected readonly memory view\",\n\n return None\n\ndef validate_mv_blob_release(l):\n for b in l:\n try:\n bs = b.tobytes()\n return \"Expected released memory view\",\n except ValueError as ve:\n pass\n return None\n\n@spl.map()\nclass BlobTest:\n \"\"\"\n Expect blob tuples, need to verify that after\n the call the previous value cannot be accessed.\n \"\"\"\n def __init__(self, keep):\n self.last = list()\n self.keep = keep\n\n def __call__(self, *tuple):\n v = tuple[0]\n mvc = validate_mv_blob(v)\n if mvc:\n return mvc\n\n mvc = validate_mv_blob_release(self.last)\n if mvc:\n return mvc\n\n if self.keep:\n self.last.append(v)\n return str(v, 'utf-8'),\n\n@spl.map()\nclass ListBlobTest:\n \"\"\"\n Expect list tuples, need to verify that after\n the call the previous value cannot be accessed.\n \"\"\"\n def __init__(self):\n self.last = list()\n\n def __call__(self, *tuple):\n v = tuple[0][0]\n mvc = validate_mv_blob(v)\n if mvc:\n return mvc\n\n mvc = validate_mv_blob_release(self.last)\n if mvc:\n return mvc\n\n self.last.append(v)\n return str(v, 'utf-8'),\n\n@spl.map()\nclass MapBlobTest:\n \"\"\"\n Expect map tuples, need to verify that after\n the call the previous value cannot be accessed.\n \"\"\"\n def __init__(self):\n self.last = list()\n\n def __call__(self, *tuple):\n v = tuple[0][\"BLOB\"]\n mvc = validate_mv_blob(v)\n if mvc:\n return mvc\n\n mvc = validate_mv_blob_release(self.last)\n if mvc:\n return mvc\n\n self.last.append(v)\n return str(v, 'utf-8'),\n\n","sub_path":"test/python/spl/testtkpy/opt/python/streams/test_types.py","file_name":"test_types.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"59336783","text":"import os\nfrom PyQt5 import QtCore\n\nfrom PyQt5.QtWidgets import QWizardPage, QFileDialog\n\nfrom gui.data_source import DataSource\nfrom gui.select_string_resources.select_resources_form import Ui_SelectResourcesForm\n\n\nclass SelectStringResourcesPage(QWizardPage, Ui_SelectResourcesForm):\n\n filenameSelected = QtCore.pyqtSignal()\n\n\n def __init__(self, dataSource: DataSource, parent=None):\n super().__init__(parent)\n self.dataSource = dataSource\n self.setupUi(self)\n self.chooseAndroidFile.clicked.connect(self.selectAndroidFile)\n self.chooseIOSFile.clicked.connect(self.selectIosFile)\n self.filenameSelected.connect(self.completeChanged)\n\n def isComplete(self):\n return os.path.isfile(self.androidResourcesPath.text()) \\\n or os.path.isfile(self.iOSResorcesPath.text())\n\n def selectAndroidFile(self):\n fileName = QFileDialog.getOpenFileName(\n parent=self,\n caption=\"Open 
Android strings.xml\",\n directory=self.androidResourcesPath.text(),\n filter=\"Android String Resource File (strings.xml)\"\n )[0]\n if fileName:\n self.androidResourcesPath.setText(fileName)\n self.dataSource.androidFile = fileName\n self.filenameSelected.emit()\n\n\n def selectIosFile(self):\n self.filenameSelected.emit()\n fileName = QFileDialog.getOpenFileName(\n parent=self,\n caption=\"Open iOS *.strings file\",\n directory=self.iOSResorcesPath.text(),\n filter=\"iOS String Resource File (*.strings)\"\n )[0]\n if fileName:\n self.iOSResorcesPath.setText(fileName)\n self.dataSource.iosFile = fileName\n self.filenameSelected.emit()\n\n","sub_path":"gui/select_string_resources/select_string_resources.py","file_name":"select_string_resources.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"413674333","text":"from django.conf.urls import url\n\nfrom .views import ProfileRetrieveAPIView, ProfileUpdateAPIView\n\napp_name = 'profiles'\n\nurlpatterns = [\n url(r'^profile/?$', ProfileUpdateAPIView.as_view()),\n url(r'^profile/(?P\\w+)/?$', ProfileRetrieveAPIView.as_view()),\n]\n","sub_path":"Exercise/Exercise/apps/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"458117018","text":"# coding=utf-8\n\nfrom constantes import *\nfrom gui import PNJSpeaking\n\n\nSTANDART_MOVE = [\n (0, 0),\n (0, -1),\n (0, -2),\n (1, -2),\n (2, -2),\n (3, -2),\n (3, -1),\n (3, 0),\n (2, 0),\n (1, 0)\n]\nCROSS_MOVE = [\n (0, 0),\n (0, 1),\n (0, 2),\n (-1, 2),\n (-2, 2),\n (-1, 2),\n (0, 2),\n (0, 3),\n (0, 4),\n (0, 3),\n (0, 2),\n (1, 2),\n (2, 2),\n (1, 2),\n (0, 2),\n (0, 1)\n]\nVERTICAL_MOVE = [\n (0, 0),\n (0, 1),\n (0, 2),\n (0, 3)\n]\nHORIZONTAL_MOVE = [\n (0, 0),\n (1, 0),\n (2, 0),\n (3, 0)\n]\n\n\nclass PNJ:\n def __init__(self, ecran, carte_mgr, pos: tuple, type_mvt: list,\n font, texte: str, dir_: int=1, sprite: str='bas.png') -> None:\n self.ecran = ecran\n self.carte_mgr = carte_mgr\n self.pos = list(pos)\n self.type_mvt = type_mvt\n self.font = font\n self.cur_scheme = 0\n self.real_pos = self.pos\n self.speak = False\n self.dir = dir_\n self.mdt = 0\n self.orientation = BAS\n self.sprite = rendering_engine.load_image(os.path.join(\"..\", \"assets\", \"pnj\", sprite))\n self.on_speak = PNJSpeaking(texte, self.ecran, self.font)\n\n def update(self, dt: int=1):\n self.mdt += dt\n self.mdt %= 150\n if not self.mdt:\n self.move()\n self.render(dt)\n\n def get_pos(self):\n return self.pos\n\n def move_scheme(self):\n self.cur_scheme += self.dir\n if self.cur_scheme + self.dir < 0:\n self.dir = +1\n if self.cur_scheme + self.dir >= len(self.type_mvt):\n self.dir = -1\n\n def speaking(self, dt: int=1):\n return self.on_speak.update(dt)\n\n def move(self):\n self.move_scheme()\n\n tmp = self.type_mvt[self.cur_scheme]\n\n actual_x, actual_y = tmp\n actual_x *= TILE_SIZE\n actual_y *= TILE_SIZE\n actual_x += self.pos[0]\n actual_y += self.pos[1]\n\n if tmp[0] > 0:\n self.orientation = DROITE\n if tmp[0] < 0:\n self.orientation = GAUCHE\n if tmp[1] > 0:\n self.orientation = HAUT\n if tmp[1] < 0:\n self.orientation = BAS\n\n # Détection des collisions\n if self.orientation == HAUT:\n if self.carte_mgr.collide_at(actual_x // TILE_SIZE, actual_y // TILE_SIZE):\n actual_y += TILE_SIZE\n self.dir = -self.dir\n\n if self.orientation == GAUCHE:\n if self.carte_mgr.collide_at(actual_x // 
TILE_SIZE, actual_y // TILE_SIZE):\n actual_x += TILE_SIZE\n self.dir = -self.dir\n\n if self.orientation == DROITE:\n if self.carte_mgr.collide_at(actual_x // TILE_SIZE, actual_y // TILE_SIZE):\n actual_x -= TILE_SIZE\n self.dir = -self.dir\n\n if self.orientation == BAS:\n if self.carte_mgr.collide_at(actual_x // TILE_SIZE, actual_y // TILE_SIZE):\n actual_y -= TILE_SIZE\n self.dir = -self.dir\n\n self.real_pos = (actual_x, actual_y)\n\n def render(self, dt: int=1):\n self.ecran.blit(self.sprite, self.real_pos)\n if self.speak:\n self.speak = self.speaking(dt)\n\n def player_want_to_talk(self):\n self.speak = True","sub_path":"src/pnj_manager.py","file_name":"pnj_manager.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"589008498","text":"\"\"\"\n-*- coding: utf-8 -*-\nProject\t:Python-100-Days\n\nName\t:斐波拉契数列\n\nDate : 2019-07-02 13:00:08\nAuthor : Younth Yang (8593009@qq.com)\n\"\"\"\n\na = 0\nb = 1\nfor _ in range(200):\n a, b = b, a + b\n print(a, end=' ')","sub_path":"Day01-15/Day05/variable4.py","file_name":"variable4.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"155183011","text":"# Copyright (C) 2022, Pyronear.\n\n# This program is licensed under the Apache License 2.0.\n# See LICENSE or go to for full license details.\n\n\nimport argparse\nimport json\nimport logging\nimport os\nimport time\nfrom pathlib import Path\n\nimport urllib3\nfrom dotenv import load_dotenv\n\nfrom pyroengine import SystemController\nfrom pyroengine.engine import Engine\nfrom pyroengine.sensors import ReolinkCamera\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nlogging.basicConfig(format=\"%(asctime)s | %(levelname)s: %(message)s\", level=logging.INFO, force=True)\n\n\ndef main(args):\n print(args)\n\n # .env loading\n load_dotenv(\".env\")\n API_URL = os.environ.get(\"API_URL\")\n LAT = float(os.environ.get(\"LAT\"))\n LON = float(os.environ.get(\"LON\"))\n assert isinstance(API_URL, str) and isinstance(LAT, float) and isinstance(LON, float)\n CAM_USER = os.environ.get(\"CAM_USER\")\n CAM_PWD = os.environ.get(\"CAM_PWD\")\n assert isinstance(CAM_USER, str) and isinstance(CAM_PWD, str)\n\n # Loading camera creds\n with open(args.creds, \"rb\") as json_file:\n cameras_credentials = json.load(json_file)\n\n # Check if model is available in cache\n cache = Path(args.cache)\n _model, _config = args.model, args.config\n if cache.is_dir():\n if cache.joinpath(\"model.onnx\").is_file():\n _model = str(cache.joinpath(\"model.onnx\"))\n if cache.joinpath(\"config.json\").is_file():\n _config = str(cache.joinpath(\"config.json\"))\n\n if isinstance(_model, str):\n logging.info(f\"Loading model from: {_model}\")\n\n engine = Engine(\n args.hub,\n args.thresh,\n API_URL,\n cameras_credentials,\n LAT,\n LON,\n frame_saving_period=args.save_period // args.period,\n model_path=_model,\n cfg_path=_config,\n cache_folder=args.cache,\n revision=args.revision,\n backup_size=args.backup_size,\n )\n\n sys_controller = SystemController(\n engine,\n [ReolinkCamera(_ip, CAM_USER, CAM_PWD) for _ip in cameras_credentials],\n )\n\n while True:\n start_ts = time.time()\n sys_controller.run()\n # Sleep only once all images are processed\n time.sleep(max(args.period - time.time() + start_ts, 0))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Raspberry Pi system 
controller\", formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n # Model\n parser.add_argument(\"--hub\", type=str, default=\"pyronear/rexnet1_3x\", help=\"HF Hub repo to use\")\n parser.add_argument(\"--model\", type=str, default=None, help=\"Overrides the ONNX model\")\n parser.add_argument(\"--config\", type=str, default=None, help=\"Overrides the model config\")\n parser.add_argument(\"--thresh\", type=float, default=0.5, help=\"Confidence threshold\")\n parser.add_argument(\"--revision\", type=str, default=None, help=\"HF Hub revision to use for model download\")\n # Camera & cache\n parser.add_argument(\"--creds\", type=str, default=\"data/credentials.json\", help=\"Camera credentials\")\n parser.add_argument(\"--cache\", type=str, default=\"./data\", help=\"Cache folder\")\n # Backup\n parser.add_argument(\"--cache\", type=int, default=30, help=\"Number of days before local backup is delete\")\n # Time config\n parser.add_argument(\"--period\", type=int, default=30, help=\"Number of seconds between each camera stream analysis\")\n parser.add_argument(\"--save-period\", type=int, default=3600, help=\"Number of seconds between each media save\")\n args = parser.parse_args()\n\n main(args)\n","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"119532712","text":"import torch\nfrom model import Transformer, CompletionTransformer\nimport open3d as o3d\nfrom dataset import SharpNetCompletionDataset\nimport numpy as np\nfrom loss import CDList\nimport utils\n\n\nw = 22\nd = 2 / w\nk = 20\nparam1_load_path = \"../params/transformer-w%d-pos-emb-mask-fix.pth\" % w\nparam2_load_path = \"../params/completion-step2-w%d-ptconv-k20-cd-adam-upatte.pth\" % w\ngpu, cpu = torch.device(\"cuda:0\"), torch.device(\"cpu\")\n\nvoc_size = w**3\npad_num, start_num, end_num = w**3-1, w**3-2, w**3-3\nstep_1_net = Transformer(inp_voc_size=voc_size, out_voc_size=voc_size, out_dim=voc_size, d=512,\n n_encoder=6, n_encoder_head=8, n_decoder=6, n_decoder_head=8, pad_num=pad_num)\nstep_2_net = CompletionTransformer(n_shift_points=7, d=128, n_encoder=6, n_head=8, k=k, downsample=\"PointConv\")\nloss_fn = CDList()\n# step 1\nstep_1_net.to(gpu)\nstep_1_net.load_state_dict(torch.load(param1_load_path))\nstep_1_net.to(cpu)\n# step 2\nstep_2_net.to(gpu)\nstep_2_net.load_state_dict(torch.load(param2_load_path))\nstep_2_net.to(cpu)\nprint(\"init finish\")\n\n\ndef processbar(current, totle):\n process_str = \"\"\n for i in range(int(20*current/totle)):\n process_str += \"█\"\n while len(process_str) < 20:\n process_str += \" \"\n return \"%s| %d / %d\" % (process_str, current, totle)\n\n\ndef to_categorical(y, num_classes):\n return torch.eye(num_classes)[y.cpu().data.numpy(), ].to(gpu)\n\n\ndef evaluate():\n step_1_net.eval()\n # step_2_net.eval()\n # cls_list = [\"Airplane\", \"Bag\", \"Cap\", \"Car\", \"Chair\", \"Earphone\",\n # \"Guitar\", \"Knife\", \"Lamp\", \"Laptop\", \"Motorbike\", \"Mug\",\n # \"Pistol\", \"Rocket\", \"Skateboard\", \"Table\"]\n cls_list = [\"Airplane\"]\n utils.fps_rand = False\n with torch.no_grad():\n for test_cls in range(0, 16):\n test_dataset = SharpNetCompletionDataset(json_path=\"train_test_split/shuffled_test_file_list.json\", cls_=test_cls, w=w)\n rand_idx = torch.arange(0, len(test_dataset))\n\n print(\"%s\" % cls_list[test_cls])\n process = 0\n pred_to_gt, gt_to_pred = 0, 0\n for i in range(len(test_dataset)):\n step_1_net.to(gpu)\n\n 
remain_pc_list, crop_list, remain_grid_list, crop_grid_list, remain_grid_id_list, crop_grid_id_list, cls_ = test_dataset[rand_idx[i]]\n                decode_grid_list = []\n                for remain_grid_id in remain_grid_id_list:\n                    remain_grid_id = torch.LongTensor(remain_grid_id).unsqueeze(0)\n                    remain_grid_id = remain_grid_id.to(gpu)\n                    cur_num = [start_num]\n                    last_num = start_num\n                    pts = []\n                    pts_num, max_pt_num = 0, 400\n                    while last_num != end_num and pts_num < max_pt_num:\n                        inp = torch.LongTensor([cur_num]).to(gpu)\n                        decoder_out = step_1_net(remain_grid_id, inp)\n                        # decoder_out = decoder_out.view(-1, w).argmax(1).view(-1, 3)\n                        decoder_out = decoder_out.view(-1, voc_size).argmax(1)\n                        # last_xyz = decoder_out[-1, :]\n                        last_xyz = [decoder_out[-1] // w ** 2, decoder_out[-1] % w ** 2 // w, decoder_out[-1] % w ** 2 % w]\n                        pts.append([last_xyz[0] * d + 0.5 * d, last_xyz[1] * d + 0.5 * d, last_xyz[2] * d + 0.5 * d])\n                        # last_num = last_xyz[0]*w**2+last_xyz[1]*w+last_xyz[2]\n                        last_num = decoder_out[-1]\n                        cur_num.append(last_num)\n                        pts_num += 1\n                    # print(pts_num)\n                    pts = np.array(pts)[:-1, :] - 1\n                    decode_grid_list.append(pts)\n                # print(len(decode_grid_list))\n                process += 1\n                step_1_net.to(cpu)\n                # step 2\n                # compute the maximum number of points across the samples\n                max_pt_num = 0\n                need_pts_num = []\n                for j in range(len(decode_grid_list)):\n                    max_pt_num = max(max_pt_num, remain_pc_list[j].shape[0]+decode_grid_list[j].shape[0])\n                    need_pts_num.append(decode_grid_list[j].shape[0])\n                # pad the point sets so they all contain the same number of points\n                inp = []\n                for j in range(len(decode_grid_list)):\n                    if remain_pc_list[j].shape[0]+decode_grid_list[j].shape[0] < max_pt_num:\n                        need_num = max_pt_num - (remain_pc_list[j].shape[0]+decode_grid_list[j].shape[0])\n                        remain_pc_list[j] = torch.cat([remain_pc_list[j][0].view(1, 3).repeat([need_num, 1]), remain_pc_list[j]], dim=0)\n                    inp.append(torch.cat([remain_pc_list[j], torch.Tensor(decode_grid_list[j].astype(np.float32))], dim=0))\n                step_2_net.to(gpu)\n                inp = torch.stack(inp, dim=0).to(gpu)\n                categorical = to_categorical(torch.LongTensor(cls_), 16)\n                shifteds = step_2_net(inp, categorical, need_pts_num)\n                for j in range(len(crop_list)):\n                    crop_list[j] = crop_list[j].to(gpu)\n                pred_to_gt_mean, gt_to_pred_mean = loss_fn(shifteds, crop_list, True)\n                pred_to_gt += pred_to_gt_mean.item()\n                gt_to_pred += gt_to_pred_mean.item()\n                step_2_net.to(cpu)\n                print(\"\\rtest process: %s  pred to gt: %.5f  gt to pred: %.5f\" % (processbar(process, len(test_dataset)), pred_to_gt / process, gt_to_pred / process), end=\"\")\n                # visualize results\n                for j in range(len(decode_grid_list)):\n                    # open3d\n                    remain_pts = remain_pc_list[j].cpu().numpy()\n                    remain_pc = o3d.geometry.PointCloud()\n                    remain_pc.points = o3d.Vector3dVector(remain_pts)\n                    remain_pc.colors = o3d.Vector3dVector(np.array([[1, 0.706, 0]] * remain_pts.shape[0]))\n                    # before completion\n                    crop_pts = decode_grid_list[j]\n                    crop_pc = o3d.geometry.PointCloud()\n                    crop_pc.points = o3d.Vector3dVector(crop_pts)\n                    crop_pc.colors = o3d.Vector3dVector(np.array([[0, 0.651, 0.929]] * crop_pts.shape[0]))\n                    # after completion\n                    shifted_pts = shifteds[j].cpu().numpy()\n                    shifted_pc = o3d.geometry.PointCloud()\n                    shifted_pc.points = o3d.Vector3dVector(shifted_pts)\n                    shifted_pc.colors = o3d.Vector3dVector(np.array([[0, 0.651, 0.929]] * shifted_pts.shape[0]))\n                    o3d.draw_geometries([remain_pc, crop_pc], window_name=\"test step1\", width=1000, height=800)\n                    o3d.draw_geometries([remain_pc, shifted_pc], window_name=\"test step2\", width=1000, height=800)\n            pred_to_gt, gt_to_pred = pred_to_gt / len(test_dataset), gt_to_pred / len(test_dataset)\n            print(\"\\n%s  pred to gt: %.5f  gt to pred: 
%.5f\" % (cls_list[test_cls], pred_to_gt, gt_to_pred))\n utils.fps_rand = True\n\n\nif __name__ == '__main__':\n evaluate()","sub_path":"evaluate/evaluate_completion.py","file_name":"evaluate_completion.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"255655704","text":"import os, sys\nimport multiprocessing\nimport glob, os\nimport subprocess as sp\nimport time\nimport datetime\n\ntotal=20\ndefinables=0\n\nrunning=[]\n\nfor i in range(total):\n g = sp.Popen([\"python\",\"random_model_generator.py\", \"-d0.2\", \"-a2\",\"-u20\", \"-q10\"],stdin=sp.PIPE,stdout=sp.PIPE,stderr=sp.PIPE)\n running.append(sp.Popen([\"python\",\"../../main.py\"],stdin=g.stdout,stdout=sp.PIPE,stderr=sp.PIPE))\n\nwhile running:\n time.sleep(1)\n for p in running:\n if p.poll() is not None:\n #delete finished\n stdout = p.stdout.read().decode()\n if \"NOT DEFINABLE\" in stdout:\n print(\"nd\")\n elif \"DEFINABLE\" in stdout:\n print(\"d\")\n definables+=1\n else:\n print(\"er\")\n print(p.stderr.read().decode())\n running.remove(p)\n\nprint(\"Definability: %.2f\"%(definables/total))\n","sub_path":"oldtests/random/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"435428516","text":"\"\"\"\nFilename: plot_interhemispheric_energy_timeseries.py\nAuthor: Damien Irving, irving.damien@gmail.com\nDescription: Plot the interhemispheric timeseries for various energy budget terms\n\n\"\"\"\n\n# Import general Python modules\n\nimport sys, os, pdb\nimport argparse\nimport numpy\nimport iris\nimport iris.plot as iplt\niris.FUTURE.netcdf_promote = True\nimport matplotlib.pyplot as plt\nimport seaborn\n\n# Import my modules\n\ncwd = os.getcwd()\nrepo_dir = '/'\nfor directory in cwd.split('/')[1:]:\n repo_dir = os.path.join(repo_dir, directory)\n if directory == 'ocean-analysis':\n break\n\nmodules_dir = os.path.join(repo_dir, 'modules')\nsys.path.append(modules_dir)\ntry:\n import general_io as gio\n import timeseries\n import convenient_universal as uconv\nexcept ImportError:\n raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')\n\n\n# Define functions\n\nexperiment_colors = {'historical': 'orange',\n 'historicalGHG': 'red',\n 'historicalAA': 'blue',\n 'rcp26': '#16DB65',\n 'rcp45': '#058C42',\n 'rcp60': '#04471C',\n 'rcp85': '#0D2818'}\n\n \ndef set_title(infile):\n \"\"\"Get the plot title.\"\"\"\n\n cube = iris.load(infile)\n title = '%s interhemispheric difference' %(cube[0].attributes['model_id'])\n \n plt.suptitle(title, size='large')\n\n\ndef plot_hemispheres(n_dict, s_dict, ax, variable, region, realm, runmean, units):\n \"\"\"Plot the hemisphere data.\"\"\"\n\n plt.sca(ax)\n for experiment in experiment_colors.keys():\n try:\n n_cube = n_dict[(experiment, variable, region, realm)]\n s_cube = s_dict[(experiment, variable, region, realm)]\n except KeyError:\n continue\n\n iplt.plot(n_cube, label=experiment + ', NH', color=experiment_colors[experiment])\n iplt.plot(s_cube, label=experiment + ', SH', color=experiment_colors[experiment], linestyle='--')\n if runmean: \n n_smooth_cube = n_cube.rolling_window('time', iris.analysis.MEAN, runmean) \n s_smooth_cube = s_cube.rolling_window('time', iris.analysis.MEAN, runmean) \n iplt.plot(n_smooth_cube, color=experiment_colors[experiment], linewidth=2) \n iplt.plot(s_smooth_cube, color=experiment_colors[experiment], 
linewidth=2, linestyle='--') \n \n title = 'Annual Mean %s' %(variable)\n ax.set_title(title)\n ax.legend()\n ax.set_xlabel('year')\n\n ylabel = '%s (%s over %s)' %(units, region, get_realm_title(realm))\n ax.set_ylabel(ylabel)\n\n\ndef plot_comparison(diff_dict, ax, variable, region, realm, runmean, operator, units):\n \"\"\"Plot the comparison data.\"\"\"\n\n plt.sca(ax)\n for experiment in experiment_colors.keys():\n try:\n cube = diff_dict[(experiment, variable, region, realm)]\n except KeyError:\n continue\n\n iplt.plot(cube, label=experiment, color=experiment_colors[experiment])\n if runmean: \n smooth_cube = cube.rolling_window('time', iris.analysis.MEAN, runmean) \n iplt.plot(smooth_cube, color=experiment_colors[experiment], linewidth=2) \n \n title = 'Annual Mean %s' %(variable)\n ax.set_title(title)\n ax.legend()\n ax.set_xlabel('year')\n\n if operator == 'subtract':\n ylabel = 'n%s mean - s%s mean, over %s (%s)' %(region, region, get_realm_title(realm), units)\n else:\n ylabel = 'n%s mean / s%s mean, over %s (%%; orig %s)' %(region, region, get_realm_title(realm), units)\n ax.set_ylabel(ylabel)\n\n\ndef get_realm_label(realm, var):\n \"\"\"Insert a space in the realm name.\"\"\"\n\n if 'Downward Heat Flux at Sea Water Surface' in var:\n realm_label = ' ocean'\n elif realm == 'all':\n realm_label = ''\n else:\n realm_label = ' ' + realm\n \n return realm_label\n \n \ndef get_realm_title(realm):\n \"\"\"Realm name for plot title.\"\"\"\n \n if realm == 'all':\n realm_title = 'land & ocean'\n else:\n realm_title = realm\n \n return realm_title\n \n\ndef get_diff(infile, variable, region, realm, time_constraints, operator):\n \"\"\"Calculate interhemispheric difference for a given variable\"\"\"\n\n if 'rcp' in infile:\n time_constraint = time_constraints['rcp']\n else:\n time_constraint = time_constraints['historical']\n\n svar = '%s s%s%s mean' %(variable, region, get_realm_label(realm, variable))\n nvar = '%s n%s%s mean' %(variable, region, get_realm_label(realm, variable)) \n\n with iris.FUTURE.context(cell_datetime_objects=True):\n s_cube = iris.load_cube(infile, svar & time_constraint)\n n_cube = iris.load_cube(infile, nvar & time_constraint)\n\n orig_units = str(n_cube.units)\n\n if operator == 'subtract':\n diff_cube = n_cube - s_cube\n else:\n diff_cube = n_cube / s_cube\n\n history = s_cube.attributes['history']\n model = s_cube.attributes['model_id']\n experiment = s_cube.attributes['experiment_id']\n if experiment == 'historicalMisc':\n experiment = 'historicalAA'\n run = 'r' + str(s_cube.attributes['realization'])\n\n return diff_cube, n_cube, s_cube, history, model, experiment, run, orig_units\n\n\ndef get_time_constraint(time_bounds):\n \"\"\"Get the iris time constraint for given time bounds.\"\"\"\n\n if time_bounds:\n try:\n time_constraint = gio.get_time_constraint(time_bounds)\n except AttributeError:\n time_constraint = iris.Constraint() \n else:\n time_constraint = iris.Constraint()\n\n return time_constraint\n \n \ndef main(inargs):\n \"\"\"Run the program.\"\"\"\n\n time_constraints = {}\n time_constraints['historical'] = get_time_constraint(inargs.hist_time)\n time_constraints['rcp'] = get_time_constraint(inargs.rcp_time)\n\n variables = ['Surface Downwelling Net Radiation', 'Surface Upward Latent Heat Flux',\n 'Downward Heat Flux at Sea Water Surface', 'Downward Heat Flux at Sea Water Surface']\n\n if inargs.infer_hfds:\n variables[-2] = 'Inferred Downward Heat Flux at Sea Water Surface'\n variables[-1] = 'Inferred Downward Heat Flux at Sea Water Surface'\n 
diff_dict = {}\n s_dict = {}\n n_dict = {}\n plot_details_list = []\n for infile in inargs.energy_infiles:\n for plotnum, var in enumerate(variables):\n region = inargs.regions[plotnum]\n realm = inargs.realms[plotnum]\n diff_cube, n_cube, s_cube, history, model, experiment, run, orig_units = get_diff(infile, var, region, realm, time_constraints, inargs.operator)\n diff_dict[(experiment, var, region, realm)] = diff_cube\n n_dict[(experiment, var, region, realm)] = n_cube\n s_dict[(experiment, var, region, realm)] = s_cube\n plot_ref = (var, region, realm)\n if not plot_ref in plot_details_list:\n plot_details_list.append(plot_ref)\n\n width=16\n height=10\n fig = plt.figure(figsize=(width, height))\n ax1 = fig.add_subplot(2, 2, 1)\n ax2 = fig.add_subplot(2, 2, 2)\n ax3 = fig.add_subplot(2, 2, 3)\n ax4 = fig.add_subplot(2, 2, 4)\n axes_list = [ax1, ax2, ax3, ax4]\n plotnum = 0\n for ax, plot_details in zip(axes_list, plot_details_list):\n var, region, realm = plot_details\n plot_type = inargs.plot_type[plotnum]\n if plot_type == 'comparison':\n plot_comparison(diff_dict, ax, var, region, realm, inargs.runmean, inargs.operator, orig_units)\n else:\n plot_hemispheres(n_dict, s_dict, ax, var, region, realm, inargs.runmean, orig_units)\n plotnum = plotnum + 1\n\n title = '%s interhemispheric difference' %(model)\n plt.suptitle(title, size='large')\n plt.subplots_adjust(top=0.90)\n\n plt.savefig(inargs.outfile, bbox_inches='tight')\n gio.write_metadata(inargs.outfile, file_info={inargs.energy_infiles[-1]: history})\n\n\nif __name__ == '__main__':\n\n extra_info =\"\"\" \n\nauthor:\n Damien Irving, irving.damien@gmail.com\n\n\"\"\"\n\n description = 'Plot the interhemispheric timeseries for various energy budget terms'\n parser = argparse.ArgumentParser(description=description,\n epilog=extra_info, \n argument_default=argparse.SUPPRESS,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n \n parser.add_argument(\"energy_infiles\", type=str, nargs='*', \n help=\"Input energy budget files generated from calc_system_heat_distribution.py\") \n parser.add_argument(\"outfile\", type=str, help=\"Output file\") \n\n parser.add_argument(\"--regions\", type=str, nargs=4, choices=('tropics', 'h'),\n default=('h', 'tropics', 'tropics', 'subpolar'),\n help=\"Region used for rnds, rlus, hfls & hfds respectively\")\n parser.add_argument(\"--realms\", type=str, nargs=4, choices=('ocean', 'land', 'all'),\n default=('all', 'ocean', 'ocean', 'ocean'),\n help=\"Realms used for rnds, hfls, hfds & hfds respectively\")\n parser.add_argument(\"--plot_type\", type=str, nargs=4, choices=('hemisphere', 'comparison'),\n default=('comparison', 'comparison', 'hemisphere', 'hemisphere'),\n help=\"Difference operator used for rnds, hfls, hfds & hfds respectively\")\n\n parser.add_argument(\"--operator\", type=str, choices=('subtract', 'divide'), default='divide', \n help=\"Operator to use for comparison plots\")\n\n parser.add_argument(\"--runmean\", type=int, default=None,\n help=\"Window for running mean [default = None]\")\n\n parser.add_argument(\"--hist_time\", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'), default=None,\n help=\"Time period [default = all]\")\n parser.add_argument(\"--rcp_time\", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'), default=None,\n help=\"Time period [default = all]\")\n parser.add_argument(\"--infer_hfds\", action=\"store_true\", default=False,\n help=\"Use inferred hfds data\")\n\n args = parser.parse_args() \n 
main(args)\n","sub_path":"visualisation/plot_interhemispheric_energy_timeseries.py","file_name":"plot_interhemispheric_energy_timeseries.py","file_ext":"py","file_size_in_byte":10330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"474955782","text":"from selenium.webdriver import Chrome\nfrom time import sleep\nfrom urllib.parse import urlparse\nimport json\n\n\n\nchrome = Chrome()\nurl = 'https://selenium.dunossauro.live/exercicio_04.html'\nchrome.get(url)\nsleep(5)\n\ndef preencher_formulario(navegador, nome, email, senha, telefone):\n navegador.find_element_by_name('nome').send_keys(nome)\n navegador.find_element_by_name('email').send_keys(email)\n navegador.find_element_by_name('senha').send_keys(senha)\n navegador.find_element_by_name('telefone').send_keys(telefone)\n navegador.find_element_by_name('btn').click()\n\n\ndados = {\n 'nome':'Wagner',\n 'email':'wag@ner.com',\n 'senha':'123456',\n 'telefone':'(00)00000-0000',\n}\n\ndict_elementos = {\n '%40':'@',\n '%28':'(',\n '%29':')'\n}\n\ndict_url = {}\n\npreencher_formulario(chrome,**dados)\nsleep(5)\n\nurl_parseada = urlparse(chrome.current_url)\nlist_query = url_parseada.query.split('&')\n\nfor texto in list_query:\n atributo, valor = texto.split('=')\n if atributo != 'btn':\n dict_url[atributo] = valor\n\nfor cod, decod in dict_elementos.items():\n for chave, valor in dict_url.items():\n dict_url[chave] = valor.replace(cod, decod)\n\ntextarea = chrome.find_element_by_tag_name('textarea')\ndict_text = json.loads(textarea.text.replace('\\'','\\\"'))\n\nassert dict_text == dict_url\n","sub_path":"Exercicio004.py","file_name":"Exercicio004.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"346783787","text":"# -*- coding:UTF-8 -*-\nimport unicodedata\nimport string\n\n\"\"\"\n去除音符\n\"\"\"\n\n\ndef shave_marks(txt):\n \"\"\"Remove all diacritic marks\"\"\"\n norm_txt = unicodedata.normalize('NFD', txt)\n shaved = ''.join(c for c in norm_txt\n if not unicodedata.combining(c))\n return unicodedata.normalize('NFC', shaved)\n\n\ndef shave_marks_latin(txt):\n \"\"\"Remove all diacritic marks from Latin base characters\"\"\"\n norm_txt = unicodedata.normalize('NFD', txt)\n latin_base = False\n keepers = []\n for c in norm_txt:\n if unicodedata.combining(c) and latin_base:\n continue # ignore diacritic on Latin base char\n keepers.append(c)\n # if it isn't combining char, it's a new base char\n if not unicodedata.combining(c):\n latin_base = c in string.ascii_letters\n shaved = ''.join(keepers)\n return unicodedata.normalize('NFC', shaved)\n\n\nif __name__ == '__main__':\n order = '“Herr Voß: • ½ cup of OEtker™ caffè latte • bowl of açaí.”'\n print(shave_marks(order))\n print(shave_marks_latin(order))\n Greek = 'Ζέφυρος, Zéfiro'\n print(shave_marks(Greek))\n print(shave_marks_latin(Greek))\n","sub_path":"sanitize.py","file_name":"sanitize.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"296716736","text":"import json\nimport datetime\n\nCARD_NUMBER_LEN = 16\nACCOUNT_NUMBER_LEN = 20\n\ndef catch_data():\n with open('operations.json', 'r', encoding='utf-8') as f:\n json_data = json.load(f)\n return json_data\n\ndef filter_dates(data):\n new_data = []\n for user_data in data:\n if user_data == {}:\n pass\n elif user_data['state'] != \"EXECUTED\":\n pass\n else:\n new_data.append(user_data)\n 
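# e.g. a record like {'state': 'EXECUTED', 'date': '2019-08-26T10:50:58.294041', ...} is kept,\n # while empty dicts and non-EXECUTED operations were filtered out above (values are illustrative).\n 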
return new_data\n\ndef sort_dates(data):\n sort = sorted(data, key=lambda user_data: user_data['date']) \n return sort\n\ndef get_results(sorted_data):\n five_datas_result = get_five_datas(sorted_data)\n\n for d in five_datas_result:\n from_account = '****'\n\n if 'from' in d:\n from_account = d['from']\n \n to = d['to']\n to_acc = extract_acc_type(to)\n to_number = extract_number(to)\n to_masked = get_masked_number(to_number)\n \n from_acc = extract_acc_type(from_account)\n from_number = extract_number(from_account)\n from_masked = get_masked_number(from_number)\n \n print_result(d, to_acc, to_masked, from_acc, from_masked)\n \n\ndef get_five_datas(data):\n result_datas = data[-5:]\n result_datas.reverse()\n return result_datas\n\ndef format_date(date_str):\n date_format = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%f')\n result = date_format.strftime('%d.%m.%Y')\n return result\n \n\ndef extract_number(string):\n splited = string.split()\n number = splited[-1]\n if number.isdigit():\n return number\n return 'not a number'\n\ndef get_masked_number(number):\n masked = ''\n if len(number) == CARD_NUMBER_LEN:\n masked = mask_card_number(number)\n elif len(number) == ACCOUNT_NUMBER_LEN:\n masked = mask_acc_number(number)\n\n return masked\n\ndef extract_acc_type(string):\n splited = string.split()\n acc_name = []\n for my_str in splited:\n if not my_str.isdigit():\n acc_name.append(my_str)\n acc_format = ' '.join(acc_name)\n return acc_format\n\ndef mask_acc_number(acc_number):\n masked = '**' + acc_number[-4:] \n return masked\n\ndef mask_card_number(card_number):\n # show the first six digits and the last four, e.g. '1234 56** **** 3456'\n masked = card_number[:4] + ' ' + card_number[4:6] + '** **** ' + card_number[-4:]\n return masked\n\ndef print_result(data_to_print, to_acc, to_masked, from_acc, from_masked):\n des = data_to_print['description']\n amount = data_to_print['operationAmount']['amount']\n currency = data_to_print['operationAmount']['currency']['name']\n date_str = format_date(data_to_print['date'])\n print('{date} {des}\\n{from_type} {from_acc} -> {to_account} {to}\\n{amount} {currency}\\n'.format(\n date=date_str,\n des=des,\n to_account=to_acc,\n to=to_masked,\n from_type=from_acc,\n from_acc=from_masked,\n amount=amount,\n currency=currency\n ))\n\ndata_from_json = catch_data()\nfiltered_data = filter_dates(data_from_json)\nsorted_data = sort_dates(filtered_data)\nresult = get_results(sorted_data)","sub_path":"python_operations_task/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"596155096","text":"#!/usr/bin/env python\n#coding=utf-8\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.request import CommonRequest\nimport json\nimport config\n\n\nclass AliSms(object):\n\n def __init__(self):\n self.access_key_id = config.ALISMS_ACCESS_KEY_ID\n self.access_key_secret = config.ALISMS_ACSSESS_KEY_SECRET\n self.sign_name = config.ALISMS_SIGN_NAME\n\n # Define the SMS template codes\n self.template_code_complate = config.TEMPLATE_CODE_COMPLATE\n self.template_code_complate2 = config.TEMPLATE_CODE_COMPLATE2\n self.template_code_eat_all_apple = config.TEMPLATE_CODE_EAT_ALL_APPLE\n self.template_code_wait = config.TEMPLATE_CODE_WAIT\n self.template_code_error = config.TEMPLATE_CODE_ERROR\n self.template_code_start = config.TEMPLATE_CODE_START\n\n # Create the API client\n self.client = AcsClient(self.access_key_id, self.access_key_secret, 'default')\n\n def __init_request(self, phone, template_code, data):\n if data is None:\n data = {}\n\n self.request = CommonRequest()\n self.request.set_accept_format('json')\n self.request.set_domain('dysmsapi.aliyuncs.com')\n self.request.set_method('POST')\n self.request.set_version('2017-05-25')\n self.request.set_action_name('SendSms')\n self.request.add_query_param('PhoneNumbers', phone)\n self.request.add_query_param('SignName', self.sign_name)\n self.request.add_query_param('TemplateCode', template_code)\n self.request.add_query_param('TemplateParam', json.dumps(data))\n\n def send_complate2(self, phone, data=None):\n '''Order completed'''\n self.__init_request(phone, self.template_code_complate2, data)\n response = self.client.do_action_with_exception(self.request)\n print(response)\n self.request = None # Reset self.request to None so request objects are not created endlessly without being released\n\n def send_complate(self, phone, data=None):\n '''Order completed (do not use this one)'''\n self.__init_request(phone, self.template_code_complate, data)\n response = self.client.do_action_with_exception(self.request)\n print(response)\n self.request = None\n\n def send_eat_all_apple(self, phone, data=None):\n '''All the apples have been eaten'''\n self.__init_request(phone, self.template_code_eat_all_apple, data)\n response = self.client.do_action_with_exception(self.request)\n print(response)\n self.request = None\n\n def send_eat_wait(self, phone, data=None):\n '''Waiting to be scheduled'''\n self.__init_request(phone, self.template_code_wait, data)\n response = self.client.do_action_with_exception(self.request)\n print(response)\n self.request = None\n\n def send_error(self, phone, data=None):\n '''Wrong account or password'''\n self.__init_request(phone, self.template_code_error, data)\n response = self.client.do_action_with_exception(self.request)\n print(response)\n self.request = None\n\n def send_eat_start(self, phone, data=None):\n '''Work has started'''\n self.__init_request(phone, self.template_code_start, data)\n response = self.client.do_action_with_exception(self.request)\n print(response)\n self.request = None\n\n\nali_sms = AliSms()\n# ali_sms.send_eat_start('13751320203')\n","sub_path":"utils/aliyun_sms.py","file_name":"aliyun_sms.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"624565045","text":"from room import Room\nfrom player import Player\nfrom world import World\nfrom util import Stack, Queue\nimport random\nfrom ast import literal_eval\nimport sys\nsys.setrecursionlimit(15000)\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file = \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n\n# Loads the map into a dictionary\nroom_graph = literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\nplayer = Player(world.starting_room)\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n',\"s\",\"e\"]\ntraversal_path = []\n\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\n# Maybe create graph\nclass 
Graph:\n \"\"\"Represent a graph as a dictionary of vertices mapping labels to edges.\"\"\"\n def __init__(self):\n self.vertices = {}\n def add_vertex(self, vertex_id):\n \"\"\"\n Add a vertex to the graph.\n \"\"\"\n self.vertices[vertex_id] = {}\n def add_edge(self, v1, v2):\n \"\"\"\n Add a directed edge to the graph.\n If both exist, and a connection from v1 to v2\n \"\"\"\n\n self.vertices[v1]=(v2)\n \n # if v1 in self.vertices and v2 in self.vertices:\n # self.vertices[v1].add(v2)\n # else:\n # raise IndexError(\"That vertex does not exist!\")\n def get_neighbors(self, vertex_id):\n \"\"\"\n Get all neighbors (edges) of a vertex.\n \"\"\"\n return self.vertices[vertex_id]\n\n def dft_recursive(self, starting_vertex, visited=None):\n \"\"\"\n Print each vertex in depth-first order\n beginning from starting_vertex.\n This should be done using recursion.\n \"\"\"\n Directions=[\"n\",\"s\",\"e\",\"w\"]\n\n if len(self.vertices)<=1:\n self.add_vertex(starting_vertex)\n objects={}\n for i in player.current_room.get_exits():\n objects[i]=\"?\" \n self.add_edge(player.current_room.id,objects)\n if visited is None:\n visited=set()\n\n if \"?\" not in self.vertices[starting_vertex].values():\n visited.add(starting_vertex)\n arr=self.get_all_social_paths(starting_vertex,\"?\")\n print(\"arr\",arr)\n if arr== None:\n print(\"Break\")\n return\n else:\n print(\"Continue\", self.vertices)\n\n # print(\"arr\",arr,\"arr[starting_vertex]\",starting_vertex) \n for movement in arr:\n # print(\"TEEEEST\",movement,starting_vertex,self.vertices[starting_vertex].keys())\n for move in self.vertices[starting_vertex].keys():\n # print(\"TEEEEST\",movement,move,starting_vertex)\n if self.vertices[starting_vertex][move]==movement: \n player.travel(move)\n traversal_path.append(move)\n\n self.dft_recursive(player.current_room.id, visited) \n\n else:\n RandomDirection=random.randrange(0,4)\n for child_vert in self.vertices[starting_vertex]: \n # print(\"Directions[RandomDirection]\",Directions[RandomDirection],\"player.current_room.id\",player.current_room.id,\"starting_vertex\",starting_vertex,\"child_vert\",child_vert,\"self.vertices\",self.vertices )\n if child_vert ==Directions[RandomDirection]:\n # print(\"SUCCESS\")\n if self.vertices[starting_vertex][child_vert] == \"?\":\n player.travel(child_vert)\n # print(\"Directions[RandomDirection] 2222\",Directions[RandomDirection],\"player.current_room.id\",player.current_room.id,\"starting_vertex\",starting_vertex,\"child_vert\",child_vert)\n if player.current_room.id not in self.vertices.keys():\n self.add_vertex(player.current_room.id)\n obj={}\n for i in player.current_room.get_exits():\n obj[i]=\"?\" \n self.add_edge(player.current_room.id,obj)\n obj={}\n if player.current_room.id not in visited:\n traversal_path.append(child_vert)\n # print(\"child vert\", child_vert)\n if child_vert==\"n\":\n # print(\"Wow\",player.current_room.id)\n self.vertices[starting_vertex][child_vert]=player.current_room.id\n # self.vertices[player.current_room.id][\"s\"]=starting_vertex\n elif child_vert==\"s\":\n self.vertices[starting_vertex][child_vert]=player.current_room.id\n # self.vertices[player.current_room.id][\"n\"]=starting_vertex\n elif child_vert==\"e\":\n self.vertices[starting_vertex][child_vert]=player.current_room.id\n # self.vertices[player.current_room.id][\"w\"]=starting_vertex\n elif child_vert==\"w\":\n self.vertices[starting_vertex][child_vert]=player.current_room.id\n # self.vertices[player.current_room.id][\"e\"]=starting_vertex\n 
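# self.vertices now maps each room id to its exits, with '?' marking unexplored directions,\n # e.g. {0: {'n': 1, 's': 5, 'e': '?'}} (ids are illustrative); the commented-out lines above\n # would also record the reverse edge back to the room just left.\n 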
print(\"inside\",player.current_room.id)\n self.dft_recursive(player.current_room.id, visited)\n # else:\n self.dft_recursive(player.current_room.id, visited)\n\n def get_all_social_paths(self, starting_vertex,destination_vertex):\n \"\"\"\n Takes a user's starting_vertex as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n visited = {} # Note that this is a dictionary, not a set\n # variable=0\n queue=Queue()\n queue.enqueue([starting_vertex])\n while queue.size()>0:\n path=queue.dequeue()\n # print(\"path\",path)\n current_user = path[-1]\n # print(\"current_user\", current_user)\n if current_user not in visited:\n if \"?\" in self.vertices[current_user].values():\n # print(\"path\",path, self.vertices[current_user].values())\n return path\n\n # print(\"current_user\", current_user)\n visited[current_user]=path\n\n for ID in self.vertices[current_user].values():\n # print(\"inside\")\n new_path=list(path)\n new_path.append(ID)\n queue.enqueue(new_path)\n\n\n \n\ngraphs=Graph()\n\ngraphs.dft_recursive(player.current_room.id)\n\nprint(\"HI\",player.current_room.id)\nprint(graphs.vertices)\n\n# print(bfs(player.current_room, \"?\"))\nplayer.current_room = world.starting_room\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n # print(\"move\",move,\"player.current_room\",player.current_room,\"visited_rooms\",visited_rooms)\n\n# print(\"visited_rooms\",len(visited_rooms), traversal_path)\nif len(visited_rooms) == len(room_graph):\n print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# player.current_room.print_room_description(player)\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")\n","sub_path":"adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":8363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"548365355","text":"import secrets\nimport base64\n\nfrom cryptography.hazmat.primitives import serialization as cser\nfrom cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey\n\n\nif __name__ == \"__main__\":\n\n # Generate symmetric key for the file encryption\n print(\"Generating shared key ...\")\n shared_key_i = 0\n\n while shared_key_i == 0:\n shared_key_i = secrets.randbits(8)\n\n shared_key = base64.b64encode(shared_key_i.to_bytes(1, 'big'))\n\n print(f\"Generated shared: {shared_key_i}\")\n with open(\"shared.key\", \"wb\") as shared_key_pf:\n shared_key_pf.write(shared_key)\n\n # Generate asymmetric keys\n print(\"Generating asymmetric key pair ...\")\n ed_skey = Ed25519PrivateKey.generate()\n sender_sk = ed_skey.private_bytes(\n cser.Encoding.PEM,\n cser.PrivateFormat.PKCS8,\n cser.NoEncryption()\n )\n\n ed_pkey = ed_skey.public_key()\n sender_pk = ed_pkey.public_bytes(\n cser.Encoding.PEM,\n cser.PublicFormat.SubjectPublicKeyInfo,\n )\n\n with open(\"sender_private.pem\", \"wb\") as sender_sk_pf:\n sender_sk_pf.write(sender_sk)\n\n with open(\"sender_public.pem\", \"wb\") as sender_pk_pf:\n 
sender_pk_pf.write(sender_pk)\n","sub_path":"e07_encrypt_file_and_sign_it/01_generate_keys.py","file_name":"01_generate_keys.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"285757583","text":"#!/usr/bin/env python\n\"\"\"\n Computer practical 2. Internal reflection and refraction.\n =========================================================\n\n This is part of the 'computer practical' set of assignments.\n Demonstrates internal reflection and refraction.\n Find the Brewster- and critical angle and from this determine\n the refractice index of the glass.\n\"\"\"\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport sys\nimport math\nimport webbrowser\n\n\nif sys.version_info[0] < 3:\n from Tkinter import *\n import Tkinter as Tk\nelse:\n from tkinter import *\n import tkinter as Tk\nfrom LightPipes import *\n\nroot = Tk.Tk()\nroot.wm_title(\"Computer practical: 2. Internal reflection and refraction. LP-version = \" + LPversion)\nroot.wm_protocol(\"WM_DELETE_WINDOW\", root.quit)\n\ndeg=1.0\nwavelength=632.8*nm;\nNair = 1.0;\nNglass = 1.53\nPhiPol=79.0*deg\nPhiIn=40.0*deg\nsize = 5.0*mm;\nN=100\n\nfig=plt.figure(figsize=(4,6))\nax1 = fig.add_subplot(211)\nax2 = fig.add_subplot(212)\n\n\nphipol = DoubleVar()\nphiin = DoubleVar()\nv=StringVar()\nphipol.set(PhiPol)\nphiin.set(PhiIn)\ncanvas = FigureCanvasTkAgg(fig, master=root)\ncanvas._tkcanvas.pack(side=Tk.LEFT, fill=Tk.BOTH, expand=1)\n\ndef TheExample(event):\n rad = math.pi/180.0\n F=Begin(size,wavelength,N)\n PhiIn=phiin.get()\n PhiPol=phipol.get()\n I_s = math.pow(math.sin(PhiPol * rad), 2.0)\n I_p = math.pow(math.cos(PhiPol * rad), 2.0)\n PIn = PhiIn * rad\n P = Nglass * math.sin(PIn) / Nair\n if (P >= 1.0):\n T_s = 0.0\n T_p = 0.0\n else:\n PhiT = math.asin(P);\n if (PIn == 0.0 and PhiT == 0.0):\n T_s = 4.0 * Nair * Nglass/math.pow(Nair + Nglass,2.0);\n T_p = T_s;\n else:\n T_s = Nair * math.cos(PhiT) * math.cos(PIn) / Nglass * math.pow(2.0*math.sin(PhiT) / math.sin(PIn + PhiT), 2.0);\n T_p = Nair * math.cos(PhiT) * math.cos(PIn) / Nglass * math.pow(2.0*math.sin(PhiT) / (math.sin(PIn + PhiT)*math.cos(PIn - PhiT)), 2.0);\n R_s = 1.0 - T_s;\n R_p = 1.0 - T_p;\n I_T = T_s * I_s + T_p * I_p;\n I_R = R_s * I_s + R_p * I_p;\n F1=GaussHermite(F, size/4, 0, 0, I_R);\n F2=GaussHermite(F, size/4, 0, 0, I_T);\n v.set( \"Reflected power = %5.3f mW\\n\"% I_R +\n \"Transmitted power = %5.3f mW\\n\"%I_T\n )\n I1=Intensity(0,F1);\n I2=Intensity(0,F2);\n ax1.clear()\n ax1.contourf(I1,50,vmin=0., vmax=1.,cmap='hot'); ax1.axis('off'); ax1.axis('equal')\n ax1.set_title('Reflected irradiance') \n ax2.clear()\n ax2.contourf(I2,50,vmin=0., vmax=1.,cmap='hot'); ax2.axis('off'); ax2.axis('equal')\n ax2.set_title('Transmitted irradiance')\n canvas.draw()\n\ndef openbrowser(event):\n webbrowser.open_new(r\"https://opticspy.github.io/lightpipes/Reflect.html\")\n\ndef _quit():\n root.quit()\n\n\n\nLabel(root, textvariable=v).pack(pady=50)\n\nScale( root,\n takefocus=1,\n orient='horizontal',\n label = 'polarization angle [deg]',\n length = 200, from_=0.000, to=90.0,\n resolution = 0.01,\n variable = phipol,\n cursor=\"hand2\",\n command = TheExample\n ).pack()\n\nScale( root,\n takefocus=1,\n orient='horizontal',\n label = 'angle of incidence [deg]',\n length = 200,\n from_=0.000, to=90.0,\n resolution = 0.01,\n variable = phiin,\n 
cursor=\"hand2\",\n command = TheExample\n ).pack()\n\nButton( root,\n width = 24,\n text='Quit',\n cursor=\"hand2\",\n command=_quit).pack( pady=10)\n\nlink = Label(root, text=\"help\", fg=\"blue\", cursor=\"hand2\")\nlink.pack()\nlink.bind(\"\", openbrowser)\n\nTheExample(0)\n\nroot.mainloop()\nroot.destroy()\n\n","sub_path":"Examples/ComputerPrac/ReflectRefract.py","file_name":"ReflectRefract.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"618186318","text":"import threading\nfrom gui import game\nfrom control import katch\nfrom connection import server,connectionManager\nimport urllib.request\nimport sys\nimport socket\n\n# Launch of the game\n\n# Retrieving the local ip from the outside\n# Getting the port\nif len(sys.argv) < 2:\n print(\"Please give the port you wish to use for the server!\")\nelse:\n port = sys.argv[1]\n try:\n # By default, we try to use a local network\n ip = socket.gethostbyname(socket.getfqdn()) + \":\" + str(port)\n except:\n # if this doesn't work, we get the connection through the internet\n ip = urllib.request.urlopen('http://ip.42.pl/raw').read().decode() + \":\" + str(port)\n\n # Initializing the game and its component\n connectionManager.ConnectionManager()._ip_serv = ip\n myGame = game.Game()\n katch.Katch().init(connectionManager.ConnectionManager(), myGame.get_player_manager(), myGame.get_display_manager(), myGame.get_collectable_manager())\n\n # Launching the server in another thread\n t = threading.Thread(target=server.create_server, args=(ip,))\n t.start()\n\n # Launching the server in another thread\n t2 = threading.Thread(target=katch.Katch().run)\n t2.start()\n\n # Launching the game\n myGame.main()\n\n # When the game has stopped, we have to send a message to other players\n katch.Katch().leave()\n\n # Shutting down the server\n t._stop()\n","sub_path":"src/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"574768154","text":"from django.shortcuts import render\nfrom django.views import generic\nfrom django.urls import resolve\n\nfrom django.core.urlresolvers import reverse\nimport django.apps\nimport copy\n\nfrom .decorators import classproperty\n# Generic views\n\n\nclass GenericViewMixin(object):\n ''' Class to extend existing generic views with common context info,\n e.g. 
meta data\n '''\n _template_base_name = None\n\n def view_name(self):\n return self.build_view_name(self.model.tag)\n\n def build_view_name(self, view_name):\n if not self.model.app_name:\n msg = \"Property app name not implemented for \"\n msg += self.model.__class__.__name__\n raise ValueError()\n return self.model.app_name + \":\" + view_name\n\n def get_template_name(self):\n return self.model.app_name + \"/\" + self._template_base_name\n\n def get_view_info(self, obj, args={}, view_type=''):\n this_args = copy.copy(args)\n if view_type == 'list':\n view_name = self.build_view_name(obj.tag)\n label = obj.class_label\n else:\n view_name = self.build_view_name(obj.tag) + '_display'\n label = obj.label\n args['p%d' % obj.hierachy_level] = obj.slug\n # this_args['slug'] = obj.slug\n print(this_args)\n view_dict = {'name': view_name,\n 'label': label,\n 'hierachy_level': obj.hierachy_level,\n 'slug': obj.slug, }\n return view_dict\n\n def add_view_infos(self, context):\n # add empty list of parent views to context if it does not exist yet\n if 'parent_views' not in context:\n context['views_hierachy'] = []\n print('adding parent views')\n viewinfo_list = []\n args = {}\n # view_args = {}\n # check if the current view is a list or detail view\n if 'object_list' in context:\n parents = list(set([obj.parent for obj in context['object_list']\n if obj.parent]))\n if len(parents) > 1:\n print(\"Too many parents\")\n return None\n parent = None\n if parents:\n parent = parents[0]\n obj = None\n if context['object_list'] and context['object_list'][0]:\n obj = context['object_list'][0]\n view_type = 'list'\n # recurse_view_add(self,viewinfo_list, parents[0], True)\n else:\n obj = context['object']\n parent = obj.parent\n args['slug'] = obj.slug\n view_type = 'detail'\n\n while parent:\n print(parent.class_label)\n viewinfo_list.append(self.get_view_info(parent,\n args=args,\n view_type='detail'))\n viewinfo_list.append(self.get_view_info(parent,\n args=args,\n view_type='list'))\n print(viewinfo_list[-1]['name'])\n parent = parent.parent\n\n viewinfo_list.reverse()\n if obj and view_type == 'detail':\n viewinfo_list.append(self.get_view_info(obj,\n args=args,\n view_type='list'))\n for i, viewinfo in enumerate(viewinfo_list):\n this_args = {}\n if '_display' in viewinfo['name']:\n this_args['slug'] = viewinfo['slug']\n for i in range(viewinfo['hierachy_level']):\n key = 'p%d' % i\n this_args[key] = args[key]\n print(viewinfo['name'])\n viewinfo['url'] = reverse(viewinfo['name'], kwargs=this_args)\n\n context['views_hierachy'] = viewinfo_list\n if obj:\n context['viewinfo'] = self.get_view_info(obj,\n args=copy.copy(args),\n view_type=view_type)\n context['viewinfo']['url'] = reverse(context['viewinfo']['name'],\n kwargs=args)\n\n def get_parent_class_field(self, field_name):\n if not self.model.parent_class:\n return None\n return getattr(self.model.parent_class, field_name)\n\n def extend_context_metadata(self, context):\n context['type_label'] = self.model.label\n context['type_tag'] = self.model.tag\n context['view_name'] = self.view_name()\n for field_name in (\"tag\", \"class_label\"):\n context_field = 'parent_' + field_name\n context[context_field] = self.get_parent_class_field(field_name)\n # print(context['parent_' + field_name])\n\n\nclass GenericModelListView(generic.ListView, GenericViewMixin):\n _template_base_name = 'type_list_view.html'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.template_name = self.get_template_name()\n\n def 
get_queryset(self):\n level = self.model.hierachy_level\n filter_kwargs = {}\n model = self.model\n filter_str = ''\n while level:\n pslug = self.kwargs[\"p%d\" % (level-1)]\n filter_str += model.parent_field_name + \"__\"\n filter_kwargs[filter_str + 'slug'] = pslug\n model = model.parent_class\n level = model.hierachy_level\n if filter_kwargs:\n return self.model.objects.filter(**filter_kwargs)\n else:\n return self.model.objects.all()\n\n def get_context_data(self, **kwargs):\n print(\"In view \" + self.model.tag)\n # Call the base implementation first to get a context\n context = super(GenericModelListView, self).get_context_data(**kwargs)\n self.add_view_infos(context)\n return context\n\n\nclass GenericModelDetailView(generic.DetailView, GenericViewMixin):\n _template_base_name = 'type_display_view.html'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.template_name = self.get_template_name()\n\n def view_name(self):\n return super(GenericModelDetailView, self).view_name() + \"_display\"\n\n def get_queryset(self):\n level = self.model.hierachy_level\n filter_kwargs = {'slug': self.kwargs['slug']}\n print(self.kwargs)\n filter_str = ''\n model = self.model\n while level:\n pslug = self.kwargs[\"p%d\" % (level-1)]\n filter_str += model.parent_field_name + \"__\"\n filter_kwargs[filter_str + 'slug'] = pslug\n model = model.parent_class\n level = model.hierachy_level\n objects = self.model.objects.filter(**filter_kwargs)\n return objects\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(GenericModelDetailView,\n self).get_context_data(**kwargs)\n self.add_view_infos(context)\n self.extend_context_metadata(context)\n return context\n","sub_path":"django_generic_plus/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"547478476","text":"import main\n\n\n# return songs played the threshold number of times or more\ndef _parse(sp, exi, threshold) -> list:\n recent = sp.current_user_recently_played()['items']\n over_threshold = []\n count = {}\n\n for track in recent:\n track_uri = track['track']['uri']\n if track_uri not in exi:\n if threshold == 0:\n over_threshold.append(track_uri)\n else:\n if track_uri not in count.keys():\n count[track_uri] = 1\n elif count[track_uri] == threshold - 1 and track_uri not in over_threshold:\n over_threshold.append(track_uri)\n else:\n count[track_uri] += 1\n\n return over_threshold\n\n\n# return tracks whose features are all within the range of features for the songs already in the playlist\ndef _fits(features, exi_features, pot) -> list:\n adding = []\n min_max = {}\n\n # initialize\n for feature in features:\n min_max[feature] = [exi_features[0][feature], exi_features[0][feature]]\n\n # create dictionary of feature -> [min, max] values in playlist\n for track in exi_features:\n if track is not None:\n for feature in features:\n if track[feature] < min_max[feature][0]:\n min_max[feature][0] = track[feature]\n elif track[feature] > min_max[feature][1]:\n min_max[feature][1] = track[feature]\n\n # count how many tracks in pot are within the min, max range established above\n for track in pot:\n count = 0\n for feature in features:\n if min_max[feature][0] <= track[feature] <= min_max[feature][1]:\n count += 1\n\n if count == len(features):\n adding.append(track['uri'])\n\n return adding\n\n\n# add tracks to a given playlist if they 
fit the given features of that playlist\n\n# playlist_id: the playlist uri (string)\n# features: list of audio features to consider; full options are \"danceability\", \"energy\", \"key\", \"loudness\", \"mode\",\n# \"speechiness\", \"acousticness\", \"instrumentalness\", \"liveness\", \"valence\", and \"tempo\"\n# pot: list of potential tracks to add to the playlist; default is recently played tracks\n# threshold: number of times recently played track has to be played to be considered; this field is only available\n# when the potential tracks are the recently played tracks\ndef execute(sp, playlist_id, features=None, pot=None, threshold=0):\n exi = main.existing_tracks(sp, playlist_id)\n exi_features = sp.audio_features(exi)\n\n if pot is None:\n pot_features = sp.audio_features(_parse(sp, exi, threshold))\n else:\n pot_features = sp.audio_features(pot)\n\n if features is None:\n features = ['danceability', 'energy', 'loudness', 'mode', 'acousticness', 'instrumentalness', 'valence',\n 'tempo']\n\n main.add_tracks(sp, playlist_id, _fits(features, exi_features, pot_features))\n","sub_path":"playlist_sorter.py","file_name":"playlist_sorter.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"31375281","text":"#!/usr/bin/env python3\n#\n# Copyright this project and its contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# encoding=utf8\n\nimport csv\nimport urllib.request\nimport json\nimport os\n\nprojectsCsvFile = os.path.dirname(os.path.realpath(__file__))+'/../../_data/projects.csv'\n\nlandscapeBaseURL = 'https://landscape.aswf.io'\nlandscapeHostedProjects = landscapeBaseURL+'/data/exports/projects-hosted.json'\nlandscapeSingleItem = landscapeBaseURL+'/data/items/{}.json'\n\ncsvRows = []\n\nwith urllib.request.urlopen(landscapeHostedProjects) as hostedProjectsResponse:\n for projectStage in json.load(hostedProjectsResponse):\n for project in projectStage['items']:\n with urllib.request.urlopen(landscapeSingleItem.format(project['id'])) as singleItemResponse:\n projectData = json.load(singleItemResponse)\n print(\"Processing {}...\".format(projectData['name']))\n csvRows.append({\n 'Name': projectData['name'],\n 'Level': projectData['project'],\n 'Logo URL': project['logo'],\n 'Slug': projectData['id'],\n 'Website': projectData['homepage_url'],\n 'Leads': projectData['extra']['leads'] if 'extra' in projectData and 'leads' in projectData['extra'] else None,\n 'TAC Representative': projectData['extra']['TAC_representative'] if 'extra' in projectData and 'TAC_representative' in projectData['extra'] else None,\n 'Documentation': projectData['extra']['documentation'] if 'extra' in projectData and 'documentation' in projectData['extra'] else None,\n 'SBOM': projectData['extra']['SBOM'] if 'extra' in projectData and 'SBOM' in projectData['extra'] else None,\n 'Calendar': projectData['extra']['calendar'] if 'extra' in projectData and 'calendar' in projectData['extra'] else None,\n 'Contribution Guidelines': projectData['extra']['contribution_guidelines'] if 'extra' in projectData and 'contribution_guidelines' in projectData['extra'] else None,\n 'Wiki Page': projectData['extra']['wiki_page'] if 'extra' in projectData and 'wiki_page' in projectData['extra'] else None,\n 'Meeting Cadence': projectData['extra']['meeting_cadence'] if 'extra' in projectData and 'meeting_cadence' in projectData['extra'] else None,\n 'LFX Insights URL': projectData['extra']['LFX_insights'] if 'extra' in projectData and 
'LFX_insights' in projectData['extra'] else None,\n 'LFX Security URL': projectData['extra']['LFX_security'] if 'extra' in projectData and 'LFX_security' in projectData['extra'] else None,\n 'TSC Meeting Notes': projectData['extra']['TSC_meeting_notes'] if 'extra' in projectData and 'TSC_meeting_notes' in projectData['extra'] else None,\n 'Accepted Date': projectData['extra']['date_accepted'] if 'extra' in projectData and 'date_accepted' in projectData['extra'] else None,\n 'Last Review Date': projectData['extra']['last_review_date'] if 'extra' in projectData and 'last_review_date' in projectData['extra'] else None,\n 'Next Review Date': projectData['extra']['next_review_date'] if 'extra' in projectData and 'next_review_date' in projectData['extra'] else None,\n 'Slack': projectData['extra']['slack_channel'] if 'extra' in projectData and 'slack_channel' in projectData['extra'] else None,\n 'Mailing List': projectData['extra']['mailing_list_url'] if 'extra' in projectData and 'mailing_list_url' in projectData['extra'] else None,\n 'User Mailing List': projectData['extra']['user_mailing_list_url'] if 'extra' in projectData and 'user_mailing_list_url' in projectData['extra'] else None,\n 'Dev Mailing List': projectData['extra']['dev_mailing_list_url'] if 'extra' in projectData and 'dev_mailing_list_url' in projectData['extra'] else None,\n 'Primary Github Repo': projectData['project_org'] if 'project_org' in projectData else None,\n 'Best Practices Badge ID': projectData['bestPracticeBadgeId'] if 'bestPracticeBadgeId' in projectData else None,\n 'Github Org': projectData['repo_url'] if 'repo_url' in projectData else None,\n 'Contributed By': projectData['extra']['contributed_by'] if 'extra' in projectData and 'contributed_by' in projectData['extra'] else None\n })\n\nwith open(projectsCsvFile, 'w') as projectsCsvFileObject:\n writer = csv.DictWriter(projectsCsvFileObject, fieldnames = csvRows[0].keys())\n writer.writeheader() \n writer.writerows(csvRows) \n","sub_path":".github/workflows/updateprojects.py","file_name":"updateprojects.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"160132935","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport os\nimport sh\nimport sys\nimport re\nimport eapi\n\nagent = \"MacsecEnableAgent\"\n\n_, local, host = sys.argv[:3]\n\nrpm = os.path.basename(local)\n\n_match = re.search(r\"(?:https?\\:\\/\\/)?(?P.*)\", host)\n\nssh_host = _match.group(\"hostaddr\")\nsh.scp(local, \"admin@{}:/tmp/{}\".format(ssh_host, rpm))\n\n#package = os.path.basename(local)\nprint(host, rpm)\n\n\n\nr = eapi.execute(host, [\n \"configure\",\n \"no extension %s\" % rpm,\n \"end\"\n], auth=(\"admin\", \"\"), verify=False)\nprint(r)\n\n\nr = eapi.execute(host, [\n \"copy file:/tmp/%s extension:\" % rpm,\n \"configure\",\n \"extension %s\" % rpm,\n \"end\"\n], auth=(\"admin\", \"\"), verify=False)\nprint(r)\n\nr = eapi.execute(host, [\n \"configure\",\n \"trace %s-%s setting %s/*\" % (agent, agent, agent),\n \"daemon %s\" % agent,\n \"shutdown\",\n \"exec /usr/bin/%s\" % agent,\n \"no shutdown\",\n], auth=(\"admin\", \"\"), verify=False)\nprint(r)\n","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"304016894","text":"#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\n# 
*********************************************************************\r\n# Software : PyCharm\r\n#\r\n# loader.py - \r\n#\r\n# Author :yanwh(yanwh@digitalchina.com)\r\n#\r\n# Version 1.0.0\r\n#\r\n# Copyright (c) 2004-9999 Digital China Networks Co. Ltd \r\n#\r\n#\r\n# *********************************************************************\r\n# Change log:\r\n# - 2018/10/16 19:29 add by yanwh\r\n#\r\n# *********************************************************************\r\nimport re\r\nfrom inspect import ismodule, isclass\r\nfrom keyword import kwlist\r\nfrom pathlib import Path\r\nfrom unittest import TestLoader, TestSuite\r\n\r\nimport sys\r\n\r\nimport os\r\n\r\nfrom library.conf import settings\r\nfrom library.exceptions import InvalidArgument, InvalidPath, ParsePathError, TestCaseNotFound\r\nfrom library.log import log\r\n\r\nDOT_MODULE_MATH = re.compile('([A-Za-z_]+\\w*\\.)+([A-Za-z_]+\\w*)$') # matches dotted paths like a.b.c\r\n\r\n\r\ndef valid_import_path(path):\r\n \"\"\"\r\n Given a filesystem path, return the longest dotted path that can be imported.\r\n :param path:\r\n :return:\r\n \"\"\"\r\n parts = Path(path).resolve().parts[1:]\r\n\r\n def _join(_parts):\r\n return '.'.join(_parts)\r\n\r\n while parts:\r\n try:\r\n __import__(_join(parts))\r\n break\r\n except ImportError:\r\n parts = parts[1:]\r\n return _join(parts)\r\n\r\n\r\nclass Loader(TestLoader):\r\n def __init__(self, top_level_dir=None):\r\n super(TestLoader, self).__init__()\r\n self.top_level_dir = top_level_dir\r\n self._suite = TestSuite()\r\n\r\n @property\r\n def suite(self):\r\n return self._suite\r\n\r\n def _parse_path(self, discovery):\r\n log(\" parse path\", level='info')\r\n discovery = Path(discovery).resolve()\r\n if discovery.is_dir(): # a directory must be a test-case path, not a config file\r\n self._suite.addTests(self.discover(discovery, top_level_dir=self.top_level_dir))\r\n log(\"directory\", level='info')\r\n elif discovery.is_file(): # a file must be a config file\r\n # todo\r\n if '.ini' == discovery.suffix:\r\n log(\"parse ini\", level='info')\r\n elif '.json' == discovery.suffix:\r\n log('parse json', level='info')\r\n elif '.yaml' == discovery.suffix:\r\n log('parse yaml', level='info')\r\n elif '.toml' == discovery.suffix:\r\n log('parse toml', level='info')\r\n elif '.py' == discovery.suffix:\r\n log('parse py', level='info')\r\n else:\r\n log('error')\r\n else:\r\n raise ParsePathError(f'unable to parse path {discovery}')\r\n\r\n def _parse_str(self, discovery):\r\n if discovery in sys.modules: # check whether the module already exists in sys.modules\r\n log(f'\\n===sys.modules===\\nimported {discovery} via sys.modules')\r\n self._suite.addTests(self.loadTestsFromModule(sys.modules[discovery], pattern=None))\r\n elif DOT_MODULE_MATH.match(discovery): # validate the string; matches an absolute dotted path\r\n if any([not _.isidentifier() and _ not in kwlist for _ in\r\n discovery.split('.')]):\r\n raise InvalidArgument(f'invalid argument {discovery}')\r\n else:\r\n name = discovery\r\n try:\r\n self._suite.addTests(self.loadTestsFromName(name)) # parse directly\r\n log(f'\\n===absolute path===\\nimported {discovery} via loadTestsFromName')\r\n except (ModuleNotFoundError, Exception): # noqa - the path may end with a class and method name\r\n parts = discovery.split('.')\r\n from importlib import import_module\r\n while parts:\r\n try:\r\n mod = import_module('.'.join(parts))\r\n if callable(getattr(mod, _class)):\r\n self._suite.addTest(getattr(mod, _class)(_method))\r\n log(f'\\n===instantiated object===\\nadded {mod}{_class}{_method} via addTest')\r\n break\r\n except ImportError:\r\n _class = parts[-2]\r\n _method = parts[-1]\r\n parts = parts[:-2]\r\n elif discovery.startswith('.'): # a relative import path: resolve it against top_level_dir\r\n name = valid_import_path(self.top_level_dir) + discovery\r\n 
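# e.g. if top_level_dir '/home/user/proj/tests' is importable as 'proj.tests', a relative spec like\r\n # '.test_login.LoginCase' resolves to 'proj.tests.test_login.LoginCase' (names are hypothetical).\r\n 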
self._suite.addTests(self.loadTestsFromName(name))\r\n log(f'\\n===relative path===\\nimported {name} via loadTestsFromName')\r\n\r\n else:\r\n try:\r\n if Path(discovery).resolve().is_dir() or Path(discovery).resolve().is_file(): # if it is a real path, parse it as one\r\n self._parse_path(discovery) # delegate to _parse_path\r\n else:\r\n raise InvalidPath(f'invalid path {discovery}') # must be a valid path\r\n except OSError:\r\n if '*' in discovery or '?' in discovery or '.' in discovery: # try glob patterns such as E://testcase/test_*.py\r\n parent = Path(discovery).parent # parent directory\r\n pattern = Path(discovery).parts[-1] # glob pattern\r\n if parent.is_dir():\r\n glob_path_list = list(parent.glob(pattern))\r\n if len(glob_path_list):\r\n from pprint import pformat\r\n log(f'\\n===loading the following test cases===\\n{pformat(glob_path_list)}')\r\n for _ in glob_path_list:\r\n valid_import_path(_.parent)\r\n self._suite.addTests(\r\n self.loadTestsFromName('.'.join([valid_import_path(_.parent), _.stem])))\r\n else:\r\n raise TestCaseNotFound('no test cases were found')\r\n\r\n else:\r\n raise InvalidPath(f'invalid path {discovery}') # must be a valid path\r\n else:\r\n raise InvalidArgument(f'unable to parse argument {discovery}')\r\n\r\n def parse(self, discovery):\r\n if isinstance(discovery, str):\r\n log(\"===parsing str===\")\r\n self._parse_str(discovery)\r\n elif isinstance(discovery, os.PathLike):\r\n log(\"===parsing PathLike===\")\r\n self._parse_path(discovery)\r\n elif ismodule(discovery):\r\n log(\"===parsing module===\")\r\n self._suite.addTests(self.loadTestsFromModule(discovery, pattern=None))\r\n elif isclass(discovery):\r\n log(\"===parsing class===\")\r\n self._suite.addTests(self.loadTestsFromTestCase(discovery))\r\n elif isinstance(discovery, (list, tuple)):\r\n log(\"===parsing sequence===\")\r\n for _discovery in discovery:\r\n self.parse(_discovery)\r\n\r\n def load(self, discovery):\r\n if discovery:\r\n self.parse(discovery)\r\n else: # default loading: try the global settings first, then fall back to the current python file\r\n if settings.get('testcase_ini'): # try the auto-discovered config file\r\n log(\"loading test cases from the auto-discovered config file\", level='info')\r\n else: # try loading via py variables\r\n log(\"loading test cases from py variables in the current environment\", level='info')\r\n\r\n","sub_path":"library/unittest/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":7561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"269564848","text":"\"\"\"\nDefines tools for dealing with functional groups and their reactions.\n\n.. _`adding functional groups`:\n\nExtending stk: Adding more functional groups.\n----------------------------------------------\n\nIf ``stk`` is to incorporate a new functional group, a new\n:class:`FGInfo` instance should be added to\n:data:`functional_groups`.\n\nAdding a new :class:`FGInfo` instance to :data:`functional_groups` will\nallow :meth:`.Topology.build` to connect the functional group to\nall others during assembly. In most cases, nothing except adding this\ninstance should be necessary in order to incorporate new functional\ngroups.\n\nNote that when adding SMARTS, if you want to make a SMARTS that targets\nan atom in an environment, for example, a bromine connected to a\ncarbon::\n\n [$([Br][C])]\n\nThe atom you are targeting needs to be written first. The above SMARTS\nworks but::\n\n [$([C][Br])]\n\ndoes not.\n\nIf a new functional group is to connect to another functional group\nwith a bond other than a single bond, the names of the functional groups\nshould be added to :data:`bond_orders`, along with the desired bond\norder.\n\nSupporting complex reactions.\n.............................\n\nDuring assembly, two functional groups are provided to\n:func:`react`. 
By default, placing an :class:`FGInfo` instance into\n:data:`functional_groups` will result in the creation of a single bond\nbetween the atoms tagged as ``'bonder'`` in the two functional groups.\nIn addition, any atoms tagged as ``'del'`` will be removed. The bond\norder of the created bond can be modified by editing\n:data:`bond_orders`.\n\nHowever, some reactions cannot be described by a simple combination of\nadding a bond while deleting some existing atoms. For example, consider\nthe aldol reaction:\n\n CH3C(=O)CH3 + CH3C(=O)CH3 --> CH3(=O)CH2C(OH)(CH3)CH3\n\nHere a ketone is converted into an alcohol. In order to support more\ncomplex conversions, a specific function needs to be defined which\nmodifies the molecule as desired. The function then needs\nto be added to :data:`custom_reactions`. See\n:func:`boronic_acid_with_diol`\nas an example.\n\n\"\"\"\n\nimport numpy as np\nfrom scipy.spatial.distance import euclidean\nimport rdkit.Chem.AllChem as rdkit\nimport rdkit.Geometry.rdGeometry as rdkit_geo\nfrom collections import Counter\nfrom ..utilities import AtomicPeriodicBond\n\n\nclass FGKey:\n \"\"\"\n Used to create a key from a :class:`list` of fg names.\n\n Used by :data:`bond_orders`, :data:`custom_reactions` and\n :data:`periodic_custom_reactions`.\n\n Attributes\n ----------\n key : :class:`tuple`\n A unique key based on the functional groups provided to the\n initializer.\n\n \"\"\"\n\n def __init__(self, fgs):\n \"\"\"\n Initializer.\n\n Parameters\n ----------\n fgs : :class:`list` of :class:`str`\n A :class:`list` holding the names of functional groups.\n\n \"\"\"\n c = Counter(fgs)\n self.key = tuple(sorted((key, value) for key, value in c.items()))\n\n def __eq__(self, other):\n return self.key == other.key\n\n def __hash__(self):\n return hash(self.key)\n\n def __repr__(self):\n fg_names = [name for name, count in self.key\n for i in range(count)]\n return f'FGKey({fg_names})'\n\n def __str__(self):\n return repr(self)\n\n\nclass Match:\n \"\"\"\n A container for SMARTS queries.\n\n Attributes\n ----------\n smarts : :class:`str`\n A SMARTS string which matches some atoms.\n\n n : :class:`int`\n The maximum number of atoms to be matched by :attr:`smarts`,\n per functional group.\n\n \"\"\"\n\n __slots__ = ['smarts', 'n']\n\n def __init__(self, smarts, n):\n self.smarts = smarts\n self.n = n\n\n\nclass FGInfo:\n \"\"\"\n Contains key information about functional groups.\n\n The point of this class is to register which atoms of a functional\n group form bonds, and which are deleted during assembly of\n macromolecules.\n\n Attributes\n ----------\n name : :class:`str`\n The name of the functional group.\n\n fg_smarts : :class:`str`\n A SMARTS string which matches the functional group.\n\n bonder_smarts : :class:`list`\n A :class:`list` of the form\n\n .. code-block:: python\n\n bonder_smarts = [Match(smarts='[$([N]([H])[H])]', n=1),\n Match(smarts='[$([H][N][H])]', n=1)]\n\n Each string is a SMARTS string which matches an atom in the\n functional group which is to be tagged as ``'bonder'``. The\n number represents how many matched atoms should be tagged, per\n functional group.\n\n In the example, ``Match(smarts='[$([N]([H])[H])]', n=1)``\n matches the nitrogen atom in the amine functional group. The\n ``n=1`` means that 1 nitrogen atom per functional group will be\n tagged as ``'bonder'``. The second\n ``Match(smarts='[$([H][N][H])]', n=1)``, matches the hydrogen\n atom in the amine functional group. 
Because ``n=1``, only 1\n hydrogen atom per amine functional group will be tagged\n ``'bonder'``. If instead\n ``Match(smarts='[$([H][N][H])]', n=2)`` was used, then both of\n the hydrogen atoms in the functional group would be tagged.\n\n del_smarts : :class:`list`\n Same as :attr:`bonder_smarts` but matched atoms are tagged\n as ``'del'``.\n\n \"\"\"\n\n __slots__ = ['name', 'fg_smarts', 'bonder_smarts', 'del_smarts']\n\n def __init__(self, name, fg_smarts, bonder_smarts, del_smarts):\n \"\"\"\n Initializes a :class:`FGInfo` instance.\n\n Parameters\n ----------\n name : :class:`str`\n The name of the functional group.\n\n fg_smarts : :class:`str`\n A SMARTS string which matches the functional group.\n\n bonder_smarts : :class:`list`\n See :attr:`bonder_smarts`.\n\n del_smarts : :class:`list`\n See :attr:`del_smarts`.\n\n \"\"\"\n\n self.name = name\n self.fg_smarts = fg_smarts\n self.bonder_smarts = bonder_smarts\n self.del_smarts = del_smarts\n\n\ndef fg_name(mol, fg):\n \"\"\"\n Returns the name of the functional group with id `fg`.\n\n Parameters\n ----------\n mol : :class:`rdkit.Chem.rdchem.Mol`\n An ``rdkit`` molecule with its functional groups tagged.\n\n fg : :class:`int`\n The id of a functional group as given by the 'fg_id' property.\n\n Returns\n -------\n :class:`str`\n The name of a functional group.\n\n \"\"\"\n\n for atom in mol.GetAtoms():\n if atom.HasProp('fg_id') and atom.GetIntProp('fg_id') == fg:\n return atom.GetProp('fg')\n raise RuntimeError(f'No functional group with id {fg} found.')\n\n\ndef react(mol, del_atoms, *fgs):\n \"\"\"\n Creates bonds between functional groups.\n\n This function first looks at the functional group ids provided via\n the `*fgs` argument and checks which functional groups are\n involved in the reaction. If the functional groups are handled\n by one of the custom reactions specified in\n :data:`custom_reactions` then that function is executed.\n\n In all other cases the function is assumed to have received two\n functional groups to react via `*fgs`. In these functional groups\n the atoms tagged ``'del'`` are deleted and the atoms tagged\n ``'bonder'`` have a bond added. The bond is a single bond, unless\n specified otherwise in :data:`bond_orders`.\n\n Parameters\n ----------\n mol : :class:`rdkit.Chem.rdchem.Mol`\n A molecule being assembled.\n\n del_atoms : :class:`bool`\n Toggles if atoms with the ``'del'`` property are deleted.\n\n *fgs : :class:`int`\n The ids of the functional groups to react. The ids are held\n by atoms of `mol` in the ``'fg_id'`` property.\n\n Returns\n -------\n :class:`tuple`\n The first element is an :class:`rdkit.Chem.rdchem.Mol`. It is\n the molecule with bonds added between the functional groups.\n\n The second element is a :class:`int`. 
It is the number\n of bonds added.\n\n \"\"\"\n\n names = [fg_name(mol, fg) for fg in fgs]\n reaction_key = FGKey(names)\n if reaction_key in custom_reactions:\n return custom_reactions[reaction_key](mol, del_atoms, *fgs)\n\n emol = rdkit.EditableMol(mol)\n\n bonders = []\n for atom in mol.GetAtoms():\n if not (atom.HasProp('fg_id') and atom.GetIntProp('fg_id') in fgs):\n continue\n if atom.HasProp('bonder'):\n bonders.append(atom.GetIdx())\n\n bond = bond_orders.get(reaction_key, rdkit.rdchem.BondType.SINGLE)\n bonder1, bonder2 = bonders\n emol.AddBond(bonder1, bonder2, bond)\n\n for atom in reversed(mol.GetAtoms()):\n if not (atom.HasProp('fg_id') and atom.GetIntProp('fg_id') in fgs):\n continue\n\n if atom.HasProp('del') and del_atoms:\n emol.RemoveAtom(atom.GetIdx())\n\n return emol.GetMol(), 1\n\n\ndef periodic_react(mol, del_atoms, direction, *fgs):\n \"\"\"\n Like :func:`react` but returns periodic bonds.\n\n As periodic bonds are returned, no bonds are added to `mol`.\n\n Parameters\n ----------\n mol : :class:`rdkit.Chem.rdchem.Mol`\n A molecule being assembled.\n\n del : :class:`bool`\n Toggles if atoms with the ``'del'`` property are deleted.\n\n direction : :class:`list` of :class:`int`\n A 3 member list describing the axes along which the created\n bonds are periodic. For example, ``[1, 0, 0]`` means that the\n bonds are periodic along the x axis in the positive direction.\n\n *fgs : :class:`int`\n The ids of the functional groups to react. The ids are held\n by atom of `mol` in the ``'fg_id'`` property.\n\n Returns\n -------\n :class:`tuple`\n The first element is an :class:`rdkit.Chem.rdchem.Mol`. It is\n the molecule after the reaction.\n\n The second element is a :class:`int`. It is the number\n of bonds added.\n\n The third element is a :class:`list` holding\n :class:`.AtomicPeriodicBond`.\n\n \"\"\"\n\n names = [fg_name(mol, fg) for fg in fgs]\n reaction_key = FGKey(names)\n if reaction_key in periodic_custom_reactions:\n return periodic_custom_reactions[reaction_key](mol,\n del_atoms,\n direction,\n *fgs)\n\n emol = rdkit.EditableMol(mol)\n\n bonders = {}\n for atom in mol.GetAtoms():\n if not (atom.HasProp('fg_id') and atom.GetIntProp('fg_id') in fgs):\n continue\n if atom.HasProp('bonder'):\n bonders[atom.GetIntProp('fg_id')] = atom.GetIntProp('bonder')\n\n bond = bond_orders.get(FGKey(names), rdkit.rdchem.BondType.SINGLE)\n\n # Make sure the direction of the periodic bond is maintained.\n fg1, fg2 = fgs\n bonder1, bonder2 = bonders[fg1], bonders[fg2]\n periodic_bonds = [AtomicPeriodicBond(bonder1,\n bonder2,\n bond,\n direction)]\n\n for atom in reversed(mol.GetAtoms()):\n if not (atom.HasProp('fg_id') and atom.GetIntProp('fg_id') in fgs):\n continue\n\n if atom.HasProp('del') and del_atoms:\n emol.RemoveAtom(atom.GetIdx())\n\n return emol.GetMol(), 1, periodic_bonds\n\n\ndef diol_with_difluorne(mol, del_atoms, fg1, fg2):\n \"\"\"\n Crates bonds between functional groups.\n\n Parameters\n ----------\n mol : :class:`rdkit.Chem.rdchem.Mol`\n A molecule being assembled.\n\n del : :class:`bool`\n Toggles if atoms with the ``'del'`` property are deleted.\n\n fg1 : :class:`int`\n The id of the first functional group which\n is to be joined, as given by the 'fg_id' property.\n\n fg2 : :class:`int`\n The id of the second functional group which\n is to be joined, as given by the 'fg_id' property.\n\n Returns\n -------\n :class:`tuple`\n The first element is an :class:`rdkit.Chem.rdchem.Mol`. 
It is\n the molecule with bonds added between the functional groups.\n\n The second element is a :class:`int`. It is the number\n of bonds added.\n\n \"\"\"\n\n bond = rdkit.rdchem.BondType.SINGLE\n fgs = {fg1, fg2}\n oxygens = []\n carbons = []\n deleters = []\n\n for a in reversed(mol.GetAtoms()):\n if not a.HasProp('fg_id') or a.GetIntProp('fg_id') not in fgs:\n continue\n\n if a.HasProp('del'):\n deleters.append(a)\n\n if a.GetProp('fg') == 'difluorene' and a.HasProp('bonder'):\n carbons.append(a)\n\n if a.GetProp('fg') == 'diol' and a.HasProp('bonder'):\n oxygens.append(a)\n\n conf = mol.GetConformer()\n distances = []\n for c in carbons:\n cpos = np.array([*conf.GetAtomPosition(c.GetIdx())])\n for o in oxygens:\n opos = np.array([*conf.GetAtomPosition(o.GetIdx())])\n d = euclidean(cpos, opos)\n distances.append((d, c.GetIdx(), o.GetIdx()))\n distances.sort()\n\n deduped_pairs = []\n seen_o, seen_c = set(), set()\n for d, c, o in distances:\n if c not in seen_c and o not in seen_o:\n deduped_pairs.append((c, o))\n seen_c.add(c)\n seen_o.add(o)\n\n (c1, o1), (c2, o2), *_ = deduped_pairs\n assert c1 != c2 and o1 != o2\n emol = rdkit.EditableMol(mol)\n emol.AddBond(c1, o1, bond)\n emol.AddBond(c2, o2, bond)\n\n if del_atoms:\n for a in deleters:\n emol.RemoveAtom(a.GetIdx())\n\n return emol.GetMol(), 2\n\n\ndef boronic_acid_with_diol(mol, del_atoms, fg1, fg2):\n \"\"\"\n Crates bonds between functional groups.\n\n Parameters\n ----------\n mol : :class:`rdkit.Chem.rdchem.Mol`\n A molecule being assembled.\n\n del : :class:`bool`\n Toggles if atoms with the ``'del'`` property are deleted.\n\n fg1 : :class:`int`\n The id of the first functional group which\n is to be joined, as given by the 'fg_id' property.\n\n fg2 : :class:`int`\n The id of the second functional group which\n is to be joined, as given by the 'fg_id' property.\n\n Returns\n -------\n :class:`tuple`\n The first element is an :class:`rdkit.Chem.rdchem.Mol`. It is\n the molecule with bonds added between the functional groups.\n\n The second element is a :class:`int`. It is the number\n of bonds added.\n\n \"\"\"\n\n bond = rdkit.rdchem.BondType.SINGLE\n fgs = {fg1, fg2}\n oxygens = []\n deleters = []\n\n for a in reversed(mol.GetAtoms()):\n if not a.HasProp('fg_id') or a.GetIntProp('fg_id') not in fgs:\n continue\n\n if a.HasProp('del'):\n deleters.append(a)\n\n if a.GetProp('fg') == 'boronic_acid' and a.HasProp('bonder'):\n boron = a\n\n if a.GetProp('fg') == 'diol' and a.HasProp('bonder'):\n oxygens.append(a)\n\n emol = rdkit.EditableMol(mol)\n emol.AddBond(boron.GetIdx(), oxygens[0].GetIdx(), bond)\n emol.AddBond(boron.GetIdx(), oxygens[1].GetIdx(), bond)\n\n if del_atoms:\n for a in deleters:\n emol.RemoveAtom(a.GetIdx())\n\n return emol.GetMol(), 2\n\n\ndef amine3_with_amine3(mol, del_atoms, fg1, fg2):\n \"\"\"\n Crates bonds between functional groups.\n\n Parameters\n ----------\n mol : :class:`rdkit.Chem.rdchem.Mol`\n A molecule being assembled.\n\n del : :class:`bool`\n Toggles if atoms with the ``'del'`` property are deleted.\n\n fg1 : :class:`int`\n The id of the first functional group which\n is to be joined, as given by the 'fg_id' property.\n\n fg2 : :class:`int`\n The id of the second functional group which\n is to be joined, as given by the 'fg_id' property.\n\n Returns\n -------\n :class:`tuple`\n The first element is an :class:`rdkit.Chem.rdchem.Mol`. It is\n the molecule with bonds added between the functional groups.\n\n The second element is a :class:`int`. 
It is the number\n of bonds added.\n\n \"\"\"\n\n fgs = {fg1, fg2}\n atoms1, atoms2 = {}, {}\n deleters = []\n\n for a in mol.GetAtoms():\n if not a.HasProp('fg_id') or a.GetIntProp('fg_id') not in fgs:\n continue\n\n if a.HasProp('bonder') and a.GetIntProp('fg_id') == fg1:\n atoms1[a.GetSymbol()] = a.GetIdx()\n\n if a.HasProp('bonder') and a.GetIntProp('fg_id') == fg2:\n atoms2[a.GetSymbol()] = a.GetIdx()\n\n if a.HasProp('del'):\n deleters.append(a.GetIdx())\n\n conf = mol.GetConformer()\n n1_pos = np.array([*conf.GetAtomPosition(atoms1['N'])])\n n2_pos = np.array([*conf.GetAtomPosition(atoms2['N'])])\n\n c1_pos = np.array([*conf.GetAtomPosition(atoms1['C'])])\n c2_pos = np.array([*conf.GetAtomPosition(atoms2['C'])])\n\n emol = rdkit.EditableMol(mol)\n\n n_joiner = emol.AddAtom(rdkit.Atom(6))\n n_joiner_pos = (n1_pos + n2_pos) / 2\n nh1 = emol.AddAtom(rdkit.Atom(1))\n nh1_pos = n_joiner_pos + np.array([0, 0, 1])\n nh2 = emol.AddAtom(rdkit.Atom(1))\n nh2_pos = n_joiner_pos + np.array([0, 0, -1])\n\n nc_joiner1 = emol.AddAtom(rdkit.Atom(6))\n nc_joiner1_pos = (c1_pos + n2_pos) / 2\n nc1h1 = emol.AddAtom(rdkit.Atom(1))\n nc1h1_pos = nc_joiner1_pos + np.array([0, 0, 1])\n nc1h2 = emol.AddAtom(rdkit.Atom(1))\n nc1h2_pos = nc_joiner1_pos + np.array([0, 0, -1])\n\n nc_joiner2 = emol.AddAtom(rdkit.Atom(6))\n nc_joiner2_pos = (c2_pos + n1_pos) / 2\n nc2h1 = emol.AddAtom(rdkit.Atom(1))\n nc2h1_pos = nc_joiner2_pos + np.array([0, 0, 1])\n nc2h2 = emol.AddAtom(rdkit.Atom(1))\n nc2h2_pos = nc_joiner2_pos + np.array([0, 0, -1])\n\n single = rdkit.rdchem.BondType.SINGLE\n emol.AddBond(atoms1['N'], n_joiner, single)\n emol.AddBond(atoms2['N'], n_joiner, single)\n emol.AddBond(n_joiner, nh1, single)\n emol.AddBond(n_joiner, nh2, single)\n\n emol.AddBond(atoms1['C'], nc_joiner1, single)\n emol.AddBond(atoms2['N'], nc_joiner1, single)\n emol.AddBond(nc_joiner1, nc1h1, single)\n emol.AddBond(nc_joiner1, nc1h2, single)\n\n emol.AddBond(atoms2['C'], nc_joiner2, single)\n emol.AddBond(atoms1['N'], nc_joiner2, single)\n emol.AddBond(nc_joiner2, nc2h1, single)\n emol.AddBond(nc_joiner2, nc2h2, single)\n\n mol = emol.GetMol()\n conf = mol.GetConformer()\n conf.SetAtomPosition(n_joiner, rdkit_geo.Point3D(*n_joiner_pos))\n conf.SetAtomPosition(nh1, rdkit_geo.Point3D(*nh1_pos))\n conf.SetAtomPosition(nh2, rdkit_geo.Point3D(*nh2_pos))\n\n conf.SetAtomPosition(nc_joiner1, rdkit_geo.Point3D(*nc_joiner1_pos))\n conf.SetAtomPosition(nc1h1, rdkit_geo.Point3D(*nc1h1_pos))\n conf.SetAtomPosition(nc1h2, rdkit_geo.Point3D(*nc1h2_pos))\n\n conf.SetAtomPosition(nc_joiner2, rdkit_geo.Point3D(*nc_joiner2_pos))\n conf.SetAtomPosition(nc2h1, rdkit_geo.Point3D(*nc2h1_pos))\n conf.SetAtomPosition(nc2h2, rdkit_geo.Point3D(*nc2h2_pos))\n\n if del_atoms:\n emol = rdkit.EditableMol(mol)\n for a in reversed(deleters):\n emol.RemoveAtom(a)\n mol = emol.GetMol()\n\n return mol, 6\n\n\n# If some functional groups react via a special mechanism not covered\n# by the base \"react()\" function, the function should be placed\n# in this dict. 
The key should be a sorted tuple which holds the name\n# of every functional group involved in the reaction along with how\n# many such functional groups are invovled.\ncustom_reactions = {\n FGKey(['boronic_acid', 'diol']): boronic_acid_with_diol,\n FGKey(['diol', 'difluorene']): diol_with_difluorne,\n FGKey(['amine3', 'amine3']): amine3_with_amine3\n\n}\n\nperiodic_custom_reactions = {}\n\n\nfunctional_groups = (\n\n FGInfo(name=\"amine\",\n fg_smarts=\"[N]([H])[H]\",\n bonder_smarts=[Match(smarts=\"[$([N]([H])[H])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([H][N][H])]\", n=2)]),\n\n FGInfo(name=\"aldehyde\",\n fg_smarts=\"[C](=[O])[H]\",\n bonder_smarts=[Match(smarts=\"[$([C](=[O])[H])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([O]=[C][H])]\", n=1)]),\n\n FGInfo(name=\"carboxylic_acid\",\n fg_smarts=\"[C](=[O])[O][H]\",\n bonder_smarts=[Match(smarts=\"[$([C](=[O])[O][H])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([H][O][C](=[O]))]\", n=1),\n Match(smarts=\"[$([O]([H])[C](=[O]))]\", n=1)]),\n\n\n FGInfo(name=\"amide\",\n fg_smarts=\"[C](=[O])[N]([H])[H]\",\n bonder_smarts=[\n Match(smarts=\"[$([C](=[O])[N]([H])[H])]\", n=1)\n ],\n del_smarts=[\n Match(smarts=\"[$([N]([H])([H])[C](=[O]))]\", n=1),\n Match(smarts=\"[$([H][N]([H])[C](=[O]))]\", n=2)\n ]),\n\n FGInfo(name=\"thioacid\",\n fg_smarts=\"[C](=[O])[S][H]\",\n bonder_smarts=[Match(smarts=\"[$([C](=[O])[S][H])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([H][S][C](=[O]))]\", n=1),\n Match(smarts=\"[$([S]([H])[C](=[O]))]\", n=1)]),\n\n FGInfo(name=\"alcohol\",\n fg_smarts=\"[O][H]\",\n bonder_smarts=[Match(smarts=\"[$([O][H])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([H][O])]\", n=1)]),\n\n FGInfo(name=\"thiol\",\n fg_smarts=\"[S][H]\",\n bonder_smarts=[Match(smarts=\"[$([S][H])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([H][S])]\", n=1)]),\n\n FGInfo(name=\"bromine\",\n fg_smarts=\"*[Br]\",\n bonder_smarts=[Match(smarts=\"[$(*[Br])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([Br]*)]\", n=1)]),\n\n FGInfo(name=\"iodine\",\n fg_smarts=\"*[I]\",\n bonder_smarts=[Match(smarts=\"[$(*[I])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([I]*)]\", n=1)]),\n\n FGInfo(name='alkyne',\n fg_smarts='[C]#[C][H]',\n bonder_smarts=[Match(smarts='[$([C]([H])#[C])]', n=1)],\n del_smarts=[Match(smarts='[$([H][C]#[C])]', n=1)]),\n\n FGInfo(name='terminal_alkene',\n fg_smarts='[C]=[C]([H])[H]',\n bonder_smarts=[Match(smarts='[$([C]=[C]([H])[H])]', n=1)],\n del_smarts=[Match(smarts='[$([H][C]([H])=[C])]', n=2),\n Match(smarts='[$([C](=[C])([H])[H])]', n=1)]),\n\n FGInfo(name='boronic_acid',\n fg_smarts='[B]([O][H])[O][H]',\n bonder_smarts=[Match(smarts='[$([B]([O][H])[O][H])]', n=1)],\n del_smarts=[Match(smarts='[$([O]([H])[B][O][H])]', n=2),\n Match(smarts='[$([H][O][B][O][H])]', n=2)]),\n\n # This amine functional group only deletes one of the\n # hydrogen atoms when a bond is formed.\n FGInfo(name=\"amine2\",\n fg_smarts=\"[N]([H])[H]\",\n bonder_smarts=[Match(smarts=\"[$([N]([H])[H])]\", n=1)],\n del_smarts=[Match(smarts=\"[$([H][N][H])]\", n=1)]),\n\n FGInfo(name=\"secondary_amine\",\n fg_smarts=\"[H][N]([#6])[#6]\",\n bonder_smarts=[\n Match(smarts=\"[$([N]([H])([#6])[#6])]\", n=1)\n ],\n del_smarts=[Match(smarts=\"[$([H][N]([#6])[#6])]\", n=1)]),\n\n FGInfo(name='diol',\n fg_smarts='[H][O][#6]~[#6][O][H]',\n bonder_smarts=[\n Match(smarts='[$([O]([H])[#6]~[#6][O][H])]', n=2)\n ],\n del_smarts=[Match(smarts='[$([H][O][#6]~[#6][O][H])]', n=2)]),\n\n FGInfo(name='difluorene',\n fg_smarts='[F][#6]~[#6][F]',\n 
bonder_smarts=[Match(smarts='[$([#6]([F])~[#6][F])]', n=2)],\n del_smarts=[Match(smarts='[$([F][#6]~[#6][F])]', n=2)]),\n\n FGInfo(name='alkyne2',\n fg_smarts='[C]#[C][H]',\n bonder_smarts=[Match(smarts='[$([C]#[C][H])]', n=1)],\n del_smarts=[Match(smarts='[$([H][C]#[C])]', n=1),\n Match(smarts='[$([C](#[C])[H])]', n=1)]),\n\n FGInfo(name='amine3',\n fg_smarts='[N]([H])([H])[#6]~[#6]([H])~[#6]([H])',\n bonder_smarts=[\n Match(smarts='[$([N]([H])([H])[#6]~[#6]([H])~[#6]([H]))]', n=1),\n Match(smarts='[$([#6]([H])(~[#6]([H]))~[#6][N]([H])[H])]', n=1),\n ],\n del_smarts=[\n Match(smarts='[$([H][N]([H])[#6]~[#6]([H])~[#6]([H]))]', n=2),\n Match(smarts='[$([H][#6](~[#6]([H]))~[#6][N]([H])[H])]', n=1)])\n\n )\n\ndouble = rdkit.rdchem.BondType.DOUBLE\ntriple = rdkit.rdchem.BondType.TRIPLE\nbond_orders = {\n FGKey(['amine', 'aldehyde']): double,\n FGKey(['amide', 'aldehyde']): double,\n FGKey(['nitrile', 'aldehyde']): double,\n FGKey(['amide', 'amine']): double,\n FGKey(['terminal_alkene', 'terminal_alkene']): double,\n FGKey(['alkyne2', 'alkyne2']): triple}\n","sub_path":"stk/molecular/functional_groups.py","file_name":"functional_groups.py","file_ext":"py","file_size_in_byte":24450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"184793071","text":"\"\"\"Case-study #11\nDevelopers:\nBayanova A. 70%, Shmatov D. 60%\n\"\"\"\n\nimport os\n\n# os.listdir(path=\".\") - list of files and directories in a folder.\n# os.getcwd() - the current working directory.\n# os.chdir(path) - change the current directory.\n\ndef main(directory):\n '''Main program: prints the path of the current directory and the menu.\n Calls the command-execution function.'''\n\n directory = os.getcwd()\n\n print(directory)\n print('1. List the directory \\n2. Go one level up \\n3. Go one level down\\n4. Number of files and directories\\n5. Size of the current directory (in bytes)\\n6. Find a file\\n7. Exit the program')\n vibor = int(input('Choose a menu item: '))\n if vibor in [1,2,3,4,5,6,7]:\n return runCommand(vibor, directory)\n else:\n print('You entered an invalid value.')\n main(os.getcwd())\n\n\ndef runCommand(command, directory):\n '''Determines, from the command number command, which function should be executed.'''\n if command == 1:\n print(os.listdir(directory))\n main(directory)\n\n elif command == 2:\n os.chdir(os.getcwd()[:os.getcwd().rfind(\"\\\\\")])\n print(os.getcwd())\n main(os.getcwd())\n\n elif command == 3:\n print(os.getcwd())\n print(os.listdir(directory))\n try:\n currentDir = str(input('Enter the subdirectory name: '))\n os.chdir(path = currentDir)\n main(os.getcwd())\n except FileNotFoundError:\n print('You entered an invalid file name.')\n runCommand(3, directory)\n\n elif command == 4:\n print(countFiles(directory))\n main(os.getcwd())\n\n elif command == 5:\n print(countBytes(directory))\n main(os.getcwd())\n\n elif command == 6:\n target = input('Enter the file name: ')\n if findFiles(target, directory):\n print(findFiles(target, directory))\n main(os.getcwd())\n else:\n print('No files with that name were found.')\n runCommand(6, directory)\n\n elif command == 7:\n exit()\n\n\n\ndef countFiles(path):\n '''\n Recursive function that counts the number of files in the given directory path.\n All files located in subdirectories are included in the count. Returns the number of files.'''\n a = []\n for top, dirs, files in os.walk(path):\n for nm in files:\n a.append(os.path.join(top, nm))\n return len(a)\n\n\ndef countBytes(path):\n '''Recursive function that computes the total size (in bytes) of all files in the given directory path.\n All files located in subdirectories are included in the count. Returns the total number of bytes.'''\n\n m = 0\n for top, dirs, files in os.walk(path):\n for nm in files:\n m += os.path.getsize(os.path.join(top, nm))\n\n return m\n\n\n\ndef findFiles(name, path):\n '''Recursive function that returns the path of the first file or directory\n matching name inside path (all subdirectories of path are searched).\n Returns None when nothing is found; the caller reports that case.'''\n for root, dirs, files in os.walk(path):\n if name in files or name in root or name in dirs:\n return os.path.join(root, name)\n\n\nif __name__ == '__main__':\n main(os.getcwd())","sub_path":"case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"395282450","text":"class Quiz():\n def __init__(self, name, type, answerCount, client, amount, answers, rawData):\n self.client = client\n self.name = name\n self.type = type\n self.answerCount = answerCount\n self.currentQuestion = None\n self.questions = []\n self.questionCount = amount\n self.answerCounts = answers\n self.rawEvent = rawData\n\nclass Question():\n def __init__(self,rawEvent,client):\n self.client = client\n self.quiz = client.quiz\n self.index = rawEvent[\"questionIndex\"]\n self.timeLeft = rawEvent[\"timeLeft\"]\n self.type = rawEvent[\"type\"]\n self.usesStoryBlocks = rawEvent.get(\"useStoryBlocks\")\n self.ended = False\n self.quiz.questions.append(self)\n self.number = len(self.quiz.questions)\n self.quiz.currentQuestion = self\n self.rawEvent = rawEvent\n def answer(self,number,secret={}):\n if not number and number != 0:\n return False\n self.client.answerQuestion(number,self,secret)\n\nclass QuestionEndEvent():\n def __init__(self,rawEvent,client):\n try:\n self.client = client\n self.quiz = client.quiz\n self.question = self.quiz.questions[-1]\n self.question.ended = True\n self.correctAnswers = rawEvent.get(\"correctAnswers\")\n self.correctAnswer = self.correctAnswers[0]\n self.text = rawEvent.get(\"text\")\n self.correct = rawEvent[\"correct\"]\n self.nemesis = Nemesis(rawEvent.get(\"nemesis\"))\n self.points = rawEvent[\"points\"]\n self.rank = rawEvent[\"rank\"]\n self.total = rawEvent[\"totalScore\"]\n self.streak = rawEvent[\"pointsData\"][\"answerStreakPoints\"][\"streakLevel\"]\n self.rawEvent = rawEvent\n except Exception:\n return # __init__ must return None, so swallow malformed events instead of returning the exception\n\nclass QuestionSubmitEvent():\n def __init__(self,message,client):\n self.client = client\n self.quiz = client.quiz\n self.question = self.quiz.questions[-1]\n self.rawEvent = message\n\nclass Nemesis():\n def __init__(self,rawData):\n if rawData:\n self.name = rawData.get(\"name\")\n self.score = rawData.get(\"totalScore\")\n self.isGhost = rawData.get(\"isGhost\")\n self.exists = True\n self.rawEvent = rawData\n else:\n self.name = None\n self.score = None\n self.isGhost = None\n self.exists = False\n self.rawEvent = None\n\nclass FinishTextEvent():\n def __init__(self,rawEvent):\n self.metal = rawEvent[\"metal\"]\n self.rawEvent = rawEvent\n\nclass QuizFinishEvent():\n def __init__(self,rawEvent,client):\n self.client = client\n self.quiz = client.quiz\n self.players = 
rawEvent[\"playerCount\"]\n self.quizID = rawEvent[\"quizID\"]\n self.rank = rawEvent[\"rank\"]\n self.correct = rawEvent[\"correct\"]\n self.incorrect = rawEvent[\"incorrect\"]\n self.rawEvent = rawEvent\n","sub_path":"src/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"32007259","text":"from typing import *\n\nimport torch\nimport torch.utils.data\nimport yaml\n\nfrom hydranet import HydraNet\nfrom .cityscapes_dataset import CityscapesSegmentationDataset\nfrom .concat_dataset_wrapper import group_datasets\nfrom .hydranet_dataset import HydranetDataset\nfrom .imagenet_dataset import ImageNetDataset\nfrom .list_depth_segmentation_dataset import ListDepthSegmentationDataset\nfrom .list_segmentation_dataset import ListSegmentationDataset\nfrom .voc_segmentation_dataset import VOCSegmentationDataset\n\n\ndef parse_config(config: Union[str, dict], hydranet, device, common_config: dict = None):\n if type(config) == str:\n with open(config) as f:\n config = yaml.safe_load(f)\n if common_config is not None:\n config.update(common_config)\n if config['type'] == 'cityscapes':\n dataset = CityscapesSegmentationDataset(network=hydranet, device=device, **config)\n elif config['type'] == 'list_segmentation':\n dataset = ListSegmentationDataset(network=hydranet, device=device, **config)\n elif config['type'] == 'list_depth_segmentation':\n dataset = ListDepthSegmentationDataset(network=hydranet, device=device, **config)\n elif config['type'] == 'voc_segmentation':\n dataset = VOCSegmentationDataset(network=hydranet, device=device, **config)\n elif config['type'] == 'imagenet':\n dataset = ImageNetDataset(network=hydranet, device=device, **config)\n else:\n raise ValueError(f'Dataset type \"{config[\"type\"]}\" is not supported')\n return dataset\n\n\ndef _infinite_iterator(loader):\n while True:\n for instance in loader:\n yield instance\n\n\ndef _create_datasets_from_configs(config_paths: List[str], hydranet: HydraNet, device: str, common_conf_path: str = None) \\\n -> List[HydranetDataset]:\n datasets = []\n common_config = None\n if common_conf_path is not None:\n with open(common_conf_path) as f:\n common_config = yaml.load(f)\n\n for config in config_paths:\n with open(config) as f:\n config = yaml.load(f)\n dataset = parse_config(config, hydranet, device, common_config=common_config)\n dataset.config = config\n datasets.append(dataset)\n return datasets\n\n\ndef _add_loader_args_to_datasets(datasets: List[HydranetDataset], batch_size: int, common_loader_args: dict = None, num_workers=0):\n for dataset in datasets:\n\n if 'batch_size' in dataset.config:\n batch_size = dataset.config['batch_size']\n # Apply given loadere args to default args\n loader_args = {\n 'batch_size': batch_size,\n 'shuffle': True,\n 'drop_last': True,\n 'num_workers': num_workers,\n 'pin_memory': False,\n }\n if common_loader_args is not None:\n loader_args.update(common_loader_args)\n # If dataset has loader args specified, add them\n if 'loader_args' in dataset.config:\n loader_args.update(dataset.config['loader_args'])\n\n # If dataset has a collate function, use it\n if hasattr(dataset, 'collate_fn'):\n loader_args['collate_fn'] = dataset.collate_fn\n dataset.loader_args = loader_args\n\n\ndef _create_data_loaders_from_datasets_with_loader_args(datasets: List[HydranetDataset]) -> \\\n List[torch.utils.data.DataLoader]:\n data_loaders = []\n for dataset in datasets:\n data_loader = 
torch.utils.data.DataLoader(dataset, **dataset.loader_args)\n data_loader.iterator = _infinite_iterator(data_loader)\n data_loaders.append(data_loader)\n return data_loaders\n\n\ndef create_datasets_and_loaders_from_args(args, hydranet: HydraNet, device: Union[str, torch.device]) -> \\\n Tuple[List[HydranetDataset], List[torch.utils.data.DataLoader]]:\n datasets = _create_datasets_from_configs(config_paths=args.configs, hydranet=hydranet, device=device,\n common_conf_path=args.common_conf)\n _add_loader_args_to_datasets(datasets, args.batch_size, num_workers=args.num_workers)\n if args.group:\n datasets = group_datasets(datasets, by=args.group_by)\n\n data_loaders = _create_data_loaders_from_datasets_with_loader_args(datasets)\n return datasets, data_loaders\n","sub_path":"data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"438814690","text":"import os\nimport sys\nimport argparse\n\nimport joblib\n\nfrom speech_utils.IAAN.data_utils import average_pool\n\n\ndef main(args):\n # Load normalized features\n features = joblib.load(args.input_path)\n # Average pooling\n features_pooled = average_pool(\n features=features, num_processes=args.num_processes,\n pool_size=args.pool_size, step_size=args.step_size,\n overlap=args.overlap_size, pad=args.pad, max_len=args.max_len)\n # Save pooled features\n joblib.dump(features_pooled, args.output_path)\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n 'input_path', type=str, help='Path to the input pickle file.')\n parser.add_argument(\n 'output_path', type=str,\n help='Path where the output pickle file will be saved.')\n\n parser.add_argument(\n 'step_size', type=int, help='Step size (width of pool).')\n parser.add_argument(\n 'overlap_size', type=int,\n help='Size of overlapping (must be less than step size).')\n\n parser.add_argument(\n '--pad', default=False, action=\"store_true\",\n help='Whether to pad zeros to the end of the sequence.')\n parser.add_argument(\n '--max_len', type=int, default=0, help='Maximum sequence length.')\n parser.add_argument(\n '--num_processes', type=int, default=32,\n help='Number of threads in the thread pool.')\n parser.add_argument(\n '--pool_size', type=int, default=32, help='Thread pool size.')\n\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n","sub_path":"scripts/IAAN/average_pool.py","file_name":"average_pool.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"267599414","text":"import theano\nimport theano.tensor as T\nfrom blocks.bricks import Linear, Softmax\nfrom blocks.initialization import Constant, IsotropicGaussian\nfrom blocks.bricks.recurrent import GatedRecurrent, LSTM\nclass impatientLayer:\n # both visual and word feature are in the joint space\n # of dim: feature_dim\n # hidden_dim: dim of m\n # output_dim: final joint document query representation dim\n def __init__(self, feature_dim, hidden_dim, output_dim):\n self.image_embed = Linear(input_dim=feature_dim,\n output_dim=hidden_dim,\n weights_init=IsotropicGaussian(0.01),\n biases_init=Constant(0),\n use_bias=False,\n name='image_embed')\n self.word_embed = Linear(input_dim=feature_dim,\n output_dim=hidden_dim,\n weights_init=IsotropicGaussian(0.01),\n 
biases_init=Constant(0),\n use_bias=False,\n name='word_embed')\n self.r_embed = Linear(input_dim=feature_dim,\n output_dim=hidden_dim,\n weights_init=IsotropicGaussian(0.01),\n biases_init=Constant(0),\n use_bias=False,\n name='r_embed')\n self.m_to_s = Linear(input_dim=hidden_dim,\n output_dim=1,\n weights_init=IsotropicGaussian(0.01),\n biases_init=Constant(0),\n use_bias=False,\n name='m_to_s')\n self.attention_dist = Softmax(name='attention_dist_softmax')\n self.r_to_r = Linear(input_dim=feature_dim,\n output_dim=feature_dim,\n weights_init=IsotropicGaussian(0.01),\n biases_init=Constant(0),\n use_bias=False,\n name='r_to_r')\n # self.r_to_g = Linear(input_dim=feature_dim,\n # output_dim=output_dim,\n # weights_init=IsotropicGaussian(0.01),\n # biases_init=Constant(0),\n # use_bias=False,\n # name='r_to_g')\n self.image_embed.initialize()\n self.word_embed.initialize()\n self.r_embed.initialize()\n self.m_to_s.initialize()\n self.r_to_r.initialize()\n # self.r_to_g.initialize()\n\n # the sequence to sequence LSTM\n self.seq = LSTM(output_dim,\n name='rewatcher_seq',\n weights_init=IsotropicGaussian(0.01),\n biases_init=Constant(0))\n self.seq_embed = Linear(feature_dim,\n output_dim * 4,\n name='rewatcher_seq_embed',\n weights_init=IsotropicGaussian(0.01),\n biases_init=Constant(0),\n use_bias=False)\n\n self.seq.initialize()\n self.seq_embed.initialize()\n\n # doc: row major batch_size x doc_length x feature_dim\n # query: row major batch_size x q x feature_dim\n # mask: mask of query batch_size\n # mask: length of a sentence - 1\n def apply(self, doc, query, mask_, batch_size):\n # batch_size x doc_length x hidden_dim\n mask = mask_.flatten()\n att1 = self.image_embed.apply(doc)\n\n # y_q_i: the ith token of question\n # batch_size x feature_dim\n # r_1: r_m_1\n # batch_size x feature_dim\n # y_d: document\n # batch_size x doc_length x feature_dim\n # y_d_m: d-to-m\n # batch_size x doc_length x hidden_dim\n def one_step(y_q_i, r_1, y_d, y_d_m):\n # batch_size x hidden_dim\n att2 = self.r_embed.apply(r_1)\n # batch_size x hidden_dim\n att3 = self.word_embed.apply(y_q_i)\n att = y_d_m + att2.dimshuffle(0, 'x', 1) + att3.dimshuffle(0, 'x', 1)\n # batch_size x doc_length x hidden_dim\n m = T.tanh(att)\n # batch_size x doc_length x 1\n s = self.m_to_s.apply(m)\n # batch_size x doc_length\n s = s.reshape((s.shape[0], s.shape[1]))\n s = self.attention_dist.apply(s)\n y_d_s = y_d.swapaxes(1, 2)\n # return batch_size x feature_dim\n return T.batched_dot(y_d_s, s) + T.tanh(self.r_to_r.apply(r_1))\n\n # query: batch_size x q x feature_dim\n # r: q x batch_size x feature_dim\n r, updates = theano.scan(fn=one_step,\n sequences=[query.swapaxes(0,1)],\n outputs_info=T.zeros_like(doc[:, 0, :]),\n non_sequences=[doc, att1],\n n_steps=query.shape[1],\n name='impatient layer')\n\n # for the sequence encoder\n # q x batch_size x output_dim\n Wr = self.seq_embed.apply(r)\n # q x batch_size x output_dim\n seq_r, garbage = self.seq.apply(Wr)\n # batch_size x feature_dim\n r_q = r[mask, T.arange(batch_size), :]\n seq_r_q = seq_r[mask, T.arange(batch_size), :]\n # batch_size x output_dim\n return r_q, seq_r_q\n\n","sub_path":"src/rewatcher_seq.py","file_name":"rewatcher_seq.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"266697980","text":"from sources import youtube_data_analysis_live, youtube_data_analysis_cached\nfrom __init__ import shared_mongodb_client\n\nimport logging\nimport traceback\nimport 
datetime\n\n# logging = logging.getLogger('root')\nlogging.getLogger(__name__)\n\n\ndef record_search(query, search_type, time):\n db = shared_mongodb_client[\"yousights\"]\n db_collection = db[\"searchRecords\"]\n # Normalize the search query\n query = query.lower().strip()\n record_document = {\"query\": query, \"search_type\": search_type, \"time\": time, \"all_matcher\": \"a\"}\n db_collection.insert_one(record_document)\n\n\ndef analyticsai(params):\n query = params[\"query\"]\n source = params[\"source\"]\n\n record_search(query, source, datetime.datetime.utcnow())\n\n if source == \"live\":\n try:\n logging.info(\"Starting live search\")\n analysis_results = youtube_data_analysis_live.analyticsai_live(params)\n return analysis_results\n except Exception as error:\n logging.error((f\"ERROR while searching in live: => {error}\"))\n logging.error(traceback.format_exc())\n elif source == \"cached\":\n try:\n logging.info(\"Starting Cache search\")\n analysis_results = youtube_data_analysis_cached.analyticsai_cached(params)\n return analysis_results\n except Exception as error:\n logging.error((f\"ERROR while searching in cached: => {error}\"))\n logging.error(traceback.format_exc())\n","sub_path":"analyticsAI/sources/youtube_data_analysis.py","file_name":"youtube_data_analysis.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"33930538","text":"class AffineCipher:\n\n def __init__(self):\n \"\"\"This is a python implementation of Affine Cipher\"\"\"\n\n @staticmethod\n def egcd(a, b):\n '''Euclidean Algorithm for finding modular inverse'''\n x, y, u, v = 0, 1, 1, 0\n while a != 0:\n q, r = b // a, b % a\n m, n = x - u * q, y - v * q\n b, a, x, y, u, v = a, r, u, v, m, n\n gcd = b\n return gcd, x, y\n\n @staticmethod\n def modinv(a, m):\n '''Modular Inverse'''\n gcd, x, y = AffineCipher.egcd(a, m)\n if gcd != 1:\n return None # modular inverse does not exist\n else:\n return x % m\n\n @staticmethod\n def encrypt(msg: str, a: int, b: int) -> str:\n result = ''\n for ch in msg.upper():\n if ch.isalpha():\n result += chr(((a * (ord(ch) - ord('A')) + b) % 26)\n + ord('A'))\n else:\n result += ch\n return result\n\n @staticmethod\n def decrypt(msg: str, a: int, b: int) -> str:\n result = ''\n for ch in msg.upper():\n if ch.isalpha():\n result += chr(((AffineCipher.modinv(a, 26) * (ord(ch)\n - ord('A') - b)) % 26) + ord('A'))\n else:\n result += ch\n return result\n","sub_path":"decrypto/src/affine_cipher.py","file_name":"affine_cipher.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"87408600","text":"import sys\nimport csv\n\n\nclass CsvOutputter:\n def write(self, artifacts):\n \"\"\"\n\n :param artifacts: iterable object of Artifacts\n :param filename: filename to send output\n :return:\n \"\"\"\n writer = csv.writer(sys.stdout, delimiter=' ', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for artifact in artifacts:\n writer.writerow([artifact.group, artifact.artifact, artifact.version])","sub_path":"src/output/csvformatter.py","file_name":"csvformatter.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"298632628","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\n\ndef set_encounter():\n print('Enter the set element: ')\n A = input().split()\n \n entry = set('')\n for x in A:\n if x in 
entry:\n print('YES')\n else:\n print('NO')\n entry.add(x)\n \ndef main():\n set_encounter()\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n","sub_path":"pysnakify/10sets-encounter.py","file_name":"10sets-encounter.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"173959000","text":"# library.py\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import learning_curve\nfrom time import time\nfrom functools import wraps\n\ndef get_dataset_file_path(date, filename):\n \"\"\"Produces a filepath for the dataset.\n\n :parameter date (string): The date folder name. Ex: \"2020-02-05\"\n :parameter filename (string): The csv filename.\n :returns filepath (string): The filepath for the dataset.\n\n Example:\n\n project_root\n ├── README.md\n ├── data\n │   └── 2020-04-13\n │   ├── README.md\n │   ├── data_description.txt\n │   ├── test.csv\n │   └── train.csv\n ├── docs\n ├── requirements.yml\n └── results\n └── 2020-04-13\n └── runall.py\n\n The function is called from the 'runall.py' file.\n >> get_dataset_file_path('2020-04-13', 'train.csv')\n '~/project_root/data/2020-04-13/train.csv'\n \"\"\"\n\n basepath = os.path.abspath('')\n filepath = os.path.abspath(os.path.join(basepath, \"..\", \"..\")) + \"/data/\" + date + \"/\" + filename\n return filepath\n\n\ndef convert_object_to_categorical(df):\n \"\"\"Converts columns in a pandas dataframe of dtype 'object' to dtype 'categorical.' This is a destructive method\n\n :parameter df (pandas dataframe): A pandas dataframe\n \"\"\"\n assert isinstance(df, pd.DataFrame)\n\n object_columns = df.select_dtypes(include='object').columns.tolist()\n for obj_col in object_columns:\n df[obj_col] = df[obj_col].astype('category')\n\n\ndef display_scores(scores):\n \"\"\"\n\n Args:\n scores: One dimensional array of model scores\n\n Returns:\n Prints out the list of scores, the mean, and standard deviation.\n\n \"\"\"\n print(\"Scores:\", scores)\n print(\"Mean:\", scores.mean())\n print(\"Standard deviation:\", scores.std())\n\n\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n \"\"\"Plots a precision recall vs threshold curve.\n\n :parameter precisions (numpy ndarray): Precision values such that element i is the precision of predictions with\n score >= thresholds[i] and the last element is 1.\n :parameter recalls (numpy ndarray): Decreasing recall values such that element i is the recall of predictions with\n score >= thresholds[i] and the last element is 0.\n :parameter thresholds (numpy array): Increasing thresholds on the decision function used to compute precision and\n recall.\n\n Generally you will pass the output of sklearn.metrics.precision_recall_curve into this function.\n \"\"\"\n\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\")\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\")\n plt.xlabel(\"Threshold\")\n plt.legend(loc=\"best\")\n plt.ylim([0, 1])\n plt.title(\"Precision vs. 
Recall\")\n\n\ndef plot_roc_curve(fpr, tpr, label=None):\n \"\"\"Plots a receiver operating characteristic curve given an array of false positive rates and true positive rates.\n :parameter fpr (numpy ndarray): Increasing false positive rates such that element i is the false positive rate of\n predictions with score >= thresholds[i].\n :parameter tpr (numpy ndarray): Increasing true positive rates such that element i is the true positive rate of\n predictions with score >= thresholds[i].\n\n Generally you will pass the output of sklearn.metrics.roc_curve into this function.\n \"\"\"\n\n plt.plot(fpr, tpr, linewidth=2, label=label)\n plt.plot([0, 1], [0, 1], 'k--') # dashed diagonal\n plt.axis([0, 1, 0, 1])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"Receiver Operating Characteristic Curve\")\n\n\ndef plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,\n n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate 3 plots: the test and training learning curve, the training\n samples vs fit times curve, the fit times vs score curve.\n\n Source: https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n title : string\n Title for the chart.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n axes : array of 3 axes, optional (default=None)\n Axes to use for plotting the curves.\n\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 5-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide ` for the various\n cross-validators that can be used here.\n\n n_jobs : int or None, optional (default=None)\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n train_sizes : array-like, shape (n_ticks,), dtype float or int\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the dtype is float, it is regarded as a\n fraction of the maximum size of the training set (that is determined\n by the selected validation method), i.e. 
it has to be within (0, 1].\n Otherwise it is interpreted as absolute sizes of the training sets.\n Note that for classification the number of samples usually have to\n be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n \"\"\"\n if axes is None:\n _, axes = plt.subplots(1, 3, figsize=(20, 5))\n\n axes[0].set_title(title)\n if ylim is not None:\n axes[0].set_ylim(*ylim)\n axes[0].set_xlabel(\"Training examples\")\n axes[0].set_ylabel(\"Score\")\n\n train_sizes, train_scores, test_scores, fit_times, _ = \\\n learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,\n train_sizes=train_sizes,\n return_times=True)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n fit_times_mean = np.mean(fit_times, axis=1)\n fit_times_std = np.std(fit_times, axis=1)\n\n # Plot learning curve\n axes[0].grid()\n axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1,\n color=\"g\")\n axes[0].plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n axes[0].plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n axes[0].legend(loc=\"best\")\n\n # Plot n_samples vs fit_times\n axes[1].grid()\n axes[1].plot(train_sizes, fit_times_mean, 'o-')\n axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,\n fit_times_mean + fit_times_std, alpha=0.1)\n axes[1].set_xlabel(\"Training examples\")\n axes[1].set_ylabel(\"fit_times\")\n axes[1].set_title(\"Scalability of the model\")\n\n # Plot fit_time vs score\n axes[2].grid()\n axes[2].plot(fit_times_mean, test_scores_mean, 'o-')\n axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1)\n axes[2].set_xlabel(\"fit_times\")\n axes[2].set_ylabel(\"Score\")\n axes[2].set_title(\"Performance of the model\")\n\n return plt\n\n\ndef simple_time_tracker(log_fun):\n def _simple_time_tracker(fn):\n @wraps(fn)\n def wrapped_fn(*args, **kwargs):\n start_time = time()\n\n try:\n result = fn(*args, **kwargs)\n finally:\n elapsed_time = time() - start_time\n\n # log the result\n log_fun({\n 'function_name': fn.__name__,\n 'total_time': elapsed_time,\n })\n\n return result\n\n return wrapped_fn\n\n return _simple_time_tracker\n","sub_path":"lib_bre/lib_bre/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":9288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"202442571","text":"# def add_positive_numbers(x,y):\n# \tassert x>0 and y>0,\"Both numbers must be positive\"\n# \treturn x+y\n\n# print(add_positive_numbers(1,1)) #2\n# print(add_positive_numbers(1,-1)) #assertionEroor : Both numbers must be positive\n\n\ndef eat_junk(food):\n\tassert food in [\n\t\"pizza\", \n\t\"ice cream\",\n\t\"candy\",\n\t\"burger\",\n\t\"fries\"\n\t] , \"Food must be junk food!\"\n\treturn f\"NOM NOM NOM I am eating {food}\"\n\n\nfood = input(\"Enter a junk food :\")\nprint(eat_junk(food))\n","sub_path":"Testing/assertions.py","file_name":"assertions.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} 
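# --- Illustrative usage sketch (not one of the corpus records above): how the
# `simple_time_tracker` decorator from the library.py record might be applied.
# `log_timing` and `slow_sum` are invented names; the decorator itself and the
# dict it passes to `log_fun` (keys 'function_name' and 'total_time') are taken
# from that record. Assumes the decorator is importable in the current scope.
from time import sleep

def log_timing(record):
    # `record` is the dict the decorator builds after the wrapped call finishes.
    print(f"{record['function_name']} finished in {record['total_time']:.3f}s")

@simple_time_tracker(log_timing)
def slow_sum(n):
    sleep(0.1)  # simulate some work
    return sum(range(n))

slow_sum(1_000_000)  # prints e.g. "slow_sum finished in 0.152s"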
+{"seq_id":"21560160","text":"import math\n\nclass FormulaGeneralPositiva(object):\n\tdef __init__(self, a, b, c):\n\t\tself.a = a\n\t\tself.b = b\n\t\tself.c = c\n\t\tself.dividendo = 0\n\t\tself.divisor = 0\n\t\tself.x = 0\n\n\tdef operaciones(self):\n\t\tself.dividendo = -(self.b) + math.sqrt(((self.b)**2)-(4 + self.a + self.c))\n\t\tself.divisor = (2*self.a)\n\t\tself.x = (self.dividendo)/(self.divisor)\n\t\treturn self.x\n\n\tdef __str__(self):\n\t\tprint(\"El resultado de x1 = {}\".format(self.x))\n\nclass FormulaGeneralNegativa(FormulaGeneralPositiva):\n\tdef operaciones(self):\n\t\tself.dividendo = -(self.b) - math.sqrt(((self.b)**2)-(4 + self.a + self.c))\n\t\tself.divisor = (2*self.a)\n\t\tself.x = (self.dividendo)/(self.divisor)\n\t\treturn self.x\n\n\tdef __str__(self):\n\t\tprint(\"El resultado de x2 = {}\".format(self.x))\n\t\t\n\nNumeros = [3, -5, -1]\n\nOperacion_Uno = FormulaGeneralPositiva(*Numeros)\nOperacion_Uno.operaciones()\nOperacion_Uno.__str__()\n\nOperacion_Dos = FormulaGeneralNegativa(*Numeros)\nOperacion_Dos.operaciones()\nOperacion_Dos.__str__()\n\n\n\t\t","sub_path":"Ejercicio_50.py","file_name":"Ejercicio_50.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"461311076","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import redirect, render\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom shows_frontend.views import show_frontend_views\nfrom manage.models.shows_domain import ShowDomains\nimport re\nfrom django.core.exceptions import ObjectDoesNotExist\n\ndef landing_view(request):\n domain = request.META['HTTP_HOST']\n if \"www.\" in domain:\n # live deployment\n #httpys://wwww. subdomain. babelfeed. 
com/asdfasdf\n splitDomainArr = domain.split(\".\")\n if len(splitDomainArr) == 4:\n try:\n subdomain = splitDomainArr[1]\n qs = ShowDomains.objects.get_with_subdomain_name(subdomain)\n return show_frontend_views.show_frontend(request, subdomain)\n except:\n return render(request, \"access_denied.html\") # should redirect to an access denied or show not found page\n else:\n # No subdomain has been entered, so we go to the landing page\n return render(request, \"manage/home_page.html\") # should redirect\n else:\n # local environment\n splitDomainArr = domain.split(\".\")\n if len(splitDomainArr) == 2:\n try:\n subdomain = splitDomainArr[0]\n qs = ShowDomains.objects.get_with_subdomain_name(subdomain)\n return show_frontend_views.show_frontend(request, subdomain)\n except:\n return render(request,\n \"access_denied.html\") # should redirect to an access denied or show not found page\n else:\n # No subdomain has been entered, so we go to the landing page\n return render(request, \"manage/home_page.html\") # should redirect\n\n\n\ndef login_view(request):\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect(\"user_profile\")\n else:\n return redirect(\"landing_view\")\n else:\n form = AuthenticationForm()\n return render(request,'accounts/login.html', {'form': form})\n\n\ndef access_denied(request):\n return render(request, \"access_denied.html\")\n\ndef logout_view(request):\n logout(request)\n return redirect(\"login\")\n\ndef register(request):\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n # is_superuser\n\n if form.is_valid():\n superuse = form.save(commit=False)\n superuse.is_staff = True\n superuse.is_superuser = True\n print(superuse.is_staff)\n superuse.save()\n return redirect(\"login\")\n else:\n # Re-render the bound form so validation errors are shown,\n # instead of falling through and returning None.\n print(\"Registration form is invalid:\", form.errors)\n return render(request, 'accounts/register.html', {'form': form})\n\n else:\n form = UserCreationForm()\n return render(request,'accounts/register.html', {'form': form})","sub_path":"accounts/views/user_views.py","file_name":"user_views.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"35156114","text":"# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nfrom proboscis.asserts import assert_false\n\nfrom fuelweb_test import logger\nfrom fuelweb_test.helpers.utils import get_net_settings\nfrom fuelweb_test.settings import iface_alias\nfrom fuelweb_test.tests.base_test_case import TestBasic\n\n\nclass BondingTest(TestBasic):\n def __init__(self):\n self.TEMPLATE_OLD_SERIALIZATION_BOND_CONFIG = {\n 'mac': None,\n 'mode': 'active-backup',\n 'state': None,\n 'type': 'bond',\n 'assigned_networks': [],\n 'bond_properties': {'mode': 'active-backup',\n 'type__': 'linux'}}\n\n self.TEMPLATE_NEW_SERIALIZATION_BOND_CONFIG = {\n 'mac': None,\n 'mode': 'active-backup',\n 'state': None,\n 'type': 'bond',\n 'assigned_networks': [],\n 'attributes': {\n 'type__': {'type': 'hidden', 'value': 'linux'}}}\n\n self.INTERFACES = {\n 'bond0': [\n 'public',\n 'management',\n 'storage',\n 'private'\n ],\n 'bond1': ['fuelweb_admin']\n }\n self.BOND_LIST = [\n {\n 'name': 'bond0',\n 'slaves': [\n {'name': iface_alias('eth5')},\n {'name': iface_alias('eth4')},\n {'name': iface_alias('eth3')},\n {'name': iface_alias('eth2')}\n ]\n },\n {\n 'name': 'bond1',\n 'slaves': [\n {'name': iface_alias('eth1')},\n {'name': iface_alias('eth0')}]\n }\n ]\n self.BOND_ATTR = {}\n super(BondingTest, self).__init__()\n self.__cluster_id = None\n self.__bond_config = None\n\n @property\n def cluster_id(self):\n if self.__cluster_id is None:\n self.__cluster_id = self.fuel_web.get_last_created_cluster()\n return self.__cluster_id\n\n @property\n def bond_config(self):\n if self.__bond_config is None:\n self.__bond_config = self._generate_bonding_config()\n return self.__bond_config\n\n @staticmethod\n def get_bond_interfaces(bond_config, bond_name):\n bond_slaves = []\n for bond in [bond for bond in bond_config]:\n if bond['name'] == bond_name:\n for slave in bond['slaves']:\n bond_slaves.append(slave['name'])\n return bond_slaves\n\n def _is_old_interface_serialization_scheme(self):\n node = self.fuel_web.client.list_cluster_nodes(self.cluster_id)[0]\n interface = self.fuel_web.client.get_node_interfaces(node['id'])[0]\n if 'interface_properties' in interface.keys():\n return True\n\n def _generate_bonding_config(self):\n bonding_config = copy.deepcopy(self.BOND_LIST)\n if self._is_old_interface_serialization_scheme():\n data = self.TEMPLATE_OLD_SERIALIZATION_BOND_CONFIG\n else:\n data = self.TEMPLATE_NEW_SERIALIZATION_BOND_CONFIG\n data['attributes'].update(self.BOND_ATTR)\n for bond in bonding_config:\n bond.update(data)\n return bonding_config\n\n def check_interfaces_config_after_reboot(self):\n network_settings = dict()\n skip_interfaces = {\n r'^pub-base$', r'^vr_pub-base$', r'^vr-base$', r'^mgmt-base$',\n r'^vr-host-base$', r'^mgmt-conntrd$', r'^hapr-host$',\n r'^(tap|qr-|qg-|p_).*$', r'^v_vrouter.*$',\n r'^v_(management|public)$'}\n\n nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)\n\n for node in nodes:\n with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:\n network_settings[node['hostname']] = \\\n get_net_settings(remote, skip_interfaces)\n\n self.fuel_web.warm_restart_nodes(\n self.fuel_web.get_devops_nodes_by_nailgun_nodes(nodes))\n\n network_settings_changed = False\n\n for node in nodes:\n with self.env.d_env.get_ssh_to_remote(node['ip']) as remote:\n saved_settings = network_settings[node['hostname']]\n actual_settings = get_net_settings(remote, skip_interfaces)\n if not saved_settings == actual_settings:\n 
network_settings_changed = True\n logger.error('Network settings were changed after reboot '\n 'on node {0}! '.format(node['hostname']))\n logger.debug('Network settings before the reboot of slave '\n '{0}: {1}'.format(node['hostname'],\n saved_settings))\n logger.debug('Network settings after the reboot of slave '\n '{0}: {1}'.format(node['hostname'],\n actual_settings))\n\n for iface in saved_settings:\n if iface not in actual_settings:\n logger.error(\"Interface '{0}' doesn't exist after \"\n \"reboot of '{1}'!\".format(\n iface, node['hostname']))\n continue\n if saved_settings[iface] != actual_settings[iface]:\n logger.error(\"Interface '{0}' settings \"\n \"were changed after reboot \"\n \"of '{1}': was {2}, now \"\n \"{3}.\".format(iface,\n node['hostname'],\n saved_settings[iface],\n actual_settings[iface]))\n\n assert_false(network_settings_changed,\n \"Network settings were changed after environment nodes \"\n \"reboot! Please check logs for details!\")\n\n\nclass BondingTestDPDK(BondingTest):\n def __init__(self):\n super(BondingTestDPDK, self).__init__()\n self.TEMPLATE_OLD_SERIALIZATION_BOND_CONFIG[\n 'interface_properties'] = {'dpdk': {'available': True}}\n self.BOND_LIST = [\n {\n 'name': 'bond0',\n 'slaves': [\n {'name': iface_alias('eth3')},\n {'name': iface_alias('eth2')}\n ],\n },\n {\n 'name': 'bond1',\n 'slaves': [\n {'name': iface_alias('eth1')},\n {'name': iface_alias('eth0')}\n ],\n },\n {\n 'name': 'bond2',\n 'slaves': [\n {'name': iface_alias('eth5')},\n {'name': iface_alias('eth4')},\n ],\n },\n ]\n\n self.INTERFACES = {\n 'bond0': [\n 'public',\n 'management',\n 'storage',\n ],\n 'bond1': ['fuelweb_admin'],\n 'bond2': ['private'],\n }\n\n self.BOND_ATTR = {\n 'dpdk': {\n 'enabled': {\n 'type': 'checkbox',\n 'value': False,\n 'weight': 10,\n 'label': 'DPDK enabled'},\n 'metadata': {'weight': 40, 'label': 'DPDK'}\n }}\n\n\nclass BondingTestOffloading(BondingTest):\n def __init__(self):\n super(BondingTestOffloading, self).__init__()\n self.BOND_ATTR = {\n \"offloading\": {\n \"disable\": {\n \"type\": \"checkbox\",\n \"value\": False,\n \"weight\": 10,\n \"label\": \"Disable offloading\"\n },\n \"modes\": {\n \"value\": {\n \"rx-vlan-offload\": None,\n \"tx-scatter-gather\": None,\n \"scatter-gather\": None,\n \"generic-segmentation-offload\": None,\n \"tx-nocache-copy\": None,\n \"tx-checksumming\": None,\n \"generic-receive-offload\": None,\n \"tx-checksum-ip-generic\": None,\n \"rx-all\": None,\n \"rx-fcs\": None,\n \"tcp-segmentation-offload\": None,\n \"tx-tcp-segmentation\": None,\n \"rx-checksumming\": None},\n \"type\": \"offloading_modes\",\n \"description\": \"Offloading modes\",\n \"weight\": 20,\n \"label\": \"Offloading modes\"},\n \"metadata\": {\n \"weight\": 10,\n \"label\": \"Offloading\"}\n }}\n","sub_path":"fuelweb_test/tests/test_bonding_base.py","file_name":"test_bonding_base.py","file_ext":"py","file_size_in_byte":9574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"482916380","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('hello', '0002_auto_20151013_2355'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='attempt',\n name='last_guess',\n field=models.CharField(max_length=50, null=True),\n ),\n migrations.AddField(\n model_name='attempt',\n name='last_poke_id',\n field=models.IntegerField(null=True),\n ),\n migrations.AlterField(\n 
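# --- Illustrative sketch (not one of the corpus records above): the slave
# lookup performed by BondingTest.get_bond_interfaces in the
# test_bonding_base.py record, rewritten standalone. The abridged `bond_list`
# literal is hypothetical; the traversal mirrors the static method, which
# collects the slave interface names of the bond with the given name.
bond_list = [
    {'name': 'bond0', 'slaves': [{'name': 'eth5'}, {'name': 'eth4'}]},
    {'name': 'bond1', 'slaves': [{'name': 'eth1'}, {'name': 'eth0'}]},
]

def get_bond_interfaces(bond_config, bond_name):
    # Flatten the slave dicts of the matching bond into a list of names.
    return [slave['name']
            for bond in bond_config if bond['name'] == bond_name
            for slave in bond['slaves']]

assert get_bond_interfaces(bond_list, 'bond0') == ['eth5', 'eth4']
assert get_bond_interfaces(bond_list, 'bond1') == ['eth1', 'eth0']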
model_name='attempt',\n name='player_name',\n field=models.CharField(max_length=25, null=True),\n ),\n ]\n","sub_path":"hello/migrations/0003_auto_20151014_2042.py","file_name":"0003_auto_20151014_2042.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"149275930","text":"\"\"\"Defines hyperparameter search space dimension classes used for declaring hyperparameter choices,\nas well as some utility functions for processing dimensions and the hyperparameter space as a whole\n\nRelated\n-------\n:mod:`hyperparameter_hunter.optimization_core`\n Defines optimization protocol classes that expect to receive hyperparameter dimension inputs\n:mod:`hyperparameter_hunter.utils.optimization_utils`\n Defines utilities for matching a current hyperparameter space with the hyperparameters of saved\n Experiments. Also defines :class:`utils.optimization_utils.AskingOptimizer`, which determines\n the values in the given choices to search next\n\nNotes\n-----\nThis module heavily relies on the Scikit-Optimize library, so thank you to the creators and\ncontributors of `scikit-optimize` for their excellent work. Their documentation may also be useful\nto help understand this module\"\"\"\n##################################################\n# Import Own Assets\n##################################################\nfrom hyperparameter_hunter.utils.boltons_utils import get_path\n\n##################################################\n# Import Miscellaneous Assets\n##################################################\nfrom abc import ABCMeta, abstractmethod\nfrom functools import reduce\nfrom sys import maxsize\nfrom uuid import uuid4 as uuid\n\n##################################################\n# Import Learning Assets\n##################################################\nfrom sklearn.utils import check_random_state\nfrom skopt.space import space as skopt_space\n\n\nNONE = object()\n\n\nclass Singleton(type):\n _instances = {}\n\n def __new__(mcs, name, bases, namespace):\n namespace[\"__copy__\"] = lambda self, *args: self\n namespace[\"__deepcopy__\"] = lambda self, *args: self\n return super().__new__(mcs, name, bases, namespace)\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass RejectedOptional(metaclass=Singleton):\n \"\"\"Singleton class to symbolize the rejection of an `optional` `Categorical` value\n\n This is used as a sentinel, when the value in `Categorical.categories` is not used, to be\n inserted into a :class:`~hyperparameter_hunter.feature_engineering.FeatureEngineer`. 
If\n :attr:`hyperparameter_hunter.feature_engineering.FeatureEngineer.steps` contains an instance\n of `RejectedOptional`, it is removed from `steps`\"\"\"\n\n def __str__(self):\n return \"\"\n\n def __format__(self, format_spec):\n return str(self).__format__(format_spec)\n\n\n##################################################\n# Dimensions\n##################################################\n# noinspection PyAbstractClass\nclass Dimension(skopt_space.Dimension, metaclass=ABCMeta):\n def __init__(self, **kwargs):\n \"\"\"Base class for hyperparameter search space dimensions\n\n Attributes\n ----------\n id: String\n A stringified UUID used to link space dimensions to their locations in a model's overall\n hyperparameter structure\"\"\"\n self.id = str(uuid())\n super().__init__(**kwargs)\n\n # noinspection PyMethodOverriding\n @skopt_space.Dimension.name.setter\n def name(self, value):\n \"\"\"Set :attr:`_name` to `value`\n\n Parameters\n ----------\n value: String, tuple, or None\n The new value of :attr:`_name`\n\n Raises\n ------\n ValueError\n If `value` is not one of: string, tuple, or None\"\"\"\n if isinstance(value, (str, tuple)) or value is None:\n # noinspection PyAttributeOutsideInit\n self._name = value\n else:\n raise ValueError(\"Dimension's name must be one of: string, tuple, or None.\")\n\n @abstractmethod\n def get_params(self) -> dict:\n \"\"\"Get dict of parameters used to initialize the `Dimension`, or their defaults\"\"\"\n\n\nclass Real(Dimension, skopt_space.Real):\n def __init__(self, low, high, prior=\"uniform\", transform=\"identity\", name=None):\n \"\"\"Search space dimension that can assume any real value in a given range\n\n Parameters\n ----------\n low: Float\n Lower bound (inclusive)\n high: Float\n Upper bound (inclusive)\n prior: String in ['uniform', 'log-uniform'], default='uniform'\n Distribution to use when sampling random points for this dimension. If 'uniform', points\n are sampled uniformly between the lower and upper bounds. If 'log-uniform', points are\n sampled uniformly between `log10(lower)` and `log10(upper)`\n transform: String in ['identity', 'normalize'], default='identity'\n Transformation to apply to the original space. If 'identity', the transformed space is\n the same as the original space. If 'normalize', the transformed space is scaled\n between 0 and 1\n name: String, tuple, or None, default=None\n A name associated with the dimension\"\"\"\n super().__init__(low=low, high=high, prior=prior, transform=transform, name=name)\n\n def __contains__(self, item):\n try:\n return super().__contains__(item)\n except TypeError:\n return False\n\n def get_params(self) -> dict:\n \"\"\"Get dict of parameters used to initialize the `Real`, or their defaults\"\"\"\n return dict(\n low=self.low,\n high=self.high,\n prior=self.prior,\n transform=self.transform_,\n name=self.name,\n )\n\n\nclass Integer(Dimension, skopt_space.Integer):\n def __init__(self, low, high, transform=None, name=None):\n \"\"\"Search space dimension that can assume any integer value in a given range\n\n Parameters\n ----------\n low: Float\n Lower bound (inclusive)\n high: Float\n Upper bound (inclusive)\n transform: String in ['identity', 'normalize'], default='identity'\n Transformation to apply to the original space. If 'identity', the transformed space is\n the same as the original space. 
If 'normalize', the transformed space is scaled\n between 0 and 1\n name: String, tuple, or None, default=None\n A name associated with the dimension\"\"\"\n super().__init__(low=low, high=high, transform=transform, name=name)\n\n def __contains__(self, item):\n try:\n return super().__contains__(item)\n except TypeError:\n return False\n\n def get_params(self) -> dict:\n \"\"\"Get dict of parameters used to initialize the `Integer`, or their defaults\"\"\"\n return dict(low=self.low, high=self.high, transform=self.transform_, name=self.name)\n\n\nclass Categorical(Dimension, skopt_space.Categorical):\n def __init__(self, categories, prior=None, transform=\"onehot\", optional=False, name=None):\n \"\"\"Search space dimension that can assume any categorical value in a given list\n\n Parameters\n ----------\n categories: List\n Sequence of possible categories of shape (n_categories,)\n prior: List, or None, default=None\n If list, prior probabilities for each category of shape (categories,). By default all\n categories are equally likely\n transform: String in ['onehot', 'identity'], default='onehot'\n Transformation to apply to the original space. If 'identity', the transformed space is\n the same as the original space. If 'onehot', the transformed space is a one-hot encoded\n representation of the original space\n optional: Boolean, default=False\n Intended for use by :class:`~hyperparameter_hunter.feature_engineering.FeatureEngineer`\n when optimizing an :class:`~hyperparameter_hunter.feature_engineering.EngineerStep`.\n Specifically, this enables searching through a space in which an `EngineerStep` either\n may or may not be used. This is contrary to `Categorical`'s usual function of creating\n a space comprising multiple `categories`. When `optional` = True, the space created will\n represent any of the values in `categories` either being included in the entire\n `FeatureEngineer` process, or being skipped entirely. Internally, a value excluded by\n `optional` is represented by a sentinel value that signals it should be removed from the\n containing list, so `optional` will not work for choosing between a single value and\n None, for example\n name: String, tuple, or None, default=None\n A name associated with the dimension\"\"\"\n if optional and RejectedOptional() not in categories:\n categories.append(RejectedOptional())\n self.optional = optional\n # TODO: Test using `optional` with `prior` and `transform`\n\n super().__init__(categories=categories, prior=prior, transform=transform, name=name)\n\n def get_params(self) -> dict:\n \"\"\"Get dict of parameters used to initialize the `Categorical`, or their defaults\"\"\"\n return dict(\n categories=self.categories,\n prior=self.prior,\n transform=self.transform_,\n optional=self.optional,\n name=self.name,\n )\n\n\n##################################################\n# Space\n##################################################\nclass Space(skopt_space.Space):\n def __init__(self, dimensions, random_state=None):\n \"\"\"Hyperparameter search space\n\n Parameters\n ----------\n dimensions: List\n List of search space dimensions. Each search dimension can be defined as any of the\n following: 1) a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer` dimensions).\n 2) A `(lower_bound, upper_bound, \"prior\")` tuple (for `Real` dimensions).\n 3) A list of categories (for `Categorical` dimensions).\n 4) An instance of a `Dimension` object (`Real`, `Integer`, or `Categorical`)\n random_state: None\n ... 
Experimental...\"\"\"\n # self.space_random_state = check_random_state(None) # FLAG: THIS BREAKS AND REPEATS RESULTS OF `rvs`\n self.space_random_state = check_random_state(32) # FLAG: THIS WORKS\n super().__init__(dimensions=dimensions)\n\n def rvs(self, n_samples=1, random_state=None):\n \"\"\"Draw random samples from the search space. The samples are in the original space. They\n need to be transformed before being passed to a model or minimizer by :meth:`transform`\n\n Parameters\n ----------\n n_samples: Int, default=1\n Number of samples to be drawn from the space\n\n random_state: Int, RandomState instance, or None, default=None\n Set random state to something other than None for reproducible results\n\n Returns\n -------\n List of lists\n Points sampled from the space. Of shape (n_points, n_dims)\"\"\"\n return super().rvs(n_samples=n_samples, random_state=self.space_random_state)\n\n def __len__(self):\n \"\"\"Determine the number of possible search points in :attr:`dimensions`\n\n Returns\n -------\n search_space_size: Integer, or `sys.maxsize`\n The number of different hyperparameter search points. If the hyperparameter search space\n is infinitely large, `sys.maxsize` is returned to represent `np.inf`, which cannot\n itself be returned because `__len__` is required to produce an int >= 0\"\"\"\n if any(isinstance(_, Real) for _ in self.dimensions):\n search_space_size = maxsize\n else:\n search_space_size = reduce(\n lambda x, y: x * y,\n [\n (_.high - _.low + 1) if isinstance(_, Integer) else len(_.bounds)\n for _ in self.dimensions\n ],\n 1,\n )\n\n return search_space_size\n\n def names(self, use_location=True):\n \"\"\"Retrieve the names, or locations of all dimensions in the hyperparameter search space\n\n Parameters\n ----------\n use_location: Boolean, default=True\n If True and a dimension has a non-null attribute called 'location', its value will be\n used instead of 'name'\n\n Returns\n -------\n names: List\n A list of strings or tuples, in which each value is the name or location of the\n dimension at that index\"\"\"\n names = []\n for dimension in self.dimensions:\n if use_location and hasattr(dimension, \"location\") and dimension.location:\n names.append(dimension.location)\n else:\n names.append(dimension.name)\n return names\n\n def get_by_name(self, name, use_location=True, default=NONE):\n \"\"\"Retrieve a single dimension by its name\n\n Parameters\n ----------\n name: Tuple, or str\n Name of the dimension in :attr:`dimensions` to return\n use_location: Boolean, default=True\n If True and a dimension has a non-null attribute called \"location\", its value will be\n used instead of that dimension's \"name\"\n default: Any (optional)\n If given and `name` is not found, `default` will be returned. 
Otherwise, `KeyError` will\n be raised when `name` is not found\n\n Returns\n -------\n Dimension\n Dimension subclass in :attr:`dimensions`, whose \"name\" attribute is equal to `name`\"\"\"\n for dimension in self.dimensions:\n if use_location and getattr(dimension, \"location\", None) == name:\n return dimension\n elif dimension.name == name:\n return dimension\n\n if default != NONE:\n return default\n raise KeyError(f\"{name} not found in dimensions\")\n\n\n##################################################\n# Space Utilities\n##################################################\ndef normalize_dimensions(dimensions):\n \"\"\"Create a `Space` where all dimensions are normalized to unit range\n\n Parameters\n ----------\n dimensions: List\n List of search space dimensions. Each search dimension can be defined as any of the\n following: 1) a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer` dimensions).\n 2) A `(lower_bound, upper_bound, \"prior\")` tuple (for `Real` dimensions).\n 3) A list of categories (for `Categorical` dimensions).\n 4) An instance of a `Dimension` object (`Real`, `Integer`, or `Categorical`)\n\n Returns\n -------\n :class:`hyperparameter_hunter.space.Space` instance\n Hyperparameter space class instance, in which dimensions have been normalized to unit range\n\n Raises\n ------\n RuntimeError\n If a processed element of `dimensions` is not one of: `Real`, `Integer`, `Categorical`\n\n Notes\n -----\n The upper and lower bounds are inclusive for `Integer` dimensions. Based on\n :func:`skopt.utils.normalize_dimensions`\"\"\"\n space = Space(dimensions)\n transformed_dimensions = []\n\n if space.is_categorical:\n for dim in space:\n transformed_dimensions.append(\n Categorical(dim.categories, dim.prior, transform=\"identity\", name=dim.name)\n )\n else:\n for dim in space.dimensions:\n if isinstance(dim, Categorical):\n transformed_dimensions.append(dim)\n elif isinstance(dim, Real):\n transformed_dimensions.append(\n Real(dim.low, dim.high, dim.prior, transform=\"normalize\", name=dim.name)\n )\n elif isinstance(dim, Integer):\n transformed_dimensions.append(\n Integer(dim.low, dim.high, transform=\"normalize\", name=dim.name)\n )\n else:\n raise RuntimeError(f\"Unknown dimension type: {type(dim)}\")\n #################### Replace Lost Attributes ####################\n if hasattr(dim, \"location\"):\n transformed_dimensions[-1].location = dim.location\n\n return Space(transformed_dimensions)\n\n\ndef dimension_subset(hyperparameters, dimensions):\n \"\"\"Return only the values of `hyperparameters` specified by `dimensions`, in the same order as\n `dimensions`\n\n Parameters\n ----------\n hyperparameters: Dict\n Dict of hyperparameters containing at least the following keys: ['model_init_params',\n 'model_extra_params', 'feature_engineer', 'feature_selector']\n dimensions: List of: (strings, or tuples)\n Locations and order of the values to return from `hyperparameters`. 
If a value is a string,\n it is assumed to belong to `model_init_params`, and its path will be adjusted accordingly\n\n Returns\n -------\n List of hyperparameter values\"\"\"\n dimensions = [(\"model_init_params\", _) if isinstance(_, str) else _ for _ in dimensions]\n values = [get_path(hyperparameters, _, default=RejectedOptional()) for _ in dimensions]\n return values\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"hyperparameter_hunter/space.py","file_name":"space.py","file_ext":"py","file_size_in_byte":17238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"517903791","text":"#coding: utf8\n\nfrom tornado.web import RequestHandler\n\nclass BaseHandler(RequestHandler):\n\n def get_formatted_args(self):\n args = dict((_k,_v[0]) if len(_v)==1 else (_k,_v) \\\n for _k,_v in self.request.arguments.iteritems() if not _k==\"action\")\n\n return dict(args, **dict(operator=self.get_current_user()))\n","sub_path":"handler/base_handler.py","file_name":"base_handler.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"627600505","text":"from __future__ import division\n\nimport os\nimport errno\nfrom unidecode import unidecode\nimport re\nimport numpy as np\nimport scipy.sparse\n\nfrom collections import namedtuple\nfrom recordtype import recordtype\n\nimport fx\n\n\ndef normalize_attr(attr):\n return ','.join(sorted(attr.split(',')))\n\n\nText = namedtuple('Text', ['words', 'pos', 'ner'])\nSample = recordtype('Sample', ['id', 'timestamp', 'text',\n 'args', 'edits', 'y', 'features'])\nEdit = namedtuple('Edit', ['id', 'timestamp', 'relation', 'args'])\nTPRF = namedtuple('TPRF', ['threshold', 'precision', 'recall', 'f1'])\n\n\ndef normalize_infobox(s):\n s = s.lower().replace(' ', '')\n if s.startswith('infobox'):\n s = s[len('infobox'):]\n if s.startswith('_'):\n s = s[1:]\n return s\n\n\ndef text_from_json(j):\n return Text(*j)\n\n\ndef edit_from_json(j):\n id, timestamp, relation, args = j\n args = tuple(args)\n return Edit(id, timestamp, relation, args)\n\n\ndef sample_from_json(j):\n id = j['id']\n timestamp = j['timestamp']\n text = text_from_json(j['text'])\n args = tuple(j['args'])\n edits = [edit_from_json(x) for x in j['edits']]\n y = j['y']\n features = j['features']\n return Sample(id, timestamp, text, args, edits, y, features)\n\n\ndef normalize_string(s, _whitespace_pattern=re.compile(r'\\s+')):\n return _whitespace_pattern.sub(' ', unidecode(s).lower()).strip()\n\n\ndef sha1_structure(s):\n import hashlib\n import json\n return hashlib.sha1(json.dumps(s, sort_keys=True)).hexdigest()\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef gen_tprf(predictions, positive_count=None, shuffle=True, sort=True):\n predictions = predictions if not shuffle else \\\n fx.shuffle(predictions)\n predictions = predictions if not sort else \\\n sorted(predictions, key=lambda p: -p.p)\n\n N = positive_count if positive_count is not None else \\\n sum(1 for x in predictions if x.y == 1)\n\n tp = 0\n fp = 0\n fn = 0\n\n for p in predictions:\n if p.y == 1:\n tp += 1\n elif p.y == -1:\n fp += 1\n # else:\n # assert False\n\n fn = N - tp\n\n T = p.p\n P = 0.0\n if tp + fp > 0:\n P = tp / (tp + fp)\n R = 0.0\n if tp + fn > 0:\n R = tp / (tp + fn)\n F = 0.0\n if P + R > 0:\n F = 2 * P * R / (P + R)\n\n yield TPRF(\n 
threshold=T,\n precision=P,\n recall=R,\n f1=F)\n","sub_path":"cleanslate/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"511330868","text":"import json\n\nimport boto3\nimport numpy as np\nfrom joblib import load\nimport psycopg2\n\n# Configure input queue\nsqs = boto3.resource(\"sqs\")\nqueue = sqs.get_queue_by_name(QueueName=\"weather-input-queue\")\n\n# Configure output database\nconn = psycopg2.connect(host=\"weather.us-east-1.rds.amazonaws.com\") # the endpoint must be passed as host=, a bare string is not a valid libpq DSN\ncur = conn.cursor()\n\nINSERT = \"\"\"\n INSERT INTO weather_predictions (location, date, prediction)\n VALUES (%s, %s, %s)\n\"\"\"\n\n\nif __name__ == \"__main__\":\n\n model = load(\"model.joblib\")\n\n # Read all messages from the queue\n for message in queue.receive_messages():\n\n # Get and format input data\n input_data = json.loads(message.body)\n formatted_input = np.array(input_data[\"features\"]).reshape(1, -1) # the model expects a 2-D array of samples\n\n prediction = model.predict(formatted_input)\n\n # Insert prediction into the database\n cur.execute(INSERT, (input_data[\"location\"], input_data[\"date\"], float(prediction[0]))) # predict returns an array, store the scalar\n conn.commit()\n\n # Let the queue know that the message is processed\n message.delete()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"440238718","text":"def timeConversion(s):\n hour = int(s[:2])\n\n if hour == 12:\n if 'AM' in s:\n hour = 0\n else:\n hour=12\n elif 'PM' in s:\n hour +=12\n\n hour='{:02}'.format(hour)\n\n return hour+s[2:-2]\n\n\nprint(timeConversion('01:05:45PM'))\n\n","sub_path":"Time Conversion.py","file_name":"Time Conversion.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"291721381","text":"x=0\ndef magic():\n\tglobal x\n\t\n\twhile True:\n\t\tprint()\n\t\tch=input('Enter your number or quit for exit ')\n\t\tif ch == 'quit':\n\t\t\tbreak\n\t\telse:\n\t\t\ty=int(ch)\n\t\t\tx+=y\n\t\t\tprint('The total is = ',x)\n\t\t\nmagic()\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"180203976","text":"import os\nimport subprocess\nimport distutils.dir_util\nimport glob\nimport shutil\n\n# Run \"make html\" in cmd to produce the documentation\nsubprocess.run('make html', shell=True, check=True)\nprint('Ran process \"make html\" in cmd')\n\n# Get the source and destination paths\nfilepath = os.path.dirname(os.path.realpath(__file__))\nsource = filepath + '\\\\_build\\\\html\\\\'\ndestination = os.path.abspath(os.path.join(filepath, '..', 'docs')) + '/'\n\n# Loop through existing files in destination and delete first\nprint('Deleting existing GitHub pages files')\nfiles = glob.glob(destination + '*')\nfor f in files:\n if not '_config.yml' in f:\n try:\n os.remove(f)\n except PermissionError:\n # string is probably a folder\n shutil.rmtree(f)\n\n# Copy the content of source to destination\nprint('Copying contents to GitHub pages docs directory')\ndistutils.dir_util.copy_tree(source, destination)\nprint('Process finished')","sub_path":"documentation/make_html.py","file_name":"make_html.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"147716188","text":"#!/usr/bin/env python3\n#\n#\n# Enable I2C on P1 and P5 (Rev 2 boards only)\n# #######\nimport os\nimport mmap\nimport struct\n\ndef get_dt_ranges(filename, offset):\n\tf = open(filename , \"rb\")\n\tf.seek(offset,1)\n\tdata = f.read(4)\n\tbuf = struct.unpack('BBBB', data)\n\taddress = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]<<0\n\tf.close()\n\treturn address\n\ndef bcm_host_get_peripheral_address():\n\taddr = get_dt_ranges(\"/proc/device-tree/soc/ranges\", 4)\n\tif addr == 0:\n\t\taddr = get_dt_ranges(\"/proc/device-tree/soc/ranges\", 8)\n\tif addr == 0:\n\t\treturn 0x20000000\n\telse:\n\t\treturn addr\n\nBCM2708_PERI_BASE = bcm_host_get_peripheral_address()\n\nGPIO_BASE=(BCM2708_PERI_BASE + 0x00200000)\nBLOCK_SIZE=4096\n\ndef _strto32bit_(str):\n\treturn ((str[3])<<24) + ((str[2])<<16) + ((str[1])<<8) + ((str[0]))\n\ndef _32bittostr_(val):\n\tval1 = bytes((chr(val&0xff) + chr((val>>8)&0xff) + chr((val>>16)&0xff) + chr((val>>24)&0xff)), encoding='UTF-8')\n\treturn val1\n\ndef get_revision():\n\twith open('/proc/cpuinfo') as lines:\n\t\tfor line in lines:\n\t\t\tif line.startswith('Revision'):\n\t\t\t\treturn int(line.strip()[-4:],16)\n\traise RuntimeError('No revision found.')\n\ndef i2cConfig():\n\tif get_revision() <= 3:\n\t\tprint(\"Rev 2 or greater Raspberry Pi required.\")\n\t\treturn\n\t# Use /dev/mem to gain access to peripheral registers\n\tmf=os.open(\"/dev/mem\", os.O_RDWR|os.O_SYNC)\n\tm = mmap.mmap(mf,BLOCK_SIZE, mmap.MAP_SHARED,\n\t\tmmap.PROT_READ|mmap.PROT_WRITE,offset=GPIO_BASE)\n\t# can close the file after we have mmap\n\tos.close(mf)\n\t# Read function select registers\n\t# GPFSEL0 -- GPIO 0,1 I2C0 GPIO 2,3 I2C1\n\tm.seek(0)\n\treg0=_strto32bit_(m.read(4))\n\t# GPFSEL2 -- GPIO 28,29 I2C0\n\tm.seek(8)\n\treg2=_strto32bit_(m.read(4))\n\tm0 = 0b00000000000000000000111111111111\n\ts0 = 0b00000000000000000000100100000000\n\tb0 = reg0 & m0\n\tif b0 != s0:\n\t\tprint(\"reg0 I2C configuration not correct. Updating.\")\n\t\treg0 = (reg0 & ~m0) | s0\n\t\tm.seek(0)\n\t\tm.write(_32bittostr_(reg0))\n\n\t# GPFSEL2 bits --> x[2] SCL0[3] SDA0[3] x[24]\n\t#m2 = 0b00111111000000000000000000000000\n\t#s2 = 0b00100100000000000000000000000000\n\t#b2 = reg2 & m2\n\t#print(\"CONTINUE ************************8\")\n\t#if b2 != s2:\n\t# print( \"reg2 I2C configuration not correct. 
Updating.\")\n\t# reg2 = (reg2 & ~m2) | s2\n\t# m.seek(8)\n\t# m.write(_32bittostr_(reg2))\n\n\tm.close()\n","sub_path":"boss2_oled_p3/Boss2_Hardware/I2CConfig.py","file_name":"I2CConfig.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"505772695","text":"import json\nfrom urllib.parse import urlencode\n\nimport falcon\nfrom falcon.testing import TestCase\nfrom sqlalchemy.orm import scoped_session\n\nfrom app import app\nfrom core.db.session import Session\n\n\nScopedSession = scoped_session(Session)\n\n\nclass BaseDBTestCase(TestCase):\n \"\"\"Assign Session to TestCase.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.db_session = ScopedSession()\n\n\nclass BaseApiTestCase(BaseDBTestCase):\n \"\"\"Prepare helpers to simulate API requests.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.app = app\n\n self.request_headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n\n self.request_methods = {\n 'DELETE': self.simulate_delete,\n 'GET': self.simulate_get,\n 'POST': self.simulate_post,\n 'PUT': self.simulate_put,\n 'PATCH': self.simulate_patch,\n }\n\n def _request_method(self, method_name, path, status, headers, body=None, params=None):\n if headers:\n self.request_headers.update(headers)\n\n request_method = self.request_methods[method_name]\n\n request_body = json.dumps(body, ensure_ascii=False) if body else None\n request_params = urlencode(params, safe=',') if params else None\n\n response = request_method(\n path,\n body=request_body,\n headers=self.request_headers,\n query_string=request_params,\n )\n self.assertEqual(response.status, status, response.content)\n\n return response\n\n def request_get(self, path, params=None, status=falcon.HTTP_200, headers=None):\n return self._request_method('GET', path, status, headers, params=params)\n\n def request_post(self, path, body=None, params=None, status=falcon.HTTP_200, headers=None):\n return self._request_method('POST', path, status, headers, body, params)\n\n def request_patch(self, path, body=None, status=falcon.HTTP_200, headers=None):\n return self._request_method('PATCH', path, status, headers, body)\n\n def request_delete(self, path, status=falcon.HTTP_204, headers=None):\n return self._request_method('DELETE', path, status, headers)\n\n def request_put(self, path, body, status=falcon.HTTP_200, headers=None):\n return self._request_method('PUT', path, status, headers, body)\n","sub_path":"api/core/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"194595456","text":"def gabungkanDuaListUrut(A, B):\r\n la = len(A) ; lb = len(B)\r\n C = list() #C is a new list\r\n i = 0; j = 0\r\n\r\n #Merge the two until one of them is exhausted\r\n while i < la and j < lb:\r\n if A[i] < B[j]:\r\n C.append(A[i])\r\n i += 1\r\n else:\r\n C.append(B[j])\r\n j += 1\r\n \r\n while i < la: #if A still has elements left\r\n C.append(A[i]) # append them to the new list\r\n i += 1 # one by one\r\n\r\n while j < lb: #if B still has elements left\r\n C.append(B[j]) # append them to the new list\r\n j += 1 # one by one\r\n return C\r\n \r\n","sub_path":"MODUL_6/latihan1.py","file_name":"latihan1.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"36542817","text":"import os\nimport base64\nfrom emailnotifier import EmailNotifier\nfrom telegramnotifier import TelegramNotifier\n\ndef get_elector_address(t):\n # Elector address\n res, out = t.getconfig(1)\n if not res:\n print(out)\n return None\n elector_addr = '-1:' + out\n return elector_addr\n\ndef get_awaiting_transactions(t, msig_addr, dest, abi):\n '''\n Get transactions list\n '''\n res, out = t.getTransactions(msig_addr, abi)\n if not res:\n print(out)\n return None\n transactions = []\n try:\n for i in out['transactions']:\n if dest is None or i['dest'] == dest:\n transactions += [i['id']]\n except:\n return None\n\n return transactions\n\ndef confirm_transactions_to_elector(t, msig_addr, keyfile, abi, try_num=30, printl=print):\n elector_addr = get_elector_address(t)\n if elector_addr is None:\n return [], []\n\n # check whether there are transactions to the elector awaiting confirmation\n transactions = get_awaiting_transactions(t, msig_addr, elector_addr, abi)\n if transactions is None:\n return [], []\n\n if len(transactions) == 0:\n return [], []\n\n printl('Found unconfirmed transactions: %s' % str(transactions))\n\n confirmed = []\n unconfirmed = []\n\n for i in transactions:\n for n in range(try_num):\n printl('Try %d Confirming %s' % (n+1, i))\n res, out = t.confirmTransaction(msig_addr, i, abi, keyfile)\n if res:\n printl('Success')\n confirmed.append(i)\n break\n else:\n printl(out)\n if i not in confirmed:\n printl('Failed')\n unconfirmed.append(i)\n\n return confirmed, unconfirmed\n\n\ndef request_reward(t, fift, recover_script, msig_addr, elector_addr, abi, keyfile, try_num=30):\n fn = fift.get_tempfile_name('recover-query.boc')\n res, out = fift.run(recover_script, fn)\n if not res:\n print(out)\n return None\n try:\n with open(fn, 'rb') as f:\n recover_request = base64.b64encode(f.read()).decode(\"utf-8\")\n os.remove(fn)\n except:\n return None\n\n for n in range(try_num):\n res, out = t.transfer(msig_addr, elector_addr, 1000000000, True, False, recover_request, abi, keyfile)\n if not res or 'transId' not in out:\n print(out)\n continue\n return out['transId']\n return None\n\nimport tempfile\n\ndef save_atomic(filename, data):\n try:\n with tempfile.NamedTemporaryFile('w', dir=os.path.dirname(filename), delete=False) as tf:\n tf.write(data)\n tempname = tf.name\n os.rename(tempname, filename)\n return True\n except:\n return False\n\ndef notify_custodians(custodians, msg, notify_conf, email_conf):\n for c in custodians:\n if c in notify_conf:\n nn = notify_conf[c]\n if nn['type'] == 'telegram':\n try:\n telegram = TelegramNotifier(nn['params']['token'], nn['params']['chat_ids'])\n telegram.send(msg)\n except:\n pass\n elif nn['type'] == 'email':\n try:\n email = EmailNotifier(email_conf['login'], email_conf['password'], email_conf['smtp'],\n email_conf['port'])\n email.send(nn['params']['address'], msg)\n except:\n pass\n\ndef notify_owner(msg, notify_conf, email_conf):\n if not 'owner' in notify_conf:\n return\n nn = notify_conf['owner']\n if not 'type' in nn:\n return\n if nn['type'] == 'telegram':\n try:\n telegram = TelegramNotifier(nn['params']['token'], nn['params']['chat_ids'])\n telegram.send(msg)\n except:\n pass\n elif nn['type'] == 'email':\n try:\n email = EmailNotifier(email_conf['login'], email_conf['password'], email_conf['smtp'],\n email_conf['port'])\n email.send(nn['params']['address'], msg)\n except:\n pass\n\n\ndef check_participant_list(t, elector_addr, public_key):\n res, out = t.runget(elector_addr, 'participant_list')\n if not res:\n print(out)\n return None\n try:\n node = out[0]\n while 
True:\n participant = node[0]\n addr_int = int(participant[0], 0)\n if addr_int == public_key:\n stake = int(participant[1], 0)\n return stake\n if node[1] is None:\n break\n node = node[1]\n except Exception as e:\n pass\n\n return 0\n\ndef get_custodians(t, addr, abi):\n res, out = t.getCustodians(addr, abi)\n if not res or 'custodians' not in out:\n print(out)\n return None\n custodians = []\n for i in out['custodians']:\n if 'pubkey' not in i:\n continue\n custodians.append(i['pubkey'])\n\n return custodians\n\n\ndef make_and_check_path(path_conf, key, path):\n if key not in path_conf:\n path_conf[key] = path\n if not os.path.isfile(path_conf[key]):\n print('Cannot find key in %s' % path_conf[key])\n return False\n return True\n\ndef check_config(config, keymaybeseed=False):\n try:\n path_conf = config['path']\n user_conf = config['wallet']\n notify_conf = config['notifications']\n email_conf = config['email']\n except:\n return None, None, None, None\n\n if 'repo' in path_conf and os.path.isdir(path_conf['repo']):\n repo = path_conf['repo']\n else:\n print('Please specify correct repo path')\n return None, None, None, None\n\n if 'keysdir' in path_conf and os.path.isdir(path_conf['keysdir']):\n keysdir = path_conf['keysdir']\n else:\n print('Please specify correct ton-keys path')\n return None, None, None, None\n\n if not 'election_folder' in path_conf:\n path_conf['election_folder'] = './'\n\n if not make_and_check_path(path_conf, 'tonos-cli', repo + '/ton/build/utils/tonos-cli') or \\\n not make_and_check_path(path_conf, 'validator-engine-console', repo + '/ton/build/validator-engine-console/validator-engine-console') or \\\n not make_and_check_path(path_conf, 'client_key', keysdir + '/client') or \\\n not make_and_check_path(path_conf, 'server_pub_key', keysdir + '/server.pub') or \\\n not make_and_check_path(path_conf, 'fift', repo + '/ton/build/crypto/fift') or \\\n not make_and_check_path(path_conf, 'abi', repo + '/configs/SafeMultisigWallet.abi.json'):\n return None, None, None, None\n\n if not keymaybeseed and not make_and_check_path(user_conf, 'keyfile', keysdir + '/msig.keys.json'):\n return None, None, None, None\n\n if not 'server_url' in path_conf:\n path_conf['server_url'] = '127.0.0.1:3030'\n\n if not 'fift_includes' in path_conf:\n path_conf['fift_includes'] = '%s/ton/crypto/fift/lib:%s/ton/crypto/smartcont' % (repo, repo)\n\n if not 'validator-elect-req' in path_conf:\n path_conf['validator-elect-req'] = 'validator-elect-req.fif'\n\n if not 'validator-elect-signed' in path_conf:\n path_conf['validator-elect-signed'] = 'validator-elect-signed.fif'\n\n if not 'recover-stake' in path_conf:\n path_conf['recover-stake'] = 'recover-stake.fif'\n\n if not 'msig_addr' in user_conf:\n import socket\n addr = keysdir + '/' + socket.gethostname() + '.addr'\n try:\n with open(addr, 'r') as f:\n user_conf['msig_addr'] = f.read().replace('\\n', '')\n except:\n print('Cannot read %s' % addr)\n return None, None, None, None\n\n return path_conf, user_conf, notify_conf, email_conf\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"501580984","text":"import asyncio\n\nimport requests\n\n\nasync def get_file(file_id: int):\n response = requests.get(f'http://localhost:5000/test/{file_id}')\n return response.json()\n\n\ndef get_data_async():\n async def get_data():\n async def get_tasks():\n tasks = [get_file(i) for i in range(1, 4)]\n return await 
asyncio.gather(*tasks)\n\n return await get_tasks()\n\n loop = asyncio.new_event_loop()\n\n results = loop.run_until_complete(asyncio.wait_for(get_data(), timeout=2.0))\n loop.close()\n\n data = []\n for r in results:\n data += [*r['data']]\n return sorted(data, key=lambda x: x['id'])\n","sub_path":"async_data.py","file_name":"async_data.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"465350932","text":"from pymongo import MongoClient, DESCENDING\nfrom dateutil import parser\nfrom datetime import datetime\nfrom pytz import timezone\nimport os, sys, MeCab, json, re, glob\n\nr_men = re.compile('@(\\w)+\\s')\nr_kigou = re.compile('[!\"#$%&\\'\\\\\\\\()*+,-./:;<=>?@[\\\\]^_`{|}~「」〔〕“”〈〉『』【】&*・()$#@。、?!`+¥%]')\nr_url = re.compile('http(s)?://t.co/\\w+')\n\nsakura = set(['桜', 'さくら', 'サクラ'])\n\ndef setup_mecab():\n mecab = MeCab.Tagger('-d /now24/a.saito/local/mecab/lib/mecab/dic/mecab-ipadic-neologd')\n mecab.parse('')\n print('mecab ready')\n return mecab\n\ndef setup_mongo():\n connection = MongoClient()\n db = connection['2014_sakura_twi_1208']\n print('mongoDB ready')\n return db\n\ndef morpho_text(text, mecab):\n morpho_text = []\n node = mecab.parseToNode(text).next\n while node.next:\n feature = node.feature.split(',')[0]\n if feature in ['名詞', '動詞']:\n if node.feature.split(\",\")[6] == '*':\n word = node.surface\n else:\n word = node.feature.split(\",\")[6]\n morpho_text.append(word)\n node = node.next\n morpho_text = list(set(morpho_text))\n return morpho_text\n\ndef text_cleaning(s):\n s = r_men.sub('', s)\n s = r_url.sub('', s)\n s = r_kigou.sub('', s)\n return s\n\ndef insert(line, p, c, mecab):\n jsonline = re.sub('^\\d*\\t', '', line)\n try:\n textline = json.loads(jsonline)\n pname = textline['reverse_geo']['pname']\n if pname == p:\n text = text_cleaning(textline['text'])\n morpho = morpho_text(text, mecab)\n created_at = textline['created_at']\n sakura_twi = 1 if (len(sakura & set(morpho)) > 0) else 0\n insert_line = {\n 'pname' : pname,\n 'text' : text,\n 'morpho_text' : ' '.join(morpho),\n 'sakura_twi' : sakura_twi,\n 'created_at': created_at,\n 'created_at_iso' : parser.parse(created_at).astimezone(timezone('Asia/Tokyo')).isoformat()\n }\n c.insert_one(insert_line)\n except Exception:\n pass\n return\n\ndef main():\n mecab = setup_mecab()\n db = setup_mongo()\n\n d = {\n 'hk': {\n 'pname': '北海道',\n 'file_lst': ['0429', '0430', '0501']\n },\n 'is': {\n 'pname': '石川県',\n 'file_lst': ['0401', '0402', '0403', '0404', '0405', '0406', '0407']\n },\n 'tk': {\n 'pname': '東京都',\n 'file_lst': ['0325', '0326', '0327', '0328', '0329', '0330']\n }\n }\n\n for key in d:\n for date in d[key]['file_lst']:\n col = db['season_' + key]\n month, day = date[:2], date[2:]\n filename = '/now24/a.saito/data_2014/2014-' + month + '/json_rg_2014-' + month + '-' + day + '.txt'\n with open(filename, 'r') as f:\n print('##### insert ' + filename + ' ...')\n line = f.readline()\n while line:\n insert(line, d[key]['pname'], col, mecab)\n line = f.readline()\n\nmain()\n","sub_path":"script/old/exp/insert_season.py","file_name":"insert_season.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"133228221","text":"#!/usr/bin/python\n\n# Import the required modules\nimport cv2, os\nimport numpy as np\nfrom PIL import Image\nimport sys\n\nlength=len(sys.argv)\nif length < 3: # need two arguments (dataPath, classPath), otherwise fall back to defaults\n dataPath = \"subjects\"\n classPath = 
\"Data.xml\"\nelse:\n #cascPath = sys.argv[1]\n dataPath = sys.argv[1]\n classPath = sys.argv[2]\n\n# For face detection we will use the Haar Cascade provided by OpenCV.\n\ncascadePath = \"haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascadePath)\n\n# For face recognition we will the the LBPH Face Recognizer \nrecognizer = cv2.face.createLBPHFaceRecognizer()\n\ndef get_images_and_labels(path):\n # Append all the absolute image paths in a list image_paths\n # We will not read the image with the .sad extension in the training set\n # Rather, we will use them to test our accuracy of the training\n image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.startswith('.')]\n # images will contains face images\n images = []\n # labels will contains the label that is assigned to the image\n labels = []\n for image_path in image_paths:\n # Read the image and convert to grayscale\n image_pil = Image.open(image_path).convert('L')\n # Convert the image format into numpy array\n image = np.array(image_pil, 'uint8')\n # Get the label of the image\n nbr = int(os.path.split(image_path)[1].split(\".\")[0].replace(\"face-\", \"\"))\n # Detect the face in the image\n faces = faceCascade.detectMultiScale(image)\n # If face is detected, append the face to images and the label to labels\n for (x, y, w, h) in faces:\n images.append(image)\n labels.append(nbr)\n #cv2.imshow(\"Adding faces to traning set...\", image[y: y + h, x: x + w])\n cv2.waitKey(50)\n # return the images list and labels list\n return images, labels\n\n# Path to the Dataset\npath = dataPath\n# Call the get_images_and_labels function and get the face images and the \n# corresponding labels\nimages, labels = get_images_and_labels(path)\ncv2.destroyAllWindows()\n\n# Perform the tranining\nrecognizer.train(images, np.array(labels)) # train\nrecognizer.save(classPath) # save recognizer training\n#recognizer.load('PeopleIKnow.xml')\n# python face_train.py testsubjects Data.xml\n\n","sub_path":"FaceLog/face_train.py","file_name":"face_train.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"331356072","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom FHQVisualizationLib import FHQVisualizationLib\nfrom time import sleep\n\nhost=\"localhost\"\nport=31001\n\nwith FHQVisualizationLib(host, port) as fhq:\n\twhile True:\n\t\tfhq.showthreelock(\"keva\")\n\t\tsleep(3)\n\t\tfhq.showthreelock(\"mustang\")\n\t\tsleep(3)\n\n","sub_path":"client/showthreelock.py","file_name":"showthreelock.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"454174136","text":"import os\nimport csv\nimport numpy as np\nimport shutil\nfrom time import time\n\n#============================Load Data=======================\n#Remove flipped files folder\npath_prefix = './my_data/'\ntry:\n shutil.rmtree(path_prefix + 'flipped/IMG/')\nexcept:\n print(\"no folder for flipped images was found\")\n\n#Create folder to store flipped images\ntry: \n os.mkdir(path_prefix + 'flipped/IMG/') \nexcept OSError: \n print (\"Creation of the directory %s failed\" % path_prefix + 'flipped/IMG/')\n\n#Load data from csv file\nsamples = []\nwith open(path_prefix + 'driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n line[0] = 
line[0].strip().replace('/home/kiwibot/jdgalviss/Self_driving_cars_tools/15Project_BehavioralCloning/CarND-Behavioral-Cloning-P3/my_data/','')\n line[1] = line[1].strip().replace('/home/kiwibot/jdgalviss/Self_driving_cars_tools/15Project_BehavioralCloning/CarND-Behavioral-Cloning-P3/my_data/','')\n line[2] = line[2].strip().replace('/home/kiwibot/jdgalviss/Self_driving_cars_tools/15Project_BehavioralCloning/CarND-Behavioral-Cloning-P3/my_data/','')\n samples.append(line)\nsamples.pop(0)\nprint('Data Loaded')\nprint(\"number of samples: \", np.array(samples).shape)\n\n#============================Generate flipped images=======================\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport cv2\n\n#Test flipping\nimg=mpimg.imread(path_prefix + samples[0][0])\nimage_flipped = np.fliplr(img)\n\n#Flip and save images to folder\nfor sample in samples:\n img=mpimg.imread(path_prefix + sample[0])\n image_flipped = np.fliplr(img)\n cv2.imwrite(path_prefix + 'flipped/' + sample[0], cv2.cvtColor(image_flipped, cv2.COLOR_BGR2RGB))\nprint('Flipped Images saved')\n\n#============================Divide data in train and test samples======================\nfrom sklearn.model_selection import train_test_split\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\nprint('Data Splitted')\nprint(\"training samples: \", np.array(train_samples).shape)\nprint(\"validation samples: \", np.array(validation_samples).shape)\n\n#============================Augment Data by using left and right images=======================\nsteering_correction_factor = 0.15\nimg_paths_train = []\nangles_train = []\n\nfor sample in train_samples:\n steering_center = float(sample[3])\n \n # create adjusted steering measurements for the side camera images\n steering_left = steering_center + steering_correction_factor\n steering_right = steering_center - steering_correction_factor\n \n # define center, right and left paths\n img_paths_train.append(path_prefix + sample[0]) # center\n img_paths_train.append(path_prefix + sample[1]) # left\n img_paths_train.append(path_prefix + sample[2]) # right\n angles_train.append(steering_center)\n angles_train.append(steering_left)\n angles_train.append(steering_right)\nprint('Data Augmented: Left and Right') \nprint(\"number of samples: \", np.array(img_paths_train).shape)\nprint(\"number of samples: \", np.array(angles_train).shape)\n\n#============================Augment Data using flipped images=======================\nfor sample in train_samples:\n img_paths_train.append(path_prefix+'flipped/' + sample[0].strip())\n angles_train.append(-float(sample[3]))\n #print(sample[1])\nprint('Data Augmented flipped')\nprint(\"number of samples: \", np.array(img_paths_train).shape)\nprint(\"number of samples: \", np.array(angles_train).shape)\nprint(img_paths_train[0])\n\n#=================Put Validation data into img_path and angles lists==================\n# Validation imgs - create vectors\nimg_paths_validation = []\nangles_validation = []\nfor sample in validation_samples:\n img_paths_validation.append(path_prefix + sample[0].strip())\n angles_validation.append(float(sample[3]))\nprint('Validation Data') \nprint(\"number of samples: \", np.array(img_paths_validation).shape)\nprint(\"number of samples: \", np.array(angles_validation).shape)\n\n#======================Generator for image processing=================\nimport sklearn\nfrom sklearn.utils import shuffle\ndef generator(images_path, angles, batch_size=32):\n num_samples = 
len(images_path)\n while 1: # Loop forever so the generator never terminates\n images_path, angles = shuffle(images_path, angles)\n for offset in range(0, num_samples, batch_size):\n batch_images = images_path[offset:offset+batch_size]\n batch_angles = angles[offset:offset+batch_size]\n \n images = []\n angles_batch = []\n for batch_image, angle in zip(batch_images, batch_angles):\n center_image = mpimg.imread(batch_image)\n center_angle = float(angle)\n images.append(center_image)\n angles_batch.append(center_angle)\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles_batch)\n yield sklearn.utils.shuffle(X_train, y_train)\n \n# Set our batch size\nbatch_size=32\n\n# compile and train the model using the generator function\ntrain_generator = generator(img_paths_train, angles_train, batch_size=batch_size)\nvalidation_generator = generator(img_paths_validation, angles_validation, batch_size=batch_size)\n\n#=======================Model architecture===============\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import BatchNormalization, Lambda, Cropping2D, MaxPooling2D, Conv2D, Dense, Activation, Flatten, Dropout\nfrom tensorflow.keras.callbacks import TensorBoard\n\nch, row, col = 3, 75, 320 # Trimmed image format\nkeep_prob = 0.5\ndef pilotNet(train = True, keep_prob = 0.5):\n model = Sequential()\n #------------Preprocess incoming data------------\n # Crop Image\n model.add(Cropping2D(cropping=((65,20), (0,0)), input_shape=(160,320,3)))\n #centered around zero with small standard deviation \n model.add(Lambda(lambda x: x/127.5 - 1.,\n input_shape=(row, col, ch),\n output_shape=(row, col, ch))) # Keras is channels-last, so shapes are (rows, cols, channels)\n #--------------Model Architecture: PilotNet-------------------\n model.add(Conv2D(32, (3, 3), input_shape=(32, 32, 3)))\n\n model.add(Conv2D(24, (5, 5), strides = 2))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(rate = keep_prob))\n \n model.add(Conv2D(36, (5, 5), strides = 2))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(rate = keep_prob))\n\n model.add(Conv2D(48, (5, 5), strides = 2))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(rate = keep_prob))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(rate = keep_prob))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(rate = keep_prob))\n \n model.add(Flatten())\n model.add(Dense(200))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(rate = keep_prob))\n model.add(Dense(1))\n return model\nmodel = pilotNet()\n\n#============================Train model=======================\nfrom math import ceil\nmodel.compile(loss='mse', optimizer='adam')\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\nhistory_object = model.fit_generator(train_generator,\n steps_per_epoch=ceil(len(img_paths_train)/batch_size),\n validation_data=validation_generator,\n validation_steps=ceil(len(img_paths_validation)/batch_size),\n epochs=4, verbose=2, callbacks=[tensorboard])\n\n#============================Save model=======================\nmodel.save('model_mine3.h5')\nprint('model saved')\n\n\n#============================Plot=======================\n### print the keys contained in the history object\nprint(history_object.history.keys())\n\n### plot the training and validation loss for each epoch\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"82170633","text":"import pandas as pd\n\n# can we create a week day series like this? 1 - Sunday\n# # from dict\ndata = {1: 'Sun', 2: 'Mon', 3: 'Tues'}\nseries = pd.Series(data)\nprint(series)\n\n\n# DataFrame from a list\na = [11, 12, 13, 14]\n\ndf = pd.DataFrame(a)\nprint(df)\nprint(type(df))","sub_path":"Module_4/Module_4 practice files/Module_4/3. pandas_examples/slide_55.py","file_name":"slide_55.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"624098827","text":"from selenium import webdriver\nimport time\nimport pytest\nimport allure\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom webdriver_manager.firefox import GeckoDriverManager\n\n\nclass TestDrivers2():\n\n @allure.severity(allure.severity_level.CRITICAL)\n @pytest.mark.run(order=6)\n def test_setUp(self):\n global driver\n driver = webdriver.Chrome(ChromeDriverManager().install())\n #driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n driver.implicitly_wait(10)\n\n @allure.severity(allure.severity_level.CRITICAL)\n @pytest.mark.run(order=7)\n def test_valid_login2(self):\n driver.get(\"http://rentvehicles.multicompetition.com/login\")\n enterEmail = driver.find_element(By.ID, 'email')\n enterEmail.send_keys(\"admin@gmail.com\")\n time.sleep(2)\n\n enterPassword = driver.find_element(By.ID, 'password')\n enterPassword.send_keys(\"admin@123\")\n\n enterLoginBtn = driver.find_element_by_id(\"btnLogin\")\n enterLoginBtn.click()\n time.sleep(3)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=8)\n def test_navigate_driver_section2(self):\n driver.find_element_by_xpath(\"/html/body/div[1]/aside[1]/div/div[4]/div/div/nav/ul/li[2]/a\").click()\n time.sleep(3)\n driver.find_element(By.XPATH, \"//p[contains(text(),'Register Drivers')]\").click()\n time.sleep(2)\n\n @allure.severity(allure.severity_level.CRITICAL)\n @pytest.mark.run(order=9)\n def test_fill_formSection1(self):\n driverName = driver.find_element_by_xpath(\"//input[@name='name']\")\n driverName.send_keys(\"aadil\")\n\n\n phoneNumber = driver.find_element(By.NAME, 'mobile_number')\n phoneNumber.send_keys(\"0528542762\")\n\n emailID = driver.find_element(By.XPATH, \"//input[@name='email']\")\n emailID.send_keys(\"adil16@gmail1.com\")\n #emailID.send_keys(email)\n\n password = driver.find_element_by_xpath(\"//input[@name='password']\")\n password.send_keys(\"12346556\")\n\n enterNIC = driver.find_element_by_xpath(\"//input[@name='nic']\")\n enterNIC.send_keys(\"9200122134V\")\n\n @allure.severity(allure.severity_level.MINOR)\n @pytest.mark.timeout(10)\n @pytest.mark.run(order=10)\n def test_upload_licenece_copy_file(self):\n uploadFile = driver.find_element_by_id(\"name\")\n uploadFile.send_keys(\n \"C://Users//fathih//PycharmProjects//RentVehicles//driver_registration_flow//Image//python.png\")\n time.sleep(3)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.skip(reason=\"licence backcopy is not mandatory\")\n @pytest.mark.run(order=11)\n def test_upload_licenece_backcopy_file(self):\n uploadFile2 = driver.find_element_by_name(\"licence_copy\")\n uploadFile2.send_keys(\n \"C://Users//fathih//PycharmProjects//RentVehicles//driver_registration_flow//Image//python.png\")\n time.sleep(3)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=12)\n def test_enter_vehicle_number(self):\n enterVehicleNumber = driver.find_element_by_xpath(\"//input[@name='vehicle_number']\")\n enterVehicleNumber.send_keys(\"6655663265\")\n time.sleep(3)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=13)\n def test_vehicle_owner_radiobtn(self):\n element = driver.find_element_by_css_selector(\"input.is_vehicle_owner:nth-child(4)\")\n driver.execute_script(\"arguments[0].click();\", element)\n time.sleep(3)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=14)\n def test_select_vehicle_ownername(self):\n element1 = driver.find_element_by_xpath(\"//select[@name='owner_id']\")\n sel = Select(element1)\n sel.select_by_index(2)\n time.sleep(3)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=15)\n def test_select_vehicle_type(self):\n element2 = driver.find_element_by_xpath(\"//select[@name='vehicle_type_id']\")\n sel2 = Select(element2)\n sel2.select_by_index(2)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=16)\n def test_upload_vehicle_picture(self):\n uploadFile = driver.find_element_by_xpath(\"//input[@name='vehicle_picture']\")\n uploadFile.send_keys(\n \"C://Users//fathih//PycharmProjects//RentVehicles//driver_registration_flow//Image//python.png\")\n time.sleep(2)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=17)\n def test_enter_engine_number(self):\n engine_number = driver.find_element_by_xpath(\"//input[@name='engine_number']\")\n engine_number.send_keys(\"EP-HQ8165\")\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=18)\n def test_enter_chassis_number(self):\n engine_number = driver.find_element_by_xpath(\"//input[@name='chassis_number']\")\n engine_number.send_keys(\"EP-HQ8165645\")\n\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.timeout(19)\n @pytest.mark.run(order=18)\n def test_upload_vehicle_regcopy(self):\n uploadFile = driver.find_element_by_xpath(\"//input[@name='vehicle_registration_copy']\")\n uploadFile.send_keys(\n \"C://Users//fathih//PycharmProjects//RentVehicles//driver_registration_flow//Image//python.png\")\n time.sleep(2)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.timeout(10)\n @pytest.mark.run(order=20)\n def test_upload_driver_photo(self):\n uploadFile = driver.find_element_by_xpath(\"//input[@name='photo']\")\n uploadFile.send_keys(\n \"C://Users//fathih//PycharmProjects//RentVehicles//driver_registration_flow//Image//python.png\")\n time.sleep(2)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=21)\n def test_upload_driver_photo2(self):\n uploadFile = driver.find_element(By.XPATH, \"//input[@name='photo']\")\n uploadFile.send_keys(\n \"C://Users//fathih//PycharmProjects//RentVehicles//driver_registration_flow//Image//python.png\")\n time.sleep(2)\n\n @allure.severity(allure.severity_level.NORMAL)\n 
@pytest.mark.run(order=22)\n def test_parking_location(self):\n uploadFile = driver.find_element(By.XPATH, \"//input[@name='parking_location']\")\n uploadFile.send_keys(\"Abu Dhabi parking-01\")\n time.sleep(2)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=23)\n def test_prefer_timingfrom(self):\n uploadFile = driver.find_element(By.XPATH, \"//input[@name='prefer_time_from']\")\n uploadFile.send_keys(\"12:10 PM\")\n time.sleep(2)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=24)\n def test_prefer_timingfrom2(self):\n uploadFile = driver.find_element(By.XPATH, \"//input[@name='prefer_time_to']\")\n uploadFile.send_keys(\"5:00 PM\")\n time.sleep(2)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=25)\n def test_prefer_location(self):\n prefer_location = driver.find_element_by_xpath(\"//select[@name='prefer_location_to_hire']\")\n select = Select(prefer_location)\n select.select_by_index(0)\n time.sleep(3)\n\n @allure.severity(allure.severity_level.NORMAL)\n @pytest.mark.run(order=26)\n def test_submit_details(self):\n element = driver.find_element_by_id(\"submitBtn\")\n driver.execute_script(\"arguments[0].click();\", element)\n time.sleep(10)\n\n\n def test_tearDown(self):\n driver.quit()","sub_path":"allure_report_demo/test_enter_details.py","file_name":"test_enter_details.py","file_ext":"py","file_size_in_byte":7845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"419588927","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n conpaas.core.iaas\n =================\n\n ConPaaS core: get cloud objects.\n\n :copyright: (C) 2010-2013 by Contrail Consortium.\n\"\"\"\n\ndef get_cloud_instance(cloud_name, cloud_type, iaas_config):\n if cloud_type == 'opennebula':\n from .clouds.opennebula import OpenNebulaCloud\n return OpenNebulaCloud(cloud_name, iaas_config)\n elif cloud_type == 'ec2':\n from .clouds.ec2 import EC2Cloud\n return EC2Cloud(cloud_name, iaas_config)\n elif cloud_type == 'openstack':\n from .clouds.openstack import OpenStackCloud\n return OpenStackCloud(cloud_name, iaas_config)\n elif cloud_type == 'dummy':\n from .clouds.dummy import DummyCloud\n return DummyCloud(cloud_name, iaas_config)\n elif cloud_type == 'federation':\n # ConPaaS running in federation mode\n pass\n raise Exception('Cannot get_cloud_instance(%s, %s): No module found for cloud_type %s' % (cloud_name, cloud_type, cloud_type) )\n\n\ndef get_clouds(iaas_config):\n '''Parses the config file containing the clouds'''\n return [get_cloud_instance(cloud_name,\n iaas_config.get(cloud_name, 'DRIVER').lower(),\n iaas_config)\n for cloud_name in iaas_config.get('iaas', 'OTHER_CLOUDS').split(',')\n if iaas_config.has_section(cloud_name)]\n","sub_path":"conpaas-director/cpsdirector/iaas/iaas.py","file_name":"iaas.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"16801757","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.covariance import EllipticEnvelope\nfrom xgboost import XGBClassifier\n\ndatasets = pd.read_csv('../_data/study/winequality-white.csv',\n index_col=None, header=0, sep=';')\n\n# print(datasets.head())\n# print(datasets.shape) # (4898, 12)\n\ncount_data = datasets.groupby('quality')['quality'].count()\n# 
print(count_data)\nplt.bar(count_data.index, count_data)\n# plt.show()\n'''\nquality\n3 20\n4 163\n5 1457\n6 2198\n7 880\n8 175\n9 5\n'''\n\ndatasets = datasets.values\nx = datasets[:, :11]\ny = datasets[:, 11]\n# print(y.shape) # (4898,)\n\nnewlist = []\nfor i in list(y):\n if i<=4 :\n newlist += [0]\n elif i<=7 :\n newlist += [1]\n else:\n newlist += [2]\ny = np.array(newlist)\n# print(y.shape) # (4898,)\n\nscaler = StandardScaler()\nx = scaler.fit_transform(x)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=21)\n\nmodel = XGBClassifier()\nmodel.fit(x_train, y_train)\nscore = model.score(x_test, y_test)\n# print('score = ', score) \n# # score = 0.6653061224489796 \n# -> score = 0.9551020408163265\n\n","sub_path":"ml/ml29_wine_quality.py","file_name":"ml29_wine_quality.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"552439668","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Date: 29/01/2018\n\n\nwith open(\"courseOutline.json\", \"r\") as f:\n data = f.read()\n\ndata = eval(data)\n\nnew_data = []\n\n\ndef get_chapters(val):\n chapter_list = []\n\n for chapter_title, children in val.items():\n chapter_list.append({\"chapterTitle\": chapter_title, \"children\": children})\n\n return chapter_list\n\nfor k, v in data.items():\n chapters = get_chapters(v)\n new_data.append({\"phase\": k, \"chapters\": chapters})\n\nprint(new_data)","sub_path":"20180124/py_excel2.py","file_name":"py_excel2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"363579749","text":"import numpy as np\nimport pandas as pd\nimport requests\n\n\ndef get_historical_forecast_given_time(latitude, longitude, date, time): # date = \"2018-07-29\", time = \"15:30\"\n df = get_historical_forecast_given_date(latitude, longitude, date)\n if df.shape[0] == 0:\n return df\n given_hour, given_minute = time.split(\":\")\n normalized_time = 60*int(given_hour) + int(given_minute)\n available_times = df[\"time\"].values.tolist()\n available_times_splitted = [available_time.split(\":\") for available_time in available_times]\n available_normalized_times = [60*int(hour) + int(minute) for hour, minute in available_times_splitted]\n\n minimum_difference = 1e6\n minimum_difference_index = 0\n\n for index, available_normalized_time in enumerate(available_normalized_times):\n difference = abs(normalized_time - available_normalized_time)\n if difference < minimum_difference:\n minimum_difference = difference\n minimum_difference_index = index\n\n return df.loc[[minimum_difference_index]]\n\n\ndef get_historical_forecast_given_date(latitude, longitude, date): # date = \"2018-07-29\"\n gid = get_gid(latitude, longitude)\n district_name = get_district_name(gid)\n station_id = get_station_id(gid)\n url = \"https://tr.freemeteo.com/havadurumu/\" + district_name + \"/history/daily-history/?gid=\" + str(gid) + \"&date=\" + date + \"&station=\" + \\\n str(station_id) + \"&language=turkish&country=turkey\"\n table = pd.read_html(url)[5]\n df = pd.DataFrame(data=table)\n df.drop(columns=['Simge', 'Rüzgarın Şiddeti'], inplace=True)\n df.rename(columns={'Saat': 'time', 'Sıcaklık': 'temperature', 'Hissedilir Sıcaklık': 'windchill_temperature', 'Rüzgar': 'wind_power',\n 'Bağıl Nem': 'relative_humidity', 'Çiğ oluşma derecesi': 'dewpoint_temperature', 'Basınç': 'pressure',\n 'TarifAyrıntılar': 'cloudness'}, 
inplace=True)\n df['fog_stability_index'] = df['temperature']\n for index, row in df.iterrows():\n df.set_value(index, 'temperature', edit_temperature(row[1]))\n df.set_value(index, 'windchill_temperature', edit_temperature(row[2]))\n df.set_value(index, 'wind_power', edit_wind(row[3]))\n df.set_value(index, 'relative_humidity', edit_relative_humidity(row[4]))\n df.set_value(index, 'dewpoint_temperature', edit_temperature(row[5]))\n df.set_value(index, 'pressure', edit_pressure(row[6]))\n df.set_value(index, 'cloudness', edit_cloudness(row[7]))\n df.set_value(index, 'fog_stability_index', edit_fog_stability_index(row[1], row[5]))\n return df\n\n\ndef get_gid(latitude, longitude):\n url = \"https://tr.freemeteo.com/Services/GeoLocation/PointByCoordinates/?cid=213&la=17&lat=\" + str(latitude) + \"&lon=\" + str(longitude)\n source_code = requests.get(url)\n plain_text = str(source_code.text)\n startIndex = plain_text.index(\"?gid=\") + 5\n numbers = []\n for i in range(startIndex, len(plain_text)):\n current_char = plain_text[i]\n if current_char != \"&\":\n numbers.append(current_char)\n else:\n break\n gid = \"\".join(numbers)\n return gid\n\n\ndef get_district_name(gid):\n url = \"https://tr.freemeteo.com/Services/Weather/SevenDaysChart?la=17&charts=Humidity&pointID=\" + str(gid) + \"&pointType=Land&unit=Metric&v=2\"\n source_code = requests.get(url)\n plain_text = str(source_code.text)\n start_index = plain_text.index(\"/havadurumu/\") + 12\n district_letters = []\n for j in range(start_index, len(plain_text)):\n char_at_j = plain_text[j]\n if char_at_j != \"/\":\n district_letters.append(char_at_j)\n else:\n break\n district_name = \"\".join(district_letters)\n return district_name\n\n\ndef get_station_id(gid):\n url = \"https://tr.freemeteo.com/Services/Weather/Stations?pointid=\" + str(gid) + \"&la=17&stationType=CurrentWeather&units=Metric&ck=1\"\n source_code = requests.get(url)\n plain_text = str(source_code.text)\n start_index = 7\n station_id_numbers = []\n for j in range(start_index, len(plain_text)):\n char_at_j = plain_text[j]\n if char_at_j != \",\":\n station_id_numbers.append(char_at_j)\n else:\n break\n station_id = \"\".join(station_id_numbers)\n return station_id\n\n\ndef edit_temperature(temperature):\n temperature = str(temperature)\n return int(temperature[:-2])\n\n\ndef edit_wind(wind_power):\n wind_power = str(wind_power)\n try:\n wind_power = wind_power.split()[-2]\n wind_power_numbers = []\n for i in range(len(wind_power)):\n j = len(wind_power) - i - 1\n char_at_j = wind_power[j]\n if char_at_j.isdigit():\n wind_power_numbers.insert(0, char_at_j)\n else:\n break\n return int(\"\".join(wind_power_numbers))\n except:\n return \"NA\"\n\n\ndef edit_relative_humidity(relative_humidity):\n relative_humidity = str(relative_humidity)\n return float(relative_humidity[:-1])/100\n\n\ndef edit_pressure(pressure):\n pressure = str(pressure)\n pressure = pressure[:-2].replace(',', '.')\n return float(pressure)\n\n\ndef edit_cloudness(cloudness):\n cloudness = str(cloudness)\n if cloudness[42].isdigit():\n return int(cloudness[41:43])\n else:\n return int(cloudness[41])\n\n\ndef edit_fog_stability_index(temperature, dewpoint_temperature):\n temperature = float(temperature)\n dewpoint_temperature = float(dewpoint_temperature)\n fog_stability_index = 0.2*np.log(temperature) / \\\n (2*(temperature - dewpoint_temperature) / float(1000))\n return min(fog_stability_index, 5.0)\n\n\n\"\"\"\nfor i in range(100):\n latitude = 40 + i * 0.01\n for j in range(100):\n longitude = 35 + j * 
0.01\n print(latitude, \", \", longitude, \", \", \"2018-07-26\")\n df = get_historical_forecast_given_time(latitude, longitude, \"2018-07-26\", \"14:36\")\n print(df.shape)\n print(df)\n break\n print(\"############################################\")\n print(\"\")\n break\n\"\"\"\n","sub_path":"historical_weather_data.py","file_name":"historical_weather_data.py","file_ext":"py","file_size_in_byte":6096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"231734068","text":"import torch\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM\n\n# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows\nimport logging\n\nimport matplotlib.pyplot as plt\nimport os\nfrom utils import load_pkl\n\n\n######### get data.\ndirname = \"data/\"\nfilename = \"cooking_labeled_text_data.pkl\"\ninput_data = load_pkl(dirname+filename)\nact_texts = []\n\nfor i in range(len(input_data)): # until length of training examples (documents)\n if len(input_data[i]['words']) == 0: # if there are no words in a document\n continue\n act_text = {}\n act_text['tokens'] = input_data[i]['words'] # tokens = individual words\n act_text['sents'] = input_data[i]['sents'] # sents = sentences [['a ','cat ', 'runs.'], [ ], ...]\n act_texts.append(act_text)\n\nmarked_text = \"\"\nfor act_text in act_texts:\n for sent in act_text['sents']:\n marked_text += \" [CLS] \"\n for word in sent:\n marked_text += word\n marked_text += \" [SEP] \"\n break\n break\n\n\n\n\n\n###bert\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n#tokenize the text\ntokenized_text = tokenizer.tokenize(marked_text)\nprint(tokenized_text)\n\n# print(list(tokenizer.vocab.keys())[5000:5020])\n\nindexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n\nfor tup in zip(tokenized_text, indexed_tokens):\n print(tup)\n\n# TODO: make it zero for alternate sentences.\nsegments_ids = [1] * len(tokenized_text)\n\n# Next we need to convert our data to torch tensors and call the BERT model.\n# The BERT PyTorch interface requires that the data be in torch tensors rather than Python lists, so we convert the lists here\n# - this does not change the shape or the data.\n# Convert inputs to PyTorch tensors\n\ntokens_tensor = torch.tensor([indexed_tokens])\nsegments_tensors = torch.tensor([segments_ids])\n\n# Load pre-trained model (weights)\nmodel = BertModel.from_pretrained('bert-base-uncased')\n\n# Put the model in \"evaluation\" mode, meaning feed-forward operation.\nmodel.eval()\n\n\n#torch.no_grad deactivates the gradient calculations, saves memory, and speeds up computation\n# (we don't need gradients or backpropagation since we're just running a forward pass).\n# Predict hidden states features for each layer\nwith torch.no_grad():\n encoded_layers, _ = model(tokens_tensor, segments_tensors)\n\n#The full set of hidden states for this model, stored in the object encoded_layers, is a little dizzying.\n# This object has four dimensions, in the following order:\n# 1. The layer number (12 layers)\n# 2. The batch number (1 sentence)\n# 3. The word / token number (22 tokens in our sentence)\n# 4. 
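# ---------------------------------------------------------------------------
# A sketch (not from the original records): bert.py above leaves a TODO to
# make the segment ids zero for alternate sentences instead of the constant
# [1] * len(tokens). One reading of that TODO, flipping the id after each
# [SEP] token:
def build_segment_ids(tokens):
    seg, ids = 0, []
    for tok in tokens:
        ids.append(seg)
        if tok == '[SEP]':  # flip after each sentence boundary
            seg = 1 - seg
    return ids

assert build_segment_ids(['[CLS]', 'a', '[SEP]', 'b', '[SEP]']) == [0, 0, 0, 1, 1]
# ---------------------------------------------------------------------------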
The hidden unit / feature number (768 features)\n\n## That’s 202,752 unique values just to represent our one sentence!\n# The second dimension, the batch size, is used when submitting multiple sentences to the model at once; here, though, we just have one example sentence.\n\nprint(\"Number of layers:\", len(encoded_layers))\nlayer_i = 0\n\nprint(\"Number of batches:\", len(encoded_layers[layer_i]))\nbatch_i = 0\n\nprint(\"Number of tokens:\", len(encoded_layers[layer_i][batch_i]))\ntoken_i = 0\n\nprint(\"Number of hidden units:\", len(encoded_layers[layer_i][batch_i][token_i]))\n\n\n\n\n\n###############################################################\n# Convert the hidden state embeddings into single token vectors\n\n# Holds the list of 12 layer embeddings for each token\n# Will have the shape: [# tokens, # layers, # features]\ntoken_embeddings = []\n\n# For each token in the sentence...\nfor token_i in range(len(tokenized_text)):\n\n # Holds 12 layers of hidden states for each token\n hidden_layers = []\n\n # For each of the 12 layers...\n for layer_i in range(len(encoded_layers)):\n # Lookup the vector for `token_i` in `layer_i`\n vec = encoded_layers[layer_i][batch_i][token_i]\n\n hidden_layers.append(vec)\n\n token_embeddings.append(hidden_layers)\n\n# Sanity check the dimensions:\nprint(\"Number of tokens in sequence:\", len(token_embeddings))\nprint(\"Number of layers per token:\", len(token_embeddings[0]))\n\n\n\n\n#### Word vectors by concatenating last four layers\n# Stores the token vectors, with shape [22 x 3,072]\n\n# Stores the token vectors, with shape [22 x 768]\ntoken_vecs_sum = []\n\n# For each token in the sentence...\nfor token in token_embeddings:\n # Sum the vectors from the last four layers.\n sum_vec = torch.sum(torch.stack(token)[-4:], 0)\n\n # Use `sum_vec` to represent `token`.\n token_vecs_sum.append(sum_vec)\n\nprint('Shape is: %d x %d' % (len(token_vecs_sum), len(token_vecs_sum[0])))\n\n\n######## To get a single vector for our entire sentence we have multiple application-dependent strategieis,\n# but a simple approach is to average the second to last hiden layer of each token producing a single 768 length vector.\n\nsentence_embedding = torch.mean(encoded_layers[11], 1)\nprint(\"Our final sentence embedding vector of shape:\"), sentence_embedding[0].shape[0]\n\n# first 15 words\nprint(token_vecs_sum[10][:15])\n","sub_path":"bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"195883387","text":"import json\n\nimport discord\nfrom discord.ext import commands\nfrom discord_slash import cog_ext\nfrom discord_slash.utils.manage_commands import create_option\n\nfrom my.sql import Connexion\n\n\nclass Rank(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @cog_ext.cog_subcommand(base=\"option\", subcommand_group=\"roles\", name=\"ajouter\",\n description=\"Ajoutez un role donné en fonction du niveau\",\n options=[create_option(\n name=\"Role\",\n description=\"Le rôle à donner\",\n option_type=8,\n required=True),\n\n create_option(\n name=\"Niveau\",\n description=\"À quel niveau donner le role ?\",\n option_type=4,\n required=True)])\n async def add_role(self, ctx, role: discord.Role, niveau: int):\n if not ctx.author.guild_permissions.manage_roles:\n await ctx.send(\"Vous devez avoir la permission \\\"Gerer les roles\\\" pour pouvoir effectuer cette commande.\",\n hidden=True)\n else:\n await ctx.defer()\n await 
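# ---------------------------------------------------------------------------
# A sketch (not from the original records): the nested token/layer loops in
# bert.py can collapse into one stack + permute, assuming encoded_layers is
# a list of 12 tensors of shape [1, tokens, 768] as produced above
# (stand-in random data here):
import torch
layers = [torch.randn(1, 22, 768) for _ in range(12)]
token_embeddings_t = torch.stack(layers).squeeze(1).permute(1, 0, 2)
print(token_embeddings_t.shape)  # torch.Size([22, 12, 768]) = [tokens, layers, features]
# ---------------------------------------------------------------------------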
Connexion().add_role(role.id, ctx.guild.id, niveau)\n\n await ctx.send(\n \"{0} sera désormais donné quand quelqu'un depasse le niveau {1} !\".format(role.mention, niveau),\n allowed_mentions=discord.AllowedMentions().none())\n\n @cog_ext.cog_subcommand(base=\"option\", subcommand_group=\"roles\", name=\"supprimer\",\n description=\"Ajoutez un role donné en fonction du niveau\",\n options=[create_option(\n name=\"Role\",\n description=\"Le rôle à supprimer\",\n option_type=8,\n required=True)])\n async def del_role(self, ctx, role: discord.Role):\n if not ctx.author.guild_permissions.manage_roles:\n await ctx.send(\"Vous devez avoir la permission \\\"Gerer les roles\\\" pour pouvoir effectuer cette commande.\",\n hidden=True)\n else:\n await ctx.defer()\n resp = await Connexion().del_role(ctx.guild.id, role.id)\n\n if resp:\n await ctx.send(\"Le role {0} ne sera désormais plus donné.\".format(role.mention),\n allowed_mentions=discord.AllowedMentions().none())\n else:\n await ctx.send(\"Ce role n'est pas donné lors de levels-up, impossible de le supprimer.\")\n\n @cog_ext.cog_subcommand(base=\"option\", subcommand_group=\"roles\", subcommand_group_description=\"Modifiez les paramètres des roles\", name=\"liste\",\n description=\"Affiche les roles donnés en fonction des niveaux\")\n async def get_roles(self, ctx):\n await ctx.defer(True)\n data = await Connexion().list_roles(ctx.guild.id)\n\n if data == [] or len(json.loads(data[0][1])) == 0:\n await ctx.send(\"Il n'y a pour le moment auccun roles d'activés.\", hidden=True)\n\n else:\n message = \"\"\n data = json.loads(data[0][1])\n for x in data:\n message += \"Niveau {}: <@&{}>\\n\".format(x, data[x])\n await ctx.send(message, hidden=True)\n\n\ndef setup(bot):\n bot.add_cog(Rank(bot))\n","sub_path":"FRank/cogs/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"7935644","text":"import FWCore.ParameterSet.Config as cms\n\nimport os\n \nprocess = cms.PSet()\n \nprocess.fwliteInput = cms.PSet(\n# fileNames = cms.vstring('/home/karl/VHbbNtuples_7_6_x3/CMSSW_7_6_3/src/VHbbAnalysis/Heppy/test/Loop_1/tree.root'),\n# fileNames = cms.vstring('/home/karl/VHbbNtuples_7_6_x3/CMSSW_7_6_3/src/VHbbAnalysis/Heppy/test/Loop_2/tree.root'),\n fileNames = cms.vstring('/home/andres/tth/sync/ttHJetToNonbb_M125_13TeV_sync_summer_v3.root'),\n maxEvents = cms.int32(-1),\n outputEvery = cms.uint32(10000)\n)\n\nprocess.fwliteOutput = cms.PSet(\n# fileName = cms.string('/home/karl/sandbox/sync_ntuples/ttHJetToTT_M125_13TeV_ntuples_sync_v0.root')\n# fileName = cms.string('/home/karl/sandbox/sync_ntuples/ttJet_13TeV_ntuples_sync_v0.root')\n fileName = cms.string('/home/andres/tth/sync/sync_ntuple_summer_v1_crab.root')\n)\n\nprocess.syncNtuple = cms.PSet(\n treeName = cms.string('tree'),\n outputTreeName = cms.string('syncTree'),\n era = cms.string('2016'),\n\n # 2016 triggers\n triggers_1e = cms.vstring(['HLT_BIT_HLT_Ele25_WPTight_Gsf_v', 'HLT_BIT_HLT_Ele25_eta2p1_WPTight_Gsf_v', 'HLT_BIT_HLT_Ele27_eta2p1_WPLoose_Gsf_v', 'HLT_BIT_HLT_Ele27_eta2p1_WPTight_Gsf_v']),\n triggers_2e = cms.vstring(['HLT_BIT_HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v']),\n triggers_1mu = cms.vstring(['HLT_BIT_HLT_IsoMu22_v', 'HLT_BIT_HLT_IsoTkMu22_v', 'HLT_BIT_HLT_IsoMu22_eta2p1_v', 'HLT_BIT_HLT_IsoTkMu22_eta2p1_v']),\n triggers_2mu = cms.vstring(['HLT_BIT_HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v', 'HLT_BIT_HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ_v']),\n triggers_1e1mu = 
cms.vstring(['HLT_BIT_HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_v', 'HLT_BIT_HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_v']),\n \n use_triggers_1e = cms.bool(True),\n use_triggers_2e = cms.bool(True),\n use_triggers_1mu = cms.bool(True),\n use_triggers_2mu = cms.bool(True),\n use_triggers_1e1mu = cms.bool(True),\n \n triggers_3e = cms.vstring(),\n use_triggers_3e = cms.bool(False),\n triggers_2e1mu = cms.vstring(),\n use_triggers_2e1mu = cms.bool(False),\n triggers_1e2mu = cms.vstring(),\n use_triggers_1e2mu = cms.bool(False),\n triggers_3mu = cms.vstring(),\n use_triggers_3mu = cms.bool(False),\n\n apply_offline_e_trigger_cuts_1e = cms.bool(True),\n apply_offline_e_trigger_cuts_1mu = cms.bool(True),\n apply_offline_e_trigger_cuts_2e = cms.bool(True),\n apply_offline_e_trigger_cuts_1e1mu = cms.bool(True),\n apply_offline_e_trigger_cuts_2mu = cms.bool(True),\n apply_offline_e_trigger_cuts_3e = cms.bool(True),\n apply_offline_e_trigger_cuts_2e1mu = cms.bool(True),\n apply_offline_e_trigger_cuts_1e2mu = cms.bool(True),\n apply_offline_e_trigger_cuts_3mu = cms.bool(True),\n\n \n #selEventsFileName_input = cms.string('testEv.txt'),\n selEventsFileName_input = cms.string(''),\n selEventsFileName_output = cms.string(''),\n\n leptonSelection = cms.string('Fakeable'),\n\n hadTauSelection = cms.string('Tight|dR03mvaMedium'),\n #hadTauSelection = cms.string('dR03mvaTight'),\n leptonFakeRateWeight = cms.PSet(\n inputFileName = cms.string(\"tthAnalysis/HiggsToTauTau/data/FR_lep_ttH_mva_2016_data.root\"),\n histogramName_e = cms.string(\"FR_mva075_el_data_comb\"),\n histogramName_mu = cms.string(\"FR_mva075_mu_data_comb\")\n ),\n hadTauFakeRateWeight = cms.PSet(\n inputFileName = cms.string(\"tthAnalysis/HiggsToTauTau/data/FR_tau_2016.root\"),\n lead = cms.PSet(\n absEtaBins = cms.vdouble(-1., 1.479, 9.9),\n graphName = cms.string(\"jetToTauFakeRate/dR03mvaTight/$etaBin/jetToTauFakeRate_mc_hadTaus_pt\"),\n applyGraph = cms.bool(True),\n fitFunctionName = cms.string(\"jetToTauFakeRate/dR03mvaTight/$etaBin/fitFunction_data_div_mc_hadTaus_pt\"),\n applyFitFunction = cms.bool(True),\n applyGraph_lead = cms.bool(False),\n applyFitFunction_lead = cms.bool(True),\n applyGraph_sublead = cms.bool(False),\n applyFitFunction_sublead = cms.bool(True)\n )\n ), \n debug = cms.bool(False), # set it to True if you select only few events\n lumiScale = cms.double(0.002600),\n apply_trigger_bits = cms.bool(True),\n #apply_leptonGenMatching = cms.bool(True)\n #apply_hadTauGenMatching = cms.bool(False),\n apply_hadTauGenMatching = cms.bool(True),\n applyFakeRateWeights = cms.string('disabled'),\n apply_hadTauFakeRateSF = cms.bool(True),\n isMC = cms.bool(True),\n apply_genWeight = cms.bool(True),\n use_HIP_mitigation_mediumMuonId = cms.bool(False),\n \n selectBDT = cms.bool(False)\n)\n","sub_path":"test/sync_ntuples_cfg.py","file_name":"sync_ntuples_cfg.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"168173390","text":"from . 
import utils\n\nimport os\nimport scanpy as sc\nimport scprep\nimport tempfile\n\nURL = \"https://ndownloader.figshare.com/files/25555739\"\n\n\n@utils.loader\ndef load_tenx_5k_pbmc(test=False):\n \"\"\"Download 5k PBMCs from 10x Genomics.\"\"\"\n if test:\n # load full data first, cached if available\n adata = load_tenx_5k_pbmc(test=False)\n\n # Subsample pancreas data\n adata = adata[:, :500].copy()\n utils.filter_genes_cells(adata)\n\n sc.pp.subsample(adata, n_obs=500)\n # Note: could also use 200-500 HVGs rather than 200 random genes\n\n # Ensure there are no cells or genes with 0 counts\n utils.filter_genes_cells(adata)\n\n return adata\n\n else:\n with tempfile.TemporaryDirectory() as tempdir:\n filepath = os.path.join(tempdir, \"10x_5k_pbmc.h5ad\")\n scprep.io.download.download_url(URL, filepath)\n adata = sc.read(filepath)\n\n adata.var_names_make_unique()\n\n # Ensure there are no cells or genes with 0 counts\n utils.filter_genes_cells(adata)\n\n return adata\n","sub_path":"openproblems/data/tenx_5k_pbmc.py","file_name":"tenx_5k_pbmc.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"79834378","text":"# -*- coding: utf-8 -*-\n### Python Imports\ntry:\n from pysqlite2 import dbapi2 as sqlite\nexcept ImportError:\n from sqlite3 import dbapi2 as sqlite\nimport os\nimport re\nfrom fnmatch import fnmatch\nfrom bs4 import UnicodeDammit\n\n\n### Anknotes Imports\nfrom anknotes.constants import *\nfrom anknotes.imports import *\n# write_file_contents('Loading %s: Importing base' % __name__, 'load')\nfrom anknotes.base import *\n# write_file_contents('Loading %s: Imported base' % __name__, 'load')\n# write_file_contents('Loading %s: Importing logging' % __name__, 'load')\nfrom anknotes.logging import *\nfrom anknotes.db import *\nfrom anknotes.html import *\nfrom anknotes.structs import *\n\n### Check if in Anki\nif in_anki():\n from aqt import mw\n from aqt.qt import QIcon, QPixmap, QPushButton, QMessageBox\n from anknotes.evernote.edam.error.ttypes import EDAMSystemException, EDAMErrorCode, EDAMUserException, \\\n EDAMNotFoundException\n\nclass EvernoteQueryLocationType:\n RelativeDay, RelativeWeek, RelativeMonth, RelativeYear, AbsoluteDate, AbsoluteDateTime = range(6)\n\ndef get_tag_names_to_import(tagNames, evernoteQueryTags=None, evernoteTagsToDelete=None, keepEvernoteTags=None,\n deleteEvernoteQueryTags=None):\n def check_tag_name(v, tags_to_delete):\n return v not in tags_to_delete and (not hasattr(v, 'Name') or getattr(v, 'Name') not in tags_to_delete) and (\n not hasattr(v, 'name') or getattr(v, 'name') not in tags_to_delete)\n if keepEvernoteTags is None:\n keepEvernoteTags = SETTINGS.ANKI.TAGS.KEEP_TAGS.fetch()\n if not keepEvernoteTags:\n return {} if isinstance(tagNames, dict) else []\n if evernoteQueryTags is None:\n evernoteQueryTags = SETTINGS.EVERNOTE.QUERY.TAGS.fetch().replace(',', ' ').split()\n if deleteEvernoteQueryTags is None:\n deleteEvernoteQueryTags = SETTINGS.ANKI.TAGS.DELETE_EVERNOTE_QUERY_TAGS.fetch()\n if evernoteTagsToDelete is None:\n evernoteTagsToDelete = SETTINGS.ANKI.TAGS.TO_DELETE.fetch()\n tags_to_delete = evernoteQueryTags if deleteEvernoteQueryTags else [] + evernoteTagsToDelete\n if isinstance(tagNames, dict):\n return {k: v for k, v in tagNames.items() if check_tag_name(v, tags_to_delete)}\n return sorted([v for v in tagNames if check_tag_name(v, tags_to_delete)])\n\n\ndef find_evernote_guids(content):\n return [x.group('guid') for x in\n 
re.finditer(r'\\b(?P<guid>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\\b', content)]\n\n\ndef find_evernote_links_as_guids(content):\n return [x.Guid for x in find_evernote_links(content)]\n\n\ndef replace_evernote_web_links(content):\n return re.sub(\n r'https://www.evernote.com/shard/(s\\d+)/[\\w\\d]+/(\\d+)/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})',\n r'evernote:///view/\\2/\\1/\\3/\\3/', content)\n\ndef find_evernote_links(content):\n \"\"\"\n\n :param content:\n :return:\n :rtype : list[EvernoteLink]\n \"\"\"\n # .NET regex saved to regex.txt as 'Finding Evernote Links'\n # NOTE: the named groups below were reconstructed from the .group() calls\n # and the (?P=guid) backreference; 'URL' and 'Title' are inferred names.\n content = replace_evernote_web_links(content)\n regex_str = r\"\"\"(?si)<a href=[\"''](?P<URL>evernote:///?view/(?P<uid>[\\d]+?)/(?P<shard>s\\d+)/(?P<guid>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/(?P=guid)/?)[\"''](?:[^>]+)?>(?P<Title>.+?)</a>\"\"\"\n ids = get_evernote_account_ids()\n if not ids.Valid:\n match = re.search(regex_str, content)\n if match:\n ids.update(match.group('uid'), match.group('shard'))\n return [EvernoteLink(m) for m in re.finditer(regex_str, content)]\n\n\ndef check_evernote_guid_is_valid(guid):\n return ankDB().exists(where=\"guid = '%s'\" % guid)\n\n\ndef escape_regex(str_):\n return re.sub(r\"(?sx)(\\(|\\||\\))\", r\"\\\\\\1\", str_)\n\n\ndef remove_evernote_link(link, html):\n html = UnicodeDammit(html, ['utf-8'], is_html=True).unicode_markup\n link_converted = UnicodeDammit(link.WholeRegexMatch, ['utf-8'], is_html=True).unicode_markup\n sep = u'<span style=\"color: rgb(105, 170, 53);\"> | </span>'\n sep_regex = escape_regex(sep)\n no_start_tag_regex = r'[^<]*'\n regex_replace = r'<{0}[^>]*>[^<]*{1}[^<]*</{0}>'\n # html = re.sub(regex_replace.format('li', link.WholeRegexMatch), \"\", html)\n # Remove link\n html = html.replace(link.WholeRegexMatch, \"\")\n # Remove empty li\n html = re.sub(regex_replace.format('li', no_start_tag_regex), \"\", html)\n # Remove dangling separator\n\n regex_span = regex_replace.format('span', no_start_tag_regex) + no_start_tag_regex + sep_regex\n html = re.sub(regex_span, \"\", html)\n # Remove double separator\n html = re.sub(sep_regex + no_start_tag_regex + sep_regex, sep_regex, html)\n return html\n\n\ndef get_dict_from_list(lst, keys_to_ignore=list()):\n dic = {}\n for key, value in lst:\n if key not in keys_to_ignore:\n dic[key] = value\n return dic\n\ndef update_regex():\n regex_str = file(os.path.join(FOLDERS.ANCILLARY, 'regex-see_also.txt'), 'r').read()\n regex_str = regex_str.replace('(?<', '(?P<')\n regex_see_also._regex_see_also = re.compile(regex_str, re.UNICODE | re.VERBOSE | re.DOTALL)\n\ndef regex_see_also():\n if not hasattr(regex_see_also, '_regex_see_also'):\n update_regex()\n return regex_see_also._regex_see_also\n","sub_path":"anknotes/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
{"seq_id":"275922195","text":"from django.shortcuts import get_object_or_404, render, redirect\r\n\r\n#from customers_wb.models import *\r\nfrom userprofile.models import (Broker, Profile,)\r\nfrom wastelinqbroker.globals import *\r\nfrom generator._models.Generator_Data import *\r\nfrom wastelinqbroker.globals import Status as GlobalStatus\r\n\r\n# Create your views here.\r\ndef index(request): \r\n try:\r\n profile = Profile.objects.get(user_id=request.user.id)\r\n except Exception:\r\n return render(request, 'base_not_found_partial.html')\r\n \r\n items = []\r\n cutomers = Customer.objects.filter(Broker_id = profile.Broker_id,
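# ---------------------------------------------------------------------------
# A sketch (not from the original records): sanity check for the
# reconstructed link pattern in anknotes/shared.py. The anchor markup below
# is a made-up example of the evernote:///view/<uid>/<shard>/<guid>/<guid>/
# shape the regex expects:
# sample = ('<a href="evernote:///view/42/s1/'
#           'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/'
#           'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/">My note</a>')
# find_evernote_links(sample) should return one EvernoteLink whose uid,
# shard and guid groups match the values embedded above.
# ---------------------------------------------------------------------------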
Status = GlobalStatus.Active.value)\r\n for cutomer in cutomers:\r\n addresses = PrimarySiteAddress.objects.filter(Customer_id=cutomer.id)\r\n billTos = BillingInfo.objects.filter(Customer_id=cutomer.id)\r\n addr = billto = None\r\n if addresses:\r\n addr = addresses[0]\r\n if billTos:\r\n billto = billTos[0].Biil_To\r\n items.append({'customer': cutomer, 'address': addr, 'billto': billto})\r\n return render(request,\"generator/index_partial.html\", {\r\n 'items': items,\r\n })\r\n\r\ndef information(request, id):\r\n #return redirect(\"/generator/profiles/\" + id)\r\n try:\r\n profile = Profile.objects.get(user_id=request.user.id)\r\n except Exception:\r\n return render(request, 'base_not_found_partial.html')\r\n\r\n try: \r\n obj_cust = Customer.objects.get(id=id, Status = 'Active')\r\n except Exception:\r\n obj_cust = None\r\n\r\n if obj_cust:\r\n #obj_models = Data2.objects.filter(Customer_id=id).select_related(\"Customer\").select_related(\"Comp_Or_Location\")\r\n obj_addresses = PrimarySiteAddress.objects.filter(Customer_id=id)\r\n else:\r\n return redirect(\"/generator/\")\r\n\r\n \"\"\" if obj_models:\r\n obj_model = obj_models[0]\r\n else:\r\n obj_model = None \"\"\"\r\n\r\n if obj_addresses:\r\n obj_address = obj_addresses[0]\r\n else:\r\n obj_address = None\r\n #cursubpage = request.GET.get('action', None)\r\n return render(request, \"generator/index_1_partial.html\", {\r\n 'model': obj_cust,\r\n 'cust_address': obj_address,\r\n 'customer_id': id,\r\n #'cursubpage':cursubpage,\r\n })","sub_path":"generator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"566306859","text":"from flask import Flask\r\nfrom flask_bootstrap import Bootstrap\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask import render_template, request, redirect, url_for, send_file # imports rendering functions\r\nfrom docx import Document\r\nfrom io import BytesIO\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import StringField, PasswordField, BooleanField\r\nfrom wtforms.validators import InputRequired, Email, Length\r\n#from models import *\r\nfrom werkzeug.security import generate_password_hash, check_password_hash\r\nfrom flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user\r\nfrom flask_admin import Admin\r\nfrom flask_admin.contrib.sqla import ModelView\r\n\r\n# Flask app defined\r\napp = Flask(__name__)\r\n\r\n# Establishes secret key\r\napp.config['SECRET_KEY'] = 'thisisasecret'\r\nBootstrap(app)\r\n\r\n# Links to database which is created in config.py\r\napp.config.from_pyfile('config.py')\r\ndb = SQLAlchemy(app)\r\nadmin = Admin(app)\r\nlogin_manager = LoginManager()\r\nlogin_manager.init_app(app)\r\nlogin_manager.login_view = 'login'\r\n\r\n\r\n#models\r\nclass User(UserMixin, db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n username = db.Column(db.String(15), unique=True)\r\n email = db.Column(db.String(50), unique=True)\r\n password = db.Column(db.String(80))\r\n admin = db.Column(db.String(1))\r\n scheduler = db.relationship('Scheduler', backref='student', lazy='dynamic')\r\n mcat = db.relationship('Mcat', backref='student', lazy='dynamic')\r\n grades = db.relationship('Grades', backref='student', lazy = 'dynamic')\r\n references = db.relationship('References', backref='student', lazy='dynamic')\r\n activities = db.relationship('Activities', backref='student', lazy='dynamic')\r\n status = 
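# ---------------------------------------------------------------------------
# A sketch (not from the original records): the index view in
# generator/views.py issues two extra queries per customer (addresses, then
# billing info). One way to batch each lookup into a single query, assuming
# the same PrimarySiteAddress/BillingInfo models:
def first_by_customer(qs):
    out = {}
    for row in qs:
        out.setdefault(row.Customer_id, row)  # keep the first row per customer
    return out
# addr_map = first_by_customer(PrimarySiteAddress.objects.filter(Customer__in=cutomers))
# bill_map = first_by_customer(BillingInfo.objects.filter(Customer__in=cutomers))
# ---------------------------------------------------------------------------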
db.relationship('Status', backref='student', lazy='dynamic')\r\n personal = db.relationship('Personal', backref='student', lazy='dynamic')\r\n\r\n def __repr__(self):\r\n return (self.username)\r\n\r\n\r\nclass Scheduler(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n userid = db.Column(db.Integer, db.ForeignKey('user.id'))\r\n schedulename = db.Column(db.String(50))\r\n schedule = db.Column(db.String(200))\r\n data = db.Column(db.LargeBinary)\r\n\r\n\r\nclass Grades(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n userid = db.Column(db.Integer, db.ForeignKey('user.id'))\r\n ogpa = db.Column(db.String(5))\r\n sgpa = db.Column(db.String(5))\r\n\r\n def __repr__(self):\r\n return 'User %r' % (self.userid)\r\n\r\n\r\nclass Mcat(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n userid = db.Column(db.Integer, db.ForeignKey('user.id'))\r\n examdate = db.Column(db.String(15))\r\n overall = db.Column(db.String(5))\r\n cp = db.Column(db.String(5))\r\n cars = db.Column(db.String(5))\r\n bb = db.Column(db.String(5))\r\n ps = db.Column(db.String(5))\r\n\r\n def __repr__(self):\r\n return 'User %r' % (self.userid)\r\n\r\n\r\nclass References(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n userid = db.Column(db.Integer, db.ForeignKey('user.id'))\r\n name = db.Column(db.String(50))\r\n email = db.Column(db.String(50))\r\n type = db.Column(db.String(50))\r\n status = db.Column(db.String(500))\r\n\r\n\r\nclass Activities(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n userid = db.Column(db.Integer, db.ForeignKey('user.id'))\r\n activity = db.Column(db.String(50))\r\n type = db.Column(db.String(50))\r\n hours = db.Column(db.String(50))\r\n reference = db.Column(db.String(50))\r\n startdate = db.Column(db.String(50))\r\n enddate = db.Column(db.String(50))\r\n description = db.Column(db.String(10000))\r\n\r\n\r\nclass Status(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n userid = db.Column(db.Integer, db.ForeignKey('user.id'))\r\n university = db.Column(db.String(50))\r\n primary = db.Column(db.String(50))\r\n secondary = db.Column(db.String(50))\r\n interview = db.Column(db.String(50))\r\n offer = db.Column(db.String(50))\r\n essay1p = db.Column(db.String(500))\r\n essay1a = db.Column(db.String(10000))\r\n essay2p = db.Column(db.String(500))\r\n essay2a = db.Column(db.String(10000))\r\n essay3p = db.Column(db.String(500))\r\n essay3a = db.Column(db.String(10000))\r\n essay4p = db.Column(db.String(500))\r\n essay4a = db.Column(db.String(10000))\r\n essay5p = db.Column(db.String(500))\r\n essay5a = db.Column(db.String(10000))\r\n\r\n\r\nclass Personal(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n userid = db.Column(db.Integer, db.ForeignKey('user.id'))\r\n title = db.Column(db.String(50))\r\n essay = db.Column(db.String(10000))\r\n\r\nadmin.add_view(ModelView(User, db.session))\r\nadmin.add_view(ModelView(Scheduler, db.session))\r\nadmin.add_view(ModelView(Grades, db.session))\r\nadmin.add_view(ModelView(Mcat, db.session))\r\nadmin.add_view(ModelView(References, db.session))\r\nadmin.add_view(ModelView(Activities, db.session))\r\nadmin.add_view(ModelView(Status, db.session))\r\nadmin.add_view(ModelView(Personal, db.session))\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n return User.query.get(int(user_id))\r\n\r\n\r\n# Creates class for login form w/ username and password\r\nclass LoginForm(FlaskForm):\r\n username = StringField('username', validators=[InputRequired(), 
Length(min=4, max=15)])\r\n password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])\r\n remember = BooleanField('remember me')\r\n\r\n\r\n# Creates class for registration form w/ username, email, and password\r\nclass RegisterForm(FlaskForm):\r\n username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)])\r\n email = StringField('email', validators=[InputRequired(), Email(message='Invalid email'), Length(max=50)])\r\n password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n form = LoginForm()\r\n\r\n if form.validate_on_submit():\r\n user = User.query.filter_by(username=form.username.data).first()\r\n if user:\r\n if check_password_hash(user.password, form.password.data):\r\n login_user(user, remember=form.remember.data)\r\n return redirect(url_for('dashboard'))\r\n\r\n return \"<h1>Username or password is not valid</h1>\"\r\n\r\n return render_template(\"login.html\", form=form)\r\n\r\n\r\n@app.route(\"/logout\")\r\n@login_required\r\ndef logout():\r\n logout_user()\r\n return redirect(url_for('index'))\r\n\r\n\r\n@app.route('/signup', methods=['GET', 'POST'])\r\ndef signup():\r\n form = RegisterForm()\r\n\r\n if form.validate_on_submit():\r\n hashed_password = generate_password_hash(form.password.data, method='sha256')\r\n new_user = User(username=form.username.data, email=form.email.data, password=hashed_password)\r\n db.session.add(new_user)\r\n db.session.commit()\r\n return render_template(\"newuser.html\", form=form)\r\n\r\n return render_template(\"signup.html\", form=form)\r\n\r\n\r\n@app.route('/dashboard')\r\n@login_required\r\ndef dashboard():\r\n return render_template(\"dashboard.html\", name=current_user.username)\r\n\r\n\r\n@app.route('/scheduler', methods=['GET', 'POST'])\r\n@login_required\r\ndef scheduler():\r\n result = Scheduler.query.filter_by(userid=current_user.id).all()\r\n if request.method == 'POST':\r\n if int(Scheduler.query.count()) < 6:\r\n file = request.files['inputfile']\r\n schedulename = request.form['schedulename']\r\n data = file.read()\r\n signature = Scheduler(userid=current_user.id, schedulename=schedulename,schedule=file.filename, data=data)\r\n db.session.add(signature)\r\n db.session.commit()\r\n result = Scheduler.query.filter_by(userid=current_user.id).all()\r\n return render_template(\"scheduler.html\", result=result)\r\n\r\n\r\n@app.route('/academics', methods=['POST', 'GET'])\r\n@login_required\r\ndef academics():\r\n grades = Grades.query.filter_by(userid=current_user.id).all()\r\n result = Mcat.query.filter_by(userid=current_user.id).all()\r\n result1 = References.query.filter_by(userid=current_user.id).all()\r\n if request.method == 'POST':\r\n ogpa = request.form['ogpa']\r\n sgpa = request.form['sgpa']\r\n signature = Grades(userid=current_user.id, ogpa=ogpa, sgpa=sgpa)\r\n db.session.add(signature)\r\n db.session.commit()\r\n return render_template(\"academics.html\", result=result, result1=result1, grades=grades)\r\n\r\n\r\n@app.route('/academicsdetails', methods=['POST', 'GET'])\r\ndef academicsdetails():\r\n result = References.query.filter_by(id=request.form['academicsdetails']).first()\r\n return render_template(\"academicsdetails.html\", result=result)\r\n\r\n\r\n@app.route('/academicsdetailsprocess', methods=['POST', 'GET'])\r\ndef academicsdetailsprocess():\r\n edit = 
References.query.filter_by(id=request.form['update']).first()\r\n edit.email = request.form['email']\r\n edit.type = request.form['type']\r\n edit.status = request.form['status']\r\n db.session.commit()\r\n return redirect(url_for('academics'))\r\n\r\n\r\n@app.route('/mcat', methods=['POST', 'GET'])\r\ndef mcat():\r\n if request.method == 'POST':\r\n if request.form['examdate'] != '':\r\n examdate = request.form['examdate']\r\n overall = request.form['overall']\r\n cp = request.form['cp']\r\n cars = request.form['cars']\r\n bb = request.form['bb']\r\n ps = request.form['ps']\r\n signature = Mcat(userid=current_user.id, examdate=examdate, overall=overall, cp=cp, cars=cars, bb=bb, ps=ps)\r\n db.session.add(signature)\r\n db.session.commit()\r\n return redirect(url_for('academics'))\r\n\r\n\r\n@app.route('/references', methods=['POST', 'GET'])\r\ndef references():\r\n if request.method == 'POST':\r\n if request.form['name'] != '':\r\n name = request.form['name']\r\n email = request.form['email']\r\n type = request.form['type']\r\n status = request.form['status']\r\n signature = References(userid=current_user.id, name=name, email=email, type=type, status=status)\r\n db.session.add(signature)\r\n db.session.commit()\r\n return redirect(url_for('academics'))\r\n\r\n\r\n@app.route('/activities', methods=['POST', 'GET'])\r\n@login_required\r\ndef activities():\r\n result = Activities.query.filter_by(userid=current_user.id).all()\r\n if request.method == 'POST':\r\n if request.form['activity'] != '':\r\n activity = request.form['activity']\r\n type = request.form['type']\r\n reference = request.form['reference']\r\n hours = request.form['hours']\r\n signature = Activities(userid=current_user.id, activity=activity, type=type, reference=reference, hours=hours)\r\n db.session.add(signature)\r\n db.session.commit()\r\n result = Activities.query.filter_by(userid=current_user.id).all()\r\n return render_template(\"activities.html\", result=result)\r\n\r\n\r\n@app.route('/activitiesdetails', methods=['POST', 'GET'])\r\ndef activitiesdetails():\r\n result = Activities.query.filter_by(id=request.form['activitiesdetails']).first()\r\n return render_template(\"activitiesdetails.html\", result=result)\r\n\r\n\r\n@app.route('/activitiesdetailsprocess', methods=['POST', 'GET'])\r\ndef activitiesdetailsprocess():\r\n edit = Activities.query.filter_by(id=request.form['update']).first()\r\n edit.type = request.form['type']\r\n edit.hours = request.form['hours']\r\n edit.reference = request.form['reference']\r\n edit.startdate = request.form['startdate']\r\n edit.enddate = request.form['enddate']\r\n edit.description = request.form['description']\r\n db.session.commit()\r\n return redirect(url_for('activities'))\r\n\r\n\r\n@app.route('/status', methods=['POST', 'GET'])\r\n@login_required\r\ndef status():\r\n result = Status.query.filter_by(userid=current_user.id).all()\r\n if request.method == 'POST':\r\n if request.form['university'] != '':\r\n university = request.form['university']\r\n primary = request.form['primary']\r\n secondary = request.form['secondary']\r\n interview = request.form['interview']\r\n offer = request.form['offer']\r\n signature = Status(userid=current_user.id, university=university, primary=primary, secondary=secondary, interview=interview, offer=offer)\r\n db.session.add(signature)\r\n db.session.commit()\r\n return render_template(\"status.html\", result=result)\r\n\r\n\r\n@app.route('/statusdetails', methods=['POST', 'GET'])\r\ndef statusdetails():\r\n result = 
Status.query.filter_by(id=request.form['statusdetails']).first()\r\n return render_template(\"statusdetails.html\", result=result)\r\n\r\n\r\n@app.route('/statusdetailsprocess', methods=['POST', 'GET'])\r\ndef statusdetailsprocess():\r\n edit = Status.query.filter_by(id=request.form['update']).first()\r\n edit.primary = request.form['primary']\r\n edit.secondary = request.form['secondary']\r\n edit.interview = request.form['interview']\r\n edit.offer = request.form['offer']\r\n edit.essay1p = request.form['essay1p']\r\n edit.essay1a = request.form['essay1a']\r\n edit.essay2p = request.form['essay2p']\r\n edit.essay2a = request.form['essay2a']\r\n edit.essay3p = request.form['essay3p']\r\n edit.essay3a = request.form['essay3a']\r\n edit.essay4p = request.form['essay4p']\r\n edit.essay4a = request.form['essay4a']\r\n edit.essay5p = request.form['essay5p']\r\n edit.essay5a = request.form['essay5a']\r\n db.session.commit()\r\n return redirect(url_for('status'))\r\n\r\n\r\n@app.route('/statusdetailsword', methods=['POST', 'GET'])\r\ndef statusdetailsword():\r\n edit = Status.query.filter_by(id=request.form['word']).first()\r\n document = Document()\r\n document.add_heading(edit.university, 0)\r\n #Essay 1\r\n document.add_heading('Prompt 1:', level=2)\r\n document.add_paragraph(edit.essay1p)\r\n document.add_heading('Essay 1:', level=2)\r\n document.add_paragraph(edit.essay1a)\r\n #Essay 2\r\n document.add_heading('Prompt 2:', level=2)\r\n document.add_paragraph(edit.essay2p)\r\n document.add_heading('Essay 2:', level=2)\r\n document.add_paragraph(edit.essay2a)\r\n #Essay 3\r\n document.add_heading('Prompt 3:', level=2)\r\n document.add_paragraph(edit.essay3p)\r\n document.add_heading('Essay 3:', level=2)\r\n document.add_paragraph(edit.essay3a)\r\n #Essay 4\r\n document.add_heading('Prompt 4:', level=2)\r\n document.add_paragraph(edit.essay4p)\r\n document.add_heading('Essay 4:', level=2)\r\n document.add_paragraph(edit.essay4a)\r\n #Essay 5\r\n document.add_heading('Prompt 5:', level=2)\r\n document.add_paragraph(edit.essay5p)\r\n document.add_heading('Essay 5:', level=2)\r\n document.add_paragraph(edit.essay5a)\r\n table = document.add_table(rows=1, cols=3)\r\n hdr_cells = table.rows[0].cells\r\n hdr_cells[0].text = 'Essays'\r\n f = BytesIO()\r\n document.save(f)\r\n length = f.tell()\r\n f.seek(0)\r\n return send_file(f, as_attachment=True, attachment_filename='report.doc')\r\n\r\n\r\n@app.route('/personalstatement', methods=['POST', 'GET'])\r\n@login_required\r\ndef personalstatement():\r\n result = Personal.query.filter_by(userid=current_user.id).all()\r\n if request.method == 'POST':\r\n if request.form['title'] != '':\r\n title = request.form['title']\r\n signature = Personal(userid=current_user.id, title=title)\r\n db.session.add(signature)\r\n db.session.commit()\r\n result = Personal.query.filter_by(userid=current_user.id).all()\r\n return render_template(\"personalstatement.html\", result=result)\r\n\r\n\r\n@app.route('/personalstatementdetails', methods=['POST', 'GET'])\r\ndef personalstatementdetails():\r\n result = Personal.query.filter_by(id=request.form['personalstatementdetails']).first()\r\n return render_template(\"personalstatementdetails.html\", result=result)\r\n\r\n\r\n@app.route('/personalstatementdetailsprocess', methods=['POST', 'GET'])\r\ndef personalstatementdetailsprocess():\r\n edit = Personal.query.filter_by(id=request.form['update']).first()\r\n edit.essay = request.form['essay']\r\n db.session.commit()\r\n return 
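# ---------------------------------------------------------------------------
# A sketch (not from the original records): statusdetailsword above repeats
# the same prompt/essay block five times; a small helper with the same
# python-docx calls removes the duplication:
def add_essay(document, n, prompt, answer):
    document.add_heading('Prompt %d:' % n, level=2)
    document.add_paragraph(prompt)
    document.add_heading('Essay %d:' % n, level=2)
    document.add_paragraph(answer)
# for n in range(1, 6):
#     add_essay(document, n, getattr(edit, 'essay%dp' % n), getattr(edit, 'essay%da' % n))
# ---------------------------------------------------------------------------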
redirect(url_for('personalstatement'))\r\n\r\n\r\n# Deletion Routes\r\n\r\n@app.route('/deletescheduler', methods=['POST', 'GET'])\r\ndef deletescheduler():\r\n if request.form['schedulerdelete'] != '':\r\n Scheduler.query.filter_by(id=int(request.form['schedulerdelete'])).delete()\r\n db.session.commit()\r\n return redirect(url_for('scheduler'))\r\n\r\n\r\n@app.route('/deletegrades', methods=['POST', 'GET'])\r\ndef deletegrades():\r\n if request.form['gradesdelete'] != '':\r\n Grades.query.filter_by(id=int(request.form['gradesdelete'])).delete()\r\n db.session.commit()\r\n return redirect(url_for('academics'))\r\n\r\n\r\n@app.route('/deletemcat', methods=['POST', 'GET'])\r\ndef deletemcat():\r\n if request.form['mcatdelete'] != '':\r\n Mcat.query.filter_by(id=int(request.form['mcatdelete'])).delete()\r\n db.session.commit()\r\n return redirect(url_for('academics'))\r\n\r\n\r\n@app.route('/deletereferences', methods=['POST', 'GET'])\r\ndef deletereferences():\r\n if request.form['referencesdelete'] != '':\r\n References.query.filter_by(id=int(request.form['referencesdelete'])).delete()\r\n db.session.commit()\r\n return redirect(url_for('academics'))\r\n\r\n\r\n@app.route('/deleteactivities', methods=['POST', 'GET'])\r\ndef deleteactivities():\r\n if request.form['activitiesdelete'] != '':\r\n Activities.query.filter_by(id=int(request.form['activitiesdelete'])).delete()\r\n db.session.commit()\r\n return redirect(url_for('activities'))\r\n\r\n\r\n@app.route('/deletestatus', methods=['POST', 'GET'])\r\ndef deletestatus():\r\n if request.form['statusdelete'] != '':\r\n Status.query.filter_by(id=int(request.form['statusdelete'])).delete()\r\n db.session.commit()\r\n return redirect(url_for('status'))\r\n\r\n\r\n@app.route('/deletepersonalstatement', methods=['POST', 'GET'])\r\ndef deletepersonalstatement():\r\n if request.form['personalstatementdelete'] != '':\r\n Personal.query.filter_by(id=int(request.form['personalstatementdelete'])).delete()\r\n db.session.commit()\r\n return redirect(url_for('personalstatement'))\r\n\r\n\r\n# Makes Summary Word Doc\r\n@app.route('/summary')\r\n@login_required\r\ndef summary():\r\n activities = Activities.query.filter_by(userid=current_user.id).all()\r\n grades = Grades.query.filter_by(userid=current_user.id).all()\r\n mcat = Mcat.query.filter_by(userid=current_user.id).all()\r\n references = References.query.filter_by(userid=current_user.id).all()\r\n status = Status.query.filter_by(userid=current_user.id).all()\r\n\r\n document = Document()\r\n document.add_heading(\"Summary\", 0)\r\n document.add_heading('GPA', 1)\r\n table = document.add_table(rows=1, cols=2)\r\n hdr_cells = table.rows[0].cells\r\n hdr_cells[0].text = 'Overall'\r\n hdr_cells[1].text = 'Science'\r\n for item in grades:\r\n row_cells = table.add_row().cells\r\n row_cells[0].text = str(item.ogpa)\r\n row_cells[1].text = str(item.sgpa)\r\n\r\n document.add_heading('MCAT', 1)\r\n table = document.add_table(rows=1, cols=6)\r\n hdr_cells = table.rows[0].cells\r\n hdr_cells[0].text = 'Date'\r\n hdr_cells[1].text = 'Overall'\r\n hdr_cells[2].text = 'C/P'\r\n hdr_cells[3].text = 'CARS'\r\n hdr_cells[4].text = 'B/B'\r\n hdr_cells[5].text = 'P/S'\r\n for item in mcat:\r\n row_cells = table.add_row().cells\r\n row_cells[0].text = str(item.examdate)\r\n row_cells[1].text = str(item.overall)\r\n row_cells[2].text = str(item.cp)\r\n row_cells[3].text = str(item.cars)\r\n row_cells[4].text = str(item.bb)\r\n row_cells[5].text = str(item.ps)\r\n\r\n document.add_heading('References', 1)\r\n table = 
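# ---------------------------------------------------------------------------
# A sketch (not from the original records): the seven delete routes above
# differ only in model, form field and redirect target, so one parameterized
# route could cover them all. Sketch against the same Flask app and models;
# the route name and DELETABLE map are hypothetical:
# DELETABLE = {
#     'scheduler': (Scheduler, 'scheduler'),
#     'grades': (Grades, 'academics'),
#     'mcat': (Mcat, 'academics'),
#     'references': (References, 'academics'),
#     'activities': (Activities, 'activities'),
#     'status': (Status, 'status'),
#     'personalstatement': (Personal, 'personalstatement'),
# }
#
# @app.route('/delete/<kind>', methods=['POST'])
# def delete_row(kind):
#     model, endpoint = DELETABLE[kind]
#     row_id = request.form.get(kind + 'delete')
#     if row_id:
#         model.query.filter_by(id=int(row_id)).delete()
#         db.session.commit()
#     return redirect(url_for(endpoint))
# ---------------------------------------------------------------------------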
document.add_table(rows=1, cols=4)\r\n hdr_cells = table.rows[0].cells\r\n hdr_cells[0].text = 'Name'\r\n hdr_cells[1].text = 'Email'\r\n hdr_cells[2].text = 'Type'\r\n hdr_cells[3].text = 'Status'\r\n for item in references:\r\n row_cells = table.add_row().cells\r\n row_cells[0].text = str(item.name)\r\n row_cells[1].text = str(item.email)\r\n row_cells[2].text = str(item.type)\r\n row_cells[3].text = str(item.status)\r\n\r\n document.add_heading('Activities', 1)\r\n table = document.add_table(rows=1, cols=4)\r\n hdr_cells = table.rows[0].cells\r\n hdr_cells[0].text = 'Activity'\r\n hdr_cells[1].text = 'Type'\r\n hdr_cells[2].text = 'Reference'\r\n hdr_cells[3].text = 'Hours'\r\n for item in activities:\r\n row_cells = table.add_row().cells\r\n row_cells[0].text = str(item.activity)\r\n row_cells[1].text = str(item.type)\r\n row_cells[2].text = str(item.reference)\r\n row_cells[3].text = str(item.hours)\r\n\r\n document.add_heading('Application Status', 1)\r\n table = document.add_table(rows=1, cols=5)\r\n hdr_cells = table.rows[0].cells\r\n hdr_cells[0].text = 'University'\r\n hdr_cells[1].text = 'Primary'\r\n hdr_cells[2].text = 'Secondary'\r\n hdr_cells[3].text = 'Interview'\r\n hdr_cells[4].text = 'Offer'\r\n for item in status:\r\n row_cells = table.add_row().cells\r\n row_cells[0].text = str(item.university)\r\n row_cells[1].text = str(item.primary)\r\n row_cells[2].text = str(item.secondary)\r\n row_cells[3].text = str(item.interview)\r\n row_cells[4].text = str(item.offer)\r\n\r\n f = BytesIO()\r\n document.save(f)\r\n length = f.tell()\r\n f.seek(0)\r\n return send_file(f, as_attachment=True, attachment_filename='report.doc')\r\n\r\n\r\n# Flask app initialized\r\nif __name__ == '__main__':\r\n app.run()","sub_path":"premeddb/premeddb.py","file_name":"premeddb.py","file_ext":"py","file_size_in_byte":21745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"596162917","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import special\nimport scipy.signal as sig\nimport FxLMS\nimport os\n\n\"\"\"\n Practise FxLms Argorithm in python\n Johnson March 29th\n Version 0.0.1\n\n\"\"\"\n\ndef fetchData(working_dir):\n\n try:\n inputFile = open(working_dir+'/spk1_input')\n inputData_str = inputFile.read().splitlines()\n inputData_float = map(float,inputData_str)\n inputData = np.array(inputData_float)\n inputFile.close()\n except OSError as e:\n raise RuntimeError(\"input file open failed\")\n\n try:\n outputFile = open(working_dir+'/spk1_mic1_output')\n outputData_str = outputFile.read().splitlines()\n outputData_float = map(float,outputData_str)\n outputData = np.array(outputData_float)\n outputFile.close()\n except OSError as e:\n raise RuntimeError(\"output file open failed\")\n\n return (inputData,outputData)\n\nWORKDIR = os.getcwd()\n# Global parameters definitions\n\n(testX,testY) = fetchData(WORKDIR)\n\n\nuseFxlms = FxLMS.FxLMS(input_signal=testX,output_signal=testY,order=300,learning_rate = 0.0001)\nuseFxlms.solve()\n(final_weights,error)= useFxlms.getResults()\n\nplt.subplot(2, 1, 1)\nplt.plot(np.array(range(error.size)),error, '-')\nplt.ylabel('error')\n\nplt.subplot(2,1,2)\nplt.plot(np.array(range(final_weights.size)),final_weights, '-')\nplt.ylabel('final_weights')\nplt.show()\n\n\n\n","sub_path":"testLms.py","file_name":"testLms.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"353382881","text":"import 
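# ---------------------------------------------------------------------------
# A sketch (not from the original records): fetchData in testLms.py reads,
# splits and maps each file by hand; np.loadtxt does the same in one call,
# assuming one float per line as spk1_input / spk1_mic1_output contain:
# inputData = np.loadtxt(working_dir + '/spk1_input')
# outputData = np.loadtxt(working_dir + '/spk1_mic1_output')
# np.loadtxt raises OSError itself on a missing file, so the try/except
# wrappers can simply re-raise with the same custom messages.
# ---------------------------------------------------------------------------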
ConfigParser as configparser\nfrom utils import get_class, FORCEFIELDS_MODULE_NAME, INTEGRATORS_MODULE_NAME, DISTRIBUTIONS_MODULE_NAME\n\nSIMULATION_SECTION_NAME = 'simulation'\nFORCEFIELD_SECTION_NAME = None\nINTEGRATOR_SECTION_NAME = None\nPARTICLES_SECTION_NAME = 'particles'\n\nSIMULATION_MANDATORY_FIELDS = ('dimension', 'particles', 'steps', 'tstep', 'integrator', 'forcefield')\nINTEGRATOR_MANDATORY_FIELDS = ('pos', 'vel', 'acc')\n\nPARTICLES_OPTIONAL_FIELDS = ('pos', 'vel', 'acc')\nDEFAULT_PARTICLES_OPTIONAL_FIELDS_VALUES = {'pos': 'InLine', 'vel': 'Uniform', 'acc': 'Zeros'}\n\nclass ConfigError(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\nclass Config(object):\n def __init__(self, path):\n self.forcefield = None\n self.integrator = None\n self.dimension = None\n self.particles = None\n self.steps = None\n\n parser = configparser.ConfigParser()\n parser.read(path)\n\n self._check_config(parser)\n self._parse_config(parser)\n\n def _check_config(self, parser):\n \"\"\"\n Checks for mandatory section & option fields existance.\n Checks for mandatory section & option fields values correctnes.\n \"\"\"\n global FORCEFIELD_SECTION_NAME, INTEGRATOR_SECTION_NAME\n if not parser.has_section(SIMULATION_SECTION_NAME):\n raise ConfigError(\"Config file doesn't have '{0}' section\".format(SIMULATION_SECTION_NAME))\n for opt in SIMULATION_MANDATORY_FIELDS:\n if not parser.has_option(SIMULATION_SECTION_NAME, opt):\n raise ConfigError(\"Config file doesn't have mandatory option '{0}' in section '{1}'\".format(\n opt, SIMULATION_SECTION_NAME,\n ))\n if opt == 'integrator':\n INTEGRATOR_SECTION_NAME = parser.get(SIMULATION_SECTION_NAME, opt)\n elif opt == 'forcefield':\n FORCEFIELD_SECTION_NAME = parser.get(SIMULATION_SECTION_NAME, opt)\n else:\n val = parser.getfloat(SIMULATION_SECTION_NAME, opt)\n if val < 0:\n raise ConfigError(\"'{0}' field value in '{1}' section must be greater than 0\".format(\n opt, SIMULATION_SECTION_NAME,\n ))\n if opt == 'dimension' and not val in (1, 2, 3):\n raise ConfigError(\"'{0}' field value in '{1}' section must be either 1, 2 or 3\".format(\n opt, SIMULATION_SECTION_NAME,\n ))\n for section in [FORCEFIELD_SECTION_NAME, INTEGRATOR_SECTION_NAME]:\n if not parser.has_section(SIMULATION_SECTION_NAME):\n raise ConfigError(\"Config file doesn't have '{0}' section\".format(section))\n for opt in INTEGRATOR_MANDATORY_FIELDS:\n if not parser.has_option(INTEGRATOR_SECTION_NAME, opt):\n raise ConfigError(\"Config file doesn't have mandatory option '{0}' in section '{1}'\".format(\n opt, INTEGRATOR_SECTION_NAME,\n ))\n val = parser.getint(INTEGRATOR_SECTION_NAME, opt)\n if val < 0:\n raise ConfigError(\"'{0}' field value in '{1}' section must be greater than 0\".format(\n opt, INTEGRATOR_SECTION_NAME,\n ))\n if parser.has_section(PARTICLES_SECTION_NAME):\n for opt in parser.options(PARTICLES_SECTION_NAME):\n class_name = parser.get(PARTICLES_SECTION_NAME, opt)\n get_class(DISTRIBUTIONS_MODULE_NAME, class_name) #If class doesn't exists raises an error\n #TODO: Check if has all neccessary arguments for initialization of Forcefield class object\n #TODO: Check for value of the forcefield argument correctness\n\n def _parse_config(self, parser):\n \"\"\"Parses values\"\"\"\n self.forcefield = self._get_class_object(parser, FORCEFIELD_SECTION_NAME)\n self.integrator = self._get_class_object(parser, INTEGRATOR_SECTION_NAME)\n self.dimension = parser.getint(SIMULATION_SECTION_NAME, 'dimension')\n\n number = 
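# ---------------------------------------------------------------------------
# A sketch (not from the original records): a minimal INI file that passes
# the checks in simpleMD/config.py. The section names 'Verlet' and
# 'LennardJones' stand for whatever classes the integrators/forcefields
# modules actually provide:
#
#   [simulation]
#   dimension = 3
#   particles = 100
#   steps = 1000
#   tstep = 0.001
#   integrator = Verlet
#   forcefield = LennardJones
#
#   [Verlet]
#   pos = 1
#   vel = 1
#   acc = 1
#
#   [LennardJones]
#   epsilon = 1.0
#   sigma = 1.0
# ---------------------------------------------------------------------------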
parser.getint(SIMULATION_SECTION_NAME, 'particles')\n integr = {}\n particles_section_exists = parser.has_section(PARTICLES_SECTION_NAME)\n for name in INTEGRATOR_MANDATORY_FIELDS:\n if particles_section_exists:\n option_exists = parser.has_option(PARTICLES_SECTION_NAME, name)\n else:\n option_exists = False\n if option_exists:\n val = parser.get(PARTICLES_SECTION_NAME, name)\n else:\n val = DEFAULT_PARTICLES_OPTIONAL_FIELDS_VALUES[name]\n integr[name] = (parser.getint(INTEGRATOR_SECTION_NAME, name) + 1, val)\n\n #pos = parser.getint(INTEGRATOR_SECTION_NAME, 'pos')\n #vel = parser.getint(INTEGRATOR_SECTION_NAME, 'vel')\n #acc = parser.getint(INTEGRATOR_SECTION_NAME, 'acc')\n\n self.particles = (self.dimension, number, integr)#pos, vel, acc)\n self.steps = parser.getint(SIMULATION_SECTION_NAME, 'steps')\n\n def _get_class_object(self, parser, section_name):\n if section_name == FORCEFIELD_SECTION_NAME:\n module_name = FORCEFIELDS_MODULE_NAME\n else:\n module_name = INTEGRATORS_MODULE_NAME\n cls = get_class(module_name, section_name)\n if module_name == FORCEFIELDS_MODULE_NAME:\n options = {opt: parser.getfloat(section_name, opt) for opt in parser.options(section_name)}\n return cls(**options)\n else:\n return cls(parser.getfloat(SIMULATION_SECTION_NAME, 'tstep'))","sub_path":"Project_1/simpleMD/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"90187858","text":"from pangolin import setLogFileName, log_init\nimport oracle\n\nlogconfig = 'config/logging.conf'\n\nlog = log_init(logconfig,'both', setLogFileName('MasterDatasourceList'), 'debug')\n\n# get list of datasources\nsql = 'select connectivity_type, database_type, datasource '\n#sql += \"from hyp_user.epm_datasources where datasource IN('ODA_DATAMART','ODA_PROSTAGE') \"\nsql += \"from hyp_user.epm_datasources where active = 'Y' order by datasource\"\n\nresults = oracle.select('EREP12P', sql)\n\n#for row in results.fetchall():\n# print row[0], row[1], row[2]\n\n\nconnectivityType = {}\nconnectivityType['ODBC'] = 'API_CODE_ODBC'\nconnectivityType['Oracle Net'] = 'API_CODE_NET8'\nconnectivityType['ESSBASE'] = 'API_CODE_ESSBASE'\n\ndatabaseType = {}\ndatabaseType['ODBC'] = 'SVR_CODE_ODBC'\ndatabaseType['Oracle'] = 'SVR_CODE_ORA8'\ndatabaseType['ESSBASE'] = 'SVR_CODE_ESSBASE'\ndatabaseType['MS SQL Server'] = 'SVR_CODE_MSSQLSVR70'\n\nwith open('files/MasterDatasourceList.XML', 'w') as f:\n\n # header\n f.write(\"<?xml version='1.0' encoding='utf-8'?>\")\n f.write(\"<BRIO>\")\n f.write(\" <SETUP>\")\n f.write(\" <key>14307078</key>\")\n f.write(\" </SETUP>\")\n f.write(\" <SERVICES app=\\\"DAS\\\">\")\n f.write(\" <service type=\\\"DataAccess\\\">\")\n f.write(\" <properties>\")\n f.write(\" <propertylist name=\\\"DAS_DB_SERVER_PROPERTIES_LIST\\\" type=\\\"local\\\">\")\n\n # datasource loop\n for row in results.fetchall():\n\n f.write(\" <propertystruct name=\\\"DAS_DB_SERVER_PROPERTY\\\">\")\n f.write(\" <property name=\\\"DAS_DB_CONNECTIVITY\\\">\" + connectivityType[row[0]] + \"</property>\")\n f.write(\" <property name=\\\"DAS_DB_TYPE\\\">\" + databaseType[row[1]] + \"</property>\")\n f.write(\" <property name=\\\"DAS_DB_HOST\\\">\" + row[2] + \"</property>\")\n f.write(\" <property name=\\\"DAS_OLEDB_DATASOURCE\\\"></property>\")\n f.write(\" <property name=\\\"DAS_MAXIMUM_CONNECTIONS_TO_DB_LIMIT\\\">2000</property>\")\n f.write(\" <property 
name=\\\"DAS_MAXIMUM_CONNECTION_WAIT_QUEUE_SIZE_LIMIT\\\">100</property>\")\n f.write(\" <property name=\\\"DAS_MINIMUM_CONNECTION_IDLE_TIME_SECONDS\\\">300</property>\")\n f.write(\" <property name=\\\"DAS_CONNECTION_RESOURCE_REAP_INTERVAL_SECONDS\\\">180</property>\")\n f.write(\" <property name=\\\"DAS_MAXIMUM_NUMBER_OF_CONNECTIONS_IN_CONNECTION_POOL\\\">1000</property>\")\n f.write(\" <property name=\\\"DAS_MINIMUM_EMPTY_CONNECTION_POOL_IDLE_TIME_SECONDS\\\">300</property>\")\n f.write(\" </propertystruct>\")\n\n # footer\n f.write(\" </propertylist>\")\n f.write(\" </properties>\")\n f.write(\" </service>\")\n f.write(\" </SERVICES>\")\n f.write(\"</BRIO>\")\n","sub_path":"corral/generate_datasource_XMLFile.py","file_name":"generate_datasource_XMLFile.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"475750004","text":"from flake8_plugin_utils import Error\n\n\nclass RouteDecoratorError(Error):\n code = \"CF001\"\n message = \"Avoid `route` decorator. Use a suitable HTTP method as decorator.\"\n\n\nclass RouterPrefixError(Error):\n code = \"CF002\"\n message = (\n \"Avoid using `prefix` parameter on `include_router`.\"\n \"Use it on the `Router` initialization.\"\n )\n","sub_path":"flake8_fastapi/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"372184024","text":"def from_to_prefix(replace_from, replace_to, add_prefix, force_prefix=False):\n def op(var_name):\n # Set the new name\n new_name = var_name\n if None not in [replace_from, replace_to]:\n new_name = new_name.replace(replace_from, replace_to)\n if add_prefix:\n if force_prefix or not new_name.startswith(add_prefix):\n # force prefix or add prefix if it does not exist yet\n new_name = add_prefix + new_name\n\n return new_name\n\n return op\n\n\ndef rename(checkpoint, op, dry_run):\n import tensorflow as tf\n tf.compat.v1.reset_default_graph()\n with tf.compat.v1.Session() as sess:\n for var_name, _ in tf.compat.v1.train.list_variables(checkpoint):\n # Load the variable\n var = tf.compat.v1.train.load_variable(checkpoint, var_name)\n\n # Set the new name\n new_name = op(var_name)\n\n if dry_run:\n print('%s would be renamed to %s.' % (var_name, new_name))\n else:\n if var_name == new_name:\n print('No change for {}'.format(var_name))\n else:\n print('Renaming %s to %s.' 
% (var_name, new_name))\n\n # Rename the variable\n tf.Variable(var, name=new_name)\n\n if not dry_run:\n # Save the variables\n saver = tf.compat.v1.train.Saver()\n sess.run(tf.compat.v1.global_variables_initializer())\n saver.save(sess, checkpoint)\n\n tf.compat.v1.reset_default_graph()\n","sub_path":"calamari_ocr/ocr/migrations/renametensors.py","file_name":"renametensors.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"524687495","text":"__all__ = [\n \"beet_default\",\n \"lectern\",\n]\n\n\nimport subprocess\nfrom typing import Iterable, List, Optional, cast\n\nfrom beet import Context, Plugin\nfrom beet.core.utils import JsonDict\n\nfrom .document import Document\n\n\ndef beet_default(ctx: Context):\n config = ctx.meta.get(\"lectern\", cast(JsonDict, {}))\n\n load = config.get(\"load\", ())\n snapshot = config.get(\"snapshot\")\n external_files = config.get(\"external_files\")\n scripts = config.get(\"scripts\", ())\n\n ctx.require(lectern(load, snapshot, external_files, scripts))\n\n\ndef lectern(\n load: Iterable[str] = (),\n snapshot: Optional[str] = None,\n external_files: Optional[str] = None,\n scripts: Iterable[List[str]] = (),\n) -> Plugin:\n \"\"\"Return a plugin that handles markdown files with lectern.\"\"\"\n\n def plugin(ctx: Context):\n document = ctx.inject(Document)\n\n for pattern in load:\n for path in ctx.directory.glob(pattern):\n document.load(path)\n\n for arguments in scripts:\n result = subprocess.run(\n arguments,\n cwd=ctx.directory,\n check=True,\n stdout=subprocess.PIPE,\n )\n document.add_text(result.stdout.decode())\n\n yield\n\n if snapshot:\n document.save(\n ctx.directory / snapshot,\n ctx.directory / external_files if external_files else None,\n )\n\n return plugin\n","sub_path":"lectern/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"313690686","text":"import unittest\n\nfrom menu.burritos.con_carne import BurritoConCarne\nfrom menu.burritos.grilled_chicken import BurritoGrilledChicken\nfrom menu.burritos.pork import BurritoPork\nfrom menu.burritos.pulled_chicken import BurritoPulledChicken\nfrom menu.burritos.vegan import BurritoVegan\nfrom menu.enchiladas.con_carne import EnchiladaConCarne\nfrom menu.enchiladas.grilled_chicken import EnchiladaGrilledChicken\nfrom menu.enchiladas.pork import EnchiladaPork\nfrom menu.enchiladas.pulled_chicken import EnchiladaPulledChicken\nfrom menu.enchiladas.vegan import EnchiladaVegan\nfrom menu.quesadillas.con_carne import QuesadillaConCarne\nfrom menu.quesadillas.grilled_chicken import QuesadillaGrilledChicken\nfrom menu.quesadillas.pork import QuesadillaPork\nfrom menu.quesadillas.pulled_chicken import QuesadillaPulledChicken\nfrom menu.quesadillas.vegan import QuesadillaVegan\nfrom food_store import FoodStore\n\n\nclass TestSimpleFactory(unittest.TestCase):\n\n def test_singleton(self):\n first_singleton = FoodStore()\n second_singleton = FoodStore()\n self.assertTrue(first_singleton is second_singleton)\n \n def test_create_burritos(self):\n store = FoodStore()\n\n burrito = BurritoConCarne()\n burrito = store.order_food(\"burrito_con_carne\")\n self.assertIsInstance(burrito, BurritoConCarne)\n\n burrito = BurritoGrilledChicken()\n burrito = store.order_food(\"burrito_grilled_chicken\")\n self.assertIsInstance(burrito, BurritoGrilledChicken)\n\n burrito = BurritoPork()\n burrito 
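# ---------------------------------------------------------------------------
# A sketch (not from the original records): beet_default in lectern/plugin.py
# reads its options from ctx.meta['lectern']; a config shaped like the keys
# it looks up (all values illustrative):
# ctx.meta['lectern'] = {
#     'load': ['*.md'],
#     'snapshot': 'out.md',
#     'external_files': 'out_files',
#     'scripts': [['python', 'generate.py']],
# }
# ---------------------------------------------------------------------------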
= store.order_food(\"burrito_pork\")\n self.assertIsInstance(burrito, BurritoPork)\n\n burrito = BurritoPulledChicken()\n burrito = store.order_food(\"burrito_pulled_chicken\")\n self.assertIsInstance(burrito, BurritoPulledChicken)\n\n burrito = BurritoVegan()\n burrito = store.order_food(\"burrito_vegan\")\n self.assertIsInstance(burrito, BurritoVegan)\n\n def test_create_enchiladas(self):\n store = FoodStore()\n\n enchilada = EnchiladaConCarne()\n enchilada = store.order_food(\"enchilada_con_carne\")\n self.assertIsInstance(enchilada, EnchiladaConCarne)\n\n enchilada = EnchiladaGrilledChicken()\n enchilada = store.order_food(\"enchilada_grilled_chicken\")\n self.assertIsInstance(enchilada, EnchiladaGrilledChicken)\n\n enchilada = EnchiladaPork()\n enchilada = store.order_food(\"enchilada_pork\")\n self.assertIsInstance(enchilada, EnchiladaPork)\n\n enchilada = EnchiladaPulledChicken()\n enchilada = store.order_food(\"enchilada_pulled_chicken\")\n self.assertIsInstance(enchilada, EnchiladaPulledChicken)\n\n enchilada = EnchiladaVegan()\n enchilada = store.order_food(\"enchilada_vegan\")\n self.assertIsInstance(enchilada, EnchiladaVegan)\n\n def test_create_quesadillas(self):\n store = FoodStore()\n\n quesadilla = QuesadillaConCarne()\n quesadilla = store.order_food(\"quesadilla_con_carne\")\n self.assertIsInstance(quesadilla, QuesadillaConCarne)\n\n quesadilla = QuesadillaGrilledChicken()\n quesadilla = store.order_food(\"quesadilla_grilled_chicken\")\n self.assertIsInstance(quesadilla, QuesadillaGrilledChicken)\n\n quesadilla = QuesadillaPork()\n quesadilla = store.order_food(\"quesadilla_pork\")\n self.assertIsInstance(quesadilla, QuesadillaPork)\n\n quesadilla = QuesadillaPulledChicken()\n quesadilla = store.order_food(\"quesadilla_pulled_chicken\")\n self.assertIsInstance(quesadilla, QuesadillaPulledChicken)\n\n quesadilla = QuesadillaVegan()\n quesadilla = store.order_food(\"quesadilla_vegan\")\n self.assertIsInstance(quesadilla, QuesadillaVegan)\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"02Fabryka/Students/2019/JarzembinskiBartlomiej/factory_with_class_registration_reflection/food_test.py","file_name":"food_test.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"314928254","text":"from zerver.lib.test_classes import WebhookTestCase\n\n\nclass InspingHookTests(WebhookTestCase):\n STREAM_NAME = 'test'\n URL_TEMPLATE = \"/api/v1/external/insping?&api_key={api_key}&stream={stream}\"\n FIXTURE_DIR_NAME = 'insping'\n\n def test_website_state_available_message(self) -> None:\n expected_topic = \"insping\"\n expected_message = \"\"\"\nState changed to **Available**:\n* **URL**: http://privisus.zulipdev.org:9991\n* **Response time**: 223 ms\n* **Timestamp**: Fri Dec 29 17:23:46 2017\n\"\"\".strip()\n\n self.send_and_test_stream_message('website_state_available',\n expected_topic, expected_message,\n content_type=\"application/x-www-form-urlencoded\")\n\n def test_website_state_not_responding_message(self) -> None:\n expected_topic = \"insping\"\n expected_message = \"\"\"\nState changed to **Not Responding**:\n* **URL**: http://privisus.zulipdev.org:9991\n* **Response time**: 942 ms\n* **Timestamp**: Fri Dec 29 17:13:46 2017\n\"\"\".strip()\n\n self.send_and_test_stream_message('website_state_not_responding',\n expected_topic, expected_message,\n content_type=\"application/x-www-form-urlencoded\")\n\n def get_body(self, fixture_name: str) -> str:\n return 
self.webhook_fixture_data(\"insping\", fixture_name, file_type=\"json\")\n","sub_path":"zerver/webhooks/insping/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"96010226","text":"from pymongo import MongoClient\nimport os\nfrom flask import Flask, request, render_template\nimport requests\nimport json\n\napp = Flask(__name__)\n\nclient = MongoClient(os.environ.get('MONGODB_URL'))\nVERIFY_TOKEN = os.environ.get('FB_VERIFY_TOKEN')\nPAGE_ACCESS_TOKEN = os.environ.get('PAGE_ACCESS_TOKEN')\nOMDB_API_KEY = os.environ.get('OMDB_API_KEY')\ndb = client.rmdr\n\n@app.route(\"/\")\ndef hello_world():\n return \"Hello World!\"\n\n@app.route(\"/webhook\", methods=['GET', 'POST'])\ndef webhook():\n if request.method == 'GET':\n if('hub.mode' in request.args and 'hub.verify_token' in request.args and 'hub.challenge' in request.args):\n mode = request.args['hub.mode']\n token = request.args['hub.verify_token']\n challenge = request.args['hub.challenge']\n if mode == 'subscribe' and token == VERIFY_TOKEN:\n app.logger.info('WEBHOOK_VERIFIED')\n return challenge\n else:\n resp = '', 403\n return resp\n elif request.method == 'POST':\n body = request.get_json()\n if(body.get('object') == 'page'):\n for entry in body.get('entry'):\n db.logs.insert_one(body)\n if 'message' in entry.get('messaging')[0]:\n message = entry.get('messaging')[0].get('message')\n handle_message(message, entry.get('messaging')[0].get('sender').get('id'))\n if 'attachments' in message:\n handle_attachments(message.get('attachments'), entry.get('messaging')[0].get('sender').get('id'))\n elif 'postback' in entry.get('messaging')[0]:\n handle_postback(entry.get('messaging')[0].get('postback').get('payload'), entry.get('messaging')[0].get('sender').get('id'))\n return 'EVENT_RECEIVED'\n return '', 403\n\ndef handle_message(message, sender_psid):\n if 'text' in message and not('is_echo' in message):\n state = db.users.find_one({\"psid\" : sender_psid}, {\"_id\" : 0, \"state\" : 1}).get('state')\n print(state)\n if message.get('text').lower().find(\"bonjour\") != -1:\n res = hello(sender_psid)\n call_send_API(res, sender_psid)\n elif 'quick_reply' in message:\n payload = message.get('quick_reply').get('payload')\n if payload == \"ADD_SEEN_MOVIE\":\n res = {\n \"text\" : \"Quel est le titre ?\"\n }\n db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"WAITING_SEEN_MOVIE_TITLE\"}})\n call_send_API(res, sender_psid)\n elif payload == \"ADD_WISH\":\n res = {\n \"text\" : \"Quel est le titre ?\"\n }\n db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"WAITING_WISH_MOVIE_TITLE\"}})\n call_send_API(res, sender_psid)\n elif state == \"WAITING_SEEN_MOVIE_TITLE\":\n r = requests.get('http://www.omdbapi.com/?s={}&apikey={}'.format(message.get('text'), OMDB_API_KEY))\n body = r.json()\n if 'Search' in body:\n res = build_movie_list(body.get('Search'), 1, message.get('text'), \"SEEN\")\n call_send_API(res, sender_psid)\n db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"WAITING_SEEN_TITLE_SELECT_FROM_LIST\"}})\n else:\n res = {\n \"text\" : \"Désolé, aucun film trouvé\"\n }\n call_send_API(res, sender_psid)\n elif state == \"WAITING_WISH_MOVIE_TITLE\":\n r = requests.get('http://www.omdbapi.com/?s={}&apikey={}'.format(message.get('text'), OMDB_API_KEY))\n body = r.json()\n if 'Search' in body:\n res = build_movie_list(body.get('Search'), 1, message.get('text'), \"WISH\")\n 
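# a successful OMDb search response looks roughly like {\"Search\": [{\"Title\": ..., \"imdbID\": ..., \"Poster\": ...}]} (shape inferred from build_movie_list below)\n                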
call_send_API(res, sender_psid)\n db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"WAITING_WISH_TITLE_SELECT_FROM_LIST\"}})\n else:\n res = {\n \"text\" : \"Désolé, aucun film trouvé\"\n }\n call_send_API(res, sender_psid)\n\ndef handle_postback(payload, sender_psid):\n json_content = json.loads(payload)\n if 'origin' in json_content:\n if json_content.get('origin') == \"WAITING_SEEN_TITLE_SELECT_FROM_LIST_VIEWMORE\":\n range_factor = json_content.get('range_factor')\n query = json_content.get('original_search_query')\n r = requests.get('http://www.omdbapi.com/?s={}&apikey={}'.format(query, OMDB_API_KEY))\n body = r.json()\n if 'Search' in body:\n res = build_movie_list(body.get('Search'), range_factor, query, \"SEEN\")\n call_send_API(res, sender_psid)\n db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"WAITING_SEEN_TITLE_SELECT_FROM_LIST\"}})\n else:\n res = {\n \"text\" : \"Désolé, aucun film trouvé\"\n }\n call_send_API(res, sender_psid)\n elif json_content.get('origin') == \"SELECT_SEEN_MOVIE_FROM_LIST\":\n added_user_movie_type = add_movie(json_content, sender_psid, \"SEEN\")\n res = {}\n if added_user_movie_type == -1:\n res = {\n \"text\" : \"{} fait déjà partie de votre liste de films vus\".format(json_content.get('imdb_title'))\n }\n else:\n res = {\n \"text\" : \"{} a bien été ajouté à ta liste de films vus.\".format(json_content.get('imdb_title'))\n }\n call_send_API(res, sender_psid)\n db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"HELLO\"}})\n elif json_content.get('origin') == \"WAITING_WISH_TITLE_SELECT_FROM_LIST_VIEWMORE\":\n range_factor = json_content.get('range_factor')\n query = json_content.get('original_search_query')\n r = requests.get('http://www.omdbapi.com/?s={}&apikey={}'.format(query, OMDB_API_KEY))\n body = r.json()\n if 'Search' in body:\n res = build_movie_list(body.get('Search'), range_factor, query, \"WISH\")\n call_send_API(res, sender_psid)\n db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"WAITING_WISH_TITLE_SELECT_FROM_LIST\"}})\n else:\n res = {\n \"text\" : \"Désolé, aucun film trouvé\"\n }\n call_send_API(res, sender_psid)\n elif json_content.get('origin') == \"SELECT_WISH_MOVIE_FROM_LIST\":\n added_user_movie_type = add_movie(json_content, sender_psid, \"WISH\")\n res = {}\n if added_user_movie_type == -1:\n res = {\n \"text\" : \"{} fait déjà partie de votre liste d'envies.\".format(json_content.get('imdb_title'))\n }\n else:\n res = {\n \"text\" : \"{} a bien été ajouté à ta liste d'envies.\".format(json_content.get('imdb_title'))\n }\n call_send_API(res, sender_psid)\n db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"HELLO\"}})\n print(\"POSTBACK CONTAINS JSON\")\n\ndef handle_attachments(attachments, sender_psid):\n attachment = attachments[0]\n res = {\n \"text\" : \"Cette image est également disponible à l'url : {}\".format(attachment.get('payload').get('url'))\n }\n call_send_API(res, sender_psid)\n\ndef call_send_API(res, sender_psid):\n request_body = {\n \"recipient\": {\n \"id\": sender_psid\n },\n \"message\": res\n }\n print(request_body)\n r = requests.post('https://graph.facebook.com/v2.6/me/messages?access_token='+PAGE_ACCESS_TOKEN, json = request_body)\n print(r.json())\n\ndef build_movie_list(omdb_result, range_factor, query, user_movie_type):\n VIEW_LIMIT = 4\n i = (range_factor - 1)*VIEW_LIMIT\n curr_limit = i + VIEW_LIMIT\n elements = []\n while i < curr_limit and i < len(omdb_result):\n payload = {\n \"origin\" : \"SELECT_\" + 
user_movie_type + \"_MOVIE_FROM_LIST\",\n \"imdb_id\" : omdb_result[i].get('imdbID'),\n \"imdb_title\" : omdb_result[i].get('Title')\n }\n poster_url = \"\"\n if omdb_result[i].get('Poster') != \"N/A\":\n poster_url = omdb_result[i].get('Poster')\n else:\n poster_url = \"https://cdn140.picsart.com/253668100008212.png?r1024x1024\"\n elements.append(\n {\n \"title\" : omdb_result[i].get('Title'),\n \"image_url\" : poster_url,\n \"default_action\": {\n \"type\": \"web_url\",\n \"url\": \"https://www.imdb.com/title/{}/\".format(omdb_result[i].get('imdbID')),\n \"messenger_extensions\": True,\n \"webview_height_ratio\": \"tall\"\n },\n \"buttons\": [\n {\n \"title\": \"Choisir\",\n \"type\": \"postback\",\n \"payload\": json.dumps(payload)\n }\n ]\n }\n )\n i += 1\n \n viewmore_payload = {\n \"origin\" : \"WAITING_\" + user_movie_type + \"_TITLE_SELECT_FROM_LIST_VIEWMORE\",\n \"range_factor\" : range_factor+1,\n \"original_search_query\" : query\n }\n res = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"list\",\n \"top_element_style\": \"compact\",\n \"elements\": elements,\n \"buttons\": [\n {\n \"title\": \"View More\",\n \"type\": \"postback\",\n \"payload\": json.dumps(viewmore_payload) \n }\n ] \n }\n }\n }\n return res\n\ndef add_movie(mov_info, sender_psid, user_movie_type):\n existing_entry = db.films.find_one({\"imdb_id\" : mov_info.get('imdb_id')}, {\"_id\" : 1})\n inserted_id = -1\n if existing_entry == None:\n inserted_id = db.films.insert({\n \"imdb_id\" : mov_info.get('imdb_id'),\n \"title\" : mov_info.get('imdb_title'),\n \"comments\" : []\n })\n else:\n inserted_id = existing_entry.get('_id')\n already_added = db.users.find_one({\"psid\" : sender_psid, \"films.imdb_id\" : mov_info.get('imdb_id')}, {\"_id\" : 1})\n if already_added != None:\n return -1\n else:\n db.users.update({\"psid\" : sender_psid}, {\"$push\":{\"films\" : {\n \"user_movie_type\" : user_movie_type,\n \"imdb_id\" : mov_info.get('imdb_id'),\n \"film_id\" : inserted_id\n }}})\n return 0\n\ndef hello(sender_psid):\n r = requests.get(\"https://graph.facebook.com/v2.6/{}?fields=first_name,last_name&access_token={}\".format(sender_psid, PAGE_ACCESS_TOKEN))\n body = r.json()\n resp_text = \"Bonjour {}.\".format(body.get(\"first_name\"))\n if db.users.count({\"psid\" : sender_psid}) >= 1:\n resp_text += \"\\nBienvenue à nouveau parmi nous ! 
:)\"\n        resp_text += \"\\nQu'est-ce qui t'amène ?\"\n    else:\n        user = {\n            \"first_name\" : body.get(\"first_name\"),\n            \"last_name\" : body.get(\"last_name\"),\n            \"psid\" : sender_psid,\n            \"films\" : [],\n            \"state\" : \"HELLO\"\n        }\n        db.users.insert_one(user)\n    quick_replies = [\n        {\n            \"content_type\":\"text\",\n            \"title\":\"Ajout film vu\",\n            \"payload\":\"ADD_SEEN_MOVIE\"\n        },\n        {\n            \"content_type\":\"text\",\n            \"title\":\"Ajout envie\",\n            \"payload\":\"ADD_WISH\"\n        }\n    ]\n    db.users.update({\"psid\" : sender_psid}, {\"$set\":{\"state\" : \"HELLO\"}})\n    res = {\n        \"text\" : resp_text,\n        \"quick_replies\" : quick_replies\n    }\n    return res","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"104109358","text":"import time\nfrom DynamicSpder.HtmlDownloader import HtmlDownloader\nfrom DynamicSpder.HtmlParser import HtmlParser\nfrom DynamicSpder.DataOutput import DataOutput\n\n\nclass Spider(object):\n\n\n    def __init__(self):\n        self.download = HtmlDownloader()\n        self.parser = HtmlParser()\n        self.output = DataOutput()\n\n    def crawl(self,root_url):\n        content = self.download.download(root_url)\n        urls = self.parser.url_parser(root_url, content)\n        for url in urls:\n\n            t = time.strftime('%Y%m%d%H%M%S3282', time.localtime())\n            rank_url = 'http://service.library.mtime.com/Movie.api' \\\n                       '?Ajax_CallBack=true' \\\n                       '&Ajax_CallBackType=Mtime.Library.Services' \\\n                       '&Ajax_CallBackMethod=GetMovieOverviewRating' \\\n                       '&Ajax_CrossDomain=1' \\\n                       '&Ajax_RequestUrl=%s' \\\n                       '&t=%s' \\\n                       '&Ajax_CallBackArgument0=%s'%(url[0],t,url[1])\n            rank_content = self.download.download(rank_url)\n            data = self.parser.json_parser(rank_url,rank_content)\n            self.output.store_data(data)\n\n        self.output.output_end()\n        print('crawl finished')\n\n\nif __name__ == '__main__':\n    Spider().crawl('http://theater.mtime.com/China_Jiangsu_Province_Nanjing/')","sub_path":"DynamicSpder/Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"356203084","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef ajout(mot1,mot2,d):\n    trouver = 0\n    for cle in d.keys():\n        if mot1 == cle:\n            trouver = 1\n    if trouver == 1:\n        print(\" the word \",mot1,\" is already a key in the dictionary\")\n    else:\n        d[mot1] = mot2\n\n    \ndef delete(mot,d):\n    trouver = 0\n    for cle in d.keys():\n        if mot == cle:\n            trouver = 1\n    if trouver == 0:\n        print(\" the word \",mot,\" is not a key of the dictionary\")\n    else:\n        del(d[mot])\n\n\ndef affiche(d):\n    for cle in d.keys():\n        print(\"key = \",cle,\" word = \",d[cle])\n\n    \ndef exo1():\n\n    d = {\"argent\":\"money\",\"voiture\":\"car\",\"maison\":\"house\",\"arbre\":\"tree\",\"ordinateur\":\"computer\"}\n    mot1 = input(\"enter the French word to use as a key: \")\n    mot2 = input(\"enter the corresponding English word: \")\n    ajout(mot1,mot2,d)\n\n    affiche(d)\n\n    mot1 = input(\"enter the French word to remove from the dictionary: \")\n    delete(mot1,d)\n\n    affiche(d)\n\n\nexo1()\n    \n","sub_path":"Licence 2/S4/Outils_statistiques/TP2/exo1.py","file_name":"exo1.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"297037138","text":"import os, sys, glob, time, pathlib\n\nimport keras\nfrom keras import applications\nfrom keras.models import Model, load_model\nfrom keras.layers import 
Input, InputLayer, Conv2D, Activation, LeakyReLU, Concatenate, MaxPooling2D, Dropout, UpSampling2D, concatenate\nfrom helpers import BilinearUpSampling2D\nfrom keras.optimizers import Adam\n\n# Kerasa / TensorFlow\nfrom utilities import get_nyu_train_test_data, load_test_data, depth_loss_function\n\ndef create_model():\n \n print('Loading base model (UNet)..')\n inputs = Input(shape=(None, None, 3))\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n\n up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\n merge6 = concatenate([drop4,up6], axis = 3)\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n\n up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))\n merge7 = concatenate([conv3,up7], axis = 3)\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\n\n up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n merge8 = concatenate([conv2,up8], axis = 3)\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\n\n conv10 = Conv2D(1, 1)(conv8)\n\n model = Model(input = inputs, output = conv10)\n model.summary()\n print(model.layers[-1].output.shape)\n return model\n\ndef train(args, batch_size = 5 , epochs = 5, lr = 0.0001):\n \n if batch_size.__class__ == tuple:\n batch_size = batch_size[0]\n if epochs.__class__ == tuple:\n epochs = epochs[0]\n if lr.__class__ == tuple:\n lr = lr[0] \n \n print(\"batch_size = {0} , epochs = {1}, lr = {2}\".format(batch_size,epochs, lr))\n\n #creates encoder and decoder model\n model = create_model()\n\n train_generator, test_generator = get_nyu_train_test_data( batch_size )\n\n # Training session details\n runPath = 
os.path.join(os.getcwd(),'models',str(int(time.time())))\n pathlib.Path(runPath).mkdir(parents=True, exist_ok=True)\n print('Output: ' + runPath)\n\n # Optimizer\n optimizer = Adam(lr=lr, amsgrad=True)\n\n model.compile(loss=depth_loss_function, optimizer=optimizer)\n\n # Start training\n model.fit_generator(train_generator, callbacks=None, validation_data=test_generator, epochs=epochs, shuffle=True)\n\n # Save the final trained model:\n print('Model Save Began')\n model.save(runPath + '/unetmodel.h5')\n print('Model (unetmodel.h5) has been Saved!!')\n pass\n\nif __name__ == \"__main__\":\n args = sys.argv[ 1: ]\n if (len(args) <= 0) :\n sys.exit( 0 )\n \n batch_size = int(args[0]), \n epochs = int(args[1]), \n lr = float(args[2]),\n train(args, batch_size, epochs, lr)","sub_path":"depth_estimation.py","file_name":"depth_estimation.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"626339528","text":"from django.shortcuts import render\nfrom .models import *\nfrom django.http import HttpResponse\nfrom .serializers import *\nfrom rest_framework.generics import ListAPIView\n\n# Create your views here.\n\ndef index(request):\n return HttpResponse(\"<h1>Hello World</h1>\")\n\nclass SchoolView(ListAPIView):\n #queryset=School.objects.all()\n def get_queryset(self):\n return School.objects.filter(teacher_first=' '.join(self.kwargs[\"t_first\"].split('_')),teacher_last=' '.join(self.kwargs[\"t_last\"].split('_')))\n serializer_class=SchoolSerializer\n\nclass TeacherView(ListAPIView):\n #queryset=Teachers.objects.all()\n def get_queryset(self):\n return Teachers.objects.filter(school_name=' '.join(self.kwargs[\"t_name\"].split('_')))\n serializer_class=TeacherSerializer\n\nclass AverageEmotionView(ListAPIView):\n def get_queryset(self):\n d={\"confusion\":0,\"happy\":0,\"sad\":0,\"surprised\":0}\n first=' '.join(self.kwargs[\"t_first\"].split('_'))\n last=' '.join(self.kwargs[\"t_last\"].split('_'))\n students=School.objects.filter(teacher_first=first,teacher_last=last)\n teacher=Teachers.objects.filter(teacher_name=first+' '+last)[0]\n sess_id=teacher.sessions\n n=0\n l=list(d.keys())\n for student in students:\n if student.total>0:\n n+=1\n arr=[student.confusion,student.happy,student.sad,student.surprised]\n for i in range(len(arr)):\n d[l[i]]+=arr[i]/(student.total)\n n=max(1,n)\n items=EmotionQueries.objects.filter(teacher_name=first+' '+last,school_name=student.schoolname).delete()\n emotion=EmotionQueries(teacher_name=first+' '+last,\n school_name=student.schoolname,\n confusion=d[\"confusion\"]/n*100,\n happy=d[\"happy\"]/n*100,\n sad=d[\"sad\"]/n*100,\n surprised=d[\"surprised\"]/n*100,\n session=sess_id)\n emotion.save()\n return EmotionQueries.objects.filter(teacher_name=first+' '+last,session=sess_id)\n serializer_class=EmotionSerializer\n\nclass RealTimeFrameView(ListAPIView):\n def get_queryset(self):\n d={\"confusion\":0,\"happy\":0,\"sad\":0,\"surprised\":0}\n first=' '.join(self.kwargs[\"t_first\"].split('_'))\n last=' '.join(self.kwargs[\"t_last\"].split('_'))\n frames=Frame.objects.filter(teacher_name=first+' '+last)\n n=max(1,frames.count())\n l=list(d.keys())\n for frame in frames:\n arr=[frame.confusion,frame.happy,frame.sad,frame.surprised]\n for i in range(len(arr)):\n d[l[i]]+=arr[i]\n items=EmotionQueries1.objects.filter(teacher_name=first+' '+last).delete()\n emotion=EmotionQueries1(teacher_name=first+' '+last,\n confusion=d[\"confusion\"]/n*100,\n 
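# mean over the collected frames, scaled by 100 (values assumed to be 0-1 ratios)\n                                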
happy=d[\"happy\"]/n*100,\n sad=d[\"sad\"]/n*100,\n surprised=d[\"surprised\"]/n*100)\n emotion.save()\n return EmotionQueries1.objects.filter(teacher_name=first+' '+last)\n serializer_class=EmotionSerializer1","sub_path":"DataMeetDjango/DataMeetDjango/education/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"304407070","text":"from musicscore.dtd.dtd import Sequence, Element\nfrom musicscore.musicxml.elements.xml_element import XMLElement\nfrom musicscore.musicxml.types.complextypes.complextype import ComplexType\nfrom musicscore.musicxml.types.simple_type import String\n\n\nclass VirtualLibrary(XMLElement, String):\n \"\"\"\n The virtual-library element indicates the virtual instrument library name.\n \"\"\"\n _TAG = 'virtual-library'\n\n def __init__(self, value=None, *args, **kwargs):\n super().__init__(tag=self._TAG, value=value, *args, **kwargs)\n\n\nclass VirtualName(XMLElement, String):\n \"\"\"\n The virtual-name element indicates the library-specific name for the virtual instrument.\n \"\"\"\n _TAG = 'virtual-name'\n\n def __init__(self, value=None, *args, **kwargs):\n super().__init__(tag=self._TAG, value=value, *args, **kwargs)\n\n\nclass ComplexTypeVirtualInstrument(ComplexType):\n \"\"\"\n The virtual-instrument element defines a specific virtual instrument used for an instrument sound.\n \"\"\"\n _DTD = Sequence(\n Element(VirtualLibrary, min_occurrence=0),\n Element(VirtualName, min_occurrence=0)\n\n )\n\n def __init__(self, tag, value=None, *args, **kwargs):\n super().__init__(tag=tag, value=value, *args, **kwargs)\n","sub_path":"musicscore/musicxml/types/complextypes/virtualinstrument.py","file_name":"virtualinstrument.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"284042198","text":"import sys\nwith open(sys.argv[1], 'r') as f:\n lines = [int(line.rstrip()) for line in f]\n\nans = 0\nfor li in lines:\n for i in lines:\n for i2 in lines:\n if(i+li+i2==2020):\n ans = i*li*i2 \nprint(ans)\n","sub_path":"Day1/trestedLoop.py","file_name":"trestedLoop.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"283556471","text":"\"\"\"\nLC501. Find Mode in Binary Search Tree\nEasy\n\nGiven a binary search tree (BST) with duplicates, find all the mode(s) (the most frequently occurred element) in the given BST.\n\nAssume a BST is defined as follows:\n\nThe left subtree of a node contains only nodes with keys less than or equal to the node's key.\nThe right subtree of a node contains only nodes with keys greater than or equal to the node's key.\nBoth the left and right subtrees must also be binary search trees.\n\nNote: If a tree has more than one mode, you can return them in any order.\n\nFollow up: Could you do that without using any extra space? 
(Assume that the implicit stack space incurred due to recursion does not count).\n\"\"\"\n\nfrom typing import List\n\nimport sys\nsys.path.insert(1, '../tree/')\nfrom binary_tree import TreeNode, print_tree #, array_to_bt, array_to_bt_lc, bt_find\n\n###############################################################################\n\"\"\"\nSolution #1: Use iterative inorder traversal.\n\nO(n) time\nO(1) space as long as not O(n) values are same\n\nLeetCode Jan 24, 2020\nRuntime: 56 ms, faster than 64.38% of Python3 online submissions for Find Mode in Binary Search Tree.\nMemory Usage: 16.6 MB, less than 100.00% of Python3 online submissions for Find Mode in Binary Search Tree.\n\"\"\"\ndef find_mode(root) -> List:\n if not root:\n return []\n\n max_count = 0\n modes = []\n\n # inorder\n\n stack = []\n curr = root\n prev_val = None\n count = 1\n max_count = 1\n\n while curr or stack:\n if curr:\n while curr:\n stack.append(curr)\n curr = curr.left\n\n curr = stack.pop()\n \n if prev_val is None:\n prev_val = curr.val\n elif curr.val == prev_val:\n count += 1\n else:\n if count > max_count:\n modes = [prev_val]\n max_count = count\n elif count == max_count:\n modes.append(prev_val)\n\n count = 1\n prev_val = curr.val\n\n curr = curr.right\n\n if count > max_count:\n modes = [prev_val]\n #max_count = count\n elif count == max_count:\n modes.append(prev_val)\n\n return modes\n\n###############################################################################\n\"\"\"\nSolution #2: use dict to keep count\n\nO(n) time\nO() extra space\n\nLeetCode Jan 24, 2020\nRuntime: 56 ms, faster than 64.38% of Python3 online submissions for Find Mode in Binary Search Tree.\nMemory Usage: 16.8 MB, less than 100.00% of Python3 online submissions for Find Mode in Binary Search Tree.\n\"\"\"\nimport collections\n\ndef find_mode2(root) -> List:\n def inorder(node):\n if not node:\n return\n \n inorder(node.left)\n counts[node.val] += 1\n inorder(node.right)\n \n if not root:\n return []\n\n counts = collections.defaultdict(int)\n inorder(root)\n\n # find all keys with max value in the dict counts\n max_val = max(counts.values())\n \n return [k for k, v in counts.items() if v == max_val] \n\n###############################################################################\n\nif __name__ == \"__main__\":\n root = TreeNode(1)\n root.right = TreeNode(2)\n root.right.left = TreeNode(2)\n\n print_tree(root)\n\n modes = find_mode(root)\n modes2 = find_mode2(root)\n\n print(f\"\\nmodes (sol #1) = {modes}\")\n print(f\"modes (sol #2) = {modes2}\")\n","sub_path":"bst/0501_mode_bst.py","file_name":"0501_mode_bst.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"22404351","text":"import datetime as dt\nimport constants as c\nimport airflow\nfrom airflow import DAG\n# from airflow.hooks.http_hook import HttpHook\n# from airflow.models import BaseOperator\n# from airflow.utils.decorators import apply_defaults\n# from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook\n# from airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.trigger_rule import TriggerRule\nfrom airflow.contrib.operators.bigquery_operator import BigQueryOperator\nfrom godatadriven.operators.postgres_to_gcs import PostgresToGoogleCloudStorageOperator\nfrom airflow.contrib.operators.dataproc_operator import (\n DataprocClusterCreateOperator,\n DataprocClusterDeleteOperator,\n DataProcPySparkOperator\n)\nfrom 
airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator\n\nfrom airflow.contrib.operators.dataflow_operator import DataFlowPythonOperator\n\nfrom airflow.operators.python_operator import BranchPythonOperator\nfrom airflow.operators.dummy_operator import DummyOperator\n\nfrom airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator\n\n# import random\n\n# class HttpToGcsOperator(BaseOperator):\n# \"\"\"\n# Calls an endpoint on an HTTP system to execute an action\n#\n# :param http_conn_id: The connection to run the operator against\n# :type http_conn_id: string\n# :param endpoint: The relative part of the full url. (templated)\n# :type endpoint: string\n# :param gcs_path: The path of the GCS to store the result\n# :type gcs_path: string\n# \"\"\"\n#\n# template_fields = ('endpoint', 'gcs_path', 'data')\n# template_ext = ()\n# ui_color = '#f4a460'\n#\n# @apply_defaults\n# def __init__(self,\n# endpoint,\n# http_conn_id='default_http',\n# gcs_conn_id=\"gcs_default\",\n# gcs_path=None,\n# method=\"GET\",\n# *args,\n# **kwargs):\n# super(HttpToGcsOperator, self).__init__(*args, **kwargs)\n# self.http_conn_id = http_conn_id,\n# self.endpoint = endpoint,\n# self.method = method,\n# self.gcs_path = gcs_path,\n# self.gcs_conn_id = gcs_conn_id\n#\n# def execute(self, context, url=None):\n# # connect to HTTP and get data\n# http = HttpHook(\n# method='GET',\n# http_conn_id='http_default'\n# )\n# res = http.run(url, data=None, headers=None, extra_options=None)\n#\n# temp_file = open(\"testfile.txt\", \"w\")\n# temp_file.write(res.text)\n# temp_file.close()\n#\n# # store to GCS\n# store = GoogleCloudStorageHook(\n# google_cloud_storage_conn_id='google_cloud_default',\n# delegate_to=None\n# )\n# store.upload(bucket='airflow_training_data_123',\n# object='Currencies/{{ds}}/out.json',\n# filename='testfile.txt',\n# mime_type='application/json')\n\ndag = DAG(\n dag_id=\"my_first_dag\",\n schedule_interval=\"30 7 * * *\",\n default_args={\n \"owner\": \"airflow\",\n \"start_date\": dt.datetime(2018, 10, 1),\n \"depends_on_past\": True,\n \"email_on_failure\": True\n # \"email\": \"ewebbe@bol.com\",\n },\n)\n\ndag2 = DAG(\n dag_id=\"my_second_dag\",\n schedule_interval=\"30 7 * * *\",\n default_args={\n \"owner\": \"ewebbe\",\n \"start_date\": dt.datetime(2018, 10, 1),\n \"depends_on_past\": True,\n \"email_on_failure\": True\n # \"email\": \"ewebbe@bol.com\",\n },\n)\n\n\ndag3 = DAG(\n dag_id=\"my_third_dag\",\n schedule_interval=\"30 7 * * *\",\n default_args={\n \"owner\": \"ewebbe\",\n \"start_date\": dt.datetime(2018, 10, 1),\n \"depends_on_past\": True,\n \"email_on_failure\": True\n # \"email\": \"ewebbe@bol.com\",\n },\n)\n\n# dag4 = DAG(\n# dag_id=\"my_fourth_dag\",\n# schedule_interval=\"30 7 * * *\",\n# default_args={\n# \"owner\": \"ewebbe\",\n# \"start_date\": dt.datetime(2018, 10, 1),\n# \"depends_on_past\": True,\n# \"email_on_failure\": True\n# # \"email\": \"ewebbe@bol.com\",\n# },\n# )\n\ndag5 = DAG(\n dag_id=\"my_fifth_dag\",\n schedule_interval=\"30 7 * * *\",\n default_args={\n \"owner\": \"ewebbe\",\n \"start_date\": dt.datetime(2018, 10, 1),\n \"depends_on_past\": True,\n \"email_on_failure\": True\n # \"email\": \"ewebbe@bol.com\",\n },\n)\n\n\ndag6 = DAG(\n dag_id=\"my_branching_dag1\",\n schedule_interval=\"30 7 * * *\",\n default_args={\n \"owner\": \"ewebbe\",\n \"start_date\": airflow.utils.dates.days_ago(21),\n \"depends_on_past\": True,\n \"email_on_failure\": True\n # \"email\": \"ewebbe@bol.com\",\n },\n)\n\ndag7 = DAG(\n 
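# demo DAG for the KubernetesPodOperator task defined at the bottom of this file\n    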
dag_id=\"my_Kubernetes_dag\",\n schedule_interval=\"30 7 * * *\",\n default_args={\n \"owner\": \"ewebbe\",\n \"start_date\": airflow.utils.dates.days_ago(2),\n \"depends_on_past\": True,\n \"email_on_failure\": True\n # \"email\": \"ewebbe@bol.com\",\n },\n)\n\ndef print_exec_date(**context):\n print(context[\"execution_date\"])\n\n\ndef print_exec_dayname(**context):\n return context[\"execution_date\"].strftime(\"%A\")\n\n# my_task = PythonOperator(\n# task_id=\"task_name\",\n# python_callable=print_exec_date,\n# provide_context=True,\n# dag=dag\n# )\n\n\npgsl_to_gcs = PostgresToGoogleCloudStorageOperator(\n task_id=\"CollectDataFromPgrs\",\n postgres_conn_id='Training_postgres',\n sql=\"SELECT * \\\n FROM land_registry_price_paid_uk \\\n WHERE transfer_date = '{{ ds }}'\",\n bucket='airflow_training_data_123',\n filename='PricePaid/{{ds}}/out.json',\n provide_context=True,\n dag=dag\n)\n\ndataproc_create_cluster = DataprocClusterCreateOperator(\n task_id='CreateTheCluster',\n cluster_name='analyse-pricing-{{ ds }}',\n project_id=c.PROJECT_ID,\n num_workers=2,\n zone='europe-west4-a',\n dag=dag2\n)\n\ncompute_aggregates = DataProcPySparkOperator(\n task_id='ComputeAllTheThings',\n main='gs://europe-west1-training-airfl-9b3d38b2-bucket/other/build_statistics.py',\n cluster_name='analyse-pricing-{{ ds }}',\n arguments=[\"{{ ds }}\"],\n dag=dag2\n)\n\ndataproc_delete_cluster = DataprocClusterDeleteOperator(\n task_id='KillTheCluster',\n cluster_name='analyse-pricing-{{ ds }}',\n project_id=c.PROJECT_ID,\n trigger_rule=TriggerRule.ALL_DONE,\n dag=dag2\n)\n\ncopy_to_bq = GoogleCloudStorageToBigQueryOperator(\n task_id='CopyDataToBigQuery',\n bucket='airflow_training_data_123',\n source_objects=['average_prices/{{ ds }}/*.parquet'],\n destination_project_dataset_table='Analysis.average_prices',\n source_format='PARQUET',\n write_disposition='WRITE_APPEND',\n dag=dag3\n)\n\ndelete_from_bq = BigQueryOperator(\n task_id='delete_rows_from_bq',\n bql=\"DELETE FROM Analysis.average_prices WHERE trans_date = '{{ ds }}'\",\n use_legacy_sql=False,\n dag=dag3\n)\n\n# collect_from_http = HttpToGcsOperator(\n# task_id='CollectFormHTTP',\n# conn_id='http_default',\n# url='\"https://europe-west1-gdd-airflow-training.cloudfunctions.net/\\\n# airflow-training-transform-valutas?date={{ ds }}&from=GBP&to=EUR\"',\n# project_id=c.PROJECT_ID,\n# bucket='airflow_training_data_123',\n# filename='Currencies/{{ds}}/out.json',\n# dag=dag4\n# )\n#\n# delete_from_bq_json = BigQueryOperator(\n# task_id='delete_rows_from_bqJSON',\n# bql=\"DELETE FROM Analysis.exchange_rates WHERE trans_date = '{{ ds }}'\",\n# use_legacy_sql=False,\n# dag=dag4\n# )\n#\n# copy_to_bq_json = GoogleCloudStorageToBigQueryOperator(\n# task_id='CopyDataToBigQueryJSON',\n# bucket='airflow_training_data_123',\n# source_objects=['Currencies/{{ ds }}/*.json'],\n# destination_project_dataset_table='Analysis.exchange_rates',\n# source_format='JSON',\n# write_disposition='WRITE_APPEND',\n# dag=dag4\n# )\n\nload_into_bigquery = DataFlowPythonOperator(\n task_id='Dataflow_into_bigquery',\n dataflow_default_options={\"input\": \"gs://airflow_training_data_123/PricePaid/*/*.json\",\n \"table\": \"Dataflow_import\",\n \"dataset\": \"Analysis\",\n \"project\": c.PROJECT_ID,\n \"bucket\": \"europe-west1-training-airfl-22519ec9-bucket\",\n \"name\": \"write-to-bq-{{ ds }}\"},\n py_file=\"gs://airflow_training_data/other/dataflow_job.py\",\n project_id=c.PROJECT_ID,\n dag=dag5\n)\n\ndataproc_create_cluster >> compute_aggregates >> 
dataproc_delete_cluster\ndelete_from_bq >> copy_to_bq\n# collect_from_http >> delete_from_bq_json >> copy_to_bq_json\n\n\noptions = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\ntoday = dt.datetime.now().strftime(\"%A\")\n\n\nbranching = BranchPythonOperator(\n task_id='branch',\n python_callable=print_exec_dayname,\n provide_context=True,\n dag=dag6\n)\n\njoin = DummyOperator(\n task_id='join',\n trigger_rule=TriggerRule.ONE_SUCCESS,\n dag=dag6\n)\n\nfor option in options:\n branching >> DummyOperator(task_id=option, dag=dag6) >> join\n\nkubernetes_min_pod = KubernetesPodOperator(\n task_id='RunContainer',\n name='runcontainer',\n namespace='default',\n image='hello-world',\n dag=dag7\n)\n","sub_path":"dags/dag.py","file_name":"dag.py","file_ext":"py","file_size_in_byte":9236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"140355383","text":"\"\"\"Map VFE tags\n\nThis module allows us to map VFE tags.\nThis module accepts different type of files (.csv, jpeg, jpg, .xlsx, json etc),\ns3 credential.\n\nThis module contains the following\nfunctions:\n\n * map_vfe_tags - retuns the data after mapping VFE tags\n\"\"\"\n\nimport logging\n\nfrom workflowcommon import setup_logger\n\n\ndef map_vfe_tags(filename):\n \"\"\"\n mapping VFE tags\n\n :parameters\n ---------------------------\n :param filename: as input filename\n :type filename: file\n\n :return\n ---------------------------\n returns data after mapping with VFE tags\n \"\"\"\n print(filename)\n logger = logging.getLogger(__name__)\n logger.info('mapping vfe tags')\n parse_logger = setup_logger(__name__)\n parse_logger.info('Done! mapping vfe tags completed!')\n return filename\n","sub_path":"dags/vfetags/map_vfe_tags.py","file_name":"map_vfe_tags.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"583785605","text":"class MatchingError(Exception):\n pass\n\nclass District:\n '''Bla bla\n\n '''\n def summarize_voter_sentiment(self):\n '''Bla bla\n\n '''\n for voter in self.__iter__():\n sentiment = voter.get_my_sentiment()\n # Analysis\n\n def set_seat_per_party(self, party_vec):\n '''Bla bla\n\n '''\n if sum(party_vec.values()) != self.n_seats:\n print (self.n_seats)\n print (party_vec)\n print ([(str(x), y) for x, y in party_vec.items()])\n raise MatchingError('Number of elected parties not identical ' + \\\n 'to number of seats')\n self.elected_party = party_vec\n\n def get_seats_per_party(self):\n '''Bla bla\n\n '''\n return self.elected_party\n\n def simulate_voters_opinion(self):\n '''Bla bla\n\n '''\n for voter in self.voters:\n voter.generate_state()\n\n def load_state(self, file_name):\n '''Bla bla\n\n '''\n store = False\n buff = []\n with open(file_name, 'r') as fin:\n for line in fin:\n if self.name == line[:-1]:\n store = True\n if store and 'end' in line[0:3]:\n break\n if store:\n buff.append(line)\n\n for voter, row in zip(self.voters, buff[1:]):\n voter.set_state([float(x) for x in row.split(',')])\n\n def save_state(self, file_name):\n '''Bla bla\n\n '''\n with open(file_name, 'a+') as fin:\n fin.write(self.name + '\\n')\n for voter in self.voters:\n line = [str(x) for x in voter.get_state()]\n fin.write(','.join(line) + '\\n')\n fin.write('end \\n')\n\n def set_voters_manual(self, voters):\n '''Bla bla\n\n '''\n for voter in voters:\n voter.set_parent(self)\n self.voters.append(voter)\n\n def del_voters_opinion(self):\n '''Bla 
bla\n\n '''\n for voter in self.voters:\n voter.state = None \n\n def set_parties(self, parties):\n '''Bla bla\n\n '''\n self.parties = parties\n\n def __str__(self):\n\n return self.name\n\n def __iter__(self):\n\n if self.voters is None:\n raise RuntimeError('The voters of district %s not set' %(self.name))\n\n for voter in self.voters:\n yield voter\n\n def __init__(self, name, code, province, n_seats, \n population_count=None, urban_rural=None, age_distr=None):\n\n self.name = name\n self.code = int(code)\n self.province = province\n self.n_seats = n_seats\n\n self.voters = [] \n self.parties = []\n\n self.population_count = population_count\n self.urban_rural = urban_rural\n self.age_distr = age_distr\n\n self.elected_party = None\n self.member_of_country = None\n","sub_path":"district.py","file_name":"district.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"118070263","text":"import os\nimport pickle\n\ndetails_list=[]\nl2=[]\nG = []\ndef file_save():\n NAME_PRO = details_list[0]\n ADDRESS_PRO = details_list[1]\n MOBILE_NO_PRO = details_list[2]\n ROOM_NO_PRO = details_list[3]\n PRICE_PRO = details_list[4]\n f = open(\"hotel.dat\", \"ab\")\n a=save(NAME_PRO,ADDRESS_PRO,MOBILE_NO_PRO,ROOM_NO_PRO,PRICE_PRO)\n pickle.dump(a,f,protocol=2)\n f.close()\n restart_program()\n\n\ndef restart_program():\n \"\"\"Restarts the current program.\n Note: this function does not return. Any cleanup action (like\n saving data) must be done before calling this function.\"\"\"\n python = sys.executable\n os.execl(python, python, * sys.argv)\n\n\n\n\n\n\nclass save:\n def __init__(self, NAME_PRO, ADDRESS_PRO, MOBILE_NO_PRO, ROOM_NO_PRO, PRICE_PRO):\n self.name=NAME_PRO\n self.address=ADDRESS_PRO\n self.mobile_no=MOBILE_NO_PRO\n self.room_no=ROOM_NO_PRO\n self.price=PRICE_PRO\n print(self.name,self.address,self.mobile_no,self.room_no,self.price)\n\n\n\n\n\nimport sys\n\nimport tkinter as tk\n\n\n\nclass CheckOut:\n\n def __init__(self):\n def check_room():\n self.rom = str(self.data.get())\n print(self.rom)\n print(\"\\n\")\n if self.rom.isdigit() == True and len(self.rom) != 0 and 0 < eval(self.rom) <= 50 :\n self.Text1.insert(tk.INSERT, \" Valid locker number \"\"\\n\")\n v = int(self.rom)\n f = open(\"hotel.dat\", \"rb\")\n f1 = open(\"hote.dat\", \"ab\")\n n = 0\n try:\n while True:\n s = pickle.load(f)\n if s.room_no == v:\n n = 1\n name1 = s.name\n\n print(\" \")\n else:\n pickle.dump(s, f1)\n except EOFError:\n if n == 0:\n self.Text1.insert(tk.INSERT, \"NO GUEST FOUND\"\"\\n\")\n\n elif n == 1:\n\n self.Text1.insert(tk.INSERT, \"THANK YOU \" + name1.upper() + \" FOR VISTING US\"\"\\n\")\n pass\n f.close()\n f1.close()\n os.remove(\"hotel.dat\")\n os.rename(\"hote.dat\", \"hotel.dat\")\n\n else:\n self.Text1.insert(tk.INSERT, \"Invalid input please input a valid LOCKER NO.\"\"\\n\")\n\n root = tk.Tk()\n '''This class configures and populates the toplevel window.\n top is the toplevel containing window.'''\n _bgcolor = '#ffffff' # X11 color: 'white'\n _fgcolor = '#000000' # X11 color: 'black'\n _compcolor = '#ffffff' # X11 color: 'white'\n _ana1color = '#ffffff' # X11 color: 'white'\n _ana2color = '#ffffff' # X11 color: 'white'\n font10 = \"-family {Courier New} -size 15 -weight normal -slant\" \\\n \" roman -underline 0 -overstrike 0\"\n font11 = \"-family {Segoe UI} -size 23 -weight bold -slant \" \\\n \"roman -underline 0 -overstrike 0\"\n font12 = \"-family {Segoe UI} -size 24 -weight bold -slant 
\" \\\n \"roman -underline 0 -overstrike 0\"\n font9 = \"-family {Segoe UI} -size 18 -weight normal -slant \" \\\n \"roman -underline 0 -overstrike 0\"\n\n root.geometry(\"1011x750\")\n root.title(\"FIND MY LOCKER\")\n root.configure(background=\"#ADDFFF\") #light turquiose\n root.configure(highlightbackground=\"#83b1c9\")\n root.configure(highlightcolor=\"black\")\n\n\n\n self.Frame1 = tk.Frame(root)\n self.Frame1.place(relx=0.04, rely=0.04, relheight=0.91, relwidth=0.91)\n self.Frame1.configure(relief=tk.GROOVE)\n self.Frame1.configure(borderwidth=\"2\")\n self.Frame1.configure(relief=tk.GROOVE)\n self.Frame1.configure(background=\"#BDEDFF\")\n self.Frame1.configure(highlightbackground=\"#ffffff\")\n self.Frame1.configure(highlightcolor=\"black\")\n self.Frame1.configure(width=925)\n\n self.Label1 = tk.Label(self.Frame1)\n self.Label1.place(relx=0.14, rely=0.12, height=46, width=442)\n self.Label1.configure(activebackground=\"#ffffff\")\n self.Label1.configure(activeforeground=\"black\")\n self.Label1.configure(background=\"#BDEDFF\")\n self.Label1.configure(disabledforeground=\"#bfbfbf\")\n self.Label1.configure(font=font11)\n self.Label1.configure(foreground=\"#000000\")\n self.Label1.configure(highlightbackground=\"#ffffff\")\n self.Label1.configure(highlightcolor=\"black\")\n self.Label1.configure(text='''ENTER THE LOCKER NO. :''')\n\n self.Entry1 = tk.Entry(self.Frame1)\n self.data=tk.StringVar()\n self.Entry1.place(relx=0.67, rely=0.12,height=44, relwidth=0.07)\n self.Entry1.configure(background=\"white\")\n self.Entry1.configure(disabledforeground=\"#bfbfbf\")\n self.Entry1.configure(font=font10)\n self.Entry1.configure(foreground=\"#000000\")\n self.Entry1.configure(highlightbackground=\"#ffffff\")\n self.Entry1.configure(highlightcolor=\"black\")\n self.Entry1.configure(insertbackground=\"black\")\n self.Entry1.configure(selectbackground=\"#e6e6e6\")\n self.Entry1.configure(selectforeground=\"black\")\n self.Entry1.configure(textvariable=self.data)\n\n\n\n\n\n\n\n self.Text1 = tk.Text(self.Frame1)\n self.Text1.place(relx=0.05, rely=0.54, relheight=0.4, relwidth=0.89)\n self.Text1.configure(background=\"white\")\n self.Text1.configure(font=font9)\n self.Text1.configure(foreground=\"black\")\n self.Text1.configure(highlightbackground=\"#ffffff\")\n self.Text1.configure(highlightcolor=\"black\")\n self.Text1.configure(insertbackground=\"black\")\n self.Text1.configure(selectbackground=\"#e6e6e6\")\n self.Text1.configure(selectforeground=\"black\")\n self.Text1.configure(width=824)\n self.Text1.configure(wrap=tk.WORD)\n\n self.Button1 = tk.Button(self.Frame1)\n self.Button1.place(relx=0.34, rely=0.28, height=93, width=286)\n self.Button1.configure(activebackground=\"#ffffff\")\n self.Button1.configure(activeforeground=\"#000000\")\n self.Button1.configure(background=\"#98AFC7\") #Blue Gray\n self.Button1.configure(disabledforeground=\"#bfbfbf\")\n self.Button1.configure(font=font12)\n self.Button1.configure(foreground=\"#000000\")\n self.Button1.configure(highlightbackground=\"#ffffff\")\n self.Button1.configure(highlightcolor=\"black\")\n self.Button1.configure(pady=\"0\")\n self.Button1.configure(text='''CHECK OUT''')\n self.Button1.configure(command=check_room)\n root.mainloop()\n\n\n\nif __name__ == '__main__':\n out=CheckOut()\n","sub_path":"checkoutgui.py","file_name":"checkoutgui.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"345491747","text":"import numpy as np\nimport 
keras\n\ndef ImageNet_preprocessing(data_x, input_img_size=(256, 256, 2), output_img_size=(256, 256, 3)):\n    new_data_x = np.resize(data_x, (len(data_x), 256, 256, 3))\n    return new_data_x\n\ndef read_data():\n    # write a function to select x_train y_train x_test y_test\n    data_x = np.load('image_data_256x256.npy')\n    data_y = np.load('labels.npy')\n\n    # concatenate together and shuffle the data\n    data_x = np.swapaxes(np.swapaxes(data_x,1,2),2,3)\n    data_y = np.array([data_y]).T\n\n    # Expand 2 channels into 3 channels\n    # data_x = ImageNet_preprocessing(data_x)\n\n    # shuffle the data first\n    from sklearn.utils import shuffle\n    data_x, data_y = shuffle(data_x, data_y)\n\n    return data_x, data_y\n\ndef cross_validation_splits(data_x, data_y, test_set_size = 0.2, val_set_size = 0.2, cv_split_size = 10, num_classes = 3):\n    from sklearn.model_selection import train_test_split, KFold\n    processed_data = []\n    all_index = [_ for _ in range(len(data_x))]\n\n    # cross-validation data set\n    kf = KFold(n_splits=cv_split_size)\n\n    for train, test in kf.split(all_index):\n        print(\"Train size: {}, Test size: {}\".format(train.shape, test.shape))\n        x_train = data_x[train]\n        y_train = data_y[train]\n        x_test = data_x[test]\n        y_test = data_y[test]\n\n        x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=val_set_size)\n        # Convert class vectors to binary class matrices.\n        y_train = keras.utils.to_categorical(y_train, num_classes)\n        y_val = keras.utils.to_categorical(y_val, num_classes)\n        y_test = keras.utils.to_categorical(y_test, num_classes)\n\n        x_train = x_train.astype('float32')\n        x_val = x_val.astype('float32')\n        x_test = x_test.astype('float32')\n        x_train /= 255\n        x_val /= 255\n        x_test /= 255\n\n        processed_data.append([(x_train,y_train),(x_val, y_val),(x_test, y_test)])\n    print(\"Cross validation data has been split!\")\n    return processed_data\n\n\ndef main():\n    data_x, data_y = read_data()\n    processed_data = cross_validation_splits(data_x, data_y)\n    import pickle\n    pickle.dump(processed_data, open(\"./saved_data/processed_data.p\", \"wb\")) # save it into a file named save.p\n\nif __name__ == '__main__':\n    main()","sub_path":"save_datas_for_cv.py","file_name":"save_datas_for_cv.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"453817314","text":"# -*- coding:GB18030 -*-\n\nfrom base.base_module import *\n\nclass kCompleteUrlModule(BaseModule):\n    def __init__(self):\n        BaseModule.__init__(self)\n    \n    def do_one(self, do):\n        \n        # run every time: refresh the listings --- main site 200 pages, women's section 100 pages\n        for index in xrange(40):\n            url = \"http://all.17k.com/all/0_0__0__%d.html\"%(index+1)\n            self.push_out(name=\"17k\",url=url)\n\n        for index in xrange(20):\n            url = \"http://all.17k.com/mm/0_0__0__%d.html\"%(index+1)\n            self.push_out(name=\"17k\",url=url)\n","sub_path":"base/domain/17k/gencompleteurl.py","file_name":"gencompleteurl.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"}
+{"seq_id":"231602188","text":"import json\nimport random\nfrom cryptography.fernet import Fernet\n\n\nclass Data:\n\n    version = 'v0.4'\n    homewareData = {}\n    homewareFile = 'homeware.json'\n    secureData = {}\n    secureFile = 'secure.json'\n\n\n    def __init__(self):\n        try:\n            with open(self.homewareFile, 'r') as f:\n                self.homewareData = json.load(f)\n            #Create the secure file if it doesn't exist: v0.3 to v0.4\n            try:\n                with open(self.secureFile, 'r') 
as f:\n self.secureData = json.load(f)\n except:\n with open('config.json', 'r') as f:\n self.secureData = json.load(f)\n with open('token.json', 'r') as f:\n self.secureData['token']['google'] = json.load(f)['google']\n with open(self.secureFile, 'w') as f:\n json.dump(self.secureData, f)\n #Create DDNS content v0.3 to v0.4\n try:\n ddns = self.secureData['ddns']\n except:\n self.secureData['ddns'] = {\n 'enabled': False,\n 'status': 'Disabled',\n 'code': 'unknown',\n 'last': 'unknown',\n 'ip': 'unknown',\n 'provider': 'ddns',\n 'hostname': self.secureData['domain'],\n 'username': '',\n 'password': ''\n }\n self.save()\n #Create apikey content v0.3 to v0.4\n try:\n ddns = self.secureData['token']['apikey']\n except:\n self.secureData['token']['apikey'] = ''\n self.save()\n except:\n print('Hi')\n\n def getVersion(self):\n return {'version': self.version}\n\n# FILES\n\n def firstRun(self):\n try:\n with open(self.homewareFile, 'r') as f:\n self.homewareData = json.load(f)\n with open(self.secureFile, 'r') as f:\n self.secureData = json.load(f)\n return False\n except:\n return True\n\n def save(self):\n with open(self.homewareFile, 'w') as f:\n json.dump(self.homewareData, f)\n with open(self.secureFile, 'w') as f:\n json.dump(self.secureData, f)\n\n def refresh(self):\n with open(self.homewareFile, 'r') as f:\n self.homewareData = json.load(f)\n with open(self.secureFile, 'r') as f:\n self.secureData = json.load(f)\n\n# DEVICES\n\n def getDevices(self):\n with open(self.homewareFile, 'w') as f:\n json.dump(self.homewareData, f)\n return self.homewareData['devices']\n\n def updateDevice(self, incommingData):\n deviceID = incommingData['devices']['id']\n temp_devices = [];\n for device in self.homewareData['devices']:\n if device['id'] == deviceID:\n temp_devices.append(incommingData['devices'])\n else:\n temp_devices.append(device)\n self.homewareData['devices'] = temp_devices\n self.save()\n\n def createDevice(self, incommingData):\n deviceID = incommingData['devices']['id']\n self.homewareData['devices'].append(incommingData['devices'])\n self.homewareData['status'][deviceID] = {}\n self.homewareData['status'][deviceID] = incommingData['status']\n self.save()\n\n def deleteDevice(self, value):\n temp_devices = [];\n for device in self.homewareData['devices']:\n if device['id'] != value:\n temp_devices.append(device)\n self.homewareData['devices'] = temp_devices\n # Delete status\n status = self.homewareData['status']\n del status[value]\n self.homewareData['status'] = status\n self.save()\n\n# RULES\n\n def getRules(self):\n with open(self.homewareFile, 'w') as f:\n json.dump(self.homewareData, f)\n return self.homewareData['rules']\n\n def updateRule(self, incommingData):\n self.homewareData['rules'][int(incommingData['id'])] = incommingData['rule']\n self.save()\n\n def createRule(self, incommingData):\n self.homewareData['rules'].append(incommingData['rule'])\n self.save()\n\n def deleteRule(self, value):\n temp_rules = self.homewareData['rules']\n del temp_rules[int(value)]\n self.homewareData['rules'] = temp_rules\n self.save()\n\n# STATUS\n\n def getStatus(self):\n with open(self.homewareFile, 'w') as f:\n json.dump(self.homewareData, f)\n return self.homewareData['status']\n\n def updateParamStatus(self, device, param, value):\n self.homewareData['status'][device][param] = value\n self.save()\n\n# SECURE\n\n def getSecure(self):\n data = {\n \"google\": {\n \"client_id\": self.secureData['token'][\"google\"][\"client_id\"],\n \"client_secret\": 
self.secureData['token'][\"google\"][\"client_secret\"],\n },\n \"ddns\": self.secureData['ddns']\n }\n return data\n\n def updateSecure(self, incommingData):\n self.secureData['token'][\"google\"][\"client_id\"] = incommingData['google']['client_id']\n self.secureData['token'][\"google\"][\"client_secret\"] = incommingData['google']['client_secret']\n self.secureData['ddns']['username'] = incommingData['ddns']['username']\n self.secureData['ddns']['password'] = incommingData['ddns']['password']\n self.secureData['ddns']['provider'] = incommingData['ddns']['provider']\n self.secureData['ddns']['hostname'] = incommingData['ddns']['hostname']\n self.secureData['ddns']['enabled'] = incommingData['ddns']['enabled']\n self.save()\n\n def getToken(self,agent):\n return self.secureData['token'][agent]\n\n def updateToken(self,agent,type,value,timestamp):\n self.secureData['token'][agent][type]['value'] = value\n self.secureData['token'][agent][type]['timestamp'] = timestamp\n self.save()\n\n def setUser(self, incommingData):\n if self.secureData['user'] == '':\n data = {}\n key = Fernet.generate_key()\n self.secureData['key'] = str(key)\n cipher_suite = Fernet(key)\n ciphered_text = cipher_suite.encrypt(str.encode(incommingData['pass'])) #required to be bytes\n self.secureData['user'] = incommingData['user']\n self.secureData['pass'] = str(ciphered_text)\n self.save()\n return 'Saved correctly!'\n else:\n return 'Your user has beed set in the past'\n\n def setDomain(self, value):\n self.secureData['domain'] = value\n self.secureData['ddns']['hostname'] = value\n self.save()\n\n def getDDNS(self):\n return self.secureData['ddns']\n\n def updateDDNS(self, ip, status, code, enabled, last):\n self.secureData['ddns']['ip'] = ip\n self.secureData['ddns']['status'] = status\n self.secureData['ddns']['code'] = code\n self.secureData['ddns']['enabled'] = enabled\n self.secureData['ddns']['last'] = last\n self.save()\n\n def generateAPIKey(self):\n chars = 'abcdefghijklmnopqrstuvwyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n token = ''\n i = 0\n while i < 40:\n token += random.choice(chars)\n i += 1\n self.secureData['token']['apikey'] = token\n self.save()\n return token\n\n# LOGIN\n\n def login(self, headers):\n user = headers['user']\n password = headers['pass']\n\n cipher_suite = Fernet(str.encode(self.secureData['key'][2:len(self.secureData['key'])]))\n plain_text = cipher_suite.decrypt(str.encode(self.secureData['pass'][2:len(self.secureData['pass'])]))\n responseData = {}\n if user == self.secureData['user'] and plain_text == str.encode(password):\n #Generate the token\n chars = 'abcdefghijklmnopqrstuvwyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n token = ''\n i = 0\n while i < 40:\n token += random.choice(chars)\n i += 1\n #Saved the new token\n self.secureData['token']['front'] = token\n #Prepare the response\n responseData = {\n 'status': 'in',\n 'user': user,\n 'token': token\n }\n else:\n #Prepare the response\n responseData = {\n 'status': 'fail'\n }\n\n self.save()\n return responseData\n\n def validateUserToken(self, headers):\n user = headers['user']\n token = headers['token']\n responseData = {}\n if user == self.secureData['user'] and token == self.secureData['token']['front']:\n responseData = {\n 'status': 'in'\n }\n else:\n responseData = {\n 'status': 'fail'\n }\n\n return responseData\n\n def googleSync(self, headers, responseURL):\n user = headers['user']\n password = headers['pass']\n\n cipher_suite = Fernet(str.encode(self.secureData['key'][2:len(self.secureData['key'])]))\n plain_text = 
cipher_suite.decrypt(str.encode(self.secureData['pass'][2:-1]))\n        if user == self.secureData['user'] and plain_text == str.encode(password):\n            return responseURL\n        else:\n            return \"fail\"\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":9506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"357511793","text":"import warnings\n\nfrom math import floor, e\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom .definitions import PARAM, SERIES, COL\nfrom .sww_utils import year_delta, guess_freq, rain_events, agg_events\n\n\ndef annual_series(rolling_sum_values, year_index):\n    \"\"\"\n    create an annual series of the maximum overlapping sum per year and calculate the \"u\" and \"w\" parameters\n    acc. to DWA-A 531 chap. 5.1.5\n\n    Args:\n        rolling_sum_values (numpy.ndarray): array with maximum rolling sum per event per year.\n        year_index (numpy.ndarray): array with year of the event.\n\n    Returns:\n        dict: parameters \"u\" and \"w\" from the annual series for a specific duration step\n    \"\"\"\n    annually_series = pd.Series(rolling_sum_values).groupby(year_index).max().values\n    # annually_series = pd.Series(data=rolling_sum_values,\n    #                             index=events[COL.START].values).resample('AS').max().index\n    annually_series = np.sort(annually_series)[::-1]\n\n    mean_sample_rainfall = annually_series.mean()\n    sample_size = annually_series.size\n\n    index = np.arange(sample_size) + 1\n    x = -np.log(np.log((sample_size + 0.2) / (sample_size - index + 0.6)))\n    x_mean = x.mean()\n\n    w = ((x * annually_series).sum() - sample_size * mean_sample_rainfall * x_mean) / \\\n        ((x ** 2).sum() - sample_size * x_mean ** 2)\n    u = mean_sample_rainfall - w * x_mean\n\n    return {PARAM.U: u, PARAM.W: w}\n\n\ndef _plotting_formula(k, l, m):\n    \"\"\"\n    plotting function acc. to DWA-A 531 chap. 5.1.3 for the partial series\n\n    Args:\n        k (float): running index\n        l (float): sample size\n        m (float): measurement period\n\n    Returns:\n        float: estimated empirical return period\n    \"\"\"\n    return (l + 0.2) * m / ((k - 0.4) * l)\n\n\ndef partial_series(rolling_sum_values, measurement_period):\n    \"\"\"\n    create a partial series of the largest overlapping sums and calculate the \"u\" and \"w\" parameters\n    acc. to DWA-A 531 chap. 5.1.4\n\n    Args:\n        rolling_sum_values (numpy.ndarray): array with maximum rolling sum per event\n        measurement_period (float): in years\n\n    Returns:\n        dict: parameters \"u\" and \"w\" from the partial series for a specific duration step\n    \"\"\"\n    partially_series = rolling_sum_values\n    partially_series = np.sort(partially_series)[::-1]\n\n    # use only the biggest values (2-3 times the number of measuring years)\n    # in the database (-> acc. to ATV-A 121 chap. 4.3; DWA-A 531 chap. 
4.4)\n    # as a requirement for the extreme value distribution\n    threshold_sample_size = int(floor(measurement_period * e))\n    partially_series = partially_series[:threshold_sample_size]\n\n    mean_sample_rainfall = partially_series.mean()\n    sample_size = threshold_sample_size\n    index = np.arange(sample_size) + 1\n    log_return_periods = np.log(_plotting_formula(index, sample_size, measurement_period))\n    ln_t_n_mean = log_return_periods.mean()\n\n    w = ((log_return_periods * partially_series).sum() - sample_size * mean_sample_rainfall * ln_t_n_mean) / \\\n        ((log_return_periods ** 2).sum() - sample_size * ln_t_n_mean ** 2)\n\n    u = mean_sample_rainfall - w * ln_t_n_mean\n\n    return {PARAM.U: u, PARAM.W: w}\n\n\ndef _improve_factor(interval):\n    \"\"\"\n    correction factor acc. to DWA-A 531 chap. 4.3\n\n    Args:\n        interval (float): length of the interval: number of observations per duration\n\n    Returns:\n        float: correction factor\n    \"\"\"\n    improve_factor = {1: 1.14,\n                      2: 1.07,\n                      3: 1.04,\n                      4: 1.03,\n                      5: 1.00,\n                      6: 1.00}\n\n    return np.interp(interval,\n                     list(improve_factor.keys()),\n                     list(improve_factor.values()))\n\n\ndef calculate_u_w(file_input, duration_steps, series_kind):\n    \"\"\"\n    statistical analysis for each duration step acc. to DWA-A 531 chap. 5.1\n    save the parameters of the distribution function as interim results\n    acc. to DWA-A 531 chap. 4.4: use the annual series only for measurement periods over 20 years\n\n\n    Args:\n        file_input (pandas.Series): precipitation data\n        duration_steps (list[int] | numpy.ndarray): in minutes\n        series_kind (str): 'annual' or 'partial'\n\n    Returns:\n        dict: with key=durations and values=dict(u, w)\n    \"\"\"\n    ts = file_input.copy()\n    # -------------------------------\n    # measuring time in years\n    measurement_start, measurement_end = ts.index[[0, -1]]\n    measurement_period = (measurement_end - measurement_start) / year_delta(years=1)\n    if round(measurement_period, 1) < 10:\n        warnings.warn(\"The measurement period is too short. The results may be inaccurate! \"\n                      \"It is recommended to use at least ten years. \"\n                      \"(-> Currently {}a used)\".format(measurement_period))\n\n    # -------------------------------\n    base_frequency = guess_freq(ts.index) # DateOffset/Timedelta\n\n    # ------------------------------------------------------------------------------------------------------------------\n    interim_results = dict()\n\n    # -------------------------------\n    # acc. to DWA-A 531 chap. 4.2:\n    # The values must be independent of each other for the statistical evaluations.\n    # estimated four hours acc. to Schilling (1984)\n    # for larger durations - use the duration as minimal gap\n    min_gap_schilling = pd.Timedelta(hours=4)\n\n    # --------------\n    # if only the duration is used for splitting events,\n    # the design rain height of smaller durations may increase\n\n    # -------------------------------\n    pbar = tqdm(duration_steps, desc='Calculating Parameters u and w')\n    for duration_integer in pbar:\n        pbar.set_description('Calculating Parameters u and w for duration {:0.0f}'.format(duration_integer))\n\n        duration = pd.Timedelta(minutes=duration_integer)\n\n        if duration < pd.Timedelta(base_frequency):\n            continue\n\n        if duration < min_gap_schilling:\n            min_gap = min_gap_schilling\n        else:\n            min_gap = duration\n\n        events = rain_events(ts, min_gap=min_gap)\n\n        # correction factor acc. to DWA-A 531 chap. 
4.3\n        improve = _improve_factor(duration / base_frequency)\n\n        roll_sum = ts.rolling(duration).sum()\n\n        # events[COL.MAX_OVERLAPPING_SUM] = agg_events(events, roll_sum, 'max') * improve\n        rolling_sum_values = agg_events(events, roll_sum, 'max') * improve\n\n        if series_kind == SERIES.ANNUAL:\n            interim_results[duration_integer] = annual_series(rolling_sum_values, events[COL.START].dt.year.values)\n        elif series_kind == SERIES.PARTIAL:\n            interim_results[duration_integer] = partial_series(rolling_sum_values, measurement_period)\n        else:\n            raise NotImplementedError\n\n    return interim_results\n","sub_path":"idf_analysis/event_series_analysis.py","file_name":"event_series_analysis.py","file_ext":"py","file_size_in_byte":6940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"74155594","text":"#==============================================================================\r\n#!/usr/bin/env python\r\n#title :parser_validation.py\r\n#description :Will automate the validation process. It takes the path to your inputs, cut/paste the important files\r\n# :into test's directory (RB.log, validation.log, reports.pdf). Extracts total time of validation process\r\n#author :Andrei Baltat\r\n#date :2015/05/06\r\n#version :0.1\r\n#python_version :3.6\r\n#==============================================================================\r\n\r\nimport re\r\nimport glob\r\nimport time\r\nimport sys\r\nimport os\r\nimport shutil\r\nimport datetime\r\n\r\n# copied results should all be archived under one name + timestamp_date_hour_min\r\nsleep_time = 15 # seconds between polls\r\nwhere_is_test = sys.argv[1]\r\n\r\npath_to_validation_log = r\"C:\\Program Files (x86)\\IBM Application Discovery Build Client\\Bin\\Release\\IBMApplicationDiscoveryValidationServer\\Logs\"\r\npath_to_rb_log = r\"C:\\Program Files (x86)\\IBM Application Discovery Build Client\\Bin\\Release\\IBMApplicationDiscoveryValidationServer\\ReportsGenerator\\log\"\r\npath_to_reports = r\"C:\\Program Files (x86)\\IBM Application Discovery Build Client\\Bin\\Release\\IBMApplicationDiscoveryValidationServer\\ReportsGenerator\\data\"\r\n\r\nlist_of_lines = []\r\nstart_time = int(round(time.time()))\r\npath_to_test = where_is_test + \"\\TestResults-\" + str(time.strftime(\"%b%d-%H.%M.%S\"))\r\nflag_of_error = False\r\n# Backbone of clean procedure\r\nclass Clean_logs():\r\n    def __init__(self,path,type_file):\r\n        self.path = path\r\n        self.type_file = type_file\r\n    # copy everything\r\n    def CreateFolder(self):\r\n        if not os.path.exists(path_to_test):\r\n            os.mkdir(path_to_test)\r\n\r\n    def Copy_logs(self):\r\n        log = os.listdir(self.path)\r\n        for i in log:\r\n            if i.endswith(self.type_file):\r\n                shutil.move((os.path.join(self.path, i)),\r\n                            (path_to_test))\r\n\r\n# trigger test.bat\r\nos.system(where_is_test + r'\"\\test.bat\"')\r\n\r\n# it takes a bit of time to trigger the service/create logs, etc.\r\ntime.sleep(sleep_time)\r\n\r\n# count all the requests from the input file/files\r\nfiles = glob.glob(where_is_test + r\"\\*.txt\")\r\nfor name in files:\r\n    with open(name, 'r') as file:\r\n        list_of_lines.append(file.read())\r\nstring_main = str(list_of_lines)\r\nnr_of_requests = string_main.count(\"M034\")\r\nlist_of_lines = []\r\ntimeout = nr_of_requests * 600\r\n\r\nwhile True:\r\n    # stop once the overall timeout is exceeded\r\n    current_time = int(round(time.time()))\r\n    if current_time - start_time > timeout:\r\n        break\r\n    # count all the (\"Completed: Project\") entries in path_to_validation_log\r\n    files_in_validation = 
glob.glob(path_to_validation_log + r\"\\*.log\")\r\n    for validation_log in files_in_validation:\r\n        with open(validation_log, 'r') as file:\r\n            list_of_lines.append(file.read())\r\n    container_of_validated = str(list_of_lines)\r\n    if container_of_validated == '[]':\r\n        flag_of_error = True\r\n        break\r\n    how_many_completed_prj = container_of_validated.count(\"Completed: Project\")\r\n\r\n\r\n    # check if there are any errors in the log\r\n    #error_compiled = re.compile('error*', re.IGNORECASE)\r\n    count_errors = re.findall('error', container_of_validated, re.IGNORECASE) # matched case-insensitively via re.IGNORECASE\r\n    if len(count_errors) > 0:\r\n        flag_of_error = True\r\n        with open(path_to_test + \"\\\\\" + \"ERROR.txt\", 'w') as file:\r\n            file.write(\"ERROR IN VALIDATION LOG\")\r\n        #os.system (r\"C:\\Program Files (x86)\\IBM Application Discovery Build Client\\Bin\\Release\\UpgradeClean.bat\")\r\n        #start the validation service\r\n        #Check this on windows\r\n        #infi.win32service.(\"IBMApplicationDiscoveryBatchService\")\r\n\r\n        break\r\n    if how_many_completed_prj == nr_of_requests:\r\n        break\r\n    else:\r\n        container_of_validated = None\r\n        how_many_completed_prj = None\r\n        list_of_lines = []\r\n        time.sleep(sleep_time)\r\n\r\n# Here we take the time and store it in a variable\r\n\r\nif not flag_of_error:\r\n    regex_pattern_time = re.compile(r'\\d\\d\\/\\d\\d\\/\\d\\d\\d\\d \\d\\d:\\d\\d:\\d\\d')\r\n    container_for_all_time = re.findall(regex_pattern_time, container_of_validated)\r\n    validation_start = datetime.datetime.strptime(container_for_all_time[0], '%d/%m/%Y %H:%M:%S')\r\n    validation_end = datetime.datetime.strptime(container_for_all_time[-1], '%d/%m/%Y %H:%M:%S')\r\n    validation_total = validation_end - validation_start\r\n\r\n# Cleanup process\r\n\r\n    # Cut and paste validation log\r\nclean_procedure_validation = Clean_logs(path_to_validation_log, \".log\")\r\nclean_procedure_validation.CreateFolder()\r\nclean_procedure_validation.Copy_logs()\r\nclean_procedure_validation = None\r\n\r\n\r\nif not flag_of_error:\r\n    # write the time into a file under the test folder\r\n    with open(path_to_test + \"\\\\\" + \"TOTALTIME.txt\", 'w') as file:\r\n        file.write(str(validation_total))\r\n\r\n    # Clean rb log\r\nclean_procedure_rb_log = Clean_logs(path_to_rb_log, \".log\")\r\nclean_procedure_rb_log.Copy_logs()\r\nclean_procedure_rb_log = None\r\n    # Clean data log\r\nclean_procedure_reports = Clean_logs(path_to_reports, \".pdf\")\r\nclean_procedure_reports.Copy_logs()\r\nclean_procedure_reports = None\r\n\r\nprint(\"Done\")","sub_path":"PycharmProjects/PythonRulz/Folder_main/PersonalProjects/Work_related/parser_validation.py","file_name":"parser_validation.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"598123758","text":"import bpy\nimport math\nimport random\n\ndef clear_scene():\n    bpy.data.objects.remove(bpy.data.objects['Cube'])\n\ndef setup_denoising():\n    if bpy.context.scene.render.engine != 'CYCLES':\n        return\n\n    bpy.context.view_layer.cycles.denoising_store_passes = True\n    bpy.context.scene.use_nodes = True\n    bpy.context.scene.node_tree.nodes.clear()\n\n    tree = bpy.context.scene.node_tree\n\n    composite = tree.nodes.new('CompositorNodeComposite')\n    composite.use_alpha = True\n    render_layers = tree.nodes.new('CompositorNodeRLayers')\n    denoise = tree.nodes.new('CompositorNodeDenoise')\n    denoise.use_hdr = True\n\n    tree.links.new(render_layers.outputs['Noisy Image'], denoise.inputs['Image'])\n    
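# routing the Normal and Albedo passes below gives the Denoise node extra guidance for preserving edges and texture detail\n    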
tree.links.new(render_layers.outputs['Denoising Normal'], denoise.inputs['Normal'])\n tree.links.new(render_layers.outputs['Denoising Albedo'], denoise.inputs['Albedo'])\n\n tree.links.new(denoise.outputs['Image'], composite.inputs['Image'])\n\ndef setup_orb_material(mat, color):\n mat.use_nodes = True\n mat.node_tree.nodes.clear()\n\n bsdf = mat.node_tree.nodes.new('ShaderNodeBsdfGlass')\n\n bsdf.inputs['Color'].default_value = color\n bsdf.inputs['Roughness'].default_value = 0.1\n bsdf.inputs['IOR'].default_value = 1.7\n\n output = mat.node_tree.nodes.new('ShaderNodeOutputMaterial')\n mat.node_tree.links.new(bsdf.outputs['BSDF'], output.inputs['Surface'])\n\ndef setup_background():\n bpy.ops.mesh.primitive_plane_add(location = (0, 0, 0), size = 100)\n plane = bpy.context.selected_objects[0]\n\n mat = bpy.data.materials.new('plane')\n mat.use_nodes = True\n mat.node_tree.nodes.clear()\n\n checker = mat.node_tree.nodes.new('ShaderNodeTexChecker')\n checker.inputs['Scale'].default_value = 40\n bsdf = mat.node_tree.nodes.new('ShaderNodeBsdfDiffuse')\n mat.node_tree.links.new(checker.outputs['Color'], bsdf.inputs['Color'])\n\n output = mat.node_tree.nodes.new('ShaderNodeOutputMaterial')\n mat.node_tree.links.new(bsdf.outputs['BSDF'], output.inputs['Surface'])\n\n plane.active_material = mat\n\ndef setup_camera():\n camera = bpy.data.objects['Camera']\n camera.location = (15, -12, 10)\n\nclear_scene()\nsetup_denoising()\nsetup_background()\nsetup_camera()\n\nred = bpy.data.materials.new('orb_red')\nsetup_orb_material(red, (1, 0, 0, 1))\n\ngreen = bpy.data.materials.new('orb_green')\nsetup_orb_material(green, (0, 1, 0, 1))\n\nblue = bpy.data.materials.new('orb_blue')\nsetup_orb_material(blue, (0, 0, 1, 1))\n\nfor i in range(100):\n radius = random.uniform(0.1, 1)\n bpy.ops.mesh.primitive_uv_sphere_add(radius = radius)\n sphere = bpy.context.selected_objects[0]\n\n x, y, z = random.uniform(-10, 20), random.uniform(-5, 5), random.uniform(0, 10)\n sphere.location = (x, y, z + radius)\n sphere.active_material = random.choice([red, green, blue])\n\n bpy.ops.object.mode_set(mode = 'EDIT')\n bpy.ops.mesh.faces_shade_smooth()\n bpy.ops.object.mode_set(mode = 'OBJECT')\n","sub_path":"orbs/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"262978099","text":"from driver import Driver\nfrom rider import Rider, WAITING, CANCELLED, SATISFIED\nfrom container import Queue, PriorityQueue\nfrom location import Location\n\n\nclass Dispatcher:\n \"\"\"A dispatcher fulfills requests from riders and drivers for a\n ride-sharing service.\n\n When a rider requests a driver, the dispatcher assigns a driver to the\n rider. If no driver is available, the rider is placed on a waiting\n list for the next available driver. A rider that has not yet been\n picked up by a driver may cancel their request.\n\n When a driver requests a rider, the dispatcher assigns a rider from\n the waiting list to the driver. If there is no rider on the waiting list\n the dispatcher does nothing. 
Once a driver requests a rider, the driver\n    is registered with the dispatcher, and will be used to fulfill future\n    rider requests.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize a Dispatcher.\n\n        @type self: Dispatcher\n        @type _waitlist: Queue of Rider\n        @type _fleet: PriorityQueue of Driver\n        @rtype: None\n        \"\"\"\n        self._waitlist = Queue()\n        self._fleet = []\n\n    def request_driver(self, rider):\n        \"\"\"Return a driver for the rider, or None if no driver is available.\n\n        Add the rider to the waiting list if there is no available driver.\n\n        @type self: Dispatcher\n        @type rider: Rider\n        @rtype: Driver | None\n\n        >>> d = Dispatcher()\n        >>> d1 = Driver('a', Location(9,0), 1)\n        >>> d2 = Driver('b', Location(0,0), 1)\n        >>> d._fleet = [d1, d2]\n        >>> r1 = Rider('a', Location(0,0), Location(1,0), 3)\n        >>> print(d.request_driver(r1))\n        b\n        \"\"\"\n        # Only idle drivers can take a new rider.\n        idle_drivers = [driver for driver in self._fleet if driver.is_idle]\n\n        if not idle_drivers:\n            self._waitlist.add(rider)\n            return None\n\n        # Assign the idle driver with the shortest travel time to the rider's origin.\n        return min(idle_drivers,\n                   key=lambda driver: driver.get_travel_time(rider.origin))\n\n    def request_rider(self, driver):\n        \"\"\"Return a rider for the driver, or None if no rider is available.\n\n        If this is a new driver, register the driver for future rider requests.\n\n        @type self: Dispatcher\n        @type driver: Driver\n        @rtype: Rider | None\n\n        >>> d = Dispatcher()\n        >>> r1 = Rider('a', Location(0,0), Location(1,0), 3)\n        >>> d._waitlist = Queue()\n        >>> d._waitlist.add(r1)\n        >>> d1 = Driver('a', Location(0,0), 1)\n        >>> d2 = Driver('b', Location(0,0), 1)\n        >>> d._fleet = [d1]\n        >>> print(d.request_rider(d2))\n        a\n        \"\"\"\n        if driver not in self._fleet:\n            self._fleet.append(driver)\n\n        # If waitlist is not empty, assign a rider\n        if not self._waitlist.is_empty():\n            return self._waitlist.remove()\n        else:\n            return None\n\n    def cancel_ride(self, rider):\n        \"\"\"Cancel the ride for rider and change their status to CANCELLED.\n\n        @type self: Dispatcher\n        @type rider: Rider\n        @rtype: None\n        \"\"\"\n        rider._status = CANCELLED\n        self._waitlist.remove(rider)\n","sub_path":"FINAL/CSC148-2016S-A1-master (2)/CSC148-2016S-A1-master/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"56902489","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('', views.index , name=\"Puestos\"),\n path('create', views.puestosForm, name=\"InsertarPuesto\"), # post and get\n path('create/<int:id>', views.puestosForm,name=\"ActualizarPuesto\"),\n path('delete/<int:id>', views.delete, name=\"EliminarPuesto\")\n]\n","sub_path":"Apps/Puestos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"335172168","text":"from contextlib import ExitStack\nfrom copy import copy\nimport io\nimport os\nfrom pathlib import Path\nimport platform\nimport sys\nimport urllib.request\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom PIL import Image\n\nfrom matplotlib import (\n colors, image as mimage, patches, pyplot as plt, style, rcParams)\nfrom matplotlib.image import (AxesImage, BboxImage, FigureImage,\n NonUniformImage, PcolorImage)\nfrom matplotlib.testing.decorators import check_figures_equal, image_comparison\nfrom matplotlib.transforms import Bbox, Affine2D, TransformedBbox\n\nimport pytest\n\n\n@image_comparison(['image_interps'], style='mpl20')\ndef test_image_interps():\n \"\"\"Make the basic nearest, bilinear and bicubic interps.\"\"\"\n # Remove this line when this test image is regenerated.\n plt.rcParams['text.kerning_factor'] = 6\n\n X = np.arange(100)\n X = X.reshape(5, 20)\n\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n ax1.imshow(X, interpolation='nearest')\n ax1.set_title('three interpolations')\n ax1.set_ylabel('nearest')\n\n ax2 = fig.add_subplot(312)\n ax2.imshow(X, interpolation='bilinear')\n ax2.set_ylabel('bilinear')\n\n ax3 = fig.add_subplot(313)\n ax3.imshow(X, interpolation='bicubic')\n ax3.set_ylabel('bicubic')\n\n\n@image_comparison(['interp_alpha.png'], remove_text=True)\ndef test_alpha_interp():\n \"\"\"Test the interpolation of the alpha channel on RGBA images\"\"\"\n fig, (axl, axr) = plt.subplots(1, 2)\n # full green image\n img = np.zeros((5, 5, 4))\n img[..., 1] = np.ones((5, 5))\n # transparent under main diagonal\n img[..., 3] = np.tril(np.ones((5, 5), dtype=np.uint8))\n axl.imshow(img, interpolation=\"none\")\n axr.imshow(img, interpolation=\"bilinear\")\n\n\n@image_comparison(['interp_nearest_vs_none'],\n extensions=['pdf', 'svg'], remove_text=True)\ndef test_interp_nearest_vs_none():\n \"\"\"Test the effect of \"nearest\" and \"none\" interpolation\"\"\"\n # Setting dpi to something really small makes the difference very\n # visible. 
This works fine with pdf, since the dpi setting doesn't\n # affect anything but images, but the agg output becomes unusably\n # small.\n rcParams['savefig.dpi'] = 3\n X = np.array([[[218, 165, 32], [122, 103, 238]],\n [[127, 255, 0], [255, 99, 71]]], dtype=np.uint8)\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n ax1.imshow(X, interpolation='none')\n ax1.set_title('interpolation none')\n ax2 = fig.add_subplot(122)\n ax2.imshow(X, interpolation='nearest')\n ax2.set_title('interpolation nearest')\n\n\n@pytest.mark.parametrize('suppressComposite', [False, True])\n@image_comparison(['figimage'], extensions=['png', 'pdf'])\ndef test_figimage(suppressComposite):\n fig = plt.figure(figsize=(2, 2), dpi=100)\n fig.suppressComposite = suppressComposite\n x, y = np.ix_(np.arange(100) / 100.0, np.arange(100) / 100)\n z = np.sin(x**2 + y**2 - x*y)\n c = np.sin(20*x**2 + 50*y**2)\n img = z + c/5\n\n fig.figimage(img, xo=0, yo=0, origin='lower')\n fig.figimage(img[::-1, :], xo=0, yo=100, origin='lower')\n fig.figimage(img[:, ::-1], xo=100, yo=0, origin='lower')\n fig.figimage(img[::-1, ::-1], xo=100, yo=100, origin='lower')\n\n\ndef test_image_python_io():\n fig, ax = plt.subplots()\n ax.plot([1, 2, 3])\n buffer = io.BytesIO()\n fig.savefig(buffer)\n buffer.seek(0)\n plt.imread(buffer)\n\n\n@pytest.mark.parametrize(\n \"img_size, fig_size, interpolation\",\n [(5, 2, \"hanning\"), # data larger than figure.\n (5, 5, \"nearest\"), # exact resample.\n (5, 10, \"nearest\"), # double sample.\n (3, 2.9, \"hanning\"), # <3 upsample.\n (3, 9.1, \"nearest\"), # >3 upsample.\n ])\n@check_figures_equal(extensions=['png'])\ndef test_imshow_antialiased(fig_test, fig_ref,\n img_size, fig_size, interpolation):\n np.random.seed(19680801)\n dpi = plt.rcParams[\"savefig.dpi\"]\n A = np.random.rand(int(dpi * img_size), int(dpi * img_size))\n for fig in [fig_test, fig_ref]:\n fig.set_size_inches(fig_size, fig_size)\n axs = fig_test.subplots()\n axs.set_position([0, 0, 1, 1])\n axs.imshow(A, interpolation='antialiased')\n axs = fig_ref.subplots()\n axs.set_position([0, 0, 1, 1])\n axs.imshow(A, interpolation=interpolation)\n\n\n@check_figures_equal(extensions=['png'])\ndef test_imshow_zoom(fig_test, fig_ref):\n # should be less than 3 upsample, so should be nearest...\n np.random.seed(19680801)\n dpi = plt.rcParams[\"savefig.dpi\"]\n A = np.random.rand(int(dpi * 3), int(dpi * 3))\n for fig in [fig_test, fig_ref]:\n fig.set_size_inches(2.9, 2.9)\n axs = fig_test.subplots()\n axs.imshow(A, interpolation='antialiased')\n axs.set_xlim([10, 20])\n axs.set_ylim([10, 20])\n axs = fig_ref.subplots()\n axs.imshow(A, interpolation='nearest')\n axs.set_xlim([10, 20])\n axs.set_ylim([10, 20])\n\n\n@check_figures_equal()\ndef test_imshow_pil(fig_test, fig_ref):\n style.use(\"default\")\n png_path = Path(__file__).parent / \"baseline_images/pngsuite/basn3p04.png\"\n tiff_path = Path(__file__).parent / \"baseline_images/test_image/uint16.tif\"\n axs = fig_test.subplots(2)\n axs[0].imshow(Image.open(png_path))\n axs[1].imshow(Image.open(tiff_path))\n axs = fig_ref.subplots(2)\n axs[0].imshow(plt.imread(png_path))\n axs[1].imshow(plt.imread(tiff_path))\n\n\ndef test_imread_pil_uint16():\n img = plt.imread(os.path.join(os.path.dirname(__file__),\n 'baseline_images', 'test_image', 'uint16.tif'))\n assert img.dtype == np.uint16\n assert np.sum(img) == 134184960\n\n\ndef test_imread_fspath():\n img = plt.imread(\n Path(__file__).parent / 'baseline_images/test_image/uint16.tif')\n assert img.dtype == np.uint16\n assert np.sum(img) == 
134184960\n\n\n@pytest.mark.parametrize(\"fmt\", [\"png\", \"jpg\", \"jpeg\", \"tiff\"])\ndef test_imsave(fmt):\n has_alpha = fmt not in [\"jpg\", \"jpeg\"]\n\n # The goal here is that the user can specify an output logical DPI\n # for the image, but this will not actually add any extra pixels\n # to the image, it will merely be used for metadata purposes.\n\n # So we do the traditional case (dpi == 1), and the new case (dpi\n # == 100) and read the resulting PNG files back in and make sure\n # the data is 100% identical.\n np.random.seed(1)\n # The height of 1856 pixels was selected because going through creating an\n # actual dpi=100 figure to save the image to a Pillow-provided format would\n # cause a rounding error resulting in a final image of shape 1855.\n data = np.random.rand(1856, 2)\n\n buff_dpi1 = io.BytesIO()\n plt.imsave(buff_dpi1, data, format=fmt, dpi=1)\n\n buff_dpi100 = io.BytesIO()\n plt.imsave(buff_dpi100, data, format=fmt, dpi=100)\n\n buff_dpi1.seek(0)\n arr_dpi1 = plt.imread(buff_dpi1, format=fmt)\n\n buff_dpi100.seek(0)\n arr_dpi100 = plt.imread(buff_dpi100, format=fmt)\n\n assert arr_dpi1.shape == (1856, 2, 3 + has_alpha)\n assert arr_dpi100.shape == (1856, 2, 3 + has_alpha)\n\n assert_array_equal(arr_dpi1, arr_dpi100)\n\n\n@pytest.mark.parametrize(\"fmt\", [\"png\", \"pdf\", \"ps\", \"eps\", \"svg\"])\ndef test_imsave_fspath(fmt):\n plt.imsave(Path(os.devnull), np.array([[0, 1]]), format=fmt)\n\n\ndef test_imsave_color_alpha():\n # Test that imsave accept arrays with ndim=3 where the third dimension is\n # color and alpha without raising any exceptions, and that the data is\n # acceptably preserved through a save/read roundtrip.\n np.random.seed(1)\n\n for origin in ['lower', 'upper']:\n data = np.random.rand(16, 16, 4)\n buff = io.BytesIO()\n plt.imsave(buff, data, origin=origin, format=\"png\")\n\n buff.seek(0)\n arr_buf = plt.imread(buff)\n\n # Recreate the float -> uint8 conversion of the data\n # We can only expect to be the same with 8 bits of precision,\n # since that's what the PNG file used.\n data = (255*data).astype('uint8')\n if origin == 'lower':\n data = data[::-1]\n arr_buf = (255*arr_buf).astype('uint8')\n\n assert_array_equal(data, arr_buf)\n\n\ndef test_imsave_pil_kwargs_png():\n from PIL.PngImagePlugin import PngInfo\n buf = io.BytesIO()\n pnginfo = PngInfo()\n pnginfo.add_text(\"Software\", \"test\")\n plt.imsave(buf, [[0, 1], [2, 3]],\n format=\"png\", pil_kwargs={\"pnginfo\": pnginfo})\n im = Image.open(buf)\n assert im.info[\"Software\"] == \"test\"\n\n\ndef test_imsave_pil_kwargs_tiff():\n from PIL.TiffTags import TAGS_V2 as TAGS\n buf = io.BytesIO()\n pil_kwargs = {\"description\": \"test image\"}\n plt.imsave(buf, [[0, 1], [2, 3]], format=\"tiff\", pil_kwargs=pil_kwargs)\n im = Image.open(buf)\n tags = {TAGS[k].name: v for k, v in im.tag_v2.items()}\n assert tags[\"ImageDescription\"] == \"test image\"\n\n\n@image_comparison(['image_alpha'], remove_text=True)\ndef test_image_alpha():\n plt.figure()\n\n np.random.seed(0)\n Z = np.random.rand(6, 6)\n\n plt.subplot(131)\n plt.imshow(Z, alpha=1.0, interpolation='none')\n\n plt.subplot(132)\n plt.imshow(Z, alpha=0.5, interpolation='none')\n\n plt.subplot(133)\n plt.imshow(Z, alpha=0.5, interpolation='nearest')\n\n\ndef test_cursor_data():\n from matplotlib.backend_bases import MouseEvent\n\n fig, ax = plt.subplots()\n im = ax.imshow(np.arange(100).reshape(10, 10), origin='upper')\n\n x, y = 4, 4\n xdisp, ydisp = ax.transData.transform([x, y])\n\n event = MouseEvent('motion_notify_event', 
fig.canvas, xdisp, ydisp)\n assert im.get_cursor_data(event) == 44\n\n # Now try for a point outside the image\n # Tests issue #4957\n x, y = 10.1, 4\n xdisp, ydisp = ax.transData.transform([x, y])\n\n event = MouseEvent('motion_notify_event', fig.canvas, xdisp, ydisp)\n assert im.get_cursor_data(event) is None\n\n # Hmm, something is wrong here... I get 0, not None...\n # But, this works further down in the tests with extents flipped\n #x, y = 0.1, -0.1\n #xdisp, ydisp = ax.transData.transform([x, y])\n #event = MouseEvent('motion_notify_event', fig.canvas, xdisp, ydisp)\n #z = im.get_cursor_data(event)\n #assert z is None, \"Did not get None, got %d\" % z\n\n ax.clear()\n # Now try with the extents flipped.\n im = ax.imshow(np.arange(100).reshape(10, 10), origin='lower')\n\n x, y = 4, 4\n xdisp, ydisp = ax.transData.transform([x, y])\n\n event = MouseEvent('motion_notify_event', fig.canvas, xdisp, ydisp)\n assert im.get_cursor_data(event) == 44\n\n fig, ax = plt.subplots()\n im = ax.imshow(np.arange(100).reshape(10, 10), extent=[0, 0.5, 0, 0.5])\n\n x, y = 0.25, 0.25\n xdisp, ydisp = ax.transData.transform([x, y])\n\n event = MouseEvent('motion_notify_event', fig.canvas, xdisp, ydisp)\n assert im.get_cursor_data(event) == 55\n\n # Now try for a point outside the image\n # Tests issue #4957\n x, y = 0.75, 0.25\n xdisp, ydisp = ax.transData.transform([x, y])\n\n event = MouseEvent('motion_notify_event', fig.canvas, xdisp, ydisp)\n assert im.get_cursor_data(event) is None\n\n x, y = 0.01, -0.01\n xdisp, ydisp = ax.transData.transform([x, y])\n\n event = MouseEvent('motion_notify_event', fig.canvas, xdisp, ydisp)\n assert im.get_cursor_data(event) is None\n\n\n@pytest.mark.parametrize(\n \"data, text_without_colorbar, text_with_colorbar\", [\n ([[10001, 10000]], \"[1e+04]\", \"[10001]\"),\n ([[.123, .987]], \"[0.123]\", \"[0.123]\"),\n])\ndef test_format_cursor_data(data, text_without_colorbar, text_with_colorbar):\n from matplotlib.backend_bases import MouseEvent\n\n fig, ax = plt.subplots()\n im = ax.imshow(data)\n\n xdisp, ydisp = ax.transData.transform([0, 0])\n event = MouseEvent('motion_notify_event', fig.canvas, xdisp, ydisp)\n assert im.get_cursor_data(event) == data[0][0]\n assert im.format_cursor_data(im.get_cursor_data(event)) \\\n == text_without_colorbar\n\n fig.colorbar(im)\n fig.canvas.draw() # This is necessary to set up the colorbar formatter.\n\n assert im.get_cursor_data(event) == data[0][0]\n assert im.format_cursor_data(im.get_cursor_data(event)) \\\n == text_with_colorbar\n\n\n@image_comparison(['image_clip'], style='mpl20')\ndef test_image_clip():\n d = [[1, 2], [3, 4]]\n\n fig, ax = plt.subplots()\n im = ax.imshow(d)\n patch = patches.Circle((0, 0), radius=1, transform=ax.transData)\n im.set_clip_path(patch)\n\n\n@image_comparison(['image_cliprect'], style='mpl20')\ndef test_image_cliprect():\n fig, ax = plt.subplots()\n d = [[1, 2], [3, 4]]\n\n im = ax.imshow(d, extent=(0, 5, 0, 5))\n\n rect = patches.Rectangle(\n xy=(1, 1), width=2, height=2, transform=im.axes.transData)\n im.set_clip_path(rect)\n\n\n@image_comparison(['imshow'], remove_text=True, style='mpl20')\ndef test_imshow():\n fig, ax = plt.subplots()\n arr = np.arange(100).reshape((10, 10))\n ax.imshow(arr, interpolation=\"bilinear\", extent=(1, 2, 1, 2))\n ax.set_xlim(0, 3)\n ax.set_ylim(0, 3)\n\n\n@check_figures_equal(extensions=['png'])\ndef test_imshow_10_10_1(fig_test, fig_ref):\n # 10x10x1 should be the same as 10x10\n arr = np.arange(100).reshape((10, 10, 1))\n ax = fig_ref.subplots()\n 
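# reference figure: drop the trailing singleton channel and plot the plain 2-D array\n    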
ax.imshow(arr[:, :, 0], interpolation=\"bilinear\", extent=(1, 2, 1, 2))\n ax.set_xlim(0, 3)\n ax.set_ylim(0, 3)\n\n ax = fig_test.subplots()\n ax.imshow(arr, interpolation=\"bilinear\", extent=(1, 2, 1, 2))\n ax.set_xlim(0, 3)\n ax.set_ylim(0, 3)\n\n\ndef test_imshow_10_10_2():\n fig, ax = plt.subplots()\n arr = np.arange(200).reshape((10, 10, 2))\n with pytest.raises(TypeError):\n ax.imshow(arr)\n\n\ndef test_imshow_10_10_5():\n fig, ax = plt.subplots()\n arr = np.arange(500).reshape((10, 10, 5))\n with pytest.raises(TypeError):\n ax.imshow(arr)\n\n\n@image_comparison(['no_interpolation_origin'], remove_text=True)\ndef test_no_interpolation_origin():\n fig, axs = plt.subplots(2)\n axs[0].imshow(np.arange(100).reshape((2, 50)), origin=\"lower\",\n interpolation='none')\n axs[1].imshow(np.arange(100).reshape((2, 50)), interpolation='none')\n\n\n@image_comparison(['image_shift'], remove_text=True, extensions=['pdf', 'svg'])\ndef test_image_shift():\n imgData = [[1 / x + 1 / y for x in range(1, 100)] for y in range(1, 100)]\n tMin = 734717.945208\n tMax = 734717.946366\n\n fig, ax = plt.subplots()\n ax.imshow(imgData, norm=colors.LogNorm(), interpolation='none',\n extent=(tMin, tMax, 1, 100))\n ax.set_aspect('auto')\n\n\ndef test_image_edges():\n fig = plt.figure(figsize=[1, 1])\n ax = fig.add_axes([0, 0, 1, 1], frameon=False)\n\n data = np.tile(np.arange(12), 15).reshape(20, 9)\n\n im = ax.imshow(data, origin='upper', extent=[-10, 10, -10, 10],\n interpolation='none', cmap='gray')\n\n x = y = 2\n ax.set_xlim([-x, x])\n ax.set_ylim([-y, y])\n\n ax.set_xticks([])\n ax.set_yticks([])\n\n buf = io.BytesIO()\n fig.savefig(buf, facecolor=(0, 1, 0))\n\n buf.seek(0)\n\n im = plt.imread(buf)\n r, g, b, a = sum(im[:, 0])\n r, g, b, a = sum(im[:, -1])\n\n assert g != 100, 'Expected a non-green edge - but sadly, it was.'\n\n\n@image_comparison(['image_composite_background'],\n remove_text=True, style='mpl20')\ndef test_image_composite_background():\n fig, ax = plt.subplots()\n arr = np.arange(12).reshape(4, 3)\n ax.imshow(arr, extent=[0, 2, 15, 0])\n ax.imshow(arr, extent=[4, 6, 15, 0])\n ax.set_facecolor((1, 0, 0, 0.5))\n ax.set_xlim([0, 12])\n\n\n@image_comparison(['image_composite_alpha'], remove_text=True)\ndef test_image_composite_alpha():\n \"\"\"\n Tests that the alpha value is recognized and correctly applied in the\n process of compositing images together.\n \"\"\"\n fig, ax = plt.subplots()\n arr = np.zeros((11, 21, 4))\n arr[:, :, 0] = 1\n arr[:, :, 3] = np.concatenate(\n (np.arange(0, 1.1, 0.1), np.arange(0, 1, 0.1)[::-1]))\n arr2 = np.zeros((21, 11, 4))\n arr2[:, :, 0] = 1\n arr2[:, :, 1] = 1\n arr2[:, :, 3] = np.concatenate(\n (np.arange(0, 1.1, 0.1), np.arange(0, 1, 0.1)[::-1]))[:, np.newaxis]\n ax.imshow(arr, extent=[1, 2, 5, 0], alpha=0.3)\n ax.imshow(arr, extent=[2, 3, 5, 0], alpha=0.6)\n ax.imshow(arr, extent=[3, 4, 5, 0])\n ax.imshow(arr2, extent=[0, 5, 1, 2])\n ax.imshow(arr2, extent=[0, 5, 2, 3], alpha=0.6)\n ax.imshow(arr2, extent=[0, 5, 3, 4], alpha=0.3)\n ax.set_facecolor((0, 0.5, 0, 1))\n ax.set_xlim([0, 5])\n ax.set_ylim([5, 0])\n\n\n@image_comparison(['rasterize_10dpi'],\n extensions=['pdf', 'svg'], remove_text=True, style='mpl20')\ndef test_rasterize_dpi():\n # This test should check rasterized rendering with high output resolution.\n # It plots a rasterized line and a normal image with imshow. So it will\n # catch when images end up in the wrong place in case of non-standard dpi\n # setting. Instead of high-res rasterization I use low-res. 
Therefore\n # the fact that the resolution is non-standard is easily checked by\n # image_comparison.\n img = np.asarray([[1, 2], [3, 4]])\n\n fig, axs = plt.subplots(1, 3, figsize=(3, 1))\n\n axs[0].imshow(img)\n\n axs[1].plot([0, 1], [0, 1], linewidth=20., rasterized=True)\n axs[1].set(xlim=(0, 1), ylim=(-1, 2))\n\n axs[2].plot([0, 1], [0, 1], linewidth=20.)\n axs[2].set(xlim=(0, 1), ylim=(-1, 2))\n\n # Low-dpi PDF rasterization errors prevent proper image comparison tests.\n # Hide detailed structures like the axes spines.\n for ax in axs:\n ax.set_xticks([])\n ax.set_yticks([])\n for spine in ax.spines.values():\n spine.set_visible(False)\n\n rcParams['savefig.dpi'] = 10\n\n\n@image_comparison(['bbox_image_inverted'], remove_text=True, style='mpl20')\ndef test_bbox_image_inverted():\n # This is just used to produce an image to feed to BboxImage\n image = np.arange(100).reshape((10, 10))\n\n fig, ax = plt.subplots()\n bbox_im = BboxImage(\n TransformedBbox(Bbox([[100, 100], [0, 0]]), ax.transData),\n interpolation='nearest')\n bbox_im.set_data(image)\n bbox_im.set_clip_on(False)\n ax.set_xlim(0, 100)\n ax.set_ylim(0, 100)\n ax.add_artist(bbox_im)\n\n image = np.identity(10)\n\n bbox_im = BboxImage(TransformedBbox(Bbox([[0.1, 0.2], [0.3, 0.25]]),\n ax.figure.transFigure),\n interpolation='nearest')\n bbox_im.set_data(image)\n bbox_im.set_clip_on(False)\n ax.add_artist(bbox_im)\n\n\ndef test_get_window_extent_for_AxisImage():\n # Create a figure of known size (1000x1000 pixels), place an image\n # object at a given location and check that get_window_extent()\n # returns the correct bounding box values (in pixels).\n\n im = np.array([[0.25, 0.75, 1.0, 0.75], [0.1, 0.65, 0.5, 0.4],\n [0.6, 0.3, 0.0, 0.2], [0.7, 0.9, 0.4, 0.6]])\n fig, ax = plt.subplots(figsize=(10, 10), dpi=100)\n ax.set_position([0, 0, 1, 1])\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n im_obj = ax.imshow(\n im, extent=[0.4, 0.7, 0.2, 0.9], interpolation='nearest')\n\n fig.canvas.draw()\n renderer = fig.canvas.renderer\n im_bbox = im_obj.get_window_extent(renderer)\n\n assert_array_equal(im_bbox.get_points(), [[400, 200], [700, 900]])\n\n\n@image_comparison(['zoom_and_clip_upper_origin.png'],\n remove_text=True, style='mpl20')\ndef test_zoom_and_clip_upper_origin():\n image = np.arange(100)\n image = image.reshape((10, 10))\n\n fig, ax = plt.subplots()\n ax.imshow(image)\n ax.set_ylim(2.0, -0.5)\n ax.set_xlim(-0.5, 2.0)\n\n\ndef test_nonuniformimage_setcmap():\n ax = plt.gca()\n im = NonUniformImage(ax)\n im.set_cmap('Blues')\n\n\ndef test_nonuniformimage_setnorm():\n ax = plt.gca()\n im = NonUniformImage(ax)\n im.set_norm(plt.Normalize())\n\n\ndef test_jpeg_2d():\n # smoke test that mode-L pillow images work.\n imd = np.ones((10, 10), dtype='uint8')\n for i in range(10):\n imd[i, :] = np.linspace(0.0, 1.0, 10) * 255\n im = Image.new('L', (10, 10))\n im.putdata(imd.flatten())\n fig, ax = plt.subplots()\n ax.imshow(im)\n\n\ndef test_jpeg_alpha():\n plt.figure(figsize=(1, 1), dpi=300)\n # Create an image that is all black, with a gradient from 0-1 in\n # the alpha channel from left to right.\n im = np.zeros((300, 300, 4), dtype=float)\n im[..., 3] = np.linspace(0.0, 1.0, 300)\n\n plt.figimage(im)\n\n buff = io.BytesIO()\n plt.savefig(buff, facecolor=\"red\", format='jpg', dpi=300)\n\n buff.seek(0)\n image = Image.open(buff)\n\n # If this fails, there will be only one color (all black). 
If this\n # is working, we should have all 256 shades of grey represented.\n num_colors = len(image.getcolors(256))\n assert 175 <= num_colors <= 185\n # The fully transparent part should be red.\n corner_pixel = image.getpixel((0, 0))\n assert corner_pixel == (254, 0, 0)\n\n\ndef test_axesimage_setdata():\n ax = plt.gca()\n im = AxesImage(ax)\n z = np.arange(12, dtype=float).reshape((4, 3))\n im.set_data(z)\n z[0, 0] = 9.9\n assert im._A[0, 0] == 0, 'value changed'\n\n\ndef test_figureimage_setdata():\n fig = plt.gcf()\n im = FigureImage(fig)\n z = np.arange(12, dtype=float).reshape((4, 3))\n im.set_data(z)\n z[0, 0] = 9.9\n assert im._A[0, 0] == 0, 'value changed'\n\n\n@pytest.mark.parametrize(\n \"image_cls,x,y,a\", [\n (NonUniformImage,\n np.arange(3.), np.arange(4.), np.arange(12.).reshape((4, 3))),\n (PcolorImage,\n np.arange(3.), np.arange(4.), np.arange(6.).reshape((3, 2))),\n ])\ndef test_setdata_xya(image_cls, x, y, a):\n ax = plt.gca()\n im = image_cls(ax)\n im.set_data(x, y, a)\n x[0] = y[0] = a[0, 0] = 9.9\n assert im._A[0, 0] == im._Ax[0] == im._Ay[0] == 0, 'value changed'\n im.set_data(x, y, a.reshape((*a.shape, -1))) # Just a smoketest.\n\n\ndef test_minimized_rasterized():\n # This ensures that the rasterized content in the colorbars is\n # only as thick as the colorbar, and doesn't extend to other parts\n # of the image. See #5814. While the original bug exists only\n # in Postscript, the best way to detect it is to generate SVG\n # and then parse the output to make sure the two colorbar images\n # are the same size.\n from xml.etree import ElementTree\n\n np.random.seed(0)\n data = np.random.rand(10, 10)\n\n fig, ax = plt.subplots(1, 2)\n p1 = ax[0].pcolormesh(data)\n p2 = ax[1].pcolormesh(data)\n\n plt.colorbar(p1, ax=ax[0])\n plt.colorbar(p2, ax=ax[1])\n\n buff = io.BytesIO()\n plt.savefig(buff, format='svg')\n\n buff = io.BytesIO(buff.getvalue())\n tree = ElementTree.parse(buff)\n width = None\n for image in tree.iter('image'):\n if width is None:\n width = image['width']\n else:\n if image['width'] != width:\n assert False\n\n\ndef test_load_from_url():\n path = Path(__file__).parent / \"baseline_images/test_image/imshow.png\"\n url = ('file:'\n + ('///' if sys.platform == 'win32' else '')\n + path.resolve().as_posix())\n plt.imread(url)\n plt.imread(urllib.request.urlopen(url))\n\n\n@image_comparison(['log_scale_image'], remove_text=True)\ndef test_log_scale_image():\n Z = np.zeros((10, 10))\n Z[::2] = 1\n\n fig, ax = plt.subplots()\n ax.imshow(Z, extent=[1, 100, 1, 100], cmap='viridis', vmax=1, vmin=-1,\n aspect='auto')\n ax.set(yscale='log')\n\n\n@image_comparison(['rotate_image'], remove_text=True)\ndef test_rotate_image():\n delta = 0.25\n x = y = np.arange(-3.0, 3.0, delta)\n X, Y = np.meshgrid(x, y)\n Z1 = np.exp(-(X**2 + Y**2) / 2) / (2 * np.pi)\n Z2 = (np.exp(-(((X - 1) / 1.5)**2 + ((Y - 1) / 0.5)**2) / 2) /\n (2 * np.pi * 0.5 * 1.5))\n Z = Z2 - Z1 # difference of Gaussians\n\n fig, ax1 = plt.subplots(1, 1)\n im1 = ax1.imshow(Z, interpolation='none', cmap='viridis',\n origin='lower',\n extent=[-2, 4, -3, 2], clip_on=True)\n\n trans_data2 = Affine2D().rotate_deg(30) + ax1.transData\n im1.set_transform(trans_data2)\n\n # display intended extent of the image\n x1, x2, y1, y2 = im1.get_extent()\n\n ax1.plot([x1, x2, x2, x1, x1], [y1, y1, y2, y2, y1], \"r--\", lw=3,\n transform=trans_data2)\n\n ax1.set_xlim(2, 5)\n ax1.set_ylim(0, 4)\n\n\ndef test_image_preserve_size():\n buff = io.BytesIO()\n\n im = np.zeros((481, 321))\n plt.imsave(buff, im, format=\"png\")\n\n 
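# read the PNG back; the assertion below checks that no rows or columns were added or dropped\n    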
buff.seek(0)\n img = plt.imread(buff)\n\n assert img.shape[:2] == im.shape\n\n\ndef test_image_preserve_size2():\n n = 7\n data = np.identity(n, float)\n\n fig = plt.figure(figsize=(n, n), frameon=False)\n\n ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.imshow(data, interpolation='nearest', origin='lower', aspect='auto')\n buff = io.BytesIO()\n fig.savefig(buff, dpi=1)\n\n buff.seek(0)\n img = plt.imread(buff)\n\n assert img.shape == (7, 7, 4)\n\n assert_array_equal(np.asarray(img[:, :, 0], bool),\n np.identity(n, bool)[::-1])\n\n\n@image_comparison(['mask_image_over_under.png'], remove_text=True)\ndef test_mask_image_over_under():\n delta = 0.025\n x = y = np.arange(-3.0, 3.0, delta)\n X, Y = np.meshgrid(x, y)\n Z1 = np.exp(-(X**2 + Y**2) / 2) / (2 * np.pi)\n Z2 = (np.exp(-(((X - 1) / 1.5)**2 + ((Y - 1) / 0.5)**2) / 2) /\n (2 * np.pi * 0.5 * 1.5))\n Z = 10*(Z2 - Z1) # difference of Gaussians\n\n palette = copy(plt.cm.gray)\n palette.set_over('r', 1.0)\n palette.set_under('g', 1.0)\n palette.set_bad('b', 1.0)\n Zm = np.ma.masked_where(Z > 1.2, Z)\n fig, (ax1, ax2) = plt.subplots(1, 2)\n im = ax1.imshow(Zm, interpolation='bilinear',\n cmap=palette,\n norm=colors.Normalize(vmin=-1.0, vmax=1.0, clip=False),\n origin='lower', extent=[-3, 3, -3, 3])\n ax1.set_title('Green=low, Red=high, Blue=bad')\n fig.colorbar(im, extend='both', orientation='horizontal',\n ax=ax1, aspect=10)\n\n im = ax2.imshow(Zm, interpolation='nearest',\n cmap=palette,\n norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],\n ncolors=256, clip=False),\n origin='lower', extent=[-3, 3, -3, 3])\n ax2.set_title('With BoundaryNorm')\n fig.colorbar(im, extend='both', spacing='proportional',\n orientation='horizontal', ax=ax2, aspect=10)\n\n\n@image_comparison(['mask_image'], remove_text=True)\ndef test_mask_image():\n # Test mask image two ways: Using nans and using a masked array.\n\n fig, (ax1, ax2) = plt.subplots(1, 2)\n\n A = np.ones((5, 5))\n A[1:2, 1:2] = np.nan\n\n ax1.imshow(A, interpolation='nearest')\n\n A = np.zeros((5, 5), dtype=bool)\n A[1:2, 1:2] = True\n A = np.ma.masked_array(np.ones((5, 5), dtype=np.uint16), A)\n\n ax2.imshow(A, interpolation='nearest')\n\n\n@image_comparison(['imshow_endianess.png'], remove_text=True)\ndef test_imshow_endianess():\n x = np.arange(10)\n X, Y = np.meshgrid(x, x)\n Z = np.hypot(X - 5, Y - 5)\n\n fig, (ax1, ax2) = plt.subplots(1, 2)\n\n kwargs = dict(origin=\"lower\", interpolation='nearest', cmap='viridis')\n\n ax1.imshow(Z.astype('<f8'), **kwargs)\n ax2.imshow(Z.astype('>f8'), **kwargs)\n\n\n@image_comparison(['imshow_masked_interpolation'],\n tol=0 if platform.machine() == 'x86_64' else 0.01,\n remove_text=True, style='mpl20')\ndef test_imshow_masked_interpolation():\n\n cm = copy(plt.get_cmap('viridis'))\n cm.set_over('r')\n cm.set_under('b')\n cm.set_bad('k')\n\n N = 20\n n = colors.Normalize(vmin=0, vmax=N*N-1)\n\n data = np.arange(N*N, dtype=float).reshape(N, N)\n\n data[5, 5] = -1\n # This will cause crazy ringing for the higher-order\n # interpolations\n data[15, 5] = 1e5\n\n # data[3, 3] = np.nan\n\n data[15, 15] = np.inf\n\n mask = np.zeros_like(data).astype('bool')\n mask[5, 15] = True\n\n data = np.ma.masked_array(data, mask)\n\n fig, ax_grid = plt.subplots(3, 6)\n interps = sorted(mimage._interpd_)\n interps.remove('antialiased')\n\n for interp, ax in zip(interps, ax_grid.ravel()):\n ax.set_title(interp)\n ax.imshow(data, norm=n, cmap=cm, interpolation=interp)\n ax.axis('off')\n\n\ndef test_imshow_no_warn_invalid():\n 
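# non-finite values are masked internally rather than passed to the colormap\n    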
plt.imshow([[1, 2], [3, np.nan]]) # Check that no warning is emitted.\n\n\n@pytest.mark.parametrize(\n 'dtype', [np.dtype(s) for s in 'u2 u4 i2 i4 i8 f4 f8'.split()])\ndef test_imshow_clips_rgb_to_valid_range(dtype):\n arr = np.arange(300, dtype=dtype).reshape((10, 10, 3))\n if dtype.kind != 'u':\n arr -= 10\n too_low = arr < 0\n too_high = arr > 255\n if dtype.kind == 'f':\n arr = arr / 255\n _, ax = plt.subplots()\n out = ax.imshow(arr).get_array()\n assert (out[too_low] == 0).all()\n if dtype.kind == 'f':\n assert (out[too_high] == 1).all()\n assert out.dtype.kind == 'f'\n else:\n assert (out[too_high] == 255).all()\n assert out.dtype == np.uint8\n\n\n@image_comparison(['imshow_flatfield.png'], remove_text=True, style='mpl20')\ndef test_imshow_flatfield():\n fig, ax = plt.subplots()\n im = ax.imshow(np.ones((5, 5)), interpolation='nearest')\n im.set_clim(.5, 1.5)\n\n\n@image_comparison(['imshow_bignumbers.png'], remove_text=True, style='mpl20')\ndef test_imshow_bignumbers():\n rcParams['image.interpolation'] = 'nearest'\n # putting a big number in an array of integers shouldn't\n # ruin the dynamic range of the resolved bits.\n fig, ax = plt.subplots()\n img = np.array([[1, 2, 1e12], [3, 1, 4]], dtype=np.uint64)\n pc = ax.imshow(img)\n pc.set_clim(0, 5)\n\n\n@image_comparison(['imshow_bignumbers_real.png'],\n remove_text=True, style='mpl20')\ndef test_imshow_bignumbers_real():\n rcParams['image.interpolation'] = 'nearest'\n # putting a big number in an array of integers shouldn't\n # ruin the dynamic range of the resolved bits.\n fig, ax = plt.subplots()\n img = np.array([[2., 1., 1.e22], [4., 1., 3.]])\n pc = ax.imshow(img)\n pc.set_clim(0, 5)\n\n\n@pytest.mark.parametrize(\n \"make_norm\",\n [colors.Normalize,\n colors.LogNorm,\n lambda: colors.SymLogNorm(1),\n lambda: colors.PowerNorm(1)])\ndef test_empty_imshow(make_norm):\n fig, ax = plt.subplots()\n with pytest.warns(UserWarning,\n match=\"Attempting to set identical left == right\"):\n im = ax.imshow([[]], norm=make_norm())\n im.set_extent([-5, 5, -5, 5])\n fig.canvas.draw()\n\n with pytest.raises(RuntimeError):\n im.make_image(fig._cachedRenderer)\n\n\ndef test_imshow_float128():\n fig, ax = plt.subplots()\n ax.imshow(np.zeros((3, 3), dtype=np.longdouble))\n with (ExitStack() if np.can_cast(np.longdouble, np.float64, \"equiv\")\n else pytest.warns(UserWarning)):\n # Ensure that drawing doesn't cause crash.\n fig.canvas.draw()\n\n\ndef test_imshow_bool():\n fig, ax = plt.subplots()\n ax.imshow(np.array([[True, False], [False, True]], dtype=bool))\n\n\ndef test_full_invalid():\n fig, ax = plt.subplots()\n ax.imshow(np.full((10, 10), np.nan))\n with pytest.warns(UserWarning):\n fig.canvas.draw()\n\n\n@pytest.mark.parametrize(\"fmt,counted\",\n [(\"ps\", b\" colorimage\"), (\"svg\", b\"<image\")])\n@pytest.mark.parametrize(\"composite_image,count\", [(True, 1), (False, 2)])\ndef test_composite(fmt, counted, composite_image, count):\n # Test that figures can be saved with and without combining multiple images\n # (on a single set of axes) into a single composite image.\n X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))\n Z = np.sin(Y ** 2)\n\n fig, ax = plt.subplots()\n ax.set_xlim(0, 3)\n ax.imshow(Z, extent=[0, 1, 0, 1])\n ax.imshow(Z[::-1], extent=[2, 3, 0, 1])\n plt.rcParams['image.composite_image'] = composite_image\n buf = io.BytesIO()\n fig.savefig(buf, format=fmt)\n assert buf.getvalue().count(counted) == count\n\n\ndef test_relim():\n fig, ax = plt.subplots()\n ax.imshow([[0]], extent=(0, 1, 0, 1))\n ax.relim()\n 
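# autoscale() applies the data limits that relim() just recomputed from the image extent\n    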
ax.autoscale()\n assert ax.get_xlim() == ax.get_ylim() == (0, 1)\n\n\ndef test_unclipped():\n fig, ax = plt.subplots()\n ax.set_axis_off()\n im = ax.imshow([[0, 0], [0, 0]], aspect=\"auto\", extent=(-10, 10, -10, 10),\n cmap='gray', clip_on=False)\n ax.set(xlim=(0, 1), ylim=(0, 1))\n fig.canvas.draw()\n # The unclipped image should fill the *entire* figure and be black.\n # Ignore alpha for this comparison.\n assert (np.array(fig.canvas.buffer_rgba())[..., :3] == 0).all()\n\n\ndef test_respects_bbox():\n fig, axs = plt.subplots(2)\n for ax in axs:\n ax.set_axis_off()\n im = axs[1].imshow([[0, 1], [2, 3]], aspect=\"auto\", extent=(0, 1, 0, 1))\n im.set_clip_path(None)\n # Make the image invisible in axs[1], but visible in axs[0] if we pan\n # axs[1] up.\n im.set_clip_box(axs[0].bbox)\n buf_before = io.BytesIO()\n fig.savefig(buf_before, format=\"rgba\")\n assert {*buf_before.getvalue()} == {0xff} # All white.\n axs[1].set(ylim=(-1, 0))\n buf_after = io.BytesIO()\n fig.savefig(buf_after, format=\"rgba\")\n assert buf_before.getvalue() != buf_after.getvalue() # Not all white.\n\n\ndef test_image_cursor_formatting():\n fig, ax = plt.subplots()\n # Create a dummy image to be able to call format_cursor_data\n im = ax.imshow(np.zeros((4, 4)))\n\n data = np.ma.masked_array([0], mask=[True])\n assert im.format_cursor_data(data) == '[]'\n\n data = np.ma.masked_array([0], mask=[False])\n assert im.format_cursor_data(data) == '[0]'\n\n data = np.nan\n assert im.format_cursor_data(data) == '[nan]'\n\n\n@check_figures_equal()\ndef test_image_array_alpha(fig_test, fig_ref):\n \"\"\"Per-pixel alpha channel test.\"\"\"\n x = np.linspace(0, 1)\n xx, yy = np.meshgrid(x, x)\n\n zz = np.exp(- 3 * ((xx - 0.5) ** 2) + (yy - 0.7 ** 2))\n alpha = zz / zz.max()\n\n cmap = plt.get_cmap('viridis')\n ax = fig_test.add_subplot(111)\n ax.imshow(zz, alpha=alpha, cmap=cmap, interpolation='nearest')\n\n ax = fig_ref.add_subplot(111)\n rgba = cmap(colors.Normalize()(zz))\n rgba[..., -1] = alpha\n ax.imshow(rgba, interpolation='nearest')\n\n\n@pytest.mark.style('mpl20')\ndef test_exact_vmin():\n cmap = copy(plt.cm.get_cmap(\"autumn_r\"))\n cmap.set_under(color=\"lightgrey\")\n\n # make the image exactly 190 pixels wide\n fig = plt.figure(figsize=(1.9, 0.1), dpi=100)\n ax = fig.add_axes([0, 0, 1, 1])\n\n data = np.array(\n [[-1, -1, -1, 0, 0, 0, 0, 43, 79, 95, 66, 1, -1, -1, -1, 0, 0, 0, 34]],\n dtype=float,\n )\n\n im = ax.imshow(data, aspect=\"auto\", cmap=cmap, vmin=0, vmax=100)\n ax.axis(\"off\")\n fig.canvas.draw()\n\n # get the RGBA slice from the image\n from_image = im.make_image(fig.canvas.renderer)[0][0]\n # expand the input to be 190 long and run through norm / cmap\n direct_computation = (\n im.cmap(im.norm((data * ([[1]] * 10)).T.ravel())) * 255\n ).astype(int)\n\n # check than the RBGA values are the same\n assert np.all(from_image == direct_computation)\n","sub_path":"imGcode/env_imGcode/Lib/site-packages/matplotlib/tests/test_image.py","file_name":"test_image.py","file_ext":"py","file_size_in_byte":34943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"374270614","text":"from DataDashboard.views import *\nfrom django.urls import path\n\napp_name = \"DataDashboard\"\nurlpatterns = [\n # path('students', students, name='students'),\n # path('filter', StudentList),\n #path(\"student_listing/\", StudentListing.as_view(), name = 'listing'),\n # path(\"ajax/tutor_groups/\", getTutorGRoups, name = 'get_tutor_groups'),\n path(\"student_list/\", search, 
name='student_search'),\n path(\"add_intervention/\", add_intervention, name='add_intervention'),\n path('student_search', StudentAutocomplete.as_view(), name='student_autocomplete'),\n path('strategies/<int:teaching_strategy_pk>', view_strategy, name='view_strategy'),\n path('strategies/', find_strategies, name='find_strategies'),\n path('denied/', permission_error, name='permission_error'),\n path('strategies/<int:teaching_strategy_pk>/delete', delete_strategy, name='delete_strategy'),\n path('strategies/<int:teaching_strategy_pk>/confirm_delete', confirmed_delete_strategy, name='confirm_delete_strategy'),\n path('reportdashboard/', report_dashboard, name='report_dashboard'),\n path('curriculum/', curriculum_dashboard, name='curriculum_dashboard'),\n path('master/', master_dashboard, name='master_dashboard'),\n path('tutorial/<int:page_pk>', tutorial, name='tutorial'),\n path('', splash, name='splash'),\n]","sub_path":"DataDashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"329197106","text":"#Embedded file name: C:/Users/hovel/Dropbox/packages/studioLibrary/1.5.8/build27/studioLibrary\\plugins\\animationPlugin.py\n\"\"\"\n# Released subject to the BSD License\n# Please visit http://www.voidspace.org.uk/python/license.shtml\n#\n# Copyright (c) 2014, Kurt Rathjen\n# All rights reserved.\n# Comments, suggestions and bug reports are welcome.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n # * Redistributions of source code must retain the above copyright\n # notice, this list of conditions and the following disclaimer.\n # * Redistributions in binary form must reproduce the above copyright\n # notice, this list of conditions and the following disclaimer in the\n # documentation and/or other materials provided with the distribution.\n # * Neither the name of Kurt Rathjen nor the\n # names of its contributors may be used to endorse or promote products\n # derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY KURT RATHJEN ''AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL KURT RATHJEN BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\"\"\"\nimport os\nimport time\nimport shutil\nimport mutils\ntry:\n from PySide import QtGui\n from PySide import QtCore\nexcept ImportError:\n from PyQt4 import QtGui\n from PyQt4 import QtCore\n\ntry:\n import maya.mel\n import maya.cmds\nexcept ImportError:\n import traceback\n traceback.print_exc()\n\nimport studioLibrary\nimport studioLibrary.plugins.mayaBasePlugin as mayaBasePlugin\n\nclass AnimationPluginError(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n pass\n\n\nclass Plugin(mayaBasePlugin.Plugin):\n\n def __init__(self, parent):\n \"\"\"\n @type parent:\n \"\"\"\n studioLibrary.Plugin.__init__(self, parent)\n self.setName('Animation')\n self.setExtension('anim')\n self.setIcon(self.dirname() + '/images/animation.png')\n self.setRecord(Record)\n self.setInfoWidget(AnimationInfoWidget)\n self.setCreateWidget(AnimationCreateWidget)\n self.setPreviewWidget(AnimationPreviewWidget)\n settings = self.settings()\n settings.setdefault('byFrame', 1)\n settings.setdefault('byFrameDialog', True)\n settings.setdefault('connect', False)\n settings.setdefault('currentTime', False)\n settings.setdefault('showHelpImage', True)\n settings.setdefault('option', 'replace')\n\n\nclass Record(mayaBasePlugin.Record):\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n @type args: list[]\n @type kwargs: dict[]\n \"\"\"\n mayaBasePlugin.Record.__init__(self, *args, **kwargs)\n self._pose = None\n self._filename = None\n self._sequenceTimer = None\n\n def transferPath(self):\n return self.dirname()\n\n def transferObject(self):\n if self._transferObject is None:\n self._transferObject = mutils.Animation.createFromPath(self.transferPath())\n return self._transferObject\n\n def doubleClicked(self):\n \"\"\"\n \"\"\"\n self.accept()\n\n def stop(self):\n \"\"\"\n \"\"\"\n self._filename = None\n self._sequenceTimer.stop()\n self.repaint()\n\n def rename(self, *args, **kwargs):\n \"\"\"\n @type args: list[]\n @type kwargs: dict[]\n \"\"\"\n self._sequenceTimer = None\n mayaBasePlugin.Record.rename(self, *args, **kwargs)\n\n def mouseEnterEvent(self, event):\n \"\"\"\n @type event: QtGui.QEvent\n \"\"\"\n studioLibrary.Record.mouseEnterEvent(self, event)\n if not self._sequenceTimer:\n dirname = self.dirname() + '/sequence'\n self._sequenceTimer = studioLibrary.SequenceTimer(self.parent())\n self._sequenceTimer.setDirname(dirname)\n self._sequenceTimer.communicate.frameChanged.connect(lambda filename, self = self: self.frameChanged(filename))\n self._sequenceTimer.start()\n\n def mouseLeaveEvent(self, event):\n \"\"\"\n @type event: QtGui.QEvent\n \"\"\"\n studioLibrary.Record.mouseLeaveEvent(self, event)\n self.stop()\n\n def mouseMoveEvent(self, event):\n \"\"\"\n @type event: QtGui.QEvent\n \"\"\"\n studioLibrary.Record.mouseMoveEvent(self, event)\n if studioLibrary.isControlModifier():\n x = event.pos().x() - self.rect().x()\n width = self.rect().width()\n percent = 1.0 - float(width - x) / float(width)\n frame = int(self._sequenceTimer.duration() * percent)\n 
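# e.g. a cursor 50px into a 200px-wide record gives percent 0.25; a 48-frame sequence then scrubs to frame 12\n            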
self._sequenceTimer.setCurrentFrame(frame)\n self._filename = self._sequenceTimer.currentFilename()\n self.repaint()\n\n def frameChanged(self, path):\n \"\"\"\n @type path: str\n \"\"\"\n if not studioLibrary.isControlModifier():\n self._filename = path\n self.repaint()\n\n def paint(self, painter, option):\n \"\"\"\n @type painter:\n @type option:\n \"\"\"\n if self._filename:\n self.setPixmap(QtGui.QPixmap(self._filename))\n studioLibrary.Record.paint(self, painter, option)\n painter.save()\n if self._filename:\n r = self.rect()\n painter.setPen(QtCore.Qt.NoPen)\n painter.setBrush(QtGui.QBrush(QtGui.QColor(255, 80, 80)))\n painter.drawRect(r.x(), r.y(), self._sequenceTimer.percent() * r.width() - 1, 2)\n painter.restore()\n\n def accept(self, sourceStart = None, sourceEnd = None):\n \"\"\"\n @type sourceStart:\n @type sourceEnd:\n :raise AnimationPluginError:\n \"\"\"\n msg = 'An error has occurred while loading the animation! Please check the script editor for the traceback.'\n start = None\n try:\n t = time.time()\n if not sourceEnd:\n sourceEnd = int(self.get('end'))\n if not sourceStart:\n sourceStart = int(self.get('start'))\n namespaces = []\n objects = maya.cmds.ls(selection=True) or []\n if not objects:\n namespaces = self.namespaces()\n gSelectedAnimLayers = maya.mel.eval('$a = $gSelectedAnimLayers;')\n if len(gSelectedAnimLayers) > 1:\n msg = 'More than one animation layer selected! Please select only one animation layer for import!'\n raise AnimationPluginError(msg)\n settings = self.plugin().settings()\n option = str(settings.get('option'))\n connect = int(settings.get('connect'))\n if settings.get('currentTime'):\n start = int(maya.cmds.currentTime(query=True))\n if sourceStart < self.get('start') or sourceEnd > self.get('end'):\n msg = 'The requested source time is out of range! Choose a source range between %s - %s.' % (self.get('start'), self.get('end'))\n raise AnimationPluginError(msg)\n a = mutils.Animation.createFromPath(self.dirname())\n a.load(objects, namespaces=namespaces, start=start, sourceTime=(sourceStart, sourceEnd), option=option, connect=connect)\n t = time.time() - t\n self.window().setInfo('Loaded animation in %0.3f seconds.' 
% t)\n except Exception:\n import traceback\n traceback.print_exc()\n self.window().setError(msg)\n\n\nclass AnimationInfoWidget(mayaBasePlugin.InfoWidget):\n\n def __init__(self, parent = None, record = None):\n \"\"\"\n @type parent: QtGui.QWidget\n @type record: Record\n \"\"\"\n mayaBasePlugin.InfoWidget.__init__(self, parent, record)\n self._record = record\n end = str(record.get('end'))\n start = str(record.get('start'))\n self.ui.start.setText(start)\n self.ui.end.setText(end)\n\n\nclass AnimationPreviewWidget(mayaBasePlugin.PreviewWidget):\n\n def __init__(self, parent = None, record = None):\n \"\"\"\n @type parent: QtGui.QWidget\n @type record: Record\n \"\"\"\n mayaBasePlugin.PreviewWidget.__init__(self, parent, record)\n end = str(record.get('end'))\n start = str(record.get('start'))\n self.ui.start.setText(start)\n self.ui.end.setText(end)\n self.ui.sourceStartEdit.setText(start)\n self.ui.sourceEndEdit.setText(end)\n self.connect(self.ui.currentTime, QtCore.SIGNAL('stateChanged (int)'), self.stateChanged)\n self.connect(self.ui.helpCheckBox, QtCore.SIGNAL('stateChanged(int)'), self.showHelpImage)\n self.connect(self.ui.connectCheckBox, QtCore.SIGNAL('stateChanged(int)'), self.connectChanged)\n self.connect(self.ui.option, QtCore.SIGNAL('currentIndexChanged(const QString&)'), self.optionChanged)\n self.loadSettings()\n\n def sourceStart(self):\n return int(self.ui.sourceStartEdit.text())\n\n def sourceEnd(self):\n return int(self.ui.sourceEndEdit.text())\n\n def showHelpImage(self, value, save = True):\n if value:\n self.ui.helpImage.show()\n else:\n self.ui.helpImage.hide()\n if save:\n self.saveSettings()\n\n def saveSettings(self):\n \"\"\"\n \"\"\"\n super(AnimationPreviewWidget, self).saveSettings()\n s = self.settings()\n s.set('option', str(self.ui.option.currentText()))\n s.set('currentTime', bool(self.ui.currentTime.isChecked()))\n s.set('connect', float(self.ui.connectCheckBox.isChecked()))\n s.set('showHelpImage', bool(self.ui.helpCheckBox.isChecked()))\n s.save()\n\n def loadSettings(self):\n \"\"\"\n \"\"\"\n super(AnimationPreviewWidget, self).loadSettings()\n s = self.settings()\n self.ui.currentTime.setChecked(s.get('currentTime'))\n self.ui.connectCheckBox.setChecked(s.get('connect'))\n self.optionChanged(s.get('option'), save=False)\n self.ui.helpCheckBox.setChecked(s.get('showHelpImage'))\n self.showHelpImage(s.get('showHelpImage'), save=False)\n\n def connectChanged(self, value):\n \"\"\"\n @type value: bool\n \"\"\"\n self.optionChanged(str(self.ui.option.currentText()))\n\n def optionChanged(self, text, save = True):\n \"\"\"\n @type text: str\n \"\"\"\n imageText = text\n if text == 'replace all':\n imageText = 'replaceCompletely'\n self.ui.connectCheckBox.setEnabled(False)\n else:\n self.ui.connectCheckBox.setEnabled(True)\n connect = ''\n if self.ui.connectCheckBox.isChecked() and text != 'replace all':\n connect = 'Connect'\n option_image = os.path.join(self.record().plugin().dirname(), 'images/%s%s.png' % (imageText, connect))\n self.ui.helpImage.setPixmap(QtGui.QPixmap(option_image))\n index = self.ui.option.findText(text)\n if index:\n self.ui.option.setCurrentIndex(index)\n if save:\n self.saveSettings()\n\n def accept(self):\n \"\"\"\n \"\"\"\n self.record().accept(self.sourceStart(), self.sourceEnd())\n\n\nclass AnimationCreateWidget(mayaBasePlugin.CreateWidget):\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n @type args:\n @type kwargs:\n \"\"\"\n mayaBasePlugin.CreateWidget.__init__(self, *args, **kwargs)\n 
self.connect(self.ui.setEndFrameButton, QtCore.SIGNAL('clicked()'), self.setEndFrame)\n self.connect(self.ui.setStartFrameButton, QtCore.SIGNAL('clicked()'), self.setStartFrame)\n self._sequence = None\n self.ui.byFrameEdit.setValidator(QtGui.QIntValidator(1, 1000, self))\n self.ui.sequenceWidget = studioLibrary.SequenceWidget(self)\n self.connect(self.ui.sequenceWidget, QtCore.SIGNAL('clicked()'), self.snapshot)\n self.ui.layout().insertWidget(1, self.ui.sequenceWidget)\n self.ui.snapshotButton.parent().hide()\n try:\n self.ui.byFrameEdit.setText(str(self.settings().get('byFrame')))\n start, end = mutils.currentRange()\n self.ui.startFrameEdit.setValidator(QtGui.QIntValidator(-50000000, 50000000, self))\n self.ui.endFrameEdit.setValidator(QtGui.QIntValidator(-50000000, 50000000, self))\n self.ui.startFrameEdit.setText(str(int(start)))\n self.ui.endFrameEdit.setText(str(int(end)))\n except ValueError:\n import traceback\n traceback.print_exc()\n\n def startFrame(self):\n \"\"\"\n :return:\n \"\"\"\n try:\n return int(float(str(self.ui.startFrameEdit.text()).strip()))\n except ValueError:\n return None\n\n def endFrame(self):\n \"\"\"\n @rtype:\n \"\"\"\n try:\n return int(float(str(self.ui.endFrameEdit.text()).strip()))\n except ValueError:\n return None\n\n def duration(self):\n \"\"\"\n @rtype:\n \"\"\"\n return self.endFrame() - self.startFrame()\n\n def byFrame(self):\n \"\"\"\n @rtype:\n \"\"\"\n return int(float(self.ui.byFrameEdit.text()))\n\n def close(self):\n \"\"\"\n \"\"\"\n self.settings().set('byFrame', self.byFrame())\n self.settings().save()\n mayaBasePlugin.CreateWidget.close(self)\n\n def setEndFrame(self):\n \"\"\"\n \"\"\"\n start, end = mutils.selectedRange()\n self.ui.endFrameEdit.setText(str(end))\n\n def setStartFrame(self):\n \"\"\"\n \"\"\"\n start, end = mutils.selectedRange()\n self.ui.startFrameEdit.setText(str(start))\n\n def snapshot(self):\n \"\"\"\n :raise AnimationPluginError:\n \"\"\"\n startFrame, endFrame = mutils.selectedRange()\n if startFrame == endFrame:\n endFrame = self.endFrame()\n startFrame = self.startFrame()\n if startFrame is None or endFrame is None:\n msg = 'Please choose a start frame and an end frame.'\n import traceback\n traceback.print_exc()\n QtGui.QMessageBox.critical(self, 'Error', msg)\n raise AnimationPluginError(msg)\n if self.settings().get('byFrameDialog') and self.duration() > 100 and self.byFrame() == 1:\n msg = 'To help speed up the playblast you can set the \"by frame\" to a greater number than 1.\\neg: If the \"by frame\" is set to 2 it will playblast every second frame.\\nWould you like to show this message again?'\n result = self.window().questionDialog(msg, 'Tip')\n if result == QtGui.QMessageBox.Cancel:\n raise AnimationPluginError('Playblast cancelled!')\n elif result == QtGui.QMessageBox.No:\n self.settings().set('byFrameDialog', False)\n path = studioLibrary.tempDir(make=True, clean=True)\n self._thumbnail = path + '/thumbnail.jpg'\n self._sequence = path + '/sequence/thumbnail.jpg'\n try:\n self._sequence = mutils.snapshot(path=self._sequence, start=startFrame, end=self.endFrame(), step=self.byFrame())\n except mutils.SnapshotError as e:\n self.record().window().setError(str(e))\n raise\n\n shutil.copyfile(self._sequence, self._thumbnail)\n self.setSnapshot(self._thumbnail)\n self.ui.sequenceWidget.setDirname(os.path.dirname(self._sequence))\n\n def accept(self):\n \"\"\"\n :raise AnimationPluginError:\n \"\"\"\n msg = 'An error has occurred while saving the animation! 
Please check the script editor for the traceback.'\n mayaBasePlugin.CreateWidget.accept(self)\n try:\n record = self.record()\n gSelectedAnimLayers = maya.mel.eval('$a = $gSelectedAnimLayers;')\n if len(gSelectedAnimLayers) > 1:\n msg = 'More than one animation layer selected! Please select only one animation layer for export!'\n raise AnimationPluginError(msg)\n if self.startFrame() is None or self.endFrame() is None:\n msg = 'Please specify a start and end frame!'\n raise AnimationPluginError(msg)\n if self.startFrame() >= self.endFrame():\n msg = 'The start frame cannot be greater than or equal to the end frame!'\n raise AnimationPluginError(msg)\n if mutils.getDurationFromNodes(nodes=maya.cmds.ls(selection=True) or []) <= 0:\n msg = 'No animation was found on the selected objects! Please create a pose instead!'\n raise AnimationPluginError(msg)\n path = studioLibrary.tempDir(make=True, clean=True, subdir='animation.anim')\n objects = maya.cmds.ls(selection=True)\n bakeConnected = int(self.ui.bakeCheckBox.isChecked())\n a = mutils.Animation.createFromObjects(objects)\n a.save(path, time=(self.startFrame(), self.endFrame()), bakeConnected=bakeConnected)\n content = a.paths()\n record.set('start', self.startFrame())\n record.set('end', self.endFrame())\n if self._sequence:\n sequence = os.path.dirname(self._sequence)\n if os.path.exists(sequence):\n content.append(sequence)\n record.save(content=content, icon=self.thumbnail())\n except Exception:\n self.record().window().setError(msg)\n raise\n\n\nif __name__ == '__main__':\n import studioLibrary\n studioLibrary.main()\n","sub_path":"plugins/animationPlugin.py","file_name":"animationPlugin.py","file_ext":"py","file_size_in_byte":17996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"276533956","text":"import matplotlib.pyplot as plt\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--read', type=str, help='file input', required=True)\n args = parser.parse_args()\n\n loss = []\n pcorrect = []\n with open(args.read, 'r') as file:\n i = 0\n for row in file:\n if row.startswith('Epoch'):\n loss.append([])\n pcorrect.append([])\n while not row.startswith('model'):\n row = row.strip()\n if row.startswith('p'):\n row = row.split(' ')\n # print(row)\n pcorrect[i].append(float(row[2][:-2]))\n elif row.startswith('['):\n row = row.split(': ')\n # print(row)\n loss[i].append(float(row[1]))\n try:\n row = file.__next__()\n except StopIteration:\n break\n i += 1\n loss = [sum(thing) / len(thing) for thing in loss]\n pcorrect = [sum(thing) / len(thing) for thing in pcorrect]\n print(loss)\n print(pcorrect)\n f = plt.figure(1)\n plt.plot(loss, 'r-')\n plt.ylabel('Binary Cross Entropy Loss')\n plt.xlabel('Epochs')\n plt.title('Loss over Training Set per Epoch')\n\n g = plt.figure(2)\n plt.plot(pcorrect)\n plt.title('Prediction Accuracy over Training Set per Epoch')\n plt.ylabel('Percent Correct')\n plt.xlabel('Epochs')\n plt.show()\n\nmain()\n","sub_path":"data/parse/make_graph.py","file_name":"make_graph.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"253073825","text":"import math\nimport re\n\nimport pycountry\nimport stop_words\n\n\ndef auto_range_categories(value_min, value_max):\n \"\"\"\n Tries to pick sensible numerical range categories for the given range of\n values.\n \"\"\"\n value_range = value_max - value_min\n if value_range > 1:\n # 10, 
100, etc\n category_range = int(math.pow(10, math.ceil(math.log10(value_range))))\n\n if value_range < category_range / 2:\n category_range /= 2 # 5, 10, 50, 100 etc\n else:\n category_range = 5\n\n category_step = category_range / 5 # aim for 5 categories\n\n category_min = value_min - (value_min % category_step)\n\n category_max = category_min + category_step * 5\n\n # may need an extra category to hold max value\n while category_max <= value_max:\n category_max += category_step\n\n return category_min, category_max, category_step\n\n\ndef extract_words(text, language):\n \"\"\"\n Extracts significant words from the given text (i.e. words we want to\n include in a word cloud)\n \"\"\"\n ignore_words = []\n if language:\n code = pycountry.languages.get(bibliographic=language).alpha2\n try:\n ignore_words = stop_words.get_stop_words(code)\n except stop_words.StopWordError:\n pass\n\n words = re.split(r\"[^\\w'-]\", text.lower(), flags=re.UNICODE)\n ignore_words = ignore_words\n return [w for w in words if w not in ignore_words and len(w) > 1]\n","sub_path":"tracpro/polls/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"37185462","text":"# Define a class named Product with the following specifications:\r\n# Data members:\r\n#\r\n# product_id – A string to store product.\r\n# product_name - A string to store the name of the product.\r\n# product_purchase_price – A decimal to store the cost price of the product.\r\n# product_sale_price – A decimal to store Sale Price Margin -\r\n#\r\n# A decimal to be calculated\r\n# as (product_sale_price - product_purchase_price)\r\n#\r\n# Remarks - To store \"Profit\" if Margin is positive else \"Loss\" if Margin is negative.\r\n\r\n#\r\n# A constructor to intialize all the data members with valid default values.\r\n# A method set_remarks() that assigns Margin as (product_sale_price - product_purchase_price)\r\n# and sets Remarks as mentioned below :\r\n\r\n\r\nclass Product():\r\n def __init__(self, p_id, p_name, p_purchase_price, p_sale_price ):\r\n self.p_id = p_id\r\n self.p_name = p_name\r\n self.p_purchase_price = p_purchase_price\r\n self.p_sale_price = p_sale_price\r\n\r\n def set_remarks(self):\r\n if self.p_sale_price - self.p_purchase_price > 0:\r\n return \"Profit\"\r\n else:\r\n return \"Loss\"\r\n\r\n def set_details(self):\r\n self.p_id = input(\"Enter Product ID: \")\r\n self.p_name = input(\"Enter Product Name: \")\r\n self.p_purchase_price = int(input(\"Enter Product Purchase Price: \"))\r\n self.p_sale_price = int(input(\"Enter Product Sale Price: \"))\r\n self.set_remarks()\r\n\r\n\r\n def get_details(self,x):\r\n print(\" Product Summary \".center(100, \"-\"))\r\n print(f\"\\nProduct ID: {product1.p_id} \\nProduct Name:{product1.p_name}\\nProduct Purchase Price:\"\r\n f\" {product1.p_purchase_price} \\nProduct Sale Price: {product1.p_sale_price} \\nProduct Remarks:\"\r\n f\" {product1.set_remarks() }\")\r\n\r\nproduct1 = Product(0,\"\" ,0 , 0)\r\n\r\nproduct1.get_details(product1.set_details())","sub_path":"W5_Q3_Product.py","file_name":"W5_Q3_Product.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"483356363","text":"from django import forms\nfrom django.forms import ModelForm\nfrom django.forms.models import inlineformset_factory, BaseModelFormSet, modelformset_factory\nfrom Receipt.models import 
ReceiptItems\nfrom Stockitems.autocomplete import StockitemsAutocomplete\nfrom Stockitems.models import Stockitems\nimport autocomplete_light\n\nclass ReceiptItemsUpdateForm(ModelForm):\n    stockitem = forms.ModelChoiceField(\n        Stockitems.objects.all(),\n        widget=autocomplete_light.ChoiceWidget('StockitemsAutocomplete'))\n\n    def __init__(self, *args, **kwargs):\n        super(ReceiptItemsUpdateForm, self).__init__(*args, **kwargs)\n\n    class Meta:\n        model = ReceiptItems\n        exclude = ('first_created', 'orderhead', 'picklist', 'closed', 'stockitem',\n                   'last_changed', 'last_accessed', 'picklistline',\n                   'created_by', 'changed_by', 'company_id', 'change_counter',)\n\n\nclass ReceiptItemsCreateForm(autocomplete_light.ModelForm):\n    stockitem = forms.ModelChoiceField(\n        Stockitems.objects.all(),\n        widget=autocomplete_light.ChoiceWidget('StockitemsAutocomplete'))\n\n    def __init__(self, *args, **kwargs):\n        super(ReceiptItemsCreateForm, self).__init__(*args, **kwargs)\n\n    class Meta:\n        model = ReceiptItems\n        exclude = ('first_created', 'orderhead', 'picklist', 'closed', 'itemno', 'price',\n                   'last_changed', 'last_accessed', 'picklistline',\n                   'created_by', 'changed_by', 'company_id', 'change_counter',)\n\n\nclass ReceiptItemsForm(ModelForm):\n    class Meta:\n        model = ReceiptItems\n        exclude = ('first_created', 'last_changed', 'last_accessed', )\n","sub_path":"Receipt/forms/receiptitems.py","file_name":"receiptitems.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} {"seq_id":"451369320","text":"#!/usr/bin/python3\n\nimport subprocess\nimport signal\nimport os\nimport time\nimport webbrowser\nimport sqlite3\nimport json\nimport sys, getopt\nimport argparse\nfrom urllib.parse import urlparse\nimport urllib\nimport os.path\nimport re\nimport _thread\n\nglobal conf_file\ndb_name = 'peer_connection_db'\nconn_tbl = 'connections'\nconfig_file = \"wpa_supplicant.conf\"\ntarget_file = config_file+'.tmp'\nnoob_conf_file='eapoob.conf'\nkeyword = 'Direction'\noob_out_file = '/tmp/noob_output.txt'\n\n\ndef change_config(peerID):\n\n\tif peerID is None:\n\t\tprint (\"Peer ID is NULL\")\n\t\treturn\n\n\tif os.path.isfile(config_file) is False:\n\t\tprint (\"Config file unavailable\")\n\t\treturn\n\n\told_identity = peerID+'+s1@eap-noob.net'\n\tnew_identity = peerID+'+s2@eap-noob.net'\n\n\tread_conf = open(config_file, 'r')\n\twrite_conf = open(target_file,'w')\n\n\tconf_changed = 0\n\n\tfor line in read_conf:\n\t\tif old_identity in line:\n\t\t\tline=line.replace(old_identity,new_identity)\n\t\t\twrite_conf.write(line)\n\t\t\tconf_changed = 1\n\t\telse:\n\t\t\twrite_conf.write(line)\n\n\tif conf_changed == 1:\n\t\twrite_conf.close()\n\t\tread_conf.close()\n\t\tcmd = 'cp '+target_file+' '+config_file+' ; rm -f '+target_file\n\t\trunbash(cmd)\n\t\treconfigure_peer()\n\ndef exec_query(cmd, qtype):\n\n\tretval = 0\n\n\tres = os.path.isfile(db_name)\n\n\tif True != res:\n\t\t#print (\"No database file found\")\n\t\treturn\n\t# create a DB connection\n\tdb_conn = sqlite3.connect(db_name)\n\n\t# check if DB cannot be accessed\n\tif db_conn is None:\n\t\tprint (\"DB busy\")\n\n\tdb_cur = db_conn.cursor()\n\n\tdb_cur.execute(cmd)\n\n\tif qtype == 1:\n\t\tretval = db_cur.fetchone()\n\telif qtype == 0:\n\t\tdb_conn.commit()\n\n\tdb_conn.close()\n\treturn retval\n\ndef url_to_db(params):\n\n\tcmd = 'UPDATE connections SET noob ='+'\\''+ params['Noob'][0]+'\\''+' ,hoob =\\''+params['Hoob'][0]+'\\''+' where
PeerID=\\''+params['PeerID'][0]+'\\'' \n\t#print (cmd)\n\n\texec_query(cmd,0)\n\ndef parse_qr_code(url):\n\t\n\turl_comp = urlparse(url);\n\t\n\tparams = urllib.parse.parse_qs(url_comp.query)\n\n\t#print(params)\t\n\t\n\turl_to_db(params)\n\n\tchange_config(params['PeerID'][0])\n\n\ndef read_qr_code(arg):\n\tno_message = True\n\t#print(\"In new thread\")\n\tcmd = \"zbarcam >\"+oob_out_file\n\t#runbash(cmd)\n\tsubprocess.Popen(cmd,shell=True)\n\n\twhile no_message:\n \ttime.sleep(2)\n \toob_output = open(oob_out_file,'r')\n \tfor line in oob_output:\n \tif 'Noob' in line and 'Hoob' in line and 'PeerID' in line:\n \tno_message = False\n \toob_output.close()\n\n\tsubprocess.Popen(\"sudo killall zbarcam\",shell=True)\n\tcmd = 'rm -f '+oob_out_file\n\trunbash(cmd)\n\tprint (line)\n\tparse_qr_code(line) \n\n\ndef update_file(signum, frame):\n\n\t#print ('Updating File')\n\tcon = sqlite3.connect(db_name)\n\tc = con.cursor()\n\n\tfile = open(\"file.txt\", \"wb\")\n\tfor row in c.execute('select ssid,ServInfo,PeerID,Noob,Hoob,err_code from connections where show_OOB = 1'):\n\t\t#print (row[0] + '\\n')\n\t\tservinfo = json.loads(row[1])\n\t\t\n\t\tif(row[5]!=0):\n\t\t\tfile.write(\"Error code: \"+str(row[5]))\n\t\t\n\t\tline = (row[0].encode(encoding='UTF-8') + b',' + servinfo['ServName'].encode(encoding='UTF-8') + b',' \n\t\t+ servinfo['ServUrl'].encode(encoding='UTF-8')+b'/?PeerId='+row [2].encode(encoding='UTF-8') + \n\t\tb'&Noob=' + row[3].encode(encoding='UTF-8')+ b'&Hoob=' + row[4].encode(encoding='UTF-8') + b'\\n')\n\t\tfile.write(line)\n\tfile.close()\n\tcon.close()\n\treturn\n\n\ndef runbash(cmd):\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n out = p.stdout.read().strip()\n return out\n\ndef check_wpa():\n\treturn os.path.isfile('wpa_supplicant')\n\n\ndef get_pid(arg):\n\tpid_list = []\n\tpname = arg.encode(encoding='UTF-8')\n\tp = runbash(b\"ps -A | grep \"+pname)\n\tif None == p:\n\t\treturn None\n\n\tfor line in p.splitlines():\n\t\tif pname in line:\n\t\t\tpid = int(line.split(None,1)[0])\n\t\t\tpid_list.append(pid)\n\treturn pid_list\n\ndef prepare(iface):\n\tpid = get_pid('wpa_supplicant')\n\tfor item in pid:\n\t\tos.kill(int(item),signal.SIGKILL)\n\t#now start your own wpa_supplicant\n\t\n\tprint (\"start wpa_supplicant\")\n\tcmd = 'rm -f '+config_file+' touch '+config_file+' ; rm -f '+db_name\n\trunbash(cmd)\t\t\n\tconf_file = open(config_file,'w')\n\tconf_file.write(\"ctrl_interface=/var/run/wpa_supplicant \\n update_config=1\\ndot11RSNAConfigPMKLifetime=120\\n\\n\")\n\tconf_file.close()\n\tcmd = \"./wpa_supplicant -i \"+iface+\" -c wpa_supplicant.conf -O /var/run/wpa_supplicant \"\n\tsubprocess.Popen(cmd,shell=True, stdout=1, stdin=None)\n\ndef network_scan():\n\t\n\twhile True:\n\t\tresult = runbash(\"./wpa_cli scan | grep OK\")\n\t\tif 'OK' == result.decode():\n\t\t\tprint (\"scan OK\")\n\t\t\treturn\n\t\n\t\ndef get_result():\n\tscan_result = runbash(\"wpa_cli scan_result | awk '$4 ~ /WPA2-EAP/ {print $3,$5,$1}' | sort $1\")\n\tconf_file = open(config_file,'a')\n\ttoken = ''\n\tssid_list = []\n\ttoken_list = []\n\tfor item in scan_result.decode():\n\t\tif '\\n' == item:\n\t\t\ttoken_list.append(token)\n\t\t\tif token_list[1] not in ssid_list:\n\t\t\t\tssid_list.append(token_list[1]) \n\t\t\t\tconf_file.write(\"network={\\n\\tssid=\\\"\"+token_list[1]+\"\\\"\\n\\tbssid=\"+token_list[2]+\"\\n\\tkey_mgmt=WPA-EAP\\n\\tpairwise=CCMP TKIP\"\n\t\t\t\t\"\\n\\tgroup=CCMP TKIP\\n\\teap=NOOB\\n\\tidentity=\\\"noob@eap-noob.net\\\"\\n}\\n\\n\")\n\t\t\t\ttoken = 
''\n\t\t\ttoken_list[:] = []\n\n\t\telif ' ' == item:\n\t\t\ttoken_list.append(token)\t\t\n\t\t\ttoken = ''\n\t\telse:\n\t\t\ttoken += str(item)\n\tconf_file.close()\n\treturn ssid_list \n\n\ndef reconfigure_peer():\n\tpid = get_pid('wpa_supplicant')\n\tprint (\"Reconfigure wpa_supplicant\")\n\tos.kill(int(pid[0]),signal.SIGHUP)\n\n\t\ndef check_result():\n\tres = runbash(\"./wpa_cli status | grep 'EAP state=SUCCESS'\")\n\tif res == b\"EAP state=SUCCESS\":\n\t\treturn True\n\n\treturn False \n\ndef launch_browser():\n \turl = \"test.html\"\n \twebbrowser.open(url,new=1,autoraise=True)\n \t#signal.signal(signal.SIGUSR1, update_file)\n\n\ndef get_direction():\n noob_conf = open(noob_conf_file, 'r')\n\n for line in noob_conf:\n if '#' != line[0] and keyword in line:\n parts = re.sub('[\\s+]', '', line)\n direction = (parts[len(keyword)+1])\n\n return direction\n\ndef terminate_supplicant():\n\tpid = get_pid('wpa_supplicant')\n\tos.kill(int(pid[0]),signal.SIGKILL)\n\t\ndef sigint_handler(signum, frame):\n\tterminate_supplicant()\t\t\t\n\texit(0)\n\t\ndef check_if_table_exists():\n\t#cmd = 'SELECT count(*) FROM information_schema.tables WHERE table_name=\\''+conn_tbl+'\\''\n\tcmd = 'SELECT name FROM sqlite_master WHERE type=\\'table\\''\n\twhile True:\n\t\tout = exec_query(cmd,1)\n\t\tif out is not None and out[0] == conn_tbl:\n\t\t\treturn\n\t\ttime.sleep(3)\ndef main():\n\n\tinterface=None\n\tno_result=0\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-i', '--interface', dest='interface')\n\targs = parser.parse_args()\n\n\tif args.interface is None:\n\t\tprint('Usage:wpa_auto_run.py -i <interface>')\n\t\treturn\n\n\tif not(check_wpa()):\n\t\tprint (\"WPA_Supplicant not found\")\n\t\treturn\n\n\tinterface=args.interface\n\n\tsignal.signal(signal.SIGINT, sigint_handler)\n\tprepare(interface)\n\ttime.sleep(2)\n\tnetwork_scan()\n\t\n\twhile True:\n\t\tssid_list = get_result()\n\t\tif len(ssid_list) > 0:\n\t\t\tprint (ssid_list)\n\t\t\tbreak\n\t\ttime.sleep(2)\n\t\n\treconfigure_peer()\t\n\n\tdirection = get_direction()\n\tcheck_if_table_exists()\n\n\tif direction is '2':\n\t\tprint(\"Server to peer direction\")\n\t\t_thread.start_new_thread(read_qr_code,(None,))\n\telif direction is '1':\n\t\tprint(\"Peer to server direction\")\n\t\tlaunch_browser()\n\telse:\n\t\tprint(\"No direction specified\")\n\t\tterminate_supplicant()\n\t\texit(0)\n\n\n\twhile no_result == 0:\n\t\tif check_result():\n\t\t\tno_result =1\n\t\ttime.sleep(5)\n\t\tif direction is '1':\n\t\t\tupdate_file(None,None)\n\n\tprint (\"***************************************EAP AUTH SUCCESSFUL *****************************************************\")\t\n\tcmd = 'sudo ifconfig '+interface+' 0.0.0.0 up ; dhclient '+interface \n\trunbash(cmd)\n\twebbrowser.open_new_tab('https://www.youtube.com')\n\nif __name__=='__main__':\n main()\n","sub_path":"wpa_supplicant-2.5/wpa_supplicant/wpa_auto_run.py","file_name":"wpa_auto_run.py","file_ext":"py","file_size_in_byte":7770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"630967348","text":"from django.conf import settings\nfrom core.models import LivySession\n\ndef combine_settings(request):\n\t\n\t'''\n\tMake some settings variables available to all templates\n\t'''\n\n\treturn {\n\t\t'APP_HOST': settings.APP_HOST,\t\t\n\t\t'DPLA_API_KEY': settings.DPLA_API_KEY,\n\t\t'OAI_RESPONSE_SIZE':settings.OAI_RESPONSE_SIZE,\n\t\t'COMBINE_OAI_IDENTIFIER':settings.COMBINE_OAI_IDENTIFIER\n\t}\n\n\ndef livy_session(request):\n\n\t'''\n\tMake 
Livy session information available to all views\n\t'''\n\n\t# get active livy session\n\tlv = LivySession.get_active_session()\n\tif lv:\n\t\tlv.refresh_from_livy()\n\n\treturn {\n\t\t'LIVY_SESSION':lv\n\t}","sub_path":"core/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"45173709","text":"import numpy as np\nfrom tensorflow import keras\nimport tensorflow as tf\nfrom sklearn.metrics import accuracy_score, classification_report\nimport logging\n\ndef prepare_x(data):\n df1 = data[:40, :].T\n return np.array(df1)\n\n\ndef get_label(data):\n lob = data[-5:, :].T\n all_label = []\n\n for i in range(lob.shape[1]):\n one_label = lob[:, i] - 1\n one_label = keras.utils.to_categorical(one_label, 3)\n one_label = one_label.reshape(len(one_label), 1, 3)\n all_label.append(one_label)\n\n return np.hstack(all_label)\n\n\ndef data_classification(X, Y, T):\n [N, D] = X.shape\n df = np.array(X)\n\n dY = np.array(Y)\n\n dataY = dY[T - 1:N]\n\n dataX = np.zeros((N - T + 1, T, D))\n for i in range(T, N + 1):\n dataX[i - T] = df[i - T:i, :]\n\n return dataX.reshape(dataX.shape + (1,)), dataY\n\n \ndef create_dataset(encoder_input, decoder_input, encoder_target, batch_size, method, shuffle=False):\n train_pairs_ds = tf.data.Dataset.from_tensor_slices((encoder_input, decoder_input))\n train_pairs_ds = train_pairs_ds.map(lambda d, l: (tf.cast(d, tf.float32), tf.cast(l, tf.float32)))\n\n train_y_ds = tf.data.Dataset.from_tensor_slices(encoder_target)\n train_y_ds = train_y_ds.map(lambda d: (tf.cast(d, tf.float32)))\n\n train_ds = tf.data.Dataset.zip((train_pairs_ds, train_y_ds))\n\n if shuffle:\n train_ds = train_ds.shuffle(len(encoder_input))\n train_ds = train_ds.batch(batch_size, drop_remainder=True)\n # train_ds = train_ds.map(lambda d, l: (tf.cast(d, tf.float32), tf.cast(l, tf.float32)))\n\n if method == 'train':\n return train_ds.repeat()\n\n if method == 'val':\n return train_ds\n\n if method == 'test':\n return train_ds\n\n if method == 'prediction':\n train_ds = tf.data.Dataset.from_tensor_slices((encoder_input, decoder_input))\n train_ds = train_ds.batch(batch_size, drop_remainder=True)\n train_ds = train_ds.map(lambda d, l: (tf.cast(d, tf.float32), tf.cast(l, tf.float32)))\n return train_ds\n\n\ndef evaluation_metrics(real_y, pred_y):\n real_y = real_y[:len(pred_y)]\n logging.info('-------------------------------')\n\n for i in range(real_y.shape[1]):\n print(f'Prediction horizon = {i}')\n print(f'accuracy_score = {accuracy_score(np.argmax(real_y[:, i], axis=1), np.argmax(pred_y[:, i], axis=1))}')\n print(f'classification_report = {classification_report(np.argmax(real_y[:, i], axis=1), np.argmax(pred_y[:, i], axis=1), digits=4)}')\n print('-------------------------------')\n\n\ndef prepare_decoder_input(data, teacher_forcing):\n if teacher_forcing:\n first_decoder_input = keras.utils.to_categorical(np.zeros(len(data)), 3)\n first_decoder_input = first_decoder_input.reshape(len(first_decoder_input), 1, 3)\n decoder_input_data = np.hstack((data[:, :-1, :], first_decoder_input))\n\n if not teacher_forcing:\n decoder_input_data = np.zeros((len(data), 1, 3))\n decoder_input_data[:, 0, 0] = 1.\n\n return decoder_input_data\n\n\n# def get_label(data):\n# lob = data[-5:, :].T\n# return lob\n\n# def create_dataset_single(x_train, y_train, batch_size, method):\n# train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size, 
drop_remainder=True)\n# train_ds = train_ds.map(lambda d, l: (tf.cast(d, tf.float32), tf.cast(l, tf.float32)))\n\n# if method == 'train':\n# return train_ds.repeat()\n\n# if method == 'val':\n# return train_ds\n\n# if method == 'test':\n# return train_ds\n\n# if method == 'prediction':\n# train_ds = tf.data.Dataset.from_tensor_slices((x_train))\n# train_ds = train_ds.batch(batch_size, drop_remainder=True)\n# train_ds = train_ds.map(lambda d: (tf.cast(d, tf.float32)))\n# return train_ds\n","sub_path":"code_ipu/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"326988247","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import Empty\n\nclass gazebo_topics():\n\n\tdef __init__(self):\n\t\t#fake_diagnostics\n\t\tself.joy_usage_pub = rospy.Publisher(\"/joy_usage\", Empty, queue_size=1)\n\t\tself.pc1_usage_pub = rospy.Publisher(\"/pc1_usage\", Empty, queue_size=1)\n\t\tself.pc2_usage_pub = rospy.Publisher(\"/pc2_usage\", Empty, queue_size=1)\n\t\tself.pc3_usage_pub = rospy.Publisher(\"/pc3_usage\", Empty, queue_size=1)\n\t\tself.b1_usage_pub = rospy.Publisher(\"/b1_usage\", Empty, queue_size=1)\n\t\tself.t1_usage_pub = rospy.Publisher(\"/t1_usage\", Empty, queue_size=1)\n\t\tself.t2_usage_pub = rospy.Publisher(\"/t2_usage\", Empty, queue_size=1)\n\t\tself.t3_usage_pub = rospy.Publisher(\"/t3_usage\", Empty, queue_size=1)\n\t\tself.wifi_status_pub = rospy.Publisher(\"/wifi_status\", Empty, queue_size=1)\n\n\t\trospy.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n\trospy.init_node('gazebo_topics')\n\tgt = gazebo_topics()\n\trospy.loginfo(\"gazebo_topics running\")\n\n\trate = rospy.Rate(1)\n\twhile not rospy.is_shutdown():\n\t\tmsg = Empty()\n\t\tgt.joy_usage_pub.publish(msg)\n\t\tgt.pc1_usage_pub.publish(msg)\n\t\tgt.pc2_usage_pub.publish(msg)\n\t\tgt.pc3_usage_pub.publish(msg)\n\t\tgt.b1_usage_pub.publish(msg)\n\t\tgt.t1_usage_pub.publish(msg)\n\t\tgt.t2_usage_pub.publish(msg)\n\t\tgt.t3_usage_pub.publish(msg)\n\t\tgt.wifi_status_pub.publish(msg)\n\t\ttry:\n\t\t\trate.sleep()\n\t\texcept rospy.ROSInterruptException as e:\n\t\t\t#print \"ROSInterruptException\"\n\t\t\tpass\n\n","sub_path":"cob_controller_configuration_gazebo/src/gazebo_topics.py","file_name":"gazebo_topics.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"558626414","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Invenio.\n# Copyright (C) 2020 CERN.\n#\n# Invenio is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"GitHub utilities.\"\"\"\n\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom os import path\n\nimport pygit2\n\nfrom automation_tools import config\nfrom automation_tools.config import github\nfrom automation_tools.utils import execute\n\n\nclass GithubUtils(object):\n @staticmethod\n def list_invenio_modules():\n \"\"\"List invenio modules by parsing inveniosoftware organization.\"\"\"\n organization = 'inveniosoftware'\n try:\n user = github.get_organization(organization)\n invenio_repositories = [repository.name for repository in user.get_repos() \\\n if repository.name.startswith('invenio-')]\n return invenio_repositories\n\n except:\n print('Failed to process the request')\n\n @staticmethod\n def list_organization_repositories(organization):\n \"\"\"List 
repositories by parsing configured organization.\"\"\"\n try:\n user = github.get_organization(organization)\n invenio_repositories = [repository.name for repository in user.get_repos()]\n return invenio_repositories\n\n except:\n print('Failed to process the request')\n\n @staticmethod\n def download_invenio_modules(repositories, local_repositories_path):\n \"\"\"Download all the invenio modules in a newly created subfolder.\"\"\"\n if path.exists(local_repositories_path):\n raise Exception(\"Folder already exists\")\n\n os.mkdir(local_repositories_path)\n url_github = \"https://github.com/inveniosoftware\"\n for repository_name in repositories:\n pygit2.clone_repository(f\"{url_github}/{repository_name}\", f\"{local_repositories_path}/{repository_name}\")\n\n @staticmethod\n def open_pr(gh_repository, title, body, branch, base):\n \"\"\"Open PR with previous changes\"\"\"\n try:\n gh_repository.create_pull(\n title=title,\n body=body,\n head=branch,\n base=base\n )\n pr_opened = True\n except:\n pr_opened = False\n\n return pr_opened\n\n @staticmethod\n def create_organization_repository(repository):\n \"\"\"Creates a repository under the organization name.\"\"\"\n org = config.github.get_organization(config.organization)\n org.create_repo(repository)\n\n\nclass LocalRepository(object):\n \"\"\"Context for a local copy of a repository.\"\"\"\n def __init__(self, repository):\n self.repository = repository\n\n def __enter__(self):\n self.previous_directory = os.getcwd()\n os.chdir(path.join(config.local_repositories_path, self.repository))\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n os.chdir(self.previous_directory)\n\n def check_status(self, expected):\n \"\"\"Check if modifications are the ones expected.\"\"\"\n outputs = []\n for out in execute([\"git\", \"status\", \"-s\"]):\n outputs.append(out.strip())\n\n if outputs == expected:\n modifs_ok = True\n\n else:\n modifs_ok = False\n\n return modifs_ok\n\n def commit(self, message, extra_before=None, extra_after=None):\n \"\"\"Commit if changes.\"\"\"\n try:\n subprocess.check_output([\"git\", \"add\", \".\"])\n commit = [\"git\"]\n if extra_before:\n commit.extend(extra_before)\n commit.extend([\"commit\", \"-m\", message])\n if extra_after:\n commit.extend(extra_after)\n subprocess.check_output(commit)\n commited = True\n except:\n commited = False\n\n return commited\n\n def push(self, destination, local_branch, remote_branch, force=False):\n \"\"\"Push commited changes.\"\"\"\n try:\n push = [\"git\", \"push\", destination, local_branch + ':' + remote_branch]\n if force:\n push.extend(['--force'])\n subprocess.check_output(push)\n pushed = True\n except:\n pushed = False\n\n return pushed\n\n def github_process(self, is_mode_pr, expected, repository, local_branch, remote_branch, message, title, body, base,\n commit_extra_before, commit_extra_after):\n \"\"\".\"\"\"\n modifs_ok = self.check_status(expected)\n if modifs_ok:\n print(\"Has to be committed\")\n committed = self.commit(message, commit_extra_before, commit_extra_after)\n if committed:\n print(\"Has been committed\")\n pushed = self.push(config.destination, local_branch, remote_branch)\n if not pushed:\n raise Exception(\"Failed to push\")\n\n if pushed and is_mode_pr:\n print(\"Has been pushed\")\n gh_repository = github.get_repo(f\"{config.organization}/{repository}\")\n pr_opened = GithubUtils.open_pr(gh_repository, title, body, remote_branch, base)\n if pr_opened:\n print(\"PR has been opened\")\n else:\n raise Exception(\"PR has not been 
opened\")\n else:\n raise Exception(\"Failed to commit\")\n\n else:\n raise Exception(\"Please review modifications\")\n\n def set_origin(self, new_origin_url):\n \"\"\"Set a repository's origin.\"\"\"\n execute([\"git\", \"remote\", \"set-url\", config.destination, new_origin_url])\n","sub_path":"automation_tools/repositories.py","file_name":"repositories.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"389241561","text":"\"\"\"\nFunction used specifically for camus dataset\nBELHAMISSI Mourad\n\"\"\"\n\nimport numpy as np\nfrom medpy import metric\n\n# Voxel spacing in mm, here Z coordinate is not used => pixel_spacing\nvoxel_spacing = [0.308,0.154]\n\n############################################################\n# Computing evaluation metrics\n############################################################\n\n\ndef dice (gt, segmentation, _id):\n \"\"\"\n Compute the dice metric. \n gt: Groundtruth binary masks for each class. \n seggmentation: Dictionary result of segmentation returned by model.detect[0]. \n returns a dictionary containing the dice value for each class\n the keys of the dictionary are the classes and the values, the dice \n values per class.\n \"\"\"\n \n # Each key of the dictionary corresponds to one of the 3 classses\n metrics = dict([\n (1,0),\n (2,0),\n (3,0),\n ])\n \n # Check if there are missing masks \n check_missing = True\n # Make sure there are masks\n if (segmentation['masks'].ndim > 2):\n # Minimum one mask detected ?\n if (segmentation['masks'].shape[2] > 0):\n metrics[segmentation['class_ids'][0]] = metric.binary.dc(gt[:,:,segmentation['class_ids'][0]-1], segmentation['masks'][:,:,0])\n check_missing = True\n # Minimum 2 masks detected ?\n if (segmentation['masks'].shape[2] > 1):\n metrics[segmentation['class_ids'][1]] = metric.binary.dc(gt[:,:,segmentation['class_ids'][1]-1], segmentation['masks'][:,:,1])\n check_missing = True\n # All masks are detected ?\n if (segmentation['masks'].shape[2] > 2):\n metrics[segmentation['class_ids'][2]] = metric.binary.dc(gt[:,:,segmentation['class_ids'][2]-1], segmentation['masks'][:,:,2])\n check_missing = False\n if (check_missing):\n # Show which classes were detected for the missing masks image\n print (\"Only following classes detected for id \",_id,\":\",segmentation['class_ids'])\n return metrics\n else:\n print (\"No masks detected for id: \",_id)\n pass\n \n \ndef assd (gt, segmentation, _id):\n \"\"\"\n Compute the assymetric surface distance metric in millimeters (mm).\n gt: Groundtruth binary masks for each class.\n segmentation: Dictionary result of segmentation returned by model.detect[0]. 
\n returns a dictionary containing the assd value for each class\n the keys of the dictionary are the classes and the values, the assd \n values per class.\n \"\"\"\n \n metrics = dict([\n (1,0),\n (2,0),\n (3,0),\n ])\n \n if (segmentation['masks'].ndim > 2):\n if (segmentation['masks'].shape[2] > 0):\n metrics[segmentation['class_ids'][0]] = metric.binary.assd(gt[:,:,segmentation['class_ids'][0]-1], segmentation['masks'][:,:,0], voxelspacing = [voxel_spacing[1], voxel_spacing[0]])\n if (segmentation['masks'].shape[2] > 1):\n metrics[segmentation['class_ids'][1]] = metric.binary.assd(gt[:,:,segmentation['class_ids'][1]-1], segmentation['masks'][:,:,1], voxelspacing = [voxel_spacing[1], voxel_spacing[0]])\n if (segmentation['masks'].shape[2] > 2):\n metrics[segmentation['class_ids'][2]] = metric.binary.assd(gt[:,:,segmentation['class_ids'][2]-1], segmentation['masks'][:,:,2], voxelspacing = [voxel_spacing[1], voxel_spacing[0]])\n return metrics\n else:\n print (\"No masks detected for id: \",_id)\n pass\n \n\ndef hd (gt, segmentation, _id):\n \"\"\"\n Compute the hausdorff distance metric in mm.\n gt: Groundtruth binary masks for each class. \n segmentation: Dictionary result of segmentation returned by model.detect[0]. \n returns a dictionary containing the hd value for each class\n the keys of the dictionary are the classes and the values, the hd \n values per class.\n \"\"\"\n \n metrics = dict([\n (1,0),\n (2,0),\n (3,0),\n ])\n \n if (segmentation['masks'].ndim > 2):\n if (segmentation['masks'].shape[2] > 0):\n metrics[segmentation['class_ids'][0]] = metric.binary.hd(gt[:,:,segmentation['class_ids'][0]-1], segmentation['masks'][:,:,0], voxelspacing = [voxel_spacing[1], voxel_spacing[0]])\n if (segmentation['masks'].shape[2] > 1):\n metrics[segmentation['class_ids'][1]] = metric.binary.hd(gt[:,:,segmentation['class_ids'][1]-1], segmentation['masks'][:,:,1], voxelspacing = [voxel_spacing[1], voxel_spacing[0]])\n if (segmentation['masks'].shape[2] > 2):\n metrics[segmentation['class_ids'][2]] = metric.binary.hd(gt[:,:,segmentation['class_ids'][2]-1], segmentation['masks'][:,:,2], voxelspacing = [voxel_spacing[1], voxel_spacing[0]])\n return metrics\n else:\n print (\"No masks detected for id: \",_id)\n pass\n \n############################################################\n# Normalizing bounding boxes and computing bounding boxes errors\n############################################################\n\n\n# We limit the detection instances to three which justifies the limit in these functions\ndef normalize_bbox(gt_bbox, seg): \n \"\"\"\n Get the center coordinates of gt BBox and segmentation BBox as well\n as width and height of BBox. 
Masks are generated in radom class order,\n for example : 2,3,1\n gt_bbox: numpy array with coordinates (y1, x1, y2, x2)\n top left corner and bottom right corner coordinates.\n \n seg: dictionary result of model.detect[0]\n returns : numpy arrays for gt and segmentation\n each array is 2 dimentional, one line per class: (xcenter, ycenter, width, height) \n \"\"\"\n\n # Init with nan to skip these values when computing mean and std\n # with np.nanmean and np.nanstd\n seg_bbox_reshape = np.empty(gt_bbox.shape)\n gt_bbox_reshape = np.empty(gt_bbox.shape)\n seg_bbox_reshape[:,:] = np.nan\n gt_bbox_reshape[:,:] = np.nan\n \n # Check if there's at least one mask\n if (seg['class_ids'].ndim > 0):\n # Check if all masks are detected\n if (len(seg['class_ids']) == 3):\n # Go through the segmentation masks of each class\n j = 0\n # Go through the gt masks according to the class order of segmentation masks\n for i in seg['class_ids']:\n\n # (y1 + y2) / 2\n gt_bbox_reshape[j,1] = (gt_bbox [j,0] + gt_bbox [j,2]) / 2\n seg_bbox_reshape[i-1,1] = (seg['rois'][j,0] + seg['rois'][j,2]) / 2\n # (x1 + x2) / 2\n gt_bbox_reshape[j,0] = (gt_bbox [j,1] + gt_bbox [j,3]) / 2\n seg_bbox_reshape[i-1,0] = (seg['rois'][j,1] + seg['rois'][j,3]) / 2\n # (y2 - y1)\n gt_bbox_reshape[j,3] = gt_bbox [j,2] - gt_bbox [j,0]\n seg_bbox_reshape[i-1,3] = seg['rois'][j,2] - seg['rois'][j,0] \n # (x2 - x1)\n gt_bbox_reshape[j,2] = gt_bbox [j,3] - gt_bbox [j,1]\n seg_bbox_reshape[i-1,2] = seg['rois'][j,3] - seg['rois'][j,1]\n j += 1\n # If missing masks, reshape all the gt but only the detected masks\n # nan values will remain for none detected masks\n else:\n for i in np.arange(1,4):\n gt_bbox_reshape[i-1,1] = (gt_bbox [i-1,0] + gt_bbox [i-1,2]) / 2\n gt_bbox_reshape[i-1,0] = (gt_bbox [i-1,1] + gt_bbox [i-1,3]) / 2\n gt_bbox_reshape[i-1,3] = gt_bbox [i-1,2] - gt_bbox [i-1,0]\n gt_bbox_reshape[i-1,2] = gt_bbox [i-1,3] - gt_bbox [i-1,1]\n j = 0\n for i in seg['class_ids']:\n seg_bbox_reshape[i-1,1] = (seg['rois'][j,0] + seg['rois'][j,2]) / 2\n seg_bbox_reshape[i-1,0] = (seg['rois'][j,1] + seg['rois'][j,3]) / 2\n seg_bbox_reshape[i-1,3] = seg['rois'][j,2] - seg['rois'][j,0] \n seg_bbox_reshape[i-1,2] = seg['rois'][j,3] - seg['rois'][j,1]\n j += 1 \n else:\n print(\"No mask\")\n pass\n\n return gt_bbox_reshape, seg_bbox_reshape\n\n \ndef compute_bbox_errors(gt_bbox, seg_bbox, class_ids, im_id): \n \"\"\"\n Compute errors between gt BBoxes and segmentation BBoxes \n gt_bbox, seg_bbox : numpy arrays as returned by normalize_bbox\n returns a 2D numpy array containing the error for each class\n height and width erros are in millimeters\n \"\"\"\n\n # Init with nan to skip these values when computing mean and std\n error = np.empty(gt_bbox.shape)\n error[:,:] = np.nan\n \n # Check if there is at least a mask\n if (seg_bbox.ndim > 0):\n error = abs (gt_bbox - seg_bbox)\n for i in np.arange(0,error.shape[0]):\n \n # Return coordinates in millimiters\n error[i,2] *= voxel_spacing[0]\n error[i,0] *= voxel_spacing[0]\n error[i,1] *= voxel_spacing[1]\n error[i,3] *= voxel_spacing[1]\n \n # One number precision only\n error[i,0] = \"%.1f\" % error[i,0]\n error[i,1] = \"%.1f\" % error[i,1]\n error[i,2] = \"%.1f\" % error[i,2]\n error[i,3] = \"%.1f\" % error[i,3]\n \n else: \n pass\n return error \n","sub_path":"mrcnn/camus_utils.py","file_name":"camus_utils.py","file_ext":"py","file_size_in_byte":9533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"161591215","text":"\"\"\"\n Copyright (C) 
2014 CollabNet Inc. | 2015+ Volker Kopetzky Kopetzky\n    -*- coding: utf-8 -*-\n\n\"\"\"\n\nimport suds\nimport logging\nimport teamforge\nimport teamforge.user\nimport teamforge.group\nimport teamforge.tracker.unit\nimport teamforge.tracker.team\nimport teamforge.tracker.tracker\nimport teamforge.scm.repository\nimport teamforge.document\n\n\nclass Projects(object):\n\n    def __init__(self, session):\n        self.logger = logging.getLogger('teamforge.project.projects')\n        self.session = session\n\n    def __getitem__(self, oid):\n        try:\n            return Project(self.session, oid, load=True)\n        except teamforge.NoSuchObjectFault:\n            raise KeyError\n\n    def __iter__(self):\n        for row in self._load():\n            yield Project(self.session, row.id, row=row)\n\n    def _load(self):\n        try:\n            return self.session.server.teamforge.service.getProjectList(\n                self.session.teamforge_session,\n                False\n            ).dataRows\n        except suds.WebFault as e:\n            raise teamforge.convert_exception(e)\n\n\nclass Project(teamforge.TeamForgeObject):\n\n    def __init__(self, session, oid, row=None, load=False):\n        super(Project, self).__init__(session, oid, row, load)\n        self.logger = logging.getLogger('teamforge.project.project')\n\n    def _new(self):\n        self._oid = None\n        self._row = None\n        self._data = self.session.server.teamforge.factory.create('ns1:ProjectSoapDO')\n        self._is_dirty = False\n\n#    def _persist(self, **kwargs):\n#        try:\n#            pass\n#        except suds.WebFault as e:\n#            return teamforge.convert_exception(e)\n\n    def _load(self):\n        try:\n            self._row = None\n            self._data = self.session.server.teamforge.service.getProjectData(\n                self.session.teamforge_session,\n                self._oid)\n            self._is_dirty = False\n        except suds.WebFault as e:\n            raise teamforge.convert_exception(e)\n        self.logger.debug(\"_load: %s\", self._data)\n\n    def _remove(self, **kwargs):\n        try:\n            # Parameters\n            force = kwargs['force'] if 'force' in kwargs else True\n            notify = kwargs['notify'] if 'notify' in kwargs else False\n\n            self.session.server.teamforge.deleteProject(\n                self.session.teamforge_session,\n                self._oid,\n                notify,\n                force\n            )\n            self._new()\n        except suds.WebFault as e:\n            raise teamforge.convert_exception(e)\n\n    @property\n    def created_date(self):\n        # overridden, as the soap row has the field as well\n        if self._row:\n            return self._row.dateCreated\n        else:\n            if not self._data:\n                self._load()\n            return self._data.createdDate\n\n    @property\n    def description(self):\n        if self._row:\n            return self._row.description\n        else:\n            if not self._data:\n                self._load()\n            return self._data.description\n\n    @description.setter\n    def description(self, value):\n        if not self._data:\n            self._load()\n        if self._data.description != value:\n            self._data.description = value\n            self._is_dirty = True\n\n    @property\n    def locked(self):\n        if self._row:\n            return self._row.locked\n        else:\n            if not self._data:\n                self._load()\n            return self._data.locked\n\n# Use lockProject/unlockProject methods\n#    @locked.setter\n#    def locked(self, value):\n#        if not self._data:\n#            self._load()\n#        if self._data.locked != value:\n#            self._data.locked = value\n#            self._is_dirty = True\n\n    @property\n    def parent_project(self):\n        if self._row:\n            return Project(self.session, self._row.parentProjectId) if self._row.parentProjectId else None\n        else:\n            if not self._data:\n                self._load()\n            return Project(self.session, self._data.parentProjectId) if self._data.parentProjectId else None\n\n# Use setParentProject\n#    @parent_project.setter\n#    def parent_project(self, value):\n#        new_project_id = value.id if value else None\n#        if not self._data:\n#            self._load()\n#        if
self._data.parentProjectId != new_project_id:\n#            self._data.parentProjectId = new_project_id\n#            self._is_dirty = True\n\n    @property\n    def path(self):\n        if self._row:\n            return self._row.path\n        else:\n            if not self._data:\n                self._load()\n            return self._data.path\n\n    # The path is the same as the project name\n    @path.setter\n    def path(self, value):\n        if not self._data:\n            self._load()\n        if self._data.path != value:\n            self._data.path = value\n            self._is_dirty = True\n\n    @property\n    def hierarchy_path(self):\n        if self._row:\n            return self._row.hierarchyPath\n        else:\n            if not self._data:\n                self._load()\n            return self._data.hierarchyPath\n\n    @property\n    def title(self):\n        if self._row:\n            return self._row.title\n        else:\n            if not self._data:\n                self._load()\n            return self._data.title\n\n    @title.setter\n    def title(self, value):\n        if not self._data:\n            self._load()\n        if self._data.title != value:\n            self._data.title = value\n            self._is_dirty = True\n\n    # TeamForge Convenience Functions\n\n    @property\n    def users(self):\n        return teamforge.user.UsersForProject(self.session, self)\n\n    @property\n    def groups(self):\n        return teamforge.group.GroupsForProject(self.session, self)\n\n    @property\n    def units(self):\n        return teamforge.tracker.unit.UnitsForProject(self.session, self)\n\n    @property\n    def teams(self):\n        return teamforge.tracker.team.TeamsForProject(self.session, self)\n\n    @property\n    def trackers(self):\n        return teamforge.tracker.tracker.TrackersForProject(self.session, self)\n\n    @property\n    def documents(self):\n        return teamforge.document.DocumentsForProject(self.session, self.id)\n\n    @property\n    def repositories(self):\n        return teamforge.scm.repository.RepositoriesForProject(self.session, self)\n\n    def has_permission(self, username, permission, object):\n        object_id = object.id if isinstance(object, teamforge.TeamForgeObject) else object\n        self.logger.info(\"Permission Check: %s, %s, %s, %s\", self.id, username, permission, object_id)\n        return self.session.server.teamforge.service.hasPermission(\n            self.session.teamforge_session, username, self.id, permission, object_id)\n","sub_path":"lib/teamforge/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":6812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} {"seq_id":"510072773","text":"# https://www.youtube.com/watch?v=jznOxR4AyiI\n# https://dfghdfhello.appspot.com/\n\nimport jinja2\nimport os\nimport sys\n\n\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"libs\"))\n\nimport webapp2\nfrom google.appengine.ext import ndb\nfrom gaesessions import get_current_session\n\n\n\n#########\nclass Person(ndb.Model):\n    firstName = ndb.StringProperty()\n    lastName = ndb.StringProperty()\n\n\n#########\nclass MainPage(webapp2.RequestHandler):\n    def get(self):\n\n        jinja_environment = jinja2.Environment(autoescape=True,\n            loader=jinja2.FileSystemLoader(os.path.join(\n                os.path.dirname(__file__), 'templates')))\n\n        session = get_current_session()\n        firstName = session.get('firstName', '')\n        familyName = session.get('familyName', '')\n        message = session.get('message', '')\n\n\n        template_vars = {'message': message,\n                         'firstName': firstName,\n                         'familyName': familyName}\n        template = jinja_environment.get_template('index1.html')\n\n        self.response.write(template.render(template_vars))\n\n    def post(self):\n\n\n        jinja_environment = jinja2.Environment(autoescape=True,\n            loader=jinja2.FileSystemLoader(os.path.join(\n                os.path.dirname(__file__), 'templates')))\n\n\n        firstName = self.request.get(\"firstName\")\n        
familyName = self.request.get(\"familyName\")\n\n session = get_current_session()\n session['firstName'] = firstName\n session['familyName'] = familyName\n\n session['message'] = ''\n if len(firstName) < 2 or len(familyName) < 2:\n session['message'] = \"First name and family name are mendatory\"\n self.redirect('/')\n\n person = Person()\n person.firstName = firstName\n person.lastName = familyName\n person.put()\n\n #for p in people:\n # a = p.firstName\n # b = p.lastName\n\n query = Person.query()\n query.order(Person.lastName, Person.firstName)\n people = query.fetch(20)\n\n\n template_vars = {'people': people}\n template = jinja_environment.get_template('index2.html')\n\n self.response.out.write(template.render(template_vars))\n\n\n\n\n # self.response.out.write('Hello <strong>' + firstName + ' ' + familyName + '<strong>')\n\n\napp = webapp2.WSGIApplication([('/', MainPage)], debug=True)\n","sub_path":"hello/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"280968507","text":"def make_album(artist, title):\n full = {'Artist = ': artist, 'Title = ': title}\n return full\n\n\nwhile True:\n print(\"Please Enter album detail\")\n user = input(\"If you want to exit press 'q': \")\n if user == 'q':\n break\n a_name = input(\"Artist: \")\n t_name = input(\"Title: \")\n\n print(make_album(a_name, t_name))\n\n","sub_path":"Funtions/User Albums.py","file_name":"User Albums.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"283871126","text":"# Licensed to the Software Freedom Conservancy (SFC) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The SFC licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport unittest\n\nimport pytest\nfrom selenium.common.exceptions import (\n WebDriverException,\n NoSuchElementException)\n\n\nclass ChildrenFindingTests(unittest.TestCase):\n\n def test_should_find_element_by_xpath(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n child = element.find_element_by_xpath(\"select\")\n self.assertEqual(child.get_attribute(\"id\"), \"2\")\n\n def test_should_not_find_element_by_xpath(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n with pytest.raises(NoSuchElementException):\n element.find_element_by_xpath(\"select/x\")\n\n def test_finding_dot_slash_elements_on_element_by_xpath_should_find_not_top_level_elements(self):\n self._load_simple_page()\n parent = self.driver.find_element_by_id(\"multiline\")\n children = parent.find_elements_by_xpath(\"./p\")\n self.assertEqual(1, len(children))\n self.assertEqual(\"A div containing\", children[0].text)\n\n def test_should_find_elements_by_xpath(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n children = element.find_elements_by_xpath(\"select/option\")\n self.assertEqual(len(children), 8)\n self.assertEqual(children[0].text, \"One\")\n self.assertEqual(children[1].text, \"Two\")\n\n def test_should_not_find_elements_by_xpath(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n children = element.find_elements_by_xpath(\"select/x\")\n self.assertEqual(len(children), 0)\n\n def test_finding_elements_on_element_by_xpath_should_find_top_level_elements(self):\n self._load_simple_page()\n parent = self.driver.find_element_by_id(\"multiline\")\n all_para_elements = self.driver.find_elements_by_xpath(\"//p\")\n children = parent.find_elements_by_xpath(\"//p\")\n self.assertEqual(len(all_para_elements), len(children))\n\n def test_should_find_element_by_name(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n child = element.find_element_by_name(\"selectomatic\")\n self.assertEqual(child.get_attribute(\"id\"), \"2\")\n\n def test_should_find_elements_by_name(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n children = element.find_elements_by_name(\"selectomatic\")\n self.assertEqual(len(children), 2)\n\n def test_should_find_element_by_id(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n child = element.find_element_by_id(\"2\")\n self.assertEqual(child.get_attribute(\"name\"), \"selectomatic\")\n\n def test_should_find_elements_by_id(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n child = element.find_elements_by_id(\"2\")\n self.assertEqual(len(child), 2)\n\n def test_should_find_element_by_id_when_multiple_matches_exist(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_id(\"test_id_div\")\n child = element.find_element_by_id(\"test_id\")\n self.assertEqual(child.text, \"inside\")\n\n def test_should_find_element_by_id_when_no_match_in_context(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_id(\"test_id_div\")\n with pytest.raises(NoSuchElementException):\n element.find_element_by_id(\"test_id_out\")\n\n def 
test_should_find_element_by_link_text(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"div1\")\n child = element.find_element_by_link_text(\"hello world\")\n self.assertEqual(child.get_attribute(\"name\"), \"link1\")\n\n def test_should_find_elements_by_link_text(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"div1\")\n children = element.find_elements_by_link_text(\"hello world\")\n self.assertEqual(len(children), 2)\n self.assertEqual(\"link1\", children[0].get_attribute(\"name\"))\n self.assertEqual(\"link2\", children[1].get_attribute(\"name\"))\n\n def test_should_find_element_by_class_name(self):\n self._load_page(\"nestedElements\")\n parent = self.driver.find_element_by_name(\"classes\")\n element = parent.find_element_by_class_name(\"one\")\n self.assertEqual(\"Find me\", element.text)\n\n def test_should_find_elements_by_class_name(self):\n self._load_page(\"nestedElements\")\n parent = self.driver.find_element_by_name(\"classes\")\n elements = parent.find_elements_by_class_name(\"one\")\n self.assertEqual(2, len(elements))\n\n def test_should_find_element_by_tag_name(self):\n self._load_page(\"nestedElements\")\n parent = self.driver.find_element_by_name(\"div1\")\n element = parent.find_element_by_tag_name(\"a\")\n self.assertEqual(\"link1\", element.get_attribute(\"name\"))\n\n def test_should_find_elements_by_tag_name(self):\n self._load_page(\"nestedElements\")\n parent = self.driver.find_element_by_name(\"div1\")\n elements = parent.find_elements_by_tag_name(\"a\")\n self.assertEqual(2, len(elements))\n\n def test_should_be_able_to_find_an_element_by_css_selector(self):\n self._load_page(\"nestedElements\")\n parent = self.driver.find_element_by_name(\"form2\")\n element = parent.find_element_by_css_selector('*[name=\"selectomatic\"]')\n self.assertEqual(\"2\", element.get_attribute(\"id\"))\n\n def test_should_be_able_to_find_multiple_elements_by_css_selector(self):\n self._load_page(\"nestedElements\")\n parent = self.driver.find_element_by_name(\"form2\")\n elements = parent.find_elements_by_css_selector(\n '*[name=\"selectomatic\"]')\n self.assertEqual(2, len(elements))\n\n def test_should_throw_an_error_if_user_passes_in_invalid_by(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n with pytest.raises(WebDriverException):\n element.find_element(\"foo\", \"bar\")\n\n def test_should_throw_an_error_if_user_passes_in_invalid_by_when_find_elements(self):\n self._load_page(\"nestedElements\")\n element = self.driver.find_element_by_name(\"form2\")\n with pytest.raises(WebDriverException):\n element.find_elements(\"foo\", \"bar\")\n\n def _page_url(self, name):\n return self.webserver.where_is(name + '.html')\n\n def _load_simple_page(self):\n self._load_page(\"simpleTest\")\n\n def _load_page(self, name):\n self.driver.get(self._page_url(name))\n","sub_path":"py/test/selenium/webdriver/common/children_finding_tests.py","file_name":"children_finding_tests.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"505304286","text":"i = 'y'\nwhile i == 'y':\n power = float(input('请输入功率值'))\n unit = input('请输入功率的单位:马力(ps)还是千瓦(kw)?')\n power_to_change = 0 # 变量初始化\n if unit.upper() == \"PS\":\n power_to_change = power / 0.7354987 # 当输入的功率值以马力计时,需除以转换系数0.7354987得到对应的以千瓦计的功率值\n print('输入的是马力')\n elif unit.lower() == \"kw\":\n power_to_change = power * 
0.7354987 # 当输入功率值以千瓦计时,需乘以转换系数0.7354987得到对应的以马力计的功率值\n else:\n print(\"\")\n # print(unit)\n print(f\"您输入的功率为{power}{unit.replace('ps', '马力').replace('kw', '千瓦')},换算为{power_to_change}{unit.replace('ps', '千瓦').replace('kw', '马力')}\")\n # 请输入正确的功率单位:ps或kw,大小写即可,其它的字母组合为不正确输入\n i = input('是否继续y/n?')\nprint('程序结束')","sub_path":"pythonProject/mainFolder/ifElseStudy/ifExample.py","file_name":"ifExample.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"620064851","text":"import torch\nimport torch.nn as nn\nimport random\nimport torch.optim as optim\nfrom torch.nn import Module\nfrom torch.autograd import Variable\n\nclass Net(Module):\n\n def __init__(self,D_in,H,D_out):\n\n super(Net,self).__init__()\n self.fc1 = nn.Linear(D_in,H)\n self.sharing = nn.Linear(H,H)\n self.fc2 = nn.Linear(H,D_out)\n\n def forward(self, in_data):\n h_relu = self.fc1(in_data).clamp(min=0)\n for _ in range(random.randint(0,3)):\n h_relu = self.sharing(h_relu).clamp(min=0)\n y_pred = self.fc2(h_relu)\n return y_pred\n\nN,D_in,H,D_out = 64,1000,100,10\n\nx = Variable(torch.rand(N,D_in))\ny = Variable(torch.randn(N,D_out))\n\nnet = Net(D_in=D_in,H=H,D_out=D_out)\n\ncriteria = nn.MSELoss(reduction='sum')\noptimizer = optim.SGD(params=net.parameters(),lr=1e-4,momentum=0.9)\n\nfor step in range(2000):\n\n loss = criteria(net(x),y)\n print(f'step : {step+1} loss : {loss.item()}')\n\n optimizer.zero_grad()\n\n loss.backward()\n\n optimizer.step()","sub_path":"ControlFlow_WeightSharing.py","file_name":"ControlFlow_WeightSharing.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"643440614","text":"# -*- coding: utf-8 -*-\n\"\"\"Define the cert_manager.ssl.SMIME unit tests.\"\"\"\n# Don't warn about things that happen as that is part of unit testing\n# pylint: disable=protected-access\n# pylint: disable=invalid-name\n\nfrom testtools import TestCase\n\nfrom cert_manager.smime import SMIME\n\nfrom .lib.testbase import ClientFixture\n\n\n# pylint: disable=too-few-public-methods\nclass TestSMIME(TestCase):\n \"\"\"Serve as a Base class for all tests of the SMIME class.\"\"\"\n\n def setUp(self): # pylint: disable=invalid-name\n \"\"\"Initialize the class.\"\"\"\n # Call the inherited setUp method\n super().setUp()\n\n # Make sure the Client fixture is created and setup\n self.cfixt = self.useFixture(ClientFixture())\n self.client = self.cfixt.client\n\n # Set some default values\n self.ep_path = \"/smime\"\n self.api_version = \"v1\"\n self.api_url = self.cfixt.base_url + self.ep_path + \"/\" + self.api_version\n\n\nclass TestInit(TestSMIME):\n \"\"\"Test the class initializer.\"\"\"\n\n def test_defaults(self):\n \"\"\"Parameters should be set correctly inside the class using defaults.\"\"\"\n end = SMIME(client=self.client)\n\n # Check all the internal values\n self.assertEqual(end._client, self.client)\n self.assertEqual(end._api_version, self.api_version)\n self.assertEqual(end._api_url, self.api_url)\n\n def test_version(self):\n \"\"\"Parameters should be set correctly inside the class with a custom version.\"\"\"\n version = \"v2\"\n api_url = self.cfixt.base_url + self.ep_path + \"/\" + version\n\n end = SMIME(client=self.client, api_version=version)\n\n # Check all the internal values\n self.assertEqual(end._client, self.client)\n self.assertEqual(end._api_version, version)\n self.assertEqual(end._api_url, 
api_url)\n","sub_path":"tests/test_smime.py","file_name":"test_smime.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"646801899","text":"# blackstorm - UserBot\n# Copyright (C) 2021 TeamBlackStorm\n#\n# This file is a part of < https://github.com/TeamBlackStorm/blackstorm/ >\n\nfrom pyUltroid.functions.all import get_chatbot_reply\nfrom pyUltroid.functions.chatBot_db import chatbot_stats\nfrom pyUltroid.functions.clean_db import *\nfrom pyUltroid.functions.forcesub_db import *\nfrom pyUltroid.functions.gban_mute_db import *\nfrom pyUltroid.functions.greetings_db import *\nfrom pyUltroid.functions.username_db import *\nfrom telethon.errors.rpcerrorlist import UserNotParticipantError\nfrom telethon.tl.functions.channels import GetParticipantRequest\nfrom telethon.utils import get_display_name\n\nfrom . import *\n\n\n@blackstorm_bot.on(events.ChatAction())\nasync def ChatActionsHandler(ult): # sourcery no-metrics\n # clean chat actions\n if is_clean_added(ult.chat_id):\n try:\n await ult.delete()\n except BaseException:\n pass\n\n # thank members\n if must_thank(ult.chat_id):\n chat_count = len(await ult.client.get_participants(await ult.get_chat()))\n if chat_count % 100 == 0:\n stik_id = chat_count / 100 - 1\n sticker = stickers[stik_id]\n await ultroid.send_message(ult.chat_id, file=sticker)\n # force subscribe\n if (\n udB.get(\"FORCESUB\")\n and ((ult.user_joined or ult.user_added))\n and get_forcesetting(ult.chat_id)\n ):\n user = await ult.get_user()\n if not user.bot:\n joinchat = get_forcesetting(ult.chat_id)\n try:\n await ultroid_bot(GetParticipantRequest(int(joinchat), user.id))\n except UserNotParticipantError:\n await ultroid_bot.edit_permissions(\n ult.chat_id, user.id, send_messages=False\n )\n res = await ultroid_bot.inline_query(\n asst.me.username, f\"fsub {user.id}_{joinchat}\"\n )\n await res[0].click(ult.chat_id, reply_to=ult.action_message.id)\n\n # gban checks\n if ult.user_joined or ult.added_by:\n user = await ult.get_user()\n chat = await ult.get_chat()\n reason = is_gbanned(user.id)\n if reason and chat.admin_rights:\n try:\n await ult.client.edit_permissions(\n chat.id,\n user.id,\n view_messages=False,\n )\n gban_watch = f\"#GBanned_User Joined.\\n\\n**User** - [{user.first_name}](tg://user?id={user.id})\\n\"\n gban_watch += f\"**Reason**: {reason}\\n\\n\"\n gban_watch += \"`User Banned.`\"\n await ult.reply(gban_watch)\n except Exception as er:\n LOGS.info(er)\n\n # greetings\n elif get_welcome(ult.chat_id):\n user = await ult.get_user()\n chat = await ult.get_chat()\n title = chat.title or \"this chat\"\n pp = await ult.client.get_participants(chat)\n count = len(pp)\n mention = f\"[{get_display_name(user)}](tg://user?id={user.id})\"\n name = user.first_name\n last = user.last_name\n fullname = f\"{name} {last}\" if last else name\n uu = user.username\n username = f\"@{uu}\" if uu else mention\n wel = get_welcome(ult.chat_id)\n msgg = wel[\"welcome\"]\n med = wel[\"media\"]\n userid = user.id\n if msgg:\n send = await ult.reply(\n msgg.format(\n mention=mention,\n group=title,\n count=count,\n name=name,\n fullname=fullname,\n username=username,\n userid=userid,\n ),\n file=med,\n )\n await asyncio.sleep(150)\n await send.delete()\n else:\n await ult.reply(file=med)\n elif (ult.user_left or ult.user_kicked) and get_goodbye(ult.chat_id):\n user = await ult.get_user()\n chat = await ult.get_chat()\n title = chat.title or \"this chat\"\n pp = await 
ult.client.get_participants(chat)\n count = len(pp)\n mention = f\"[{get_display_name(user)}](tg://user?id={user.id})\"\n name = user.first_name\n last = user.last_name\n fullname = f\"{name} {last}\" if last else name\n uu = user.username\n username = f\"@{uu}\" if uu else mention\n wel = get_goodbye(ult.chat_id)\n msgg = wel[\"goodbye\"]\n med = wel[\"media\"]\n userid = user.id\n if msgg:\n send = await ult.reply(\n msgg.format(\n mention=mention,\n group=title,\n count=count,\n name=name,\n fullname=fullname,\n username=username,\n userid=userid,\n ),\n file=med,\n )\n await asyncio.sleep(150)\n await send.delete()\n else:\n await ult.reply(file=med)\n\n\n@blackstorm_bot.on(events.NewMessage(incoming=True))\nasync def chatBot_replies(e):\n sender = await e.get_sender()\n if not isinstance(sender, types.User):\n return\n if e.text and chatbot_stats(e.chat_id, e.sender_id):\n msg = get_chatbot_reply(e, e.message.message)\n if msg:\n await e.reply(msg)\n chat = await e.get_chat()\n if e.is_group and not sender.bot:\n if sender.username:\n await uname_stuff(e.sender_id, sender.username, sender.first_name)\n elif e.is_private and not sender.bot:\n if chat.username:\n await uname_stuff(e.sender_id, chat.username, chat.first_name)\n\n\n@blackstorm_bot.on(events.Raw(types.UpdateUserName))\nasync def uname_change(e):\n await uname_stuff(e.user_id, e.username, e.first_name)\n\n\nasync def uname_stuff(id, uname, name):\n if udB.get(\"USERNAME_LOG\") == \"True\":\n old = get_username(id)\n # Ignore Name Logs\n if old and old == uname:\n return\n if old and uname:\n await asst.send_message(\n LOG_CHANNEL,\n f\"∆ #UsernameUpdate\\n\\n@{old} changed username to @{uname}\",\n )\n elif old:\n await asst.send_message(\n LOG_CHANNEL,\n f\"∆ #UsernameUpdate\\n\\n[{name}](tg://user?id={id}) removed its username. (@{old})\",\n )\n elif uname:\n await asst.send_message(\n LOG_CHANNEL,\n f\"∆ #UsernameUpdate\\n\\n[{name}](tg://user?id={id})'s new username --> @{uname}\",\n )\n update_username(id, uname)\n","sub_path":"Plugins/_ChatActions.py","file_name":"_ChatActions.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"77420087","text":"# -*- coding: utf-8 -*- \n\n# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:\n#Copyright (c) 2005 Ali Afshar aafshar@gmail.com\n\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n\n#The above copyright notice and this permission notice shall be included in\n#all copies or substantial portions of the Software.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\nimport gtk\n\nimport pida.core.service as service\nfrom pida.core import actions\nimport pida.pidagtk.contentview as contentview\n\ndefs = service.definitions\n\nfrom pida.model import model, views\n\nclass config_view(contentview.content_view):\n\n ICON_NAME = 'gtk-preferences'\n\n SHORT_TITLE = 'Configuration'\n\n LONG_TITLE = 'PIDA configuration manager'\n\n def init(self):\n self._paned = gtk.HPaned()\n self.widget.pack_start(self._paned)\n\n def set_components(self, lister, pager):\n self._lister = lister\n self._pager = pager\n self._paned.pack1(lister)\n self._paned.pack2(pager)\n self._paned.set_position(200)\n self.show_all()\n\nclass config_manager(service.service):\n \n class ConfigView(defs.View):\n view_type = config_view\n book_name = 'ext'\n\n def cmd_edit(self):\n regs = [(svc.NAME, svc.options) for svc in self.boss.services]\n view = self.create_view('ConfigView')\n self.show_view(view=view)\n view.set_registries(regs)\n view.connect('data-changed', self.cb_view_data_changed)\n\n def cb_view_data_changed(self, view):\n self.boss.reset()\n\n @actions.action(stock_id=gtk.STOCK_PREFERENCES, label=None,\n default_accel='<Shift><Control>k')\n def act_configuration(self, action):\n self.call('edit')\n\n def get_menu_definition(self):\n return \"\"\"\n <menubar>\n <menu name=\"base_file\" action=\"base_file_menu\">\n </menu>\n <menu name=\"base_edit\" action=\"base_edit_menu\">\n <placeholder name=\"PreferencesMenu\">\n <separator />\n <menuitem name=\"confedit\" action=\"configmanager+configuration\" />\n </placeholder>\n </menu>\n <menu name=\"base_project\" action=\"base_project_menu\">\n </menu>\n <menu name=\"base_tools\" action=\"base_tools_menu\">\n </menu>\n </menubar>\n \"\"\"\n\nclass ConfigManager(service.service):\n\n def init(self):\n self._editview = None\n\n def reset(self):\n self.conf_group = model.ModelGroup()\n for svc in self.boss.services:\n if svc.opts is not None:\n self.conf_group.add_model(svc.opts)\n\n class ConfigView(defs.View):\n view_type = config_view\n book_name = 'ext'\n\n def view_closed(self, view):\n self._editview = None\n self.conf_group.remove_observer(self._listobs)\n self.conf_group.remove_observer(self._pageobs)\n\n def cmd_edit(self):\n if self._editview is None:\n self._editview = self.create_view('ConfigView')\n self.show_view(view=self._editview)\n self._listobs = self.conf_group.create_multi_observer(\n views.TreeObserver)\n self._pageobs = self.conf_group.create_single_observer(\n views.PropertyPage)\n self._editview.set_components(self._listobs, self._pageobs)\n self._editview.raise_page()\n\n\n @actions.action(stock_id=gtk.STOCK_PREFERENCES, label=None,\n default_accel='<Shift><Control>k')\n def act_configuration(self, action):\n self.call('edit')\n\n def get_menu_definition(self):\n return \"\"\"\n <menubar>\n <menu name=\"base_file\" action=\"base_file_menu\">\n </menu>\n <menu name=\"base_edit\" action=\"base_edit_menu\">\n <placeholder name=\"PreferencesMenu\">\n <separator />\n <menuitem name=\"confedit\" action=\"configmanager+configuration\" />\n </placeholder>\n </menu>\n <menu name=\"base_project\" action=\"base_project_menu\">\n </menu>\n <menu name=\"base_tools\" action=\"base_tools_menu\">\n </menu>\n </menubar>\n \"\"\"\n\nService = ConfigManager\n\n 
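# NOTE: two manager variants are defined above (config_manager and ConfigManager);\n# only ConfigManager is exported here, via the Service alias that the PIDA service\n# loader presumably looks up.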
\n","sub_path":"branches/model-config/pida/services/configmanager.py","file_name":"configmanager.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"88350416","text":"# -*- coding: utf-8 -*-\n# build of python3.6 by OS X\n\n# パッケージのインポート\n# OpenCV使用\n# 他のパッケージはpipコマンドでインストール\nimport sys, os, shutil\nimport datetime\nimport webbrowser as wb\n\nimport cv2\nimport numpy as np\nimport tkinter as tk\nimport tkinter.filedialog as tkFD\nfrom tkinter import PhotoImage\nfrom PIL import ImageTk, Image\nimport random\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot as plt\n#import qrcode\n\n#print(sys.path)\n\n# 外部ファイル\nimport func_collection as fc\nimport camera as cam\n\nroot = tk.Tk()\nroot.title(\"KPCAS beta 0.1\")\nroot.geometry(\"720x380\") # ウィンドウサイズ\n\n#RuntimeError: maximum recursion depth exceeded (再帰の数が深すぎるエラー)\n#https://qiita.com/narupo/items/e25ac05a9065c0bd9c03\n#http://sucrose.hatenablog.com/entry/2013/01/19/164008\n#sys.setrecursionlimit(50000) #再帰の最大数を増やす\n#LIMITER = sys.getrecursionlimit()\n#print(\"maximum recursion depth set: \" , LIMITER)\n\n\nFILTER = ('2値化', 'グレイスケール', '赤単色', '緑単色', '青単色',\n '色交換(赤青)', '色交換(赤緑)', '色交換(緑青)', \n 'HSV色空間(色相シフト)', 'HSV色空間(彩度シフト)', 'HSV色空間(明度シフト)',\n '明るく', '暗く', 'ガンマ補正', 'セピア', 'モザイク', 'ネガポジ反転',\n '減色', 'ソーラライズ', 'ポスタライズ', 'イコライズ', 'ミラー', '回転(90度)',\n '膨張', '収縮', \n '平均化', 'メディアンフィルタ', 'ガウシアンフィルタ', 'DoG',\n 'バイラテラルフィルタ', 'ノンローカルミーンフィルタ', \n '一次微分(横)', '一次微分(縦)', 'Prewitt', 'Sobel', 'ラプラシアン', 'ラプラシアン(PIL)',\n 'エンボス', 'エンボス(PIL)', 'アンシャープマスキング',\n 'ごま塩ノイズ', 'ガウシアンノイズ', 'フーリエ変換', 'ローパスフィルタ', 'ハイパスフィルタ',\n '顔検出', '顔面モザイク', 'ORB', )\n\nFILTER_SET = ()\n\n# カレントディレクトリ取得\nCD = os.getcwd()\nprint(CD)\n\n# 画像リサイズ後の保存先。読み込むたびに上書きされる。\n# このパスを自分の環境に合わせて設定。\n# 絶対パス\nREAL_PATH = os.path.join(CD, \"resize_picture\", \"import_pic.jpg\")\nprint(REAL_PATH)\n\n# 出力絶対パス\nO_REAL_PATH = os.path.join(CD, \"output_img\", \"output_img.jpg\")\nprint(O_REAL_PATH)\n\n# 保存先パス\nS_REAL_PATH = os.path.join(CD, \"save_image\", \"Final_img_\")\n\n# フラグ\nFO = 0\nADD_FLAG = 0\nPT_FLAG = 0\n\n# 現時刻\nNOW = datetime.datetime.now()\n\n\n# 命令セット追加削除時のリスト用グローバル変数\n# 本来、グローバル変数は大文字表記が暗黙の了解だが、これら以下のものは例外とする。理由は以下の通り。\n# 1. このグローバル変数は一部の関数内でのみ扱う。\n# 2. 中身の変化状態を別関数では用いていない。\n# 3. 
結局のところ、組み込んだ命令はグローバル変数 FILTER_SET を読み出すため。\nglobal lb_default\nglobal lb_new\n\n\nclass Application(tk.Frame):\n def __init__(self, master=None):\n tk.Frame.__init__(self, master)\n self.pack(expand=1, fill=tk.BOTH, anchor=tk.NW)\n # 基本ウィンドウ\n self.create_widgets()\n\n def create_widgets(self):\n global val_c\n # 各ウィジェット\n # 文字定義\n self.title = tk.Label(self, text=u\"KrProCessAS\", font=(\"\", 20), bg='#ffaacc')\n\n # エントリ定義\n self.var_entry = tk.StringVar()\n self.entry = tk.Entry(self, textvariable=self.var_entry, width=22)\n\n # ボタン定義\n self.button = tk.Button(self, text=u\"開く\", command=self.button_pushed)\n self.button_qt = tk.Button(self, text=u\"Quit\", command=self.button_quit)\n self.button_man = tk.Button(self, text=u\"マニュアル\", command=self.manual_op, width=20)\n self.button_act = tk.Button(self, text=u\"命令を組み込む\", command=self.action, width=20)\n self.button_exe = tk.Button(self, text=u\"命令を実行\", command=self.exe_action, width=20)\n self.button_save = tk.Button(self, text=u\"出力結果を保存\", command=self.save, width=10)\n self.button_clear = tk.Button(self, text=u\"すべてクリア\", command=self.all_clear, width=20)\n self.button_output_clear = tk.Button(self, text=u\"出力結果をクリア\", command=self.output_clear, width=20)\n self.button_web = tk.Button(self, text=u\"Wikiをみる\", command=self.web_link, width=20)\n self.button_hist = tk.Button(self, text=u\"ヒストグラム\", command=self.histgram, width=10)\n\n self.button_cam = tk.Button(self, text=u\"カメラ起動\", command=self.cam)\n self.button_qr = tk.Button(self, text=u\"QR\", command=self.qrcode)\n \n # キャンバス定義\n self.canvas = tk.Canvas(self, width=200, height=200, relief=tk.RIDGE, bd=2)\n self.o_canvas = tk.Canvas(self, width=256, height=256, relief=tk.RIDGE, bd=2)\n\n # リストボックス/スクロールバー\n self.frame = tk.Frame(root)\n self.frame.place(x=285, y=43)\n \n self.v = tk.StringVar(value=FILTER_SET)\n self.listbox_main = tk.Listbox(self.frame, listvariable=self.v, width=20, height=15, relief=tk.RIDGE, bd=2)\n self.scrollbar_m = tk.Scrollbar(self.frame, orient=\"v\", command=self.listbox_main.yview)\n self.listbox_main['yscrollcommand'] = self.scrollbar_m.set\n\n # チェックボックス\n val_c = tk.BooleanVar()\n val_c.set(False)\n self.checkbox = tk.Checkbutton(self, text=u\"リサイズしない\", variable=val_c)\n \n \n # 各物体の位置(gridだとややこしいので、placeで直接指定する)\n # 文字など\n self.title.place(x=100, y=5)\n\n # エントリなど\n self.entry.place(x=500, y=30) #source\n self.entry.insert(tk.END, \"開くを押して参照する\")\n\n # ボタンなど\n self.button.place(x=655, y=2)\n self.button_qt.place(x=10, y=5)\n self.button_man.place(x=500, y=275)\n self.button_act.place(x=280, y=310)\n self.button_exe.place(x=280, y=340)\n self.button_save.place(x=20, y=340)\n self.button_hist.place(x=150, y=340)\n self.button_output_clear.place(x=40, y=310)\n self.button_clear.place(x=500, y=310)\n self.button_web.place(x=500, y=340)\n self.button_cam.place(x=360, y=7)\n self.button_qr.place(x=300, y=7)\n #self.button.grid(column=2, row=0, sticky=tk.E)\n\n # キャンバスなど\n self.canvas.place(x=500, y=60)\n self.canvas.create_text(110, 110, text=u\"Not Found Image...\")\n self.o_canvas.place(x=10, y=40)\n self.o_canvas.create_text(127, 127, text=u\"Not Output Image...\")\n\n # リストボックス/スクロールバーなど\n self.listbox_main.grid(row=0, column=0)\n self.scrollbar_m.grid(row=0, column=1, sticky=tk.NS)\n \n # その他\n self.checkbox.place(x=500, y=5)\n\n # おまけ\n\n def cam(self):\n cam.Camera()\n print(\"escで終了\")\n print(\"spaceで画像取得\")\n\n def qrcode(self):\n print(\"未実装\")\n \n\n # 参照ファイルコマンド\n def button_pushed(self):\n # 
http://spcx8.hatenablog.com/entry/2017/12/24/112528\n # ファイルの参照方法はWindowsとmacOSで異なる \n # Windowsの場合は以下のようになる\n # fname = tkFD.askopenfilename(filetypes=[('data files','*.csv;*.txt')],initialdir=os.getcwd())\n # 参照ファイルの拡張子を絞る方法が異なるようで、Windowsの場合は'*.*'で全表示も可能\n global REAL_PATH\n global val_c\n \n fname = tkFD.askopenfilename(filetypes=[(\"jpg files\",\"*.jpg\"),(\"png files\",\"*.png\")],initialdir=os.getcwd())\n print(fname)\n if not fname:\n print(\"ファイルが指定されていません\")\n\n if val_c.get() == True:\n img = Image.open(fname)\n img.save(REAL_PATH)\n else:\n img = Image.open(fname)\n im_r = img.resize((256, 256))\n im_r.save(REAL_PATH)\n\n \n # ソースコードの保存場所に気をつける\n\n # 以下、リサイズ後の絶対パス。なぜかはわからないが、絶対パスでないとエラーを吐く\n # ex) ~/Documents/... とするとエラー\n # 読み込み毎にリサイズされて上書きされる\n \n self.var_entry.set(fname)\n\n self.img = ImageTk.PhotoImage(file=REAL_PATH)\n self.canvas.create_image(110, 110, image=self.img)\n\n\n # マニュアルを開く\n def manual_op(self):\n man = open(\"./manual.txt\",\"r\")\n \n man_win = tk.Toplevel(master=self.master)\n man_win.title(\"マニュアル\")\n man_win.geometry(\"680x420+100+100\")\n\n text_in = man.read()\n text_in.ljust(100)\n\n self.button = tk.Button(man_win, text=\"Quit\", command=man_win.destroy)\n self.button.place(x=10, y=10)\n\n self.label = tk.Label(man_win, text=text_in, justify=\"left\")\n self.label.place(x=10, y=50)\n\n #button.focus_set()\n man_win.transient(self.master)\n #man_win.grab_set()\n self.konami = tk.StringVar()\n self.K_COM = tk.Entry(man_win, textvariable=self.konami, width=14)\n self.K_COM.place(x=460, y=10)\n self.button_k = tk.Button(man_win, text=\"Go\", command=self.check_K_COM)\n self.button_k.place(x=600, y=10)\n man.close()\n\n # コナミコマンドチェック用\n def check_K_COM(self):\n global FILTER\n global ADD_FLAG\n check = self.K_COM.get()\n if ADD_FLAG == 0:\n if check == \"uuddlrlrAB\":\n FILTER += ('ヒデオ1', 'ヒデオ2', 'FOXDIE', )\n ADD_FLAG = 1\n print(\"君は選ばれた\")\n else:\n print(\"204863\")\n else:\n print(\"俺は歩いたよ\\n歩くことしかできなかったんだ\\nやがて 俺の前を歩く俺が見えた\\nだが あれは俺じゃない\\n気をつけろ\\nそのドアの隙間は 分断された現実(セパレート・リアリティ)だ\\n俺なのは 俺だけだ\\nお前なのは お前だけか?\")\n \n \n # 保存\n def save(self):\n global FO\n shutil.copy(O_REAL_PATH, S_REAL_PATH + str(NOW) + \"_\" + str(FO) + \".jpg\")\n FO += 1\n print(\"save\")\n\n\n # リンクに飛ぶ\n def web_link(self):\n wb.open_new(\"https://github.com/nyanten/KPCAS/wiki\")\n \n\n # すべてクリア\n def all_clear(self):\n global FILTER_SET\n FILTER_SET = ()\n print(\"All Set clear\")\n\n self.clear_module()\n\n self.canvas.delete(\"all\")\n self.canvas.create_text(110, 110, text=u\"Not Found Image...\")\n self.o_canvas.delete(\"all\")\n self.o_canvas.create_text(127, 127, text=u\"Not Output Image...\")\n\n # メインのリストボックスを更新\n frame = tk.Frame(root)\n frame.place(x=285, y=43)\n \n v = tk.StringVar(value=FILTER_SET)\n self.listbox_main = tk.Listbox(frame, listvariable=v, width=20, height=15, relief=tk.RIDGE, bd=2)\n self.scrollbar_m = tk.Scrollbar(frame, orient=\"v\", command=self.listbox_main.yview)\n self.listbox_main['yscrollcommand'] = self.scrollbar_m.set\n \n self.listbox_main.grid(row=0, column=0)\n self.scrollbar_m.grid(row=0, column=1, sticky=tk.NS)\n\n\n # 出力結果画像クリア\n def output_clear(self):\n flag2 = os.path.exists(O_REAL_PATH)\n if flag2 == True:\n os.remove(O_REAL_PATH)\n\n self.o_canvas.delete(\"all\")\n self.o_canvas.create_text(127, 127, text=u\"Not Output Image...\")\n print(\"Output Image Delete\")\n\n\n # ヒストグラム\n def histgram(self):\n img = cv2.imread(O_REAL_PATH)\n \n if len(img.shape) == 3:\n b, g, r = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n \n hist_r, bins = 
np.histogram(r.ravel(),256,[0,256])\n hist_g, bins = np.histogram(g.ravel(),256,[0,256])\n hist_b, bins = np.histogram(b.ravel(),256,[0,256])\n\n plt.xlim(0, 255)\n plt.plot(hist_r, \"-r\", label=\"Red\")\n plt.plot(hist_g, \"-g\", label=\"Green\")\n plt.plot(hist_b, \"-b\", label=\"Blue\")\n plt.xlabel(\"Pixel value\", fontsize=20)\n plt.ylabel(\"Number of pixels\", fontsize=20)\n plt.legend()\n plt.grid()\n plt.show()\n\n else:\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \n hist, bins = np.histogram(gray.ravel(),256,[0,256])\n\n plt.xlim(0, 255)\n plt.plot(hist)\n plt.xlabel(\"Pixel value\", fontsize=20)\n plt.ylaabel(\"Number of pixels\", fontsize=20)\n plt.grid()\n plt.show()\n\n\n # Exitする 全リセットして終了\n def button_quit(self):\n global FILTER_SET\n FILTER_SET = ()\n print(\"Good Bye.\")\n self.clear_module()\n exit()\n\n\n # クリア系統\n def clear_module(self):\n flag1 = os.path.exists(REAL_PATH)\n flag2 = os.path.exists(O_REAL_PATH)\n if flag1 == True:\n os.remove(REAL_PATH)\n if flag2 == True:\n os.remove(O_REAL_PATH)\n\n print(\"FILTER SET EMPTY\")\n\n \n #\n # beta作成時に発生していた、命令セットサブウィンドウが無限に増えるVer\n # 組み込んだ命令を動的にリストボックス表示したいがためにaction関数を繰り返し呼び出していた\n #\n # 命令追加系統\n #def action_add_bn(self):\n # self.show_selection_a()\n \n #def listbox_selected(self, event):\n # self.show_selection_a()\n \n # def show_selection_a(self):\n # global slb\n # global FILTER_SET\n # for i in lb_default.curselection():\n # slb = lb_default.get(i)\n # print(slb + \"を組み込みました\")\n # FILTER_SET += (slb, )\n # self.action()\n #\n # 命令削除系統\n # def action_del_bn(self):\n # self.show_selection_d()\n \n # def listbox_selected(self, event):\n # self.show_selection_d()\n \n # def show_selection_d(self):\n # global dlb\n # global FILTER_SET\n # for i in lb_new.curselection():\n # dlb = lb_new.get(i)\n # dlb_l = list(FILTER_SET) # タプルの要素削除はできないのでリストへ変更する\n # del dlb_l[i] # リストで選択されている部分を削除する\n # FILTER_SET = tuple(dlb_l) # タプルに戻す\n # print(dlb + \"を削除しました\")\n # self.action()\n #\n # action関数を繰り返し呼び出さず、組み込んだリストボックス(lb_new)を再表示させるために、以下のようにコード改変。\n # action関数に内部関数として呼び出すことで解決。\n #\n \n \n # 命令セット\n def action(self):\n global lb_default\n global lb_new\n \n sub_win = tk.Toplevel(master=self.master)\n sub_win.title(\"命令セット\")\n sub_win.geometry(\"480x240+100+50\")\n\n print(\"Open Assembly Set\")\n \n # 命令追加系統\n def action_add_bn():\n show_selection_a()\n \n def listbox_selected():\n show_selection_a()\n \n def show_selection_a():\n global lb_default\n global lb_new\n global FILTER_SET\n for i in lb_default.curselection():\n slb = lb_default.get(i)\n print(slb + \"を組み込みました\")\n FILTER_SET += (slb, )\n\n listbox_update()\n \n\n # 命令削除系統\n def action_del_bn():\n show_selection_d()\n \n def listbox_selected_d():\n show_selection_d()\n \n def show_selection_d():\n global lb_new\n global FILTER_SET\n for i in lb_new.curselection():\n dlb = lb_new.get(i)\n dlb_l = list(FILTER_SET) # タプルの要素削除はできないのでリストへ変更する\n del dlb_l[i] # リストで選択されている部分を削除する\n FILTER_SET = tuple(dlb_l) # タプルに戻す\n print(dlb + \"を削除しました\")\n \n listbox_update()\n\n\n # 命令全クリア\n def action_all_clear():\n global lb_new\n global FILTER_SET\n FILTER_SET = ()\n lb_new = ()\n print(\"命令を全消去しました\")\n \n listbox_update()\n \n\n # 命令入れ替え\n def action_change():\n global lb_new\n global FILTER_SET\n for i in lb_new.curselection():\n f = lb_new.get(i+1)\n if f != \"\":\n clb = list(FILTER_SET) # リスト変換\n clb[i] = lb_new.get(i+1)\n clb[i+1] = lb_new.get(i)\n FILTER_SET = tuple(clb) # タプル変換\n print(clb[i+1] + \"と\" + clb[i] + \"を入れ替えました\")\n else:\n print(\"下には何もありません\")\n \n 
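# redraw the instruction listboxes so the swapped order is visible immediately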
listbox_update()\n\n \n # 命令ソート\n def action_sort():\n global lb_new\n global FILTER_SET\n sort_l = list(FILTER_SET) # リスト変換\n sort_l.reverse()\n FILTER_SET = tuple(sort_l)\n print(\"全命令を逆順にソートしました\")\n \n listbox_update()\n \n\n # リストボックス用フレーム\n frame1 = tk.Frame(sub_win)\n frame1.place(x=10, y=50)\n frame2 = tk.Frame(sub_win)\n frame2.place(x=250, y=50)\n\n # 内部関数(ネストした関数)は関数オブジェクトとして扱う\n f1 = action_add_bn\n f2 = listbox_selected\n f3 = action_del_bn\n f4 = listbox_selected_d\n f5 = action_all_clear\n f6 = action_change\n f7 = action_sort\n\n # デフォルトでのリストボックスとスクロールバー生成\n v1 = tk.StringVar(value=FILTER)\n v2 = tk.StringVar(value=FILTER_SET)\n lb_default = tk.Listbox(frame1, listvariable=v1, width=18, height=10)\n lb_new = tk.Listbox(frame2, listvariable=v2, width=18, height=10)\n\n # リストボックスの配置\n lb_default.grid(row=0, column=0)\n lb_new.grid(row=0, column=0)\n\n # スクロールバーの詳細設定\n scrollbar_1 = tk.Scrollbar(frame1, orient=\"v\", command=lb_default.yview)\n scrollbar_2 = tk.Scrollbar(frame2, orient=\"v\", command=lb_new.yview)\n lb_default['yscrollcommand'] = scrollbar_1.set\n lb_new['yscrollcommand'] = scrollbar_2.set\n\n # スクロールバーの配置\n scrollbar_1.grid(row=0, column=1, sticky=tk.NS)\n scrollbar_2.grid(row=0, column=1, sticky=tk.NS)\n\n # 選択されている部分をバインドするためのもの\n lb_default.bind(\"<<Double-Button-1>>\", f2)\n lb_new.bind(\"<<Double-Button-1>>\", f4)\n\n # 各ボタンの役割と配置\n button = tk.Button(sub_win, text=\"Quit\", command=sub_win.destroy)\n button.place(x=10, y=10)\n button = tk.Button(sub_win, text=\"命令を組む\", command=f1)\n button.place(x=80, y=10)\n button = tk.Button(sub_win, text=\"命令を消す\", command=f3)\n button.place(x=250, y=10)\n\n button = tk.Button(sub_win, text=\"命令をクリア\", command=f5)\n button.place(x=350, y=10)\n\n button = tk.Button(sub_win, text=\"⇅\", command=f6, font=(\"\", 12))\n button.place(x=435, y=80)\n\n button = tk.Button(sub_win, text=\"↕\", command=f7, font=(\"\", 14))\n button.place(x=438, y=120)\n\n # 命令組み込み時にサブウィンドウへとフォーカスする、ウィンドウを無限に増やさない\n button.focus_set()\n sub_win.transient(self.master)\n sub_win.grab_set()\n \n\n # リストボックス更新\n def listbox_update():\n global lb_new\n global FILTER_SET\n # リストボックス更新\n frame2 = tk.Frame(sub_win)\n frame2.place(x=250, y=50)\n v2 = tk.StringVar(value=FILTER_SET)\n lb_new = tk.Listbox(frame2, listvariable=v2, width=18, height=10)\n lb_new.grid(row=0, column=0)\n scrollbar_2 = tk.Scrollbar(frame2, orient=\"v\", command=lb_new.yview)\n lb_new['yscrollcommand'] = scrollbar_2.set\n scrollbar_2.grid(row=0, column=1, sticky=tk.NS)\n \n # メインのリストボックスを更新\n frame = tk.Frame(root)\n frame.place(x=285, y=43)\n \n v = tk.StringVar(value=FILTER_SET)\n self.listbox_main = tk.Listbox(frame, listvariable=v, width=20, height=15, relief=tk.RIDGE, bd=2)\n self.scrollbar_m = tk.Scrollbar(frame, orient=\"v\", command=self.listbox_main.yview)\n self.listbox_main['yscrollcommand'] = self.scrollbar_m.set\n \n self.listbox_main.grid(row=0, column=0)\n self.scrollbar_m.grid(row=0, column=1, sticky=tk.NS)\n\n\n # 命令セット逐次実行\n def exe_action(self):\n global PT_FLAG\n PT_FLAG = 0\n print(len(FILTER_SET))\n # 作成した命令セットの長さ\n j = range(len(FILTER_SET))\n\n if FILTER_SET != ():\n # 命令セット分だけ順に実行する\n for i in j:\n # 初回はリサイズしたものを読み込み、以降は処理後を繰り返し読む\n if i == 0:\n if FILTER_SET[i] in {\"2値化\"}:\n print(\"2値化\")\n fc.Binary(REAL_PATH)\n elif FILTER_SET[i] in {\"グレイスケール\"}:\n print(\"グレイスケール\")\n fc.Gray(REAL_PATH)\n elif FILTER_SET[i] in {\"赤単色\"}:\n print(\"赤単色\")\n fc.Red(REAL_PATH)\n elif FILTER_SET[i] in {\"緑単色\"}:\n print(\"緑単色\")\n fc.Green(REAL_PATH)\n elif 
FILTER_SET[i] in {\"青単色\"}:\n print(\"青単色\")\n fc.Blue(REAL_PATH)\n elif FILTER_SET[i] in {\"色交換(赤青)\"}:\n print(\"色交換(赤青)\")\n fc.RtoB(REAL_PATH)\n elif FILTER_SET[i] in {\"色交換(赤緑)\"}:\n print(\"色交換(赤緑)\")\n fc.RtoG(REAL_PATH)\n elif FILTER_SET[i] in {\"色交換(緑青)\"}:\n print(\"色交換(緑青)\")\n fc.GtoB(REAL_PATH)\n elif FILTER_SET[i] in {\"HSV色空間(色相シフト)\"}:\n print(\"HSV色空間(色相シフト)\")\n fc.HSV_h(REAL_PATH)\n elif FILTER_SET[i] in {\"HSV色空間(彩度シフト)\"}:\n print(\"HSV色空間(彩度シフト)\")\n fc.HSV_s(REAL_PATH)\n elif FILTER_SET[i] in {\"HSV色空間(明度シフト)\"}:\n print(\"HSV色空間(明度シフト)\")\n fc.HSV_v(REAL_PATH)\n elif FILTER_SET[i] in {\"明るく\"}:\n print(\"明るく\")\n fc.Bright(REAL_PATH)\n elif FILTER_SET[i] in {\"暗く\"}:\n print(\"暗く\")\n fc.Dark(REAL_PATH)\n elif FILTER_SET[i] in {\"ガンマ補正\"}:\n print(\"ガンマ補正\")\n fc.Gamma(REAL_PATH)\n elif FILTER_SET[i] in {\"セピア\"}:\n print(\"セピア\")\n fc.Sepia(REAL_PATH)\n elif FILTER_SET[i] in {\"モザイク\"}:\n print(\"モザイク\")\n fc.Moza(REAL_PATH)\n elif FILTER_SET[i] in {\"ネガポジ反転\"}:\n print(\"ネガポジ反転\")\n fc.NegaPosi(REAL_PATH)\n elif FILTER_SET[i] in {\"減色\"}:\n print(\"減色\")\n fc.Loss(REAL_PATH)\n elif FILTER_SET[i] in {\"減色\"}:\n print(\"減色\")\n fc.Loss(REAL_PATH)\n elif FILTER_SET[i] in {\"ソーラライズ\"}:\n print(\"ソーラライズ\")\n fc.Solarize(REAL_PATH)\n elif FILTER_SET[i] in {\"ポスタライズ\"}:\n print(\"ポスタライズ\")\n fc.Posterize(REAL_PATH)\n elif FILTER_SET[i] in {\"イコライズ\"}:\n print(\"イコライズ\")\n fc.Equalize(REAL_PATH)\n elif FILTER_SET[i] in {\"ミラー\"}:\n print(\"ミラー\")\n fc.Mirror(REAL_PATH)\n elif FILTER_SET[i] in {\"回転(90度)\"}:\n print(\"回転(90度)\")\n fc.Rotate(REAL_PATH)\n elif FILTER_SET[i] in {\"膨張\"}:\n print(\"膨張\")\n fc.Dilate(REAL_PATH)\n elif FILTER_SET[i] in {\"収縮\"}:\n print(\"収縮\")\n fc.Erode(REAL_PATH)\n elif FILTER_SET[i] in {\"平均化\"}:\n print(\"平均化\")\n fc.Average(REAL_PATH)\n elif FILTER_SET[i] in {\"メディアンフィルタ\"}:\n print(\"メディアンフィルタ\")\n fc.Median(REAL_PATH)\n elif FILTER_SET[i] in {\"ガウシアンフィルタ\"}:\n print(\"ガウシアンフィルタ\")\n fc.Gaussian(REAL_PATH)\n elif FILTER_SET[i] in {\"DoG\"}:\n print(\"DoG\")\n fc.DoG(REAL_PATH)\n elif FILTER_SET[i] in {\"バイラテラルフィルタ\"}:\n print(\"バイラテラルフィルタ\")\n fc.Bilateral(REAL_PATH)\n elif FILTER_SET[i] in {\"ノンローカルミーンフィルタ\"}:\n print(\"ノンローカルミーンフィルタ\")\n fc.Nonlocal(REAL_PATH)\n elif FILTER_SET[i] in {\"一次微分(横)\"}:\n print(\"一次微分(横)\")\n fc.Diff_w(REAL_PATH)\n elif FILTER_SET[i] in {\"一次微分(縦)\"}:\n print(\"一次微分(縦)\")\n fc.Diff_h(REAL_PATH)\n elif FILTER_SET[i] in {\"Prewitt\"}:\n print(\"Prewitt\")\n fc.Prewitt(REAL_PATH)\n elif FILTER_SET[i] in {\"Sobel\"}:\n print(\"Sobel\")\n fc.Sobel(REAL_PATH)\n elif FILTER_SET[i] in {\"ラプラシアン\"}:\n print(\"ラプラシアン\")\n fc.Laplacian(REAL_PATH)\n elif FILTER_SET[i] in {\"ラプラシアン(PIL)\"}:\n print(\"ラプラシアン(PIL)\")\n fc.Laplacian_re(REAL_PATH)\n elif FILTER_SET[i] in {\"エンボス\"}:\n print(\"エンボス\")\n fc.Emboss(REAL_PATH)\n elif FILTER_SET[i] in {\"エンボス(PIL)\"}:\n print(\"エンボス(PIL)\")\n fc.Emboss_re(REAL_PATH)\n elif FILTER_SET[i] in {\"アンシャープマスキング\"}:\n print(\"アンシャープマスキング\")\n fc.UnsharpMask(REAL_PATH)\n elif FILTER_SET[i] in {\"ごま塩ノイズ\"}:\n print(\"ごま塩ノイズ\")\n fc.Salt_Noise(REAL_PATH)\n elif FILTER_SET[i] in {\"ガウシアンノイズ\"}:\n print(\"ガウシアンノイズ\")\n fc.GaussianNoise(REAL_PATH)\n elif FILTER_SET[i] in {\"フーリエ変換\"}:\n print(\"フーリエ変換\")\n fc.FFT(REAL_PATH)\n elif FILTER_SET[i] in {\"ローパスフィルタ\"}:\n print(\"ローパスフィルタ\")\n fc.Lowpass(REAL_PATH)\n elif FILTER_SET[i] in {\"ハイパスフィルタ\"}:\n print(\"ハイパスフィルタ\")\n fc.Highpass(REAL_PATH)\n elif FILTER_SET[i] in {\"顔検出\"}:\n print(\"顔検出\")\n fc.Face_check(REAL_PATH)\n elif FILTER_SET[i] 
in {\"顔面モザイク\"}:\n print(\"顔面モザイク\")\n fc.Face_Moza(REAL_PATH)\n elif FILTER_SET[i] in {\"ORB\"}:\n print(\"ORB\")\n fc.ORB(REAL_PATH)\n elif FILTER_SET[i] in {\"ヒデオ1\"}:\n print(\"Hideo\")\n fc.Hideo_1(REAL_PATH)\n elif FILTER_SET[i] in {\"ヒデオ2\"}:\n print(\"Hideo\")\n fc.Hideo_2(REAL_PATH)\n PT_FLAG = 1\n elif FILTER_SET[i] in {\"FOXDIE\"}:\n print(\"FOXDIE\")\n fc.Foxdie(REAL_PATH)\n else:\n print(\"ぶっこわれ\")\n\n \n # 初回以降\n else:\n if FILTER_SET[i] in {\"2値化\"}:\n print(\"2値化\")\n fc.Binary(O_REAL_PATH)\n elif FILTER_SET[i] in {\"グレイスケール\"}:\n print(\"グレイスケール\")\n fc.Gray(O_REAL_PATH)\n elif FILTER_SET[i] in {\"赤単色\"}:\n print(\"赤単色\")\n fc.Red(O_REAL_PATH)\n elif FILTER_SET[i] in {\"緑単色\"}:\n print(\"緑単色\")\n fc.Green(O_REAL_PATH)\n elif FILTER_SET[i] in {\"青単色\"}:\n print(\"青単色\")\n fc.Blue(O_REAL_PATH)\n elif FILTER_SET[i] in {\"色交換(赤青)\"}:\n print(\"色交換(赤青)\")\n fc.RtoB(O_REAL_PATH)\n elif FILTER_SET[i] in {\"色交換(赤緑)\"}:\n print(\"色交換(赤緑)\")\n fc.RtoG(O_REAL_PATH)\n elif FILTER_SET[i] in {\"色交換(緑青)\"}:\n print(\"色交換(緑青)\")\n fc.GtoB(O_REAL_PATH)\n elif FILTER_SET[i] in {\"HSV色空間(色相シフト)\"}:\n print(\"HSV色空間(色相シフト)\")\n fc.HSV_h(O_REAL_PATH)\n elif FILTER_SET[i] in {\"HSV色空間(彩度シフト)\"}:\n print(\"HSV色空間(彩度シフト)\")\n fc.HSV_s(O_REAL_PATH)\n elif FILTER_SET[i] in {\"HSV色空間(明度シフト)\"}:\n print(\"HSV色空間(明度シフト)\")\n fc.HSV_v(O_REAL_PATH)\n elif FILTER_SET[i] in {\"明るく\"}:\n print(\"明るく\")\n fc.Bright(O_REAL_PATH)\n elif FILTER_SET[i] in {\"暗く\"}:\n print(\"暗く\")\n fc.Dark(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ガンマ補正\"}:\n print(\"ガンマ補正\")\n fc.Gamma(O_REAL_PATH)\n elif FILTER_SET[i] in {\"セピア\"}:\n print(\"セピア\")\n fc.Sepia(O_REAL_PATH)\n elif FILTER_SET[i] in {\"モザイク\"}:\n print(\"モザイク\")\n fc.Moza(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ネガポジ反転\"}:\n print(\"ネガポジ反転\")\n fc.NegaPosi(O_REAL_PATH)\n elif FILTER_SET[i] in {\"減色\"}:\n print(\"減色\")\n fc.Loss(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ソーラライズ\"}:\n print(\"ソーラライズ\")\n fc.Solarize(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ポスタライズ\"}:\n print(\"ポスタライズ\")\n fc.Posterize(O_REAL_PATH)\n elif FILTER_SET[i] in {\"イコライズ\"}:\n print(\"イコライズ\")\n fc.Equalize(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ミラー\"}:\n print(\"ミラー\")\n fc.Mirror(O_REAL_PATH)\n elif FILTER_SET[i] in {\"回転(90度)\"}:\n print(\"回転(90度\")\n fc.Rotate(O_REAL_PATH)\n elif FILTER_SET[i] in {\"膨張\"}:\n print(\"膨張\")\n fc.Dilate(O_REAL_PATH)\n elif FILTER_SET[i] in {\"収縮\"}:\n print(\"収縮\")\n fc.Erode(O_REAL_PATH)\n elif FILTER_SET[i] in {\"平均化\"}:\n print(\"平均化\")\n fc.Average(O_REAL_PATH)\n elif FILTER_SET[i] in {\"メディアンフィルタ\"}:\n print(\"メディアンフィルタ\")\n fc.Median(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ガウシアンフィルタ\"}:\n print(\"ガウシアンフィルタ\")\n fc.Gaussian(O_REAL_PATH)\n elif FILTER_SET[i] in {\"DoG\"}:\n print(\"DoG\")\n fc.DoG(O_REAL_PATH)\n elif FILTER_SET[i] in {\"バイラテラルフィルタ\"}:\n print(\"バイラテラルフィルタ\")\n fc.Bilateral(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ノンローカルミーンフィルタ\"}:\n print(\"ノンローカルミーンフィルタ\")\n fc.Nonlocal(O_REAL_PATH)\n elif FILTER_SET[i] in {\"一次微分(横)\"}:\n print(\"一次微分(横)\")\n fc.Diff_w(O_REAL_PATH)\n elif FILTER_SET[i] in {\"一次微分(縦)\"}:\n print(\"一次微分(縦)\")\n fc.Diff_h(O_REAL_PATH)\n elif FILTER_SET[i] in {\"Prewitt\"}:\n print(\"Prewitt\")\n fc.Prewitt(O_REAL_PATH)\n elif FILTER_SET[i] in {\"Sobel\"}:\n print(\"Sobel\")\n fc.Sobel(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ラプラシアン\"}:\n print(\"ラプラシアン\")\n fc.Laplacian(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ラプラシアン(PIL)\"}:\n print(\"ラプラシアン(PIL)\")\n fc.Laplacian_re(O_REAL_PATH)\n elif FILTER_SET[i] in {\"エンボス\"}:\n 
print(\"エンボス\")\n fc.Emboss(O_REAL_PATH)\n elif FILTER_SET[i] in {\"エンボス(PIL)\"}:\n print(\"エンボス(PIL)\")\n fc.Emboss_re(O_REAL_PATH)\n elif FILTER_SET[i] in {\"アンシャープマスキング\"}:\n print(\"アンシャープマスキング\")\n fc.UnsharpMask(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ごま塩ノイズ\"}:\n print(\"ごま塩ノイズ\")\n fc.Salt_Noise(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ガウシアンノイズ\"}:\n print(\"ガウシアンノイズ\")\n fc.GaussianNoise(O_REAL_PATH)\n elif FILTER_SET[i] in {\"フーリエ変換\"}:\n print(\"フーリエ変換\")\n fc.FFT(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ローパスフィルタ\"}:\n print(\"ローパスフィルタ\")\n fc.Lowpass(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ハイパスフィルタ\"}:\n print(\"ハイパスフィルタ\")\n fc.Highpass(O_REAL_PATH)\n elif FILTER_SET[i] in {\"顔検出\"}:\n print(\"顔検出\")\n fc.Face_check(O_REAL_PATH)\n elif FILTER_SET[i] in {\"顔面モザイク\"}:\n print(\"顔面モザイク\")\n fc.Face_Moza(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ORB\"}:\n print(\"ORB\")\n fc.ORB(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ヒデオ1\"}:\n print(\"Hideo\")\n fc.Hideo_1(O_REAL_PATH)\n elif FILTER_SET[i] in {\"ヒデオ2\"}:\n print(\"Hideo\")\n fc.Hideo_2(O_REAL_PATH)\n PT_FLAG = 1\n elif FILTER_SET[i] in {\"FOXDIE\"}:\n print(\"FOXDIE\")\n fc.Foxdie(O_REAL_PATH)\n else:\n print(\"ぶっこわれ\")\n\n self.img_2 = ImageTk.PhotoImage(file=O_REAL_PATH)\n self.o_canvas.create_image(133, 134, image=self.img_2)\n\n if PT_FLAG == 1:\n self.do_PT()\n \n else:\n print(\"命令セットが組み込まれていません\")\n \n\n # おまけ\n def do_PT(self):\n PT_win = tk.Toplevel(master=self.master)\n PT_win.title(\"204863\")\n PT_win.geometry(\"1280x720+100+50\")\n\n PT_l = [\"./PT/PT_1.txt\", \"./PT/PT_2.txt\", \"./PT/PT_3.txt\", \"./PT/PT_4.txt\", \"./PT/PT_5.txt\"]\n\n str = random.choice(PT_l)\n PT_t = open(str, \"r\")\n text_in = PT_t.read()\n\n if str == \"./PT/PT_1.txt\":\n canvas = tk.Canvas(PT_win, width=1280, height=720)\n canvas.create_rectangle(0, 0, 1280, 720, fill=\"black\")\n canvas.pack(fill=\"x\")\n label = tk.Label(PT_win, text=text_in, justify=\"left\", foreground=\"white\", background=\"black\")\n label.place(x=40, y=50)\n elif str == \"./PT/PT_2.txt\":\n canvas = tk.Canvas(PT_win, width=1280, height=720)\n canvas.create_rectangle(0, 0, 1280, 720, fill=\"white\")\n canvas.pack(fill=\"x\")\n label = tk.Label(PT_win, text=text_in, justify=\"left\", foreground=\"black\", background=\"white\")\n label.place(x=40, y=300)\n elif str == \"./PT/PT_3.txt\":\n canvas = tk.Canvas(PT_win, width=1280, height=720)\n canvas.create_rectangle(0, 0, 1280, 720, fill=\"gray\")\n canvas.pack(fill=\"x\")\n label = tk.Label(PT_win, text=text_in, justify=\"right\", foreground=\"black\", background=\"gray\")\n label.place(x=700, y=50)\n elif str == \"./PT/PT_4.txt\":\n canvas = tk.Canvas(PT_win, width=1280, height=720)\n canvas.create_rectangle(0, 0, 1280, 720, fill=\"black\")\n canvas.pack(fill=\"x\")\n label = tk.Label(PT_win, text=text_in, justify=\"left\", foreground=\"white\", background=\"black\")\n label.place(x=40, y=50)\n elif str == \"./PT/PT_5.txt\":\n canvas = tk.Canvas(PT_win, width=1280, height=720)\n canvas.create_rectangle(0, 0, 1280, 720, fill=\"yellow\")\n canvas.pack(fill=\"x\")\n label = tk.Label(PT_win, text=text_in, justify=\"left\", foreground=\"black\", background=\"yellow\")\n label.place(x=10, y=10)\n \n PT_win.transient(self.master)\n PT_win.grab_set()\n PT_win.focus_set()\n\n PT_t.close()\n \n\n# ひながた \nif __name__ == \"__main__\":\n app = Application(master=root)\n app.pack()\n 
app.mainloop()\n","sub_path":"KPCAS_beta.py","file_name":"KPCAS_beta.py","file_ext":"py","file_size_in_byte":41660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"188527544","text":"from mnist import MNIST\nimport sklearn.metrics as metrics\nimport numpy as np\nimport scipy\nimport pdb\nimport csv\n\nNUM_CLASSES = 10\n\ndef load_dataset():\n mndata = MNIST('./data/')\n X_train, labels_train = map(np.array, mndata.load_training())\n X_test, labels_test = map(np.array, mndata.load_testing())\n X_train = X_train/255.0\n X_test = X_test/255.0\n return (X_train, labels_train), (X_test, labels_test)\n\n\ndef train(X_train, y_train, reg=0):\n ''' Build a model from X_train -> y_train '''\n ### From HW1\n d = len(X_train[0])\n identity_xx = np.identity(d)\n inv = np.linalg.inv(np.dot(np.transpose(X_train), X_train) + reg * identity_xx)\n outer_y = np.dot(np.transpose(X_train), y_train)\n w = np.dot(inv, outer_y)\n return w\n\ndef train_gd(X_train, y_train, alpha=0.1, reg=0, num_iter=10000):\n ''' Build a model from X_train -> y_train using batch gradient descent '''\n p = len(X_train[0])\n W = np.random.rand(p, y_train.shape[1])\n identity_xx = np.identity(p)\n x_t_x_reg = np.dot(np.transpose(X_train), X_train) + reg * identity_xx\n outer_y = np.dot(np.transpose(X_train), y_train)\n\n for i in range(num_iter):\n grad_f = -2.0/p * (np.dot(x_t_x_reg, W) - outer_y)\n W = W + alpha * grad_f\n return W\n\n\ndef train_sgd(X_train, y_train, alpha=0.1, reg=0, num_iter=10000):\n ''' Build a model from X_train -> y_train using stochastic gradient descent '''\n p = len(X_train[0])\n W = np.random.rand(p, y_train.shape[1])\n identity_p = np.identity(p)\n for i in range(num_iter):\n # if i % 1000 == 0:\n # print(str(i))\n rand_index = np.random.randint(0, y_train.shape[0])\n x_t_x_reg = np.dot(np.atleast_2d(X_train[rand_index]).T, np.atleast_2d(X_train[rand_index])) + reg * identity_p\n outer_y = np.dot(np.atleast_2d(X_train[rand_index]).T, np.atleast_2d(y_train[rand_index]))\n grad_f = -2.0/p * (np.dot(x_t_x_reg, W) - outer_y)\n W = W + alpha * grad_f\n return W\n\n\n\ndef one_hot(labels_train):\n '''Convert categorical labels 0,1,2,....9 to standard basis vectors in R^{10} '''\n ### From HW1\n result = np.zeros((len(labels_train), NUM_CLASSES))\n for i in range(len(labels_train)):\n result[i][labels_train[i]] = 1.0\n return result\n\ndef predict(model, X):\n ''' From model and data points, output prediction vectors '''\n ### From HW 1\n predicted = np.dot(X, model)\n y_result = np.zeros(len(predicted))\n for i in range(len(y_result)):\n y_result[i] = np.argmax(predicted[i])\n return y_result\n\ndef generate_W_B(n_train, n_test, d):\n pdb.set_trace()\n mean = np.zeros(d)\n cov = variance * np.identity(d)\n W = np.random.multivariate_normal(mean, cov, p)\n b = np.random.uniform(0.0, 2 * np.pi, p)\n B_train = np.tile(b, (n_train,1)).transpose()\n B_test = np.tile(b, (n_test,1)).transpose()\n return W, B_train, B_test\n\ndef phi(X, W, B):\n ''' Featurize the inputs using random Fourier features '''\n result = np.dot(W, X.transpose()) + B\n return np.sqrt(2.0/p) * np.cos(result.transpose())\n\n\nif __name__ == \"__main__\":\n (X_train, labels_train), (X_test, labels_test) = load_dataset()\n y_train = one_hot(labels_train)\n y_test = one_hot(labels_test)\n\n ### Tuneable parameters #97% with p=3000 and var =.01\n p = 3000 ## how many GR features we want\n variance = .01 ## variance of the GR variables\n print('p: ' + str(p) + ' var: ' + str(variance))\n W, 
B_train, B_test = generate_W_B(X_train.shape[0], X_test.shape[0], X_train.shape[1])\n X_train, X_test = phi(X_train, W, B_train), phi(X_test, W, B_test)\n\n print(\"calculating closed form sol\")\n model = train(X_train, y_train, reg=0.1)\n model.shape\n pred_labels_train = predict(model, X_train)\n pred_labels_test = predict(model, X_test)\n print(\"Closed form solution\")\n print(\"Train accuracy: {0}\".format(metrics.accuracy_score(labels_train, pred_labels_train)))\n print(\"Test accuracy: {0}\".format(metrics.accuracy_score(labels_test, pred_labels_test)))\n\n # CSV STUFF\n # c = csv.writer(open(\"kaggle.csv\", \"wt\"))\n # c.writerow(['Id', 'Category'])\n # for i in range(len(pred_labels_test)):\n # c.writerow( (i, int(pred_labels_test[i])))\n\n\n print('starting gradient descent')\n model = train_gd(X_train, y_train, alpha=1e-3, reg=0.1, num_iter=100000)\n pred_labels_train = predict(model, X_train)\n pred_labels_test = predict(model, X_test)\n print(\"Batch gradient descent\")\n print(\"Train accuracy: {0}\".format(metrics.accuracy_score(labels_train, pred_labels_train)))\n print(\"Test accuracy: {0}\".format(metrics.accuracy_score(labels_test, pred_labels_test)))\n\n print('starting stochastic gradient descent')\n model = train_sgd(X_train, y_train, alpha=1e-1, reg=0.1, num_iter=100000)\n pred_labels_train = predict(model, X_train)\n pred_labels_test = predict(model, X_test)\n print(\"Stochastic gradient descent\")\n print(\"Train accuracy: {0}\".format(metrics.accuracy_score(labels_train, pred_labels_train)))\n print(\"Test accuracy: {0}\".format(metrics.accuracy_score(labels_test, pred_labels_test)))\n","sub_path":"hw2_code/hw2_varun.py","file_name":"hw2_varun.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"573542211","text":"from django.conf.urls import url\nfrom . 
import views\n# If this is written as url('login/',views.login,name='login') without the ^ anchor,\n# login and do_login get mixed up and visiting do_login serves the login page:\n# url() matches with an unanchored regex search, so 'login/' also matches 'do_login/';\n# anchor every pattern with r'^'.\nurlpatterns = [\n url(r'^login/',views.login,name='login'),\n url(r'^do_login/',views.do_login,name='do_login'),\n url(r'^test/',views.test,name='test'),\n url(r'^mine/',views.mine,name='mine'),\n url(r'^t_mine',views.t_mine,name='t_mine'),\n url(r'^t_login',views.t_login,name='t_login'),\n]","sub_path":"django_study/Cookie_session/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"343029904","text":"from xlutils.copy import copy\nfrom lib.excelmanage import readExcel,getnewexcel\nfrom lib.sendcourserequset import sendcourserequest\nfrom lib.loginLib import loginlib\nimport json\nfrom config import savepath,path,path1\n# get a new workbook\nnewworkbook=getnewexcel(path)\nworksheet=newworkbook.get_sheet(0)\nrows=readExcel(path,0)\nprint(rows)\nsessionID=loginlib('auto','sdfsdfsdf')\nfor i in range(0,len(rows)):\n row=rows[i]\n print(row)\n ret=sendcourserequest(row,sessionID)\n print(ret)\n data1=json.loads(row[6])\n print(data1)\n if ret['retcode']==data1['code']:\n print(row[0]+'测试通过')\n worksheet.write(i+1, 7, '测试通过')\n else:\n print(row[0]+'测试不通过')\n worksheet.write(i + 1, 7, '测试不通过')\n if 'reason' in ret.keys(): # a failed case does not always carry a reason, so check the key first\n worksheet.write(i + 1, 8, ret['reason']) # write the reason into row i+1, column 9\nnewworkbook.save(savepath)","sub_path":"testcase/doexcel.py","file_name":"doexcel.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"46664745","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\n\n\"\"\"\nSimple Linear Regression\n\nFind the linear function that minimizes the sum of squared differences between the actual values and the predicted values.\n\"\"\"\n\n# Importing the dataset\ndataset = pd.read_csv('Salary_Data.csv')\nX = dataset.iloc[:, :-1].values # X is the matrix of independent variables\ny = dataset.iloc[:, 1].values # y is the dependent-variable vector\n\n\n\n# Splitting the dataset into the Training set and Test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)\n\n\n\n# Fit simple linear regression to the training set\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n\n\n# Predict\ny_pred = regressor.predict(X_test)\n\nprint('===== REAL =====')\nprint(y_test)\nprint('===== PREDICT =====')\nprint(y_pred)\n\n\n\n# Visualize\nplt.scatter(X_train, y_train, color = 'red') # actual values from the training set\nplt.scatter(X_test, y_test, color = 'green') # actual values from the test set\nplt.plot(X_train, regressor.predict(X_train), color = 'blue') # the fitted regression line\nplt.xlabel('Years')\nplt.ylabel('Salary')\nplt.show()\n","sub_path":"02_simple_linear_regression/simple_linear_regression.py","file_name":"simple_linear_regression.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"321688635","text":"'''\nInput: a List of integers\nReturns: a List of integers\n'''\ndef moving_zeroes(arr):\n # create zeroes array with len(arr)\n moved_zeroes = [0] * len(arr)\n\n i = 0\n\n # loop through array\n for k in range(len(arr)):\n # if element is non-zero, overwrite from left\n if arr[k] != 0:\n moved_zeroes[i] = arr[k]\n i += 1\n\n return moved_zeroes\n\n\nif __name__ == '__main__':\n # Use the main function here to test out your implementation\n arr = [0, 3, 1, 
0, -2]\n\n print(f\"The resulting of moving_zeroes is: {moving_zeroes(arr)}\")","sub_path":"moving_zeroes/moving_zeroes.py","file_name":"moving_zeroes.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"333946306","text":"import cv2\nimport cnn\nfrom keras.models import load_model\nimport os\nimport numpy as np\n\ndef zero_pad(x,n):\n for i in range(1,5):\n if x < 10 ** i:\n return (n-i)*'0'+str(x)\n\n\ndef getNutId(x):\n return(x[4])\n\ndef subNutId(x,id):\n s = list(x)\n s[4] = id\n return \"\".join(s)\n\ndef getNutNumber(x):\n return(x[6:len(x)-4])\n\n\npairs = {\n '0':'6',\n '2':'4'\n}\n\ndef getSide(x):\n return x[4]\n\nW = 160\nH = 120\n\nclass predictor:\n model_1 = {}\n model_2 = {}\n\n def __init__(self,model_1,model_2):\n self.model_1 = model_1\n self.model_2 = model_2\n\n def predict(self,img1,img2):\n p_1 = self.model_1.predict_proba(img1, 1, 0)\n p_2 = self.model_2.predict_proba(img2, 1, 0)\n if (p_1 + p_2)> 1:\n return 1\n else:\n return 0\n\nnut_dir_sep = 'good_reg'\n\npairs = {\n '0': '6',\n '2': '4'\n}\n\nnew_imgs_files = [f for f in os.listdir(nut_dir_sep)]\nout = []\nweights = load_model('model_sep_0.h5').get_weights()\nmy_cnn_1 = cnn.cnn_sep(img_width=W, img_height=H)\nmy_cnn_1.set_weights(weights)\nweights = load_model('model_sep_6.h5').get_weights()\nmy_cnn_2 = cnn.cnn_sep(img_width=W, img_height=H)\nmy_cnn_2.set_weights(weights)\n\n\npred_0_6 = predictor(my_cnn_1,my_cnn_2)\nweights = load_model('model_sep_2.h5').get_weights()\nmy_cnn_1 = cnn.cnn_sep(img_width=W, img_height=H)\nmy_cnn_1.set_weights(weights)\nweights = load_model('model_sep_4.h5').get_weights()\nmy_cnn_2 = cnn.cnn_sep(img_width=W, img_height=H)\nmy_cnn_2.set_weights(weights)\n\npred_2_4 = predictor(my_cnn_1,my_cnn_2)\n\n\n\nout = []\np=0\nfor f in new_imgs_files:\n if f != 'labels.csv':\n id = getNutId(f)\n num = getNutNumber(f)\n if id == '0':\n pair_id = pairs[id]\n img_org = cv2.imread(os.path.join(nut_dir_sep, f))\n img_pair = cv2.imread(os.path.join(nut_dir_sep, subNutId(f, pair_id)))\n #predic = pred.predict(img_org,img_org)\n p=pred_0_6.predict(img_org.reshape([-1, 120, 160, 3]),img_pair.reshape([-1, 120, 160, 3]))\n out.append(p)\n print(f+\" :\" + str(p))\n if id == '2':\n pair_id = pairs[id]\n img_org = cv2.imread(os.path.join(nut_dir_sep, f))\n img_pair = cv2.imread(os.path.join(nut_dir_sep, subNutId(f, pair_id)))\n #predic = pred.predict(img_org,img_org)\n p=pred_2_4.predict(img_org.reshape([-1, 120, 160, 3]),img_pair.reshape([-1, 120, 160, 3]))\n print(f+\" :\" + str(p))\n out.append(p)\n\nprint(\"Performance = \"+str(sum(out)/len(out)))\nprint(\"buenas: \"+str(len(out)-sum(out))+\" totales: \"+str(len(out)))\n\n","sub_path":"reg_testing.py","file_name":"reg_testing.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"176399550","text":"import pandas as pd\n\ndf = pd.read_csv('data_tiers.csv')\nlocs = pd.read_csv('brands_luxury_merge.csv')\n\ndf = df.drop('Unnamed: 9',1)\n\ndf = df.drop('Unnamed: 10',1)\ndf = df. drop('Unnamed: 11',1)\n\ndf = df. 
drop('tier',1)\n\ndf.insert(0, 'ID', range(0,len(df)))\n\ni = 0\nflag =0\n# [0 1] - Designer, [1 0] - Fast/Mid\nfor index1,row1 in df.iterrows():\n for index2,row2 in locs.iterrows():\n if i==row2[0]:\n row1['Tier']='0 1'\n row1.to_csv('data_t.csv',mode='a',header=False, index=False)\n print(row1['name'])\n print(row1['Tier'])\n flag = 1\n if flag == 0:\n row1['Tier']='1 0'\n row1.to_csv('data_t.csv',mode='a',header=False, index=False)\n print(row1['name'])\n flag = 0\n print(i)\n i = i+1\n\ndf.info(verbose=True)\nlocs.info(verbose=True)\ndf.info(verbose=True)\n","sub_path":"kalo_style_classification-master-updated/label_tiers.py","file_name":"label_tiers.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"399151614","text":"class Solution(object):\n def intToRoman(self, num):\n A=[\"I\", \"IV\", \"V\", \"IX\", \"X\", \"XL\", \"L\", \"XC\", \"C\", \"CD\", \"D\", \"CM\", \"M\"] # set up two parallel lists of symbols and values\n B=[1, 4, 5, 9, 10, 40, 50, 90, 100, 400, 500, 900, 1000]\n res=\"\"\n for i in range(len(A)-1,-1,-1): # traverse from the back, comparing num with each value in B; range(start, end, step) includes start but excludes end\n while num>=B[i]: # subtract the value in B and append the corresponding symbol from A\n res+=A[i]\n num-=B[i]\n return res\n\nif __name__==\"__main__\":\n num=1994\n print(Solution().intToRoman(num))\n","sub_path":"leetcode12/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"209046373","text":"import numpy as np\nfrom sklearn.utils import check_X_y\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.dummy import DummyRegressor\n\nfrom .double_ml import DoubleML\nfrom ._utils import _dml_cv_predict, _dml_tune\n\n\nclass DoubleMLPLIV(DoubleML):\n \"\"\"Double machine learning for partially linear IV regression models\n\n Parameters\n ----------\n obj_dml_data : :class:`DoubleMLData` object\n The :class:`DoubleMLData` object providing the data and specifying the variables for the causal model.\n\n ml_g : estimator implementing ``fit()`` and ``predict()``\n A machine learner implementing ``fit()`` and ``predict()`` methods (e.g.\n :py:class:`sklearn.ensemble.RandomForestRegressor`) for the nuisance function :math:`g_0(X) = E[Y|X]`.\n\n ml_m : estimator implementing ``fit()`` and ``predict()``\n A machine learner implementing ``fit()`` and ``predict()`` methods (e.g.\n :py:class:`sklearn.ensemble.RandomForestRegressor`) for the nuisance function :math:`m_0(X) = E[Z|X]`.\n\n ml_r : estimator implementing ``fit()`` and ``predict()``\n A machine learner implementing ``fit()`` and ``predict()`` methods (e.g.\n :py:class:`sklearn.ensemble.RandomForestRegressor`) for the nuisance function :math:`r_0(X) = E[D|X]`.\n\n n_folds : int\n Number of folds.\n Default is ``5``.\n\n n_rep : int\n Number of repetitions for the sample splitting.\n Default is ``1``.\n\n score : str or callable\n A str (``'partialling out'`` is the only choice) specifying the score function\n or a callable object / function with signature ``psi_a, psi_b = score(y, z, d, g_hat, m_hat, r_hat, smpls)``.\n Default is ``'partialling out'``.\n\n dml_procedure : str\n A str (``'dml1'`` or ``'dml2'``) specifying the double machine learning algorithm.\n Default is ``'dml2'``.\n\n draw_sample_splitting : bool\n Indicates whether the sample splitting should be drawn during initialization of the object.\n Default is 
``True``.\n\n apply_cross_fitting : bool\n Indicates whether cross-fitting should be applied.\n Default is ``True``.\n\n Examples\n --------\n >>> import numpy as np\n >>> import doubleml as dml\n >>> from doubleml.datasets import make_pliv_CHS2015\n >>> from sklearn.ensemble import RandomForestRegressor\n >>> from sklearn.base import clone\n >>> np.random.seed(3141)\n >>> learner = RandomForestRegressor(n_estimators=100, max_features=20, max_depth=5, min_samples_leaf=2)\n >>> ml_g = clone(learner)\n >>> ml_m = clone(learner)\n >>> ml_r = clone(learner)\n >>> data = make_pliv_CHS2015(alpha=0.5, n_obs=500, dim_x=20, dim_z=1, return_type='DataFrame')\n >>> obj_dml_data = dml.DoubleMLData(data, 'y', 'd', z_cols='Z1')\n >>> dml_pliv_obj = dml.DoubleMLPLIV(obj_dml_data, ml_g, ml_m, ml_r)\n >>> dml_pliv_obj.fit().summary\n coef std err t P>|t| 2.5 % 97.5 %\n d 0.522753 0.082263 6.354688 2.088504e-10 0.361521 0.683984\n\n Notes\n -----\n **Partially linear IV regression (PLIV)** models take the form\n\n .. math::\n\n Y - D \\\\theta_0 = g_0(X) + \\\\zeta, & &\\\\mathbb{E}(\\\\zeta | Z, X) = 0,\n\n Z = m_0(X) + V, & &\\\\mathbb{E}(V | X) = 0.\n\n where :math:`Y` is the outcome variable, :math:`D` is the policy variable of interest and :math:`Z`\n denotes one or multiple instrumental variables. The high-dimensional vector\n :math:`X = (X_1, \\\\ldots, X_p)` consists of other confounding covariates, and :math:`\\\\zeta` and\n :math:`V` are stochastic errors.\n \"\"\"\n def __init__(self,\n obj_dml_data,\n ml_g,\n ml_m,\n ml_r,\n n_folds=5,\n n_rep=1,\n score='partialling out',\n dml_procedure='dml2',\n draw_sample_splitting=True,\n apply_cross_fitting=True):\n super().__init__(obj_dml_data,\n n_folds,\n n_rep,\n score,\n dml_procedure,\n draw_sample_splitting,\n apply_cross_fitting)\n\n self._check_data(self._dml_data)\n self._check_score(self.score)\n self.partialX = True\n self.partialZ = False\n _ = self._check_learner(ml_g, 'ml_g', regressor=True, classifier=False)\n _ = self._check_learner(ml_m, 'ml_m', regressor=True, classifier=False)\n _ = self._check_learner(ml_r, 'ml_r', regressor=True, classifier=False)\n self._learner = {'ml_g': ml_g, 'ml_m': ml_m, 'ml_r': ml_r}\n self._predict_method = {'ml_g': 'predict', 'ml_m': 'predict', 'ml_r': 'predict'}\n self._initialize_ml_nuisance_params()\n\n @classmethod\n def _partialX(cls,\n obj_dml_data,\n ml_g,\n ml_m,\n ml_r,\n n_folds=5,\n n_rep=1,\n score='partialling out',\n dml_procedure='dml2',\n draw_sample_splitting=True,\n apply_cross_fitting=True):\n obj = cls(obj_dml_data,\n ml_g,\n ml_m,\n ml_r,\n n_folds,\n n_rep,\n score,\n dml_procedure,\n draw_sample_splitting,\n apply_cross_fitting)\n obj._check_data(obj._dml_data)\n obj._check_score(obj.score)\n obj.partialX = True\n obj.partialZ = False\n _ = obj._check_learner(ml_g, 'ml_g', regressor=True, classifier=False)\n _ = obj._check_learner(ml_m, 'ml_m', regressor=True, classifier=False)\n _ = obj._check_learner(ml_r, 'ml_r', regressor=True, classifier=False)\n obj._learner = {'ml_g': ml_g, 'ml_m': ml_m, 'ml_r': ml_r}\n obj._predict_method = {'ml_g': 'predict', 'ml_m': 'predict', 'ml_r': 'predict'}\n obj._initialize_ml_nuisance_params()\n return obj\n\n @classmethod\n def _partialZ(cls,\n obj_dml_data,\n ml_r,\n n_folds=5,\n n_rep=1,\n score='partialling out',\n dml_procedure='dml2',\n draw_sample_splitting=True,\n apply_cross_fitting=True):\n # to pass the checks for the learners, we temporarily set ml_g and ml_m to DummyRegressor()\n obj = cls(obj_dml_data,\n DummyRegressor(),\n 
DummyRegressor(),\n ml_r,\n n_folds,\n n_rep,\n score,\n dml_procedure,\n draw_sample_splitting,\n apply_cross_fitting)\n obj._check_data(obj._dml_data)\n obj._check_score(obj.score)\n obj.partialX = False\n obj.partialZ = True\n _ = obj._check_learner(ml_r, 'ml_r', regressor=True, classifier=False)\n obj._learner = {'ml_r': ml_r}\n obj._predict_method = {'ml_r': 'predict'}\n obj._initialize_ml_nuisance_params()\n return obj\n\n @classmethod\n def _partialXZ(cls,\n obj_dml_data,\n ml_g,\n ml_m,\n ml_r,\n n_folds=5,\n n_rep=1,\n score='partialling out',\n dml_procedure='dml2',\n draw_sample_splitting=True,\n apply_cross_fitting=True):\n obj = cls(obj_dml_data,\n ml_g,\n ml_m,\n ml_r,\n n_folds,\n n_rep,\n score,\n dml_procedure,\n draw_sample_splitting,\n apply_cross_fitting)\n obj._check_data(obj._dml_data)\n obj._check_score(obj.score)\n obj.partialX = True\n obj.partialZ = True\n _ = obj._check_learner(ml_g, 'ml_g', regressor=True, classifier=False)\n _ = obj._check_learner(ml_m, 'ml_m', regressor=True, classifier=False)\n _ = obj._check_learner(ml_r, 'ml_r', regressor=True, classifier=False)\n obj._learner = {'ml_g': ml_g, 'ml_m': ml_m, 'ml_r': ml_r}\n obj._predict_method = {'ml_g': 'predict', 'ml_m': 'predict', 'ml_r': 'predict'}\n obj._initialize_ml_nuisance_params()\n return obj\n\n def _initialize_ml_nuisance_params(self):\n if self.partialX & (not self.partialZ):\n if self._dml_data.n_instr == 1:\n valid_learner = ['ml_g', 'ml_m', 'ml_r']\n else:\n valid_learner = ['ml_g', 'ml_r'] + ['ml_m_' + z_col for z_col in self._dml_data.z_cols]\n elif (not self.partialX) & self.partialZ:\n valid_learner = ['ml_r']\n else:\n assert (self.partialX & self.partialZ)\n valid_learner = ['ml_g', 'ml_m', 'ml_r']\n self._params = {learner: {key: [None] * self.n_rep for key in self._dml_data.d_cols}\n for learner in valid_learner}\n\n def _check_score(self, score):\n if isinstance(score, str):\n valid_score = ['partialling out']\n # check whether its worth implementing the IV_type as well\n # In CCDHNR equation (4.7) a score of this type is provided;\n # however in the following paragraph it is explained that one might\n # still need to estimate the partialling out type first\n if score not in valid_score:\n raise ValueError('Invalid score ' + score + '. ' +\n 'Valid score ' + 'partialling out.')\n else:\n if not callable(score):\n raise TypeError('score should be either a string or a callable. '\n '%r was passed.' % score)\n return score\n\n def _check_data(self, obj_dml_data):\n if obj_dml_data.n_instr == 0:\n raise ValueError('Incompatible data. ' +\n 'At least one variable must be set as instrumental variable. 
'\n 'To fit a partially linear regression model without instrumental variable(s) '\n 'use DoubleMLPLR instead of DoubleMLPLIV.')\n return\n\n def _ml_nuisance_and_score_elements(self, smpls, n_jobs_cv):\n if self.partialX & (not self.partialZ):\n psi_a, psi_b, preds = self._ml_nuisance_and_score_elements_partial_x(smpls, n_jobs_cv)\n elif (not self.partialX) & self.partialZ:\n psi_a, psi_b, preds = self._ml_nuisance_and_score_elements_partial_z(smpls, n_jobs_cv)\n else:\n assert (self.partialX & self.partialZ)\n psi_a, psi_b, preds = self._ml_nuisance_and_score_elements_partial_xz(smpls, n_jobs_cv)\n\n return psi_a, psi_b, preds\n\n def _ml_nuisance_tuning(self, smpls, param_grids, scoring_methods, n_folds_tune, n_jobs_cv,\n search_mode, n_iter_randomized_search):\n if self.partialX & (not self.partialZ):\n res = self._ml_nuisance_tuning_partial_x(smpls, param_grids, scoring_methods, n_folds_tune, n_jobs_cv,\n search_mode, n_iter_randomized_search)\n elif (not self.partialX) & self.partialZ:\n res = self._ml_nuisance_tuning_partial_z(smpls, param_grids, scoring_methods, n_folds_tune, n_jobs_cv,\n search_mode, n_iter_randomized_search)\n else:\n assert (self.partialX & self.partialZ)\n res = self._ml_nuisance_tuning_partial_xz(smpls, param_grids, scoring_methods, n_folds_tune, n_jobs_cv,\n search_mode, n_iter_randomized_search)\n\n return res\n\n def _ml_nuisance_and_score_elements_partial_x(self, smpls, n_jobs_cv):\n x, y = check_X_y(self._dml_data.x, self._dml_data.y)\n x, d = check_X_y(x, self._dml_data.d)\n\n # nuisance g\n g_hat = _dml_cv_predict(self._learner['ml_g'], x, y, smpls=smpls, n_jobs=n_jobs_cv,\n est_params=self._get_params('ml_g'), method=self._predict_method['ml_g'])\n\n # nuisance m\n if self._dml_data.n_instr == 1:\n # one instrument: just identified\n x, z = check_X_y(x, np.ravel(self._dml_data.z))\n m_hat = _dml_cv_predict(self._learner['ml_m'], x, z, smpls=smpls, n_jobs=n_jobs_cv,\n est_params=self._get_params('ml_m'), method=self._predict_method['ml_m'])\n else:\n # several instruments: 2SLS\n m_hat = np.full((self._dml_data.n_obs, self._dml_data.n_instr), np.nan)\n z = self._dml_data.z\n for i_instr in range(self._dml_data.n_instr):\n x, this_z = check_X_y(x, z[:, i_instr])\n m_hat[:, i_instr] = _dml_cv_predict(self._learner['ml_m'], x, this_z, smpls=smpls, n_jobs=n_jobs_cv,\n est_params=self._get_params('ml_m_' + self._dml_data.z_cols[i_instr]),\n method=self._predict_method['ml_m'])\n\n # nuisance r\n r_hat = _dml_cv_predict(self._learner['ml_r'], x, d, smpls=smpls, n_jobs=n_jobs_cv,\n est_params=self._get_params('ml_r'), method=self._predict_method['ml_r'])\n\n psi_a, psi_b = self._score_elements(y, z, d, g_hat, m_hat, r_hat, smpls)\n preds = {'ml_g': g_hat,\n 'ml_m': m_hat,\n 'ml_r': r_hat}\n\n return psi_a, psi_b, preds\n\n def _score_elements(self, y, z, d, g_hat, m_hat, r_hat, smpls):\n # compute residuals\n u_hat = y - g_hat\n w_hat = d - r_hat\n v_hat = z - m_hat\n\n r_hat_tilde = None\n if self._dml_data.n_instr > 1:\n assert self.apply_cross_fitting\n # TODO check whether the no cross-fitting case can be supported here\n # projection of w_hat on v_hat\n reg = LinearRegression(fit_intercept=True).fit(v_hat, w_hat)\n r_hat_tilde = reg.predict(v_hat)\n\n if isinstance(self.score, str):\n assert self.score == 'partialling out'\n if self._dml_data.n_instr == 1:\n psi_a = -np.multiply(w_hat, v_hat)\n psi_b = np.multiply(v_hat, u_hat)\n else:\n psi_a = -np.multiply(w_hat, r_hat_tilde)\n psi_b = np.multiply(r_hat_tilde, u_hat)\n else:\n assert 
callable(self.score)\n if self._dml_data.n_instr > 1:\n raise NotImplementedError('Callable score not implemented for DoubleMLPLIV.partialX '\n 'with several instruments.')\n else:\n assert self._dml_data.n_instr == 1\n psi_a, psi_b = self.score(y, z, d,\n g_hat, m_hat, r_hat, smpls)\n\n return psi_a, psi_b\n\n def _ml_nuisance_and_score_elements_partial_z(self, smpls, n_jobs_cv):\n y = self._dml_data.y\n xz, d = check_X_y(np.hstack((self._dml_data.x, self._dml_data.z)),\n self._dml_data.d)\n\n # nuisance m\n r_hat = _dml_cv_predict(self._learner['ml_r'], xz, d, smpls=smpls, n_jobs=n_jobs_cv,\n est_params=self._get_params('ml_r'), method=self._predict_method['ml_r'])\n\n if isinstance(self.score, str):\n assert self.score == 'partialling out'\n psi_a = -np.multiply(r_hat, d)\n psi_b = np.multiply(r_hat, y)\n else:\n assert callable(self.score)\n raise NotImplementedError('Callable score not implemented for DoubleMLPLIV.partialZ.')\n\n preds = {'ml_r': r_hat}\n\n return psi_a, psi_b, preds\n\n def _ml_nuisance_and_score_elements_partial_xz(self, smpls, n_jobs_cv):\n x, y = check_X_y(self._dml_data.x, self._dml_data.y)\n xz, d = check_X_y(np.hstack((self._dml_data.x, self._dml_data.z)),\n self._dml_data.d)\n x, d = check_X_y(x, self._dml_data.d)\n\n # nuisance g\n g_hat = _dml_cv_predict(self._learner['ml_g'], x, y, smpls=smpls, n_jobs=n_jobs_cv,\n est_params=self._get_params('ml_g'), method=self._predict_method['ml_g'])\n\n # nuisance m\n m_hat, m_hat_on_train = _dml_cv_predict(self._learner['ml_m'], xz, d, smpls=smpls, n_jobs=n_jobs_cv,\n est_params=self._get_params('ml_m'), return_train_preds=True,\n method=self._predict_method['ml_m'])\n\n # nuisance r\n m_hat_tilde = _dml_cv_predict(self._learner['ml_r'], x, m_hat_on_train, smpls=smpls, n_jobs=n_jobs_cv,\n est_params=self._get_params('ml_r'), method=self._predict_method['ml_r'])\n\n # compute residuals\n u_hat = y - g_hat\n w_hat = d - m_hat_tilde\n\n if isinstance(self.score, str):\n assert self.score == 'partialling out'\n psi_a = -np.multiply(w_hat, (m_hat-m_hat_tilde))\n psi_b = np.multiply((m_hat-m_hat_tilde), u_hat)\n else:\n assert callable(self.score)\n raise NotImplementedError('Callable score not implemented for DoubleMLPLIV.partialXZ.')\n\n preds = {'ml_g': g_hat,\n 'ml_m': m_hat,\n 'ml_r': m_hat_tilde}\n\n return psi_a, psi_b, preds\n\n def _ml_nuisance_tuning_partial_x(self, smpls, param_grids, scoring_methods, n_folds_tune, n_jobs_cv,\n search_mode, n_iter_randomized_search):\n x, y = check_X_y(self._dml_data.x, self._dml_data.y)\n x, d = check_X_y(x, self._dml_data.d)\n\n if scoring_methods is None:\n scoring_methods = {'ml_g': None,\n 'ml_m': None,\n 'ml_r': None}\n\n train_inds = [train_index for (train_index, _) in smpls]\n g_tune_res = _dml_tune(y, x, train_inds,\n self._learner['ml_g'], param_grids['ml_g'], scoring_methods['ml_g'],\n n_folds_tune, n_jobs_cv, search_mode, n_iter_randomized_search)\n\n if self._dml_data.n_instr > 1:\n # several instruments: 2SLS\n m_tune_res = {instr_var: list() for instr_var in self._dml_data.z_cols}\n z = self._dml_data.z\n for i_instr in range(self._dml_data.n_instr):\n x, this_z = check_X_y(x, z[:, i_instr])\n m_tune_res[self._dml_data.z_cols[i_instr]] = _dml_tune(this_z, x, train_inds,\n self._learner['ml_m'], param_grids['ml_m'],\n scoring_methods['ml_m'],\n n_folds_tune, n_jobs_cv, search_mode,\n n_iter_randomized_search)\n else:\n # one instrument: just identified\n x, z = check_X_y(x, np.ravel(self._dml_data.z))\n m_tune_res = _dml_tune(z, x, train_inds,\n 
self._learner['ml_m'], param_grids['ml_m'], scoring_methods['ml_m'],\n n_folds_tune, n_jobs_cv, search_mode, n_iter_randomized_search)\n\n r_tune_res = _dml_tune(d, x, train_inds,\n self._learner['ml_r'], param_grids['ml_r'], scoring_methods['ml_r'],\n n_folds_tune, n_jobs_cv, search_mode, n_iter_randomized_search)\n\n g_best_params = [xx.best_params_ for xx in g_tune_res]\n r_best_params = [xx.best_params_ for xx in r_tune_res]\n if self._dml_data.n_instr > 1:\n params = {'ml_g': g_best_params,\n 'ml_r': r_best_params}\n for instr_var in self._dml_data.z_cols:\n params['ml_m_' + instr_var] = [xx.best_params_ for xx in m_tune_res[instr_var]]\n else:\n m_best_params = [xx.best_params_ for xx in m_tune_res]\n params = {'ml_g': g_best_params,\n 'ml_m': m_best_params,\n 'ml_r': r_best_params}\n\n tune_res = {'g_tune': g_tune_res,\n 'm_tune': m_tune_res,\n 'r_tune': r_tune_res}\n\n res = {'params': params,\n 'tune_res': tune_res}\n\n return res\n\n def _ml_nuisance_tuning_partial_z(self, smpls, param_grids, scoring_methods, n_folds_tune, n_jobs_cv,\n search_mode, n_iter_randomized_search):\n xz, d = check_X_y(np.hstack((self._dml_data.x, self._dml_data.z)),\n self._dml_data.d)\n\n if scoring_methods is None:\n scoring_methods = {'ml_r': None}\n\n train_inds = [train_index for (train_index, _) in smpls]\n m_tune_res = _dml_tune(d, xz, train_inds,\n self._learner['ml_r'], param_grids['ml_r'], scoring_methods['ml_r'],\n n_folds_tune, n_jobs_cv, search_mode, n_iter_randomized_search)\n\n m_best_params = [xx.best_params_ for xx in m_tune_res]\n\n params = {'ml_r': m_best_params}\n\n tune_res = {'r_tune': m_tune_res}\n\n res = {'params': params,\n 'tune_res': tune_res}\n\n return res\n\n def _ml_nuisance_tuning_partial_xz(self, smpls, param_grids, scoring_methods, n_folds_tune, n_jobs_cv,\n search_mode, n_iter_randomized_search):\n x, y = check_X_y(self._dml_data.x, self._dml_data.y)\n xz, d = check_X_y(np.hstack((self._dml_data.x, self._dml_data.z)),\n self._dml_data.d)\n x, d = check_X_y(x, self._dml_data.d)\n\n if scoring_methods is None:\n scoring_methods = {'ml_g': None,\n 'ml_m': None,\n 'ml_r': None}\n\n train_inds = [train_index for (train_index, _) in smpls]\n g_tune_res = _dml_tune(y, x, train_inds,\n self._learner['ml_g'], param_grids['ml_g'], scoring_methods['ml_g'],\n n_folds_tune, n_jobs_cv, search_mode, n_iter_randomized_search)\n m_tune_res = _dml_tune(d, xz, train_inds,\n self._learner['ml_m'], param_grids['ml_m'], scoring_methods['ml_m'],\n n_folds_tune, n_jobs_cv, search_mode, n_iter_randomized_search)\n\n r_tune_res = list()\n for idx, (train_index, _) in enumerate(smpls):\n m_hat = m_tune_res[idx].predict(xz[train_index, :])\n r_tune_resampling = KFold(n_splits=n_folds_tune, shuffle=True)\n if search_mode == 'grid_search':\n r_grid_search = GridSearchCV(self._learner['ml_r'], param_grids['ml_r'],\n scoring=scoring_methods['ml_r'],\n cv=r_tune_resampling, n_jobs=n_jobs_cv)\n else:\n assert search_mode == 'randomized_search'\n r_grid_search = RandomizedSearchCV(self._learner['ml_r'], param_grids['ml_r'],\n scoring=scoring_methods['ml_r'],\n cv=r_tune_resampling, n_jobs=n_jobs_cv,\n n_iter=n_iter_randomized_search)\n r_tune_res.append(r_grid_search.fit(x[train_index, :], m_hat))\n\n g_best_params = [xx.best_params_ for xx in g_tune_res]\n m_best_params = [xx.best_params_ for xx in m_tune_res]\n r_best_params = [xx.best_params_ for xx in r_tune_res]\n\n params = {'ml_g': g_best_params,\n 'ml_m': m_best_params,\n 'ml_r': r_best_params}\n\n tune_res = {'g_tune': g_tune_res,\n 'm_tune': 
m_tune_res,\n 'r_tune': r_tune_res}\n\n res = {'params': params,\n 'tune_res': tune_res}\n\n return res\n","sub_path":"doubleml/double_ml_pliv.py","file_name":"double_ml_pliv.py","file_ext":"py","file_size_in_byte":24400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"466888770","text":"from PIL import Image, ImageChops, ImageFilter, ImageOps\nimport numpy as np\nimport random\n\nclass Agent:\n # The default constructor for your Agent. Make sure to execute any\n # processing necessary before your Agent starts solving problems here.\n #\n # Do not add any variables to this signature; they will not be used by\n # main().\n def __init__(self):\n pass\n \n def Solve(self, problem):\n \n \n if problem.problemType == '2x2':\n\n print(\"problem name: \" + problem.name)\n prob_fig = {}\n ans_fig = {}\n prob_array = {}\n ans_array = {}\n \n \n rav_list = ['A', 'B', 'C']\n ans_list = ['1', '2', '3', '4', '5', '6'] \n \n \n for key in problem.figures:\n fig = problem.figures[key]\n image = Image.open(fig.visualFilename).convert('L')\n arrayImage = self.centerImageArray(np.array(image))\n if key in rav_list:\n prob_fig[key] = image\n prob_array[key] = arrayImage\n if key in ans_list:\n ans_fig[key] = image \n ans_array[key] = arrayImage \n \n \n ATransformations = [ \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.FLIP_LEFT_RIGHT))), \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.FLIP_TOP_BOTTOM))), \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.ROTATE_90))), \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.ROTATE_180))), \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.ROTATE_270)))\n ]\n \n BTransformations = [ \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.FLIP_LEFT_RIGHT))), \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.FLIP_TOP_BOTTOM))), \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.ROTATE_90))), \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.ROTATE_180))), \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.ROTATE_270)))\n ] \n \n CTransformations = [ \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.FLIP_LEFT_RIGHT))), \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.FLIP_TOP_BOTTOM))), \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.ROTATE_90))), \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.ROTATE_180))), \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.ROTATE_270)))\n ] \n \n \n \n if self.compare_images(prob_array['A'],prob_array['B']) == 0:\n print('A == B')\n for i in range(1,7):\n if self.compare_images(ans_array[str(i)], prob_array['C']) == 0:\n answer = i\n print(answer)\n return answer\n elif self.compare_images(prob_array['A'],prob_array['C']) == 0:\n print('A == C')\n min_is_zero = []\n j = 0\n while j < len(ans_array):\n k = self.compare_images(ans_array[str(j+1)], prob_array['B'])\n min_is_zero.append(k)\n j += 1\n for i in range(1,7):\n if self.compare_images(ans_array[str(i)], prob_array['B']) == 0:\n answer = i\n print(answer)\n return answer \n elif self.compare_images(ans_array[str(i)], prob_array['B']) != 0:\n answer = np.argmin(min_is_zero) + 1\n print(answer)\n return answer\n else:\n # only six answer options exist, so the fallback guess must stay in [1, 6]\n return random.randint(1,6)\n \n \n elif self.compare_images(prob_array['A'],prob_array['C']) != 0:\n print('A != C')\n compare_scoreAB = []\n compare_scoreCI = []\n compare_scoreAC = []\n 
compare_scoreBI = []\n i = 0\n compare_scoreAB = [ self.compare_images(x,prob_array['B']) for x in ATransformations]\n compare_scoreAC = [ self.compare_images(x,prob_array['C']) for x in ATransformations]\n print(compare_scoreAB)\n print(compare_scoreAC)\n index_minAB = np.argmin(compare_scoreAB)\n index_minAC = np.argmin(compare_scoreAC)\n transToCompare = CTransformations[index_minAB]\n transToCompareBI = BTransformations[index_minAC]\n compare_scoreCI = [ self.compare_images(ans_array[str(x)],transToCompare) for x in range(1,7)]\n compare_scoreBI = [ self.compare_images(ans_array[str(x)],transToCompareBI) for x in range(1,7)]\n print(compare_scoreCI)\n print(compare_scoreBI)\n \n if len(compare_scoreAB) == compare_scoreAB.count(compare_scoreAB[0]):\n print('All equal') \n p = 0\n mlist = []\n while p < len(ans_array):\n m = self.centerImageArray(np.array(ImageChops.darker(prob_fig['C'],ans_fig[str(p+1)])))\n mlist.append(m)\n p += 1\n \n \n j = self.centerImageArray(np.array(ImageChops.darker(prob_fig['A'],prob_fig['B'])))\n bcenter = self.centerImageArray(prob_array['B'])\n k = self.compare_images(j, bcenter)\n print(k)\n \n p1 = 0\n complist = []\n while p1 < len(ans_array):\n z = self.compare_images(mlist[p1],ans_array[str(p1+1)])\n complist.append(z)\n p1 += 1\n print(complist)\n \n mincomp = np.argmin(complist)\n \n # both branches of the original if/else were identical, so just pick the lowest-scoring candidate\n answer = mincomp + 1\n print(answer)\n return answer\n \n \n elif min(compare_scoreAC) < min(compare_scoreAB):\n print('C transformation')\n index_minCI = np.argmin(compare_scoreBI)\n answer = index_minCI + 1\n print(answer)\n return answer\n elif min(compare_scoreAB) < min(compare_scoreAC):\n print('B transformation')\n index_minBI = np.argmin(compare_scoreCI)\n answer = index_minBI + 1\n print(answer)\n return answer\n \n \n \n \n \n\n \n def solve3x3(self, problem):\n pass\n \n def centerImageArray(self,figArray):\n figImage = Image.fromarray(figArray, \"L\")\n \n # mask\n threshold=128\n mask = figImage.point(lambda p: p < threshold and 255)\n \n # find edges\n edges = mask.filter(ImageFilter.FIND_EDGES)\n box = edges.getbbox()\n edges = edges.crop(box)\n \n # center in new image-figure\n tempImg = Image.new(\"L\", figImage.size)\n \n width, height = edges.size\n fwidth, fheight = figImage.size\n \n tempImg.paste(edges, ((fwidth - width) // 2, (fheight - height) // 2))\n \n return np.array(tempImg)\n \n def MSE(self,arrayA, arrayB):\n return np.square(arrayA - arrayB).mean()\n \n def compare_images(self, arrayA, arrayB):\n # compute the mean squared error and structural similarity\n # index for the images\n m = self.MSE(arrayA, arrayB)\n return m\n \n def getFigureDarkpixels(self, figArray, figArray2):\n # in 'L', 255 is white\n white1 = np.count_nonzero(figArray)\n black1 = figArray.size - white1\n white2 = np.count_nonzero(figArray2)\n black2 = figArray2.size - white2\n difference = abs(black2 - black1)\n return difference \n \n def find_min(self, prob_fig, prob_array, ans_array):\n # the figure dictionaries must be passed in; the original signature took unused array\n # arguments while the body referenced undefined names\n \n ATransformations = [ \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.FLIP_LEFT_RIGHT))), \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.FLIP_TOP_BOTTOM))), \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.ROTATE_90))), \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.ROTATE_180))), \n self.centerImageArray(np.array(prob_fig['A'].transpose(Image.ROTATE_270)))\n ]\n \n BTransformations = [ 
self.centerImageArray(np.array(prob_fig['B'].transpose(Image.FLIP_LEFT_RIGHT))), \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.FLIP_TOP_BOTTOM))), \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.ROTATE_90))), \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.ROTATE_180))), \n self.centerImageArray(np.array(prob_fig['B'].transpose(Image.ROTATE_270)))\n ] \n \n CTransformations = [ \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.FLIP_LEFT_RIGHT))), \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.FLIP_TOP_BOTTOM))), \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.ROTATE_90))), \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.ROTATE_180))), \n self.centerImageArray(np.array(prob_fig['C'].transpose(Image.ROTATE_270)))\n ] \n \n \n compare_scoreAB = []\n compare_scoreCI = []\n for x in range(len(ATransformations)):\n compare_scoreAB.append(self.compare_images(ATransformations[x],prob_array['B']))\n if compare_scoreAB[x] == min(compare_scoreAB):\n trans_index = x\n for i in range(1,9):\n compare_scoreCI.append(self.compare_images(CTransformations[trans_index], ans_array[str(i)]))\n if compare_scoreCI[i-1] == min(compare_scoreCI):\n answer = i\n return answer\n \n \n def image_combo(self, prob_fig, prob_array, ans_fig, ans_array):\n AB = self.centerImageArray(np.array(ImageChops.darker(prob_fig['A'],prob_fig['B'])))\n BC = self.centerImageArray(np.array(ImageChops.darker(prob_fig['B'],prob_fig['C'])))\n AC = self.centerImageArray(np.array(ImageChops.darker(prob_fig['A'],prob_fig['C'])))\n \n GH = []\n HI = []\n GI = []\n i = 0\n while i < len(ans_array):\n # answer keys run from '1', so offset the zero-based counter\n GH.append(self.centerImageArray(np.array(ImageChops.darker(prob_fig['G'],ans_fig[str(i + 1)]))))\n HI.append(self.centerImageArray(np.array(ImageChops.darker(prob_fig['H'],ans_fig[str(i + 1)]))))\n GI.append(self.centerImageArray(np.array(ImageChops.darker(prob_fig['G'],ans_fig[str(i + 1)]))))\n i += 1\n \n # numpy arrays cannot be compared for truthiness with ==, so use np.array_equal\n if np.array_equal(AB, prob_array['C']):\n g = 0\n while g < len(GH):\n if np.array_equal(GH[g], ans_array[str(g + 1)]):\n answer = g + 1\n print(answer)\n g += 1\n return answer\n \n elif np.array_equal(BC, prob_array['A']):\n g = 0\n while g < len(HI):\n if np.array_equal(HI[g], ans_array[str(g + 1)]):\n answer = g + 1\n print(answer)\n g += 1\n return answer\n \n elif np.array_equal(AC, prob_array['B']):\n g = 0\n while g < len(GI):\n if np.array_equal(GI[g], ans_array[str(g + 1)]):\n answer = g + 1\n print(answer)\n g += 1\n return answer\n \n def cyclic_patterns(self, prob_fig, prob_array, ans_fig, ans_array):\n if np.array_equal(prob_array['A'], prob_array['E']):\n for i in range(1,9):\n if np.array_equal(ans_array[str(i)], prob_array['E']):\n answer = i\n print(answer)\n return(answer)\n \n def image_difference(self, prob_fig, prob_array, ans_fig, ans_array):\n \n diffAB = ImageChops.difference(prob_fig['A'], prob_fig['B'])\n diffarrayAB = self.centerImageArray(np.array(diffAB))\n diffGH = ImageChops.difference(prob_fig['G'], prob_fig['H'])\n diffarrayGH = self.centerImageArray(np.array(diffGH)) \n \n if np.array_equal(diffarrayAB, prob_array['C']):\n for i in range(1,9):\n if np.array_equal(diffarrayGH, ans_array[str(i)]):\n answer = i\n print(answer)\n return answer\n \n def make_transparent(self, img):\n # make pure-white pixels fully transparent in the given image\n \n datas = img.getdata()\n \n newData = []\n for item in datas:\n if item[0] == 255 and item[1] == 255 and item[2] == 255:\n newData.append((255, 255, 255, 0))\n else:\n newData.append(item)\n \n img.putdata(newData)\n \n \n def pixel_count_pattern(self, prob_array, ans_array):\n \n ab = self.getFigureDarkpixels(prob_array['A'], prob_array['B'])\n bc = self.getFigureDarkpixels(prob_array['B'], 
prob_array['C'])\n de = self.getFigureDarkpixels(prob_array['D'], prob_array['E'])\n ef = self.getFigureDarkpixels(prob_array['E'], prob_array['F'])\n gh = self.getFigureDarkpixels(prob_array['G'], prob_array['H'])\n \n if ab == de:\n if bc == ef:\n if ab == gh:\n for i in range(1,9):\n if self.getFigureDarkpixels(ans_array[str(i)], prob_array['H']) == bc:\n answer = i\n print(answer)\n return answer\n","sub_path":"iterations4.py","file_name":"iterations4.py","file_ext":"py","file_size_in_byte":14737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"426543331","text":"#Import dependencies\nfrom flask import Flask, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\n\n#Create instance of Flask App\napp = Flask(__name__)\n\n#Connect to the Database\napp.config['SQLALCHEMY_DATABASE_URI']='postgresql://prog:1234@192.168.100.78/datacollector'\ndb = SQLAlchemy(app)\n\nclass Data(db.Model):\n #create a table\n __tablename__ = \"data\"\n id = db.Column(db.Integer, primary_key = True)\n height = db.Column(db.Integer)\n weight = db.Column(db.Integer)\n shoesize = db.Column(db.Integer)\n sex = db.Column(db.String)\n\n def __init__(self, height, weight, shoesize, sex):\n self.height = height\n self.weight = weight\n self.shoesize = shoesize\n self.sex = sex\n\n#Define Route and Contant of that page\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n#Define 2nd Route and Content\n@app.route(\"/success\", methods = ['POST'])\ndef success():\n if(request.method == 'POST'):\n height_ = request.form[\"height\"]\n weight_ = request.form[\"weight\"]\n shoesize_ = request.form[\"shoesize\"]\n sex_ = request.form[\"sex\"]\n data = Data(height_,weight_,shoesize_,sex_)\n db.session.add(data)\n db.session.commit()\n return render_template(\"success.html\")\n\n#Running and Controlling the script\nif (__name__ ==\"__main__\"):\n app.run(host='0.0.0.0',debug=True)\n","sub_path":"mainscript.py","file_name":"mainscript.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"86341777","text":"import sys\nimport os\n\n'''\ninput : snap file\nAssuming that input graph is undirected\narglist:\n\n1.metis edge list\n2.input for metis\n\noutput :\n\n1. metis format\n\n2. edge list\n\n3. 
giraph_file\n\n'''\n\n# fout = open('hello.txt', 'w')\n# fout.write('Hello, world!\\n') # .write(str)\n# fout.write('My name is Homer.\\n')\n# fout.write(\"What a beautiful day we're having.\\n\")\n# fout.close()\n#\n\n\nsnap_file=sys.argv[1]\nmetis_edgeList=sys.argv[2]\nmetis_graph=sys.argv[3]\ngiraph_graph=sys.argv[4]\n\n\nsnapfile = open(snap_file, 'r')\nmetis_edgeListfile = open(metis_edgeList, 'w')\n\nreordering = dict()\nrReordering = dict()\nvid = 1\ncount=0\n\ngraph={}\n\n\nfor line in snapfile:\n # print line[:-1]\n src = line[:-1].split()[0]\n dst = line[:-1].split()[1]\n\n if(src in rReordering):\n pass\n else:\n reordering[vid] = src\n rReordering[src] = vid\n vid=vid+1\n\n\n if(dst in rReordering):\n pass\n else:\n reordering[vid] = dst\n rReordering[dst] = vid\n vid=vid+1\n\n # graph formation\n\n if(rReordering[src] in graph):\n n=graph[rReordering[src]]\n n.add(rReordering[dst])\n graph[rReordering[src]]=n\n\n else:\n n = set()\n n.add(rReordering[dst])\n graph[rReordering[src]]=n\n\n\n\n\n count=count+1\n\n # print rReordering[src],rReordering[dst]\n outString=str(rReordering[src]-1)+\" \"+str(rReordering[dst]-1)\n # print outString\n metis_edgeListfile.write(outString+\"\\n\")\n\n# print count\n# print rReordering\n# print graph\nsnapfile.close()\nmetis_edgeListfile.close()\n\n\nmetis_graphfile = open(metis_graph, 'w')\ngiraph_graphfile = open(giraph_graph, 'w')\n\nnumV=len(graph.keys())\n# print numV\n\nnumE=count\n\nmetis_graphfile.write(str(numV)+\" \"+str(numE)+\"\\n\")\n\nfor i in range(1,numV+1):\n\n metis_graphfile.write( \" \".join( str(v) for v in list(graph[i])) + \"\\n\")\n # giraph_graphfile.write(str(i)+\" \"+\" \".join( str(v) for v in list(graph[i])) + \"\\n\")\n outstr=str(i)+\"\\t\"+str(0.0)\n prefix=\"\\t\"\n for v in list(graph[i]):\n outstr=outstr+prefix+str(v)+prefix+str(1.0)\n giraph_graphfile.write(outstr+\"\\n\")\n\nmetis_graphfile.close()\ngiraph_graphfile.close()","sub_path":"Graphs/snap2metis.py","file_name":"snap2metis.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"508292196","text":"#!/usr/bin/python\nimport math\n\ndef outlierCleaner(predictions, ages, net_worths):\n \"\"\"\n Clean away the 10% of points that have the largest\n residual errors (difference between the prediction\n and the actual net worth).\n\n Return a list of tuples named cleaned_data where \n each tuple is of the form (age, net_worth, error).\n \"\"\"\n \n cleaned_data = []\n\n ### your code goes here\n \n i=0\n size = len(ages)\n array = [None] * size\n end = int(round(size * 0.9))\n print('Total Size: \\n', size)\n print('End Index: \\n', end)\n while (i < size):\n array[i] = getError(predictions[i], net_worths[i])\n print('Array: {}\\n', array[i])\n i += 1\n print('i: \\n', i)\n \n result = list(zip(ages,net_worths,array))\n result.sort(key=lambda x: x[2])\n\n cleaned_data = result[0:end]\n return cleaned_data\n\ndef getError(prediction,net_worth):\n y = math.fabs(prediction - net_worth)\n return y*y\n\n","sub_path":"outliers/outlier_cleaner.py","file_name":"outlier_cleaner.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"109861351","text":"##############################################################################\n#\n# Copyright (c) 2006 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope 
Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Browser Views for Skin Browser\n\n$Id$\n\"\"\"\n__docformat__ = 'restructuredtext'\nimport inspect\nimport zope.interface\nfrom zope.app.apidoc import component, interface, presentation, utilities\nfrom zope.app.apidoc.ifacemodule.browser import InterfaceDetails\nfrom zope.configuration.xmlconfig import ParserInfo\nfrom zope.security import proxy\n\n\nclass TemplateDetails(object):\n \"\"\"View class for a Template.\"\"\"\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n @property\n def macro(self):\n return self.context.macro\n\n @property\n def filename(self):\n file = self.context.filename\n return {'file': utilities.relativizePath(file),\n 'url': utilities.truncateSysPath(file).replace('\\\\', '/')}\n\n @property\n def contentType(self):\n return self.context.contentType\n\n def fileInfo(self):\n \"\"\"Get the file where the directive was declared.\"\"\"\n info = proxy.removeSecurityProxy(self.context.reg).info\n if proxy.isinstance(info, ParserInfo):\n return component.getParserInfoInfoDictionary(info)\n return None\n\n def permission(self):\n \"\"\"Get the permission to access the view.\"\"\"\n reg = proxy.removeSecurityProxy(self.context.reg)\n perms = utilities.getPermissionIds('publishTraverse', klass=reg.factory)\n return perms['read_perm']\n\n def validationMessages(self):\n return [\n zope.component.getMultiAdapter(\n (message, self.request), name='listitem')\n for message in self.context.validate()]\n\n\nclass ViewDetails(object):\n \"\"\"View class for a View.\"\"\"\n\n # Attributes that are always available can be ignored here.\n excludeAttrs = ('__parent__', '__name__', 'context', 'request',\n 'browserDefault', 'publishTraverse',\n 'update', 'render', '__call__')\n\n def viewTemplate(self):\n if self.context.template:\n return TemplateDetails(self.context.template, self.request)\n\n def fileInfo(self):\n \"\"\"Get the file where the directive was declared.\"\"\"\n info = proxy.removeSecurityProxy(self.context.reg).info\n if proxy.isinstance(info, ParserInfo):\n return component.getParserInfoInfoDictionary(info)\n return None\n\n def factory(self):\n \"\"\"Get factory info\"\"\"\n reg = proxy.removeSecurityProxy(self.context.reg)\n return presentation.getViewFactoryData(reg.factory)\n\n def permission(self):\n \"\"\"Get the permission to access the view.\"\"\"\n reg = proxy.removeSecurityProxy(self.context.reg)\n perms = utilities.getPermissionIds('publishTraverse', klass=reg.factory)\n return perms['read_perm']\n\n def doc(self):\n reg = proxy.removeSecurityProxy(self.context.reg)\n factory = component.getRealFactory(reg.factory)\n if factory.__doc__:\n return utilities.renderText(\n factory.__doc__, inspect.getmodule(factory))\n ifaces = tuple(zope.interface.implementedBy(factory).interfaces())\n if ifaces[0].__doc__:\n iface = ifaces[0]\n return utilities.renderText(iface.__doc__, inspect.getmodule(iface))\n\n @property\n def iface(self):\n reg = proxy.removeSecurityProxy(self.context.reg)\n factory = component.getRealFactory(reg.factory)\n implements = zope.interface.implementedBy(factory)\n return 
zope.interface.interface.InterfaceClass(\n 'ITemporary', bases=tuple(implements.interfaces()))\n\n def getAttributes(self):\n \"\"\"Return a list of attributes in the order they were specified.\"\"\"\n return [interface.getAttributeInfoDictionary(attr)\n for name, attr in interface.getAttributes(self.iface)\n if name not in self.excludeAttrs]\n\n def getMethods(self):\n \"\"\"Return a list of methods in the order they were specified.\"\"\"\n return [interface.getMethodInfoDictionary(method)\n for name, method in interface.getMethods(self.iface)\n if name not in self.excludeAttrs]\n\n def getFields(self):\n r\"\"\"Return a list of fields in required + alphabetical order.\n\n The required attributes are listed first, then the optional\n attributes.\"\"\"\n # Make sure that the required fields are shown first\n sorter = lambda x, y: cmp((not x[1].required, x[0].lower()),\n (not y[1].required, y[0].lower()))\n return [\n interface.getFieldInfoDictionary(field)\n for name, field in interface.getFieldsInOrder(self.iface, sorter)\n if name not in self.excludeAttrs]\n","sub_path":"lovely.skinbrowser/trunk/src/lovely/skinbrowser/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"230967307","text":"from bs4 import BeautifulSoup\nimport requests\nimport csv\n\n\n# check the URL and fetch the page\ndef check_link(url):\n try:\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n print('Unable to connect to the server!!!')\n\n# scrape the table rows\ndef get_contents(ulist,rurl):\n soup = BeautifulSoup(rurl,'lxml')\n trs = soup.find_all('tr')\n for tr in trs:\n ui = []\n for td in tr:\n ui.append(td.text)\n ulist.append(ui)\n\n# save the scraped contents\ndef save_contents(urlist):\n try:\n with open(\"d://data/data.txt\",'w') as f:\n writer = csv.writer(f)\n # writer.writerow(['2016 China Top 500 Enterprises Ranking'])\n for i in range(len(urlist)):\n writer.writerow([urlist[i][1],urlist[i][3],urlist[i][5]])\n except:\n pass\ndef main():\n urli = []\n url = \"http://www.sysinet.gov.cn/web/2018/11/2018111.htm\"\n rs = check_link(url)\n get_contents(urli, rs)\n save_contents(urli)\n\n\nmain()\n","sub_path":"Python小项目/爬虫/获取网页内表格.py","file_name":"获取网页内表格.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"620604057","text":"\n# coding: utf-8\n\n# In[1]:\nimport datetime\nimport time\n\nfrom pyspark.sql import SparkSession, Row, SQLContext, Window\nfrom pyspark.sql.types import StructType, StringType, StructField, BooleanType, IntegerType, ArrayType, TimestampType, DoubleType\nfrom pyspark import SparkContext\nimport pyspark.sql.functions as f\n\nfrom obs_job import ObsJob \nfrom job import Job\nfrom config.config import getConfig\n\n\nimport os\nspark_submit_str = ('--driver-memory 45g --executor-memory 3g --packages org.apache.spark:spark-sql_2.11:2.4.0,org.apache.bahir:spark-sql-cloudant_2.11:2.3.2'\n ' --driver-class-path /home/jovyan/jars/mysql-connector-java-5.1.42-bin.jar' \n ' --jars /home/jovyan/jars/spark-cassandra-connector.jar,/home/jovyan/jars/mysql-connector-java-5.1.42-bin.jar'\n ' pyspark-shell')\n\nos.environ['PYSPARK_SUBMIT_ARGS'] = spark_submit_str\n\n\nconfig = getConfig()\ndef save_to_cassandra(df, table):\n df.write.format(\"org.apache.spark.sql.cassandra\")\\\n .options(table=table, keyspace=\"amrs\")\\\n .mode(\"append\")\\\n .save()\n \n print(\"Finished loading to cassandra \" + time.ctime()) \n\n\n# 
In[4]:\n\n\nspark = SparkSession.builder\\\n.config('spark.sql.repl.eagerEval.enabled', True)\\\n .config('cloudant.host', config['couch']['host'])\\\n .config('cloudant.username', config['couch']['username'])\\\n .config('cloudant.password', config['couch']['password'])\\\n .config('cloudant.protocol', config['couch']['protocol'])\\\n .config('spark.rdd.compress', True)\\\n .config('spark.sql.crossJoin.enabled', True)\\\n .config(\"jsonstore.rdd.maxInPartition\", 500).\\\n config(\"jsonstore.rdd.minInPartition\", 1000)\\\n .config(\"cloudant.useQuery\", \"true\")\\\n .config(\"jsonstore.rdd.requestTimeout\", 90000000)\\\n .config(\"spark.sql.shuffle.partitions\", 1000)\\\n .config(\"schemaSampleSize\",1)\\\n .getOrCreate()\nspark.sparkContext.setLogLevel('INFO')\n\n\n# In[5]:\n\nclass EncounterJob(Job):\n def saveToCouchDB(self, dataframe, database):\n dataframe.write.save(database,\"org.apache.bahir.cloudant\",\n bulkSize=\"800\", createDBOnSave=\"false\")\n\n\n \n # In[7]:\n\n\n def get_provider(self):\n provider = super().getDataFromMySQL('amrs', 'provider', {\n 'partitionColumn': 'provider_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 45000000,\n 'numPartitions': 200})\\\n .select('uuid', 'identifier', 'provider_id', 'person_id')\\\n .withColumnRenamed('uuid', 'provider_uuid')\\\n .withColumnRenamed('identifier', 'provider_identifier')\\\n .alias('provider')\n\n person = super().getDataFromMySQL('amrs', 'person_name', {\n 'partitionColumn': 'person_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 45000000,\n 'numPartitions': 200})\\\n .select('given_name', 'family_name', 'middle_name', 'person_id')\\\n .alias('person_name')\n\n return provider.join(person, on='person_id', how='left')\\\n .withColumn('provider_name', f.concat_ws(' ', f.col('given_name'), f.col('middle_name'), f.col('family_name')))\\\n .drop('given_name', 'family_name', 'middle_name')\n\n\n # In[8]:\n\n\n def get_encounter_providers(self):\n encounter_provider = super().getDataFromMySQL('amrs', 'encounter_provider', {\n 'partitionColumn': 'provider_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 45000000,\n 'numPartitions': 450})\\\n .select('uuid', 'encounter_id', 'provider_id')\\\n .withColumnRenamed('uuid', 'encounter_provider_uuid')\\\n .alias('enc_provider')\n\n provider = self.get_provider()\n\n\n return encounter_provider.join(provider, 'provider_id')\n\n\n\n\n # In[9]:\n\n\n def get_encounter_types(self):\n encounter_type = super().getDataFromMySQL('amrs', 'encounter_type', {\n 'partitionColumn': 'encounter_type_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 10000,\n 'numPartitions': 1})\\\n .select('uuid', 'name', 'encounter_type_id')\\\n .withColumnRenamed('uuid', 'encounter_type_uuid')\\\n .withColumnRenamed('name', 'encounter_type_name')\n\n return encounter_type\n\n\n # In[10]:\n\n\n def get_forms(self):\n forms = super().getDataFromMySQL('amrs', 'form', {\n 'partitionColumn': 'form_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 10000,\n 'numPartitions': 10})\\\n .select('form_id', 'uuid', 'name')\\\n .withColumnRenamed('uuid', 'form_uuid')\\\n .withColumnRenamed('name', 'form_name')\n\n return forms\n\n\n # In[11]:\n\n\n def get_locations(self):\n location = super().getDataFromMySQL('amrs', 'location', {\n 'partitionColumn': 'location_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 45000000,\n 'numPartitions': 1})\\\n .select('uuid', 'name', 'location_id')\\\n .withColumnRenamed('uuid', 'location_uuid')\\\n 
.withColumnRenamed('name', 'location_name')\n\n return location\n\n\n # In[12]:\n\n\n def get_visits(self):\n visit = super().getDataFromMySQL('amrs', 'visit', {\n 'partitionColumn': 'visit_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 45000000,\n 'numPartitions': 100})\\\n .select('uuid', 'date_started', 'date_stopped', 'visit_type_id', 'visit_id', 'location_id')\\\n .withColumnRenamed('uuid', 'visit_uuid')\n\n visit_type = super().getDataFromMySQL('amrs', 'visit_type', {\n 'partitionColumn': 'visit_type_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 45000000,\n 'numPartitions': 1})\\\n .select('uuid', 'name', 'visit_type_id')\\\n .withColumnRenamed('uuid', 'visit_type_uuid')\\\n .withColumnRenamed('name', 'visit_type_name')\n\n locations = self.get_locations()\n\n return visit.join(visit_type, on='visit_type_id')\\\n .join(f.broadcast(locations), on='location_id')\\\n .drop('visit_type_id', 'location_id')\\\n .alias('visit')\n\n\n # In[13]:\n\n\n def get_patients(self):\n person = super().getDataFromMySQL('amrs', 'person', {\n 'partitionColumn': 'person_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 10000000,\n 'numPartitions': 200})\\\n .select('uuid', 'person_id')\\\n .withColumnRenamed('uuid', 'person_uuid')\n\n patient = super().getDataFromMySQL('amrs', 'patient', {\n 'partitionColumn': 'patient_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 1000000,\n 'numPartitions': 200})\\\n .select('patient_id')\n\n\n return person.join(patient, on=f.col('person_id') == f.col('patient_id')).drop('person_id')\n\n\n # In[14]:\n\n\n def get_encounters(self):\n encounters = super().getDataFromMySQL('amrs', 'encounter', {\n 'partitionColumn': 'encounter_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 10000000,\n 'numPartitions': 100})\\\n .alias('encounter')\n\n return encounters\n\n\n # In[15]:\n\n\n def get_concepts(self):\n concepts = super().getDataFromMySQL('amrs', 'concept', {\n 'partitionColumn': 'concept_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 20000,\n 'numPartitions': 10})\\\n .select('uuid', 'concept_id')\\\n .withColumnRenamed('uuid', 'concept_uuid')\n\n concept_names = super().getDataFromMySQL('amrs', 'concept_name', {\n 'partitionColumn': 'concept_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 50000,\n 'numPartitions': 10})\\\n .filter(f.col('locale_preferred') == 1)\\\n .select('name', 'concept_id')\\\n .withColumnRenamed('name', 'concept_name')\n\n return concepts.join(concept_names, on='concept_id')\n\n\n # In[16]:\n\n\n def get_orders(self):\n orders = super().getDataFromMySQL('amrs', 'orders', {\n 'partitionColumn': 'encounter_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 10000000,\n 'numPartitions': 200})\\\n .filter(f.col('voided') == 0)\\\n .select('uuid', 'encounter_id', 'concept_id', 'orderer',\n 'order_action', 'date_activated', 'date_created',\n 'urgency', 'order_type_id', 'order_number')\\\n .withColumnRenamed('uuid', 'order_uuid')\n\n order_type = super().getDataFromMySQL('amrs', 'order_type', {\n 'partitionColumn': 'order_type_id', \n 'fetchsize':4566,\n 'lowerBound': 1,\n 'upperBound': 100,\n 'numPartitions': 1})\\\n .select('order_type_id', 'name')\\\n .withColumnRenamed('name', 'order_type_name')\n\n concepts = self.get_concepts()\n\n orderer = self.get_provider()\n\n return orders.join(f.broadcast(order_type), on='order_type_id')\\\n .join(f.broadcast(concepts), on='concept_id')\\\n .join(f.broadcast(orderer), on=orders['orderer'] == 
orderer['provider_id'])\\\n .drop('concept_id', 'order_type_id')\\\n .alias('orders')\n\n\n # In[17]:\n\n\n def get_obs(self):\n return spark.read.format(\"org.apache.spark.sql.cassandra\")\\\n .options(table=\"obs\", keyspace=\"amrs\")\\\n .load()\\\n .alias('obs')\n\n\n # In[18]:\n\n\n def transform_into_openmrs_object(self, encounter_dataframe):\n return encounter_dataframe.groupBy('encounter.encounter_id').agg(\n f.first('patient_id').alias('person_id'),\n f.lit('encounter').alias('type'),\n f.first('encounter.location_id').alias('location_id'),\n f.first('person_uuid').alias('person_uuid'),\n f.col('encounter.encounter_id').cast('string').alias('couch_id'),\n f.first('uuid').alias('uuid'),\n f.first('encounter_datetime').alias('encounterdatetime'),\n f.struct(\n f.first('encounter_type_name').alias('display'),\n f.first('encounter_type_uuid').alias('uuid')\n ).alias('encountertype'),\n f.struct(\n f.first('form_name').alias('name'),\n f.first('form_uuid').alias('uuid')\n ).alias('form'),\n f.struct(\n f.first('location.location_name').alias('display'),\n f.first('location.location_uuid').alias('uuid') \n ).alias('location'),\n f.to_json(f.collect_set(\n f.when(f.col('encounter_provider_uuid').isNotNull(), f.struct(\n f.col('encounter_provider_uuid').alias('uuid'),\n f.col('encounter_provider.provider_name').alias('display'),\n f.struct(\n f.col('encounter_provider.provider_uuid').alias('uuid'),\n f.concat_ws(' ', f.col('encounter_provider.provider_identifier'), f.lit('-'), f.col('encounter_provider.provider_name')).alias('display')\n ).alias('provider')\n ))\n )).alias('encounterproviders'),\n f.to_json(f.struct(\n f.first('visit_uuid').alias('uuid'),\n f.first('visit.date_started').alias('dateStarted'),\n f.first('visit.date_stopped').alias('dateStopped'),\n f.struct(\n f.first('visit_type_name').alias('name'),\n f.first('visit_type_uuid').alias('uuid')\n ).alias('visitType'),\n f.struct(\n f.first('visit.location_name').alias('name'),\n f.first('visit.location_uuid').alias('uuid')\n ).alias('location'),\n f.concat_ws(' ', f.first('visit_type_name'), f.lit('@'), f.first('visit.location_name'), f.lit('-'), f.first('visit.date_started'))\n .alias('display')\n )).alias('visit'),\n f.to_json(f.collect_set(\n f.when(f.col('order_uuid').isNotNull(),f.struct(\n f.col('order_uuid').alias('uuid'),\n f.col('order_number').alias('orderNumber'),\n f.struct(\n f.col('orders.concept_uuid').alias('uuid'),\n f.col('orders.concept_name').alias('display')\n ).alias('concept'),\n f.struct(\n f.col('orders.provider_uuid').alias('uuid'),\n f.concat_ws(' ', 'orders.provider_identifier', 'orders.provider_name').alias('display')\n ).alias('orderer'),\n f.col('order_action').alias('action'),\n f.col('orders.date_activated').alias('dateActivated'),\n f.col('orders.date_created').alias('dateCreated'),\n f.col('orders.urgency').alias('urgency'),\n f.col('order_type_name').alias('type')\n )\n ).otherwise(None))).alias('orders'),\n f.to_json(f.collect_list(\n f.struct(\n f.lit('obs_uuid_to_be_included').alias('uuid'),\n f.col('obs_datetime').alias('obsDatetime'),\n f.struct(\n f.col('parent_obs_concept_uuid').alias('uuid'),\n f.struct(\n f.col('parent_obs_concept_name').alias('display'))\n .alias('name')\n ).alias('concept'),\n f.when(f.col('value_coded').isNotNull(),\n f.struct(\n f.col('value_type').alias('type'),\n f.to_json(\n f.struct(\n f.col('value_coded_concept_uuid').alias('uuid'),\n f.col('value_coded_concept_name').alias('display')\n )).alias('value')\n )\n 
).when(f.col('value_not_coded').isNotNull(),\n f.struct(\n f.col('value_type').alias('type'),\n f.col('value_not_coded').alias('value')\n )\n ).alias('value'),\n f.when(f.col('groupmembers').isNotNull(), \n f.col('groupmembers')\n ).alias('groupMembers')\n ))).alias('obs'),\n ).withColumn('build_date', f.current_timestamp())\n\n\n # In[19]:\n\n def run(self):\n ### build obs first\n obs = ObsJob().build_obs()\n\n ### start working on encounters\n encounters = self.get_encounters()\n forms = self.get_forms()\n locations = self.get_locations().alias('location')\n visits = self.get_visits()\n encounter_providers = self.get_encounter_providers().alias('encounter_provider')\n encounter_types = self.get_encounter_types()\n patients = self.get_patients()\n orders = self.get_orders()\n\n joined_encounters = encounters.join(f.broadcast(forms), on='form_id')\\\n .join(f.broadcast(locations), on='location_id')\\\n .join(f.broadcast(visits),on='visit_id')\\\n .join(f.broadcast(encounter_types), on=encounters['encounter_type'] == encounter_types['encounter_type_id'])\\\n .join(patients, on='patient_id').join(encounter_providers, on=encounter_providers['encounter_id'] == encounters['encounter_id'], how='left')\\\n .join(orders, on=orders['encounter_id'] == encounters['encounter_id'], how='left')\\\n .join(obs, on=obs['encounter_id'] == encounters['encounter_id'], how='left')\\\n .drop('enc_provider.encounter_id', 'obs.encounter_id', 'orders.encounter_id')\n\n openmrs_encounter_object = self.transform_into_openmrs_object(joined_encounters)\n return openmrs_encounter_object\n\n","sub_path":"spark/batch-rebuild-scripts/encounter_job.py","file_name":"encounter_job.py","file_ext":"py","file_size_in_byte":18425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"123719691","text":"\"\"\"\nGoogle Colab Example: https://colab.research.google.com/github/UKPLab/sentence-transformers/blob/master/examples/applications/retrieve_rerank/retrieve_rerank_simple_wikipedia.ipynb\n\"\"\"\nimport json\nimport os\nimport re\nimport time\n\nfrom model.result import Result\nfrom model.comparer import Comparer\nfrom model.document import Document, load_documents, rawtxt_to_document\nfrom entity.question import Question, Option, AnsweringResponse\nfrom model.question_answering import QuestionAnswering\nfrom model.retriever import Retriever\nfrom flask import stream_with_context, make_response\nfrom flask_restful import Resource, Api\nfrom flask import Flask, flash, request, redirect, url_for, jsonify\n\nif __name__ == '__main__':\n retriever = Retriever()\n comparer = Comparer()\n qa = QuestionAnswering()\n retriever.load_documents(load_documents())\n retriever.combine_data()\n\n\ndef solve_question(question):\n query = re.sub(r\"[._]{5,}\", \" what \", question.content)\n query = re.sub(r\"\\s+\", \" \", query.strip())\n print(f\"Input question: {query}\")\n best_qa_answer = None\n # best_qa_context = None\n contexts = retriever.search(query)\n\n print(\"> QA model:\")\n for context in contexts:\n qa_answer = qa.answer(query, context.content)\n print(f\"QA: '{qa_answer.content}', score: {round(qa_answer.score, 3)}\")\n if best_qa_answer is None or best_qa_answer.score < qa_answer.score * 4 + context.score:\n best_qa_answer = qa_answer\n best_qa_answer.score = qa_answer.score * 4 + context.score\n # best_qa_context = context\n\n print(\"> Comparing options with best context:\")\n best_comparer_answer = None\n for option in question.options:\n score = 
comparer.compare(option.content, contexts[0].content)\n print(\"Comparer: {}, score: {:.3f}\".format(option.content, score))\n if best_comparer_answer is None or best_comparer_answer.score < score * 3 + contexts[0].score:\n best_comparer_answer = Result(score * 3 + contexts[0].score, option.key)\n\n best_answer = None\n if best_qa_answer.score >= best_comparer_answer.score:\n print(\"> QA method has higher score, comparing QA answer with options:\")\n for option in question.options:\n score = comparer.compare(option.content, best_qa_answer.content)\n print(\"Comparer: {}, score: {:.3f}\".format(option.content, score))\n if best_answer is None or best_answer.score < score:\n best_answer = Result(score, option.key)\n else:\n print(\"> Comparing method has higher score.\")\n best_answer = best_comparer_answer\n\n question.answer = best_answer.content\n print(f\" -> Answer: {question.answer}\")\n return best_answer\n\n\n# db_connect = create_engine('sqlite:///chinook.db')\napp = Flask(__name__)\napi = Api(app)\n\n\n@app.route('/knowledge', methods=['POST'])\ndef upload_knowledge():\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n document = Document(file.filename)\n if os.path.isfile(document.path_txt):\n return make_response(jsonify(message=f\"knowledge {document.name} exists\"), 400)\n print(f\"creating...\")\n document = rawtxt_to_document(file.stream, file.filename)\n print(f\"created {document.path_txt} success, encoding...\")\n success = retriever.encode(document)\n if not success:\n return make_response(jsonify(message=f\"Encode {document.name} fail. Document deleted\"), 500)\n print(f\"encoded {document.path_pt} success\")\n retriever.load_document(document)\n retriever.combine_data()\n return make_response(jsonify(message=f\"Upload and encode {document.name} success\"), 200)\n\n\n@app.route('/knowledge', methods=['DELETE'])\ndef delete_knowledge():\n filename = request.form[\"name\"]\n document = Document(filename)\n if not os.path.isfile(document.path_txt):\n return make_response(jsonify(message=f\"knowledge {document.name} does not exists\"), 400)\n retriever.remove(document)\n\n return make_response(jsonify(message=f\"delete {document.name} success\"), 200)\n\n\n@app.route('/qa', methods=['POST'])\ndef qa_res():\n def question_respond():\n json_questions = json.loads(json.dumps(request.json))\n questions = []\n answers_response = []\n for json_question in json_questions:\n options = []\n for json_option in json_question['options']:\n option = Option(json_option['key'], json_option['content'])\n options.append(option)\n\n question = Question(\n json_question['qn'],\n json_question['content'],\n options)\n questions.append(question)\n\n for question in questions:\n solve_question(question)\n answer = AnsweringResponse(question.qn, question.answer)\n answers_response.append(answer)\n yield json.dumps(answer.__dict__)\n\n # json_string = json.dumps([ob.__dict__ for ob in answers_response])\n\n # return json_string\n\n return app.response_class(stream_with_context(question_respond()))\n\n\nif __name__ == '__main__':\n app.run(port=5000)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"254609215","text":"import socket as s\r\n\r\nsock = s.socket(s.AF_INET, s.SOCK_STREAM)\r\nsite = 
'iu.bmstu.ru'\r\nsock.connect((site, 80))\r\nhttp_zapros =[\r\n 'GET / HTTP/1.1',\r\n 'Host: iu.bmstu.ru',\r\n 'Connection: keep-alive',\r\n 'Accept: text/html',\r\n '\\n']\r\ncontent = '\\n'.join(http_zapros)\r\nprint('Message sent to the server ', http_zapros[1])\r\nprint(content)\r\n\r\nprint('End of message')\r\n# send the request once, then wait for the reply\r\nsock.send(content.encode())\r\nresult = sock.recv(200000)\r\nprint('Server response: ')\r\nprint(result.decode())\r\n","sub_path":"Base_learning/copy_3.py","file_name":"copy_3.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"203055299","text":"from pyspark import SparkContext\nimport os\nimport time\nimport json\nimport sys\n\ns=time.perf_counter()\n# from pyspark.rdd import PipelinedRDD\n\n# os.environ['PYTHON_SPARK'] = '/usr/local/bin/python3.7'\n# os.environ['PYSPARK_DRIVER_PYTHON'] = '/usr/local/bin/python3.7'\n\n# conf=SparkConf().setAppName(\"Task3_ExplorationonMultipleDatasets\").setMaster(\"local[*]\")\n# sc=SparkContext.getOrCreate(conf)\nsc = SparkContext('local[*]', 'Task3_ExplorationonMultipleDatasets')\n\nreviewFile_path = sys.argv[1]\nreviewFile = sc.textFile(reviewFile_path).map(json.loads)\nbusinessFile_path = sys.argv[2]\nbusinessFile = sc.textFile(businessFile_path).map(json.loads)\n\n\ndef swap(s):\n    return (s[1], s[0])\n\n\ndef split_comma(s):\n    return s.split(\",\")\n\n\ndef split_commandcotation(s):\n    return s.split(',\"')\n\n\ndef add(a, b):\n    return a + b\n\n\ndef split_colon(s):\n    return s.split(\":\")\n\n\ndef strip_quotation(s):\n    return s.strip(\"\\\"\")\n\n\ndef printf(s):\n    par = list(s)\n    print(par)\n\n\ndef count(s):\n    par = list(s)\n    print(len(par))\n\n\ndef sumf(iterator):\n    sum, count = 0, 0\n    for v in iterator:\n        sum = sum + v\n        count = count + 1\n    yield (sum, count)\n\n\nreviewFile_RDD = reviewFile.map(lambda s: (s[\"business_id\"], s[\"stars\"])).persist()\n\nbusinessFile_RDD = businessFile.map(lambda s: (s[\"business_id\"], s[\"city\"])).persist()\n\n\njoin_results = reviewFile_RDD.join(businessFile_RDD)\ncity_sum_count = join_results.map(lambda s: (s[1][1], s[1][0])).aggregateByKey((0, 0), lambda u, v: (u[0]+float(v), u[1]+1), lambda u1, u2: (u1[0]+u2[0], u1[1]+u2[1]))\naverage = city_sum_count.map(lambda s: (s[0], float(s[1][0])/s[1][1]))\n# print(average.take(2))\n# sort_average = average.map(swap).sortByKey(False).map(swap).sortByKey()\nsort_average = average.sortByKey().sortBy(lambda s: s[1], False)\nresult = sort_average.collect()\n\n\n\n\n\n\n# print(join_results)\n# print(city_key)\n\nstart_m1 = time.perf_counter()\nresult_m1 = sort_average.collect()[:10]\n# print(result_m1)\nend_m1 = time.perf_counter()\nm1 = end_m1-start_m1\n# print(m1)\n\n\n\nstart_m2 = time.perf_counter()\nresult_m2=sort_average.take(10)\n# print(result_m2)\nend_m2 = time.perf_counter()\nm2=end_m2-start_m2\n# print(m2)\n\n\nfileObject = open(sys.argv[3], 'w')\nfileObject.write(\"city\")\nfileObject.write(\",\")\nfileObject.write(\"stars\")\nfileObject.write(\"\\n\")\nfor i in result:\n    fileObject.write(i[0])\n    fileObject.write(\",\")\n    fileObject.write(str(i[1]))\n    fileObject.write(\"\\n\")\nfileObject.close()\n\noutCome = {\"m1\": m1, \"m2\": m2, \"explanation\": \"When using RDD's collect() method, the data is loaded to the driver's memory which is time-consuming.
However, take() retrieves only 10 in this case.\"}\n\nwith open(sys.argv[4], 'w') as f:\n    json.dump(outCome, f)\n\ne = time.perf_counter()\nprint(e-s)\n","sub_path":"ying_cheng_task3.py","file_name":"ying_cheng_task3.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"70428379","text":"\n\ndef login_check(login):\n    min_length = 1\n    max_length = 20\n    if len(login) < min_length:\n        print(\"Login is too short\")\n        return\n    elif len(login) > max_length:\n        print(\"Login is too long\")\n        return\n\n    # only index into the login after the length checks (an empty login would raise an IndexError)\n    first_symbol = login[0]\n    last_symbol = login[len(login)-1]\n    if first_symbol.isalpha():\n        if last_symbol.isalpha() or last_symbol.isnumeric():\n            print('login correct')\n        else:\n            print('last symbol has to be a-z or 0-9')\n    else:\n        print('login incorrect: login must start with a letter')\n\nif __name__ == '__main__':\n    login = 'aaaabcfdfasfasfasdfsfs22'\n    login_check(login)\n    ","sub_path":"ErgoSolo/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"159657753","text":"from vps_backend_assessment.swapi.utilities import fetch\nfrom nose.tools import assert_true\n\nimport httpretty\n\nBASE_URL='http://example.com/api'\n\n@httpretty.activate\ndef test_fetch_url():\n    \"\"\"\n    Tests that the `fetch` method can fetch a URL and return a response\n    \"\"\"\n\n    # Register the URL with httpretty\n    httpretty.register_uri(\n        httpretty.GET,\n        BASE_URL,\n        body='{\"origin\": \"127.0.0.1\"}'\n    )\n\n    # Fetch the response from the URL\n    response = fetch(BASE_URL)\n\n    # Assert that the response JSON matches the expected value\n    assert_true(response.json() == {'origin': '127.0.0.1'})\n\n","sub_path":"tests/test_utilities.py","file_name":"test_utilities.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"257210764","text":"import os\nimport psycopg2\nimport argparse\nfrom sqlalchemy.engine.url import make_url\n\n\ndef executeScriptsFromFile(filename, cu):\n    fd = open(filename, 'r')\n    print('Reading Setup SQL Queries File')\n    sql_file = fd.read()\n    fd.close()\n    sql_commands = sql_file.split(';')\n    print('Start Applying Setup SQL Queries')\n    for command in sql_commands:\n        try:\n            if not command == '':\n                cu.execute(command)\n            else:\n                continue\n        except psycopg2.OperationalError as msg:\n            print(\"Command skipped: \" + str(msg))\n    print('Finish Applying Setup SQL Queries')\n\n\ndef main():\n    if os.environ.get('SETUP_STATE') == 'True':\n        dispatcher = {1: 'init/reset.sql', 2: 'init/setup.sql'}\n\n        parser = argparse.ArgumentParser()\n        parser.add_argument(\"mode\", help=\"setup mode\", type=int)\n        args = parser.parse_args()\n\n        db_url = 'localhost'\n        db_name = 'ranking'\n        db_user = 'lazypanda'\n        db_pwd = ''\n        db_port = '5432'\n\n        if os.environ.get('DATABASE_URL') is not None:\n            db_remote_url = make_url(os.environ.get('DATABASE_URL'))\n            db_url = db_remote_url.host\n            db_name = db_remote_url.database\n            db_user = db_remote_url.username\n            db_pwd = db_remote_url.password\n            db_port = db_remote_url.port\n\n        conn_string = \"dbname=%s user=%s password=%s host=%s port=%s\" % (db_name, db_user, db_pwd, db_url, db_port)\n        conn = psycopg2.connect(conn_string)\n        print('Postgres Connection Created')\n        conn.autocommit = True\n        cursor = conn.cursor()\n        executeScriptsFromFile(dispatcher[args.mode], cursor)\n        conn.close()\n        print('Postgres
Connection Closed')\n\n\nmain()\n","sub_path":"init/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"227524680","text":"import random\n\nfrom flask import Blueprint\nfrom flask import current_app as app\nfrom flask import redirect, render_template, request, session, url_for\n\nfrom atat.domain.exceptions import AlreadyExistsError, NotFoundError\nfrom atat.domain.permission_sets import PermissionSets\nfrom atat.domain.users import Users\nfrom atat.forms.data import SERVICE_BRANCHES\nfrom atat.jobs import send_mail\nfrom atat.routes.saml_helpers import (\n get_or_create_dev_saml_user,\n load_attributes_from_dev_assertion,\n prepare_idp_dev_url,\n)\nfrom atat.utils import pick\n\nfrom . import current_user_setup, redirect_after_login_url\n\ndev_bp = Blueprint(\"dev\", __name__)\nlocal_access_bp = Blueprint(\"local_access\", __name__)\n\n_ALL_PERMS = [\n PermissionSets.VIEW_PORTFOLIO,\n PermissionSets.VIEW_PORTFOLIO_APPLICATION_MANAGEMENT,\n PermissionSets.VIEW_PORTFOLIO_FUNDING,\n PermissionSets.VIEW_PORTFOLIO_REPORTS,\n PermissionSets.VIEW_PORTFOLIO_ADMIN,\n PermissionSets.EDIT_PORTFOLIO_APPLICATION_MANAGEMENT,\n PermissionSets.EDIT_PORTFOLIO_FUNDING,\n PermissionSets.EDIT_PORTFOLIO_REPORTS,\n PermissionSets.EDIT_PORTFOLIO_ADMIN,\n PermissionSets.PORTFOLIO_POC,\n PermissionSets.VIEW_AUDIT_LOG,\n PermissionSets.MANAGE_CCPO_USERS,\n]\n\n\ndef random_service_branch():\n return random.choice([k for k, v in SERVICE_BRANCHES if k]) # nosec\n\n\n_DEV_USERS = {\n \"sam\": {\n \"dod_id\": \"6346349876\",\n \"first_name\": \"Sam\",\n \"last_name\": \"Stevenson\",\n \"permission_sets\": _ALL_PERMS,\n \"email\": \"sam@example.com\",\n \"service_branch\": random_service_branch(),\n \"phone_number\": \"1234567890\",\n \"citizenship\": \"United States\",\n \"designation\": \"military\",\n },\n \"amanda\": {\n \"dod_id\": \"2345678901\",\n \"first_name\": \"Amanda\",\n \"last_name\": \"Adamson\",\n \"email\": \"amanda@example.com\",\n \"service_branch\": random_service_branch(),\n \"phone_number\": \"1234567890\",\n \"citizenship\": \"United States\",\n \"designation\": \"military\",\n },\n \"brandon\": {\n \"dod_id\": \"3456789012\",\n \"first_name\": \"Brandon\",\n \"last_name\": \"Buchannan\",\n \"email\": \"brandon@example.com\",\n \"service_branch\": random_service_branch(),\n \"phone_number\": \"1234567890\",\n \"citizenship\": \"United States\",\n \"designation\": \"military\",\n },\n \"christina\": {\n \"dod_id\": \"4567890123\",\n \"first_name\": \"Christina\",\n \"last_name\": \"Collins\",\n \"email\": \"christina@example.com\",\n \"service_branch\": random_service_branch(),\n \"phone_number\": \"1234567890\",\n \"citizenship\": \"United States\",\n \"designation\": \"military\",\n },\n \"dominick\": {\n \"dod_id\": \"5678901234\",\n \"first_name\": \"Dominick\",\n \"last_name\": \"Domingo\",\n \"email\": \"dominick@example.com\",\n \"service_branch\": random_service_branch(),\n \"phone_number\": \"1234567890\",\n \"citizenship\": \"United States\",\n \"designation\": \"military\",\n },\n \"erica\": {\n \"dod_id\": \"6789012345\",\n \"first_name\": \"Erica\",\n \"last_name\": \"Eichner\",\n \"email\": \"erica@example.com\",\n \"service_branch\": random_service_branch(),\n \"phone_number\": \"1234567890\",\n \"citizenship\": \"United States\",\n \"designation\": \"military\",\n },\n}\n\n\nclass IncompleteInfoError(Exception):\n @property\n def message(self):\n return 
\"You must provide each of: first_name, last_name and dod_id\"\n\n\n@dev_bp.route(\"/login-dev\", methods=[\"GET\", \"POST\"])\ndef login_dev():\n query_string_parameters = session.get(\"query_string_parameters\", {})\n user = None\n\n if \"sls\" in request.args and request.method == \"GET\":\n return redirect(url_for(\"atat.root\"))\n\n if request.method == \"GET\":\n idp_dev_login_url = prepare_idp_dev_url(request)\n return redirect(idp_dev_login_url)\n\n if \"acs\" in request.args and request.method == \"POST\":\n saml_attributes = load_attributes_from_dev_assertion(request)\n session[\"login_method\"] = \"dev\"\n if not (\n \"username_param\" in query_string_parameters\n or \"dod_id_param\" in query_string_parameters\n ):\n user = get_or_create_dev_saml_user(saml_attributes)\n\n if not user:\n user = get_or_create_non_saml_user(request, query_string_parameters)\n\n next_param = query_string_parameters.get(\"next_param\")\n session.pop(\"query_string_parameters\", None)\n current_user_setup(user)\n return redirect(redirect_after_login_url(next_param))\n\n\ndef get_or_create_non_saml_user(request, query_string_parameters):\n dod_id = query_string_parameters.get(\"dod_id_param\") or request.args.get(\"dod_id\")\n if dod_id is not None:\n user = Users.get_by_dod_id(dod_id)\n else:\n persona = query_string_parameters.get(\"username_param\") or request.args.get(\n \"username\", \"amanda\"\n )\n user = get_or_create_dev_persona(persona)\n\n return user\n\n\ndef get_or_create_dev_persona(persona):\n user_data = _DEV_USERS[persona]\n user = Users.get_or_create_by_dod_id(\n user_data[\"dod_id\"],\n **pick(\n [\n \"permission_sets\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"service_branch\",\n \"phone_number\",\n \"citizenship\",\n \"designation\",\n \"date_latest_training\",\n ],\n user_data,\n ),\n )\n return user\n\n\n@local_access_bp.route(\"/dev-new-user\")\ndef dev_new_user():\n first_name = request.args.get(\"first_name\", None)\n last_name = request.args.get(\"last_name\", None)\n dod_id = request.args.get(\"dod_id\", None)\n\n if None in [first_name, last_name, dod_id]:\n raise IncompleteInfoError()\n\n try:\n Users.get_by_dod_id(dod_id)\n raise AlreadyExistsError(\"User with dod_id {}\".format(dod_id))\n except NotFoundError:\n pass\n\n new_user = {\"first_name\": first_name, \"last_name\": last_name}\n\n created_user = Users.create(dod_id, **new_user)\n\n current_user_setup(created_user)\n return redirect(redirect_after_login_url())\n\n\n@local_access_bp.route(\"/login-local\")\ndef local_access():\n dod_id = request.args.get(\"dod_id\")\n user = None\n\n if dod_id:\n user = Users.get_by_dod_id(dod_id)\n else:\n name = request.args.get(\"username\", \"amanda\")\n user = get_or_create_dev_persona(name)\n\n current_user_setup(user)\n\n return redirect(redirect_after_login_url())\n\n\n@dev_bp.route(\"/test-email\")\ndef test_email():\n send_mail.delay(\n [request.args.get(\"to\")], request.args.get(\"subject\"), request.args.get(\"body\")\n )\n\n return redirect(url_for(\"dev.messages\"))\n\n\n@dev_bp.route(\"/messages\")\ndef messages():\n return render_template(\"dev/emails.html\", messages=app.mailer.messages)\n","sub_path":"atat/routes/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"480546662","text":"import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport discord\nimport json\nfrom discord.ext import tasks\nimport 
time\n\n# Google Spreadsheet connection\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('Credentials.json', scope)\ngoogle_client = gspread.authorize(credentials)\n\nspreadsheet_names = list()\nspreadsheet_names.append('Rolling System')\nspreadsheet = google_client.open(spreadsheet_names[0])\nskills = spreadsheet.worksheet('Skills')\nxp = spreadsheet.worksheet('XP')\n\n\n# Discord connection\ntoken = json.load(open('Credentials.json'))['discord_token']\ndiscord_client = discord.Client()\n\n\n@discord_client.event\nasync def on_message(message):\n\n channel = message.channel\n if message.author == discord_client.user:\n return\n\n if message.content.startswith('!ping'):\n msg = 'Pong {0.author.mention} '.format(message)\n await channel.send(msg)\n\n if message.content.startswith('!find '):\n name = message.content[6:]\n cell = skills.find(name)\n cell2 = xp.find(name)\n row_content = skills.row_values(cell.row)\n row_content2 = skills.row_values(cell2.row)\n if row_content == row_content2:\n await channel.send(row_content)\n\n\n@discord_client.event\nasync def on_ready():\n print('Logged in as')\n print(discord_client.user.name)\n print(discord_client.user.id)\n print('------')\n google_login.start()\n\n\n@tasks.loop(minutes=60)\nasync def google_login():\n print(\"Relogging\", time.asctime())\n google_client.login()\n\ndiscord_client.run(token)\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"283612582","text":"import pyodbc\nfrom flask import Flask, request,jsonify\nfrom flask_cors import CORS\nfrom flask_restful import Resource, Api\nimport mysql.connector\nfrom SQLhelpers import *\n##from endpoints.addMovie import Hello\nimport pyodbc\nserver = 'topserver1.database.windows.net'\ndatabase = 'topDatabase'\nusername = 'topAdmin'\npassword = 'Mitsilancer1'\ndriver= '{ODBC Driver 17 for SQL Server}'\nconn = pyodbc.connect('DRIVER='+driver+';SERVER='+server+';PORT=1433;DATABASE='+database+';UID='+username+';PWD='+ password)\ncursor = conn.cursor()\napp = Flask('topAPI')\nCORS(app)\n\n\n@app.route('/addMovie',methods = ['GET', 'POST'])\n\ndef addMovie():\n if(request.method == 'POST'):\n data = request.get_json()\n\n movieName = data['movieName']\n movieYear = data['year']\n user_id = data['userId']\n rating = 0\n posterURL = data['posterURL']\n statement = 'INSERT INTO movies (movieName,movieYear,posterURL,user_id,rating) VALUES (?,?,?,?,?)'\n cursor.execute(statement,movieName,movieYear,posterURL,user_id,rating)\n conn.commit()\n return jsonify('done')\n\n if(request.method == 'GET'):\n\n return 'he'\n\n@app.route('/home', methods = ['GET','DELETE'])\ndef getMyMovies():\n ## this returns all of the movies for a given user\n if(request.method == 'GET'):\n ## data is sent via url paramaters\n data = request.args\n user_id = data['userId']\n statement = 'SELECT * FROM movies WHERE user_id = '+user_id\n\n cursor.execute(statement)\n movies = cursor.fetchall()\n returnData = []\n\n for row in movies:\n returnData.append(list(row))\n\n return jsonify(returnData)\n return 'no movies'\n\n ## this deletes a given movie\n if(request.method == 'DELETE'):\n data = request.get_json()\n\n deleteMovie_id = (data['movieId'])\n\n statement = \"DELETE FROM movies WHERE movie_id = \"+str(deleteMovie_id)\n cursor.execute(statement)\n conn.commit()\n return 
jsonify(data)\n\n@app.route('/movieRating',methods = ['POST','GET'])\ndef updateRating():\n    if(request.method == 'POST'):\n        data = request.get_json()\n\n        movie_id = str(data['movie_id'])\n        rating = str(data['rating'])\n\n        statement = \"UPDATE movies SET rating = ? WHERE movie_id= ?\"\n        cursor.execute(statement,rating,movie_id)\n        conn.commit()\n        return 'rating updated'\n\n\n@app.route('/topTenRanking',methods = ['POST','GET'])\ndef topTenRanking():\n    if(request.method == 'GET'):\n        ## returns the user's top 10 ranking\n        ## data is sent via url parameters\n        data = request.args\n        user_id = data['userId']\n\n        statement = \"SELECT * FROM topTen WHERE user_id = \"+user_id\n        cursor.execute(statement)\n        rankedMovies = cursor.fetchall()\n        returnData = []\n        for row in rankedMovies:\n            returnData.append(list(row))\n\n        return jsonify(returnData)\n    if(request.method =='POST'):\n\n        data = request.get_json()\n        movie_id = str(data['movieId'])\n        user_id = str(data['userId'])\n        movieName = str(data['movieName'])\n        posterURL = str(data['posterURL'])\n        ranking = str(data['ranking'])\n        ## checking that no movie exists already at that rank\n        rankingExists = True\n        # keep the spaces around AND so the SQL does not collapse into \"user_id =5AND ranking=...\"\n        checkStatement = \"SELECT * FROM topTen WHERE user_id = \"+user_id+\" AND ranking=\"+ranking\n        cursor.execute(checkStatement)\n        checkRankings = cursor.fetchall()\n        if(len(checkRankings) == 0):\n            rankingExists = False\n\n        if(rankingExists == False):\n            statement = \"INSERT into topTen (movieName,posterURL,user_id,ranking,movie_id) VALUES (?,?,?,?,?)\"\n            cursor.execute(statement,movieName,posterURL,user_id,ranking,movie_id)\n            conn.commit()\n            return 'ranking inserted'\n\n\n        if(rankingExists):\n            statement = \"UPDATE topTen SET movieName = ?,posterURL = ?, movie_id = ? WHERE ranking = ?\"\n            cursor.execute(statement,movieName,posterURL,movie_id,ranking)\n\n            conn.commit()\n            return 'ranking updated'\n\nif __name__ == '__main__':\n    app.run(port='5002')\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"442912235","text":"import zmx_api\nimport zmx # noqa\nfrom progressbar import progressbar\nimport os\nimport numpy as np\n\nmce_rows_to_optimize = [1, 2, 3, 19, 20]\nPRECISE_VIGNETTING = True\n\nTheSystem, ZOSAPI, ZOSAPI_NetHelper = zmx_api.connect_zmx_interactive()\n\nMFE = TheSystem.MFE\nMCE = TheSystem.MCE\nMF_DIROUT = './center_pri_footprint/'\n\nNconf = 85\n# Nconf = 7\n\n\ndef vary_vars(MCE, mce_rows_to_optimize, active_conf):\n    print(\"Varying a bit starting point\")\n    op1_row = MCE.GetOperandAt(mce_rows_to_optimize[0])\n    cell1 = op1_row.GetOperandCell(active_conf)\n    cell1.DoubleValue = cell1.DoubleValue - 1.0\n    cell1.DoubleValue = 38.0\n\n    op2_row = MCE.GetOperandAt(mce_rows_to_optimize[1])\n    cell2 = op2_row.GetOperandCell(active_conf)\n    cell2.DoubleValue = cell2.DoubleValue - 1.0\n    cell2.DoubleValue = 38.0\n\n# op3_row = MCE.GetOperandAt(mce_rows_to_optimize[2])\n# cell3 = op3_row.GetOperandCell(active_conf)\n# cell3.DoubleValue = 0.0\n\n\ndef do_we_need_to_reoptimize(MFE):\n    \"\"\"returns True if we need to reoptimize.\n    returns False if we reached a viable solution.\n    \"\"\"\n    # check that we found a solution and run optimizer again if not\n    MFE.CalculateMeritFunction()\n    Nop = MFE.NumberOfOperands\n    REOPTIMIZE = False\n    for j in range(6):\n        op = MFE.GetOperandAt(Nop - j)\n        contribution = op.Contribution\n        print(\"Contribution %i: %1.2e\" % (j, contribution))\n        REOPTIMIZE = REOPTIMIZE or (contribution >
1e-7)\n    op_margin = MFE.GetOperandAt(Nop - 7)\n    reached_target = np.isclose(op_margin.Value,\n                                 op_margin.Target, atol=20)\n    print(\"Margin: %1.2e\" % op_margin.Value)\n    REOPTIMIZE = REOPTIMIZE or not reached_target\n\n    op_equa = MFE.GetOperandAt(Nop - 8)\n    reached_target = op_equa.Value < 20\n    print(\"Avg Deviation from edge shape: %1.2f\" % op_equa.Value)\n    REOPTIMIZE = REOPTIMIZE or not reached_target\n    if REOPTIMIZE:\n        print(\"Not OK.\")\n    else:\n        print(\"Passed, OK.\")\n    return REOPTIMIZE\n\n\nfor active_conf in progressbar(range(1, Nconf + 1)):\n    mf_fnameout = os.path.abspath(os.path.join(MF_DIROUT,\n                                               \"MF_conf%02i.MF\" % active_conf))\n    MFE.LoadMeritFunction(mf_fnameout)\n    TheSystem.Tools.RemoveAllVariables()\n\n    op_svig = MFE.GetOperandAt(6)\n    op_svig.GetOperandCell(2).IntegerValue = 1\n\n# check that we found a solution and run optimizer again if not\n    REOPTIMIZE = do_we_need_to_reoptimize(MFE)\n\n    if REOPTIMIZE:\n        print(\"Reoptimizing:\")\n        print(\"\\nFAILED_CAM: %i\" % active_conf)\n# vary_vars(MCE, mce_rows_to_optimize, active_conf)\n# start from where left\n        TheSystem.Tools.RemoveAllVariables()\n        zmx.set_variables_or_const(mce_rows_to_optimize,\n                                   active_conf,\n                                   MCE, ZOSAPI, vars=True)\n        zmx.zemax_optimize(TheSystem, ZOSAPI,\n                           algorithm='DLS',\n                           CyclesAuto=False)\n        zmx.zemax_optimize(TheSystem, ZOSAPI,\n                           algorithm='DLS',\n                           CyclesAuto=False)\n\n        REOPTIMIZE = do_we_need_to_reoptimize(MFE)\n\n        if REOPTIMIZE:\n            print(\"Reoptimizing:\")\n            print(\"\\nFAILED_CAM: %i\" % active_conf)\n            #vary_vars(MCE, mce_rows_to_optimize, active_conf)\n            TheSystem.Tools.RemoveAllVariables()\n            zmx.set_variables_or_const(mce_rows_to_optimize,\n                                       active_conf,\n                                       MCE, ZOSAPI, vars=True)\n            zmx.zemax_optimize(TheSystem, ZOSAPI,\n                               algorithm='DLS',\n                               CyclesAuto=True)\n            REOPTIMIZE = do_we_need_to_reoptimize(MFE)\n            if REOPTIMIZE:\n                print(\"Reoptimizing cam %i:\" % active_conf)\n# vary_vars(MCE, mce_rows_to_optimize, active_conf)\n                TheSystem.Tools.RemoveAllVariables()\n                zmx.set_variables_or_const(mce_rows_to_optimize,\n                                           active_conf,\n                                           MCE, ZOSAPI, vars=True)\n                zmx.zemax_optimize(TheSystem, ZOSAPI,\n                                   algorithm='OD',\n                                   CyclesAuto=False)\n                REOPTIMIZE = do_we_need_to_reoptimize(MFE)\n                if REOPTIMIZE:\n                    print(\"Reoptimizing cam %i:\" % active_conf)\n                    print(\"I'm giving up after this!\")\n                    vary_vars(MCE, mce_rows_to_optimize, active_conf)\n                    TheSystem.Tools.RemoveAllVariables()\n                    zmx.set_variables_or_const(mce_rows_to_optimize,\n                                               active_conf,\n                                               MCE, ZOSAPI, vars=True)\n                    zmx.zemax_optimize(TheSystem, ZOSAPI,\n                                       algorithm='DLS',\n                                       CyclesAuto=True)\n","sub_path":"ZOS_API_scripts/S4camScripts/time_reverse/TMP/setl_lyot_stop_scripts/8_check_elliptical_ap_after_roundoff.py","file_name":"8_check_elliptical_ap_after_roundoff.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"21623500","text":"from os import system\nfrom time import sleep\n\nsystem(\"apt install figlet > null\")\nsystem(\"clear\")\ndef banner():\n\tsystem(\"figlet AUTOMAC\")\n\tprint(\" ============================================\\n|\\n|-- By Mugo Squero\\n\\n\")\n\nbanner()\n\nkart = str(input(\"Please enter your network interface : \"))\n\nsystem(\"clear\")\nbanner()\n\nisc = str(input(\" 0 = Start Automac\\n 1 = Stop Automac!\\n\\n?: \"))\n\n\nif (isc == \"1\"):\n\tsystem(\"clear\")\n\tbanner()\n\tprint(\"Stopping..!\\n\\n\")\n\tsleep(2)\n\tsystem(\"macchanger -p \" + kart + \" > null\")\n\tsystem(\"rm null\")\n\tprint(\"Old MAC Address Restored!\")
\n\texit()\n\nelse:\n\tsystem(\"clear\")\n\tbanner()\n\tsystem(\"clear\")\n\tbanner()\n\nsure = int(input(\"Time to wait between each change? (seconds) : \"))\n\nsystem(\"clear\")\nbanner()\nprint(\"Starting process...\")\nsleep(2)\nsystem(\"rm null\")\nsystem(\"clear\")\nwhile 1:\n\tbanner()\n\tprint(55 * \"-\")\n\tsystem(\"macchanger -a \" + kart)\n\tprint(55 * \"-\")\n\tsleep(sure)\n\tsystem(\"clear\")\n","sub_path":"automac.py","file_name":"automac.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"154478101","text":"# TO-DO: complete the helper function below to merge 2 sorted arrays\ndef merge( arrA, arrB ):\n    elements = len( arrA ) + len( arrB )\n    merged_arr = [0] * elements\n    # TO-DO\n    cA, cB, cM = 0,0,0\n    \n    while(cA < len(arrA) and cB < len(arrB)):\n        if(arrA[cA] < arrB[cB]):\n            merged_arr[cM] = arrA[cA]\n            cA += 1\n        else:\n            merged_arr[cM] = arrB[cB]\n            cB += 1\n        cM += 1\n    \n    if(cA < len(arrA)):\n        while(cM < len(merged_arr)):\n            merged_arr[cM] = arrA[cA]\n            cM +=1\n            cA +=1\n\n    if(cB < len(arrB)):\n        while(cM < len(merged_arr)):\n            merged_arr[cM] = arrB[cB]\n            cM +=1\n            cB +=1\n\n    return merged_arr\n\n\n# TO-DO: implement the Merge Sort function below USING RECURSION\ndef merge_sort( arr ):\n    # TO-DO\n    if(len(arr) <= 1):\n        return arr\n    else:\n        half = len(arr) // 2\n        arr1 = arr[half:]\n        arr2 = arr[:half]\n\n        sorted_1 = merge_sort(arr1)\n        sorted_2 = merge_sort(arr2)\n\n        return merge(sorted_1, sorted_2)\n\n    return arr\n\n\n# STRETCH: implement an in-place merge sort algorithm\ndef merge_in_place(arr, start, mid, end):\n    # TO-DO\n    for i in range(start, end):\n        for x in range(start, end):\n            if(arr[x] > arr[x+1]):\n                t = arr[x]\n                arr[x] = arr[x+1]\n                arr[x+1] = t\n    return arr\n\ndef merge_sort_in_place(arr, l, r): \n    # TO-DO\n    m = (l+r)//2\n    if(r <= l):\n        return arr\n    \n    merge_sort_in_place(arr, l, m)\n    merge_sort_in_place(arr, m+1, r)\n\n    merge_in_place(arr, l, m, r)\n    return arr\n\n\n# STRETCH: implement the Timsort function below\n# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt\ndef timsort( arr ):\n\n    return arr\n","sub_path":"src/recursive_sorting/recursive_sorting.py","file_name":"recursive_sorting.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"280199789","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport copy\nimport json\nimport os\nclass Ui_RISCV_Simulator(object):\n    # reg=[[0 for x in range(0,32)] for x in range(0,32)]\n    # MEM=[0 for x in range(0,10000)]\n    def breakpoint(self):\n        #print(\"in\")\n        if self.bp!=-1:\n            item=self.listWidget_2.item(self.bp)\n            item.setBackground(QtGui.QColor('white'))\n        self.bp=self.listWidget_2.currentRow()\n        item=self.listWidget_2.item(self.bp)\n        item.setBackground(QtGui.QColor('red'))\n        self.listWidget_2.clearSelection()\n    def scroll(self):\n        try:\n            val=int(self.textEdit_3.toPlainText(),16)\n        except ValueError:\n            self.textEdit_3.setText('Wrong Address')\n            return\n        if val%4==0:\n            self.listWidget.setCurrentRow((12500-val)//4)\n\n    def reset_mem(self):#check for code in memory\n        self.curr=-1\n        self.step=-1\n        self.refresh_mem()\n    def refresh_mem(self):#add for decimal\n        MEM=[]\n        if self.step==-1:\n            MEM=[0 for x in range(0,100000)]\n        else:\n            MEM=self.data_gui['mem'][self.step]\n        self.listWidget.clear()\n        for i in range(32,100000,32):\n            out=str(hex((i-32)//8))+'\\t         '\n            word=MEM[i-32:i]\n            for j in
range(0,32,8):\n                byte=word[j:j+8]\n                byte_=''\n                for b in byte:\n                    byte_+=str(b)\n                # return\n                # byte_ is a binary string, so decode it with base 2 for both displays\n                if self.typ==0:\n                    out+=(str(hex(int(byte_,2)))+'\\t     ')\n                elif self.typ==1:\n                    out+=(str(int(byte_,2))+'\\t     ')\n            # print(out)\n            self.listWidget.insertItem((i//32)-1,out)\n            # nibble=MEM[i-32:i]\n            # word=\"\"\n            # for j in range(0,32,8):\n            #     byte=nibble[i-j-8:i-j]\n            #     byte_=''\n            #     for k in byte:\n            #         byte_+=str(k).\n            \n            # self.listWidget.insertItem(i//32)\n    def cancel_connect(self):\n        # reset both the memory and the register views\n        self.reset_mem()\n        self.reset_reg()\n        self.listWidget_2.clearSelection()\n        self.curr=-1\n    def refresh_reg(self):\n        ## for refreshing registers i use first clock cycle data\n        reg=[]\n        if self.step==-1:\n            reg=[[0 for x in range(0,32)] for x in range(0,32)]\n        else:\n            reg=self.data_gui['reg'][self.step]\n        self.listWidget_3.clear()\n        print(reg[2])\n        if self.typ==0:\n            for i in range(32):\n                self.listWidget_3.insertItem(i,\"x\"+str(i)+\"\\t\"+str(hex(int(''.join([str(j) for j in reg[i]]),2))))\n        elif self.typ==1:\n            for i in range(32):\n                self.listWidget_3.insertItem(i,\"x\"+str(i)+\"\\t\"+str((int(''.join([str(j) for j in reg[i]]),2))))\n        elif self.typ==2:\n            for i in range(32):\n                self.listWidget_3.insertItem(i,\"x\"+str(i)+\"\\t\"+str(hex(int(''.join([str(j) for j in reg[i]]),2))))\n    def reset_reg(self):\n        self.step=-1\n        self.curr=-1\n        self.refresh_reg()\n    def check_log_click(self):\n        self.curr=-1\n        self.listWidget_2.clear()\n        self.listWidget_5.clear()\n        data=self.textEdit.toPlainText()\n        f=open('pipelined/testing.asm','w+')\n        f.write(data)\n        f.close()\n        os.system('python Phase3.py')\n        f=open('gui_data.json','r+')\n        self.data_gui=json.load(f)\n        f.close()\n        for i in range(len(self.data_gui['hex'])):\n            if len(self.data_gui['hex'][i])<10:\n                self.data_gui['hex'][i]+='0'*(10-len(self.data_gui['hex'][i]))\n\n            self.listWidget_2.insertItem(i,\"\\t\\t\".join([hex(i*4),self.data_gui['hex'][i],self.data_gui['commands'][i].replace(',',' '),self.data_gui['commands'][i]]))\n        self.listWidget_2.setCurrentRow(0)\n    def run_connect(self):\n        # items = []\n        # for index in range(self.listWidget_2.count()):\n        #     items.append(self.listWidget_2.item(index))\n        # mfile=[]\n        # for item in items:\n        #     temp=item.text().split('\\t\\t')[1]\n        #     mfile.append(temp)\n        # full_run(mfile,0)\n        self.step=len(self.data_gui['pc'])-1\n        self.curr=self.data_gui['pc'][self.step]\n        self.listWidget_2.clearSelection()\n        self.refresh_reg()\n        self.refresh_mem()\n    def step_connect(self):\n        # print(self.curr)\n        if self.step<len(self.data_gui['pc']):\n            self.step+=1\n        if self.step==len(self.data_gui['pc']):\n            self.listWidget_2.clearSelection()\n            return\n        self.curr=self.data_gui['pc'][self.step]\n        self.listWidget_2.setCurrentRow(self.curr)\n        self.refresh_mem()\n        self.refresh_reg()\n    def reset_connect(self):\n        self.reset_reg()\n        self.reset_mem()\n        self.listWidget_2.setCurrentRow(0)\n    def dump_connect(self):\n        self.textEdit_2.setText('\\n'.join(self.data_gui['hex']))\n    def prev_connect(self):\n        if self.step>=0:\n            self.step-=1\n        if self.step==-1:\n            self.listWidget_2.clearSelection()\n        else:\n            self.curr=self.data_gui['pc'][self.step]\n            self.listWidget_2.setCurrentRow(self.curr)\n        self.refresh_mem()\n        self.refresh_reg()\n    def type_of_out(self):\n        self.typ=self.listWidget_4.currentRow()\n        self.refresh_mem()\n        self.refresh_reg()\n    \n    def setupUi(self, RISCV_Simulator):\n        self.typ=0\n        self.curr=-1\n        self.step=-1\n        self.data_gui={}\n        self.bp=-1\n        RISCV_Simulator.setObjectName(\"RISCV_Simulator\")\n        RISCV_Simulator.resize(1440, 946)\n        self.centralwidget = QtWidgets.QWidget(RISCV_Simulator)\n
self.centralwidget.setObjectName(\"centralwidget\")\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 1421, 1001))\n self.tabWidget.setMouseTracking(True)\n self.tabWidget.setTabletTracking(True)\n self.tabWidget.setAutoFillBackground(True)\n self.tabWidget.setObjectName(\"tabWidget\")\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName(\"tab\")\n self.textEdit = QtWidgets.QTextEdit(self.tab)\n self.textEdit.setGeometry(QtCore.QRect(0, 0, 1081, 631))\n self.textEdit.setMouseTracking(True)\n self.textEdit.setObjectName(\"textEdit\")\n font = QtGui.QFont()\n font.setPointSize(10)\n self.textEdit.setFont(font)\n self.label = QtWidgets.QLabel(self.tab)\n self.label.setGeometry(QtCore.QRect(0, 660, 111, 41))\n self.label.setStyleSheet(\"font: 11pt \\\"MS Shell Dlg 2\\\";\\n\"\n\"\")\n self.textEdit.setStyleSheet(\"font: 15pt \\\"MS Shell Dlg 2\\\";\\n\"\n\"\") \n self.label.setTextFormat(QtCore.Qt.PlainText)\n self.label.setObjectName(\"label\")\n self.listWidget_5 = QtWidgets.QListWidget(self.tab)\n self.listWidget_5.setGeometry(QtCore.QRect(-5, 701, 1081, 141))\n self.listWidget_5.setObjectName(\"listWidget_5\")\n self.pushButton_2 = QtWidgets.QPushButton(self.tab)\n self.pushButton_2.setGeometry(QtCore.QRect(120, 640, 281, 51))\n self.pushButton_2.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.tabWidget.addTab(self.tab, \"\")\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName(\"tab_2\")\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(250, 40, 81, 61))\n self.pushButton_3.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 190, 81, 41))\n self.label_2.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(160, 180, 131, 51))\n self.label_3.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(400, 180, 131, 51))\n self.label_4.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_4.setObjectName(\"label_4\")\n self.label_5 = QtWidgets.QLabel(self.tab_2)\n self.label_5.setGeometry(QtCore.QRect(680, 180, 131, 51))\n self.label_5.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_5.setObjectName(\"label_5\")\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_4.setGeometry(QtCore.QRect(20, 130, 81, 41))\n self.pushButton_4.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.pushButton_4.setObjectName(\"pushButton_4\")\n self.pushButton_5 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_5.setGeometry(QtCore.QRect(130, 130, 81, 41))\n self.pushButton_5.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.pushButton_5.setObjectName(\"pushButton_5\")\n self.pushButton_5.clicked.connect(self.step_connect)\n self.pushButton_6 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_6.setGeometry(QtCore.QRect(250, 130, 81, 41))\n self.pushButton_6.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.pushButton_6.setObjectName(\"pushButton_6\")\n self.pushButton_7 = QtWidgets.QPushButton(self.tab_2)\n 
self.pushButton_7.setGeometry(QtCore.QRect(360, 130, 81, 41))\n self.pushButton_7.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.pushButton_7.setObjectName(\"pushButton_7\")\n self.pushButton_8 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_8.setGeometry(QtCore.QRect(460, 130, 81, 41))\n self.pushButton_8.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.pushButton_8.setObjectName(\"pushButton_8\")\n self.pushButton = QtWidgets.QPushButton(self.tab_2)\n self.pushButton.setGeometry(QtCore.QRect(1270, 820, 121, 81))\n self.pushButton.setStyleSheet(\"font: 10pt \\\"MS Shell Dlg 2\\\";\")\n self.pushButton.setObjectName(\"pushButton\")\n self.listWidget_2 = QtWidgets.QListWidget(self.tab_2)\n self.listWidget_2.setGeometry(QtCore.QRect(5, 231, 761, 531))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.listWidget_2.setFont(font)\n self.listWidget_2.setObjectName(\"listWidget_2\")\n self.tabWidget_2 = QtWidgets.QTabWidget(self.tab_2)\n self.tabWidget_2.setGeometry(QtCore.QRect(960, 10, 431, 811))\n self.tabWidget_2.setObjectName(\"tabWidget_2\")\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName(\"tab_3\")\n self.listWidget_3 = QtWidgets.QListWidget(self.tab_3)\n self.listWidget_3.setGeometry(QtCore.QRect(5, 11, 341, 761))\n self.listWidget_3.setObjectName(\"listWidget_3\")\n font = QtGui.QFont()\n font.setPointSize(10)\n self.listWidget_3.setFont(font)\n self.tabWidget_2.addTab(self.tab_3, \"\")\n self.tab_4 = QtWidgets.QWidget()\n self.tab_4.setObjectName(\"tab_4\")\n self.label_6 = QtWidgets.QLabel(self.tab_4)\n self.label_6.setGeometry(QtCore.QRect(10, 0, 91, 31))\n self.label_6.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_6.setObjectName(\"label_6\")\n self.label_7 = QtWidgets.QLabel(self.tab_4)\n self.label_7.setGeometry(QtCore.QRect(110, 0, 41, 31))\n self.label_7.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_7.setObjectName(\"label_7\")\n self.label_8 = QtWidgets.QLabel(self.tab_4)\n self.label_8.setGeometry(QtCore.QRect(190, 0, 31, 31))\n self.label_8.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_8.setObjectName(\"label_8\")\n self.label_9 = QtWidgets.QLabel(self.tab_4)\n self.label_9.setGeometry(QtCore.QRect(270, 0, 31, 31))\n self.label_9.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_9.setObjectName(\"label_9\")\n self.label_10 = QtWidgets.QLabel(self.tab_4)\n self.label_10.setGeometry(QtCore.QRect(360, 0, 31, 31))\n self.label_10.setStyleSheet(\"font: 12pt \\\"MS Shell Dlg 2\\\";\")\n self.label_10.setObjectName(\"label_10\")\n self.listWidget = QtWidgets.QListWidget(self.tab_4)\n self.listWidget.setGeometry(QtCore.QRect(5, 31, 411, 741))\n self.listWidget.setStyleSheet(\"font: 10pt \\\"MS Shell Dlg 2\\\";\")\n self.listWidget.setObjectName(\"listWidget\")\n font = QtGui.QFont()\n font.setPointSize(15)\n self.listWidget.setFont(font)\n self.tabWidget_2.addTab(self.tab_4, \"\")\n self.label_11 = QtWidgets.QLabel(self.tab_2)\n self.label_11.setGeometry(QtCore.QRect(800, 530, 131, 61))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_11.setFont(font)\n self.label_11.setObjectName(\"label_11\")\n self.textEdit_2 = QtWidgets.QTextEdit(self.tab_2)\n self.textEdit_2.setGeometry(QtCore.QRect(10, 780, 671, 101))\n self.textEdit_2.setObjectName(\"textEdit_2\")\n self.textEdit_3 = QtWidgets.QTextEdit(self.tab_2)\n self.textEdit_3.setGeometry(QtCore.QRect(1120, 840, 121, 51))\n self.textEdit_3.setObjectName(\"textEdit_3\")\n font = QtGui.QFont()\n 
font.setPointSize(10)\n self.textEdit_2.setFont(font)\n self.listWidget_4 = QtWidgets.QListWidget(self.tab_2)\n self.listWidget_4.setGeometry(QtCore.QRect(800, 630, 131, 61))\n self.listWidget_4.setObjectName(\"listWidget_4\")\n self.listWidget_5 = QtWidgets.QListWidget(self.tab_2)\n self.listWidget_5.setGeometry(QtCore.QRect(800, 800, 131, 61))\n self.listWidget_5.setObjectName(\"listWidget_5\")\n self.tabWidget.addTab(self.tab_2, \"\")\n self.pushButton_2.clicked.connect(self.check_log_click)\n RISCV_Simulator.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(RISCV_Simulator)\n self.statusbar.setObjectName(\"statusbar\")\n RISCV_Simulator.setStatusBar(self.statusbar)\n self.actionRun = QtWidgets.QAction(RISCV_Simulator)\n self.actionRun.setObjectName(\"actionRun\")\n self.retranslateUi(RISCV_Simulator)\n # self.listWidget_3.setCurrentRow(0)\n self.tabWidget.setCurrentIndex(0)\n self.tabWidget_2.setCurrentIndex(0)\n self.pushButton_4.clicked.connect(self.run_connect)\n temp=['hex','decimal','ASCII']\n for i in range(len(temp)):\n self.listWidget_4.insertItem(i,temp[i])\n # print(typ)\n # self.refresh_mem(self,data['mem'][0])\n # self.refresh_reg(self,data['reg'][0])\n self.listWidget_4.clicked.connect(self.type_of_out)\n self.pushButton_8.clicked.connect(self.dump_connect)\n self.pushButton.clicked.connect(self.scroll)\n self.pushButton_6.clicked.connect(self.prev_connect)\n self.pushButton_7.clicked.connect(self.reset_connect)\n self.pushButton_3.clicked.connect(self.cancel_connect)\n self.listWidget_2.doubleClicked.connect(self.breakpoint)\n self.reset_mem()\n self.reset_reg()\n QtCore.QMetaObject.connectSlotsByName(RISCV_Simulator)\n\n def retranslateUi(self, RISCV_Simulator):\n _translate = QtCore.QCoreApplication.translate\n RISCV_Simulator.setWindowTitle(_translate(\"RISCV_Simulator\", \"MainWindow\"))\n self.label.setText(_translate(\"RISCV_Simulator\", \"Error Log\"))\n self.pushButton_2.setText(_translate(\"RISCV_Simulator\", \"Assemble & Simulate the editor\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"RISCV_Simulator\", \"Editor\"))\n self.pushButton_3.setText(_translate(\"RISCV_Simulator\", \"Cancel\"))\n self.label_2.setText(_translate(\"RISCV_Simulator\", \"PC\"))\n self.label_3.setText(_translate(\"RISCV_Simulator\", \"Machine Code\"))\n self.label_4.setText(_translate(\"RISCV_Simulator\", \"Basic Code\"))\n self.label_5.setText(_translate(\"RISCV_Simulator\", \"Original Code\"))\n self.pushButton_4.setText(_translate(\"RISCV_Simulator\", \"Run\"))\n self.pushButton_5.setText(_translate(\"RISCV_Simulator\", \"Step\"))\n self.pushButton_6.setText(_translate(\"RISCV_Simulator\", \"Prev\"))\n self.pushButton_7.setText(_translate(\"RISCV_Simulator\", \"Reset\"))\n self.pushButton_8.setText(_translate(\"RISCV_Simulator\", \"Dump\"))\n self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_3), _translate(\"RISCV_Simulator\", \"Registers\"))\n self.label_6.setText(_translate(\"RISCV_Simulator\", \"Address\"))\n self.label_7.setText(_translate(\"RISCV_Simulator\", \"+1\"))\n self.label_8.setText(_translate(\"RISCV_Simulator\", \"+2\"))\n self.label_9.setText(_translate(\"RISCV_Simulator\", \"+3\"))\n self.label_10.setText(_translate(\"RISCV_Simulator\", \"+4\"))\n self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_4), _translate(\"RISCV_Simulator\", \"Memory\"))\n self.label_11.setText(_translate(\"RISCV_Simulator\", \"Display Settings\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), 
_translate(\"RISCV_Simulator\", \"Simulator\"))\n self.actionRun.setText(_translate(\"RISCV_Simulator\", \"Run\"))\n self.pushButton.setText(_translate(\"RISCV_Simulator\", \"Jump to\"))\n\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n RISCV_Simulator = QtWidgets.QMainWindow()\n ui = Ui_RISCV_Simulator()\n ui.setupUi(RISCV_Simulator)\n RISCV_Simulator.show()\n sys.exit(app.exec_())\n","sub_path":"final_1.py","file_name":"final_1.py","file_ext":"py","file_size_in_byte":17761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"296899706","text":"'''\n\tAuthor: Vedesh Karampudi\n\tPython Version: 3.6.3\n'''\nfrom typing import List\nimport numpy as np\nimport json\nfrom Models import StandardTfIdf,OkapiBM25\n\nclass Testing:\n\tdef __init__(self,model: str,paragraphs: List[str],weights: List[float],top_n: int):\n\t\tself.model_name = model\n\t\tself.paragraphs = paragraphs\n\t\tself.weights = weights\n\t\tself.top_n = top_n\n\t\tif self.model_name == 'StandardTfIdf':\n\t\t\tself.model = StandardTfIdf()\n\t\telif self.model_name == 'OkapiBM25':\n\t\t\tself.model = OkapiBM25()\n\t\telse:\n\t\t\traise ValueError(\"The model name you have entered is not acceptable\")\n\n\tdef getAccuracy(self,questions_filename: str) -> float:\n\t\tself.questions_filename = questions_filename\n\t\twith open(self.questions_filename) as questions_file:\n\t\t\tquestions_json = json.load(questions_file)\n\t\ttotal_questions_count = 0\n\t\tcorrectly_answered_questions_count = 0\n\t\tfor question in questions_json:\n\t\t\tquestion_text = question['text']\n\t\t\tparagraph_id = question['paragraph_id']\n\t\t\tquestion_text = str(question_text)\n\t\t\tparagraph_id = int(paragraph_id)\n\t\t\ttop_n_paragraphs = self.model.top_n_paragraphs(question_text,self.paragraphs,self.weights,self.top_n)\n\t\t\tif paragraph_id in top_n_paragraphs:\n\t\t\t\tcorrectly_answered_questions_count+= 1\n\t\t\ttotal_questions_count+= 1\n\t\tprint(\"Total questions are \" + str(total_questions_count))\n\t\taccuracy = np.divide(correctly_answered_questions_count,total_questions_count)\n\t\treturn (accuracy*100)\n\n\n\n\n\n","sub_path":"Testing_programs/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"120538934","text":"import csv\n\n\ndef dict_to_csv(data: dict):\n with open('group_list.csv', 'w') as group:\n fields = ['First name', 'Last name', 'Telegram tag']\n writer = csv.DictWriter(group, fieldnames=fields)\n writer.writeheader()\n writer.writerows(data)\n\n\nif __name__ == '__main__':\n from .data import group_list\n\n dict_to_csv(group_list)\n","sub_path":"week3/lesson1-python-standart-library/homework/EugeneZabolotny/utils/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"81996854","text":"import boto3\n\ndef detect_labels(photo, bucket):\n # returns number of people detected in image\n client=boto3.client('rekognition')\n response = client.detect_labels(Image={'S3Object':{'Bucket':bucket,'Name':photo}},\n MaxLabels=5)\n sum = 0\n for label in response['Labels']:\n if (label['Name'] == 'Person'):\n for instance in label['Instances']:\n sum += 1\n return sum\n\ndef send_to_fifo(count, fileName, queueName):\n sqs = boto3.resource('sqs')\n queue = 
sqs.get_queue_by_name(QueueName=queueName)\n response = queue.send_message(\n MessageBody=fileName,\n MessageGroupId = \"counts\",\n MessageAttributes={\n 'Count': {\n 'StringValue': str(count),\n 'DataType': 'String'\n }\n }\n )\n return response\n \ndef main(event, context):\n # save event to logs\n\n filename = f\"\"\"{event['Records'][0]['body']}\"\"\"\n\n s3_client = boto3.client('s3')\n\n #destination_bucket_name = 'bucketswen614'\n\n # Bucket Name where file was uploaded\n source_bucket_name = 'bucketswen614'\n # Copy Source Object\n #copy_source_object = {'Bucket': source_bucket_name, 'Key': filename}\n\n # S3 copy object operation\n #s3_client.copy_object(CopySource=copy_source_object, Bucket=destination_bucket_name, Key=filename)\n\n\n # Send to DB FIFO Queue\n send_to_fifo(detect_labels(filename, source_bucket_name), filename, 'dbqueue.fifo')\n \n s3_client.delete_object(Bucket=source_bucket_name, Key=filename)\n\n return {\n 'statusCode': 200,\n 'body': event\n }","sub_path":"hello-cdk/hello_cdk/send_to_rekognition/lambda-handler.py","file_name":"lambda-handler.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"435304729","text":"class ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def getDecimalValue(self, head: ListNode) -> int:\n num = head.val\n while head.next:\n num = num * 2 + head.next.val\n head = head.next\n return num\n\n\nval1 = ListNode(1)\nval2 = ListNode(0)\nval3 = ListNode(1)\nval4 = ListNode(1)\nval1.next = val2\nval2.next = val3\nval3.next = val4\n\nsol = Solution()\nprint(sol.getDecimalValue(val1))\n","sub_path":"1290_Convert_Binary_Number_in_a_Linked_List_to_Integer.py","file_name":"1290_Convert_Binary_Number_in_a_Linked_List_to_Integer.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"534288573","text":"import pytest\nimport raven\nfrom raven.models.rv import RV, RVI, Ost, RVFile, isinstance_namedtuple\nimport datetime as dt\nfrom collections import namedtuple\nfrom .common import TESTDATA\nfrom pathlib import Path\n\n\nclass TestRVFile:\n\n def test_simple_rv(self):\n fn = list(TESTDATA['raven-hmets'].glob('*.rvp'))[0]\n rvf = RVFile(fn)\n\n assert rvf.ext == 'rvp'\n assert rvf.stem == 'raven-hmets-salmon'\n assert not rvf.is_tpl\n\n def test_simple_tpl(self):\n fn = list(TESTDATA['ostrich-gr4j-cemaneige'].glob('*.rvp.tpl'))[0]\n rvf = RVFile(fn)\n\n assert rvf.ext == 'rvp'\n assert rvf.stem == 'raven-gr4j-salmon'\n assert rvf.is_tpl\n\n def test_ostIn(self):\n fn = list(TESTDATA['ostrich-gr4j-cemaneige'].glob('ostIn.txt'))[0]\n rvf = RVFile(fn)\n\n assert rvf.ext == 'txt'\n assert rvf.stem == 'ostIn'\n assert rvf.is_tpl\n\n def test_tags(self):\n rvp = list((Path(raven.__file__).parent / 'models' / 'raven-gr4j-cemaneige').glob(\"*.rvp\"))[0]\n rvf = RVFile(rvp)\n\n assert isinstance(rvf.tags, list)\n assert 'params.GR4J_X3' in rvf.tags\n\n def test_fail(self):\n fn = Path(raven.__file__).parent\n with pytest.raises(ValueError):\n RVFile(fn)\n\n\nclass TestRV:\n\n def test_end_date(self):\n rvi = RVI(run_name='test',\n start_date=dt.datetime(2000, 1, 1),\n end_date=dt.datetime(2000, 1, 11),\n )\n\n assert 10 == rvi.duration\n\n rvi.duration = 11\n assert dt.datetime(2000, 1, 12) == rvi.end_date\n\n def test_params(self):\n class RVP(RV):\n params = namedtuple('p', 'x, y')\n\n rvp = RVP()\n rvp.params 
= RVP.params(1, 2)\n assert rvp.params.x == 1\n\n def test_dict_interface(self):\n rv = RV(run_name='test')\n\n assert rv['run_name'] == rv.run_name\n\n with pytest.raises(AttributeError):\n rv['r'] = 6\n\n def test_evaluation_metrics(self):\n rvi = RVI()\n rvi.evaluation_metrics = 'LOG_NASH'\n\n with pytest.raises(ValueError):\n rvi.evaluation_metrics = 'JIM'\n\n def test_update(self):\n rv = RV(a=None, b=None)\n rv.update({'a': 1, 'b': 2})\n assert rv.a == 1\n\n rv.c = 1\n assert rv['c'] == 1\n\n def test_namedtuple(self):\n class Mod(RV):\n params = namedtuple('params', 'x1, x2, x3')\n\n m = Mod(params=Mod.params(1, 2, 3))\n assert m.params.x1 == 1\n\n\nclass TestOst:\n def test_random(self):\n o = Ost()\n assert o.random_seed == ''\n\n o.random_seed = 0\n assert o.random_seed == 'RandomSeed 0'\n\n\ndef test_isinstance_namedtuple():\n X = namedtuple('params', 'x1, x2, x3')\n x = X(1, 2, 3)\n assert isinstance_namedtuple(x)\n assert not isinstance_namedtuple([1, 2, 3])\n","sub_path":"tests/test_rv.py","file_name":"test_rv.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"114669342","text":"from drltools.utils import trainer, ddpg_config\nfrom drltools.agent.agent import DDPGAgent\nfrom unityagents import UnityEnvironment\n\n\nenv = UnityEnvironment(file_name=\"unity_environments/Reacher_mac.app\", worker_id=1)\nconfig = ddpg_config\nagent_class = DDPGAgent\nn_episodes = 2000\nmax_t = 1000\nsolved_score = 30\ntitle = 'DDPG Reacher'\n\nif __name__ == \"__main__\":\n\n trainer(env, config, agent_class, n_episodes, max_t, solved_score, title)\n\n\n","sub_path":"2 - Continuous Control -DDPG/continuous_control_ddpg_reacher.py","file_name":"continuous_control_ddpg_reacher.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"521414869","text":"\"\"\"\nFull Application for Lightpath\n\"\"\"\n############\n# Standard #\n############\nimport logging\nimport threading\nimport os.path\nfrom functools import partial\n\n###############\n# Third Party #\n###############\nfrom pydm import Display\nfrom pydm.PyQt.QtCore import pyqtSlot, Qt\nfrom pydm.PyQt.QtGui import QColor, QSpacerItem, QGridLayout\n\nimport happi\nfrom happi import Client\nfrom happi.backends import JSONBackend\nfrom pcdsdevices.happireader import construct_device\n\n##########\n# Module #\n##########\nfrom .widgets import LightRow, InactiveRow\nfrom ..controller import LightController\n\nlogger = logging.getLogger(__name__)\n\nclass LightApp(Display):\n \"\"\"\n Main widget display for the lightpath\n\n Shows tables of devices and the current destination of the beam, as well\n as the status of the MPS system for LCLS\n\n Parameters\n ----------\n *args\n List of instantiated devices that match :class:`.LightInterface`\n\n containers : list, optional\n Happi device containers to display in the GUI but not to use in the\n lightpath logic\n\n beamline : str, optional\n Beamline to initialize the application with, otherwise the most\n upstream beamline will be selected\n\n dark : bool, optional\n Load the UI with the `qdarkstyle` interface\n\n parent : optional\n \"\"\"\n\n def __init__(self, *devices, containers=None, beamline=None,\n parent=None, dark=True):\n super().__init__(parent=parent)\n #Store Lightpath information\n self.light = LightController(*devices)\n self.path = None\n self._lock = threading.Lock()\n #Create empty layout\n 
self.lightLayout = QGridLayout()\n self.lightLayout.setVerticalSpacing(1)\n self.lightLayout.setHorizontalSpacing(10)\n self.widget_rows.setLayout(self.lightLayout)\n\n #Add destinations\n for line in self.destinations():\n self.destination_combo.addItem(line)\n\n #Connect signals to slots\n self.destination_combo.currentIndexChanged.connect(\n self.change_path_display)\n self.mps_only_check.clicked.connect(self.change_path_display)\n self.upstream_check.clicked.connect(self.change_path_display)\n self.transmission_slider.valueChanged.connect(self.transmission_adjusted)\n #Store LightRow objects to manage subscriptions\n self.rows = list()\n #Select the beamline to begin with\n beamline = beamline or self.destinations()[0]\n try:\n idx = self.destinations().index(beamline.upper())\n except ValueError:\n logger.error(\"%s is not a valid beamline\", beamline)\n idx = 0\n #Move the ComboBox\n self.destination_combo.setCurrentIndex(idx)\n #Grab containers\n containers = containers or []\n self.containers = dict((key, list())\n for key in self.light.beamlines.keys())\n for device in containers:\n try:\n #Check we have a z attribute\n z = getattr(device, 'z')\n self.containers[device.beamline].append(device)\n except KeyError:\n logger.error('Container %s belongs to beamline %s, '\n ' which is not represented by other devices',\n device.name, device.beamline)\n except AttributeError:\n logger.error('Device %r does not implement the proper '\n 'interface to be included in the path',\n device)\n #Setup the UI\n self.change_path_display()\n\n #Change the stylesheet\n if dark:\n try:\n import qdarkstyle\n except ImportError:\n logger.error(\"Can not use dark theme, \"\n \"qdarkstyle package not available\")\n else:\n self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n\n def destinations(self):\n \"\"\"\n All possible beamline destinations sorted by end point\n \"\"\"\n return sorted(list(self.light.beamlines.keys()),\n key= lambda x : self.light.beamlines[x].range[0])\n\n @property\n def device_rows(self):\n \"\"\"\n Subset of device rows that refer to live devices\n \"\"\"\n return [row for row in self.rows if not isinstance(row.device,\n happi.Device)]\n\n def load_device_row(self, device):\n \"\"\"\n Create LightRow for device\n \"\"\"\n #Create new widget\n if isinstance(device, happi.Device):\n w = InactiveRow(device, parent=self.widget_rows)\n else:\n w = LightRow(device, parent=self.widget_rows)\n return w\n\n def select_devices(self, beamline, upstream=True, mps_only=False):\n \"\"\"\n Select a subset of beamline devices to show in the display\n\n Parameters\n ----------\n beamline : str\n Beamline to display\n\n upstream : bool, optional\n Include upstream devices in the display\n\n mps_only : bool ,optional\n Only show devices that are in the mps system\n \"\"\"\n #Clear any remaining subscriptions\n if self.path:\n self.clear_subs()\n #Find pool of devices and create subscriptions\n self.path = self.light.beamlines[beamline]\n #Defer running updates until UI is created \n self.path.subscribe(self.update_path, run=False)\n self.path.subscribe(self.update_mps,\n event_type=self.path.SUB_MPSPATH_CHNG,\n run=False)\n pool = self.path.path\n #Find end point for each beamline\n bls = set(d.beamline for d in pool)\n endpoints = dict((bl, max([d.z for d in pool\n if d.beamline == bl]))\n for bl in bls)\n #Find necessary containers\n containers = [c for bl in endpoints.keys()\n for c in self.containers[bl]\n if (c.beamline == bl\n and c.z < endpoints[bl])]\n #Add containers to pool and 
resort\n pool = sorted(pool + containers, key = lambda x : x.z)\n #Only return devices if they are on the specified beamline\n if not upstream:\n pool = [dev for dev in pool if dev.beamline == beamline]\n #Only return MPS devices\n if mps_only:\n #Note: This does not account for improperly configured `mps`\n pool = [dev for dev in pool if hasattr(dev, 'mps')]\n logger.debug(\"Selected %s devices ...\", len(pool))\n return pool\n\n def selected_beamline(self):\n \"\"\"\n Current beamline selected by the combo box\n \"\"\"\n return self.destination_combo.currentText()\n\n def mps_only(self):\n \"\"\"\n Whether the user has selected to only display MPS devices\n \"\"\"\n return self.mps_only_check.isChecked()\n\n def upstream(self):\n \"\"\"\n Whether the user has selected to display upstream devices\n \"\"\"\n return self.upstream_check.isChecked()\n\n @pyqtSlot(bool)\n def remove(self, value, device=None):\n \"\"\"\n Remove the device from the beamline\n \"\"\"\n if device:\n logger.info(\"Removing device %s ...\", device.name)\n try:\n device.remove()\n except Exception as exc:\n logger.error(exc)\n\n @pyqtSlot(bool)\n def insert(self, value, device=None):\n \"\"\"\n Insert the device from the beamline\n \"\"\"\n if device:\n logger.info(\"Inserting device %s ...\", device.name)\n try:\n device.insert()\n except Exception as exc:\n logger.error(exc)\n\n @pyqtSlot(int)\n def transmission_adjusted(self, value):\n \"\"\"\n Adjust the :attr:`.BeamPath.minimum_transmission`\n \"\"\"\n logger.debug(\"Adjusted minimum transmission to %s percent\", value)\n self.path.minimum_transmission = value/100.\n self.update_path()\n\n @pyqtSlot()\n @pyqtSlot(bool)\n def change_path_display(self, value=None):\n \"\"\"\n Change the display devices based on the state of the control buttons\n \"\"\"\n with self._lock:\n logger.debug(\"Resorting beampath display ...\")\n #Grab all the light rows\n rows = [self.load_device_row(d)\n for d in self.select_devices(self.selected_beamline(),\n upstream=self.upstream(),\n mps_only=self.mps_only())]\n #Clear layout if previously loaded rows exist\n if self.rows:\n #Clear our subscribtions\n for row in self.rows: row.clear_sub()\n #Clear the widgets\n for i in reversed(range(self.lightLayout.count())):\n old = self.lightLayout.takeAt(i).widget()\n if old:\n old.deleteLater()\n #Clear subscribed row cache\n self.rows.clear()\n\n #Add all the widgets to the display\n for i, row in enumerate(rows):\n #Cache row to later clear subscriptions\n self.rows.append(row)\n #Connect up remove button\n if hasattr(row, 'remove_button'):\n row.remove_button.clicked.connect(partial(self.remove,\n device=row.device))\n #Connect up insert button\n if hasattr(row, 'insert_button'):\n row.insert_button.clicked.connect(partial(self.insert,\n device=row.device))\n #Add widgets to layout\n for j, widget in enumerate(row.widgets):\n if isinstance(widget, QSpacerItem):\n self.lightLayout.addItem(widget, i, j)\n else:\n self.lightLayout.addWidget(widget, i, j)\n #Initialize interface\n for row in self.device_rows:\n row.update_state()\n #Update display\n self.transmission_adjusted(self.transmission_slider.value()) #Calls .update_path\n self.update_mps()\n\n def ui_filename(self):\n \"\"\"\n Name of designer UI file\n \"\"\"\n return 'lightapp.ui'\n\n def ui_filepath(self):\n \"\"\"\n Full path to :attr:`.ui_filename`\n \"\"\"\n return os.path.join(os.path.dirname(os.path.abspath(__file__)),\n self.ui_filename())\n\n @classmethod\n def from_json(cls, json, beamline=None, parent=None, **kwargs):\n 
\"\"\"\n Create a lightpath user interface from a JSON happi database\n\n Parameters\n ----------\n path : str\n Path to the JSON file\n\n beamline : str, optional\n Name of beamline to launch application\n\n parent : QWidget, optional\n Parent for LightApp QWidget\n\n kwargs :\n Restrict the devices included in the lightpath. These keywords are\n all passed to :meth:`.happi.Client.search`\n\n Returns\n -------\n lightApp:\n Instantiated widget \n \"\"\"\n #Load all of the information from happi\n happi = Client(database=JSONBackend(json))\n devices = happi.search(**kwargs, as_dict=False)\n #Create valid pcdsdevices\n path = list()\n for dev in devices:\n try:\n path.append(construct_device(dev))\n except Exception:\n logger.exception(\"Error instantiating %s ...\", dev.name)\n #Instantiate the Application\n logger.debug(\"Instantiating User Interface ...\")\n return cls(*path, beamline=beamline, parent=parent)\n\n def update_path(self, *args, **kwargs):\n \"\"\"\n Update the PyDMRectangles to show devices as in the beam or not\n \"\"\"\n with self._lock:\n block = self.path.impediment\n for row in self.device_rows:\n #If our device is before or at the impediment, it is lit\n if not block or (row.device.z <= block.z):\n row.indicator._default_color = Qt.cyan\n #Otherwise, it is off\n else:\n row.indicator._default_color = Qt.gray\n #Update widget display\n row.indicator.update()\n\n def update_mps(self, *args, **kwargs):\n \"\"\"\n Update the MPS status of the frame\n\n The frame of the row will be red if the device is tripping the beam,\n yellow if it is faulted but a full trip is being prevented by an\n upstream device\n \"\"\"\n with self._lock:\n #Path information\n tripped = self.path.tripped_devices\n faulted = self.path.faulted_devices\n for row in self.device_rows:\n row.indicator._pen.setWidth(5)\n if row.device in tripped:\n row.indicator.penColor = Qt.red\n elif row.device in faulted:\n row.indicator.penColor = QColor(255, 215, 0)\n else:\n row.indicator._pen.setWidth(0)\n row.indicator.penColor = Qt.gray\n\n def clear_subs(self):\n \"\"\"\n Clear the subscription event\n \"\"\"\n self.path.clear_sub(self.update_path)\n self.path.clear_sub(self.update_mps)\n","sub_path":"lightpath/ui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":13765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"418840960","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'cats'\nurlpatterns = [\n path('', views.MainView.as_view(), name = 'all'),\n path('cat/create/', views.CatCreate.as_view(), name = 'cat_create'),\n path('cat/<int:pk>/update', views.CatUpdate.as_view(), name = 'cat_update'),\n path('cat/<int:pk>/delete', views.CatDelete.as_view(), name = 'cat_delete'),\n path('breed/', views.BreedListView.as_view(), name = 'breed_list'),\n path('breed/create', views.BreedCreate.as_view(), name = 'breed_create'),\n path('breed/<int:pk>/update', views.BreedUpdate.as_view(), name = 'breed_update'),\n path('breed/<int:pk>/delete', views.BreedDelete.as_view(), name = 'breed_delete')\n]","sub_path":"cats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"59566447","text":"from __future__ import annotations\n\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nimport logging\nimport asyncio\nfrom asyncio.tasks import Task\nfrom datetime import timedelta\n\nfrom aioairctrl import CoAPClient\n\nfrom homeassistant.core import CALLBACK_TYPE, callback\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.exceptions import ConfigEntryNotReady, PlatformNotReady\n\nfrom homeassistant.util.percentage import (\n ordered_list_item_to_percentage,\n percentage_to_ordered_list_item,\n)\n\nfrom homeassistant.components.fan import (\n SUPPORT_PRESET_MODE,\n SUPPORT_SET_SPEED,\n FanEntity,\n)\n\nfrom .const import *\nfrom .timer import Timer\n\n_LOGGER = logging.getLogger(__name__)\n\nMISSED_PACKAGE_COUNT = 3\n\n\nclass Coordinator:\n def __init__(self, client: CoAPClient, host: str) -> None:\n self.client = client\n self._host = host\n\n # It's None before the first successful update.\n # Components should call async_first_refresh to make sure the first\n # update was successful. 
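# Aside: a minimal plain-asyncio sketch (not this component's Timer class) of
# the watchdog used below -- the device pushes a status packet roughly every
# `timeout` seconds, so if nothing resets the timer within
# timeout * MISSED_PACKAGE_COUNT seconds, a reconnect callback fires.
import asyncio

class Watchdog:
    def __init__(self, timeout, callback):
        self._timeout = timeout
        self._callback = callback
        self._task = None

    def reset(self):
        # Each received packet cancels the pending expiry and restarts it.
        if self._task is not None:
            self._task.cancel()
        self._task = asyncio.create_task(self._expire())

    async def _expire(self):
        await asyncio.sleep(self._timeout)
        await self._callback()

async def demo():
    async def reconnect():
        print("no packets seen, reconnecting...")
    dog = Watchdog(timeout=0.1, callback=reconnect)
    dog.reset()
    await asyncio.sleep(0.3)  # no reset() arrives in time -> reconnect fires

asyncio.run(demo())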
Set type to just DeviceStatus to remove\n # annoying checks that status is not None when it was already checked\n # during setup.\n self.status: DeviceStatus = None # type: ignore[assignment]\n\n self._listeners: list[CALLBACK_TYPE] = []\n self._task: Task | None = None\n\n self._reconnect_task: Task | None = None\n self._timeout: int = 60\n\n # Timeout = MAX_AGE * 3 Packet losses\n _LOGGER.debug(f\"init: Creating and autostarting timer for host {self._host}\")\n self._timer_disconnected = Timer(\n timeout=self._timeout * MISSED_PACKAGE_COUNT,\n callback=self.reconnect,\n autostart=True,\n )\n self._timer_disconnected._auto_restart = True\n _LOGGER.debug(f\"init: finished for host {self._host}\")\n\n async def shutdown(self):\n _LOGGER.debug(f\"shutdown: called for host {self._host}\")\n if self._reconnect_task is not None:\n _LOGGER.debug(f\"shutdown: cancelling reconnect task for host {self._host}\")\n self._reconnect_task.cancel()\n if self._timer_disconnected is not None:\n _LOGGER.debug(f\"shutdown: cancelling timeout task for host {self._host}\")\n self._timer_disconnected._cancel()\n if self.client is not None:\n await self.client.shutdown()\n\n async def reconnect(self):\n _LOGGER.debug(f\"reconnect: called for host {self._host}\")\n try:\n if self._reconnect_task is not None:\n # Reconnect stuck\n _LOGGER.debug(\n f\"reconnect: cancelling reconnect task for host {self._host}\"\n )\n self._reconnect_task.cancel()\n self._reconnect_task = None\n # Reconnect in new Task, keep timer watching\n _LOGGER.debug(\n f\"reconnect: creating new reconnect task for host {self._host}\"\n )\n self._reconnect_task = asyncio.create_task(self._reconnect())\n except:\n _LOGGER.exception(\"Exception on starting reconnect!\")\n\n async def _reconnect(self):\n try:\n _LOGGER.debug(\"Reconnecting...\")\n try:\n await self.client.shutdown()\n except:\n pass\n self.client = await CoAPClient.create(self._host)\n self._start_observing()\n except asyncio.CancelledError:\n # Silently drop this exception, because we are responsible for it.\n # Reconnect took to long\n pass\n except:\n _LOGGER.exception(\"_reconnect error\")\n\n async def async_first_refresh(self) -> None:\n _LOGGER.debug(\"async_first_refresh for host %s\", self._host)\n try:\n self.status, timeout = await self.client.get_status()\n self._timeout = timeout\n if self._timer_disconnected is not None:\n self._timer_disconnected.setTimeout(timeout * MISSED_PACKAGE_COUNT)\n _LOGGER.debug(\"finished first refresh for host %s\", self._host)\n except Exception as ex:\n _LOGGER.error(\n \"config not ready, first refresh failed for host %s\", self._host\n )\n raise ConfigEntryNotReady from ex\n\n @callback\n def async_add_listener(self, update_callback: CALLBACK_TYPE) -> Callable[[], None]:\n \"\"\"Listen for data updates.\"\"\"\n start_observing = not self._listeners\n\n self._listeners.append(update_callback)\n\n if start_observing:\n self._start_observing()\n\n @callback\n def remove_listener() -> None:\n \"\"\"Remove update listener.\"\"\"\n self.async_remove_listener(update_callback)\n\n return remove_listener\n\n @callback\n def async_remove_listener(self, update_callback) -> None:\n \"\"\"Remove data update.\"\"\"\n self._listeners.remove(update_callback)\n\n if not self._listeners and self._task:\n self._task.cancel()\n self._task = None\n\n async def _async_observe_status(self) -> None:\n async for status in self.client.observe_status():\n _LOGGER.debug(\"Status update: %s\", status)\n self.status = status\n self._timer_disconnected.reset()\n 
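# Aside: a hedged, standalone sketch of the listener fan-out that follows --
# entities register a callback, every status update invokes all callbacks,
# and the returned closure unsubscribes. This mirrors Home Assistant's
# DataUpdateCoordinator style; names here are illustrative, not this
# component's exact API.
class TinyCoordinator:
    def __init__(self):
        self._listeners = []
        self.status = None

    def add_listener(self, cb):
        self._listeners.append(cb)
        def remove():
            self._listeners.remove(cb)
        return remove

    def push_status(self, status):
        # Store the new status, then notify every registered listener
        self.status = status
        for cb in list(self._listeners):
            cb()

coord = TinyCoordinator()
unsub = coord.add_listener(lambda: print("status:", coord.status))
coord.push_status({"pwr": "1"})   # prints: status: {'pwr': '1'}
unsub()
coord.push_status({"pwr": "0"})   # no output after unsubscribing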
for update_callback in self._listeners:\n update_callback()\n\n def _start_observing(self) -> None:\n \"\"\"Schedule state observation.\"\"\"\n if self._task:\n self._task.cancel()\n self._task = None\n self._task = asyncio.create_task(self._async_observe_status())\n self._timer_disconnected.reset()\n\n\nclass PhilipsEntity(Entity):\n def __init__(self, coordinator: Coordinator) -> None:\n super().__init__()\n _LOGGER.debug(\"PhilipsEntity __init__ called\")\n _LOGGER.debug(f\"coordinator.status is: {coordinator.status}\")\n self.coordinator = coordinator\n self._serialNumber = coordinator.status[PHILIPS_DEVICE_ID]\n # self._name = coordinator.status[\"name\"]\n self._name = list(\n filter(None, map(coordinator.status.get, [PHILIPS_NAME, PHILIPS_NEW_NAME]))\n )[0]\n # self._modelName = coordinator.status[\"modelid\"]\n self._modelName = list(\n filter(\n None,\n map(coordinator.status.get, [PHILIPS_MODEL_ID, PHILIPS_NEW_MODEL_ID]),\n )\n )[0]\n self._firmware = coordinator.status[\"WifiVersion\"]\n self._manufacturer = \"Philips\"\n\n @property\n def should_poll(self) -> bool:\n \"\"\"No need to poll. Coordinator notifies entity of updates.\"\"\"\n return False\n\n @property\n def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._serialNumber)},\n \"name\": self._name,\n \"model\": self._modelName,\n \"manufacturer\": self._manufacturer,\n \"sw_version\": self._firmware,\n }\n\n @property\n def available(self):\n return self.coordinator.status is not None\n\n @property\n def _device_status(self) -> dict[str, Any]:\n return self.coordinator.status\n\n async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self.async_on_remove(\n self.coordinator.async_add_listener(self._handle_coordinator_update)\n )\n\n @callback\n def _handle_coordinator_update(self) -> None:\n \"\"\"Handle updated data from the coordinator.\"\"\"\n self.async_write_ha_state()\n\n\nclass PhilipsGenericFan(PhilipsEntity, FanEntity):\n def __init__(\n self,\n coordinator: Coordinator,\n model: str,\n name: str,\n ) -> None:\n super().__init__(coordinator)\n self._model = model\n self._name = name\n self._unique_id = None\n\n @property\n def unique_id(self) -> Optional[str]:\n return self._unique_id\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def icon(self) -> str:\n return self._icon\n\n\nclass PhilipsGenericCoAPFanBase(PhilipsGenericFan):\n AVAILABLE_PRESET_MODES = {}\n AVAILABLE_SPEEDS = {}\n AVAILABLE_ATTRIBUTES = []\n AVAILABLE_SWITCHES = []\n AVAILABLE_LIGHTS = []\n\n KEY_PHILIPS_POWER = PHILIPS_POWER\n STATE_POWER_ON = \"1\"\n STATE_POWER_OFF = \"0\"\n\n def __init__(\n self,\n coordinator: Coordinator,\n model: str,\n name: str,\n ) -> None:\n super().__init__(coordinator, model, name)\n\n self._preset_modes = []\n self._available_preset_modes = {}\n self._collect_available_preset_modes()\n\n self._speeds = []\n self._available_speeds = {}\n self._collect_available_speeds()\n\n self._available_attributes = []\n self._collect_available_attributes()\n\n try:\n device_id = self._device_status[PHILIPS_DEVICE_ID]\n self._unique_id = f\"{self._model}-{device_id}\"\n except Exception as e:\n _LOGGER.error(\"Failed retrieving unique_id: %s\", e)\n raise PlatformNotReady\n\n def _collect_available_preset_modes(self):\n preset_modes = {}\n for cls in reversed(self.__class__.__mro__):\n cls_preset_modes = getattr(cls, \"AVAILABLE_PRESET_MODES\", {})\n preset_modes.update(cls_preset_modes)\n self._available_preset_modes = preset_modes\n 
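# Aside: a standalone sketch of the MRO walk above -- iterating
# reversed(cls.__mro__) merges class-level dicts base-first, so subclasses
# override inherited entries while keeping the rest. Classes here are
# hypothetical, not the actual fan classes.
class Base:
    MODES = {"auto": 1, "sleep": 2}

class Child(Base):
    MODES = {"sleep": 20, "turbo": 3}

def collect(cls):
    merged = {}
    for klass in reversed(cls.__mro__):
        # Later (more derived) classes overwrite earlier entries
        merged.update(getattr(klass, "MODES", {}))
    return merged

print(collect(Child))   # {'auto': 1, 'sleep': 20, 'turbo': 3}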
self._preset_modes = list(self._available_preset_modes.keys())\n\n def _collect_available_speeds(self):\n speeds = {}\n for cls in reversed(self.__class__.__mro__):\n cls_speeds = getattr(cls, \"AVAILABLE_SPEEDS\", {})\n speeds.update(cls_speeds)\n self._available_speeds = speeds\n self._speeds = list(self._available_speeds.keys())\n\n def _collect_available_attributes(self):\n attributes = []\n for cls in reversed(self.__class__.__mro__):\n cls_attributes = getattr(cls, \"AVAILABLE_ATTRIBUTES\", [])\n attributes.extend(cls_attributes)\n self._available_attributes = attributes\n\n @property\n def is_on(self) -> bool:\n status = self._device_status.get(self.KEY_PHILIPS_POWER)\n # _LOGGER.debug(\"is_on: status=%s - test=%s\", status, self.STATE_POWER_ON)\n return status == self.STATE_POWER_ON\n\n async def async_turn_on(\n self,\n percentage: Optional[int] = None,\n preset_mode: Optional[str] = None,\n **kwargs,\n ):\n if preset_mode:\n await self.async_set_preset_mode(preset_mode)\n return\n if percentage:\n await self.async_set_percentage(percentage)\n return\n await self.coordinator.client.set_control_value(\n self.KEY_PHILIPS_POWER, self.STATE_POWER_ON\n )\n\n async def async_turn_off(self, **kwargs) -> None:\n await self.coordinator.client.set_control_value(\n self.KEY_PHILIPS_POWER, self.STATE_POWER_OFF\n )\n\n @property\n def supported_features(self) -> int:\n features = SUPPORT_PRESET_MODE\n if self._speeds:\n features |= SUPPORT_SET_SPEED\n return features\n\n @property\n def preset_modes(self) -> Optional[List[str]]:\n return self._preset_modes\n\n @property\n def preset_mode(self) -> Optional[str]:\n for preset_mode, status_pattern in self._available_preset_modes.items():\n for k, v in status_pattern.items():\n if self._device_status.get(k) != v:\n break\n else:\n return preset_mode\n\n async def async_set_preset_mode(self, preset_mode: str) -> None:\n \"\"\"Set the preset mode of the fan.\"\"\"\n status_pattern = self._available_preset_modes.get(preset_mode)\n if status_pattern:\n await self.coordinator.client.set_control_values(data=status_pattern)\n\n @property\n def speed_count(self) -> int:\n return len(self._speeds)\n\n @property\n def percentage(self) -> Optional[int]:\n for speed, status_pattern in self._available_speeds.items():\n for k, v in status_pattern.items():\n if self._device_status.get(k) != v:\n break\n else:\n return ordered_list_item_to_percentage(self._speeds, speed)\n\n async def async_set_percentage(self, percentage: int) -> None:\n if percentage == 0:\n await self.async_turn_off()\n else:\n speed = percentage_to_ordered_list_item(self._speeds, percentage)\n status_pattern = self._available_speeds.get(speed)\n if status_pattern:\n await self.coordinator.client.set_control_values(data=status_pattern)\n\n @property\n def extra_state_attributes(self) -> Optional[Dict[str, Any]]:\n def append(\n attributes: dict,\n key: str,\n philips_key: str,\n value_map: Union[dict, Callable[[Any, Any], Any]] = None,\n ):\n if philips_key in self._device_status:\n value = self._device_status[philips_key]\n if isinstance(value_map, dict) and value in value_map:\n value = value_map.get(value, \"unknown\")\n elif callable(value_map):\n value = value_map(value, self._device_status)\n attributes.update({key: value})\n\n device_attributes = dict()\n for key, philips_key, *rest in self._available_attributes:\n value_map = rest[0] if len(rest) else None\n append(device_attributes, key, philips_key, value_map)\n return device_attributes\n\n @property\n def icon(self) -> str:\n if not 
self.is_on:\n return ICON.POWER_BUTTON\n\n preset_mode = self.preset_mode\n if preset_mode == None:\n return ICON.FAN_SPEED_BUTTON\n if preset_mode in PRESET_MODE_ICON_MAP:\n return PRESET_MODE_ICON_MAP[preset_mode]\n\n return ICON.FAN_SPEED_BUTTON\n\n\nclass PhilipsGenericCoAPFan(PhilipsGenericCoAPFanBase):\n AVAILABLE_PRESET_MODES = {}\n AVAILABLE_SPEEDS = {}\n\n AVAILABLE_ATTRIBUTES = [\n # device information\n (ATTR_NAME, PHILIPS_NAME),\n (ATTR_TYPE, PHILIPS_TYPE),\n (ATTR_MODEL_ID, PHILIPS_MODEL_ID),\n (ATTR_PRODUCT_ID, PHILIPS_PRODUCT_ID),\n (ATTR_DEVICE_ID, PHILIPS_DEVICE_ID),\n (ATTR_DEVICE_VERSION, PHILIPS_DEVICE_VERSION),\n (ATTR_SOFTWARE_VERSION, PHILIPS_SOFTWARE_VERSION),\n (ATTR_WIFI_VERSION, PHILIPS_WIFI_VERSION),\n (ATTR_ERROR_CODE, PHILIPS_ERROR_CODE),\n (ATTR_ERROR, PHILIPS_ERROR_CODE, PHILIPS_ERROR_CODE_MAP),\n # device configuration\n (ATTR_LANGUAGE, PHILIPS_LANGUAGE),\n (ATTR_PREFERRED_INDEX, PHILIPS_PREFERRED_INDEX, PHILIPS_PREFERRED_INDEX_MAP),\n # device sensors\n (\n ATTR_RUNTIME,\n PHILIPS_RUNTIME,\n lambda x, _: str(timedelta(seconds=round(x / 1000))),\n ),\n ]\n\n AVAILABLE_LIGHTS = [PHILIPS_DISPLAY_BACKLIGHT, PHILIPS_LIGHT_BRIGHTNESS]\n\n AVAILABLE_SWITCHES = []\n AVAILABLE_SELECTS = []\n\n\nclass PhilipsNewGenericCoAPFan(PhilipsGenericCoAPFanBase):\n AVAILABLE_PRESET_MODES = {}\n AVAILABLE_SPEEDS = {}\n\n AVAILABLE_ATTRIBUTES = [\n # device information\n (ATTR_NAME, PHILIPS_NEW_NAME),\n (ATTR_MODEL_ID, PHILIPS_NEW_MODEL_ID),\n (ATTR_PRODUCT_ID, PHILIPS_PRODUCT_ID),\n (ATTR_DEVICE_ID, PHILIPS_DEVICE_ID),\n (ATTR_SOFTWARE_VERSION, PHILIPS_SOFTWARE_VERSION),\n (ATTR_WIFI_VERSION, PHILIPS_WIFI_VERSION),\n # (ATTR_ERROR_CODE, PHILIPS_ERROR_CODE),\n # (ATTR_ERROR, PHILIPS_ERROR_CODE, PHILIPS_ERROR_CODE_MAP),\n # device configuration\n (ATTR_LANGUAGE, PHILIPS_NEW_LANGUAGE),\n (\n ATTR_PREFERRED_INDEX,\n PHILIPS_NEW_PREFERRED_INDEX,\n PHILIPS_PREFERRED_INDEX_MAP,\n ),\n # device sensors\n (\n ATTR_RUNTIME,\n PHILIPS_RUNTIME,\n lambda x, _: str(timedelta(seconds=round(x / 1000))),\n ),\n ]\n\n AVAILABLE_LIGHTS = [PHILIPS_NEW_DISPLAY_BACKLIGHT]\n\n AVAILABLE_SWITCHES = []\n AVAILABLE_SELECTS = []\n\n KEY_PHILIPS_POWER = PHILIPS_NEW_POWER\n STATE_POWER_ON = \"ON\"\n STATE_POWER_OFF = \"OFF\"\n\n\nclass PhilipsHumidifierMixin(PhilipsGenericCoAPFanBase):\n AVAILABLE_SELECTS = [PHILIPS_FUNCTION, PHILIPS_HUMIDITY_TARGET]\n\n\n# the AC1715 seems to be a new class of devices that follows some patterns of its own\nclass PhilipsAC1715(PhilipsNewGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Auto General\"},\n SPEED_1: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Gentle/Speed 1\"},\n SPEED_2: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Speed 2\"},\n PRESET_MODE_TURBO: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Turbo\"},\n PRESET_MODE_SLEEP: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Sleep\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Sleep\"},\n SPEED_1: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Gentle/Speed 1\"},\n SPEED_2: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Speed 2\"},\n PRESET_MODE_TURBO: {PHILIPS_NEW_POWER: \"ON\", PHILIPS_NEW_MODE: \"Turbo\"},\n }\n\n\n# TODO consolidate these classes as soon as we see a proper pattern\n\n\nclass PhilipsAC1214(PhilipsGenericCoAPFan):\n # the AC1214 doesn't seem to like a power on call when the mode or speed is set,\n # so this needs to be handled separately\n AVAILABLE_PRESET_MODES = {\n 
PRESET_MODE_AUTO: {PHILIPS_MODE: \"P\"},\n PRESET_MODE_ALLERGEN: {PHILIPS_MODE: \"A\"},\n # make speeds available as preset\n PRESET_MODE_NIGHT: {PHILIPS_MODE: \"N\"},\n SPEED_1: {PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_NIGHT: {PHILIPS_MODE: \"N\"},\n SPEED_1: {PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SWITCHES = [PHILIPS_CHILD_LOCK]\n\n async def async_set_a(self) -> None:\n _LOGGER.debug(f\"AC1214 switches to mode 'A' first\")\n a_status_pattern = self._available_preset_modes.get(PRESET_MODE_ALLERGEN)\n await self.coordinator.client.set_control_values(data=a_status_pattern)\n await asyncio.sleep(1)\n return\n\n async def async_set_preset_mode(self, preset_mode: str) -> None:\n \"\"\"Set the preset mode of the fan.\"\"\"\n _LOGGER.debug(f\"AC1214 async_set_preset_mode is called with: {preset_mode}\")\n\n # the AC1214 doesn't like it if we set a preset mode to switch on the device,\n # so it needs to be done in sequence\n if not self.is_on:\n _LOGGER.debug(f\"AC1214 is switched on without setting a mode\")\n await self.coordinator.client.set_control_value(\n PHILIPS_POWER, PHILIPS_POWER_MAP[SWITCH_ON]\n )\n await asyncio.sleep(1)\n\n # the AC1214 also doesn't seem to like switching to mode 'M' without cycling through mode 'A'\n current_pattern = self._available_preset_modes.get(self.preset_mode)\n _LOGGER.debug(f\"AC1214 is currently on mode: {current_pattern}\")\n if preset_mode:\n _LOGGER.debug(f\"AC1214 preset mode requested: {preset_mode}\")\n status_pattern = self._available_preset_modes.get(preset_mode)\n _LOGGER.debug(f\"this corresponds to status pattern: {status_pattern}\")\n if (\n status_pattern\n and status_pattern.get(PHILIPS_MODE) != \"A\"\n and current_pattern.get(PHILIPS_MODE) != \"M\"\n ):\n await self.async_set_a()\n _LOGGER.debug(f\"AC1214 sets preset mode to: {preset_mode}\")\n if status_pattern:\n await self.coordinator.client.set_control_values(data=status_pattern)\n return\n\n async def async_set_percentage(self, percentage: int) -> None:\n \"\"\"Set the preset mode of the fan.\"\"\"\n _LOGGER.debug(f\"AC1214 async_set_percentage is called with: {percentage}\")\n\n # the AC1214 doesn't like it if we set a preset mode to switch on the device,\n # so it needs to be done in sequence\n if not self.is_on:\n _LOGGER.debug(f\"AC1214 is switched on without setting a mode\")\n await self.coordinator.client.set_control_value(\n PHILIPS_POWER, PHILIPS_POWER_MAP[SWITCH_ON]\n )\n await asyncio.sleep(1)\n\n current_pattern = self._available_preset_modes.get(self.preset_mode)\n _LOGGER.debug(f\"AC1214 is currently on mode: {current_pattern}\")\n if percentage == 0:\n _LOGGER.debug(f\"AC1214 uses 0% to switch off\")\n await self.async_turn_off()\n else:\n # the AC1214 also doesn't seem to like switching to mode 'M' without cycling through mode 'A'\n _LOGGER.debug(f\"AC1214 speed change requested: {percentage}\")\n speed = percentage_to_ordered_list_item(self._speeds, percentage)\n status_pattern = self._available_speeds.get(speed)\n _LOGGER.debug(f\"this corresponds to status pattern: {status_pattern}\")\n if (\n status_pattern\n and status_pattern.get(PHILIPS_MODE) != 
\"A\"\n and current_pattern.get(PHILIPS_MODE) != \"M\"\n ):\n await self.async_set_a()\n _LOGGER.debug(f\"AC1214 sets speed percentage to: {percentage}\")\n if status_pattern:\n await self.coordinator.client.set_control_values(data=status_pattern)\n return\n\n async def async_turn_on(\n self,\n percentage: Optional[int] = None,\n preset_mode: Optional[str] = None,\n **kwargs,\n ):\n _LOGGER.debug(\n f\"AC1214 async_turn_on called with percentage={percentage} and preset_mode={preset_mode}\"\n )\n # the AC1214 doesn't like it if we set a preset mode to switch on the device,\n # so it needs to be done in sequence\n if not self.is_on:\n _LOGGER.debug(f\"AC1214 is switched on without setting a mode\")\n await self.coordinator.client.set_control_value(\n PHILIPS_POWER, PHILIPS_POWER_MAP[SWITCH_ON]\n )\n await asyncio.sleep(1)\n\n if preset_mode:\n _LOGGER.debug(f\"AC1214 preset mode requested: {preset_mode}\")\n await self.async_set_preset_mode(preset_mode)\n return\n if percentage:\n _LOGGER.debug(f\"AC1214 speed change requested: {percentage}\")\n await self.async_set_percentage(percentage)\n return\n\n\nclass PhilipsAC2729(\n PhilipsHumidifierMixin,\n PhilipsGenericCoAPFan,\n):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"P\"},\n PRESET_MODE_ALLERGEN: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"A\"},\n # make speeds available as preset\n PRESET_MODE_NIGHT: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_NIGHT: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SWITCHES = [PHILIPS_CHILD_LOCK]\n\n\nclass PhilipsAC2889(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"P\"},\n PRESET_MODE_ALLERGEN: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"A\"},\n PRESET_MODE_BACTERIA: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"B\"},\n # make speeds available as preset\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n\n\nclass PhilipsAC29xx(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: 
{PHILIPS_POWER: \"1\", PHILIPS_MODE: \"AG\"},\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\"},\n PRESET_MODE_GENTLE: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"GT\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\"},\n PRESET_MODE_GENTLE: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"GT\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\"},\n }\n\n\nclass PhilipsAC2936(PhilipsAC29xx):\n pass\n\n\nclass PhilipsAC2939(PhilipsAC29xx):\n pass\n\n\nclass PhilipsAC2958(PhilipsAC29xx):\n pass\n\nclass PhilipsAC2959(PhilipsAC29xx):\n pass\n\n\nclass PhilipsAC30xx(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"AG\"},\n # make speeds available as preset\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\", PHILIPS_SPEED: \"t\"},\n }\n\n\nclass PhilipsAC3033(PhilipsAC30xx):\n pass\n\n\nclass PhilipsAC3036(PhilipsAC30xx):\n pass\n\n\nclass PhilipsAC3039(PhilipsAC30xx):\n pass\n\n\nclass PhilipsAC3055(PhilipsAC30xx):\n pass\n\n\nclass PhilipsAC3059(PhilipsAC30xx):\n pass\n\n\nclass PhilipsAC3259(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"P\"},\n PRESET_MODE_ALLERGEN: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"A\"},\n PRESET_MODE_BACTERIA: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"B\"},\n # make speeds available as preset\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n\n\nclass PhilipsAC3829(PhilipsHumidifierMixin, PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"P\"},\n PRESET_MODE_ALLERGEN: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"A\"},\n # make speeds available as preset\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n 
PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SWITCHES = [PHILIPS_CHILD_LOCK]\n\n\nclass PhilipsAC385x50(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"AG\"},\n # make speeds available as preset\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\", PHILIPS_SPEED: \"t\"},\n }\n\n\nclass PhilipsAC385450(PhilipsAC385x50):\n pass\n\n\nclass PhilipsAC385850(PhilipsAC385x50):\n pass\n\n\nclass PhilipsAC385x51(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"AG\"},\n # make speeds available as preset\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n PRESET_MODE_SLEEP_ALLERGY: {\n PHILIPS_POWER: \"1\",\n PHILIPS_MODE: \"AS\",\n PHILIPS_SPEED: \"as\",\n },\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SWITCHES = [PHILIPS_CHILD_LOCK]\n\n\nclass PhilipsAC385451(PhilipsAC385x51):\n pass\n\n\nclass PhilipsAC385851(PhilipsAC385x51):\n pass\n\n\nclass PhilipsAC4236(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"AG\"},\n # make speeds available as preset\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"S\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"T\", PHILIPS_SPEED: 
\"t\"},\n }\n\n\nclass PhilipsAC4558(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n # there doesn't seem to be a manual mode, so no speed setting as part of preset\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"AG\"},\n PRESET_MODE_GAS: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"F\"},\n # it seems that when setting the pollution and allergen modes, we also need to set speed \"a\"\n PRESET_MODE_POLLUTION: {\n PHILIPS_POWER: \"1\",\n PHILIPS_MODE: \"P\",\n PHILIPS_SPEED: \"a\",\n },\n PRESET_MODE_ALLERGEN: {\n PHILIPS_POWER: \"1\",\n PHILIPS_MODE: \"A\",\n PHILIPS_SPEED: \"a\",\n },\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_SPEED: \"2\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_SPEED: \"t\"},\n }\n\n\nclass PhilipsAC5659(PhilipsGenericCoAPFan):\n AVAILABLE_PRESET_MODES = {\n PRESET_MODE_AUTO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"P\"},\n PRESET_MODE_ALLERGEN: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"A\"},\n PRESET_MODE_BACTERIA: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"B\"},\n # make speeds available as preset\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n AVAILABLE_SPEEDS = {\n PRESET_MODE_SLEEP: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"s\"},\n SPEED_1: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"1\"},\n SPEED_2: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"2\"},\n SPEED_3: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"3\"},\n PRESET_MODE_TURBO: {PHILIPS_POWER: \"1\", PHILIPS_MODE: \"M\", PHILIPS_SPEED: \"t\"},\n }\n\n\nmodel_to_class = {\n MODEL_AC1214: PhilipsAC1214,\n MODEL_AC1715: PhilipsAC1715,\n MODEL_AC2729: PhilipsAC2729,\n MODEL_AC2889: PhilipsAC2889,\n MODEL_AC2936: PhilipsAC2936,\n MODEL_AC2939: PhilipsAC2939,\n MODEL_AC2958: PhilipsAC2958,\n MODEL_AC2959: PhilipsAC2959,\n MODEL_AC3033: PhilipsAC3033,\n MODEL_AC3036: PhilipsAC3036,\n MODEL_AC3039: PhilipsAC3039,\n MODEL_AC3055: PhilipsAC3055,\n MODEL_AC3059: PhilipsAC3059,\n MODEL_AC3259: PhilipsAC3259,\n MODEL_AC3829: PhilipsAC3829,\n MODEL_AC3854_50: PhilipsAC385450,\n MODEL_AC3854_51: PhilipsAC385451,\n MODEL_AC3858_50: PhilipsAC385850,\n MODEL_AC3858_51: PhilipsAC385851,\n MODEL_AC4236: PhilipsAC4236,\n MODEL_AC4558: PhilipsAC4558,\n MODEL_AC5659: PhilipsAC5659,\n}\n","sub_path":"homeassistant/components/philips_airpurifier_coap/philips.py","file_name":"philips.py","file_ext":"py","file_size_in_byte":35679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"643966553","text":"\"\"\"LEXOR to HTML LIST NodeConverter\n\nCreates a list with the information provided.\n\n\"\"\"\n\nfrom lexor.core.converter import NodeConverter\nfrom lexor.core.elements import Element\n\n\nclass ListNC(NodeConverter):\n \"\"\"Build a list. \"\"\"\n\n @staticmethod\n def start_list(ltype):\n \"\"\"Create a new list element. \"\"\"\n node = Element(ltype)\n node.append_child(Element('li'))\n return node\n\n @staticmethod\n def make_list(main):\n \"\"\"Return a list. 
\"\"\"\n item = main[0]\n if item.name == '#text':\n if main.prev is not None and main.prev.name == '#text':\n main.prev.data += item.data\n del main[0]\n item = main[0]\n else:\n item = item.next\n main.parent.insert_before(main.index, item.prev)\n level = 1\n list_node = ListNC.start_list(item['type'])\n crt = list_node\n while item['level'] > level:\n crt[-1].append_child(ListNC.start_list(item['type']))\n crt = crt[-1][-1]\n level += 1\n crt[-1].extend_children(item)\n for key in item:\n if key.startswith('__'):\n crt[key[2:]] = item[key]\n elif key.startswith('_'):\n crt[-1][key[1:]] = item[key]\n item = item.next\n while item is not None:\n if 'flag' in item and item['flag'] == 'close':\n while level >= item['level']:\n crt = crt.parent.parent\n level -= 1\n else:\n if item['level'] == level:\n crt.append_child(Element('li'))\n elif item['level'] > level:\n if len(crt) == 0:\n crt.append_child(Element('li'))\n while item['level'] > level:\n crt[-1].append_child(ListNC.start_list(item['type']))\n crt = crt[-1][-1]\n level += 1\n else:\n while item['level'] < level:\n crt = crt.parent.parent\n level -= 1\n crt.append_child(Element('li'))\n crt[-1].extend_children(item)\n for key in item:\n if key.startswith('__'):\n crt[key[2:]] = item[key]\n elif key.startswith('_'):\n crt[-1][key[1:]] = item[key]\n item = item.next\n del main[:]\n return list_node\n\n def end(self, node):\n \"\"\"Modifies the nodes caught by this node converter. \"\"\"\n list_node = ListNC.make_list(node)\n node.parent.insert_before(node.index, list_node)\n del node.parent[node.index]\n return list_node\n","sub_path":"default/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"75"} +{"seq_id":"110265535","text":"from django.conf.urls import url\n\nfrom .views.session_views import TaskStart, TaskEnd, SessionDelete\nfrom .views.task_views import TaskUpdate, TaskDelete\n\napp_name = 'tasks'\n\nurlpatterns = [\n # Session(s)\n url(r'^session/delete/(?P<pk>\\d+)/$', SessionDelete.as_view(), name='session_delete'),\n url(r'^start/(?P<pk>\\d+)/$', TaskStart.as_view(), name='start'),\n url(r'^end/(?P<pk>\\d+)/$', TaskEnd.as_view(), name='end'),\n\n # Task(s)\n url(r'^update/(?P<pk>\\d+)/$', TaskUpdate.as_view(), name='update'),\n url(r'^delete/(?P<pk>\\d+)/$', TaskDelete.as_view(), name='delete'),\n]\n","sub_path":"tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"78145837","text":"import numpy as np\n\nchildfile = open(\"/Users/ryoheieguchi/Desktop/GeneOntology_test_20191111/data/childterm_shell.txt\", \"r\")\nchildline = childfile.read()\nchildline = [[format(s1) for s1 in s0.split(\"\\n\")] for s0 in childline.split(\"//\")]\n#childline = childline.split()\n#print(childline[0])\n\nnamefile = open(\"/Users/ryoheieguchi/Desktop/GeneOntology_test_20191111/downloads/hsa_geneID_name_cp.txt\", \"r\")\nnameline = namefile.read()\nnameline = [[format(s1) for s1 in s0.split(\"\\t\")] for s0 in nameline.split(\"\\n\")]\n#print(nameline[1][1])\n\noutfile = open(\"/Users/ryoheieguchi/Desktop/GeneOntology_test_20191111/data/replace_tf.txt\", \"w\")\n\n\nfor i in range(0, len(childline)):\n for j in range(0, len(nameline)-1):\n if nameline[j][0] in childline[i]:\n outfile.write(str(childline[i][1]))\n outfile.write(\"\\t\")\n outfile.write(str(nameline[j][1]))\n 
outfile.write(\"\\n\")\n\noutfile.close()\n\n\nreplacefile = open(\"/Users/ryoheieguchi/Desktop/GeneOntology_test_20191111/data/replace_tf.txt\", \"r\")\n#replacefile2 = open(\"/Users/ryoheieguchi/Desktop/GeneOntology_test_20191107/data/heart/replace_tf.txt\", \"r\")\nreplaceline = replacefile.read()\n#replaceline = [[format(s1) for s1 in s0.split()] for s0 in replaceline.split(\"\\n\")]\nreplaceline = replaceline.split()\n\nrankfile = open(\"/Users/ryoheieguchi/Desktop/GeneOntology_test_20191111/data/heart_rank.txt\", \"r\")\nrankline = rankfile.read()\nrankline = rankline.split(\"\\n\")\n\noutfile2 = open(\"/Users/ryoheieguchi/Desktop/GeneOntology_test_20191111/data/binary_outfile.txt\", \"w\")\n\n#for k in range(0, len(replaceline)):\n #print(replaceline)\n #print(replaceline.count(\"NACA,\"))\nfor l in range(0, len(rankline)):\n\n outfile2.write(str(rankline[l]))\n outfile2.write(\"\\t\")\n outfile2.write(str(replaceline.count(rankline[l]+ \",\")))\n outfile2.write(\"\\n\")\n#print(rankline[1])\n\n#from collections import defaultdict\n#key = defaultdict(set)\n#value = defaultdict(set)\n\n#arr = []\n\n#for replaceline2 in replacefile2:\n #replaceline2 = replaceline2.replace(\"\\n\", \"\")\n #replaceline2 = replaceline2.split(\"\\t\")\n\n #key[replaceline2[1]].add(replaceline2[0])\n #value[replaceline2[0]].add(replaceline2[1])\n\n\n#for j in range(0, len(replaceline)):\n #arr.append(replaceline[j][0])\n\n#arr = np.array(arr)\n#arr = np.unique(arr)\n\n\n#for k in range(0, len(rankline)):\n #for l in range(0, len(arr)):\n #if rankline[k] in value[arr[l]]:\n #outfile2.write(str(rankline[k]))\n #outfile2.write(\"\\t\")\n #outfile2.write(\"1\")\n #outfile2.write(\"\\n\")\n #else:\n #outfile2.write(str(rankline[l]))\n #outfile2.write(\"\\t\")\n #outfile2.write(\"0\")\n #outfile2.write(\"\\n\")\n","sub_path":"src/tf_hsaid_childterm.py","file_name":"tf_hsaid_childterm.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"596029664","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the permutationEquation function below.\ndef permutationEquation(p):\n\tsize = len(p)\n\tcres_list = p\n\tp_sorted = sorted(p)\n\tindex_list = []\n\telement_list = []\n\n\tfor x in range(size):\n\t\tindex_list.append(cres_list.index(p_sorted[x]) + 1)\n\t\telement_list.append(cres_list.index(index_list[x]) + 1)\n\n\n\treturn element_list\n\nn = int(input())\n\np = list(map(int, input().rstrip().split()))\n\nresult = permutationEquation(p)\n\nprint(result)\n","sub_path":"30-permutation_equation.py","file_name":"30-permutation_equation.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"519805794","text":"import speech_recognition as sr\nimport time\nfrom os import path\n\nclass handlerJugadas:\n def __init__(self,socket,nombre,tab):\n #Constructor de la clase personajes aquí se llenará el diccionario personajes al azar\n self.cliente=socket\n self.name=nombre\n self.tablero=tab\n self.bloqueados=[]\n def pideJugada(self):\n intentosConexion=0\n intentosJugada=0\n jugada=self.recibeJugada()\n while jugada==\"Audio Ilegible\" or jugada==\"No se puede acceder al servicio\":\n if jugada==\"No se puede acceder al servicio\":\n intentosConexion+=1\n self.cliente.send(\"No se puede acceder al servicio, reintentando\".encode())\n time.sleep(0.5)\n if jugada==\"Audio Ilegible\":\n 
intentosJugada+=1\n self.cliente.send(\"Audio ilegible\".encode())\n time.sleep(0.5)\n if intentosConexion>3:\n self.cliente.send(\"Numero de intentos de conexion maximos alcanzado, revisa tu conexion\".encode())\n break\n if intentosJugada>3:\n self.cliente.send(\"Hay mucho ruido en tus grabaciones, revisa tu microfono\".encode())\n break\n jugada=self.recibeJugada()\n return jugada\n def verificaJugada(self,comando):\n #Función que se encargará de responder al cliente el resultado de su jugada\n caracteristica=\"\"\n correcto,diccionario=self.tablero.getGanador()\n caracteristicas=diccionario.keys()\n #Se asume que el comando es \"El personaje correcto es (nombre)\"\n if \"correcto\" in comando:\n for element in comando:\n if element.lower()==correcto.lower():\n return \"Has adivinado\"\n return \"No, sigue intentando\"\n for element in comando:\n if element in caracteristicas:\n caracteristica=element\n break\n if caracteristica==\"\":\n return \"Comando invalido\"\n else:\n valor=diccionario[caracteristica].lower()\n for element in comando:\n if element==valor:\n return \"SI\"\n return \"NO\"\n\n\n pass\n def recibeJugada(self):\n #Función que se encargará de recolectar los datos del cliente y de pasar de audio a texto\n self.cliente.send(\"Ya es tu turno\".encode())\n time.sleep(0.5)\n self.cliente.send(\"Juega:\".encode())\n time.sleep(0.5) \n data = self.cliente.recv(32)\n #print(\"Bytes del archivo: \"+data.decode())\n f=open(self.name+\"-jugada.wav\",\"wb\")\n while True:\n datos=self.cliente.recv(2048)\n try:\n if datos.decode()==\"Archivo enviado\":\n break\n except:\n pass\n f.write(datos)\n f.close()\n #print(\"Archivo recibido\")\n #print(\"Archivo guardado\")\n return self.speechRecognizer()\n def limpiaJugada(self,comando):\n resultado=comando\n reemplazar={\"á\":\"a\",\"é\":\"e\",\"í\":\"i\",\"ó\":\"o\",\"ú\":\"u\"}\n llaves=reemplazar.keys()\n conectores=['el','la','los','de','es','tiene']\n for element in conectores:\n while (element in comando):\n resultado.remove(element)\n for i in range(len(comando)):\n string=\"\"\n for j in range(len(comando[i])):\n if (comando[i][j] in llaves):\n string+=str(reemplazar[comando[i][j]])\n else:\n string+=str(comando[i][j])\n comando[i]=string\n return resultado\n def dimeComando(self,jugada):\n #Función que se encargará de traducir el texto de la jugada a un comando\n #print(\"INICIANDO reconocimiento de comando\")\n comando=jugada.split()\n comando=self.limpiaJugada(comando)\n if (\"quiero\" in comando) and (\"salir\" in comando):\n return (1,\"Salir\")\n if (\"bloquea\" in comando):\n self.bloqueaJugador(comando)\n return (2,comando)\n else:\n return (3,comando)\n def speechRecognizer(self):\n #Funcion encargada de pasar de un archivo de audio a texto\n #print(\"INICIANDO speechRecognizer\")\n AUDIO_FILE = path.join(path.dirname(path.realpath(__file__)), self.name+\"-jugada.wav\")\n r = sr.Recognizer()\n with sr.AudioFile(AUDIO_FILE) as source:\n audio = r.record(source) # read the entire audio file\n #print(\"datos del audio recogidos\")\n try:\n text=r.recognize_ibm(audio)\n #print(\"finalizo la traduccion\")\n return text\n except sr.UnknownValueError:\n return \"Audio Ilegible\"\n except sr.RequestError as e:\n return \"No se puede acceder al servicio\"\n def bloqueaJugador(self,comando):\n #Se asume que el comando es \"bloquea al personaje nombre\"\n if len(comando)==4:\n for i in range(len(comando)):\n if comando[i]==\"personaje\" and i+1<len(comando):\n nombre=comando[i+1].lower()\n if not(nombre in self.bloqueados):\n 
self.bloqueados.append(nombre)\n self.cliente.send(str(\"Personaje \"+ nombre+ \" bloqueado\").encode())\n time.sleep(0.1)\n else:\n self.cliente.send(\"El personaje ya estaba bloqueado\".encode())\n time.sleep(0.1)\n else:\n self.cliente.send(\"Comando incorrecto\".encode())\n time.sleep(0.1)\n ","sub_path":"practica2/Problema1/handlerJugadas.py","file_name":"handlerJugadas.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"287313127","text":"from oct_converter.readers import FDA\n\n# a sample .fda file can be downloaded from the Biobank resource here:\n# https://biobank.ndph.ox.ac.uk/showcase/refer.cgi?id=31\nfilepath = '/Users/mark/Downloads/eg_oct_fda.fda'\nfda = FDA(filepath)\n\noct_volume = fda.read_oct_volume() # returns an OCT volume with additional metadata if available\noct_volume.peek() # plots a montage of the volume\noct_volume.save('fda_testing.avi') # save volume as a movie\noct_volume.save('fda_testing.png') # save volume as a set of sequential images, fds_testing_[1...N].png\n\nfundus_image = fda.read_fundus_image() # returns a Fundus image with additional metadata if available\n#fundus_image.save('mark_test.jpg')\nfundus_image.save('fda_testing_fundus.jpg')\n","sub_path":"examples/demo_fda_extraction.py","file_name":"demo_fda_extraction.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"473516466","text":"import unittest\n\nimport pandas as pd\n\nfrom slugify import slugify\n\nfrom gtfstools.utils import showme\n\n\nclass TestGtfsNormalizer(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def normalize_str(self, txt):\n try:\n return slugify(txt, separator=\" \")\n except TypeError as error:\n return txt\n\n def normalize_serie(self, serie):\n return serie.map(self.normalize_str)\n\n def normalize_dataframe(self, frame):\n cols = frame.columns\n for col in cols:\n frame[col] = self.normalize_serie(frame[col])\n\n return frame\n\n @showme.time\n def test_normalize_str(self):\n txt = \"J'ai Pété\"\n r = slugify(txt, max_length=-1, word_boundary=True, separator=\" \")\n self.assertEqual(\"j ai pete\", r)\n\n @showme.time\n def test_normalize_serie(self):\n data = {\n \"agency_id\": \"RGRTA\",\n \"agency_name\": \"Rochester-Genesee Regional Transportation Authority \",\n \"agency_url\": \"http://www.rgrta.com\",\n \"agency_timezone\": \"America/New_York\",\n \"agency_lang\": \"en\",\n \"agency_phone\": \"585-288-1700\"\n }\n frame = pd.DataFrame([data])\n frame = self.normalize_dataframe(frame)\n data2 = {\n \"agency_id\": \"rgrta\",\n \"agency_name\": \"rochester genesee regional transportation authority\",\n \"agency_url\": \"http www rgrta com\",\n \"agency_timezone\": \"america new york\",\n \"agency_lang\": \"en\",\n \"agency_phone\": \"585 288 1700\"\n }\n frame2 = pd.DataFrame([data2])\n pd.util.testing.assert_frame_equal(frame, frame2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"gtfstools/tests/test_gtfs_normalizer.py","file_name":"test_gtfs_normalizer.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"365509503","text":"import tornado.web\nimport pickle\nimport datetime\nimport time\nimport logic\nimport logic_manager\nimport mysql\nimport os\nimport subprocess\nimport platform\n#import qrcode\nimport MySQLdb\nimport printer\nimport prepare\n\nfrom 
tornado.escape import json_encode, json_decode\n\nclass ManagerHomeHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-home.html')\n\nclass ManagerFacultyListHandler(tornado.web.RequestHandler):\n def post(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n faculty_list = mysql.get_all('faculty')\n faculty_list.sort(key=lambda x: x['fid'])\n #print faculty_list\n response = {'status': 'ok', 'faculty': faculty_list}\n self.write(json_encode(response))\n\nclass ManagerCompanyHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-company.html')\n\n def post(self):\n file = logic.company_file\n if not os.path.isfile(file):\n info = {'company': '', 'shop': '', 'location': '', 'time': '', 'heading': '', 'welcome': '', 'desp': ''}\n else:\n with open(file, 'rb') as f:\n info = pickle.load(f)\n response = {'info': info}\n self.write(json_encode(response))\n\nclass ManagerCompanySetHandler(tornado.web.RequestHandler):\n def post(self):\n company = self.get_argument('company')\n shop = self.get_argument('shop')\n location = self.get_argument('location')\n work_time = self.get_argument('time')\n heading = self.get_argument('heading')\n welcome = self.get_argument('welcome')\n desp = self.get_argument('desp')\n #print work_time\n #content = company +'\\n'+shop+'\\n'+location+'\\n'\n #print content\n #content = content.encode('gb18030')\n #printer.gprint(bytes(content))\n info = {'company': company, 'shop': shop, 'location': location, 'time': work_time, 'heading': heading, 'welcome': welcome, 'desp': desp}\n logic.info = info\n\n data_dir = logic.data_dir\n if not os.path.isdir(data_dir):\n os.mkdir(data_dir)\n file = logic.company_file\n with open(file, 'wb') as f:\n pickle.dump(info, f)\n response = {'status': 'ok'}\n self.write(json_encode(response))\n\nclass ManagerDietHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-diet.html')\n \nclass ManagerGroupAddHandler(tornado.web.RequestHandler):\n def post(self):\n cid = self.get_argument('cid')\n cname = self.get_argument('cname')\n corder = self.get_argument('corder')\n cdesp = self.get_argument('cdesp')\n corder = int(corder)\n row = {'cid':cid, 'name': unicode(cname), 'ord': corder, 'desp': cdesp}\n result = mysql.insert('category', row)\n if result:\n logic.category[cid] = row\n response = {'status': 'ok'}\n else:\n response = {'status': 'error'}\n self.write(json_encode(response))\n\nclass ManagerGroupDelHandler(tornado.web.RequestHandler):\n def post(self):\n cid = self.get_argument('cid')\n result = mysql.delete('category', {'cid': cid})\n if result:\n logic.category.pop(cid)\n response = {'status': 'ok'}\n else:\n response = {'status': 'error'}\n self.write(json_encode(response))\n\nclass ManagerGroupShowHandler(tornado.web.RequestHandler):\n def post(self):\n result = mysql.get_all('category')\n result.sort(key=lambda one: one['cid'])\n response = {'status': 'ok', 'category': result}\n self.write(json_encode(response))\n\nclass ManagerDietAddHandler(tornado.web.RequestHandler):\n def post(self):\n did = self.get_argument('did')\n name = self.get_argument('name')\n order = self.get_argument('order')\n price = self.get_argument('price')\n price2 = self.get_argument('price2')\n base = self.get_argument('base')\n cid = self.get_argument('cid')\n who = 
self.get_argument('who', 'cook')\n #print who\n desp = self.get_argument('desp')\n \n order = int(order)\n price = float(price)\n price2 = float(price2)\n base = float(base)\n picture = ''\n pic_dir = os.path.join(logic.data_dir, 'pictures')\n if not os.path.isdir(pic_dir):\n os.mkdir(pic_dir)\n if self.request.files:\n metas = self.request.files['picture']\n for meta in metas:\n file_name = meta['filename']\n content = meta['body']\n ext = os.path.splitext(file_name)[-1]\n picture = str(did) + ext\n full_path = os.path.join(pic_dir, picture)\n with open(full_path, 'wb') as f:\n f.write(content)\n row = {'did': did, 'name': name, 'ord': order, 'price': price, 'price2': price2, 'base': base, 'cid': cid, 'who': who, 'desp': desp, 'pic': picture}\n result = mysql.insert('diet', row)\n if result:\n logic.diet[did] = row\n response = {'status': 'ok'}\n else:\n if os.path.isfile(full_path):\n os.remove(full_path)\n response = {'status': 'error'}\n #content = name+'\\n'+ ('%s' % price) +'\\n'\n #content = content.encode('gb18030')\n #printer.gprint(bytes(content))\n self.write(json_encode(response))\n\nclass ManagerDietDelHandler(tornado.web.RequestHandler):\n def post(self):\n did = self.get_argument('did')\n if did in logic.diet:\n logic.diet.pop(did)\n result = mysql.get('diet', {'did': did})\n if result and result[0]:\n picture = result[0]['pic']\n full_path = os.path.join(logic.data_dir, 'pictures/' + picture)\n \n if mysql.delete('diet', {'did': did}) and picture != '':\n if os.path.isfile(full_path):\n os.remove(full_path)\n\n response = {'status': 'ok'}\n self.finish(json_encode(response))\n return\n response = {'status': 'error'}\n self.finish(json_encode(response))\n\nclass ManagerDietShowHandler(tornado.web.RequestHandler):\n def post(self):\n result = mysql.get_all('diet');\n result.sort(key=lambda one: one['did'])\n response = {'status': 'ok', 'diet': result}\n self.write(json_encode(response))\n\nclass ManagerDeskHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-desk.html')\n\nclass ManagerDeskAddHandler(tornado.web.RequestHandler):\n def post(self):\n desk = self.get_argument('desk')\n desk = desk.upper()\n seats = self.get_argument('seats')\n seats = int(seats)\n result = mysql.insert('desks', {'desk': desk, 'num': seats})\n \n if result:\n #path = os.path.join(logic.data_dir, 'desks/' + desk)\n #data = desk\n #img = qrcode.make(data)\n #img.save(path)\n if desk not in logic.desks:\n logic.desks.add(desk)\n if desk not in logic.tables:\n logic.tables[desk] = logic.Table(desk)\n response = {'status': 'ok'}\n else:\n \n response = {'status': 'error'}\n self.write(json_encode(response))\n\nclass ManagerDeskDelHandler(tornado.web.RequestHandler):\n def post(self):\n desk = self.get_argument('desk')\n desk = desk.upper()\n result = mysql.delete('desks', {'desk': desk})\n if desk in logic.desks:\n logic.desks.remove(desk)\n if desk in logic.tables:\n logic.tables.pop(desk)\n if result:\n #path = os.path.join(logic.data_dir, 'desks/' + desk)\n #os.remove(path)\n response = {'status': 'ok'}\n else:\n response = {'status': 'error'}\n self.write(json_encode(response))\n\nclass ManagerDeskShowHandler(tornado.web.RequestHandler):\n def post(self):\n desks = mysql.get_all('desks')\n desks.sort(key=lambda x: x['desk'])\n response = {'status': 'ok', 'desks': desks}\n self.write(json_encode(response))\n\nclass ManagerPrinterHandler(tornado.web.RequestHandler):\n def get(self):\n role = 
self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-printer.html')\n\nclass ManagerPrinterAddHandler(tornado.web.RequestHandler):\n def post(self):\n name = self.get_argument('name')\n \n ip = self.get_argument('ip')\n result = mysql.insert('printers', {'name': name, 'ip': ip})\n \n if result:\n #path = os.path.join(logic.data_dir, 'desks/' + desk)\n #data = desk\n #img = qrcode.make(data)\n #img.save(path)\n if name not in logic.printers:\n logic.printers[name] = ip\n \n response = {'status': 'ok'}\n else:\n \n response = {'status': 'error'}\n self.write(json_encode(response))\n\nclass ManagerPrinterDelHandler(tornado.web.RequestHandler):\n def post(self):\n printer = self.get_argument('printer')\n result = mysql.delete('printers', {'name': printer})\n \n if result:\n #path = os.path.join(logic.data_dir, 'desks/' + desk)\n #os.remove(path)\n if printer in logic.printers:\n logic.printers.pop(printer)\n response = {'status': 'ok'}\n else:\n response = {'status': 'error'}\n self.write(json_encode(response))\n\nclass ManagerPrinterShowHandler(tornado.web.RequestHandler):\n def post(self):\n printers = mysql.get_all('printers')\n response = {'status': 'ok', 'printers': printers}\n self.write(json_encode(response))\n\nclass ManagerOrderHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-order.html')\n\n def post(self):\n pid = self.get_argument('order')\n pid = int(pid)\n sql = 'select order_history.uid as uid,diet.name as name,num,order_history.price as price,cash_history.fid as cashier,status from cash_history,order_history,diet where cash_history.uid=order_history.uid and order_history.did=diet.did and order_history.pid=%s' % pid\n result = mysql.query(sql)\n #print result\n result2 = mysql.get_all('faculty')\n faculty = {}\n for one in result2:\n faculty[one['fid']] = one['name']\n total = 0\n for one in result:\n one['price'] = one['num'] * one['price']\n \n if one['status'] == 'success':\n total += one['price']\n result.append({'name': '', 'num': 'all', 'price': total, 'status': ''})\n response = {'status': 'ok', 'pid': pid, 'items': result}\n self.write(json_encode(response))\n \n\nclass ManagerWorkerHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-worker.html')\n\nclass ManagerWorkerAddHandler(tornado.web.RequestHandler):\n def post(self):\n fid = self.get_argument('fid')\n name = self.get_argument('name')\n passwd = self.get_argument('passwd')\n role = json_decode(self.get_argument('role'))\n role = ','.join(role)\n result = mysql.insert('faculty', {'fid': fid, 'name': name, 'role': role})\n result2 = mysql.insert('password', {'fid': fid, 'passwd': passwd})\n if result and result2:\n response = {'status': 'ok'}\n else:\n mysql.delete('faculty', {'fid': fid})\n mysql.delete('password', {'fid': fid})\n response = {'status': 'error'}\n self.write(json_encode(response))\n\nclass ManagerWorkerDelHandler(tornado.web.RequestHandler):\n def post(self):\n fid = self.get_argument('fid')\n result = mysql.delete('faculty', {'fid': fid})\n result2 = mysql.delete('password', {'fid': fid})\n if result and result2:\n response = {'status': 'ok'}\n else:\n response = {'status': 'error'}\n self.write(json_encode(response))\n\nclass ManagerWorkerShowHandler(tornado.web.RequestHandler):\n def post(self):\n sql = 'select faculty.fid, name, role, passwd from faculty, password where faculty.fid = 
password.fid'\n result = mysql.query(sql)\n result.sort(key=lambda x: x['fid'])\n #print result\n response = {'status': 'ok', 'workers': result}\n self.write(json_encode(response))\n\nclass ManagerCookdoHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-cookdo.html')\n\n def post(self):\n fid = self.get_argument('fid')\n #print fid\n \n results = []\n if fid == 'all':\n faculty = mysql.get_all('faculty')\n for one in faculty:\n if one['role'].find('cook') >=0:\n results.append({'fid': one['fid'], 'name': one['name'], 'diet': logic_manager.get_cook_range(one['fid'])})\n else:\n name = logic.faculty.get(fid)['name']\n results.append({'fid': fid, 'name': name, 'diet': logic_manager.get_cook_range(fid)})\n response = {'status': 'ok', 'result': results}\n \n self.write(json_encode(response))\n\nclass ManagerTodayHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-today.html')\n def post(self):\n now = datetime.datetime.now()\n start = datetime.datetime(now.year, now.month, now.day)\n end = start + datetime.timedelta(days=1)\n t1 = start.strftime('%Y-%m-%d')\n t2 = end.strftime('%Y-%m-%d')\n rows = logic_manager.flow_data(start, end)\n flow = [{'type': '', 'from': t1, 'to': t2, 'rows': rows}]\n frequency = logic_manager.frequency(now, request=1, kitchen=1, cash=1)\n cooks = []\n for fid in logic.working_cooks:\n rows = logic_manager.one_cook_flow(fid, start, end)\n #print 'one_cook_flow:', rows\n name = logic.faculty.get(fid)['name']\n cooks.append({'fid': fid, 'name': name, 'rows': rows, 'type': 'cook'})\n response = {'status': 'ok', 'flow': flow, 'frequency': frequency, 'cooks': cooks}\n self.write(json_encode(response))\n\nclass ManagerAchievementHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-achievement.html')\n\n def post(self):\n t1 = self.get_argument('from')\n t2 = self.get_argument('to')\n fid = self.get_argument('fid')\n trend = self.get_argument('trend')\n trend = int(trend)\n #print t1, t2\n format = '%Y-%m-%d'\n t1 = datetime.datetime.strptime(t1, format)\n t2 = datetime.datetime.strptime(t2, format)\n #print t1, t2\n if t1 >= t2:\n return\n result = logic_manager.achieve(fid, t1, t2, trend)\n #print result\n response = {'status': 'ok', 'result': result}\n self.write(json_encode(response))\n\nclass ManagerHistoryHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-history.html')\n\nclass ManagerHistoryFlowHandler(tornado.web.RequestHandler):\n def post(self):\n start = self.get_argument('from')\n end = self.get_argument('to')\n trend = self.get_argument('trend')\n trend = int(trend)\n format = '%Y-%m-%d'\n start = datetime.datetime.strptime(start, format)\n end = datetime.datetime.strptime(end, format)\n result = logic_manager.flow(start, end, trend)\n #print result\n response = {'status': 'ok', 'result': result}\n self.write(json_encode(response))\n \n\nclass ManagerHistoryFeedbackHandler(tornado.web.RequestHandler):\n def post(self):\n pass\n\nclass ManagerHistoryTrendHandler(tornado.web.RequestHandler):\n def post(self):\n pass\n\nclass ManagerOnedietHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-onediet.html')\n\n def 
post(self):\n did = self.get_argument('did')\n start = self.get_argument('from')\n end = self.get_argument('to')\n trend = self.get_argument('trend')\n trend = int(trend)\n format = '%Y-%m-%d'\n start = datetime.datetime.strptime(start, format)\n end = datetime.datetime.strptime(end, format)\n result = logic_manager.one_diet(did, start, end, trend)\n response = {'status': 'ok', 'result': result}\n self.write(json_encode(response))\n\nclass ManagerFrequencyHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-frequency.html')\n\n def post(self):\n day = self.get_argument('date')\n request = self.get_argument('request')\n kitchen = self.get_argument('kitchen')\n cash = self.get_argument('cash')\n request = int(request)\n kitchen = int(kitchen)\n cash = int(cash)\n format = '%Y-%m-%d'\n day = datetime.datetime.strptime(day, format)\n result = logic_manager.frequency(day,request=request, kitchen=kitchen, cash=cash)\n response = {'status': 'ok', 'result': result}\n self.write(json_encode(response))\n\nclass ManagerCommentHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-comment.html')\n\ncursor = None\nclass ManagerCommentShowHandler(tornado.web.RequestHandler):\n def post(self):\n global cursor\n HOST = 'localhost'\n PORT = 3306\n USER = 'artoftime'\n PASSWD = 'artoftime'\n DB = 'artoftime'\n conn = MySQLdb.connect(host=HOST, port=PORT, user=USER, passwd=PASSWD, db=DB, charset='utf8')\n conn.autocommit(False)\n cursor = conn.cursor()\n sql = 'select * from comment order by stamp desc'\n cursor.execute(sql)\n conn.commit()\n comments = cursor.fetchmany(100)\n #print comments\n result = []\n for one in comments:\n stamp = one[3]\n stamp = datetime.datetime.fromtimestamp(stamp)\n stamp = stamp.strftime('%Y-%m-%d %H:%M:%S')\n t = {'desk': one[1], 'comment': one[2], 'stamp': stamp}\n result.append(t)\n response = {'status': 'ok', 'comments': result}\n self.write(json_encode(response))\n\nclass ManagerCommentMoreHandler(tornado.web.RequestHandler):\n def post(self):\n global cursor\n comments = cursor.fetchmany(100)\n result = []\n for one in comments:\n stamp = one[3]\n stamp = datetime.datetime.fromtimestamp(stamp)\n stamp = stamp.strftime('%Y-%m-%d %H:%M:%S')\n t = {'desk': one[1], 'comment': one[2], 'stamp': stamp}\n result.append(t)\n response = {'status': 'ok', 'comments': result}\n self.write(json_encode(response))\n\nclass ManagerMaskHandler(tornado.web.RequestHandler):\n def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-mask.html')\n\nclass ManagerMaskDietHandler(tornado.web.RequestHandler):\n def post(self):\n diet = mysql.get_all('diet')\n diet.sort(key=lambda one: one['did'])\n response = {'status': 'ok', 'diet': diet}\n self.write(json_encode(response))\n\nclass ManagerMaskInsHandler(tornado.web.RequestHandler):\n def post(self):\n ins = json_decode(self.get_argument('ins'))\n logic.mask.ins(ins)\n response = {'status': 'ok'}\n self.write(json_encode(response))\n \nclass ManagerMaskUpdateHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def post(self):\n stamp = json_decode(self.get_argument('stamp'))\n mymask = yield logic.mask.update(stamp)\n response = {'status': 'ok', 'mask': mymask, 'stamp': logic.mask.stamp}\n self.write(json_encode(response))\n raise tornado.gen.Return()\n\n\nclass ManagerShutdownHandler(tornado.web.RequestHandler):\n 
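# WARNING: post() below pipes a hard-coded password to 'sudo -S shutdown'; only the get() page render checks the manager cookie.\n 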
def get(self):\n role = self.get_cookie('role')\n if role != 'manager':\n return\n self.render('manager-shutdown.html')\n\n def post(self):\n #prepare.save()\n if platform.system() == 'Darwin':\n echo = subprocess.Popen(['echo', 'jerrylan418'], stdout=subprocess.PIPE)\n shutdown = subprocess.Popen(['sudo', '-S', 'shutdown', '-h', 'now'], stdin=echo.stdout)\n elif platform.system() == 'Linux':\n echo = subprocess.Popen(['echo', 'lin890418\\n'], stdout=subprocess.PIPE)\n shutdown = subprocess.Popen(['sudo', '-S', 'shutdown', '-P', 'now'], stdin=echo.stdout)\n \nclass ManagerRebootHandler(tornado.web.RequestHandler):\n def post(self):\n #prepare.save()\n if platform.system() == 'Darwin':\n echo = subprocess.Popen(['echo', 'jerrylan418'], stdout=subprocess.PIPE)\n reboot = subprocess.Popen(['sudo', '-S', 'reboot'], stdin=echo.stdout)\n elif platform.system() == 'Linux':\n echo = subprocess.Popen(['echo', 'lin890418\\n'], stdout=subprocess.PIPE)\n reboot = subprocess.Popen(['sudo', '-S', 'reboot'], stdin=echo.stdout)\n \n \n\n \n \n","sub_path":"handlers_manager.py","file_name":"handlers_manager.py","file_ext":"py","file_size_in_byte":22341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"402200445","text":"\"\"\"The reconstruct CLI command.\"\"\"\n# 1. Standard python modules\nimport argparse\nfrom datetime import date, datetime\nimport sys\n\n# 2. Third party modules\nimport numpy as np\nimport pandas as pd\nfrom pytides.tide import Tide as pyTide\n\n# 3. Aquaveo modules\n\n# 4. Local modules\nfrom .common import add_common_args, add_const_out_args, add_loc_model_args\nfrom ..harmonica import Tide\n\n\nDESCR = 'Reconstruct the tides at specified location and times.'\nEXAMPLE = \"\"\"\nExample:\n\n harmonica reconstruct 38.375789 -74.943915\n\"\"\"\n\n\ndef validate_date(value):\n \"\"\"Validate a date string.\n\n Args:\n value (str): The date string, should be in '%Y-%m-%d' format (e.g. 
'2022-02-20')\n\n Returns:\n datetime.datetime: The parsed date\n\n Raises:\n argparse.ArgumentTypeError: If the value is not a valid date\n \"\"\"\n try:\n # return date.fromisoformat(value) # python 3.7\n return datetime.strptime(value, '%Y-%m-%d')\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(value)\n raise argparse.ArgumentTypeError(msg)\n\n\ndef check_positive(value):\n \"\"\"Check that a value is a positive number.\n\n Args:\n value (Union[int, float]): The value to check\n\n Returns:\n float: The value as a float\n\n Raises:\n argparse.ArgumentTypeError: If the value is zero or negative\n \"\"\"\n flt = float(value)\n if flt <= 0:\n msg = \"Not a valid time length: {0}\".format(value)\n raise argparse.ArgumentTypeError(msg)\n return flt\n\n\ndef config_parser(p, sub=False):\n \"\"\"Configure the command line arguments passed to the reconstruct CLI command.\n\n Args:\n p (ArgumentParser): The argument parser\n sub (Optional[bool]): True if this is a resources subparser\n \"\"\"\n # Subparser info\n if sub:\n p = p.add_parser(\n 'reconstruct',\n description=DESCR,\n help=DESCR,\n epilog=EXAMPLE,\n add_help=False,\n )\n\n add_common_args(p)\n p.add_argument(\n '-S', '--start_date',\n type=validate_date,\n default=date.today(),\n help='Start Date [YYYY-MM-DD], default: today'\n )\n p.add_argument(\n '-L', '--length',\n type=check_positive,\n default=7.,\n help='Length of series in days [positive non-zero], default: 7'\n )\n add_loc_model_args(p)\n add_const_out_args(p)\n\n\ndef parse_args(args):\n \"\"\"Parse the command line arguments passed to the reconstruct CLI command.\n\n Args:\n args (list): The command line arguments\n\n Returns:\n argparse.Namespace: The parsed command line arguments\n \"\"\"\n p = argparse.ArgumentParser(\n description=DESCR,\n epilog=EXAMPLE,\n add_help=False,\n )\n config_parser(p)\n return p.parse_args(args)\n\n\ndef execute(args):\n \"\"\"Execute the reconstruct CLI command.\n\n Args:\n args (argparse.Namespace): The parsed command line arguments\n \"\"\"\n times = pyTide._times(datetime.fromordinal(args.start_date.toordinal()), np.arange(args.length * 24., dtype=float))\n tide = Tide(model=args.model).reconstruct_tide(loc=[args.lat, args.lon], times=times, cons=args.cons,\n positive_ph=args.positive_phase)\n out = tide.data.to_csv(args.output, sep='\\t', header=True, index=False)\n if args.output is None:\n print(out)\n print('\\nComplete.\\n')\n\n\ndef main(args=None):\n \"\"\"Entry point for the reconstruct CLI command.\n\n Args:\n args (Optional[list]): The command line arguments; defaults to sys.argv[1:]\n \"\"\"\n if not args:\n args = sys.argv[1:]\n try:\n execute(parse_args(args))\n except RuntimeError as e:\n print(str(e))\n sys.exit(1)\n return\n","sub_path":"harmonica/cli/main_reconstruct.py","file_name":"main_reconstruct.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"124374953","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import login\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views.generic import CreateView, FormView, ListView, TemplateView\nfrom django.db.models import Q\n\nfrom commons.utils import PaginatedListView\nfrom estates.models import UserEstate, Estate\nfrom users.forms import UserModelForm, UserDjangoCreationForm, UserDjangoModelForm\nfrom users.mixings import ValidAuthorMixin\nfrom users.models import User\n\n\nclass IndexView(PaginatedListView):\n template_name = 
'index/index.html'\n model = UserEstate\n paginate_by = 5\n\n def get_queryset(self):\n queryset = super(IndexView, self).get_queryset()\n search = self.request.GET.get('search')\n estate_type = self.request.GET.get('estate_type')\n if search:\n queryset = queryset.filter(Q(user__document__icontains=search) |\n Q(user__user__first_name__icontains=search) |\n Q(user__user__username__icontains=search) |\n Q(user__user__last_name__icontains=search) |\n Q(estate__catastral_id__icontains=search) |\n Q(estate__name__icontains=search) |\n Q(estate__address__icontains=search)\n )\n if estate_type and estate_type != \"------\":\n print(\"NO ES\")\n queryset = queryset.filter(estate__type=estate_type)\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n context['url_search'] = self.request.get_full_path()\n context['estate_type'] = Estate.TYPE\n return context\n\n\nclass UserRegisterView(CreateView):\n template_name = 'user/register.html'\n model = User\n form_class = UserModelForm\n second_form_class = UserDjangoCreationForm\n\n def get_context_data(self, **kwargs):\n context = super(UserRegisterView, self).get_context_data(**kwargs)\n if not 'form_user' in context:\n context['form_user'] = self.second_form_class\n return context\n\n def post(self, request, *args, **kwargs):\n self.object = None\n form = self.get_form()\n form_user = self.second_form_class(request.POST)\n\n if form.is_valid() and form_user.is_valid():\n return self.form_valid(form, form_user)\n else:\n return self.form_invalid(form, form_user)\n\n def form_valid(self, form, form_user=None):\n user = form_user.save()\n self.object = form.save(commit=False)\n self.object.user = user\n self.object.save()\n messages.success(self.request, 'Registro realizado correctamente!')\n login(\n self.request, self.object.user,\n backend='django.contrib.auth.backends.ModelBackend'\n )\n return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)\n\n def form_invalid(self, form, form_user=None):\n return self.render_to_response(\n self.get_context_data(**{'form': form, 'form_user': form_user}),\n status=400\n )\n\n\nclass UserUpdateView(LoginRequiredMixin, ValidAuthorMixin, FormView):\n template_name = 'user/edit-profile.html'\n model = User\n form_class = UserModelForm\n second_form_class = UserDjangoModelForm\n\n def get_form(self, form_class=None):\n if form_class is None:\n form_class = self.get_form_class()\n return form_class(\n **self.get_form_kwargs(), instance=self.author_instance\n )\n\n def get_context_data(self, **kwargs):\n context = super(UserUpdateView, self).get_context_data(**kwargs)\n if not 'form_user' in context:\n context['form_user'] = self.second_form_class(\n instance=self.author_instance.user\n )\n return context\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n form_user = self.second_form_class(\n request.POST, instance=self.author_instance.user\n )\n if form.is_valid() and form_user.is_valid():\n return self.form_valid(form, form_user)\n else:\n return self.form_invalid(form, form_user)\n\n def form_valid(self, form, form_user=None):\n user = form_user.save()\n self.object = form.save()\n self.object.save()\n messages.success(self.request, 'Tu perfil ha sido actualizado.')\n return HttpResponseRedirect(reverse('users:index'))\n\n def form_invalid(self, form, form_user=None):\n return self.render_to_response(\n self.get_context_data(**{'form': form, 'form_user': form_user}),\n status=400\n 
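# HTTP 400 signals to the client that the submitted form data failed validation\n 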
)\n","sub_path":"src/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"108248304","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport paho.mqtt.client as mqtt\nimport json, sys\nfrom prime_number import getPrimeNumber\n\nBROKER_HOST = 'mqtt_container'\n# BROKER_HOST = 'localhost'\nMQTT_TOPIC = '/t/primenumber'\n\ndef publishData(startPoint, endPoint):\n client = mqtt.Client(\"P1\")\n client.connect(BROKER_HOST)\n post = {\n 'start': startPoint,\n 'end': endPoint,\n 'primeNumbers': getPrimeNumber(startPoint, endPoint)\n }\n print (post)\n client.publish(MQTT_TOPIC, payload=json.dumps(post))\n\n\n\ndef main(argv):\n if len(argv) == 2: \n publishData(int(argv[0]), int(argv[1]))\n else:\n print (\"\"\"\n Useage: python3 main.py <start point> <end point>\n example: python3 main.py 5 20\n \"\"\")\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"45767293","text":"#Day 23 Task\r\n# Gokul Dhakshana Murthy\r\n\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom PIL import Image\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport PyPDF2\r\nimport warnings\r\nfrom PyPDF4 import PdfFileWriter, PdfFileReader\r\nimport PyPDF4\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\noutput = 'combined_pdf.pdf'\r\n\r\ndef browseFiles():\r\n \r\n filename = filedialog.askopenfilename(initialdir = \"/\",\r\n title = \"Select a File\",\r\n filetypes = ((\"jpeg files\",\r\n \"*.jpeg*\"),\r\n (\"all files\",\r\n \"*.*\")))\r\n \r\n label_file_explorer.configure(text=\"File Opened: \"+filename)\r\n\r\n im = Image.open(filename)\r\n \r\n im.save(\"converted.png\", \"PNG\")\r\n\r\ndef getWeather():\r\n res = requests.get('https://www.weather-forecast.com/locations/'+weather_entry.get()+'/forecasts/latest')\r\n\r\n soup = BeautifulSoup(res.text, 'lxml')\r\n\r\n cont=soup.find('span','phrase')\r\n \r\n weather_details.configure(text=cont.text,wraplength=200,justify=CENTER,width=50,height=10,anchor=CENTER)\r\n\r\ndef getpdf1():\r\n filename = filedialog.askopenfilename(initialdir = \"/\",\r\n title = \"Select a File\",\r\n filetypes = ((\"pdf files\",\r\n \"*.pdf*\"),\r\n (\"all files\",\r\n \"*.*\")))\r\n \r\n \r\n pdfmerger_display_file1.configure(text=filename)\r\n\r\ndef getpdf2():\r\n filename = filedialog.askopenfilename(initialdir = \"/\",\r\n title = \"Select a File\",\r\n filetypes = ((\"pdf files\",\r\n \"*.pdf*\"),\r\n (\"all files\",\r\n \"*.*\")))\r\n \r\n pdfmerger_display_file2.configure(text=filename)\r\n\r\n \r\ndef PDFmerge():\r\n pdfs = [pdfmerger_display_file1.cget(\"text\"),pdfmerger_display_file2.cget(\"text\")]\r\n pdfMerger = PyPDF2.PdfFileMerger(strict=False)\r\n for pdf in pdfs:\r\n pdfMerger.append(pdf)\r\n with open(output, 'wb') as f:\r\n pdfMerger.write(f)\r\n input_pdf='combined_pdf.pdf'\r\n watermark='watermark.pdf'\r\n output_pdf='merge.pdf'\r\n watermark_instance = PdfFileReader(watermark)\r\n watermark_page = watermark_instance.getPage(0)\r\n pdf_reader = PdfFileReader(input_pdf)\r\n pdf_writer = PdfFileWriter()\r\n for page in range(pdf_reader.getNumPages()):\r\n page = pdf_reader.getPage(page)\r\n page.mergePage(watermark_page)\r\n pdf_writer.addPage(page)\r\n with open(output_pdf, 'wb') as out:\r\n 
pdf_writer.write(out)\r\n \r\nwindow = Tk()\r\n\r\nwindow.title(\"Tkinter mini project\")\r\n\r\nwindow.geometry('700x500')\r\n\r\nwindow.configure(background = \"snow2\")\r\n\r\n\r\n\r\nlabel_file_explorer=Label(window,text=\"Your Selected File Will appear here.....\")\r\n\r\nlableBrowseFiles=Label(window,text=\"Browse JPEG files\").grid(row=1,column=1)\r\n\r\nbuttonBrowseFiles=Button(window,text=\"Browse\",command=browseFiles).grid(row=1,column=2)\r\n\r\nlabel_file_explorer.grid(row=2,column=1)\r\n\r\nweather_label=Label(window,text=\"Enter the location : \").grid(row=3,column=1)\r\n\r\nweather_entry=Entry(window)\r\n\r\nweather_details=Label(window)\r\n\r\nweather_fetch=Button(window,text=\"Fetch weather\",command=getWeather).grid(row=3,column=3)\r\n\r\nweather_entry.grid(row=3,column=2)\r\n\r\nweather_details.grid(row=4,column=2)\r\n\r\npdfmerger_display_file1=Label(window)\r\n\r\npdfmerger_display_file2=Label(window)\r\n\r\n\r\npdfmerger_file1_label=Label(window,text=\"select pdf file 1 : \").grid(row=5,column=1)\r\n\r\npdfmerger_file1_browseButton=Button(window,text=\"Browse\",command=getpdf1).grid(row=5,column=2)\r\n\r\npdfmerger_file2_label=Label(window,text=\"select pdf file 1 : \").grid(row=6,column=1)\r\n\r\npdfmerger_file2_browseButton=Button(window,text=\"Browse\",command=getpdf2).grid(row=6,column=2)\r\n\r\npdf_merge_button=Button(window,text=\"Merge with water mark\",command=PDFmerge).grid(row=7,column=2)\r\n\r\npdfmerger_display_file1.grid(row=5,column=3)\r\n\r\npdfmerger_display_file2.grid(row=6,column=3)\r\n\r\n\r\n\r\nwindow.mainloop()\r\n","sub_path":"Day_23_Task.py","file_name":"Day_23_Task.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"454889263","text":"import os\nimport time\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support import wait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom PythonCourse.base_class import BaseClass\nfrom PythonCourse.actions import Actions\nimport re\n\n\ndef check_color_to_string(str):\n result = re.search(r\"rgba?\\((\\d+), (\\d+), (\\d+)(, \\d+)?\\)\", str)\n return {\n \"red\": result[1],\n \"green\": result[2],\n \"blue\": result[3]\n }\n\n\nclass Item(BaseClass):\n\n def item_check(driver):\n driver.get(\"http://localhost/litecart/en/\")\n item = driver.find_element_by_css_selector(\"div#box-campaigns a.link\")\n\n item_name = item.find_element_by_css_selector(\"div.name\").text\n\n regular_price_main = item.find_element_by_css_selector(\"s.regular-price\")\n regular_price_main_text = regular_price_main.text\n regular_price_main_color = check_color_to_string(regular_price_main.value_of_css_property(\"color\"))\n regular_price_main_decorator = regular_price_main.value_of_css_property(\"text-decoration\")\n regular_price_main_font_weight = regular_price_main.value_of_css_property(\"font-weight\")\n regular_price_main_size = regular_price_main.value_of_css_property(\"font-size\")\n\n campaign_price_main = driver.find_element_by_css_selector(\"strong.campaign-price\")\n\n campaign_price_main_text = campaign_price_main.text\n campaign_price_main_color = check_color_to_string(campaign_price_main.value_of_css_property(\"color\"))\n campaign_price_main_decorator = campaign_price_main.value_of_css_property(\"text-decoration\")\n campaign_price_main_font_weight = 
campaign_price_main.value_of_css_property(\"font-weight\")\n campaign_price_main_size = campaign_price_main.value_of_css_property(\"font-size\")\n\n assert regular_price_main_color[\"red\"] == regular_price_main_color[\"green\"] == regular_price_main_color[\"blue\"]\n assert campaign_price_main_color[\"green\"] == \"0\" and campaign_price_main_color[\"blue\"] == \"0\"\n assert \"line-through\" in regular_price_main_decorator\n assert not (\"line-through\" in campaign_price_main_decorator)\n assert int(regular_price_main_font_weight) in range(100, 501)\n assert int(campaign_price_main_font_weight) in range(600, 901)\n assert (float(regular_price_main_size[0:-2]) < float(campaign_price_main_size[0:-2]))\n\n item.click()\n\n item_name_item_page = driver.find_element_by_css_selector(\"div#box-product .title\").text\n\n regular_price_item = driver.find_element_by_css_selector(\"s.regular-price\")\n regular_price_item_color = check_color_to_string(regular_price_item.value_of_css_property(\"color\"))\n regular_price_item_text = driver.find_element_by_css_selector(\"s.regular-price\").text\n regular_price_item_decorator = \\\n driver.find_element_by_css_selector(\"s.regular-price\").value_of_css_property(\"text-decoration\")\n regular_price_item_font_weight = \\\n driver.find_element_by_css_selector(\"s.regular-price\").value_of_css_property(\"font-weight\")\n regular_price_item_size = driver.find_element_by_css_selector(\"s.regular-price\").value_of_css_property(\"font\"\n \"-size\")\n\n campaign_price_item = driver.find_element_by_css_selector(\"strong.campaign-price\")\n campaign_price_item_color = check_color_to_string(campaign_price_item.value_of_css_property(\"color\"))\n campaign_price_item_text = driver.find_element_by_css_selector(\"strong.campaign-price\").text\n campaign_price_item_decorator = \\\n driver.find_element_by_css_selector(\"strong.campaign-price\").value_of_css_property(\"text-decoration\")\n campaign_price_item_font_weight = \\\n driver.find_element_by_css_selector(\"strong.campaign-price\").value_of_css_property(\"font-weight\")\n campaign_price_item_size = driver.find_element_by_css_selector(\"strong.campaign-price\").value_of_css_property(\"font-size\")\n\n assert regular_price_item_color[\"red\"] == regular_price_item_color[\"green\"] == regular_price_item_color[\"blue\"]\n assert campaign_price_item_color[\"green\"] == \"0\" and campaign_price_item_color[\"blue\"] == \"0\"\n assert \"line-through\" in regular_price_item_decorator\n assert not (\"line-through\" in campaign_price_item_decorator)\n assert int(regular_price_item_font_weight) in range(100, 501)\n assert int(campaign_price_item_font_weight) in range(600, 901)\n assert (float(regular_price_item_size[0:-2]) < float(campaign_price_item_size[0:-2]))\n\n assert item_name == item_name_item_page\n assert regular_price_main_text == regular_price_item_text\n assert campaign_price_main_text == campaign_price_item_text\n\n def item_creation(driver, item):\n wait = WebDriverWait(driver, 10)\n time.sleep(1)\n\n wait.until(ec.element_to_be_clickable((By.XPATH, \"//span[text()='Catalog']\")))\n driver.find_element_by_xpath(\"//span[text()='Catalog']\").click()\n\n wait.until(ec.element_to_be_clickable((By.XPATH, \"//a[text()=' Add New Product']\")))\n driver.find_element_by_xpath(\"//a[text()=' Add New Product']\").click()\n\n wait.until(ec.visibility_of_element_located((By.XPATH, \"//h1[text()=' Add New Product']\")))\n\n # General\n driver.find_element_by_xpath(\"//strong[text()='Status']/../label[text()=' 
Enabled']/input\").click()\n time.sleep(3)\n driver.find_element_by_xpath(\"//strong[text()='Name']/..//input\").send_keys(item)\n time.sleep(3)\n driver.find_element_by_xpath(\"//strong[text()='Code']/..//input\").send_keys(\"00000\")\n Actions.checkbox_status(driver, \"//input[@data-name='Rubber Ducks']\")\n Actions.checkbox_status(driver, \"//td[text()='Unisex']/../td/input\")\n driver.find_element_by_css_selector(\"input[name=quantity]\").send_keys(100)\n select_status = Select(driver.find_element_by_css_selector(\"select[name=sold_out_status_id]\"))\n select_status.select_by_visible_text(\"Temporary sold out\")\n time.sleep(2)\n driver.find_element_by_css_selector(\"input[name='new_images[]']\").send_keys(os.path.join(os.path.dirname(__file__),'attachments', 'rock.jpg'))\n time.sleep(2)\n driver.find_element_by_css_selector(\"input[name=date_valid_from]\").send_keys(\"01.01.2021\")\n driver.find_element_by_css_selector(\"input[name=date_valid_to]\").send_keys(\"31.12.2025\")\n\n # Information\n driver.find_element_by_xpath(\"//a[text()='Information']\").click()\n time.sleep(1)\n\n select_manufacturer = Select(driver.find_element_by_css_selector(\"select[name=manufacturer_id]\"))\n select_manufacturer.select_by_visible_text(\"ACME Corp.\")\n driver.find_element_by_css_selector(\"input[name=keywords]\").send_keys(\"Duck\")\n driver.find_element_by_css_selector(\"input[name='short_description[en]']\").send_keys(\n \"Short desc\")\n driver.find_element_by_css_selector(\"div.trumbowyg-editor\").send_keys(\"Item desc\")\n driver.find_element_by_css_selector(\"input[name='head_title[en]']\").send_keys(item)\n driver.find_element_by_css_selector(\"input[name='meta_description[en]']\").send_keys(item)\n\n # Prices\n driver.find_element_by_xpath(\"//a[text()='Prices']\").click()\n time.sleep(1)\n\n driver.find_element_by_css_selector(\"input[name=purchase_price]\").send_keys(\"12,00\")\n select_currency = Select(driver.find_element_by_css_selector(\"select[name=purchase_price_currency_code]\"))\n select_currency.select_by_visible_text(\"Euros\")\n driver.find_element_by_css_selector(\"input[name='prices[USD]']\").send_keys(\"10.00\")\n driver.find_element_by_css_selector(\"input[name='prices[EUR]']\").send_keys(\"12.00\")\n\n # Save\n driver.find_element_by_css_selector(\"button[name=save]\").click()\n wait.until(ec.visibility_of_element_located((By.CSS_SELECTOR, \"div.notice.success\")))\n\n def item_alert_check(driver):\n driver.find_element_by_xpath(\"//span[text()='Catalog']\").click()\n time.sleep(15)\n #wait = WebDriverWait(driver, 10)\n #wait.until(ec.presence_of_element_located((By.XPATH, \"//form[@name='catalog_form']\")))\n\n driver.find_element_by_xpath(\"//form[@name='catalog_form']//a[text()='Rubber Ducks']\").click()\n items_array = len(driver.find_elements_by_xpath(\"//*[@class='dataTable']//img/../a\"))\n\n for i in range(items_array):\n driver.find_elements_by_xpath(\"//*[@class='dataTable']//img/../a\")[i].click()\n logs = driver.get_log(\"browser\")\n\n if len(logs) > 0:\n print(\"No alerts\")\n else:\n print(\"Alerts >>>> \")\n for log in logs:\n print(log)\n\n driver.get(\"http://localhost/litecart/admin/?app=catalog&doc=catalog&category_id=1\")\n\n","sub_path":"item_check.py","file_name":"item_check.py","file_ext":"py","file_size_in_byte":9109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"479415745","text":"import face_recognition as fr\nimport heapq\nimport numpy as np\nfrom rtree import index\nimport os, 
datetime\n\npath = \"./data/faces\"\n\ndef image_indexing(rtree_name, n_images):\n import os\n from rtree import index\n\n # Image collection folder path\n dirList = os.listdir(path)\n\n # Rtree index properties\n prop = index.Property()\n prop.dimension = 128 #D\n prop.buffering_capacity = 10 #M\n rtreeIndex = index.Index(rtree_name, properties = prop) #r-tree filename\n\n # Helper variables (note: rebinding 'index' below shadows the rtree.index module,\n # which is safe here only because the index object was already created above)\n index = 0\n breakUtility = False\n imagesList = []\n\n # Iterate over all person folders in collection\n for filePath in dirList:\n folderPath = path + \"/\" + filePath\n imgList = os.listdir(folderPath)\n \n # Iterate over all images inside folder\n for filename in imgList: \n imagePath = folderPath + \"/\" + filename\n img = fr.load_image_file(imagePath)\n\n # Get encodings for all faces in current image\n faceEncodings = fr.face_encodings(img)\n\n # For each face in the image\n for face in faceEncodings:\n\n # Stop once n_images faces have been indexed\n if index == n_images:\n breakUtility = True\n break\n\n # rtree expects interleaved (mins..., maxs...) coordinates, so the 128-dim\n # encoding is duplicated to form a degenerate bounding box\n tempCoords = list(face)\n\n for coord in face:\n tempCoords.append(coord)\n\n format = {\"path\": folderPath, \"name\": filename}\n\n rtreeIndex.insert(index, tempCoords, format)\n imagesList.append((index, imagePath))\n\n index = index + 1\n \n if breakUtility:\n break\n\n if breakUtility:\n break \n rtreeIndex.close()\n\n print(str(index) + \" faces were indexed.\")\n\n return rtreeIndex\n\n# -------------------------------------------------------------------------------------------\n\nimagesList = os.listdir(path)\n\ndef encode(unencodedQuery):\n image = fr.load_image_file(unencodedQuery)\n return fr.face_encodings(image)[0]\n\n\ndef KNNSequentialIndex():\n dirList = os.listdir(path)\n\n count = 0\n names = []\n known = []\n reachedN = False\n\n for filepath in dirList:\n\n folderPath = path + '/' + filepath\n imageList = os.listdir(folderPath)\n\n for imageFile in imageList:\n count += 1\n imagePath = folderPath + '/' + imageFile\n\n # process this image\n\n image = fr.load_image_file(imagePath)\n encodings = fr.face_encodings(image)\n\n if encodings:\n names.append(imageFile)\n known.append(encodings[0])\n \n # if count > 100:\n # return (known, names)\n \n return (known, names)\n\n\ndef KNNSequential(known, names, query, k, n):\n distancesList = fr.face_distance(known[:n], query)\n result = []\n if n <= len(known) and n <= len(names):\n for i in range(n):\n result.append((distancesList[i], names[i]))\n\n heapq.heapify(result)\n return heapq.nsmallest(k, result)\n\n \ndef KNNRtree(k, encodedQuery, n):\n rtree = 'RtreeIndexFile'\n prop = index.Property()\n prop.dimension = 128\n prop.buffering_capacity = 10\n rtreeIndex = index.Rtree(rtree, properties=prop)\n queryList = list(encodedQuery)\n\n for elem in encodedQuery:\n queryList.append(elem)\n return rtreeIndex.nearest(coordinates=queryList, num_results=k, objects='raw')\n\n\n# rtreeName = 'RtreeIndexFile' + str(NImagenes)\nFacesRtree = image_indexing('RtreeIndexFile', 13000)\n\n# result = list(KNNRtree(4, './data/saved/adam-sandler-test.jpeg', NImagenes))\n# print(result[0]['name'])\n","sub_path":"src/facerec_rtree.py","file_name":"facerec_rtree.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"220397553","text":"def removeAdj(string):\n string=list(string)\n s=0\n prev=None\n str_value=\"\"\n for f in range(0,len(string)):\n if(prev!=string[f]):\n string[s]=string[f]\n s+=1\n prev=string[f]\n for i in range(s):\n str_value+=string[i]\n return 
str_value\n\nif __name__=='__main__':\n string=\"AABAABCCC\"\n print((removeAdj(string)))","sub_path":"Applied Course/4.Problem Solving/7.Problems on Strings/4.Remove all adjacent duplicate characters in a string.py","file_name":"4.Remove all adjacent duplicate characters in a string.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"119912651","text":"from data_collection.management.commands import BaseScotlandSpatialHubImporter\n\nclass Command(BaseScotlandSpatialHubImporter):\n council_id = 'S12000019'\n council_name = 'Midlothian'\n elections = [\n 'local.midlothian.2017-05-04',\n #'parl.2017-06-08'\n ]\n\n def district_record_to_dict(self, record):\n code = str(record[0]).strip()\n\n \"\"\"\n MN4H is represented as a polygon which sits on top of MN4G\n (as opposed to being in an InnerRing inside MN4G).\n This means any point which is in MN4H is also in MN4G.\n Fortunately MN4H and MN4G share the same polling\n station, so in this case we can fix it by just not importing MN4G.\n If they didn't use the same polling station, this would be an issue.\n \"\"\"\n if code == 'MN4H':\n return None\n\n return super().district_record_to_dict(record)\n","sub_path":"polling_stations/apps/data_collection/management/commands/import_midlothian.py","file_name":"import_midlothian.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"618511859","text":"#\n# boxmodel.py\n#\nfrom ctypes import *\nfrom imagefaker import ImageFaker\n\nclass BoxModel(ImageFaker):\n def __init__(self, size, box, c_lib_path):\n width, height = size\n super(BoxModel, self).__init__(width, height, c_lib_path)\n left, top, right, bottom = box\n self.box = box\n self.image = self.library.create_image(width, height,\n left, top, right, bottom)\n\n def init_library(self):\n l = self.library\n l.create_image.argtypes = [c_int, c_int, c_int, c_int, c_int, c_int]\n l.create_image.restype = c_void_p\n l.free_image.argtypes = [c_void_p]\n l.set_color.argtypes = [c_void_p, c_int, c_int,\n c_ushort, c_ushort, c_ushort]\n l.set_color.restype = c_int\n l.get_color.argtypes = [c_void_p, c_int, c_int,\n c_void_p, c_void_p, c_void_p]\n l.get_color.restype = c_int\n l.proc.argtypes = [c_void_p]\n l.proc.restypes = c_int\n\n def set_color(self, pos, color):\n x, y = pos\n r, g, b = color\n return self.library.set_color(self.image, x, y, r, g, b)\n\n def get_color(self, pos):\n x, y = pos\n r, g, b = c_ushort(0), c_ushort(0), c_ushort(0)\n f = self.library.get_color(self.image, x, y,\n byref(r), byref(g), byref(b))\n if f == -1:\n return None\n return (r.value, g.value, b.value)\n\n def proc(self):\n self.logger.debug('running into C')\n return self.library.proc(self.image)\n\n def finalize(self):\n self.logger.debug('cleaning C')\n self.library.free_image(self.image)\n\n @staticmethod\n def get_center_box(original_size, result_size):\n w1, h1 = original_size\n w2, h2 = result_size\n return ((w2 - w1) // 2, (h2 - h1) // 2,\n (w2 + w1) // 2 - 1, (h2 + h1) // 2 - 1)\n\n","sub_path":"boxmodel.py","file_name":"boxmodel.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"72589418","text":"# -*- coding: utf-8 -*-\n\"\"\" \n@Time : 2020/11/14 17:25\n@Author : liufubin\n@FileName: biggest_monthly_down.py\n@description: 下跌相关指标\n\"\"\"\n\n\nclass 
BiggestMonthlyDown(object):\n @staticmethod\n def biggest_monthly_down(month_fund_yield: list):\n \"\"\"Compute the largest monthly decline; takes the fund's monthly returns.\"\"\"\n down_min = min(month_fund_yield) # largest monthly decline\n return down_min\n\n @staticmethod\n def down_month_ratio(month_fund_yield: list):\n \"\"\"Compute the ratio of losing months; takes the fund's monthly returns.\"\"\"\n count_down = 0 # count of losing months\n for i in range(len(month_fund_yield)):\n if month_fund_yield[i] < 0:\n count_down += 1\n down_rate = count_down / len(month_fund_yield) # ratio of losing months\n return down_rate\n\n @staticmethod\n def batting_average(monthly_fund_field: list, benchmark_monthly: list):\n \"\"\"Compute the ratio of months beating the benchmark; takes the fund's and the benchmark's monthly return lists.\"\"\"\n count_win = 0 # count of months beating the benchmark\n for i in range(len(monthly_fund_field)):\n if monthly_fund_field[i] >= benchmark_monthly[i]:\n count_win += 1\n batting_average = count_win/len(monthly_fund_field)\n return batting_average\n\n @staticmethod\n def profit_loss_ratio(monthly_fund_field: list):\n \"\"\"Compute the profit/loss ratio; takes the fund's monthly return list.\"\"\"\n # NOTE: counts winning/losing months rather than summing returns, and the\n # leading minus below makes the returned ratio negative\n profit_sum = 0\n loss_sum = 0\n for i in range(len(monthly_fund_field)):\n if monthly_fund_field[i] > 0:\n profit_sum += 1\n else:\n loss_sum += 1\n if loss_sum == 0:\n return profit_sum\n else:\n return -profit_sum/loss_sum\n\n\nif __name__ == '__main__':\n monthly_fund = [ # fund monthly returns\n 0.050473107,\n 0.032867281,\n 0.08931831,\n 0.021267266,\n 0.060634109,\n -0.039898534,\n 0.049669143,\n 0.081554021,\n 0.052515903,\n -0.001117565,\n 0.141202937,\n 0.077852734,\n ]\n benchmark_monthlys = [ # benchmark monthly returns\n 0.053941974,\n 0.002554274,\n -0.009327054,\n 0.003932507,\n 0.018933849,\n -0.014943403,\n 0.069975072,\n -0.022623933,\n -0.015947571,\n -0.064439227,\n 0.061425006,\n -0.011642965\n ]\n down_min_result = BiggestMonthlyDown.biggest_monthly_down(month_fund_yield=monthly_fund)\n down_rate_result = BiggestMonthlyDown.down_month_ratio(month_fund_yield=monthly_fund)\n batting_average_result = BiggestMonthlyDown.batting_average(monthly_fund_field=monthly_fund,\n benchmark_monthly=benchmark_monthlys)\n profit_loss_result = BiggestMonthlyDown.profit_loss_ratio(monthly_fund_field=monthly_fund)\n print('Largest monthly decline', down_min_result)\n print('Losing-month ratio', down_rate_result)\n print('Benchmark-beating month ratio', batting_average_result)\n print('Profit/loss ratio:', profit_loss_result)\n","sub_path":"public_method/indicator_calculation_method/biggest_monthly_down.py","file_name":"biggest_monthly_down.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"259898377","text":"def covering(intervals):\n intervals.sort(key=lambda x: x[0])\n\n result = []\n i = 0\n\n while i < len(intervals):\n interval = intervals[i]\n\n while i < len(intervals) and intersecting(intervals[i], interval):\n interval = (max(intervals[i][0], interval[0]), min(intervals[i][1], interval[1]))\n i += 1\n\n result.append(interval[1])\n return result\n\n\ndef intersecting(x, y):\n return not (x[0] > y[1] or y[0] > x[1])\n\n\nprint(covering([(0,3), (2,6), (3,4), (6,9)]))","sub_path":"covering.py","file_name":"covering.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"46524917","text":"__author__ = 'gagecoprivnicar'\nimport sys\n\n_infosent_ = sys.argv[1]\nfinal = \"\"\nfor letter in _infosent_:\n if letter == \"-\":\n final = final + \" \"\n else:\n final = final + letter\n\nfile = open(\"./OUTPUT.txt\", \"a\")\nfile.write(\"Computer Message: \" + final + 
\"\\n\")\nfile.close()\n","sub_path":"modules/say.py","file_name":"say.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"42336067","text":"import json\nimport os\n\ndata_path = '/home/zhaohenz/scratch/obj-art-dataset/data'\ndata = []\n\nfor group in os.listdir(data_path):\n with open(os.path.join(data_path, group, 'label.json'), 'r') as fp:\n tmp = json.load(fp)\n with open('img_lists/img_list_{}.txt'.format(group), 'w') as fp:\n for entry in tmp:\n for prefix in ['depth', 'normal']:\n folder_path = os.path.join(data_path, group, 'frames', entry['id'], prefix+'_pred')\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n for i in range(120):\n fp.write(os.path.join(data_path, group, 'frames', entry['id'], 'color', '{}_{:06d}.png'.format(entry['id'], i)))\n fp.write('\\n')\n\n\n\npath = [('/z/zhaohenz/rbo_dataset/interactions/cabinet', 0), \n('/z/zhaohenz/rbo_dataset/interactions/ikeasmall', 0),\n('/z/zhaohenz/rbo_dataset/interactions/laptop', 1),\n('/z/zhaohenz/rbo_dataset/interactions/microwave', 1)]\n\nwith open('img_lists/rbo.txt', 'w') as fp:\n for p in path:\n videos = os.listdir(p[0])\n for video in videos:\n img_path = os.path.join(p[0], video, 'camera_rgb')\n for prefix in ['depth', 'normal']:\n output_path = os.path.join(p[0], video, prefix+'_pred')\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n for x in sorted(os.listdir(img_path)):\n fp.write(os.path.join(img_path, x))\n fp.write('\\n')\n\n\n","sub_path":"generate_img_list.py","file_name":"generate_img_list.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"61664013","text":"# Copyright 2018, Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport threading\nimport time\nimport types as stdlib_types\n\nimport mock\nimport pytest\nfrom six.moves import queue\n\nfrom google.api_core import bidi\nfrom google.api_core import exceptions\nfrom google.cloud.pubsub_v1 import types\nfrom google.cloud.pubsub_v1.gapic import subscriber_client_config\nfrom google.cloud.pubsub_v1.subscriber import client\nfrom google.cloud.pubsub_v1.subscriber import message\nfrom google.cloud.pubsub_v1.subscriber import scheduler\nfrom google.cloud.pubsub_v1.subscriber._protocol import dispatcher\nfrom google.cloud.pubsub_v1.subscriber._protocol import heartbeater\nfrom google.cloud.pubsub_v1.subscriber._protocol import leaser\nfrom google.cloud.pubsub_v1.subscriber._protocol import requests\nfrom google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager\nimport grpc\n\n\n@pytest.mark.parametrize(\n \"exception,expected_cls\",\n [\n (ValueError(\"meep\"), ValueError),\n (\n mock.create_autospec(grpc.RpcError, instance=True),\n exceptions.GoogleAPICallError,\n ),\n ],\n)\ndef test__maybe_wrap_exception(exception, expected_cls):\n assert isinstance(\n 
streaming_pull_manager._maybe_wrap_exception(exception), expected_cls\n )\n\n\ndef test__wrap_callback_errors_no_error():\n msg = mock.create_autospec(message.Message, instance=True)\n callback = mock.Mock()\n on_callback_error = mock.Mock()\n\n streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)\n\n callback.assert_called_once_with(msg)\n msg.nack.assert_not_called()\n on_callback_error.assert_not_called()\n\n\ndef test__wrap_callback_errors_error():\n callback_error = ValueError(\"meep\")\n\n msg = mock.create_autospec(message.Message, instance=True)\n callback = mock.Mock(side_effect=callback_error)\n on_callback_error = mock.Mock()\n\n streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)\n\n msg.nack.assert_called_once()\n on_callback_error.assert_called_once_with(callback_error)\n\n\ndef test_constructor_and_default_state():\n manager = streaming_pull_manager.StreamingPullManager(\n mock.sentinel.client, mock.sentinel.subscription\n )\n\n # Public state\n assert manager.is_active is False\n assert manager.flow_control == types.FlowControl()\n assert manager.dispatcher is None\n assert manager.leaser is None\n assert manager.ack_histogram is not None\n assert manager.ack_deadline == 10\n assert manager.load == 0\n\n # Private state\n assert manager._client == mock.sentinel.client\n assert manager._subscription == mock.sentinel.subscription\n assert manager._scheduler is not None\n\n\ndef test_constructor_with_options():\n manager = streaming_pull_manager.StreamingPullManager(\n mock.sentinel.client,\n mock.sentinel.subscription,\n flow_control=mock.sentinel.flow_control,\n scheduler=mock.sentinel.scheduler,\n )\n\n assert manager.flow_control == mock.sentinel.flow_control\n assert manager._scheduler == mock.sentinel.scheduler\n\n\ndef make_manager(**kwargs):\n client_ = mock.create_autospec(client.Client, instance=True)\n scheduler_ = mock.create_autospec(scheduler.Scheduler, instance=True)\n return streaming_pull_manager.StreamingPullManager(\n client_, \"subscription-name\", scheduler=scheduler_, **kwargs\n )\n\n\ndef fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10):\n \"\"\"Add a simplified fake add() method to a leaser instance.\n\n The fake add() method actually increases the leaser's internal message count\n by one for each message, and the total bytes by ``assumed_msg_size`` for\n each message (regardless of the actual message size).\n \"\"\"\n\n def fake_add(self, items):\n self.message_count += len(items)\n self.bytes += len(items) * assumed_msg_size\n\n leaser.message_count = init_msg_count\n leaser.bytes = init_msg_count * assumed_msg_size\n leaser.add = stdlib_types.MethodType(fake_add, leaser)\n\n\ndef test_ack_deadline():\n manager = make_manager()\n assert manager.ack_deadline == 10\n manager.ack_histogram.add(20)\n assert manager.ack_deadline == 20\n manager.ack_histogram.add(10)\n assert manager.ack_deadline == 20\n\n\ndef test_maybe_pause_consumer_wo_consumer_set():\n manager = make_manager(\n flow_control=types.FlowControl(max_messages=10, max_bytes=1000)\n )\n manager.maybe_pause_consumer() # no raise\n # Ensure load > 1\n _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)\n _leaser.message_count = 100\n _leaser.bytes = 10000\n manager.maybe_pause_consumer() # no raise\n\n\ndef test_lease_load_and_pause():\n manager = make_manager(\n flow_control=types.FlowControl(max_messages=10, max_bytes=1000)\n )\n manager._leaser = leaser.Leaser(manager)\n manager._consumer = 
mock.create_autospec(bidi.BackgroundConsumer, instance=True)\n manager._consumer.is_paused = False\n\n # This should mean that our messages count is at 10%, and our bytes\n # are at 15%; load should return the higher (0.15), and shouldn't cause\n # the consumer to pause.\n manager.leaser.add([requests.LeaseRequest(ack_id=\"one\", byte_size=150)])\n assert manager.load == 0.15\n manager.maybe_pause_consumer()\n manager._consumer.pause.assert_not_called()\n\n # After this message is added, the messages should be higher at 20%\n # (versus 16% for bytes).\n manager.leaser.add([requests.LeaseRequest(ack_id=\"two\", byte_size=10)])\n assert manager.load == 0.2\n\n # Returning a number above 100% is fine, and it should cause this to pause.\n manager.leaser.add([requests.LeaseRequest(ack_id=\"three\", byte_size=1000)])\n assert manager.load == 1.16\n manager.maybe_pause_consumer()\n manager._consumer.pause.assert_called_once()\n\n\ndef test_drop_and_resume():\n manager = make_manager(\n flow_control=types.FlowControl(max_messages=10, max_bytes=1000)\n )\n manager._leaser = leaser.Leaser(manager)\n manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)\n manager._consumer.is_paused = True\n\n # Add several messages until we're over the load threshold.\n manager.leaser.add(\n [\n requests.LeaseRequest(ack_id=\"one\", byte_size=750),\n requests.LeaseRequest(ack_id=\"two\", byte_size=250),\n ]\n )\n\n assert manager.load == 1.0\n\n # Trying to resume now should have no effect as we're over the threshold.\n manager.maybe_resume_consumer()\n manager._consumer.resume.assert_not_called()\n\n # Drop the 200 byte message, which should put us under the resume\n # threshold.\n manager.leaser.remove([requests.DropRequest(ack_id=\"two\", byte_size=250)])\n manager.maybe_resume_consumer()\n manager._consumer.resume.assert_called_once()\n\n\ndef test_resume_not_paused():\n manager = make_manager()\n manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)\n manager._consumer.is_paused = False\n\n # Resuming should have no effect is the consumer is not actually paused.\n manager.maybe_resume_consumer()\n manager._consumer.resume.assert_not_called()\n\n\ndef test_maybe_resume_consumer_wo_consumer_set():\n manager = make_manager(\n flow_control=types.FlowControl(max_messages=10, max_bytes=1000)\n )\n manager.maybe_resume_consumer() # no raise\n\n\ndef test__maybe_release_messages_on_overload():\n manager = make_manager(\n flow_control=types.FlowControl(max_messages=10, max_bytes=1000)\n )\n\n msg = mock.create_autospec(message.Message, instance=True, ack_id=\"ack\", size=11)\n manager._messages_on_hold.put(msg)\n manager._on_hold_bytes = msg.size\n\n # Ensure load is exactly 1.0 (to verify that >= condition is used)\n _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)\n _leaser.message_count = 10\n _leaser.bytes = 1000 + msg.size\n\n manager._maybe_release_messages()\n\n assert manager._messages_on_hold.qsize() == 1\n manager._leaser.add.assert_not_called()\n manager._scheduler.schedule.assert_not_called()\n\n\ndef test__maybe_release_messages_below_overload():\n manager = make_manager(\n flow_control=types.FlowControl(max_messages=10, max_bytes=1000)\n )\n manager._callback = mock.sentinel.callback\n\n # Init leaser message count to 11, so that when subtracting the 3 messages\n # that are on hold, there is still room for another 2 messages before the\n # max load is hit.\n _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)\n 
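# swap in the fake add() defined above so the leaser tracks message_count/bytes deterministically\n 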
fake_leaser_add(_leaser, init_msg_count=11, assumed_msg_size=10)\n\n messages = [\n mock.create_autospec(message.Message, instance=True, ack_id=\"ack_foo\", size=10),\n mock.create_autospec(message.Message, instance=True, ack_id=\"ack_bar\", size=10),\n mock.create_autospec(message.Message, instance=True, ack_id=\"ack_baz\", size=10),\n ]\n for msg in messages:\n manager._messages_on_hold.put(msg)\n manager._on_hold_bytes = 3 * 10\n\n # the actual call of MUT\n manager._maybe_release_messages()\n\n assert manager._messages_on_hold.qsize() == 1\n msg = manager._messages_on_hold.get_nowait()\n assert msg.ack_id == \"ack_baz\"\n\n schedule_calls = manager._scheduler.schedule.mock_calls\n assert len(schedule_calls) == 2\n for _, call_args, _ in schedule_calls:\n assert call_args[0] == mock.sentinel.callback\n assert isinstance(call_args[1], message.Message)\n assert call_args[1].ack_id in (\"ack_foo\", \"ack_bar\")\n\n\ndef test__maybe_release_messages_negative_on_hold_bytes_warning(caplog):\n manager = make_manager(\n flow_control=types.FlowControl(max_messages=10, max_bytes=1000)\n )\n\n msg = mock.create_autospec(message.Message, instance=True, ack_id=\"ack\", size=17)\n manager._messages_on_hold.put(msg)\n manager._on_hold_bytes = 5 # too low for some reason\n\n _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)\n _leaser.message_count = 3\n _leaser.bytes = 150\n\n with caplog.at_level(logging.WARNING):\n manager._maybe_release_messages()\n\n expected_warnings = [\n record.message.lower()\n for record in caplog.records\n if \"unexpectedly negative\" in record.message\n ]\n assert len(expected_warnings) == 1\n assert \"on hold bytes\" in expected_warnings[0]\n assert \"-12\" in expected_warnings[0]\n\n assert manager._on_hold_bytes == 0 # should be auto-corrected\n\n\ndef test_send_unary():\n manager = make_manager()\n manager._UNARY_REQUESTS = True\n\n manager.send(\n types.StreamingPullRequest(\n ack_ids=[\"ack_id1\", \"ack_id2\"],\n modify_deadline_ack_ids=[\"ack_id3\", \"ack_id4\", \"ack_id5\"],\n modify_deadline_seconds=[10, 20, 20],\n )\n )\n\n manager._client.acknowledge.assert_called_once_with(\n subscription=manager._subscription, ack_ids=[\"ack_id1\", \"ack_id2\"]\n )\n\n manager._client.modify_ack_deadline.assert_has_calls(\n [\n mock.call(\n subscription=manager._subscription,\n ack_ids=[\"ack_id3\"],\n ack_deadline_seconds=10,\n ),\n mock.call(\n subscription=manager._subscription,\n ack_ids=[\"ack_id4\", \"ack_id5\"],\n ack_deadline_seconds=20,\n ),\n ],\n any_order=True,\n )\n\n\ndef test_send_unary_empty():\n manager = make_manager()\n manager._UNARY_REQUESTS = True\n\n manager.send(types.StreamingPullRequest())\n\n manager._client.acknowledge.assert_not_called()\n manager._client.modify_ack_deadline.assert_not_called()\n\n\ndef test_send_unary_api_call_error(caplog):\n caplog.set_level(logging.DEBUG)\n\n manager = make_manager()\n manager._UNARY_REQUESTS = True\n\n error = exceptions.GoogleAPICallError(\"The front fell off\")\n manager._client.acknowledge.side_effect = error\n\n manager.send(types.StreamingPullRequest(ack_ids=[\"ack_id1\", \"ack_id2\"]))\n\n assert \"The front fell off\" in caplog.text\n\n\ndef test_send_unary_retry_error(caplog):\n caplog.set_level(logging.DEBUG)\n\n manager, _, _, _, _, _ = make_running_manager()\n manager._UNARY_REQUESTS = True\n\n error = exceptions.RetryError(\n \"Too long a transient error\", cause=Exception(\"Out of time!\")\n )\n manager._client.acknowledge.side_effect = error\n\n with 
pytest.raises(exceptions.RetryError):\n manager.send(types.StreamingPullRequest(ack_ids=[\"ack_id1\", \"ack_id2\"]))\n\n assert \"RetryError while sending unary RPC\" in caplog.text\n assert \"signaled streaming pull manager shutdown\" in caplog.text\n\n\ndef test_send_streaming():\n manager = make_manager()\n manager._UNARY_REQUESTS = False\n manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)\n\n manager.send(mock.sentinel.request)\n\n manager._rpc.send.assert_called_once_with(mock.sentinel.request)\n\n\ndef test_heartbeat():\n manager = make_manager()\n manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)\n manager._rpc.is_active = True\n\n manager.heartbeat()\n\n manager._rpc.send.assert_called_once_with(types.StreamingPullRequest())\n\n\ndef test_heartbeat_inactive():\n manager = make_manager()\n manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)\n manager._rpc.is_active = False\n\n manager.heartbeat()\n\n manager._rpc.send.assert_not_called()\n\n\n@mock.patch(\"google.api_core.bidi.ResumableBidiRpc\", autospec=True)\n@mock.patch(\"google.api_core.bidi.BackgroundConsumer\", autospec=True)\n@mock.patch(\"google.cloud.pubsub_v1.subscriber._protocol.leaser.Leaser\", autospec=True)\n@mock.patch(\n \"google.cloud.pubsub_v1.subscriber._protocol.dispatcher.Dispatcher\", autospec=True\n)\n@mock.patch(\n \"google.cloud.pubsub_v1.subscriber._protocol.heartbeater.Heartbeater\", autospec=True\n)\ndef test_open(heartbeater, dispatcher, leaser, background_consumer, resumable_bidi_rpc):\n manager = make_manager()\n\n manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)\n\n heartbeater.assert_called_once_with(manager)\n heartbeater.return_value.start.assert_called_once()\n assert manager._heartbeater == heartbeater.return_value\n\n dispatcher.assert_called_once_with(manager, manager._scheduler.queue)\n dispatcher.return_value.start.assert_called_once()\n assert manager._dispatcher == dispatcher.return_value\n\n leaser.assert_called_once_with(manager)\n leaser.return_value.start.assert_called_once()\n assert manager.leaser == leaser.return_value\n\n background_consumer.assert_called_once_with(manager._rpc, manager._on_response)\n background_consumer.return_value.start.assert_called_once()\n assert manager._consumer == background_consumer.return_value\n\n resumable_bidi_rpc.assert_called_once_with(\n start_rpc=manager._client.api.streaming_pull,\n initial_request=mock.ANY,\n should_recover=manager._should_recover,\n should_terminate=manager._should_terminate,\n throttle_reopen=True,\n )\n initial_request_arg = resumable_bidi_rpc.call_args.kwargs[\"initial_request\"]\n assert initial_request_arg.func == manager._get_initial_request\n assert initial_request_arg.args[0] == 10 # the default stream ACK timeout\n assert not manager._client.api.get_subscription.called\n\n resumable_bidi_rpc.return_value.add_done_callback.assert_called_once_with(\n manager._on_rpc_done\n )\n assert manager._rpc == resumable_bidi_rpc.return_value\n\n manager._consumer.is_active = True\n assert manager.is_active is True\n\n\ndef test_open_already_active():\n manager = make_manager()\n manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)\n manager._consumer.is_active = True\n\n with pytest.raises(ValueError, match=\"already open\"):\n manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)\n\n\ndef test_open_has_been_closed():\n manager = make_manager()\n manager._closed = True\n\n with pytest.raises(ValueError, match=\"closed\"):\n 
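# test_send_unary above expects one modify_ack_deadline call per distinct
# deadline, with the matching ack IDs batched together. The grouping itself
# is simple; a sketch with a hypothetical helper name:
from collections import defaultdict

def group_modacks(ack_ids, deadlines):
    # Turn parallel ack_id/deadline lists into {deadline: [ack_ids]}.
    grouped = defaultdict(list)
    for ack_id, deadline in zip(ack_ids, deadlines):
        grouped[deadline].append(ack_id)
    return dict(grouped)

assert group_modacks(['ack_id3', 'ack_id4', 'ack_id5'], [10, 20, 20]) == {
    10: ['ack_id3'],
    20: ['ack_id4', 'ack_id5'],
}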
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)\n\n\ndef make_running_manager():\n manager = make_manager()\n manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)\n manager._consumer.is_active = True\n manager._dispatcher = mock.create_autospec(dispatcher.Dispatcher, instance=True)\n manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)\n manager._heartbeater = mock.create_autospec(heartbeater.Heartbeater, instance=True)\n\n return (\n manager,\n manager._consumer,\n manager._dispatcher,\n manager._leaser,\n manager._heartbeater,\n manager._scheduler,\n )\n\n\ndef test_close():\n manager, consumer, dispatcher, leaser, heartbeater, scheduler = (\n make_running_manager()\n )\n\n manager.close()\n\n consumer.stop.assert_called_once()\n leaser.stop.assert_called_once()\n dispatcher.stop.assert_called_once()\n heartbeater.stop.assert_called_once()\n scheduler.shutdown.assert_called_once()\n\n assert manager.is_active is False\n\n\ndef test_close_inactive_consumer():\n manager, consumer, dispatcher, leaser, heartbeater, scheduler = (\n make_running_manager()\n )\n consumer.is_active = False\n\n manager.close()\n\n consumer.stop.assert_not_called()\n leaser.stop.assert_called_once()\n dispatcher.stop.assert_called_once()\n heartbeater.stop.assert_called_once()\n scheduler.shutdown.assert_called_once()\n\n\ndef test_close_idempotent():\n manager, _, _, _, _, scheduler = make_running_manager()\n\n manager.close()\n manager.close()\n\n assert scheduler.shutdown.call_count == 1\n\n\nclass FakeDispatcher(object):\n def __init__(self, manager, error_callback):\n self._manager = manager\n self._error_callback = error_callback\n self._thread = None\n self._stop = False\n\n def start(self):\n self._thread = threading.Thread(target=self._do_work)\n self._thread.daemon = True\n self._thread.start()\n\n def stop(self):\n self._stop = True\n self._thread.join()\n self._thread = None\n\n def _do_work(self):\n while not self._stop:\n try:\n self._manager.leaser.add([mock.Mock()])\n except Exception as exc:\n self._error_callback(exc)\n time.sleep(0.1)\n\n # also try to interact with the leaser after the stop flag has been set\n try:\n self._manager.leaser.remove([mock.Mock()])\n except Exception as exc:\n self._error_callback(exc)\n\n\ndef test_close_no_dispatcher_error():\n manager, _, _, _, _, _ = make_running_manager()\n error_callback = mock.Mock(name=\"error_callback\")\n dispatcher = FakeDispatcher(manager=manager, error_callback=error_callback)\n manager._dispatcher = dispatcher\n dispatcher.start()\n\n manager.close()\n\n error_callback.assert_not_called()\n\n\ndef test_close_callbacks():\n manager, _, _, _, _, _ = make_running_manager()\n\n callback = mock.Mock()\n\n manager.add_close_callback(callback)\n manager.close(reason=\"meep\")\n\n callback.assert_called_once_with(manager, \"meep\")\n\n\ndef test__get_initial_request():\n manager = make_manager()\n manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)\n manager._leaser.ack_ids = [\"1\", \"2\"]\n\n initial_request = manager._get_initial_request(123)\n\n assert isinstance(initial_request, types.StreamingPullRequest)\n assert initial_request.subscription == \"subscription-name\"\n assert initial_request.stream_ack_deadline_seconds == 123\n assert initial_request.modify_deadline_ack_ids == [\"1\", \"2\"]\n assert initial_request.modify_deadline_seconds == [10, 10]\n\n\ndef test__get_initial_request_wo_leaser():\n manager = make_manager()\n manager._leaser = None\n\n 
initial_request = manager._get_initial_request(123)\n\n assert isinstance(initial_request, types.StreamingPullRequest)\n assert initial_request.subscription == \"subscription-name\"\n assert initial_request.stream_ack_deadline_seconds == 123\n assert initial_request.modify_deadline_ack_ids == []\n assert initial_request.modify_deadline_seconds == []\n\n\ndef test__on_response_no_leaser_overload():\n manager, _, dispatcher, leaser, _, scheduler = make_running_manager()\n manager._callback = mock.sentinel.callback\n\n # Set up the messages.\n response = types.StreamingPullResponse(\n received_messages=[\n types.ReceivedMessage(\n ack_id=\"fack\", message=types.PubsubMessage(data=b\"foo\", message_id=\"1\")\n ),\n types.ReceivedMessage(\n ack_id=\"back\", message=types.PubsubMessage(data=b\"bar\", message_id=\"2\")\n ),\n ]\n )\n\n # adjust message bookkeeping in leaser\n fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)\n\n # Actually run the method and prove that modack and schedule\n # are called in the expected way.\n manager._on_response(response)\n\n dispatcher.modify_ack_deadline.assert_called_once_with(\n [requests.ModAckRequest(\"fack\", 10), requests.ModAckRequest(\"back\", 10)]\n )\n\n schedule_calls = scheduler.schedule.mock_calls\n assert len(schedule_calls) == 2\n for call in schedule_calls:\n assert call[1][0] == mock.sentinel.callback\n assert isinstance(call[1][1], message.Message)\n\n # the leaser load limit not hit, no messages had to be put on hold\n assert manager._messages_on_hold.qsize() == 0\n\n\ndef test__on_response_with_leaser_overload():\n manager, _, dispatcher, leaser, _, scheduler = make_running_manager()\n manager._callback = mock.sentinel.callback\n\n # Set up the messages.\n response = types.StreamingPullResponse(\n received_messages=[\n types.ReceivedMessage(\n ack_id=\"fack\", message=types.PubsubMessage(data=b\"foo\", message_id=\"1\")\n ),\n types.ReceivedMessage(\n ack_id=\"back\", message=types.PubsubMessage(data=b\"bar\", message_id=\"2\")\n ),\n types.ReceivedMessage(\n ack_id=\"zack\", message=types.PubsubMessage(data=b\"baz\", message_id=\"3\")\n ),\n ]\n )\n\n # Adjust message bookkeeping in leaser. 
Pick 999 messages, which is just below\n # the default FlowControl.max_messages limit.\n fake_leaser_add(leaser, init_msg_count=999, assumed_msg_size=10)\n\n # Actually run the method and prove that modack and schedule\n # are called in the expected way.\n manager._on_response(response)\n\n # all messages should be added to the lease management and have their ACK\n # deadline extended, even those not dispatched to callbacks\n dispatcher.modify_ack_deadline.assert_called_once_with(\n [\n requests.ModAckRequest(\"fack\", 10),\n requests.ModAckRequest(\"back\", 10),\n requests.ModAckRequest(\"zack\", 10),\n ]\n )\n\n # one message should be scheduled, the flow control limits allow for it\n schedule_calls = scheduler.schedule.mock_calls\n assert len(schedule_calls) == 1\n call_args = schedule_calls[0][1]\n assert call_args[0] == mock.sentinel.callback\n assert isinstance(call_args[1], message.Message)\n assert call_args[1].message_id == \"1\"\n\n # the rest of the messages should have been put on hold\n assert manager._messages_on_hold.qsize() == 2\n while True:\n try:\n msg = manager._messages_on_hold.get_nowait()\n except queue.Empty:\n break\n else:\n assert isinstance(msg, message.Message)\n assert msg.message_id in (\"2\", \"3\")\n\n\ndef test__on_response_none_data(caplog):\n caplog.set_level(logging.DEBUG)\n\n manager, _, dispatcher, leaser, _, scheduler = make_running_manager()\n manager._callback = mock.sentinel.callback\n\n # adjust message bookkeeping in leaser\n fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)\n\n manager._on_response(response=None)\n\n scheduler.schedule.assert_not_called()\n assert \"callback invoked with None\" in caplog.text\n\n\ndef test_retryable_stream_errors():\n # Make sure the config matches our hard-coded tuple of exceptions.\n interfaces = subscriber_client_config.config[\"interfaces\"]\n retry_codes = interfaces[\"google.pubsub.v1.Subscriber\"][\"retry_codes\"]\n idempotent = retry_codes[\"idempotent\"]\n\n status_codes = tuple(getattr(grpc.StatusCode, name, None) for name in idempotent)\n expected = tuple(\n exceptions.exception_class_for_grpc_status(status_code)\n for status_code in status_codes\n )\n assert set(expected).issubset(set(streaming_pull_manager._RETRYABLE_STREAM_ERRORS))\n\n\ndef test__should_recover_true():\n manager = make_manager()\n\n details = \"UNAVAILABLE. Service taking nap.\"\n exc = exceptions.ServiceUnavailable(details)\n\n assert manager._should_recover(exc) is True\n\n\ndef test__should_recover_false():\n manager = make_manager()\n\n exc = TypeError(\"wahhhhhh\")\n\n assert manager._should_recover(exc) is False\n\n\ndef test__should_terminate_true():\n manager = make_manager()\n\n details = \"Cancelled. 
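# test__on_response_with_leaser_overload above: every received message gets
# lease-managed and modack'ed, but only as many as flow control allows are
# dispatched; the rest are parked on the hold queue. A compact sketch of that
# split (assumed shapes, not the real implementation):
import queue

def dispatch_or_hold(received, capacity_left, schedule, on_hold):
    # Schedule up to `capacity_left` messages, queue the remainder.
    for i, msg in enumerate(received):
        if i < capacity_left:
            schedule(msg)
        else:
            on_hold.put(msg)

_held = queue.Queue()
_dispatched = []
dispatch_or_hold(['m1', 'm2', 'm3'], 1, _dispatched.append, _held)
assert _dispatched == ['m1'] and _held.qsize() == 2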
Go away, before I taunt you a second time.\"\n exc = exceptions.Cancelled(details)\n\n assert manager._should_terminate(exc) is True\n\n\ndef test__should_terminate_false():\n manager = make_manager()\n\n exc = TypeError(\"wahhhhhh\")\n\n assert manager._should_terminate(exc) is False\n\n\n@mock.patch(\"threading.Thread\", autospec=True)\ndef test__on_rpc_done(thread):\n manager = make_manager()\n\n manager._on_rpc_done(mock.sentinel.error)\n\n thread.assert_called_once_with(\n name=mock.ANY, target=manager.close, kwargs={\"reason\": mock.sentinel.error}\n )\n","sub_path":"pubsub/tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py","file_name":"test_streaming_pull_manager.py","file_ext":"py","file_size_in_byte":26215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"491074984","text":"import math\nimport sched\nimport time\nfrom typing import List, Tuple\n\nfrom dialog_bot_sdk.entities.ListLoadMode import ListLoadMode\nfrom dialog_bot_sdk.entities.ReferencedEntities import ReferencedEntities\nfrom dialog_bot_sdk.entities.UUID import UUID\nfrom dialog_bot_sdk.entities.UpdateInteractiveMediaEvent import UpdateInteractiveMediaEvent\nfrom dialog_bot_sdk.entities.UpdateMessage import UpdateMessage\nfrom dialog_bot_sdk.entities.message.TextMessage import MessageMedia\nfrom dialog_bot_sdk.interactive_media import InteractiveMediaGroup\nfrom google.protobuf import empty_pb2\nimport threading\nimport random\nimport grpc\nimport logging\n\nfrom dialog_bot_sdk.entities.message.Message import Message\nfrom dialog_bot_sdk.entities.Peer import Peer\nfrom dialog_bot_sdk.utils import POOL\nfrom .service import ManagedService\nfrom dialog_api import messaging_pb2, sequence_and_updates_pb2, peers_pb2\nfrom .content import content\nimport google.protobuf.wrappers_pb2 as wrappers_pb2\nfrom dialog_bot_sdk.utils import get_peer, async_dec, AsyncTask, is_image, get_uuids\n\nSCHEDULER = sched.scheduler(time.time, time.sleep)\nMAX_SLEEP_TIME = 30\nEXCEPTION_CODES = [1, 13]\n\n\nclass Messaging(ManagedService):\n retry = 0\n timer = 0\n \"\"\"Main messaging class.\n \"\"\"\n @async_dec()\n def send_message(self, peer: Peer or AsyncTask, text: str,\n interactive_media_groups: List[InteractiveMediaGroup] = None,\n uid: int = None) -> UUID:\n \"\"\"Send text message to peer.\n Message can contain interactive media groups (buttons, selects etc.).\n\n :param peer: Peer or AsyncTask (in which located User or Group)\n :param text: message text (not null)\n :param interactive_media_groups: groups of interactive media components (buttons etc.)\n :param uid: send message only for user by id\n :return: UUID (message id)\n \"\"\"\n peer = get_peer(peer)\n if text == '' or text is None:\n raise AttributeError('Text message must contain some text.')\n out_peer = self.manager.get_out_peer(peer)\n msg = messaging_pb2.MessageContent()\n msg.textMessage.text = text\n if interactive_media_groups is not None:\n for g in interactive_media_groups:\n media = msg.textMessage.media.add()\n g.render(media)\n request = messaging_pb2.RequestSendMessage(\n peer=out_peer,\n deduplication_id=random.randint(0, 100000000),\n message=msg,\n is_only_for_user=uid\n )\n return self.__send_message(request)\n\n @async_dec()\n def update_message(self, message: Message or AsyncTask, text: str,\n interactive_media_groups: List[InteractiveMediaGroup] = None) -> None:\n \"\"\"Update text message or interactive media (buttons, selects etc.).\n\n :param message: Message or AsyncTask (in which 
located Message)\n :param text: message text (not null)\n :param interactive_media_groups: groups of interactive media components (buttons etc.)\n :return: None\n \"\"\"\n msg = messaging_pb2.MessageContent()\n msg.textMessage.text = text\n if interactive_media_groups is not None:\n for g in interactive_media_groups:\n media = msg.textMessage.media.add()\n g.render(media)\n\n self.__update(self.__get_message(message), msg)\n\n @async_dec()\n def delete(self, message: Message or AsyncTask) -> None:\n \"\"\"Delete message.\n\n :param message: Message or AsyncTask (in which located Message)\n :return: None\n \"\"\"\n msg = messaging_pb2.MessageContent(\n deletedMessage=messaging_pb2.DeletedMessage(is_local=wrappers_pb2.BoolValue(value=False))\n )\n\n self.__update(self.__get_message(message), msg)\n\n @async_dec()\n def get_messages_by_id(self, mids: List[UUID or AsyncTask]) -> List[Message]:\n \"\"\"Find and return messages by UUIDs\n\n :param mids: list of UUID or AsyncTask (in which located UUID)\n :return: list of Messages\n \"\"\"\n mids = get_uuids(mids)\n request = sequence_and_updates_pb2.RequestGetReferencedEntitites(\n mids=mids\n )\n result = ReferencedEntities.from_api(self.internal.updates.GetReferencedEntitites(request))\n return result.messages\n\n @async_dec()\n def messages_read(self, peer: Peer or AsyncTask, date: int) -> None:\n \"\"\"Marking a message and all previous as read\n\n :param peer: Peer or AsyncTask (in which located User or Group)\n :param date: date of message\n :return: None\n \"\"\"\n peer = get_peer(peer)\n peer = self.manager.get_out_peer(peer)\n request = messaging_pb2.RequestMessageRead(\n peer=peer,\n date=date\n )\n self.internal.messaging.MessageRead(request)\n\n @async_dec()\n def send_file(self, peer: Peer or AsyncTask, file: str, uid: int = None) -> UUID or None:\n \"\"\"Send file to peer.\n\n :param peer: Peer or AsyncTask (in which located User or Group)\n :param file: path to file\n :param uid: send message only for user by id\n :return: UUID (message id)\n \"\"\"\n peer = get_peer(peer)\n location = self.internal.uploading.upload_file(file).wait()\n if location is None:\n return None\n location = location.to_api()\n\n out_peer = self.manager.get_out_peer(peer)\n msg = messaging_pb2.MessageContent()\n\n msg.documentMessage.CopyFrom(\n content.get_document_content(file, location)\n )\n\n request = messaging_pb2.RequestSendMessage(\n peer=out_peer,\n deduplication_id=random.randint(0, 100000000),\n message=msg,\n is_only_for_user=uid\n )\n return self.__send_message(request)\n\n @async_dec()\n def send_media(self, peer: Peer or AsyncTask, medias: List[MessageMedia], uid: int = None) -> UUID:\n \"\"\"Send media to peer.\n\n :param peer: Peer or AsyncTask (in which located User or Group)\n :param medias: medias (list of MessageMedias)\n :param uid: send message only for user by id\n :return: UUID (message id)\n \"\"\"\n peer = get_peer(peer)\n medias = self.__get_medias(medias)\n out_peer = self.manager.get_out_peer(peer)\n text_message = messaging_pb2.TextMessage()\n for media in medias:\n text_message.media.append(media)\n msg = messaging_pb2.MessageContent(textMessage=text_message)\n request = messaging_pb2.RequestSendMessage(\n peer=out_peer,\n deduplication_id=random.randint(0, 100000000),\n message=msg,\n is_only_for_user=uid\n )\n return self.__send_message(request)\n\n @async_dec()\n def send_image(self, peer: Peer or AsyncTask, file: str, uid: int = None) -> UUID or None:\n \"\"\"Send image as image (not as file) to peer.\n\n :param peer: 
Peer or AsyncTask (in which located User or Group)\n :param file: path to image file\n :param uid: send message only for user by id\n :return: UUID (message id)\n \"\"\"\n peer = get_peer(peer)\n\n if isinstance(file, str) and not is_image(file):\n raise IOError('File is not an image.')\n\n location = self.internal.uploading.upload_file(file).wait()\n if location is None:\n return None\n location = location.to_api()\n out_peer = self.manager.get_out_peer(peer)\n msg = messaging_pb2.MessageContent()\n\n msg.documentMessage.CopyFrom(\n content.get_image_content(file, location)\n )\n\n request = messaging_pb2.RequestSendMessage(\n peer=out_peer,\n deduplication_id=random.randint(0, 100000000),\n message=msg,\n is_only_for_user=uid\n )\n\n return self.__send_message(request)\n\n @async_dec()\n def reply(self, peer: Peer or AsyncTask, mids: List[UUID or AsyncTask], text: str = None,\n interactive_media_groups: List[InteractiveMediaGroup] = None, uid: int = None) -> UUID:\n \"\"\"Reply messages to peer. Message can contain interactive media groups (buttons, selects etc.).\n\n :param peer: Peer or AsyncTask (in which located User or Group)\n :param mids: list of UUIDs\n :param text: message text\n :param interactive_media_groups: groups of interactive media components (buttons etc.)\n :param uid: send message only for user by id\n :return: UUID (message id)\n \"\"\"\n peer = get_peer(peer)\n mids = get_uuids(mids)\n if text is None:\n text = ''\n\n out_peer = self.manager.get_out_peer(peer)\n msg = messaging_pb2.MessageContent()\n msg.textMessage.text = text\n if interactive_media_groups is not None:\n for g in interactive_media_groups:\n media = msg.textMessage.media.add()\n g.render(media)\n request = messaging_pb2.RequestSendMessage(\n peer=out_peer,\n deduplication_id=random.randint(0, 100000000),\n message=msg,\n reply=messaging_pb2.ReferencedMessages(mids=mids),\n is_only_for_user=uid\n )\n return self.__send_message(request)\n\n @async_dec()\n def forward(self, peer: Peer or AsyncTask, mids: List[UUID or AsyncTask], text: str = None,\n interactive_media_groups: List[InteractiveMediaGroup] = None, uid: int = None) -> UUID:\n \"\"\"Forward messages to peer. 
Message can contain interactive media groups (buttons, selects etc.).\n\n :param peer: Peer or AsyncTask (in which located User or Group)\n :param mids: list of UUIDs\n :param text: message text\n :param interactive_media_groups: groups of interactive media components (buttons etc.)\n :param uid: send message only for user by id\n :return: UUID (message id)\n \"\"\"\n peer = get_peer(peer)\n mids = get_uuids(mids)\n if text is None:\n text = ''\n\n out_peer, msg = self.__get_out_peer_and_message(peer, text, interactive_media_groups)\n request = messaging_pb2.RequestSendMessage(\n peer=out_peer,\n deduplication_id=random.randint(0, 100000000),\n message=msg,\n forward=messaging_pb2.ReferencedMessages(mids=mids),\n is_only_for_user=uid\n )\n return self.__send_message(request)\n\n @async_dec()\n def load_message_history(self, peer: Peer or AsyncTask, date: int = 0,\n direction: ListLoadMode = ListLoadMode.LISTLOADMODE_BACKWARD,\n limit: int = 2) -> List[Message]:\n \"\"\"Load and return messages by peer.\n\n :param peer: Peer or AsyncTask (in which located User or Group)\n :param date: date of message\n :param direction: ListLoadMode\n :param limit: messages count\n :return: list of Messages\n \"\"\"\n peer = get_peer(peer)\n out_peer = self.manager.get_out_peer(peer)\n request = messaging_pb2.RequestLoadHistory(\n peer=out_peer,\n date=date,\n load_mode=direction,\n limit=limit\n )\n return [Message.from_api(x) for x in self.internal.messaging.LoadHistory(request).history]\n\n def on_message_async(self, callback, interactive_media_callback=None) -> None:\n updates_thread = threading.Thread(target=self.on_message, args=(callback, interactive_media_callback))\n updates_thread.start()\n\n def on_message(self, callback, interactive_media_callback=None, raw_callback=None) -> None:\n \"\"\"Message receiving event handler.\n\n :param callback: function that will be called when message received\n :param interactive_media_callback: function that will be called when interactive media action is performed\n :param raw_callback: function to handle any other type of update\n :return: None\n \"\"\"\n while True:\n try:\n SCHEDULER.enter(min(math.exp(self.retry), MAX_SLEEP_TIME), 1, self.__on_message_schedule,\n kwargs={'callback': callback,\n 'interactive_media_callback': interactive_media_callback,\n 'raw_callback': raw_callback})\n SCHEDULER.run()\n except grpc.RpcError as e:\n logging.error(e)\n if e.details() == 'failed to connect to all addresses' or e._state.code.value[0] in EXCEPTION_CODES:\n self.timer += min(math.exp(self.retry), MAX_SLEEP_TIME)\n self.retry += 1\n except Exception as e:\n logging.error(e)\n self.timer += min(math.exp(self.retry), MAX_SLEEP_TIME)\n self.retry += 1\n\n def __on_message_schedule(self, callback, interactive_media_callback=None, raw_callback=None):\n try:\n self.internal.updates.GetState(sequence_and_updates_pb2.RequestGetState())\n if self.retry:\n logging.info(\"Server was unavailable {} seconds.\".format(int(self.timer)))\n self.timer = 0\n self.retry = 0\n except grpc.RpcError as e:\n raise e\n for update in self.internal.updates.SeqUpdates(empty_pb2.Empty()):\n up = sequence_and_updates_pb2.UpdateSeqUpdate()\n up.ParseFromString(update.update.value)\n if up.WhichOneof('update') == 'updateMessage':\n self.internal.messaging.MessageReceived(messaging_pb2.RequestMessageReceived(\n peer=self.manager.get_out_peer(up.updateMessage.peer),\n date=up.updateMessage.date\n ))\n self.internal.messaging.MessageRead(messaging_pb2.RequestMessageRead(\n 
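# The reconnect loop in on_message above sleeps min(exp(retry), MAX_SLEEP_TIME)
# between attempts, i.e. exponential backoff with a hard cap. The delay
# schedule, extracted as a standalone helper (hypothetical name):
import math

def backoff_delay(retry, cap=30):
    # Grows as e**retry: ~1s, ~2.7s, ~7.4s, ~20.1s, then pinned at `cap`.
    return min(math.exp(retry), cap)

assert backoff_delay(0) == 1.0
assert backoff_delay(10) == 30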
peer=self.manager.get_out_peer(up.updateMessage.peer),\n date=up.updateMessage.date\n ))\n POOL.submit(\n callback(UpdateMessage.from_api(up.updateMessage))\n )\n elif up.WhichOneof('update') == 'updateInteractiveMediaEvent' and \\\n callable(interactive_media_callback):\n POOL.submit(\n interactive_media_callback(UpdateInteractiveMediaEvent.from_api(up.updateInteractiveMediaEvent))\n )\n else:\n if callable(raw_callback):\n POOL.submit(\n raw_callback(up)\n )\n\n def __get_out_peer_and_message(self, peer: peers_pb2.Peer, text: str,\n interactive_media_groups: List[InteractiveMediaGroup]) \\\n -> Tuple[peers_pb2.OutPeer, messaging_pb2.MessageContent]:\n out_peer = self.manager.get_out_peer(peer)\n msg = messaging_pb2.MessageContent()\n msg.textMessage.text = text\n if interactive_media_groups is not None:\n for g in interactive_media_groups:\n media = msg.textMessage.media.add()\n g.render(media)\n return out_peer, msg\n\n @staticmethod\n def __get_medias(medias: List[MessageMedia]) -> List[messaging_pb2.MessageMedia] or None:\n for i in range(len(medias)):\n if medias[i].audio and isinstance(medias[i].audio.audio.file_location, AsyncTask):\n medias[i].audio.audio.file_location = medias[i].audio.audio.file_location.wait()\n if medias[i].image and isinstance(medias[i].image.image.file_location, AsyncTask):\n medias[i].image.image.file_location = medias[i].image.image.file_location.wait()\n if medias[i].web_page and medias[i].web_page.image and \\\n isinstance(medias[i].web_page.image.file_location, AsyncTask):\n medias[i].web_page.image.file_location = medias[i].web_page.image.file_location.wait()\n return [x.to_api() for x in medias]\n\n def __send_message(self, request: messaging_pb2.RequestSendMessage) -> UUID:\n return UUID.from_api(self.internal.messaging.SendMessage(request).message_id)\n\n def __update(self, message: Message, new_message: messaging_pb2.MessageContent) -> None:\n if hasattr(message, \"mid\"):\n mid = message.mid\n else:\n raise AttributeError(\"message has no attribute 'mid'\")\n\n if message.edited_at:\n last_edited_at = message.edited_at\n else:\n last_edited_at = message.date\n\n request = messaging_pb2.RequestUpdateMessage(\n mid=mid.to_api(),\n updated_message=new_message,\n last_edited_at=last_edited_at\n )\n self.internal.messaging.UpdateMessage(request)\n\n @staticmethod\n def __get_message(message: Message or AsyncTask) -> Message:\n try:\n if isinstance(message, AsyncTask):\n message = message.wait()[0]\n if not isinstance(message, Message):\n raise AttributeError()\n except Exception as e:\n raise AttributeError(\"if message is an AsyncTask, its result must be a list of Message\")\n return message\n","sub_path":"dialog_bot_sdk/messaging.py","file_name":"messaging.py","file_ext":"py","file_size_in_byte":17350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"616720897","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import signals\nfrom scrapy.exporters import CsvItemExporter\n\n\nclass ReceiptPipeline(object):\n def __init__(self):\n self.files = {}\n\n @classmethod\n def from_crawler(cls, crawler):\n pipeline = cls()\n crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)\n crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)\n return pipeline\n\n def spider_opened(self, spider):\n file = open('receipt.csv', 'w+b')\n self.files[spider] = file\n self.exporter = CsvItemExporter(file)\n self.exporter.fields_to_export = [\n 'date',\n 'document_id',\n 
'patient_id',\n 'patient',\n 'amount',\n 'cash',\n 'cart',\n 'bank',\n 'procedure',\n 'procedure_quantity',\n 'procedure_discount',\n 'procedure_price',\n 'procedure_amount',\n 'procedure_specialist',\n ]\n self.exporter.start_exporting()\n\n def spider_closed(self, spider):\n self.exporter.finish_exporting()\n file = self.files.pop(spider)\n file.close()\n\n def process_item(self, item, spider):\n self.exporter.export_item(item)\n return item\n","sub_path":"receipt/receipt/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"432541260","text":"#1 Display Welcome Message\nprint (\"Welcome to the 1089 trick!\")\n#2 Display About Message\nabout_message_1 = \"This trick's sure to amaze your friends.\"\nabout_message_2 = \"\"\"It's believed that the \"1089 trick\" was a favorite of Albert Einstein.\"\"\"\nabout_message_3 = \"To begin, enter a number with 3 digits, where the 1st digit and the 3rd digit differ by at least 2.\"\nprint (about_message_1,\"\\n\",about_message_2,\"\\n\",about_message_3,sep='')\n\nagain = \"y\"\nwhile again.lower()[0] == \"y\":\n#3 Input a number from the user\n number = input(\"Enter a number: \")\n goodToGo = False\n while not goodToGo:\n if not number.isdigit():\n print(\"Invalid Input. Number must be an integer.\")\n number = input(\"Enter a number: \")\n#4 Check that the number has 3 digits, and do not continue if not\n if not len(str(number)) == 3:\n print(\"You must enter a 3-digit number.\")\n number = input(\"Enter a number: \")\n#5 Check that the 1st digit and the 3rd digit differ by at least 2, and do not continue if not\n numberInt = int(number)\n firstNumber = numberInt // 100\n lastTwo = numberInt % 100\n secondNumber = lastTwo // 10\n thirdNumber = lastTwo % 10\n if not abs(firstNumber - thirdNumber) > 1:\n print(\"The 1st digit and the 3rd digit must differ by at least 2\")\n number = input(\"Enter a number: \")\n else:\n goodToGo = (number.isdigit() and len(number) == 3 and abs(firstNumber - thirdNumber) > 1)\n else:\n#6 Calculate the reverse of the input\n reversed = int(str(thirdNumber) + str(secondNumber) + str(firstNumber))\n#7 Calculate the difference of the input and its reverse\n difference = abs(numberInt - reversed)\n#8 Calculate the reverse of the difference\n firstDifference = difference // 100\n lastTwo = difference % 100\n secondDifference = lastTwo // 10\n thirdDifference = lastTwo % 10\n reversed_difference = int(str(thirdDifference) + str(secondDifference) + str(firstDifference))\n#9 Calculate the sum of the difference and its reverse\n sum = reversed_difference + difference\n#10 Output the results of the calculations\n print(\"The reverse of \",number,\" is \",reversed,\".\",sep='')\n print(\"The difference of \",number,\" and \",reversed,\" is \",difference,\".\",sep='')\n print(\"The reverse of \",difference,\" is \",reversed_difference,\".\",sep='')\n print(\"The sum of \",difference,\" and \",reversed_difference,\" is \",sum,\"!\",sep='')\n again = input (\"Again? y/n: \")\n\n","sub_path":"P1/Final/BlackmanMathis_Project1_Improved.py","file_name":"BlackmanMathis_Project1_Improved.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"344775431","text":"from typing import Callable, Dict, Tuple\n\nfrom . 
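# Why the 1089 trick in the record above always works: for a 3-digit number
# whose first and last digits a and c satisfy |a - c| >= 2, the difference
# with its reverse is 99*|a - c| (always a 3-digit multiple of 99, 198..792),
# and any such number plus its own reverse is 1089. A brute-force check:
for n in range(100, 1000):
    a, c = n // 100, n % 10
    if abs(a - c) < 2:
        continue
    diff = abs(n - int(str(n)[::-1]))
    assert diff + int(str(diff)[::-1]) == 1089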
import core_ops, more_ops\n\nfrom .CLVMObject import CLVMObject\nfrom .EvalError import EvalError\n\nfrom .casts import int_to_bytes\nfrom .op_utils import operators_for_module\n\n\nKEYWORDS = (\n \". q . a i c f r l x = sha256 + - * divmod \"\n \"substr strlen point_add pubkey_for_exp concat . > >s \"\n \"logand logior logxor lognot ash lsh \"\n \"not any all \"\n \"softfork \"\n).split()\n\nKEYWORD_FROM_ATOM = {int_to_bytes(k): v for k, v in enumerate(KEYWORDS)}\nKEYWORD_TO_ATOM = {v: k for k, v in KEYWORD_FROM_ATOM.items()}\n\nOP_REWRITE = {\n \"+\": \"add\",\n \"-\": \"subtract\",\n \"*\": \"multiply\",\n \"i\": \"if\",\n \"c\": \"cons\",\n \"f\": \"first\",\n \"r\": \"rest\",\n \"l\": \"listp\",\n \"x\": \"raise\",\n \"=\": \"eq\",\n \">\": \"gr\",\n \">s\": \"gr_bytes\",\n}\n\n\nclass OperatorDict(dict):\n \"\"\"\n This is a nice hack that adds `__call__` to a dictionary, so\n operators can be added dynamically.\n \"\"\"\n\n def __new__(class_, d: Dict):\n self = super(OperatorDict, class_).__new__(class_, d)\n self.unknown_op_handler = self.unknown_op_raise\n return self\n\n def __call__(self, op: bytes, arguments: CLVMObject) -> Tuple[int, CLVMObject]:\n f = self.get(op)\n if f is None:\n f = lambda args: self.unknown_op_handler(op, args)\n return f(arguments)\n\n def set_unknown_op_handler(self, callback: Callable[[bytes, CLVMObject], Tuple[int, CLVMObject]]):\n self.unknown_op_handler = callback\n\n def unknown_op_raise(self, op: bytes, arguments: CLVMObject):\n raise EvalError(\"unimplemented operator\", arguments.to(op))\n\n\nOPERATOR_LOOKUP = OperatorDict(\n operators_for_module(KEYWORD_TO_ATOM, core_ops, OP_REWRITE)\n)\nOPERATOR_LOOKUP.update(operators_for_module(KEYWORD_TO_ATOM, more_ops, OP_REWRITE))\n","sub_path":"clvm/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"189660582","text":"from django.db import transaction\nfrom umo.models import EduOrg, Kafedra, EduProgram, Specialization, Discipline, \\\n DisciplineDetails, Profile, Year, Semester, Teacher, Control, Position, BRSpoints\nfrom umo.objgens import check_edu_org\nimport xml.etree.ElementTree as ET\nimport re\n\n\ndef exclude_disciplines_from_program(education_program, including_disciplines=[], except_discipline=[]):\n disciplines = Discipline.objects.filter(program=education_program)\n if len(including_disciplines) > 0:\n disciplines = disciplines.filter(id__in=including_disciplines)\n if len(except_discipline) > 0:\n disciplines = disciplines.exclude(id__in=except_discipline)\n for dis in disciplines:\n if not BRSpoints.objects.filter(course__discipline_detail__discipline__id=dis.id, points__gt=0).exists():\n dis.delete()\n\n\ndef get_qualification(name, program_code=''):\n name = name.lower()\n if name == '1':\n return 1\n elif name == '2':\n if program_code == '3':\n return 4\n elif program_code == '4':\n return 5\n return 2\n elif name == '3':\n if program_code == '3':\n return 6\n elif program_code == '4':\n return 7\n return 3\n elif name == '7':\n return 8\n elif name == '10':\n return 9\n return 0\n\n\n@transaction.atomic\ndef parseRUP_fgos3plusplus(filename, kaf):\n #name_file_xml = os.path.join('upload', filename)\n ns = '{http://tempuri.org/dsMMISDB.xsd}'\n tree = ET.parse(filename)\n root = tree.getroot()\n # plan name\n name = root.get('LastName')\n root = root[0][0]\n # education level\n oop = root.find(ns+'ООП')\n level = 2 if oop.get('УровеньОбразования') == 
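# The OperatorDict in the clvm record above is a dict subclass whose __call__
# dispatches on the operator byte and falls back to a pluggable unknown-op
# handler. The same pattern in miniature (illustrative, not clvm's API):
class _CallableDict(dict):
    def __call__(self, key, *args):
        handler = self.get(key, self._unknown)
        return handler(*args)

    @staticmethod
    def _unknown(*args):
        raise KeyError('unimplemented operator')

_ops = _CallableDict({b'+': lambda a, b: a + b})
assert _ops(b'+', 2, 3) == 5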
'3' else 1\n # tag Специальность: get the specialization name\n #specs = oop.get('Название')\n spec_name = oop.get('Название')\n # second tag Специальность: get the profile name\n profile_name = 'Общий'\n profile_oop = oop.find(ns + 'ООП')\n if profile_oop is not None:\n profile_name = profile_oop.get('Название',)\n qual_name = oop.get('Квалификация', '')\n program_code = root.find(ns + 'Планы').get('КодПрограммы','')\n\n code = oop.get('Шифр','')\n yearp = root.find(ns + 'Планы').get('ГодНачалаПодготовки')\n\n year, created = Year.objects.get_or_create(year=yearp)\n\n sp, created = Specialization.objects.get_or_create(code=code, defaults={\n 'name': spec_name,\n 'brief_name': '',\n 'qual': get_qualification(qual_name, program_code),\n 'level': level\n })\n profile, created = Profile.objects.get_or_create(name=profile_name, spec=sp)\n\n edu_prog, created = EduProgram.objects.get_or_create(specialization=sp, profile=profile, cathedra=kaf, year=year, name=name)\n\n disciplines = root.findall(ns + 'ПланыСтроки')\n controls = {\"1\": 1, \"2\": 2, \"3\": 3, \"4\": 5, \"5\": 4, \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9, \"10\": 10, \"11\": 11, \"49\": 49}\n ids = []\n for d in disciplines:\n d_code = d.get('ДисциплинаКод', '')\n obj_code = d.get('Код', '')\n d_name = d.get('Дисциплина','')\n dis, created = Discipline.objects.update_or_create(code=d_code, program=edu_prog, defaults={'name': d_name})\n ids.append(dis.id)\n data = {}\n hours = root.findall(ns + 'ПланыНовыеЧасы[@КодОбъекта=\"' + obj_code + '\"][@КодТипаЧасов=\"1\"]')\n for item in hours:\n edu_year = int(item.get('Курс',0))\n semester = int(item.get('Семестр',0))\n semester = str(2*(edu_year - 1) + semester)\n if semester not in data.keys():\n data[semester] = {\n 'hours': {'101': 0, '102': 0, '103': 0, '104': 0, '105': 0, '106': 0, '107': 0, '108': 0, '109': 0, '110': 0,\n '111': 0, '112': 0, '113': 0, '114': 0, '115': 0, '116': 0, '117': 0, '118': 0, '119': 0, '139': 0,\n '140': 0, '141': 0, '142': 0},\n 'control': {},\n 'zed' : 0\n }\n w_type = item.get('КодВидаРаботы', '0')\n if w_type in data[semester]['hours'].keys():\n data[semester]['hours'][w_type] = int(item.get('Количество', '0'))\n elif w_type in controls.keys():\n data[semester]['control'][controls[w_type]] = int(item.get('Количество','0'))\n elif w_type == '50':\n data[semester]['zed'] = int(item.get('Количество','0'))\n for key in data.keys():\n semester, created = Semester.objects.get_or_create(name=key)\n defaults = {'Credit': data[key]['zed'],\n 'Lecture': data[key]['hours']['101'],\n 'Practice': data[key]['hours']['103'],\n 'Lab': data[key]['hours']['102'],\n 'KSR': data[key]['hours']['106'],\n 'SRS': data[key]['hours']['107']}\n dd, created = DisciplineDetails.objects.update_or_create(discipline=dis,\n semester=semester,\n defaults=defaults)\n dd.control_set.all().delete()\n for control_type in data[key]['control'].keys():\n c, created = Control.objects.update_or_create(discipline_detail=dd, control_type=control_type,\n defaults={'control_hours': data[key]['control'][control_type]})\n exclude_disciplines_from_program(edu_prog, except_discipline=ids)\n\n\ndef get_qualification_fgos3(name):\n name = name.lower()\n if 'специали' in name:\n return 1\n elif 'бакалавр' in name:\n if 'академ' in name:\n return 4\n elif 'приклад' in name:\n return 5\n return 2\n elif 'магистр' in name:\n return 3\n return 0\n\n\ndef get_education_level_fgos3(name):\n name = name.lower()\n if 'спо' in name:\n return 1\n elif 'впо' in name:\n return 2\n return 0\n\n\n@transaction.atomic\ndef 
parseRUP_fgos3(filename, kaf):\n #name_file_xml = os.path.join('upload', filename)\n tree = ET.parse(filename)\n root = tree.getroot()\n title = root[0][0]\n name = title.get('ПолноеИмяПлана')\n # education level\n level = root[0].get('УровеньОбразования')\n # tag Специальность: get the specialization name\n specs = title.find('Специальности')\n spec_name = ' '.join(specs[0].get('Название').split()[1:])\n # second tag Специальность: get the profile name\n if len(specs) > 1:\n profile_name = ' '.join(specs[1].get('Название').split()[1:])\n else:\n profile_name = 'Общий'\n #qual = root[0][0][7][0] # tag Квалификация: get the qualification\n qual_name = title.find('Квалификации')[0].get('Название')\n #code = root[0][0] # tag План: get КодКафедры and ПоследнийШифр\n code = title.get('ПоследнийШифр')\n yearp = title.get('ГодНачалаПодготовки')\n\n year, created = Year.objects.get_or_create(year=yearp)\n\n sp, created = Specialization.objects.get_or_create(code=code, defaults={\n 'name': spec_name,\n 'brief_name': '',\n 'qual': get_qualification_fgos3(qual_name),\n 'level': get_education_level_fgos3(level)\n })\n profile, created = Profile.objects.get_or_create(name=profile_name, spec=sp)\n\n edu_prog, created = EduProgram.objects.get_or_create(specialization=sp, profile=profile, cathedra=kaf, year=year, name=name)\n ids = []\n for elem in root[0][1]:\n disname = elem.get('Дис')\n code_dis = elem.get('ИдетификаторДисциплины')\n new_code = elem.get('НовИдДисциплины', code_dis)\n dis_kaf = elem.get('Кафедра', None)\n if dis_kaf is None or len(dis_kaf) < 1:\n continue\n\n dis = Discipline.objects.filter(code=code_dis, program=edu_prog, name=disname).first()\n if dis is None:\n dis = Discipline.objects.filter(code=new_code, program=edu_prog, name=disname).first()\n if dis is None:\n dis = Discipline.objects.create(name=disname, code=new_code, program=edu_prog)\n else:\n dis.name = disname\n dis.code = new_code\n dis.save()\n ids.append(dis.id)\n for details in elem.findall('Сем'):\n #if details is ('Ном' and 'Пр' and 'КСР' and 'СРС' and 'ЗЕТ') or ('Ном' and 'КСР' and 'СРС' and 'ЗЕТ') or ('Ном' and 'Лек' and 'Пр' and 'КСР' and 'СРС' and 'ЗЕТ') or ('Ном' and 'Лек' and 'Пр' and 'ЗЕТ') or ('Ном' and 'Лек' and 'Лаб' and 'КСР' and 'СРС' and 'ЗЕТ') or ('Ном' and 'Лек' and 'Лаб' and 'Пр' and 'КСР' and 'СРС' and 'ЗЕТ') or ('Ном' and 'Пр') or ('Ном' and 'СРС'):\n data = {'101': 0, '102': 0, '103': 0, '106': 0, '107': 0, '108': 0}\n total_h = 0\n for vz in details.findall('VZ'):\n if 'H' in vz.attrib.keys():\n data[vz.get(\"ID\")] = int(vz.get('H'))\n total_h += int(vz.get('H'))\n if total_h < 1:\n continue\n\n #semester_nom = '1'\n semester_nom = details.get('Ном','1')\n zet = details.get('ЗЕТ','1')\n z = details.get('Зач', None)\n exam = details.get('Экз', None)\n zO = details.get('ЗачО', None)\n CW = details.get('КР', None)\n\n semester, created = Semester.objects.get_or_create(name=semester_nom)\n defaults={'Credit': int(zet),\n 'Lecture': data['101'],\n 'Practice': data['103'],\n 'Lab': data['102'],\n 'KSR': data['106'],\n 'SRS': data['107']}\n d, created = DisciplineDetails.objects.update_or_create(discipline=dis,\n semester=semester,\n defaults=defaults)\n d.control_set.all().delete()\n if z is not None:\n c, created = Control.objects.get_or_create(discipline_detail=d, control_type=2,\n defaults={'control_hours': data['108']})\n if exam is not None:\n c, created = Control.objects.get_or_create(discipline_detail=d, control_type=1,\n defaults={'control_hours': data['108']})\n if zO is not None:\n c, created = 
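# Both parsers above walk an MMISDB XML plan using a hard-coded namespace
# prefix plus attribute lookups with defaults. The core ElementTree access
# pattern, reduced to a runnable sketch on a tiny synthetic document (the
# tag and attribute names here are illustrative, not the real schema):
import xml.etree.ElementTree as ET

_ns = '{http://tempuri.org/dsMMISDB.xsd}'
_doc = ET.fromstring(
    '<root xmlns="http://tempuri.org/dsMMISDB.xsd">'
    '<Row Code="B1" Hours="34"/><Row Code="B2" Hours="17"/></root>'
)
_rows = _doc.findall(_ns + 'Row')
assert [(r.get('Code'), int(r.get('Hours', '0'))) for r in _rows] == [
    ('B1', 34), ('B2', 17)
]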
Control.objects.get_or_create(discipline_detail=d, control_type=3,\n defaults={'control_hours': data['108']})\n if CW is not None:\n c, created = Control.objects.get_or_create(discipline_detail=d, control_type=4,\n defaults={'control_hours': 0})\n exclude_disciplines_from_program(edu_prog, except_discipline=ids)\n\n\ndef parseRUP(filename, cathedra):\n if '.plx' in filename:\n parseRUP_fgos3plusplus(filename, cathedra)\n else:\n parseRUP_fgos3(filename, cathedra)","sub_path":"nomenclature/parseRUP.py","file_name":"parseRUP.py","file_ext":"py","file_size_in_byte":11937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"534001161","text":"import os\nimport json\nimport shutil\nimport random\nimport numpy as np\nimport tensorflow as tf\n# tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # TF > 1.12\ntf.logging.set_verbosity(tf.logging.ERROR)\nfrom warnings import simplefilter\n\n# Seed for reproducibility of results\nseed = 1337\nrandom.seed(seed)\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\nfrom datasets.Movielens import Movielens\nfrom GANRec.CFGAN import CFGAN\nfrom Base.Evaluation.Evaluator import EvaluatorHoldout\n\n# Supress Tensorflow logs\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['KMP_WARNINGS'] = '0'\nsimplefilter(action='ignore', category=UserWarning)\nsimplefilter(action='ignore', category=FutureWarning)\n\nuse_gpu = False\nverbose = True\nonly_build = False\ntransposed = False\n\nif not use_gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n\nreader = Movielens(version='100K', split_ratio=[0.8, 0.2, 0.0], use_local=True, implicit=True, verbose=True, seed=seed)\n\nURM_train = reader.get_URM_train(transposed=transposed)\n# URM_validation = reader.get_URM_validation(transposed=transposed)\nURM_test = reader.get_URM_test(transposed=transposed)\n\nevaluator = EvaluatorHoldout(URM_test, [5, 20], exclude_seen=True)\n# evaluatorValidation = EvaluatorHoldout(URM_validation, [5], exclude_seen=True)\n\ngan = CFGAN(URM_train, mode='item')\n\ngan.fit(d_nodes=125,\n g_nodes=400,\n d_hidden_act='sigmoid',\n g_hidden_act='sigmoid',\n d_reg=0,\n g_reg=1e-3,\n d_lr=1e-4,\n g_lr=1e-4,\n d_batch_size=64,\n g_batch_size=32,\n g_steps=4,\n d_steps=2,\n scheme='ZP',\n zr_ratio=0.7,\n zp_ratio=0.7,\n zr_coefficient=0.03,\n allow_worse=5,\n freq=5,\n validation_evaluator=evaluator,\n sample_every=10,\n validation_set=URM_test)\n\nif not only_build:\n results_dic, results_run_string = evaluator.evaluateRecommender(gan)\n print(results_run_string)\n\n map_folder = os.path.join('plots', gan.RECOMMENDER_NAME, 'MAP_' + str(results_dic[5]['MAP'])[:7])\n if os.path.exists(map_folder):\n shutil.rmtree(map_folder)\n shutil.move(src=gan.logsdir, dst=map_folder)\n","sub_path":"run_CFGAN.py","file_name":"run_CFGAN.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"636404715","text":"from ui import Ui_AddAuthorDialog\nfrom PyQt5.QtWidgets import QWidget, QDialog\nfrom model import Author\n\n\nclass AddAuthorDialog(QDialog):\n\n author = None\n\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n self.new_item = None\n self.ui = Ui_AddAuthorDialog()\n self.ui.setupUi(self)\n\n def accept(self):\n birth_year = self.ui.birth_year.text()\n death_year = self.ui.death_year.text()\n self.author = Author(name=self.ui.name.text(),\n country=self.ui.country.text(),\n years='-'.join((birth_year,\n death_year))\n if len(death_year) > 
0\n else birth_year)\n self.parent().db.insert_author(self.author)\n self.close()\n","sub_path":"Laba4/3/dialogs/add_author_dialog.py","file_name":"add_author_dialog.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"91750650","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 26 13:09:32 2017\n\n@author: nl211\n\"\"\"\nimport sqlite3\nimport numpy as np\n\n\n#database_path = \"C:\\\\Users\\\\GOBS\\\\Dropbox\\\\Uni\\Other\\\\UROP - Salvador\\\\Niccolo_project\\\\Code\\\\Sainsburys.sqlite\" # Path to database file\ndatabase_path = \"Sainsburys.sqlite\" # Path to database file\n\n\n# auxiliary function\ndef nan_helper(y):\n return np.isnan(y), lambda z: z.nonzero()[0] \n\n#interpolate data from database to cover all the data range requested. \ndef interpolate(times, values, time_start, time_stop):\n Time_vec = np.linspace(time_start , time_stop-1, num =-time_start+time_stop ) \n Temp = np.empty(len(Time_vec))\n Temp[:] = np.NaN\n #populate with known values\n mask = np.in1d(Time_vec,times)\n Temp[mask] = values\n #populate with interpolated values\n nans, x= nan_helper(Temp)\n Temp[nans]= np.interp(x(nans), x(~nans), Temp[~nans])\n #quality control\n if len(x(nans)) > len(Temp)*0.6:\n print(\"nans:\", len(x(nans)), \"not nans:\", len(x(~nans)), \"... interpolated data are not reliable\") \n raise ValueError \n assert len(Temp) == len(Time_vec)\n return Time_vec, Temp\n\nclass store:\n \n def __init__(self, store_id):\n self.store_id = store_id\n self.HH_open = 14\n self.HH_close = 47\n try: \n conn = sqlite3.connect(database_path)\n cur = conn.cursor()\n except ValueError:\n print(\"Cannot connect to database\")\n try: \n cur.execute(\"SELECT DNO, name FROM Stores WHERE id = ?\", (store_id,))\n dummy = cur.fetchall()\n self.DNO = dummy[0][0] \n self.Voltage = 1 #all stores are assumed to have low voltage sub connection.\n self.name = dummy[0][1] \n except ValueError: \n print(\"Cannot retrieve store data\") \n try: \n cur.execute(\"SELECT PostCode, Lat, Lon, Area FROM Stores WHERE id = ?\", (store_id,))\n dummy = cur.fetchall()\n self.postcode = dummy[0][0]\n self.lat = dummy[0][1]\n self.lon = dummy[0][2]\n self.area = dummy[0][3]\n except ValueError: \n pass \n try: \n cur.execute(\"SELECT HH_WD_open, HH_WD_close, HH_Sat_open, HH_Sat_close, HH_Sun_open, HH_Sun_close FROM Stores WHERE id = ?\", (store_id,))\n dummy = cur.fetchall()\n self.HH_WD_open = dummy[0][0] \n self.HH_WD_close = dummy[0][1]\n self.HH_Sat_open = dummy[0][2]\n self.HH_Sat_close = dummy[0][3]\n self.HH_Sun_open = dummy[0][4]\n self.HH_Sun_close = dummy[0][5]\n except ValueError: \n pass \n conn.commit() \n \n \n\n #get demands from time_start (included) to time_stop (excluded) every HH. \n #time_start and time_stop have to be supplied as seconds from the epoch divided by 1800 (HH integer). \n def getSimpleDemand(self,time_start,time_stop, Utilities = [1,1,0]):\n \n Ele = Utilities[0]\n Gas = Utilities[1]\n Ref = Utilities[2] \n conn = sqlite3.connect(database_path)\n cur = conn.cursor()\n \n if Ele== 1:\n cur.execute('''SELECT Gas FROM Demand_Check Where Stores_id= ?''', (self.store_id,))\n dummy= cur.fetchone()\n try:\n if dummy[0] != 1:\n raise TypeError \n else:\n cur.execute('''SELECT Time_id, Gas FROM Demand Where Stores_id= ? AND Time_id > ? AND Time_id < ? 
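# The interpolate() helper in classStore above fills gaps in a half-hourly
# series: known (time, value) pairs are scattered onto a dense NaN grid with
# np.in1d, then the holes are closed by np.interp. Its effect on a toy
# series (same maths, without the database plumbing):
import numpy as np

_times = np.array([100, 102, 105])
_values = np.array([1.0, 3.0, 9.0])
_grid = np.arange(100, 106)
_filled = np.interp(_grid, _times, _values)
assert np.allclose(_filled, [1.0, 2.0, 3.0, 5.0, 7.0, 9.0])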
''', (self.store_id, time_start-1, time_stop))\n RawData = cur.fetchall()\n timeControl_start = RawData[0][0]\n timeControl_stop = RawData[-1][0]\n if timeControl_start == time_start and timeControl_stop == time_stop - 1:\n d_gas = np.array([elt[1] for elt in RawData]) \n timestamp = np.array([elt[0] for elt in RawData]) \n self.timestamp, self.d_gas = interpolate(timestamp, d_gas, time_start, time_stop) \n else:\n print(\"time_id requested out of range. Gas range:\", timeControl_start, timeControl_stop, \"you put:\", time_start, time_stop) \n raise ValueError \n except TypeError:\n print(\"We don't have the gas demand\")\n \n if Gas== 1:\n cur.execute('''SELECT Ele FROM Demand_Check Where Stores_id= ?''', (self.store_id,))\n dummy= cur.fetchone()\n try:\n if dummy[0] != 1:\n raise TypeError \n else:\n cur.execute('''SELECT Time_id, Ele FROM Demand Where Stores_id= ? AND Time_id > ? AND Time_id < ? ''', (self.store_id, time_start-1, time_stop))\n RawData = cur.fetchall()\n timeControl_start = RawData[0][0]\n timeControl_stop = RawData[-1][0]\n if timeControl_start == time_start and timeControl_stop == time_stop - 1:\n d_ele = np.array([elt[1] for elt in RawData]) \n timestamp = np.array([elt[0] for elt in RawData]) \n self.timestamp, self.d_ele = interpolate(timestamp, d_ele, time_start, time_stop) \n else:\n print(\"time_id requested out of range. Electricity range:\", timeControl_start, timeControl_stop, \"you put:\", time_start, time_stop) \n raise ValueError \n except TypeError:\n print(\"We don't have the electricity demand\") \n conn.commit() \n \n if Ref == 1:\n cur.execute('''SELECT Ref FROM Demand_Check Where Stores_id= ?''', (self.store_id,))\n dummy= cur.fetchone()\n try:\n if dummy[0] != 1:\n raise TypeError \n else:\n cur.execute('''SELECT Time_id, Ref FROM Demand Where Stores_id= ? AND Time_id > ? AND Time_id < ? ''', (self.store_id, time_start-1, time_stop))\n RawData = cur.fetchall()\n timeControl_start = RawData[0][0]\n timeControl_stop = RawData[-1][0]\n if timeControl_start == time_start and timeControl_stop == time_stop - 1:\n d_ref = np.array([elt[1] for elt in RawData])\n timestamp = np.array([elt[0] for elt in RawData]) \n self.timestamp, self.d_ref = interpolate(timestamp, d_ref, time_start, time_stop) \n else:\n print(\"time_id requested out of range. Refrigeration range:\", timeControl_start, timeControl_stop, \"you put:\", time_start, time_stop) \n raise ValueError \n except TypeError:\n print(\"We don't have the Refrigeration demand\") \n \n conn.commit() \n ## get carbon factors (2016 values)\n self.crc = 16.1 #£/tCO2\n self.cf_ele = 0.412 #kgCO2/kWh\n self.cf_gas = 0.184 #kgCO2/kWh\n self.cf_diesel = 0.244 #kgCO2/kWh \n \n \n #get prices from time_start (included) to time_stop (excluded) every HH. \n #time_start and time_stop have to be supplied as seconds from the epoch divided by 1800 (HH integer). \n def getSimplePrice(self, time_start, time_stop, string_table): \n conn = sqlite3.connect(database_path)\n cur = conn.cursor()\n try:\n sql_string = '''SELECT id, Ele, Gas, Ele_exp FROM {Table_name} Where DNO= ? AND Voltage = ? AND id > ? 
AND id < ?'''\n sql = sql_string.format(Table_name=string_table)\n cur.execute(sql, (self.DNO-9, self.Voltage, time_start-1, time_stop)) \n RawData = cur.fetchall()\n timeControl_start = RawData[0][0]\n timeControl_stop = RawData[-1][0]\n if timeControl_start == time_start and timeControl_stop == time_stop - 1:\n p_ele = np.array([elt[1] for elt in RawData]) \n p_gas = np.array([elt[2] for elt in RawData]) \n p_ele_exp = np.array([elt[3] for elt in RawData]) \n timestamp = np.array([elt[0] for elt in RawData]) \n ## interpolate missing values\n self.timestamp, self.p_ele = interpolate(timestamp, p_ele, time_start, time_stop) \n self.timestamp, self.p_gas = interpolate(timestamp, p_gas, time_start, time_stop) \n self.timestamp, self.p_ele_exp = interpolate(timestamp, p_ele_exp, time_start, time_stop) \n else:\n print(\"time_id requested out of range. Price range:\", timeControl_start, timeControl_stop, \"you put:\", time_start, time_stop) \n raise ValueError \n except:\n print(\"An error occurred. Possibly the selected table doesn't exist. Please choose a valid table\") \n conn.commit()\n \n def getWeatherData(self,time_start,time_stop): \n #get data from the closest weather station.\n self.putMIDASstation() \n #get weather data\n conn = sqlite3.connect(database_path)\n cur = conn.cursor() \n found = 0\n i = 0\n while found == 0:\n MIDAStry = self.MIDASall[i]\n i = i+1\n cur.execute('''SELECT Time_id, Temp, dewpoint, wetb_temp, rltv_hum FROM Weather WHERE Station_id = ? AND Time_id > ? AND Time_id < ?''', (MIDAStry, time_start-1, time_stop )) \n Raw_data = cur.fetchall()\n Times = np.array([elt[0] for elt in Raw_data])\n if len(Times) < 0.40*(time_stop - time_start): # not enough values, reiterate\n pass\n else:\n self.MIDAS = MIDAStry\n found = 1\n #remove empty string values. not clean, should be changed somehow.\n TempData =np.array([elt[1] for elt in Raw_data])\n try: ## check if empty strings are present and process\n TempData[TempData == ' '] = 'NaN'\n TempData.astype(np.float)\n except:\n pass\n \n Time_vec, Temp = interpolate(Times, TempData, time_start, time_stop)\n self.temp = Temp \n self.timestamp = Time_vec\n ##put irradiance using 2015-2016 data (irr doesn't vary much and I was lazy)\n found = 0\n i = 0 \n time_start_irr= 788928 #1/1/2015\n time_stop_irr= 806446 #1/1/2016 \n while found == 0:\n MIDAStry = self.MIDASall[i]\n i = i+1\n cur.execute('''SELECT Time_id, irr FROM Weather WHERE Station_id = ? AND Time_id > ? AND Time_id < ?''', (MIDAStry, time_start_irr-1, time_stop_irr )) \n Raw_data = cur.fetchall()\n Times = np.array([elt[0] for elt in Raw_data])\n irr_data2 = np.array([elt[1] for elt in Raw_data])\n if len([i for i in irr_data2 if i is not None]) < 0.40*(time_stop_irr - time_start_irr): # not enough values, reiterate\n pass\n else:\n found = 1\n #remove empty string values. 
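# getSimplePrice above splices string_table into the SQL text with str.format
# because placeholders cannot bind identifiers. That is only safe when the
# table name is validated first; a defensive sketch (the whitelist contents
# are illustrative):
_ALLOWED_PRICE_TABLES = {'Prices_2016', 'Prices_2017'}

def price_query(table_name):
    if table_name not in _ALLOWED_PRICE_TABLES:
        raise ValueError('unknown price table: %r' % table_name)
    return ('SELECT id, Ele, Gas, Ele_exp FROM {} '
            'WHERE DNO = ? AND Voltage = ? AND id > ? AND id < ?').format(table_name)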
not clean, should be changed somehow.\n try: ## check if empty strings are present and process\n irr_data2[irr_data2 == ' '] = 'NaN'\n irr_data2.astype(np.float)\n except:\n pass\n Time_vec, irr = interpolate(Times, irr_data2, time_start_irr, time_stop_irr)\n dummy = np.append(irr, irr)\n self.irr = dummy[:len(self.timestamp)] \n \n \n \n \n def putMIDASstation(self): \n #get closest MIDAS stations.\n conn = sqlite3.connect(database_path)\n cur = conn.cursor()\n cur.execute('''SELECT * FROM MIDAS_stations''')\n Raw_data = cur.fetchall()\n Data = np.array([elt for elt in Raw_data])\n AvData = Data[Data[:,4]==1]\n id_MIDAS = np.array(AvData[:,0])\n Lat = np.array(AvData[:,2])\n Lon = np.array(AvData[:,3])\n d = np.square(Lat- self.lat)+np.square(Lon - self.lon)\n pos = np.argsort(d)\n self.MIDAS = id_MIDAS[pos[0]] \n self.MIDASall = id_MIDAS[pos]\n \n \n def getTSODemand(self): \n pass\n","sub_path":"Common/classStore.py","file_name":"classStore.py","file_ext":"py","file_size_in_byte":12749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
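# putMIDASstation above ranks stations by squared lat/lon differences in
# degrees, which is fine for choosing the nearest of a local set but is not a
# true ground distance (longitude degrees shrink with latitude; a haversine
# formula would be the exact alternative). The ranking step in isolation:
import numpy as np

_station_lat = np.array([51.5, 52.2, 50.9])
_station_lon = np.array([-0.1, 0.1, -1.4])
_lat, _lon = 51.4, -0.3
_d2 = np.square(_station_lat - _lat) + np.square(_station_lon - _lon)
_order = np.argsort(_d2)  # nearest first; _order[0] is the chosen station
assert _order[0] == 0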